//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "MCTargetDesc/X86ShuffleDecode.h"
#include "X86.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cctype>
#include <numeric>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<int> ExperimentalPrefInnermostLoopAlignment(
    "x86-experimental-pref-innermost-loop-alignment", cl::init(4),
    cl::desc(
        "Sets the preferable loop alignment for experiments (as log2 bytes) "
        "for innermost loops only. If specified, this option overrides "
        "alignment set by x86-experimental-pref-loop-alignment."),
    cl::Hidden);

static cl::opt<bool> MulConstantOptimization(
    "mul-constant-optimization", cl::init(true),
    cl::desc("Replace 'mul x, Const' with more effective instructions like "
             "SHIFT, LEA, etc."),
    cl::Hidden);

static cl::opt<bool> ExperimentalUnorderedISEL(
    "x86-experimental-unordered-atomic-isel", cl::init(false),
    cl::desc("Use LoadSDNode and StoreSDNode instead of "
             "AtomicSDNode for unordered atomic loads and "
             "stores respectively."),
    cl::Hidden);

/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget.isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget.is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides and use cheaper ones.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget.hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
      addBypassSlowDiv(64, 32);
  }

  // Setup Windows compiler runtime calls.
  if (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::SDIV_I64, "_alldiv", CallingConv::X86_StdCall },
      { RTLIB::UDIV_I64, "_aulldiv", CallingConv::X86_StdCall },
      { RTLIB::SREM_I64, "_allrem", CallingConv::X86_StdCall },
      { RTLIB::UREM_I64, "_aullrem", CallingConv::X86_StdCall },
      { RTLIB::MUL_I64, "_allmul", CallingConv::X86_StdCall },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget.getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }
  // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size
  // to 32 bits so AtomicExpandPass will expand it and we don't need cmpxchg8b.
  // FIXME: Should we be limiting the atomic size on other configs? Default is
  // 1024.
  if (!Subtarget.canUseCMPXCHG8B())
    setMaxAtomicSizeInBitsSupported(32);

  setMaxDivRemBitWidthSupported(Subtarget.is64Bit() ? 128 : 64);

  setMaxLargeFPConvertBitWidthSupported(128);

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget.is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) {
    setCondCodeAction(ISD::SETOEQ, VT, Expand);
    setCondCodeAction(ISD::SETUNE, VT, Expand);
  }

  // Integer absolute.
  if (Subtarget.canUseCMOV()) {
    setOperationAction(ISD::ABS            , MVT::i16  , Custom);
    setOperationAction(ISD::ABS            , MVT::i32  , Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS          , MVT::i64  , Custom);
  }

  // Signed saturation subtraction.
  setOperationAction(ISD::SSUBSAT          , MVT::i8   , Custom);
  setOperationAction(ISD::SSUBSAT          , MVT::i16  , Custom);
  setOperationAction(ISD::SSUBSAT          , MVT::i32  , Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SSUBSAT        , MVT::i64  , Custom);

  // Funnel shifts.
  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
    // For slow shld targets we only lower for code size.
    LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal;

    setOperationAction(ShiftOp             , MVT::i8   , Custom);
    setOperationAction(ShiftOp             , MVT::i16  , Custom);
    setOperationAction(ShiftOp             , MVT::i32  , ShiftDoubleAction);
    if (Subtarget.is64Bit())
      setOperationAction(ShiftOp           , MVT::i64  , ShiftDoubleAction);
  }

  if (!Subtarget.useSoftFloat()) {
    // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
    // operation.
    setOperationAction(ISD::UINT_TO_FP,        MVT::i8, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::UINT_TO_FP,        MVT::i16, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD or VCVTUSI2SS/SD for other targets.
    setOperationAction(ISD::UINT_TO_FP,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);

    // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::SINT_TO_FP,        MVT::i8, Promote);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
    // SSE has no i16 to fp conversion, only i32. We promote in the handler
    // to allow f80 to use i16 and f64 to use i16 with sse1 only
    setOperationAction(ISD::SINT_TO_FP,        MVT::i16, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
    // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not
    setOperationAction(ISD::SINT_TO_FP,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::SINT_TO_FP,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);

    // Promote i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::FP_TO_SINT,        MVT::i8,  Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8,  Promote);
    setOperationAction(ISD::FP_TO_SINT,        MVT::i16, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);

    // Handle FP_TO_UINT by promoting the destination to a larger signed
    // conversion.
    setOperationAction(ISD::FP_TO_UINT,        MVT::i8,  Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8,  Promote);
    setOperationAction(ISD::FP_TO_UINT,        MVT::i16, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);

    setOperationAction(ISD::LRINT,             MVT::f32, Custom);
    setOperationAction(ISD::LRINT,             MVT::f64, Custom);
    setOperationAction(ISD::LLRINT,            MVT::f32, Custom);
    setOperationAction(ISD::LLRINT,            MVT::f64, Custom);

    if (!Subtarget.is64Bit()) {
      setOperationAction(ISD::LRINT,  MVT::i64, Custom);
      setOperationAction(ISD::LLRINT, MVT::i64, Custom);
    }
  }

  if (Subtarget.hasSSE2()) {
    // Custom lowering for saturating float to int conversions.
    // We handle promotion to larger result types manually.
    for (MVT VT : { MVT::i8, MVT::i16, MVT::i32 }) {
      setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
    }
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
    }
  }

  // Handle address space casts between mixed sized pointers.
  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!Subtarget.hasSSE2()) {
    setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
    setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
    }
  } else if (!Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST      , MVT::i64  , Custom);

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
  }

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
                   MVT::i8,  MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::BR_CC,     VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);

  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
  setOperationAction(ISD::FREM             , MVT::f128 , Expand);

  if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) {
    setOperationAction(ISD::GET_ROUNDING   , MVT::i32  , Custom);
    setOperationAction(ISD::SET_ROUNDING   , MVT::Other, Custom);
  }

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationPromotedToType(ISD::CTTZ           , MVT::i8   , MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
  // Promoted i16. tzcntw has a false dependency on Intel CPUs. For BSF, we emit
  // a REP prefix to encode it as TZCNT for modern CPUs so it makes sense to
  // promote that too.
  setOperationPromotedToType(ISD::CTTZ           , MVT::i16  , MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , MVT::i32);

  if (!Subtarget.hasBMI()) {
    setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Legal);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
    }
  }

  if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationPromotedToType(ISD::CTLZ           , MVT::i8   , MVT::i32);
    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
  } else {
    for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
      if (VT == MVT::i64 && !Subtarget.is64Bit())
        continue;
      setOperationAction(ISD::CTLZ           , VT, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
    }
  }

  for (auto Op : {ISD::FP16_TO_FP, ISD::STRICT_FP16_TO_FP, ISD::FP_TO_FP16,
                  ISD::STRICT_FP_TO_FP16}) {
    // Special handling for half-precision floating point conversions.
    // If we don't have F16C support, then lower half float conversions
    // into library calls.
    setOperationAction(
        Op, MVT::f32,
        (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
    // There's never any support for operations beyond MVT::f32.
    setOperationAction(Op, MVT::f64, Expand);
    setOperationAction(Op, MVT::f80, Expand);
    setOperationAction(Op, MVT::f128, Expand);
  }

  for (MVT VT : {MVT::f32, MVT::f64, MVT::f80, MVT::f128}) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::bf16, Expand);
    setTruncStoreAction(VT, MVT::f16, Expand);
    setTruncStoreAction(VT, MVT::bf16, Expand);

    setOperationAction(ISD::BF16_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_BF16, VT, Custom);
  }

  setOperationAction(ISD::PARITY, MVT::i8, Custom);
  setOperationAction(ISD::PARITY, MVT::i16, Custom);
  setOperationAction(ISD::PARITY, MVT::i32, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::PARITY, MVT::i64, Custom);
  if (Subtarget.hasPOPCNT()) {
    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
    // popcntw is longer to encode than popcntl and also has a false dependency
    // on the dest that popcntl hasn't had since Cannon Lake.
    setOperationPromotedToType(ISD::CTPOP, MVT::i16, MVT::i32);
  } else {
    setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
    else
      setOperationAction(ISD::CTPOP        , MVT::i64  , Custom);
  }

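  // RDTSC/RDTSCP return the 64-bit counter split across EDX:EAX, so
  // READCYCLECOUNTER needs custom lowering to reassemble the result.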
  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);

  if (!Subtarget.hasMOVBE())
    setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);

  // X86 wants to expand cmov itself.
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
  }
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC,  VT, Custom);
  }

  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);

  setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

  // Darwin ABI issue.
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::ConstantPool    , VT, Custom);
    setOperationAction(ISD::JumpTable       , VT, Custom);
    setOperationAction(ISD::GlobalAddress   , VT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
    setOperationAction(ISD::ExternalSymbol  , VT, Custom);
    setOperationAction(ISD::BlockAddress    , VT, Custom);
  }

  // 64-bit shl, sra, srl (iff 32-bit x86)
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SHL_PARTS, VT, Custom);
    setOperationAction(ISD::SRA_PARTS, VT, Custom);
    setOperationAction(ISD::SRL_PARTS, VT, Custom);
  }

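  // Only custom lower PREFETCH when the target actually has a prefetch
  // instruction (SSE PREFETCHT*/PREFETCHNTA or 3DNow! PREFETCHW).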
  if (Subtarget.hasSSEPrefetch() || Subtarget.hasThreeDNow())
    setOperationAction(ISD::PREFETCH      , MVT::Other, Custom);

  setOperationAction(ISD::ATOMIC_FENCE  , MVT::Other, Custom);

  // Expand certain atomics
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget.is64Bit())
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);

  if (Subtarget.canUseCMPXCHG16B())
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  // FIXME - use subtarget debug flags
  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  if (Subtarget.isTargetPS())
    setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
  else
    setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  bool Is64Bit = Subtarget.is64Bit();
  setOperationAction(ISD::VAARG,  MVT::Other, Is64Bit ? Custom : Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);

  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);

  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);

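  // Helper that sets a common legalize action on the bulk of the f16
  // operations. FCOPYSIGN is always expanded and SELECT is always custom;
  // everything else gets the supplied action (e.g. Promote for scalar f16,
  // Expand for v8f16 below).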
  auto setF16Action = [&] (MVT VT, LegalizeAction Action) {
    setOperationAction(ISD::FABS, VT, Action);
    setOperationAction(ISD::FNEG, VT, Action);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FREM, VT, Action);
    setOperationAction(ISD::FMA, VT, Action);
    setOperationAction(ISD::FMINNUM, VT, Action);
    setOperationAction(ISD::FMAXNUM, VT, Action);
    setOperationAction(ISD::FMINIMUM, VT, Action);
    setOperationAction(ISD::FMAXIMUM, VT, Action);
    setOperationAction(ISD::FSIN, VT, Action);
    setOperationAction(ISD::FCOS, VT, Action);
    setOperationAction(ISD::FSINCOS, VT, Action);
    setOperationAction(ISD::FSQRT, VT, Action);
    setOperationAction(ISD::FPOW, VT, Action);
    setOperationAction(ISD::FLOG, VT, Action);
    setOperationAction(ISD::FLOG2, VT, Action);
    setOperationAction(ISD::FLOG10, VT, Action);
    setOperationAction(ISD::FEXP, VT, Action);
    setOperationAction(ISD::FEXP2, VT, Action);
    setOperationAction(ISD::FCEIL, VT, Action);
    setOperationAction(ISD::FFLOOR, VT, Action);
    setOperationAction(ISD::FNEARBYINT, VT, Action);
    setOperationAction(ISD::FRINT, VT, Action);
    setOperationAction(ISD::BR_CC, VT, Action);
    setOperationAction(ISD::SETCC, VT, Action);
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SELECT_CC, VT, Action);
    setOperationAction(ISD::FROUND, VT, Action);
    setOperationAction(ISD::FROUNDEVEN, VT, Action);
    setOperationAction(ISD::FTRUNC, VT, Action);
  };

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    // f16, f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f16, Subtarget.hasAVX512() ? &X86::FR16XRegClass
                                                     : &X86::FR16RegClass);
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                     : &X86::FR64RegClass);

    // Disable f32->f64 extload as we can only generate this in one instruction
    // under optsize. So it's easier to pattern match (fpext (load)) for that
    // case instead of needing to emit 2 instructions for extload in the
    // non-optsize case.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      // Use ANDPD to simulate FABS.
      setOperationAction(ISD::FABS, VT, Custom);

      // Use XORP to simulate FNEG.
      setOperationAction(ISD::FNEG, VT, Custom);

      // Use ANDPD and ORPD to simulate FCOPYSIGN.
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);

      // These might be better off as horizontal vector ops.
      setOperationAction(ISD::FADD, VT, Custom);
      setOperationAction(ISD::FSUB, VT, Custom);

      // We don't support sin/cos/fmod
      setOperationAction(ISD::FSIN   , VT, Expand);
      setOperationAction(ISD::FCOS   , VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }

    // Half type will be promoted by default.
    setF16Action(MVT::f16, Promote);
    setOperationAction(ISD::FADD, MVT::f16, Promote);
    setOperationAction(ISD::FSUB, MVT::f16, Promote);
    setOperationAction(ISD::FMUL, MVT::f16, Promote);
    setOperationAction(ISD::FDIV, MVT::f16, Promote);
    setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);

    setOperationAction(ISD::STRICT_FADD, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSUB, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMUL, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FDIV, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMA, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMINNUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMAXNUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMINIMUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FMAXIMUM, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FPOW, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FLOG, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FLOG2, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FLOG10, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FEXP, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FEXP2, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FCEIL, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FRINT, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f16, Promote);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);

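    // Libcalls used when an f16 <-> f32 conversion cannot be done inline.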
    setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
    setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");

    // Lower this to MOVMSK plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

  } else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() &&
             (UseX87 || Is64Bit)) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    if (UseX87)
      addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    if (UseX87)
      setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    if (UseX87)
      setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN   , MVT::f32, Expand);
    setOperationAction(ISD::FCOS   , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    if (UseX87) {
      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (UseX87) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      setOperationAction(ISD::UNDEF,     VT, Expand);
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);

      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN   , VT, Expand);
      setOperationAction(ISD::FCOS   , VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }
  }

  // Expand FP32 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f32)) {
    if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
      addLegalFPImmediate(APFloat(+0.0f)); // FLD0
      addLegalFPImmediate(APFloat(+1.0f)); // FLD1
      addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0f)); // xorps
  }
  // Expand FP64 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f64)) {
    if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
      addLegalFPImmediate(APFloat(+0.0)); // FLD0
      addLegalFPImmediate(APFloat(+1.0)); // FLD1
      addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0)); // xorpd
  }
  // Support fp16 0 immediate.
  if (isTypeLegal(MVT::f16))
    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEhalf()));

  // Handle constrained floating-point operations of scalar.
  setOperationAction(ISD::STRICT_FADD,      MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FADD,      MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB,      MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB,      MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL,      MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL,      MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV,      MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV,      MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSQRT,     MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSQRT,     MVT::f64, Legal);

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // f80 always uses X87.
  if (UseX87) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }

    // Always expand sin/cos functions even though x87 has an instruction.
    setOperationAction(ISD::FSIN   , MVT::f80, Expand);
    setOperationAction(ISD::FCOS   , MVT::f80, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f80, Expand);

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL,  MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT,  MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
    setOperationAction(ISD::LROUND, MVT::f80, Expand);
    setOperationAction(ISD::LLROUND, MVT::f80, Expand);
    setOperationAction(ISD::LRINT, MVT::f80, Custom);
    setOperationAction(ISD::LLRINT, MVT::f80, Custom);

    // Handle constrained floating-point operations of scalar.
    setOperationAction(ISD::STRICT_FADD     , MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FSUB     , MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FMUL     , MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FDIV     , MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FSQRT    , MVT::f80, Legal);
    if (isTypeLegal(MVT::f16)) {
      setOperationAction(ISD::FP_EXTEND, MVT::f80, Custom);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Custom);
    } else {
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
    }
    // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
    // as Custom.
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
  }

  // f128 uses xmm registers, but most operations require libcalls.
  if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                   : &X86::VR128RegClass);

    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps

    setOperationAction(ISD::FADD,        MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
    setOperationAction(ISD::FSUB,        MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
    setOperationAction(ISD::FDIV,        MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
    setOperationAction(ISD::FMUL,        MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
    setOperationAction(ISD::FMA,         MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FMA,  MVT::f128, LibCall);

    setOperationAction(ISD::FABS, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);

    setOperationAction(ISD::FSIN,         MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSIN,  MVT::f128, LibCall);
    setOperationAction(ISD::FCOS,         MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FCOS,  MVT::f128, LibCall);
    setOperationAction(ISD::FSINCOS,      MVT::f128, LibCall);
    // No STRICT_FSINCOS
    setOperationAction(ISD::FSQRT,        MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);

    setOperationAction(ISD::FP_EXTEND,        MVT::f128, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
    // We need to custom handle any FP_ROUND with an f128 input, but
    // LegalizeDAG uses the result type to know when to run a custom handler.
    // So we have to list all legal floating point result types here.
    if (isTypeLegal(MVT::f32)) {
      setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
    }
    if (isTypeLegal(MVT::f64)) {
      setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
    }
    if (isTypeLegal(MVT::f80)) {
      setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
    }

    setOperationAction(ISD::SETCC, MVT::f128, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
    setTruncStoreAction(MVT::f128, MVT::f32, Expand);
    setTruncStoreAction(MVT::f128, MVT::f64, Expand);
    setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f128 , Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // Some FP actions are always expanded for vector types.
  for (auto VT : { MVT::v8f16, MVT::v16f16, MVT::v32f16,
                   MVT::v4f32, MVT::v8f32,  MVT::v16f32,
                   MVT::v2f64, MVT::v4f64,  MVT::v8f64 }) {
    setOperationAction(ISD::FSIN,      VT, Expand);
    setOperationAction(ISD::FSINCOS,   VT, Expand);
    setOperationAction(ISD::FCOS,      VT, Expand);
    setOperationAction(ISD::FREM,      VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FPOW,      VT, Expand);
    setOperationAction(ISD::FLOG,      VT, Expand);
    setOperationAction(ISD::FLOG2,     VT, Expand);
    setOperationAction(ISD::FLOG10,    VT, Expand);
    setOperationAction(ISD::FEXP,      VT, Expand);
    setOperationAction(ISD::FEXP2,     VT, Expand);
  }

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
    setOperationAction(ISD::FMA,  VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
      // split/scalarized right now.
      if (VT.getVectorElementType() == MVT::f16 ||
          VT.getVectorElementType() == MVT::bf16)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
    setOperationAction(ISD::FABS,               MVT::v4f32, Custom);
    setOperationAction(ISD::FCOPYSIGN,          MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);

    setOperationAction(ISD::LOAD,               MVT::v2f32, Custom);
    setOperationAction(ISD::STORE,              MVT::v2f32, Custom);

    setOperationAction(ISD::STRICT_FADD,        MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB,        MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL,        MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV,        MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f32, Legal);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8f16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
                     MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
      setOperationAction(ISD::UREM, VT, Custom);
    }

    setOperationAction(ISD::MUL,                MVT::v2i8,  Custom);
    setOperationAction(ISD::MUL,                MVT::v4i8,  Custom);
    setOperationAction(ISD::MUL,                MVT::v8i8,  Custom);

    setOperationAction(ISD::MUL,                MVT::v16i8, Custom);
    setOperationAction(ISD::MUL,                MVT::v4i32, Custom);
    setOperationAction(ISD::MUL,                MVT::v2i64, Custom);
    setOperationAction(ISD::MULHU,              MVT::v4i32, Custom);
    setOperationAction(ISD::MULHS,              MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU,              MVT::v16i8, Custom);
    setOperationAction(ISD::MULHS,              MVT::v16i8, Custom);
    setOperationAction(ISD::MULHU,              MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS,              MVT::v8i16, Legal);
    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
1042     setOperationAction(ISD::AVGCEILU,           MVT::v16i8, Legal);
1043     setOperationAction(ISD::AVGCEILU,           MVT::v8i16, Legal);
1044 
1045     setOperationAction(ISD::SMULO,              MVT::v16i8, Custom);
1046     setOperationAction(ISD::UMULO,              MVT::v16i8, Custom);
1047     setOperationAction(ISD::UMULO,              MVT::v2i32, Custom);
1048 
1049     setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
1050     setOperationAction(ISD::FABS,               MVT::v2f64, Custom);
1051     setOperationAction(ISD::FCOPYSIGN,          MVT::v2f64, Custom);
1052 
1053     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1054       setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
1055       setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
1056       setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
1057       setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
1058     }
1059 
1060     setOperationAction(ISD::UADDSAT,            MVT::v16i8, Legal);
1061     setOperationAction(ISD::SADDSAT,            MVT::v16i8, Legal);
1062     setOperationAction(ISD::USUBSAT,            MVT::v16i8, Legal);
1063     setOperationAction(ISD::SSUBSAT,            MVT::v16i8, Legal);
1064     setOperationAction(ISD::UADDSAT,            MVT::v8i16, Legal);
1065     setOperationAction(ISD::SADDSAT,            MVT::v8i16, Legal);
1066     setOperationAction(ISD::USUBSAT,            MVT::v8i16, Legal);
1067     setOperationAction(ISD::SSUBSAT,            MVT::v8i16, Legal);
1068     setOperationAction(ISD::USUBSAT,            MVT::v4i32, Custom);
1069     setOperationAction(ISD::USUBSAT,            MVT::v2i64, Custom);
1070 
1071     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
1072     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
1073     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
1074     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
1075 
1076     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1077       setOperationAction(ISD::SETCC,              VT, Custom);
1078       setOperationAction(ISD::STRICT_FSETCC,      VT, Custom);
1079       setOperationAction(ISD::STRICT_FSETCCS,     VT, Custom);
1080       setOperationAction(ISD::CTPOP,              VT, Custom);
1081       setOperationAction(ISD::ABS,                VT, Custom);
1082 
1083       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1084       // setcc all the way to isel and prefer SETGT in some isel patterns.
1085       setCondCodeAction(ISD::SETLT, VT, Custom);
1086       setCondCodeAction(ISD::SETLE, VT, Custom);
1087     }
1088 
1089     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
1090       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1091       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1092       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1093       setOperationAction(ISD::VSELECT,            VT, Custom);
1094       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1095     }
1096 
1097     for (auto VT : { MVT::v8f16, MVT::v2f64, MVT::v2i64 }) {
1098       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1099       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1100       setOperationAction(ISD::VSELECT,            VT, Custom);
1101 
1102       if (VT == MVT::v2i64 && !Subtarget.is64Bit())
1103         continue;
1104 
1105       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1106       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1107     }
1108     setF16Action(MVT::v8f16, Expand);
1109     setOperationAction(ISD::FADD, MVT::v8f16, Expand);
1110     setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
1111     setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
1112     setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
1113 
1114     // Custom lower v2i64 and v2f64 selects.
1115     setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
1116     setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
1117     setOperationAction(ISD::SELECT,             MVT::v4i32, Custom);
1118     setOperationAction(ISD::SELECT,             MVT::v8i16, Custom);
1119     setOperationAction(ISD::SELECT,             MVT::v8f16, Custom);
1120     setOperationAction(ISD::SELECT,             MVT::v16i8, Custom);
1121 
1122     setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Custom);
1123     setOperationAction(ISD::FP_TO_UINT,         MVT::v4i32, Custom);
1124     setOperationAction(ISD::FP_TO_SINT,         MVT::v2i32, Custom);
1125     setOperationAction(ISD::FP_TO_UINT,         MVT::v2i32, Custom);
1126     setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v4i32, Custom);
1127     setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v2i32, Custom);
1128 
1129     // Custom legalize these to avoid over promotion or custom promotion.
1130     for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
1131       setOperationAction(ISD::FP_TO_SINT,        VT, Custom);
1132       setOperationAction(ISD::FP_TO_UINT,        VT, Custom);
1133       setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
1134       setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
1135     }
1136 
1137     setOperationAction(ISD::SINT_TO_FP,         MVT::v4i32, Custom);
1138     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v4i32, Custom);
1139     setOperationAction(ISD::SINT_TO_FP,         MVT::v2i32, Custom);
1140     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2i32, Custom);
1141 
1142     setOperationAction(ISD::UINT_TO_FP,         MVT::v2i32, Custom);
1143     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2i32, Custom);
1144 
1145     setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Custom);
1146     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v4i32, Custom);
1147 
1148     // Fast v2f32 SINT_TO_FP/UINT_TO_FP( v2i32 ) custom conversions.
1149     setOperationAction(ISD::SINT_TO_FP,         MVT::v2f32, Custom);
1150     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2f32, Custom);
1151     setOperationAction(ISD::UINT_TO_FP,         MVT::v2f32, Custom);
1152     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2f32, Custom);
1153 
1154     setOperationAction(ISD::FP_EXTEND,          MVT::v2f32, Custom);
1155     setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v2f32, Custom);
1156     setOperationAction(ISD::FP_ROUND,           MVT::v2f32, Custom);
1157     setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v2f32, Custom);
1158 
1159     // We want to legalize this to an f64 load rather than an i64 load on
1160     // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
1161     // store.
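         // E.g. a v2i32 load is performed as a single scalar f64 load into an
         // XMM register and bitcast, rather than going through the GPRs.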
1162     setOperationAction(ISD::LOAD,               MVT::v2i32, Custom);
1163     setOperationAction(ISD::LOAD,               MVT::v4i16, Custom);
1164     setOperationAction(ISD::LOAD,               MVT::v8i8,  Custom);
1165     setOperationAction(ISD::STORE,              MVT::v2i32, Custom);
1166     setOperationAction(ISD::STORE,              MVT::v4i16, Custom);
1167     setOperationAction(ISD::STORE,              MVT::v8i8,  Custom);
1168 
1169     // Add 32-bit vector stores to help vectorization opportunities.
1170     setOperationAction(ISD::STORE,              MVT::v2i16, Custom);
1171     setOperationAction(ISD::STORE,              MVT::v4i8,  Custom);
1172 
1173     setOperationAction(ISD::BITCAST,            MVT::v2i32, Custom);
1174     setOperationAction(ISD::BITCAST,            MVT::v4i16, Custom);
1175     setOperationAction(ISD::BITCAST,            MVT::v8i8,  Custom);
1176     if (!Subtarget.hasAVX512())
1177       setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
1178 
1179     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
1180     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
1181     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
1182 
1183     setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1184 
1185     setOperationAction(ISD::TRUNCATE,    MVT::v2i8,  Custom);
1186     setOperationAction(ISD::TRUNCATE,    MVT::v2i16, Custom);
1187     setOperationAction(ISD::TRUNCATE,    MVT::v2i32, Custom);
1188     setOperationAction(ISD::TRUNCATE,    MVT::v4i8,  Custom);
1189     setOperationAction(ISD::TRUNCATE,    MVT::v4i16, Custom);
1190     setOperationAction(ISD::TRUNCATE,    MVT::v8i8,  Custom);
1191 
1192     // In the customized shift lowering, the legal v4i32/v2i64 cases
1193     // in AVX2 will be recognized.
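         // (AVX2 provides per-element variable shifts: VPSLLVD/VPSRLVD/VPSRAVD
         // and VPSLLVQ/VPSRLVQ.)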
1194     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1195       setOperationAction(ISD::SRL,              VT, Custom);
1196       setOperationAction(ISD::SHL,              VT, Custom);
1197       setOperationAction(ISD::SRA,              VT, Custom);
1198       if (VT == MVT::v2i64) continue;
1199       setOperationAction(ISD::ROTL,             VT, Custom);
1200       setOperationAction(ISD::ROTR,             VT, Custom);
1201       setOperationAction(ISD::FSHL,             VT, Custom);
1202       setOperationAction(ISD::FSHR,             VT, Custom);
1203     }
1204 
1205     setOperationAction(ISD::STRICT_FSQRT,       MVT::v2f64, Legal);
1206     setOperationAction(ISD::STRICT_FADD,        MVT::v2f64, Legal);
1207     setOperationAction(ISD::STRICT_FSUB,        MVT::v2f64, Legal);
1208     setOperationAction(ISD::STRICT_FMUL,        MVT::v2f64, Legal);
1209     setOperationAction(ISD::STRICT_FDIV,        MVT::v2f64, Legal);
1210   }
1211 
1212   if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1213     setOperationAction(ISD::ABS,                MVT::v16i8, Legal);
1214     setOperationAction(ISD::ABS,                MVT::v8i16, Legal);
1215     setOperationAction(ISD::ABS,                MVT::v4i32, Legal);
1216     setOperationAction(ISD::BITREVERSE,         MVT::v16i8, Custom);
1217     setOperationAction(ISD::CTLZ,               MVT::v16i8, Custom);
1218     setOperationAction(ISD::CTLZ,               MVT::v8i16, Custom);
1219     setOperationAction(ISD::CTLZ,               MVT::v4i32, Custom);
1220     setOperationAction(ISD::CTLZ,               MVT::v2i64, Custom);
1221 
1222     // These might be better off as horizontal vector ops.
1223     setOperationAction(ISD::ADD,                MVT::i16, Custom);
1224     setOperationAction(ISD::ADD,                MVT::i32, Custom);
1225     setOperationAction(ISD::SUB,                MVT::i16, Custom);
1226     setOperationAction(ISD::SUB,                MVT::i32, Custom);
1227   }
1228 
1229   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1230     for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1231       setOperationAction(ISD::FFLOOR,            RoundedTy,  Legal);
1232       setOperationAction(ISD::STRICT_FFLOOR,     RoundedTy,  Legal);
1233       setOperationAction(ISD::FCEIL,             RoundedTy,  Legal);
1234       setOperationAction(ISD::STRICT_FCEIL,      RoundedTy,  Legal);
1235       setOperationAction(ISD::FTRUNC,            RoundedTy,  Legal);
1236       setOperationAction(ISD::STRICT_FTRUNC,     RoundedTy,  Legal);
1237       setOperationAction(ISD::FRINT,             RoundedTy,  Legal);
1238       setOperationAction(ISD::STRICT_FRINT,      RoundedTy,  Legal);
1239       setOperationAction(ISD::FNEARBYINT,        RoundedTy,  Legal);
1240       setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy,  Legal);
1241       setOperationAction(ISD::FROUNDEVEN,        RoundedTy,  Legal);
1242       setOperationAction(ISD::STRICT_FROUNDEVEN, RoundedTy,  Legal);
1243 
1244       setOperationAction(ISD::FROUND,            RoundedTy,  Custom);
1245     }
1246 
1247     setOperationAction(ISD::SMAX,               MVT::v16i8, Legal);
1248     setOperationAction(ISD::SMAX,               MVT::v4i32, Legal);
1249     setOperationAction(ISD::UMAX,               MVT::v8i16, Legal);
1250     setOperationAction(ISD::UMAX,               MVT::v4i32, Legal);
1251     setOperationAction(ISD::SMIN,               MVT::v16i8, Legal);
1252     setOperationAction(ISD::SMIN,               MVT::v4i32, Legal);
1253     setOperationAction(ISD::UMIN,               MVT::v8i16, Legal);
1254     setOperationAction(ISD::UMIN,               MVT::v4i32, Legal);
1255 
1256     setOperationAction(ISD::UADDSAT,            MVT::v4i32, Custom);
1257     setOperationAction(ISD::SADDSAT,            MVT::v2i64, Custom);
1258     setOperationAction(ISD::SSUBSAT,            MVT::v2i64, Custom);
1259 
1260     // FIXME: Do we need to handle scalar-to-vector here?
1261     setOperationAction(ISD::MUL,                MVT::v4i32, Legal);
1262     setOperationAction(ISD::SMULO,              MVT::v2i32, Custom);
1263 
1264     // We directly match byte blends in the backend as they match the VSELECT
1265     // condition form.
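         // (PBLENDVB selects each byte based on the sign bit of the
         // corresponding mask byte, which is exactly the VSELECT form.)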
1266     setOperationAction(ISD::VSELECT,            MVT::v16i8, Legal);
1267 
1268     // SSE41 brings specific instructions for doing vector sign extend even in
1269     // cases where we don't have SRA.
1270     for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1271       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1272       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1273     }
1274 
1275     // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
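         // e.g. a v4i8 -> v4i32 sextload maps directly to PMOVSXBD.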
1276     for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1277       setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8,  Legal);
1278       setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8,  Legal);
1279       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8,  Legal);
1280       setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1281       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1282       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1283     }
1284 
1285     if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
1286       // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we
1287       // can do the pre and post work in the vector domain.
1288       setOperationAction(ISD::UINT_TO_FP,        MVT::v4i64, Custom);
1289       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
1290       // We need to mark SINT_TO_FP as Custom even though we want to expand it
1291       // so that DAG combine doesn't try to turn it into uint_to_fp.
1292       setOperationAction(ISD::SINT_TO_FP,        MVT::v4i64, Custom);
1293       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
1294     }
1295   }
1296 
1297   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) {
1298     setOperationAction(ISD::UADDSAT,            MVT::v2i64, Custom);
1299   }
1300 
1301   if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1302     for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1303                      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1304       setOperationAction(ISD::ROTL, VT, Custom);
1305       setOperationAction(ISD::ROTR, VT, Custom);
1306     }
1307 
1308     // XOP can efficiently perform BITREVERSE with VPPERM.
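         // (VPPERM's per-byte op selector includes a bit-reversed copy of the
         // source byte.)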
1309     for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1310       setOperationAction(ISD::BITREVERSE, VT, Custom);
1311 
1312     for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1313                      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1314       setOperationAction(ISD::BITREVERSE, VT, Custom);
1315   }
1316 
1317   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1318     bool HasInt256 = Subtarget.hasInt256();
1319 
1320     addRegisterClass(MVT::v32i8,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1321                                                      : &X86::VR256RegClass);
1322     addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1323                                                      : &X86::VR256RegClass);
1324     addRegisterClass(MVT::v16f16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1325                                                      : &X86::VR256RegClass);
1326     addRegisterClass(MVT::v8i32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1327                                                      : &X86::VR256RegClass);
1328     addRegisterClass(MVT::v8f32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1329                                                      : &X86::VR256RegClass);
1330     addRegisterClass(MVT::v4i64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1331                                                      : &X86::VR256RegClass);
1332     addRegisterClass(MVT::v4f64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1333                                                      : &X86::VR256RegClass);
1334 
1335     for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1336       setOperationAction(ISD::FFLOOR,            VT, Legal);
1337       setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1338       setOperationAction(ISD::FCEIL,             VT, Legal);
1339       setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1340       setOperationAction(ISD::FTRUNC,            VT, Legal);
1341       setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1342       setOperationAction(ISD::FRINT,             VT, Legal);
1343       setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1344       setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1345       setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1346       setOperationAction(ISD::FROUNDEVEN,        VT, Legal);
1347       setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1348 
1349       setOperationAction(ISD::FROUND,            VT, Custom);
1350 
1351       setOperationAction(ISD::FNEG,              VT, Custom);
1352       setOperationAction(ISD::FABS,              VT, Custom);
1353       setOperationAction(ISD::FCOPYSIGN,         VT, Custom);
1354     }
1355 
1356     // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1357     // even though v8i16 is a legal type.
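         // i.e. the conversion is performed as v8f32 -> v8i32 and the result is
         // then truncated to v8i16.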
1358     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i16, MVT::v8i32);
1359     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i16, MVT::v8i32);
1360     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1361     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1362     setOperationAction(ISD::FP_TO_SINT,                MVT::v8i32, Custom);
1363     setOperationAction(ISD::FP_TO_UINT,                MVT::v8i32, Custom);
1364     setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v8i32, Custom);
1365 
1366     setOperationAction(ISD::SINT_TO_FP,         MVT::v8i32, Custom);
1367     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v8i32, Custom);
1368     setOperationAction(ISD::FP_EXTEND,          MVT::v8f32, Expand);
1369     setOperationAction(ISD::FP_ROUND,           MVT::v8f16, Expand);
1370     setOperationAction(ISD::FP_EXTEND,          MVT::v4f64, Custom);
1371     setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v4f64, Custom);
1372 
1373     setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v4f32, Legal);
1374     setOperationAction(ISD::STRICT_FADD,        MVT::v8f32, Legal);
1375     setOperationAction(ISD::STRICT_FADD,        MVT::v4f64, Legal);
1376     setOperationAction(ISD::STRICT_FSUB,        MVT::v8f32, Legal);
1377     setOperationAction(ISD::STRICT_FSUB,        MVT::v4f64, Legal);
1378     setOperationAction(ISD::STRICT_FMUL,        MVT::v8f32, Legal);
1379     setOperationAction(ISD::STRICT_FMUL,        MVT::v4f64, Legal);
1380     setOperationAction(ISD::STRICT_FDIV,        MVT::v8f32, Legal);
1381     setOperationAction(ISD::STRICT_FDIV,        MVT::v4f64, Legal);
1382     setOperationAction(ISD::STRICT_FSQRT,       MVT::v8f32, Legal);
1383     setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f64, Legal);
1384 
1385     if (!Subtarget.hasAVX512())
1386       setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1387 
1388     // In the customized shift lowering, the legal v8i32/v4i64 cases
1389     // in AVX2 will be recognized.
1390     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1391       setOperationAction(ISD::SRL, VT, Custom);
1392       setOperationAction(ISD::SHL, VT, Custom);
1393       setOperationAction(ISD::SRA, VT, Custom);
1394       if (VT == MVT::v4i64) continue;
1395       setOperationAction(ISD::ROTL, VT, Custom);
1396       setOperationAction(ISD::ROTR, VT, Custom);
1397       setOperationAction(ISD::FSHL, VT, Custom);
1398       setOperationAction(ISD::FSHR, VT, Custom);
1399     }
1400 
1401     // These types need custom splitting if their input is a 128-bit vector.
1402     setOperationAction(ISD::SIGN_EXTEND,       MVT::v8i64,  Custom);
1403     setOperationAction(ISD::SIGN_EXTEND,       MVT::v16i32, Custom);
1404     setOperationAction(ISD::ZERO_EXTEND,       MVT::v8i64,  Custom);
1405     setOperationAction(ISD::ZERO_EXTEND,       MVT::v16i32, Custom);
1406 
1407     setOperationAction(ISD::SELECT,            MVT::v4f64, Custom);
1408     setOperationAction(ISD::SELECT,            MVT::v4i64, Custom);
1409     setOperationAction(ISD::SELECT,            MVT::v8i32, Custom);
1410     setOperationAction(ISD::SELECT,            MVT::v16i16, Custom);
1411     setOperationAction(ISD::SELECT,            MVT::v16f16, Custom);
1412     setOperationAction(ISD::SELECT,            MVT::v32i8, Custom);
1413     setOperationAction(ISD::SELECT,            MVT::v8f32, Custom);
1414 
1415     for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1416       setOperationAction(ISD::SIGN_EXTEND,     VT, Custom);
1417       setOperationAction(ISD::ZERO_EXTEND,     VT, Custom);
1418       setOperationAction(ISD::ANY_EXTEND,      VT, Custom);
1419     }
1420 
1421     setOperationAction(ISD::TRUNCATE,          MVT::v16i8, Custom);
1422     setOperationAction(ISD::TRUNCATE,          MVT::v8i16, Custom);
1423     setOperationAction(ISD::TRUNCATE,          MVT::v4i32, Custom);
1424     setOperationAction(ISD::BITREVERSE,        MVT::v32i8, Custom);
1425 
1426     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1427       setOperationAction(ISD::SETCC,           VT, Custom);
1428       setOperationAction(ISD::STRICT_FSETCC,   VT, Custom);
1429       setOperationAction(ISD::STRICT_FSETCCS,  VT, Custom);
1430       setOperationAction(ISD::CTPOP,           VT, Custom);
1431       setOperationAction(ISD::CTLZ,            VT, Custom);
1432 
1433       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1434       // setcc all the way to isel and prefer SETGT in some isel patterns.
1435       setCondCodeAction(ISD::SETLT, VT, Custom);
1436       setCondCodeAction(ISD::SETLE, VT, Custom);
1437     }
1438 
1439     if (Subtarget.hasAnyFMA()) {
1440       for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1441                        MVT::v2f64, MVT::v4f64 }) {
1442         setOperationAction(ISD::FMA, VT, Legal);
1443         setOperationAction(ISD::STRICT_FMA, VT, Legal);
1444       }
1445     }
1446 
1447     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1448       setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1449       setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1450     }
1451 
1452     setOperationAction(ISD::MUL,       MVT::v4i64,  Custom);
1453     setOperationAction(ISD::MUL,       MVT::v8i32,  HasInt256 ? Legal : Custom);
1454     setOperationAction(ISD::MUL,       MVT::v16i16, HasInt256 ? Legal : Custom);
1455     setOperationAction(ISD::MUL,       MVT::v32i8,  Custom);
1456 
1457     setOperationAction(ISD::MULHU,     MVT::v8i32,  Custom);
1458     setOperationAction(ISD::MULHS,     MVT::v8i32,  Custom);
1459     setOperationAction(ISD::MULHU,     MVT::v16i16, HasInt256 ? Legal : Custom);
1460     setOperationAction(ISD::MULHS,     MVT::v16i16, HasInt256 ? Legal : Custom);
1461     setOperationAction(ISD::MULHU,     MVT::v32i8,  Custom);
1462     setOperationAction(ISD::MULHS,     MVT::v32i8,  Custom);
1463     setOperationAction(ISD::AVGCEILU,  MVT::v16i16, HasInt256 ? Legal : Custom);
1464     setOperationAction(ISD::AVGCEILU,  MVT::v32i8,  HasInt256 ? Legal : Custom);
1465 
1466     setOperationAction(ISD::SMULO,     MVT::v32i8, Custom);
1467     setOperationAction(ISD::UMULO,     MVT::v32i8, Custom);
1468 
1469     setOperationAction(ISD::ABS,       MVT::v4i64,  Custom);
1470     setOperationAction(ISD::SMAX,      MVT::v4i64,  Custom);
1471     setOperationAction(ISD::UMAX,      MVT::v4i64,  Custom);
1472     setOperationAction(ISD::SMIN,      MVT::v4i64,  Custom);
1473     setOperationAction(ISD::UMIN,      MVT::v4i64,  Custom);
1474 
1475     setOperationAction(ISD::UADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1476     setOperationAction(ISD::SADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1477     setOperationAction(ISD::USUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1478     setOperationAction(ISD::SSUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1479     setOperationAction(ISD::UADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1480     setOperationAction(ISD::SADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1481     setOperationAction(ISD::USUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1482     setOperationAction(ISD::SSUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1483     setOperationAction(ISD::UADDSAT,   MVT::v8i32, Custom);
1484     setOperationAction(ISD::USUBSAT,   MVT::v8i32, Custom);
1485     setOperationAction(ISD::UADDSAT,   MVT::v4i64, Custom);
1486     setOperationAction(ISD::USUBSAT,   MVT::v4i64, Custom);
1487 
1488     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1489       setOperationAction(ISD::ABS,  VT, HasInt256 ? Legal : Custom);
1490       setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1491       setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1492       setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1493       setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1494     }
1495 
1496     for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1497       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1498       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1499     }
1500 
1501     if (HasInt256) {
1502       // The custom lowering of UINT_TO_FP for v8i32 becomes interesting
1503       // when we have a 256-bit-wide blend with immediate.
1504       setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1505       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1506 
1507       // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1508       for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1509         setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1510         setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i8,  Legal);
1511         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i8,  Legal);
1512         setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i16, Legal);
1513         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i16, Legal);
1514         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i32, Legal);
1515       }
1516     }
1517 
1518     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1519                      MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1520       setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
1521       setOperationAction(ISD::MSTORE, VT, Legal);
1522     }
1523 
1524     // Extract subvector is special because the value type
1525     // (result) is 128-bit but the source is 256-bit wide.
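         // (Extracting the upper half maps to VEXTRACTF128/VEXTRACTI128; the
         // lower half is just a subregister copy.)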
1526     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1527                      MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
1528       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1529     }
1530 
1531     // Custom lower several nodes for 256-bit types.
1532     for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1533                     MVT::v16f16, MVT::v8f32, MVT::v4f64 }) {
1534       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1535       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1536       setOperationAction(ISD::VSELECT,            VT, Custom);
1537       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1538       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1539       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1540       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
1541       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
1542       setOperationAction(ISD::STORE,              VT, Custom);
1543     }
1544     setF16Action(MVT::v16f16, Expand);
1545     setOperationAction(ISD::FADD, MVT::v16f16, Expand);
1546     setOperationAction(ISD::FSUB, MVT::v16f16, Expand);
1547     setOperationAction(ISD::FMUL, MVT::v16f16, Expand);
1548     setOperationAction(ISD::FDIV, MVT::v16f16, Expand);
1549 
1550     if (HasInt256) {
1551       setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1552 
1553       // Custom legalize 2x32 to get a little better code.
1554       setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1555       setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1556 
1557       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1558                        MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1559         setOperationAction(ISD::MGATHER,  VT, Custom);
1560     }
1561   }
1562 
1563   if (!Subtarget.useSoftFloat() && !Subtarget.hasFP16() &&
1564       Subtarget.hasF16C()) {
1565     for (MVT VT : { MVT::f16, MVT::v2f16, MVT::v4f16, MVT::v8f16 }) {
1566       setOperationAction(ISD::FP_ROUND,           VT, Custom);
1567       setOperationAction(ISD::STRICT_FP_ROUND,    VT, Custom);
1568     }
1569     for (MVT VT : { MVT::f32, MVT::v2f32, MVT::v4f32 }) {
1570       setOperationAction(ISD::FP_EXTEND,          VT, Custom);
1571       setOperationAction(ISD::STRICT_FP_EXTEND,   VT, Custom);
1572     }
1573     for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1574       setOperationPromotedToType(Opc, MVT::v8f16, MVT::v8f32);
1575       setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1576     }
1577 
1578     setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1579     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f32, Legal);
1580   }
1581 
1582   // This block controls legalization of the mask vector sizes that are
1583   // available with AVX512. 512-bit vectors are in a separate block controlled
1584   // by useAVX512Regs.
1585   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1586     addRegisterClass(MVT::v1i1,   &X86::VK1RegClass);
1587     addRegisterClass(MVT::v2i1,   &X86::VK2RegClass);
1588     addRegisterClass(MVT::v4i1,   &X86::VK4RegClass);
1589     addRegisterClass(MVT::v8i1,   &X86::VK8RegClass);
1590     addRegisterClass(MVT::v16i1,  &X86::VK16RegClass);
1591 
1592     setOperationAction(ISD::SELECT,             MVT::v1i1, Custom);
1593     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1594     setOperationAction(ISD::BUILD_VECTOR,       MVT::v1i1, Custom);
1595 
1596     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i1,  MVT::v8i32);
1597     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i1,  MVT::v8i32);
1598     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v4i1,  MVT::v4i32);
1599     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v4i1,  MVT::v4i32);
1600     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1,  MVT::v8i32);
1601     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1,  MVT::v8i32);
1602     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1,  MVT::v4i32);
1603     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1,  MVT::v4i32);
1604     setOperationAction(ISD::FP_TO_SINT,                MVT::v2i1,  Custom);
1605     setOperationAction(ISD::FP_TO_UINT,                MVT::v2i1,  Custom);
1606     setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v2i1,  Custom);
1607     setOperationAction(ISD::STRICT_FP_TO_UINT,         MVT::v2i1,  Custom);
1608 
1609     // There is no byte-sized k-register load or store without AVX512DQ.
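         // (AVX512F only provides KMOVW; KMOVB requires AVX512DQ.)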
1610     if (!Subtarget.hasDQI()) {
1611       setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1612       setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1613       setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1614       setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1615 
1616       setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1617       setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1618       setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1619       setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1620     }
1621 
1622     // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1623     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1624       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1625       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1626       setOperationAction(ISD::ANY_EXTEND,  VT, Custom);
1627     }
1628 
1629     for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 })
1630       setOperationAction(ISD::VSELECT,          VT, Expand);
1631 
1632     for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1633       setOperationAction(ISD::SETCC,            VT, Custom);
1634       setOperationAction(ISD::STRICT_FSETCC,    VT, Custom);
1635       setOperationAction(ISD::STRICT_FSETCCS,   VT, Custom);
1636       setOperationAction(ISD::SELECT,           VT, Custom);
1637       setOperationAction(ISD::TRUNCATE,         VT, Custom);
1638 
1639       setOperationAction(ISD::BUILD_VECTOR,     VT, Custom);
1640       setOperationAction(ISD::CONCAT_VECTORS,   VT, Custom);
1641       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1642       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1643       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1644       setOperationAction(ISD::VECTOR_SHUFFLE,   VT, Custom);
1645     }
1646 
1647     for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1648       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1649   }
1650 
1651   // This block controls legalization for 512-bit operations with 32/64 bit
1652   // elements. 512-bits can be disabled based on prefer-vector-width and
1653   // required-vector-width function attributes.
1654   if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1655     bool HasBWI = Subtarget.hasBWI();
1656 
1657     addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1658     addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1659     addRegisterClass(MVT::v8i64,  &X86::VR512RegClass);
1660     addRegisterClass(MVT::v8f64,  &X86::VR512RegClass);
1661     addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1662     addRegisterClass(MVT::v32f16, &X86::VR512RegClass);
1663     addRegisterClass(MVT::v64i8,  &X86::VR512RegClass);
1664 
1665     for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1666       setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8,  Legal);
1667       setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1668       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i8,   Legal);
1669       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i16,  Legal);
1670       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i32,  Legal);
1671       if (HasBWI)
1672         setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1673     }
1674 
1675     for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1676       setOperationAction(ISD::FNEG,  VT, Custom);
1677       setOperationAction(ISD::FABS,  VT, Custom);
1678       setOperationAction(ISD::FMA,   VT, Legal);
1679       setOperationAction(ISD::STRICT_FMA, VT, Legal);
1680       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1681     }
1682 
1683     for (MVT VT : { MVT::v16i1, MVT::v16i8, MVT::v16i16 }) {
1684       setOperationPromotedToType(ISD::FP_TO_SINT       , VT, MVT::v16i32);
1685       setOperationPromotedToType(ISD::FP_TO_UINT       , VT, MVT::v16i32);
1686       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1687       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1688     }
1689     setOperationAction(ISD::FP_TO_SINT,        MVT::v16i32, Custom);
1690     setOperationAction(ISD::FP_TO_UINT,        MVT::v16i32, Custom);
1691     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v16i32, Custom);
1692     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v16i32, Custom);
1693     setOperationAction(ISD::SINT_TO_FP,        MVT::v16i32, Custom);
1694     setOperationAction(ISD::UINT_TO_FP,        MVT::v16i32, Custom);
1695     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Custom);
1696     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Custom);
1697     setOperationAction(ISD::FP_EXTEND,         MVT::v8f64,  Custom);
1698     setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v8f64,  Custom);
1699 
1700     setOperationAction(ISD::STRICT_FADD,      MVT::v16f32, Legal);
1701     setOperationAction(ISD::STRICT_FADD,      MVT::v8f64,  Legal);
1702     setOperationAction(ISD::STRICT_FSUB,      MVT::v16f32, Legal);
1703     setOperationAction(ISD::STRICT_FSUB,      MVT::v8f64,  Legal);
1704     setOperationAction(ISD::STRICT_FMUL,      MVT::v16f32, Legal);
1705     setOperationAction(ISD::STRICT_FMUL,      MVT::v8f64,  Legal);
1706     setOperationAction(ISD::STRICT_FDIV,      MVT::v16f32, Legal);
1707     setOperationAction(ISD::STRICT_FDIV,      MVT::v8f64,  Legal);
1708     setOperationAction(ISD::STRICT_FSQRT,     MVT::v16f32, Legal);
1709     setOperationAction(ISD::STRICT_FSQRT,     MVT::v8f64,  Legal);
1710     setOperationAction(ISD::STRICT_FP_ROUND,  MVT::v8f32,  Legal);
1711 
1712     setTruncStoreAction(MVT::v8i64,   MVT::v8i8,   Legal);
1713     setTruncStoreAction(MVT::v8i64,   MVT::v8i16,  Legal);
1714     setTruncStoreAction(MVT::v8i64,   MVT::v8i32,  Legal);
1715     setTruncStoreAction(MVT::v16i32,  MVT::v16i8,  Legal);
1716     setTruncStoreAction(MVT::v16i32,  MVT::v16i16, Legal);
1717     if (HasBWI)
1718       setTruncStoreAction(MVT::v32i16,  MVT::v32i8, Legal);
1719 
1720     // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1721     // to 512-bit rather than use the AVX2 instructions so that we can use
1722     // k-masks.
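         // (The narrower AVX/AVX2 masked forms, VMASKMOVPS/PD and VPMASKMOVD/Q,
         // take a vector mask rather than a k-register.)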
1723     if (!Subtarget.hasVLX()) {
1724       for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1725            MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1726         setOperationAction(ISD::MLOAD,  VT, Custom);
1727         setOperationAction(ISD::MSTORE, VT, Custom);
1728       }
1729     }
1730 
1731     setOperationAction(ISD::TRUNCATE,    MVT::v8i32,  Legal);
1732     setOperationAction(ISD::TRUNCATE,    MVT::v16i16, Legal);
1733     setOperationAction(ISD::TRUNCATE,    MVT::v32i8,  HasBWI ? Legal : Custom);
1734     setOperationAction(ISD::TRUNCATE,    MVT::v16i64, Custom);
1735     setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1736     setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1737     setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64,  Custom);
1738     setOperationAction(ISD::ANY_EXTEND,  MVT::v32i16, Custom);
1739     setOperationAction(ISD::ANY_EXTEND,  MVT::v16i32, Custom);
1740     setOperationAction(ISD::ANY_EXTEND,  MVT::v8i64,  Custom);
1741     setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1742     setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1743     setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64,  Custom);
1744 
1745     if (HasBWI) {
1746       // Extends from v64i1 masks to 512-bit vectors.
1747       setOperationAction(ISD::SIGN_EXTEND,        MVT::v64i8, Custom);
1748       setOperationAction(ISD::ZERO_EXTEND,        MVT::v64i8, Custom);
1749       setOperationAction(ISD::ANY_EXTEND,         MVT::v64i8, Custom);
1750     }
1751 
1752     for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1753       setOperationAction(ISD::FFLOOR,            VT, Legal);
1754       setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1755       setOperationAction(ISD::FCEIL,             VT, Legal);
1756       setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1757       setOperationAction(ISD::FTRUNC,            VT, Legal);
1758       setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1759       setOperationAction(ISD::FRINT,             VT, Legal);
1760       setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1761       setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1762       setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1763       setOperationAction(ISD::FROUNDEVEN,        VT, Legal);
1764       setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1765 
1766       setOperationAction(ISD::FROUND,            VT, Custom);
1767     }
1768 
1769     for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1770       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1771       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1772     }
1773 
1774     setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom);
1775     setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom);
1776     setOperationAction(ISD::ADD, MVT::v64i8,  HasBWI ? Legal : Custom);
1777     setOperationAction(ISD::SUB, MVT::v64i8,  HasBWI ? Legal : Custom);
1778 
1779     setOperationAction(ISD::MUL, MVT::v8i64,  Custom);
1780     setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1781     setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom);
1782     setOperationAction(ISD::MUL, MVT::v64i8,  Custom);
1783 
1784     setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1785     setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1786     setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom);
1787     setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
1788     setOperationAction(ISD::MULHS, MVT::v64i8,  Custom);
1789     setOperationAction(ISD::MULHU, MVT::v64i8,  Custom);
1790     setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom);
1791     setOperationAction(ISD::AVGCEILU, MVT::v64i8,  HasBWI ? Legal : Custom);
1792 
1793     setOperationAction(ISD::SMULO, MVT::v64i8, Custom);
1794     setOperationAction(ISD::UMULO, MVT::v64i8, Custom);
1795 
1796     setOperationAction(ISD::BITREVERSE, MVT::v64i8,  Custom);
1797 
1798     for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1799       setOperationAction(ISD::SRL,              VT, Custom);
1800       setOperationAction(ISD::SHL,              VT, Custom);
1801       setOperationAction(ISD::SRA,              VT, Custom);
1802       setOperationAction(ISD::ROTL,             VT, Custom);
1803       setOperationAction(ISD::ROTR,             VT, Custom);
1804       setOperationAction(ISD::SETCC,            VT, Custom);
1805 
1806       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1807       // setcc all the way to isel and prefer SETGT in some isel patterns.
1808       setCondCodeAction(ISD::SETLT, VT, Custom);
1809       setCondCodeAction(ISD::SETLE, VT, Custom);
1810     }
1811     for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1812       setOperationAction(ISD::SMAX,             VT, Legal);
1813       setOperationAction(ISD::UMAX,             VT, Legal);
1814       setOperationAction(ISD::SMIN,             VT, Legal);
1815       setOperationAction(ISD::UMIN,             VT, Legal);
1816       setOperationAction(ISD::ABS,              VT, Legal);
1817       setOperationAction(ISD::CTPOP,            VT, Custom);
1818       setOperationAction(ISD::STRICT_FSETCC,    VT, Custom);
1819       setOperationAction(ISD::STRICT_FSETCCS,   VT, Custom);
1820     }
1821 
1822     for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1823       setOperationAction(ISD::ABS,     VT, HasBWI ? Legal : Custom);
1824       setOperationAction(ISD::CTPOP,   VT, Subtarget.hasBITALG() ? Legal : Custom);
1825       setOperationAction(ISD::CTLZ,    VT, Custom);
1826       setOperationAction(ISD::SMAX,    VT, HasBWI ? Legal : Custom);
1827       setOperationAction(ISD::UMAX,    VT, HasBWI ? Legal : Custom);
1828       setOperationAction(ISD::SMIN,    VT, HasBWI ? Legal : Custom);
1829       setOperationAction(ISD::UMIN,    VT, HasBWI ? Legal : Custom);
1830       setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom);
1831       setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom);
1832       setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom);
1833       setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
1834     }
1835 
1836     setOperationAction(ISD::FSHL,       MVT::v64i8, Custom);
1837     setOperationAction(ISD::FSHR,       MVT::v64i8, Custom);
1838     setOperationAction(ISD::FSHL,      MVT::v32i16, Custom);
1839     setOperationAction(ISD::FSHR,      MVT::v32i16, Custom);
1840     setOperationAction(ISD::FSHL,      MVT::v16i32, Custom);
1841     setOperationAction(ISD::FSHR,      MVT::v16i32, Custom);
1842 
1843     if (Subtarget.hasDQI()) {
1844       for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
1845                        ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1846                        ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1847         setOperationAction(Opc,           MVT::v8i64, Custom);
1848       setOperationAction(ISD::MUL,        MVT::v8i64, Legal);
1849     }
1850 
1851     if (Subtarget.hasCDI()) {
1852       // Non-VLX subtargets extend 128/256 vectors to use the 512-bit version.
1853       for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1854         setOperationAction(ISD::CTLZ,            VT, Legal);
1855       }
1856     } // Subtarget.hasCDI()
1857 
1858     if (Subtarget.hasVPOPCNTDQ()) {
1859       for (auto VT : { MVT::v16i32, MVT::v8i64 })
1860         setOperationAction(ISD::CTPOP, VT, Legal);
1861     }
1862 
1863     // Extract subvector is special because the value type
1864     // (result) is 256-bit but the source is 512-bit wide.
1865     // 128-bit was made Legal under AVX1.
1866     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1867                      MVT::v16f16, MVT::v8f32, MVT::v4f64 })
1868       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1869 
1870     for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
1871                      MVT::v32f16, MVT::v16f32, MVT::v8f64 }) {
1872       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
1873       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
1874       setOperationAction(ISD::SELECT,             VT, Custom);
1875       setOperationAction(ISD::VSELECT,            VT, Custom);
1876       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1877       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1878       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1879       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1880       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1881     }
1882     setF16Action(MVT::v32f16, Expand);
1883     setOperationAction(ISD::FP_ROUND, MVT::v16f16, Custom);
1884     setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Custom);
1885     setOperationAction(ISD::FP_EXTEND, MVT::v16f32, Legal);
1886     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Legal);
1887     for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1888       setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1889       setOperationPromotedToType(Opc, MVT::v32f16, MVT::v32f32);
1890     }
1891 
1892     for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1893       setOperationAction(ISD::MLOAD,               VT, Legal);
1894       setOperationAction(ISD::MSTORE,              VT, Legal);
1895       setOperationAction(ISD::MGATHER,             VT, Custom);
1896       setOperationAction(ISD::MSCATTER,            VT, Custom);
1897     }
1898     if (HasBWI) {
1899       for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1900         setOperationAction(ISD::MLOAD,        VT, Legal);
1901         setOperationAction(ISD::MSTORE,       VT, Legal);
1902       }
1903     } else {
1904       setOperationAction(ISD::STORE, MVT::v32i16, Custom);
1905       setOperationAction(ISD::STORE, MVT::v64i8,  Custom);
1906     }
1907 
1908     if (Subtarget.hasVBMI2()) {
1909       for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
1910                        MVT::v16i16, MVT::v8i32, MVT::v4i64,
1911                        MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1912         setOperationAction(ISD::FSHL, VT, Custom);
1913         setOperationAction(ISD::FSHR, VT, Custom);
1914       }
1915 
1916       setOperationAction(ISD::ROTL, MVT::v32i16, Custom);
1917       setOperationAction(ISD::ROTR, MVT::v8i16,  Custom);
1918       setOperationAction(ISD::ROTR, MVT::v16i16, Custom);
1919       setOperationAction(ISD::ROTR, MVT::v32i16, Custom);
1920     }
1921   }// useAVX512Regs
1922   } // useAVX512Regs
1923   // This block controls legalization for operations that don't have
1924   // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1925   // narrower widths.
1926   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1927     // These operations are handled on non-VLX by artificially widening in
1928     // isel patterns.
1929 
1930     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v8i32, Custom);
1931     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v4i32, Custom);
1932     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v2i32, Custom);
1933 
1934     if (Subtarget.hasDQI()) {
1935       // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1936       // v2f32 UINT_TO_FP is already custom under SSE2.
1937       assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1938              isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
1939              "Unexpected operation action!");
1940       // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1941       setOperationAction(ISD::FP_TO_SINT,        MVT::v2f32, Custom);
1942       setOperationAction(ISD::FP_TO_UINT,        MVT::v2f32, Custom);
1943       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
1944       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
1945     }
1946 
1947     for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1948       setOperationAction(ISD::SMAX, VT, Legal);
1949       setOperationAction(ISD::UMAX, VT, Legal);
1950       setOperationAction(ISD::SMIN, VT, Legal);
1951       setOperationAction(ISD::UMIN, VT, Legal);
1952       setOperationAction(ISD::ABS,  VT, Legal);
1953     }
1954 
1955     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1956       setOperationAction(ISD::ROTL,     VT, Custom);
1957       setOperationAction(ISD::ROTR,     VT, Custom);
1958     }
1959 
1960     // Custom legalize 2x32 to get a little better code.
1961     setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1962     setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1963 
1964     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1965                      MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1966       setOperationAction(ISD::MSCATTER, VT, Custom);
1967 
1968     if (Subtarget.hasDQI()) {
1969       for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
1970                        ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1971                        ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}) {
1972         setOperationAction(Opc, MVT::v2i64, Custom);
1973         setOperationAction(Opc, MVT::v4i64, Custom);
1974       }
1975       setOperationAction(ISD::MUL, MVT::v2i64, Legal);
1976       setOperationAction(ISD::MUL, MVT::v4i64, Legal);
1977     }
1978 
1979     if (Subtarget.hasCDI()) {
1980       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1981         setOperationAction(ISD::CTLZ,            VT, Legal);
1982       }
1983     } // Subtarget.hasCDI()
1984 
1985     if (Subtarget.hasVPOPCNTDQ()) {
1986       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1987         setOperationAction(ISD::CTPOP, VT, Legal);
1988     }
1989   }
1990 
1991   // This block controls legalization of v32i1/v64i1, which are available with
1992   // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1993   // useBWIRegs.
1994   if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1995     addRegisterClass(MVT::v32i1,  &X86::VK32RegClass);
1996     addRegisterClass(MVT::v64i1,  &X86::VK64RegClass);
1997 
1998     for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1999       setOperationAction(ISD::VSELECT,            VT, Expand);
2000       setOperationAction(ISD::TRUNCATE,           VT, Custom);
2001       setOperationAction(ISD::SETCC,              VT, Custom);
2002       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2003       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
2004       setOperationAction(ISD::SELECT,             VT, Custom);
2005       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
2006       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
2007       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
2008       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Custom);
2009     }
2010 
2011     for (auto VT : { MVT::v16i1, MVT::v32i1 })
2012       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
2013 
2014     // Extends from v32i1 masks to 256-bit vectors.
2015     setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i8, Custom);
2016     setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i8, Custom);
2017     setOperationAction(ISD::ANY_EXTEND,         MVT::v32i8, Custom);
2018 
2019     for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
2020       setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
2021       setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
2022     }
2023 
2024     // These operations are handled on non-VLX by artificially widening in
2025     // isel patterns.
2026     // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
2027 
2028     if (Subtarget.hasBITALG()) {
2029       for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
2030         setOperationAction(ISD::CTPOP, VT, Legal);
2031     }
2032   }
2033 
2034   if (!Subtarget.useSoftFloat() && Subtarget.hasFP16()) {
2035     auto setGroup = [&] (MVT VT) {
2036       setOperationAction(ISD::FADD,               VT, Legal);
2037       setOperationAction(ISD::STRICT_FADD,        VT, Legal);
2038       setOperationAction(ISD::FSUB,               VT, Legal);
2039       setOperationAction(ISD::STRICT_FSUB,        VT, Legal);
2040       setOperationAction(ISD::FMUL,               VT, Legal);
2041       setOperationAction(ISD::STRICT_FMUL,        VT, Legal);
2042       setOperationAction(ISD::FDIV,               VT, Legal);
2043       setOperationAction(ISD::STRICT_FDIV,        VT, Legal);
2044       setOperationAction(ISD::FSQRT,              VT, Legal);
2045       setOperationAction(ISD::STRICT_FSQRT,       VT, Legal);
2046 
2047       setOperationAction(ISD::FFLOOR,             VT, Legal);
2048       setOperationAction(ISD::STRICT_FFLOOR,      VT, Legal);
2049       setOperationAction(ISD::FCEIL,              VT, Legal);
2050       setOperationAction(ISD::STRICT_FCEIL,       VT, Legal);
2051       setOperationAction(ISD::FTRUNC,             VT, Legal);
2052       setOperationAction(ISD::STRICT_FTRUNC,      VT, Legal);
2053       setOperationAction(ISD::FRINT,              VT, Legal);
2054       setOperationAction(ISD::STRICT_FRINT,       VT, Legal);
2055       setOperationAction(ISD::FNEARBYINT,         VT, Legal);
2056       setOperationAction(ISD::STRICT_FNEARBYINT,  VT, Legal);
2057 
2058       setOperationAction(ISD::LOAD,               VT, Legal);
2059       setOperationAction(ISD::STORE,              VT, Legal);
2060 
2061       setOperationAction(ISD::FMA,                VT, Legal);
2062       setOperationAction(ISD::STRICT_FMA,         VT, Legal);
2063       setOperationAction(ISD::VSELECT,            VT, Legal);
2064       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
2065       setOperationAction(ISD::SELECT,             VT, Custom);
2066 
2067       setOperationAction(ISD::FNEG,               VT, Custom);
2068       setOperationAction(ISD::FABS,               VT, Custom);
2069       setOperationAction(ISD::FCOPYSIGN,          VT, Custom);
2070       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2071       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
2072     };
2073 
2074     // AVX512_FP16 scalar operations
2075     setGroup(MVT::f16);
2076     setOperationAction(ISD::FREM,                 MVT::f16, Promote);
2077     setOperationAction(ISD::STRICT_FREM,          MVT::f16, Promote);
2078     setOperationAction(ISD::SELECT_CC,            MVT::f16, Expand);
2079     setOperationAction(ISD::BR_CC,                MVT::f16, Expand);
2080     setOperationAction(ISD::SETCC,                MVT::f16, Custom);
2081     setOperationAction(ISD::STRICT_FSETCC,        MVT::f16, Custom);
2082     setOperationAction(ISD::STRICT_FSETCCS,       MVT::f16, Custom);
2083     setOperationAction(ISD::FROUND,               MVT::f16, Custom);
2084     setOperationAction(ISD::STRICT_FROUND,        MVT::f16, Promote);
2085     setOperationAction(ISD::FROUNDEVEN,           MVT::f16, Legal);
2086     setOperationAction(ISD::STRICT_FROUNDEVEN,    MVT::f16, Legal);
2087     setOperationAction(ISD::FP_ROUND,             MVT::f16, Custom);
2088     setOperationAction(ISD::STRICT_FP_ROUND,      MVT::f16, Custom);
2089     setOperationAction(ISD::FP_EXTEND,            MVT::f32, Legal);
2090     setOperationAction(ISD::STRICT_FP_EXTEND,     MVT::f32, Legal);
2091 
2092     setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand);
2093     setCondCodeAction(ISD::SETUNE, MVT::f16, Expand);
2094 
2095     if (Subtarget.useAVX512Regs()) {
2096       setGroup(MVT::v32f16);
2097       setOperationAction(ISD::SCALAR_TO_VECTOR,       MVT::v32f16, Custom);
2098       setOperationAction(ISD::SINT_TO_FP,             MVT::v32i16, Legal);
2099       setOperationAction(ISD::STRICT_SINT_TO_FP,      MVT::v32i16, Legal);
2100       setOperationAction(ISD::UINT_TO_FP,             MVT::v32i16, Legal);
2101       setOperationAction(ISD::STRICT_UINT_TO_FP,      MVT::v32i16, Legal);
2102       setOperationAction(ISD::FP_ROUND,               MVT::v16f16, Legal);
2103       setOperationAction(ISD::STRICT_FP_ROUND,        MVT::v16f16, Legal);
2104       setOperationAction(ISD::FP_EXTEND,              MVT::v16f32, Legal);
2105       setOperationAction(ISD::STRICT_FP_EXTEND,       MVT::v16f32, Legal);
2106       setOperationAction(ISD::FP_EXTEND,              MVT::v8f64,  Legal);
2107       setOperationAction(ISD::STRICT_FP_EXTEND,       MVT::v8f64,  Legal);
2108       setOperationAction(ISD::INSERT_VECTOR_ELT,      MVT::v32f16, Custom);
2109 
2110       setOperationAction(ISD::FP_TO_SINT,             MVT::v32i16, Custom);
2111       setOperationAction(ISD::STRICT_FP_TO_SINT,      MVT::v32i16, Custom);
2112       setOperationAction(ISD::FP_TO_UINT,             MVT::v32i16, Custom);
2113       setOperationAction(ISD::STRICT_FP_TO_UINT,      MVT::v32i16, Custom);
2114       setOperationPromotedToType(ISD::FP_TO_SINT,     MVT::v32i8,  MVT::v32i16);
2115       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i8,
2116                                  MVT::v32i16);
2117       setOperationPromotedToType(ISD::FP_TO_UINT,     MVT::v32i8,  MVT::v32i16);
2118       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i8,
2119                                  MVT::v32i16);
2120       setOperationPromotedToType(ISD::FP_TO_SINT,     MVT::v32i1,  MVT::v32i16);
2121       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i1,
2122                                  MVT::v32i16);
2123       setOperationPromotedToType(ISD::FP_TO_UINT,     MVT::v32i1,  MVT::v32i16);
2124       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i1,
2125                                  MVT::v32i16);
2126 
2127       setOperationAction(ISD::EXTRACT_SUBVECTOR,      MVT::v16f16, Legal);
2128       setOperationAction(ISD::INSERT_SUBVECTOR,       MVT::v32f16, Legal);
2129       setOperationAction(ISD::CONCAT_VECTORS,         MVT::v32f16, Custom);
2130 
2131       setLoadExtAction(ISD::EXTLOAD, MVT::v8f64,  MVT::v8f16,  Legal);
2132       setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Legal);
2133 
2134       setOperationAction(ISD::STRICT_FSETCC,      MVT::v32i1, Custom);
2135       setOperationAction(ISD::STRICT_FSETCCS,     MVT::v32i1, Custom);
2136     }
2137 
2138     if (Subtarget.hasVLX()) {
2139       setGroup(MVT::v8f16);
2140       setGroup(MVT::v16f16);
2141 
2142       setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8f16,  Legal);
2143       setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16f16, Custom);
2144       setOperationAction(ISD::SINT_TO_FP,         MVT::v16i16, Legal);
2145       setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v16i16, Legal);
2146       setOperationAction(ISD::SINT_TO_FP,         MVT::v8i16,  Legal);
2147       setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v8i16,  Legal);
2148       setOperationAction(ISD::UINT_TO_FP,         MVT::v16i16, Legal);
2149       setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v16i16, Legal);
2150       setOperationAction(ISD::UINT_TO_FP,         MVT::v8i16,  Legal);
2151       setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v8i16,  Legal);
2152 
2153       setOperationAction(ISD::FP_TO_SINT,         MVT::v8i16, Custom);
2154       setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v8i16, Custom);
2155       setOperationAction(ISD::FP_TO_UINT,         MVT::v8i16, Custom);
2156       setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v8i16, Custom);
2157       setOperationAction(ISD::FP_ROUND,           MVT::v8f16, Legal);
2158       setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v8f16, Legal);
2159       setOperationAction(ISD::FP_EXTEND,          MVT::v8f32, Legal);
2160       setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v8f32, Legal);
2161       setOperationAction(ISD::FP_EXTEND,          MVT::v4f64, Legal);
2162       setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v4f64, Legal);
2163 
2164       // INSERT_VECTOR_ELT v8f16 extended to VECTOR_SHUFFLE
2165       setOperationAction(ISD::INSERT_VECTOR_ELT,    MVT::v8f16,  Custom);
2166       setOperationAction(ISD::INSERT_VECTOR_ELT,    MVT::v16f16, Custom);
2167 
2168       setOperationAction(ISD::EXTRACT_SUBVECTOR,    MVT::v8f16, Legal);
2169       setOperationAction(ISD::INSERT_SUBVECTOR,     MVT::v16f16, Legal);
2170       setOperationAction(ISD::CONCAT_VECTORS,       MVT::v16f16, Custom);
2171 
2172       setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Legal);
2173       setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Legal);
2174       setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Legal);
2175       setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Legal);
2176 
2177       // Need to custom widen these to prevent scalarization.
2178       setOperationAction(ISD::LOAD,  MVT::v4f16, Custom);
2179       setOperationAction(ISD::STORE, MVT::v4f16, Custom);
2180     }
2181   }
2182 
2183   if (!Subtarget.useSoftFloat() &&
2184       (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) {
2185     addRegisterClass(MVT::v8bf16, &X86::VR128XRegClass);
2186     addRegisterClass(MVT::v16bf16, &X86::VR256XRegClass);
2187     // We set the type action of bf16 to TypeSoftPromoteHalf, but we don't
2188     // provide the method to promote BUILD_VECTOR. Set the operation action
2189     // Custom to do the customization later.
2190     setOperationAction(ISD::BUILD_VECTOR, MVT::bf16, Custom);
2191     for (auto VT : {MVT::v8bf16, MVT::v16bf16}) {
2192       setF16Action(VT, Expand);
2193       setOperationAction(ISD::FADD, VT, Expand);
2194       setOperationAction(ISD::FSUB, VT, Expand);
2195       setOperationAction(ISD::FMUL, VT, Expand);
2196       setOperationAction(ISD::FDIV, VT, Expand);
2197       setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2198       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2199     }
2200     addLegalFPImmediate(APFloat::getZero(APFloat::BFloat()));
2201   }
2202 
2203   if (!Subtarget.useSoftFloat() && Subtarget.hasBF16()) {
2204     addRegisterClass(MVT::v32bf16, &X86::VR512RegClass);
2205     setF16Action(MVT::v32bf16, Expand);
2206     setOperationAction(ISD::FADD, MVT::v32bf16, Expand);
2207     setOperationAction(ISD::FSUB, MVT::v32bf16, Expand);
2208     setOperationAction(ISD::FMUL, MVT::v32bf16, Expand);
2209     setOperationAction(ISD::FDIV, MVT::v32bf16, Expand);
2210     setOperationAction(ISD::BUILD_VECTOR, MVT::v32bf16, Custom);
2211     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32bf16, Custom);
2212   }
2213 
2214   if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
2215     setTruncStoreAction(MVT::v4i64, MVT::v4i8,  Legal);
2216     setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
2217     setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
2218     setTruncStoreAction(MVT::v8i32, MVT::v8i8,  Legal);
2219     setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
2220 
2221     setTruncStoreAction(MVT::v2i64, MVT::v2i8,  Legal);
2222     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
2223     setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
2224     setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
2225     setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
2226 
2227     if (Subtarget.hasBWI()) {
2228       setTruncStoreAction(MVT::v16i16,  MVT::v16i8, Legal);
2229       setTruncStoreAction(MVT::v8i16,   MVT::v8i8,  Legal);
2230     }
2231 
2232     if (Subtarget.hasFP16()) {
2233       // vcvttph2[u]dq v4f16 -> v4i32/64, v2f16 -> v2i32/64
2234       setOperationAction(ISD::FP_TO_SINT,        MVT::v2f16, Custom);
2235       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f16, Custom);
2236       setOperationAction(ISD::FP_TO_UINT,        MVT::v2f16, Custom);
2237       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f16, Custom);
2238       setOperationAction(ISD::FP_TO_SINT,        MVT::v4f16, Custom);
2239       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f16, Custom);
2240       setOperationAction(ISD::FP_TO_UINT,        MVT::v4f16, Custom);
2241       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f16, Custom);
2242       // vcvt[u]dq2ph v4i32/64 -> v4f16, v2i32/64 -> v2f16
2243       setOperationAction(ISD::SINT_TO_FP,        MVT::v2f16, Custom);
2244       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f16, Custom);
2245       setOperationAction(ISD::UINT_TO_FP,        MVT::v2f16, Custom);
2246       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f16, Custom);
2247       setOperationAction(ISD::SINT_TO_FP,        MVT::v4f16, Custom);
2248       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f16, Custom);
2249       setOperationAction(ISD::UINT_TO_FP,        MVT::v4f16, Custom);
2250       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f16, Custom);
2251       // vcvtps2phx v4f32 -> v4f16, v2f32 -> v2f16
2252       setOperationAction(ISD::FP_ROUND,          MVT::v2f16, Custom);
2253       setOperationAction(ISD::STRICT_FP_ROUND,   MVT::v2f16, Custom);
2254       setOperationAction(ISD::FP_ROUND,          MVT::v4f16, Custom);
2255       setOperationAction(ISD::STRICT_FP_ROUND,   MVT::v4f16, Custom);
2256       // vcvtph2psx v4f16 -> v4f32, v2f16 -> v2f32
2257       setOperationAction(ISD::FP_EXTEND,         MVT::v2f16, Custom);
2258       setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v2f16, Custom);
2259       setOperationAction(ISD::FP_EXTEND,         MVT::v4f16, Custom);
2260       setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v4f16, Custom);
2261     }
2262 
2263     setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
2264     setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
2265     setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
2266   }
2267 
2268   if (Subtarget.hasAMXTILE()) {
2269     addRegisterClass(MVT::x86amx, &X86::TILERegClass);
2270   }
2271 
2272   // We want to custom lower some of our intrinsics.
2273   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
2274   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
2275   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
2276   if (!Subtarget.is64Bit()) {
2277     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
2278   }
2279 
2280   // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
2281   // handle type legalization for these operations here.
2282   //
2283   // FIXME: We really should do custom legalization for addition and
2284   // subtraction on x86-32 once PR3203 is fixed.  We really can't do much better
2285   // than generic legalization for 64-bit multiplication-with-overflow, though.
2286   for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
2287     if (VT == MVT::i64 && !Subtarget.is64Bit())
2288       continue;
2289     // Add/Sub/Mul with overflow operations are custom lowered.
2290     setOperationAction(ISD::SADDO, VT, Custom);
2291     setOperationAction(ISD::UADDO, VT, Custom);
2292     setOperationAction(ISD::SSUBO, VT, Custom);
2293     setOperationAction(ISD::USUBO, VT, Custom);
2294     setOperationAction(ISD::SMULO, VT, Custom);
2295     setOperationAction(ISD::UMULO, VT, Custom);
2296 
2297     // Support carry in as value rather than glue.
2298     setOperationAction(ISD::ADDCARRY, VT, Custom);
2299     setOperationAction(ISD::SUBCARRY, VT, Custom);
2300     setOperationAction(ISD::SETCCCARRY, VT, Custom);
2301     setOperationAction(ISD::SADDO_CARRY, VT, Custom);
2302     setOperationAction(ISD::SSUBO_CARRY, VT, Custom);
2303   }
2304 
2305   if (!Subtarget.is64Bit()) {
2306     // These libcalls are not available in 32-bit.
2307     setLibcallName(RTLIB::SHL_I128, nullptr);
2308     setLibcallName(RTLIB::SRL_I128, nullptr);
2309     setLibcallName(RTLIB::SRA_I128, nullptr);
2310     setLibcallName(RTLIB::MUL_I128, nullptr);
2311     // The MULO libcall is not part of libgcc, only compiler-rt.
2312     setLibcallName(RTLIB::MULO_I64, nullptr);
2313   }
2314   // The MULO libcall is not part of libgcc, only compiler-rt.
2315   setLibcallName(RTLIB::MULO_I128, nullptr);
2316 
2317   // Combine sin / cos into _sincos_stret if it is available.
2318   if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
2319       getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
2320     setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
2321     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
2322   }
2323 
2324   if (Subtarget.isTargetWin64()) {
2325     setOperationAction(ISD::SDIV, MVT::i128, Custom);
2326     setOperationAction(ISD::UDIV, MVT::i128, Custom);
2327     setOperationAction(ISD::SREM, MVT::i128, Custom);
2328     setOperationAction(ISD::UREM, MVT::i128, Custom);
2329     setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
2330     setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
2331     setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
2332     setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
2333     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
2334     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
2335     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
2336     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
2337   }
2338 
2339   // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
2340   // is. We should promote the value to 64-bits to solve this.
2341   // This is what the CRT headers do - `fmodf` is an inline header
2342   // function casting to f64 and calling `fmod`.
2343   if (Subtarget.is32Bit() &&
2344       (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
2345     for (ISD::NodeType Op :
2346          {ISD::FCEIL,  ISD::STRICT_FCEIL,
2347           ISD::FCOS,   ISD::STRICT_FCOS,
2348           ISD::FEXP,   ISD::STRICT_FEXP,
2349           ISD::FFLOOR, ISD::STRICT_FFLOOR,
2350           ISD::FREM,   ISD::STRICT_FREM,
2351           ISD::FLOG,   ISD::STRICT_FLOG,
2352           ISD::FLOG10, ISD::STRICT_FLOG10,
2353           ISD::FPOW,   ISD::STRICT_FPOW,
2354           ISD::FSIN,   ISD::STRICT_FSIN})
2355       if (isOperationExpand(Op, MVT::f32))
2356         setOperationAction(Op, MVT::f32, Promote);
2357 
2358   // We have target-specific dag combine patterns for the following nodes:
2359   setTargetDAGCombine({ISD::VECTOR_SHUFFLE,
2360                        ISD::SCALAR_TO_VECTOR,
2361                        ISD::INSERT_VECTOR_ELT,
2362                        ISD::EXTRACT_VECTOR_ELT,
2363                        ISD::CONCAT_VECTORS,
2364                        ISD::INSERT_SUBVECTOR,
2365                        ISD::EXTRACT_SUBVECTOR,
2366                        ISD::BITCAST,
2367                        ISD::VSELECT,
2368                        ISD::SELECT,
2369                        ISD::SHL,
2370                        ISD::SRA,
2371                        ISD::SRL,
2372                        ISD::OR,
2373                        ISD::AND,
2374                        ISD::ADD,
2375                        ISD::FADD,
2376                        ISD::FSUB,
2377                        ISD::FNEG,
2378                        ISD::FMA,
2379                        ISD::STRICT_FMA,
2380                        ISD::FMINNUM,
2381                        ISD::FMAXNUM,
2382                        ISD::SUB,
2383                        ISD::LOAD,
2384                        ISD::MLOAD,
2385                        ISD::STORE,
2386                        ISD::MSTORE,
2387                        ISD::TRUNCATE,
2388                        ISD::ZERO_EXTEND,
2389                        ISD::ANY_EXTEND,
2390                        ISD::SIGN_EXTEND,
2391                        ISD::SIGN_EXTEND_INREG,
2392                        ISD::ANY_EXTEND_VECTOR_INREG,
2393                        ISD::SIGN_EXTEND_VECTOR_INREG,
2394                        ISD::ZERO_EXTEND_VECTOR_INREG,
2395                        ISD::SINT_TO_FP,
2396                        ISD::UINT_TO_FP,
2397                        ISD::STRICT_SINT_TO_FP,
2398                        ISD::STRICT_UINT_TO_FP,
2399                        ISD::SETCC,
2400                        ISD::MUL,
2401                        ISD::XOR,
2402                        ISD::MSCATTER,
2403                        ISD::MGATHER,
2404                        ISD::FP16_TO_FP,
2405                        ISD::FP_EXTEND,
2406                        ISD::STRICT_FP_EXTEND,
2407                        ISD::FP_ROUND,
2408                        ISD::STRICT_FP_ROUND});
2409 
2410   computeRegisterProperties(Subtarget.getRegisterInfo());
2411 
2412   MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2413   MaxStoresPerMemsetOptSize = 8;
2414   MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2415   MaxStoresPerMemcpyOptSize = 4;
2416   MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2417   MaxStoresPerMemmoveOptSize = 4;
2418 
2419   // TODO: These control memcmp expansion in CGP and could be raised higher, but
2420   // that needs to be benchmarked and balanced with the potential use of vector
2421   // load/store types (PR33329, PR33914).
2422   MaxLoadsPerMemcmp = 2;
2423   MaxLoadsPerMemcmpOptSize = 2;
2424 
2425   // Default loop alignment, which can be overridden by -align-loops.
2426   setPrefLoopAlignment(Align(16));
2427 
2428   // An out-of-order CPU can speculatively execute past a predictable branch,
2429   // but a conditional move could be stalled by an expensive earlier operation.
2430   PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2431   EnableExtLdPromotion = true;
2432   setPrefFunctionAlignment(Align(16));
2433 
2434   verifyIntrinsicTables();
2435 
2436   // Default to having -disable-strictnode-mutation on
2437   IsStrictFPEnabled = true;
2438 }
2439 
2440 // This has so far only been implemented for 64-bit MachO.
2441 bool X86TargetLowering::useLoadStackGuardNode() const {
2442   return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2443 }
2444 
2445 bool X86TargetLowering::useStackGuardXorFP() const {
2446   // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2447   return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2448 }
2449 
2450 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2451                                                const SDLoc &DL) const {
2452   EVT PtrTy = getPointerTy(DAG.getDataLayout());
2453   unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2454   MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2455   return SDValue(Node, 0);
2456 }
2457 
2458 TargetLoweringBase::LegalizeTypeAction
2459 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
2460   if ((VT == MVT::v32i1 || VT == MVT::v64i1) && Subtarget.hasAVX512() &&
2461       !Subtarget.hasBWI())
2462     return TypeSplitVector;
2463 
2464   if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2465       !Subtarget.hasF16C() && VT.getVectorElementType() == MVT::f16)
2466     return TypeSplitVector;
2467 
2468   if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2469       VT.getVectorElementType() != MVT::i1)
2470     return TypeWidenVector;
2471 
2472   return TargetLoweringBase::getPreferredVectorAction(VT);
2473 }
2474 
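// Selects the register type and number of registers used to pass a vXi1 mask
// value under the given calling convention. Returns
// {MVT::INVALID_SIMPLE_VALUE_TYPE, 0} when the generic breakdown should be
// used instead.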
2475 static std::pair<MVT, unsigned>
2476 handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC,
2477                                  const X86Subtarget &Subtarget) {
2478   // v2i1/v4i1/v8i1/v16i1 all pass in xmm registers unless the calling
2479   // convention is one that uses k registers.
2480   if (NumElts == 2)
2481     return {MVT::v2i64, 1};
2482   if (NumElts == 4)
2483     return {MVT::v4i32, 1};
2484   if (NumElts == 8 && CC != CallingConv::X86_RegCall &&
2485       CC != CallingConv::Intel_OCL_BI)
2486     return {MVT::v8i16, 1};
2487   if (NumElts == 16 && CC != CallingConv::X86_RegCall &&
2488       CC != CallingConv::Intel_OCL_BI)
2489     return {MVT::v16i8, 1};
2490   // v32i1 passes in ymm unless we have BWI and the calling convention is
2491   // regcall.
2492   if (NumElts == 32 && (!Subtarget.hasBWI() || CC != CallingConv::X86_RegCall))
2493     return {MVT::v32i8, 1};
2494   // Split v64i1 vectors if we don't have v64i8 available.
2495   if (NumElts == 64 && Subtarget.hasBWI() && CC != CallingConv::X86_RegCall) {
2496     if (Subtarget.useAVX512Regs())
2497       return {MVT::v64i8, 1};
2498     return {MVT::v32i8, 2};
2499   }
2500 
2501   // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2502   if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
2503       NumElts > 64)
2504     return {MVT::i8, NumElts};
2505 
2506   return {MVT::INVALID_SIMPLE_VALUE_TYPE, 0};
2507 }
2508 
2509 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2510                                                      CallingConv::ID CC,
2511                                                      EVT VT) const {
2512   if (VT.isVector()) {
2513     if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) {
2514       unsigned NumElts = VT.getVectorNumElements();
2515 
2516       MVT RegisterVT;
2517       unsigned NumRegisters;
2518       std::tie(RegisterVT, NumRegisters) =
2519           handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2520       if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2521         return RegisterVT;
2522     }
2523 
2524     if (VT.getVectorElementType() == MVT::f16 && VT.getVectorNumElements() < 8)
2525       return MVT::v8f16;
2526   }
2527 
2528   // We will use more GPRs for f64 and f80 on 32-bit targets when x87 is disabled.
2529   if ((VT == MVT::f64 || VT == MVT::f80) && !Subtarget.is64Bit() &&
2530       !Subtarget.hasX87())
2531     return MVT::i32;
2532 
2533   if (VT.isVector() && VT.getVectorElementType() == MVT::bf16)
2534     return getRegisterTypeForCallingConv(Context, CC,
2535                                          VT.changeVectorElementTypeToInteger());
2536 
2537   return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
2538 }
2539 
2540 unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
2541                                                           CallingConv::ID CC,
2542                                                           EVT VT) const {
2543   if (VT.isVector()) {
2544     if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512()) {
2545       unsigned NumElts = VT.getVectorNumElements();
2546 
2547       MVT RegisterVT;
2548       unsigned NumRegisters;
2549       std::tie(RegisterVT, NumRegisters) =
2550           handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2551       if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2552         return NumRegisters;
2553     }
2554 
2555     if (VT.getVectorElementType() == MVT::f16 && VT.getVectorNumElements() < 8)
2556       return 1;
2557   }
2558 
2559   // We have to split f64 into 2 registers and f80 into 3 registers on 32-bit
2560   // targets if x87 is disabled.
2561   if (!Subtarget.is64Bit() && !Subtarget.hasX87()) {
2562     if (VT == MVT::f64)
2563       return 2;
2564     if (VT == MVT::f80)
2565       return 3;
2566   }
2567 
2568   if (VT.isVector() && VT.getVectorElementType() == MVT::bf16)
2569     return getNumRegistersForCallingConv(Context, CC,
2570                                          VT.changeVectorElementTypeToInteger());
2571 
2572   return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
2573 }
2574 
2575 unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
2576     LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
2577     unsigned &NumIntermediates, MVT &RegisterVT) const {
2578   // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2579   if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2580       Subtarget.hasAVX512() &&
2581       (!isPowerOf2_32(VT.getVectorNumElements()) ||
2582        (VT.getVectorNumElements() == 64 && !Subtarget.hasBWI()) ||
2583        VT.getVectorNumElements() > 64)) {
2584     RegisterVT = MVT::i8;
2585     IntermediateVT = MVT::i1;
2586     NumIntermediates = VT.getVectorNumElements();
2587     return NumIntermediates;
2588   }
2589 
2590   // Split v64i1 vectors if we don't have v64i8 available.
2591   if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2592       CC != CallingConv::X86_RegCall) {
2593     RegisterVT = MVT::v32i8;
2594     IntermediateVT = MVT::v32i1;
2595     NumIntermediates = 2;
2596     return 2;
2597   }
2598 
2599   return TargetLowering::getVectorTypeBreakdownForCallingConv(
2600       Context, CC, VT, IntermediateVT, NumIntermediates, RegisterVT);
2601 }
2602 
2603 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
2604                                           LLVMContext& Context,
2605                                           EVT VT) const {
2606   if (!VT.isVector())
2607     return MVT::i8;
2608 
2609   if (Subtarget.hasAVX512()) {
2610     // Figure out what this type will be legalized to.
2611     EVT LegalVT = VT;
2612     while (getTypeAction(Context, LegalVT) != TypeLegal)
2613       LegalVT = getTypeToTransformTo(Context, LegalVT);
2614 
2615     // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
2616     if (LegalVT.getSimpleVT().is512BitVector())
2617       return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
2618 
2619     if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
2620       // If we legalized to less than a 512-bit vector, then we will use a vXi1
2621       // compare for vXi32/vXi64 for sure. If we have BWI we will also support
2622       // vXi16/vXi8.
2623       MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
2624       if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
2625         return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
2626     }
2627   }
2628 
2629   return VT.changeVectorElementTypeToInteger();
2630 }
2631 
2632 /// Helper for getByValTypeAlignment to determine
2633 /// the desired ByVal argument alignment.
2634 static void getMaxByValAlign(Type *Ty, Align &MaxAlign) {
2635   if (MaxAlign == 16)
2636     return;
2637   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2638     if (VTy->getPrimitiveSizeInBits().getFixedValue() == 128)
2639       MaxAlign = Align(16);
2640   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2641     Align EltAlign;
2642     getMaxByValAlign(ATy->getElementType(), EltAlign);
2643     if (EltAlign > MaxAlign)
2644       MaxAlign = EltAlign;
2645   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2646     for (auto *EltTy : STy->elements()) {
2647       Align EltAlign;
2648       getMaxByValAlign(EltTy, EltAlign);
2649       if (EltAlign > MaxAlign)
2650         MaxAlign = EltAlign;
2651       if (MaxAlign == 16)
2652         break;
2653     }
2654   }
2655 }
2656 
2657 /// Return the desired alignment for ByVal aggregate
2658 /// function arguments in the caller parameter area. For X86, aggregates
2659 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
2660 /// are at 4-byte boundaries.
2661 uint64_t X86TargetLowering::getByValTypeAlignment(Type *Ty,
2662                                                   const DataLayout &DL) const {
2663   if (Subtarget.is64Bit()) {
2664     // Max of 8 and alignment of type.
2665     Align TyAlign = DL.getABITypeAlign(Ty);
2666     if (TyAlign > 8)
2667       return TyAlign.value();
2668     return 8;
2669   }
2670 
2671   Align Alignment(4);
2672   if (Subtarget.hasSSE1())
2673     getMaxByValAlign(Ty, Alignment);
2674   return Alignment.value();
2675 }
2676 
2677 /// It returns EVT::Other if the type should be determined using generic
2678 /// target-independent logic.
2679 /// For vector ops we check that the overall size isn't larger than our
2680 /// preferred vector width.
2681 EVT X86TargetLowering::getOptimalMemOpType(
2682     const MemOp &Op, const AttributeList &FuncAttributes) const {
2683   if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
2684     if (Op.size() >= 16 &&
2685         (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
2686       // FIXME: Check if unaligned 64-byte accesses are slow.
2687       if (Op.size() >= 64 && Subtarget.hasAVX512() &&
2688           (Subtarget.getPreferVectorWidth() >= 512)) {
2689         return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
2690       }
2691       // FIXME: Check if unaligned 32-byte accesses are slow.
2692       if (Op.size() >= 32 && Subtarget.hasAVX() &&
2693           Subtarget.useLight256BitInstructions()) {
2694         // Although this isn't a well-supported type for AVX1, we'll let
2695         // legalization and shuffle lowering produce the optimal codegen. If we
2696         // choose an optimal type with a vector element larger than a byte,
2697         // getMemsetStores() may create an intermediate splat (using an integer
2698         // multiply) before we splat as a vector.
2699         return MVT::v32i8;
2700       }
2701       if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2702         return MVT::v16i8;
2703       // TODO: Can SSE1 handle a byte vector?
2704       // If we have SSE1 registers we should be able to use them.
2705       if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2706           (Subtarget.getPreferVectorWidth() >= 128))
2707         return MVT::v4f32;
2708     } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
2709                Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2710       // Do not use f64 to lower memcpy if source is string constant. It's
2711       // better to use i32 to avoid the loads.
2712       // Also, do not use f64 to lower memset unless this is a memset of zeros.
2713       // The gymnastics of splatting a byte value into an XMM register and then
2714       // only using 8-byte stores (because this is a CPU with slow unaligned
2715       // 16-byte accesses) makes that a loser.
2716       return MVT::f64;
2717     }
2718   }
2719   // This is a compromise. If we reach here, unaligned accesses may be slow on
2720   // this target. However, creating smaller, aligned accesses could be even
2721   // slower and would certainly be a lot more code.
2722   if (Subtarget.is64Bit() && Op.size() >= 8)
2723     return MVT::i64;
2724   return MVT::i32;
2725 }
2726 
2727 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2728   if (VT == MVT::f32)
2729     return Subtarget.hasSSE1();
2730   if (VT == MVT::f64)
2731     return Subtarget.hasSSE2();
2732   return true;
2733 }
2734 
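// True when the alignment, measured in bits, is a multiple of SizeInBits,
// i.e. the access is fully covered by the given alignment.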
2735 static bool isBitAligned(Align Alignment, uint64_t SizeInBits) {
2736   return (8 * Alignment.value()) % SizeInBits == 0;
2737 }
2738 
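// Unaligned accesses are considered fast except for 128-bit and 256-bit
// accesses on subtargets that report slow unaligned 16/32-byte memory ops.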
2739 bool X86TargetLowering::isMemoryAccessFast(EVT VT, Align Alignment) const {
2740   if (isBitAligned(Alignment, VT.getSizeInBits()))
2741     return true;
2742   switch (VT.getSizeInBits()) {
2743   default:
2744     // 8-byte and under are always assumed to be fast.
2745     return true;
2746   case 128:
2747     return !Subtarget.isUnalignedMem16Slow();
2748   case 256:
2749     return !Subtarget.isUnalignedMem32Slow();
2750     // TODO: What about AVX-512 (512-bit) accesses?
2751   }
2752 }
2753 
2754 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2755     EVT VT, unsigned, Align Alignment, MachineMemOperand::Flags Flags,
2756     unsigned *Fast) const {
2757   if (Fast)
2758     *Fast = isMemoryAccessFast(VT, Alignment);
2759   // NonTemporal vector memory ops must be aligned.
2760   if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2761     // NT loads can only be vector aligned, so if it's less aligned than the
2762     // minimum vector size (which we can split the vector down to), we might as
2763     // well use a regular unaligned vector load.
2764     // We don't have any NT loads pre-SSE41.
2765     if (!!(Flags & MachineMemOperand::MOLoad))
2766       return (Alignment < 16 || !Subtarget.hasSSE41());
2767     return false;
2768   }
2769   // Misaligned accesses of any size are always allowed.
2770   return true;
2771 }
2772 
2773 bool X86TargetLowering::allowsMemoryAccess(LLVMContext &Context,
2774                                            const DataLayout &DL, EVT VT,
2775                                            unsigned AddrSpace, Align Alignment,
2776                                            MachineMemOperand::Flags Flags,
2777                                            unsigned *Fast) const {
2778   if (Fast)
2779     *Fast = isMemoryAccessFast(VT, Alignment);
2780   if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2781     if (allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags,
2782                                        /*Fast=*/nullptr))
2783       return true;
2784     // NonTemporal vector memory ops are special, and must be aligned.
2785     if (!isBitAligned(Alignment, VT.getSizeInBits()))
2786       return false;
2787     switch (VT.getSizeInBits()) {
2788     case 128:
2789       if (!!(Flags & MachineMemOperand::MOLoad) && Subtarget.hasSSE41())
2790         return true;
2791       if (!!(Flags & MachineMemOperand::MOStore) && Subtarget.hasSSE2())
2792         return true;
2793       return false;
2794     case 256:
2795       if (!!(Flags & MachineMemOperand::MOLoad) && Subtarget.hasAVX2())
2796         return true;
2797       if (!!(Flags & MachineMemOperand::MOStore) && Subtarget.hasAVX())
2798         return true;
2799       return false;
2800     case 512:
2801       if (Subtarget.hasAVX512())
2802         return true;
2803       return false;
2804     default:
2805       return false; // Don't have NonTemporal vector memory ops of this size.
2806     }
2807   }
2808   return true;
2809 }
2810 
2811 /// Return the entry encoding for a jump table in the
2812 /// current function.  The returned value is a member of the
2813 /// MachineJumpTableInfo::JTEntryKind enum.
2814 unsigned X86TargetLowering::getJumpTableEncoding() const {
2815   // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2816   // symbol.
2817   if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2818     return MachineJumpTableInfo::EK_Custom32;
2819 
2820   // Otherwise, use the normal jump table encoding heuristics.
2821   return TargetLowering::getJumpTableEncoding();
2822 }
2823 
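// For ABI register copies of a bf16 value in an f32 part, move the value
// through the integer types: bitcast bf16 -> i16, any-extend to i32, then
// bitcast to f32. joinRegisterPartsIntoValue below performs the reverse.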
2824 bool X86TargetLowering::splitValueIntoRegisterParts(
2825     SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
2826     unsigned NumParts, MVT PartVT, std::optional<CallingConv::ID> CC) const {
2827   bool IsABIRegCopy = CC.has_value();
2828   EVT ValueVT = Val.getValueType();
2829   if (IsABIRegCopy && ValueVT == MVT::bf16 && PartVT == MVT::f32) {
2830     unsigned ValueBits = ValueVT.getSizeInBits();
2831     unsigned PartBits = PartVT.getSizeInBits();
2832     Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val);
2833     Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val);
2834     Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
2835     Parts[0] = Val;
2836     return true;
2837   }
2838   return false;
2839 }
2840 
2841 SDValue X86TargetLowering::joinRegisterPartsIntoValue(
2842     SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
2843     MVT PartVT, EVT ValueVT, std::optional<CallingConv::ID> CC) const {
2844   bool IsABIRegCopy = CC.has_value();
2845   if (IsABIRegCopy && ValueVT == MVT::bf16 && PartVT == MVT::f32) {
2846     unsigned ValueBits = ValueVT.getSizeInBits();
2847     unsigned PartBits = PartVT.getSizeInBits();
2848     SDValue Val = Parts[0];
2849 
2850     Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val);
2851     Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val);
2852     Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
2853     return Val;
2854   }
2855   return SDValue();
2856 }
2857 
2858 bool X86TargetLowering::useSoftFloat() const {
2859   return Subtarget.useSoftFloat();
2860 }
2861 
2862 void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2863                                               ArgListTy &Args) const {
2864 
2865   // Only relabel X86-32 for C / Stdcall CCs.
2866   if (Subtarget.is64Bit())
2867     return;
2868   if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2869     return;
2870   unsigned ParamRegs = 0;
2871   if (auto *M = MF->getFunction().getParent())
2872     ParamRegs = M->getNumberRegisterParameters();
2873 
2874   // Mark the first N integer arguments as being passed in registers.
2875   for (auto &Arg : Args) {
2876     Type *T = Arg.Ty;
2877     if (T->isIntOrPtrTy())
2878       if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2879         unsigned numRegs = 1;
2880         if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2881           numRegs = 2;
2882         if (ParamRegs < numRegs)
2883           return;
2884         ParamRegs -= numRegs;
2885         Arg.IsInReg = true;
2886       }
2887   }
2888 }
2889 
2890 const MCExpr *
2891 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2892                                              const MachineBasicBlock *MBB,
2893                                              unsigned uid,MCContext &Ctx) const{
2894   assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2895   // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2896   // entries.
2897   return MCSymbolRefExpr::create(MBB->getSymbol(),
2898                                  MCSymbolRefExpr::VK_GOTOFF, Ctx);
2899 }
2900 
2901 /// Returns relocation base for the given PIC jumptable.
2902 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2903                                                     SelectionDAG &DAG) const {
2904   if (!Subtarget.is64Bit())
2905     // This doesn't have SDLoc associated with it, but is not really the
2906     // same as a Register.
2907     return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2908                        getPointerTy(DAG.getDataLayout()));
2909   return Table;
2910 }
2911 
2912 /// This returns the relocation base for the given PIC jumptable,
2913 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
2914 const MCExpr *X86TargetLowering::
2915 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2916                              MCContext &Ctx) const {
2917   // X86-64 uses RIP relative addressing based on the jump table label.
2918   if (Subtarget.isPICStyleRIPRel())
2919     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2920 
2921   // Otherwise, the reference is relative to the PIC base.
2922   return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2923 }
2924 
2925 std::pair<const TargetRegisterClass *, uint8_t>
2926 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2927                                            MVT VT) const {
2928   const TargetRegisterClass *RRC = nullptr;
2929   uint8_t Cost = 1;
2930   switch (VT.SimpleTy) {
2931   default:
2932     return TargetLowering::findRepresentativeClass(TRI, VT);
2933   case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2934     RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2935     break;
2936   case MVT::x86mmx:
2937     RRC = &X86::VR64RegClass;
2938     break;
2939   case MVT::f32: case MVT::f64:
2940   case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2941   case MVT::v4f32: case MVT::v2f64:
2942   case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2943   case MVT::v8f32: case MVT::v4f64:
2944   case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2945   case MVT::v16f32: case MVT::v8f64:
2946     RRC = &X86::VR128XRegClass;
2947     break;
2948   }
2949   return std::make_pair(RRC, Cost);
2950 }
2951 
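// Segment address space used for the stack guard and SafeStack TLS slots:
// %gs (256) on 32-bit and for the 64-bit kernel code model, %fs (257)
// otherwise.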
2952 unsigned X86TargetLowering::getAddressSpace() const {
2953   if (Subtarget.is64Bit())
2954     return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2955   return 256;
2956 }
2957 
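// glibc, Fuchsia, and Android (version 17 or newer) provide a fixed TLS slot
// for the stack guard.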
2958 static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2959   return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2960          (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2961 }
2962 
2963 static Constant* SegmentOffset(IRBuilderBase &IRB,
2964                                int Offset, unsigned AddressSpace) {
2965   return ConstantExpr::getIntToPtr(
2966       ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2967       Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
2968 }
2969 
2970 Value *X86TargetLowering::getIRStackGuard(IRBuilderBase &IRB) const {
2971   // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2972   // tcbhead_t; use it instead of the usual global variable (see
2973   // sysdeps/{i386,x86_64}/nptl/tls.h)
2974   if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2975     if (Subtarget.isTargetFuchsia()) {
2976       // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2977       return SegmentOffset(IRB, 0x10, getAddressSpace());
2978     } else {
2979       unsigned AddressSpace = getAddressSpace();
2980       Module *M = IRB.GetInsertBlock()->getParent()->getParent();
2981       // Some users may customize the base register and offset.
2982       int Offset = M->getStackProtectorGuardOffset();
2983       // If we don't set -stack-protector-guard-offset value:
2984       // %fs:0x28, unless we're using a Kernel code model, in which case
2985       // it's %gs:0x28.  gs:0x14 on i386.
2986       if (Offset == INT_MAX)
2987         Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2988 
2989       StringRef GuardReg = M->getStackProtectorGuardReg();
2990       if (GuardReg == "fs")
2991         AddressSpace = X86AS::FS;
2992       else if (GuardReg == "gs")
2993         AddressSpace = X86AS::GS;
2994 
2995       // Use the symbol guard if the user specified one.
2996       StringRef GuardSymb = M->getStackProtectorGuardSymbol();
2997       if (!GuardSymb.empty()) {
2998         GlobalVariable *GV = M->getGlobalVariable(GuardSymb);
2999         if (!GV) {
3000           Type *Ty = Subtarget.is64Bit() ? Type::getInt64Ty(M->getContext())
3001                                          : Type::getInt32Ty(M->getContext());
3002           GV = new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage,
3003                                   nullptr, GuardSymb, nullptr,
3004                                   GlobalValue::NotThreadLocal, AddressSpace);
3005         }
3006         return GV;
3007       }
3008 
3009       return SegmentOffset(IRB, Offset, AddressSpace);
3010     }
3011   }
3012   return TargetLowering::getIRStackGuard(IRB);
3013 }
3014 
3015 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
3016   // MSVC CRT provides functionalities for stack protection.
3017   if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
3018       Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
3019     // MSVC CRT has a global variable holding security cookie.
3020     M.getOrInsertGlobal("__security_cookie",
3021                         Type::getInt8PtrTy(M.getContext()));
3022 
3023     // MSVC CRT has a function to validate security cookie.
3024     FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
3025         "__security_check_cookie", Type::getVoidTy(M.getContext()),
3026         Type::getInt8PtrTy(M.getContext()));
3027     if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
3028       F->setCallingConv(CallingConv::X86_FastCall);
3029       F->addParamAttr(0, Attribute::AttrKind::InReg);
3030     }
3031     return;
3032   }
3033 
3034   StringRef GuardMode = M.getStackProtectorGuard();
3035 
3036   // glibc, bionic, and Fuchsia have a special slot for the stack guard.
3037   if ((GuardMode == "tls" || GuardMode.empty()) &&
3038       hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
3039     return;
3040   TargetLowering::insertSSPDeclarations(M);
3041 }
3042 
3043 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
3044   // MSVC CRT has a global variable holding security cookie.
3045   if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
3046       Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
3047     return M.getGlobalVariable("__security_cookie");
3048   }
3049   return TargetLowering::getSDagStackGuard(M);
3050 }
3051 
3052 Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
3053   // MSVC CRT has a function to validate security cookie.
3054   if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
3055       Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
3056     return M.getFunction("__security_check_cookie");
3057   }
3058   return TargetLowering::getSSPStackGuardCheck(M);
3059 }
3060 
3061 Value *
3062 X86TargetLowering::getSafeStackPointerLocation(IRBuilderBase &IRB) const {
3063   if (Subtarget.getTargetTriple().isOSContiki())
3064     return getDefaultSafeStackPointerLocation(IRB, false);
3065 
3066   // Android provides a fixed TLS slot for the SafeStack pointer. See the
3067   // definition of TLS_SLOT_SAFESTACK in
3068   // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
3069   if (Subtarget.isTargetAndroid()) {
3070     // %fs:0x48, unless we're using a Kernel code model, in which case it's
3071     // %gs:0x48. %gs:0x24 on i386.
3072     int Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
3073     return SegmentOffset(IRB, Offset, getAddressSpace());
3074   }
3075 
3076   // Fuchsia is similar.
3077   if (Subtarget.isTargetFuchsia()) {
3078     // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
3079     return SegmentOffset(IRB, 0x18, getAddressSpace());
3080   }
3081 
3082   return TargetLowering::getSafeStackPointerLocation(IRB);
3083 }
3084 
3085 //===----------------------------------------------------------------------===//
3086 //               Return Value Calling Convention Implementation
3087 //===----------------------------------------------------------------------===//
3088 
3089 bool X86TargetLowering::CanLowerReturn(
3090     CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
3091     const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
3092   SmallVector<CCValAssign, 16> RVLocs;
3093   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
3094   return CCInfo.CheckReturn(Outs, RetCC_X86);
3095 }
3096 
3097 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
3098   static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
3099   return ScratchRegs;
3100 }
3101 
3102 /// Lowers mask values (v*i1) to the local register values
3103 /// \returns DAG node after lowering to register type
3104 static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
3105                                const SDLoc &Dl, SelectionDAG &DAG) {
3106   EVT ValVT = ValArg.getValueType();
3107 
3108   if (ValVT == MVT::v1i1)
3109     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
3110                        DAG.getIntPtrConstant(0, Dl));
3111 
3112   if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
3113       (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
3114     // Two stage lowering might be required
3115     // bitcast:   v8i1 -> i8 / v16i1 -> i16
3116     // anyextend: i8   -> i32 / i16   -> i32
3117     EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
3118     SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
3119     if (ValLoc == MVT::i32)
3120       ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
3121     return ValToCopy;
3122   }
3123 
3124   if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
3125       (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
3126     // One stage lowering is required
3127     // bitcast:   v32i1 -> i32 / v64i1 -> i64
3128     return DAG.getBitcast(ValLoc, ValArg);
3129   }
3130 
3131   return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
3132 }
3133 
3134 /// Breaks v64i1 value into two registers and adds the new node to the DAG
3135 static void Passv64i1ArgInRegs(
3136     const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
3137     SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
3138     CCValAssign &NextVA, const X86Subtarget &Subtarget) {
3139   assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
3140   assert(Subtarget.is32Bit() && "Expecting 32 bit target");
3141   assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
3142   assert(VA.isRegLoc() && NextVA.isRegLoc() &&
3143          "The value should reside in two registers");
3144 
3145   // Before splitting the value we cast it to i64
3146   Arg = DAG.getBitcast(MVT::i64, Arg);
3147 
3148   // Splitting the value into two i32 types
3149   SDValue Lo, Hi;
3150   Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
3151                    DAG.getConstant(0, Dl, MVT::i32));
3152   Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
3153                    DAG.getConstant(1, Dl, MVT::i32));
3154 
3155   // Pass the two i32 halves in their corresponding registers
3156   RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
3157   RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
3158 }
3159 
3160 SDValue
3161 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3162                                bool isVarArg,
3163                                const SmallVectorImpl<ISD::OutputArg> &Outs,
3164                                const SmallVectorImpl<SDValue> &OutVals,
3165                                const SDLoc &dl, SelectionDAG &DAG) const {
3166   MachineFunction &MF = DAG.getMachineFunction();
3167   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3168 
3169   // In some cases we need to disable registers from the default CSR list.
3170   // For example, when they are used for argument passing.
3171   bool ShouldDisableCalleeSavedRegister =
3172       CallConv == CallingConv::X86_RegCall ||
3173       MF.getFunction().hasFnAttribute("no_caller_saved_registers");
3174 
3175   if (CallConv == CallingConv::X86_INTR && !Outs.empty())
3176     report_fatal_error("X86 interrupts may not return any value");
3177 
3178   SmallVector<CCValAssign, 16> RVLocs;
3179   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
3180   CCInfo.AnalyzeReturn(Outs, RetCC_X86);
3181 
3182   SmallVector<std::pair<Register, SDValue>, 4> RetVals;
3183   for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
3184        ++I, ++OutsIndex) {
3185     CCValAssign &VA = RVLocs[I];
3186     assert(VA.isRegLoc() && "Can only return in registers!");
3187 
3188     // Add the register to the CalleeSaveDisableRegs list.
3189     if (ShouldDisableCalleeSavedRegister)
3190       MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
3191 
3192     SDValue ValToCopy = OutVals[OutsIndex];
3193     EVT ValVT = ValToCopy.getValueType();
3194 
3195     // Promote values to the appropriate types.
3196     if (VA.getLocInfo() == CCValAssign::SExt)
3197       ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
3198     else if (VA.getLocInfo() == CCValAssign::ZExt)
3199       ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
3200     else if (VA.getLocInfo() == CCValAssign::AExt) {
3201       if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
3202         ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
3203       else
3204         ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
3205     }
3206     else if (VA.getLocInfo() == CCValAssign::BCvt)
3207       ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
3208 
3209     assert(VA.getLocInfo() != CCValAssign::FPExt &&
3210            "Unexpected FP-extend for return value.");
3211 
3212     // Report an error if we have attempted to return a value via an XMM
3213     // register and SSE was disabled.
3214     if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3215       errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3216       VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3217     } else if (!Subtarget.hasSSE2() &&
3218                X86::FR64XRegClass.contains(VA.getLocReg()) &&
3219                ValVT == MVT::f64) {
3220       // When returning a double via an XMM register, report an error if SSE2 is
3221       // not enabled.
3222       errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3223       VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3224     }
3225 
3226     // Returns in ST0/ST1 are handled specially: these are pushed as operands to
3227     // the RET instruction and handled by the FP Stackifier.
3228     if (VA.getLocReg() == X86::FP0 ||
3229         VA.getLocReg() == X86::FP1) {
3230       // If this is a copy from an xmm register to ST(0), use an FPExtend to
3231       // change the value to the FP stack register class.
3232       if (isScalarFPTypeInSSEReg(VA.getValVT()))
3233         ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
3234       RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
3235       // Don't emit a copytoreg.
3236       continue;
3237     }
3238 
3239     // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
3240     // which is returned in RAX / RDX.
3241     if (Subtarget.is64Bit()) {
3242       if (ValVT == MVT::x86mmx) {
3243         if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
3244           ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
3245           ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
3246                                   ValToCopy);
3247           // If we don't have SSE2 available, convert to v4f32 so the generated
3248           // register is legal.
3249           if (!Subtarget.hasSSE2())
3250             ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
3251         }
3252       }
3253     }
3254 
3255     if (VA.needsCustom()) {
3256       assert(VA.getValVT() == MVT::v64i1 &&
3257              "Currently the only custom case is when we split v64i1 to 2 regs");
3258 
3259       Passv64i1ArgInRegs(dl, DAG, ValToCopy, RetVals, VA, RVLocs[++I],
3260                          Subtarget);
3261 
3262       // Add the second register to the CalleeSaveDisableRegs list.
3263       if (ShouldDisableCalleeSavedRegister)
3264         MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
3265     } else {
3266       RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
3267     }
3268   }
3269 
3270   SDValue Flag;
3271   SmallVector<SDValue, 6> RetOps;
3272   RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
3273   // Operand #1 = Bytes To Pop
3274   RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
3275                    MVT::i32));
3276 
3277   // Copy the result values into the output registers.
3278   for (auto &RetVal : RetVals) {
3279     if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
3280       RetOps.push_back(RetVal.second);
3281       continue; // Don't emit a copytoreg.
3282     }
3283 
3284     Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Flag);
3285     Flag = Chain.getValue(1);
3286     RetOps.push_back(
3287         DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
3288   }
3289 
3290   // Swift calling convention does not require we copy the sret argument
3291   // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
3292 
3293   // All x86 ABIs require that for returning structs by value we copy
3294   // the sret argument into %rax/%eax (depending on ABI) for the return.
3295   // We saved the argument into a virtual register in the entry block,
3296   // so now we copy the value out and into %rax/%eax.
3297   //
3298   // Checking Function.hasStructRetAttr() here is insufficient because the IR
3299   // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
3300   // false, then an sret argument may be implicitly inserted in the SelDAG. In
3301   // either case FuncInfo->setSRetReturnReg() will have been called.
3302   if (Register SRetReg = FuncInfo->getSRetReturnReg()) {
3303     // When we have both sret and another return value, we should use the
3304     // original Chain stored in RetOps[0], instead of the current Chain updated
3305     // in the above loop. If we only have sret, RetOps[0] equals to Chain.
3306 
3307     // For the case of sret and another return value, we have
3308     //   Chain_0 at the function entry
3309     //   Chain_1 = getCopyToReg(Chain_0) in the above loop
3310     // If we use Chain_1 in getCopyFromReg, we will have
3311     //   Val = getCopyFromReg(Chain_1)
3312     //   Chain_2 = getCopyToReg(Chain_1, Val) from below
3313 
3314     // getCopyToReg(Chain_0) will be glued together with
3315     // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
3316     // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
3317     //   Data dependency from Unit B to Unit A due to usage of Val in
3318     //     getCopyToReg(Chain_1, Val)
3319     //   Chain dependency from Unit A to Unit B
3320 
3321     // So here, we use RetOps[0] (i.e. Chain_0) for getCopyFromReg.
3322     SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
3323                                      getPointerTy(MF.getDataLayout()));
3324 
3325     Register RetValReg
3326         = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
3327           X86::RAX : X86::EAX;
3328     Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
3329     Flag = Chain.getValue(1);
3330 
3331     // RAX/EAX now acts like a return value.
3332     RetOps.push_back(
3333         DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
3334 
3335     // Add the returned register to the CalleeSaveDisableRegs list.
3336     if (ShouldDisableCalleeSavedRegister)
3337       MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
3338   }
3339 
3340   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
3341   const MCPhysReg *I =
3342       TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
3343   if (I) {
3344     for (; *I; ++I) {
3345       if (X86::GR64RegClass.contains(*I))
3346         RetOps.push_back(DAG.getRegister(*I, MVT::i64));
3347       else
3348         llvm_unreachable("Unexpected register class in CSRsViaCopy!");
3349     }
3350   }
3351 
3352   RetOps[0] = Chain;  // Update chain.
3353 
3354   // Add the flag if we have it.
3355   if (Flag.getNode())
3356     RetOps.push_back(Flag);
3357 
3358   X86ISD::NodeType opcode = X86ISD::RET_FLAG;
3359   if (CallConv == CallingConv::X86_INTR)
3360     opcode = X86ISD::IRET;
3361   return DAG.getNode(opcode, dl, MVT::Other, RetOps);
3362 }
3363 
3364 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
3365   if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
3366     return false;
3367 
3368   SDValue TCChain = Chain;
3369   SDNode *Copy = *N->use_begin();
3370   if (Copy->getOpcode() == ISD::CopyToReg) {
3371     // If the copy has a glue operand, we conservatively assume it isn't safe to
3372     // perform a tail call.
3373     if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
3374       return false;
3375     TCChain = Copy->getOperand(0);
3376   } else if (Copy->getOpcode() != ISD::FP_EXTEND)
3377     return false;
3378 
3379   bool HasRet = false;
3380   for (const SDNode *U : Copy->uses()) {
3381     if (U->getOpcode() != X86ISD::RET_FLAG)
3382       return false;
3383     // If we are returning more than one value, we can definitely not make
3384     // a tail call. See PR19530.
3385     if (U->getNumOperands() > 4)
3386       return false;
3387     if (U->getNumOperands() == 4 &&
3388         U->getOperand(U->getNumOperands() - 1).getValueType() != MVT::Glue)
3389       return false;
3390     HasRet = true;
3391   }
3392 
3393   if (!HasRet)
3394     return false;
3395 
3396   Chain = TCChain;
3397   return true;
3398 }
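// For intuition, the shape accepted above is roughly a single CopyToReg of the
// value into a physical register, with no glue operand of its own, whose only
// users are X86ISD::RET_FLAG nodes carrying at most the chain, the
// bytes-to-pop constant, one register operand and optional glue. That is, the
// value exists solely to be returned, which is what lets callers of this hook
// treat the node as being in tail-call position.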
3399 
3400 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
3401                                            ISD::NodeType ExtendKind) const {
3402   MVT ReturnMVT = MVT::i32;
3403 
3404   bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
3405   if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
3406     // The ABI does not require i1, i8 or i16 to be extended.
3407     //
3408     // On Darwin, there is code in the wild relying on Clang's old behaviour of
3409     // always extending i8/i16 return values, so keep doing that for now.
3410     // (PR26665).
3411     ReturnMVT = MVT::i8;
3412   }
3413 
3414   EVT MinVT = getRegisterType(Context, ReturnMVT);
3415   return VT.bitsLT(MinVT) ? MinVT : VT;
3416 }
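// Illustrative effect of the above: a function returning i8 keeps i8 as the
// minimum return type on most targets, but on Darwin ReturnMVT stays i32, so
// the i8 result is extended and returned as a 32-bit value.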
3417 
3418 /// Reads two 32-bit registers and creates a 64-bit mask value.
3419 /// \param VA The current 32-bit value that needs to be assigned.
3420 /// \param NextVA The next 32-bit value that needs to be assigned.
3421 /// \param Root The parent DAG node.
3422 /// \param [in,out] InFlag Represents the SDValue in the parent DAG node used
3423 ///                        for glue purposes. If the DAG is already using a
3424 ///                        physical register instead of a virtual one, we
3425 ///                        should glue our new SDValue to the InFlag SDValue.
3426 /// \return a new 64-bit wide SDValue.
3427 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
3428                                 SDValue &Root, SelectionDAG &DAG,
3429                                 const SDLoc &Dl, const X86Subtarget &Subtarget,
3430                                 SDValue *InFlag = nullptr) {
3431   assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
3432   assert(Subtarget.is32Bit() && "Expecting 32 bit target");
3433   assert(VA.getValVT() == MVT::v64i1 &&
3434          "Expecting first location of 64 bit width type");
3435   assert(NextVA.getValVT() == VA.getValVT() &&
3436          "The locations should have the same type");
3437   assert(VA.isRegLoc() && NextVA.isRegLoc() &&
3438          "The values should reside in two registers");
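  // For illustration: on a 32-bit regcall target the two locations name two
  // GR32 registers chosen by the calling-convention tables (not hard-coded
  // here); the code below reloads both 32-bit halves and rebuilds the value
  // as concat_vectors(bitcast v32i1 Lo, bitcast v32i1 Hi) : v64i1.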
3439 
3440   SDValue Lo, Hi;
3441   SDValue ArgValueLo, ArgValueHi;
3442 
3443   MachineFunction &MF = DAG.getMachineFunction();
3444   const TargetRegisterClass *RC = &X86::GR32RegClass;
3445 
3446   // Read a 32 bit value from the registers.
3447   if (nullptr == InFlag) {
3448     // When no physical register is present,
3449     // create an intermediate virtual register.
3450     Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
3451     ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
3452     Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
3453     ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
3454   } else {
3455     // When a physical register is available read the value from it and glue
3456     // the reads together.
3457     ArgValueLo =
3458       DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
3459     *InFlag = ArgValueLo.getValue(2);
3460     ArgValueHi =
3461       DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
3462     *InFlag = ArgValueHi.getValue(2);
3463   }
3464 
3465   // Convert the i32 type into v32i1 type.
3466   Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
3467 
3468   // Convert the i32 type into v32i1 type.
3469   Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
3470 
3471   // Concatenate the two values together.
3472   return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
3473 }
3474 
3475 /// The function will lower a register of various sizes (8/16/32/64)
3476 /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1).
3477 /// \returns a DAG node containing the operand lowered to a mask type.
3478 static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
3479                                const EVT &ValLoc, const SDLoc &Dl,
3480                                SelectionDAG &DAG) {
3481   SDValue ValReturned = ValArg;
3482 
3483   if (ValVT == MVT::v1i1)
3484     return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
3485 
3486   if (ValVT == MVT::v64i1) {
3487     // On 32-bit targets this case is handled by getv64i1Argument.
3488     assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
3489     // On 64-bit targets there is no need to truncate; a bitcast suffices.
3490   } else {
3491     MVT maskLen;
3492     switch (ValVT.getSimpleVT().SimpleTy) {
3493     case MVT::v8i1:
3494       maskLen = MVT::i8;
3495       break;
3496     case MVT::v16i1:
3497       maskLen = MVT::i16;
3498       break;
3499     case MVT::v32i1:
3500       maskLen = MVT::i32;
3501       break;
3502     default:
3503       llvm_unreachable("Expecting a vector of i1 types");
3504     }
3505 
3506     ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
3507   }
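  // At this point ValReturned has the same bit width as ValVT (e.g. a v16i1
  // value arriving in a 32-bit location was truncated to i16 above), so the
  // bitcast below finishes the conversion.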
3508   return DAG.getBitcast(ValVT, ValReturned);
3509 }
3510 
3511 /// Lower the result values of a call into the
3512 /// appropriate copies out of the physical result registers.
3513 ///
3514 SDValue X86TargetLowering::LowerCallResult(
3515     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
3516     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3517     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
3518     uint32_t *RegMask) const {
3519 
3520   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3521   // Assign locations to each value returned by this call.
3522   SmallVector<CCValAssign, 16> RVLocs;
3523   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
3524                  *DAG.getContext());
3525   CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3526 
3527   // Copy all of the result registers out of their specified physreg.
3528   for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
3529        ++I, ++InsIndex) {
3530     CCValAssign &VA = RVLocs[I];
3531     EVT CopyVT = VA.getLocVT();
3532 
3533     // In some calling conventions we need to remove the used registers
3534     // from the register mask.
3535     if (RegMask) {
3536       for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
3537            SubRegs.isValid(); ++SubRegs)
3538         RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
3539     }
3540 
3541     // Report an error if there was an attempt to return FP values via XMM
3542     // registers.
3543     if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3544       errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3545       if (VA.getLocReg() == X86::XMM1)
3546         VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3547       else
3548         VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3549     } else if (!Subtarget.hasSSE2() &&
3550                X86::FR64XRegClass.contains(VA.getLocReg()) &&
3551                CopyVT == MVT::f64) {
3552       errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3553       if (VA.getLocReg() == X86::XMM1)
3554         VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3555       else
3556         VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3557     }
3558 
3559     // If we prefer to use the value in xmm registers, copy it out as f80 and
3560     // use a truncate to move it from fp stack reg to xmm reg.
3561     bool RoundAfterCopy = false;
3562     if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
3563         isScalarFPTypeInSSEReg(VA.getValVT())) {
3564       if (!Subtarget.hasX87())
3565         report_fatal_error("X87 register return with X87 disabled");
3566       CopyVT = MVT::f80;
3567       RoundAfterCopy = (CopyVT != VA.getLocVT());
3568     }
3569 
3570     SDValue Val;
3571     if (VA.needsCustom()) {
3572       assert(VA.getValVT() == MVT::v64i1 &&
3573              "Currently the only custom case is when we split v64i1 to 2 regs");
3574       Val =
3575           getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
3576     } else {
3577       Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
3578                   .getValue(1);
3579       Val = Chain.getValue(0);
3580       InFlag = Chain.getValue(2);
3581     }
3582 
3583     if (RoundAfterCopy)
3584       Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
3585                         // This truncation won't change the value.
3586                         DAG.getIntPtrConstant(1, dl, /*isTarget=*/true));
3587 
3588     if (VA.isExtInLoc()) {
3589       if (VA.getValVT().isVector() &&
3590           VA.getValVT().getScalarType() == MVT::i1 &&
3591           ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3592            (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3593         // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3594         Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
3595       } else
3596         Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3597     }
3598 
3599     if (VA.getLocInfo() == CCValAssign::BCvt)
3600       Val = DAG.getBitcast(VA.getValVT(), Val);
3601 
3602     InVals.push_back(Val);
3603   }
3604 
3605   return Chain;
3606 }
3607 
3608 //===----------------------------------------------------------------------===//
3609 //                C & StdCall & Fast Calling Convention implementation
3610 //===----------------------------------------------------------------------===//
3611 //  The StdCall calling convention is standard for many Windows API routines
3612 //  and the like. It differs from the C calling convention just a little: the
3613 //  callee should clean up the stack, not the caller. Symbols should also be
3614 //  decorated in some fancy way :) It doesn't support any vector arguments.
3615 //  For info on fast calling convention see Fast Calling Convention (tail call)
3616 //  implementation LowerX86_32FastCCCallTo.
3617 
3618 /// Determines whether Args, either a set of outgoing arguments to a call or a
3619 /// set of incoming arguments of a call, contains an sret pointer that the
3620 /// callee pops.
3621 template <typename T>
3622 static bool hasCalleePopSRet(const SmallVectorImpl<T> &Args,
3623                              const X86Subtarget &Subtarget) {
3624   // Not C++20 (yet), so no concepts available.
3625   static_assert(std::is_same_v<T, ISD::OutputArg> ||
3626                     std::is_same_v<T, ISD::InputArg>,
3627                 "requires ISD::OutputArg or ISD::InputArg");
3628 
3629   // Only 32-bit targets pop the sret.  It's a 64-bit world these days, so
3630   // early-out for most compilations.
3631   if (!Subtarget.is32Bit())
3632     return false;
3633 
3634   if (Args.empty())
3635     return false;
3636 
3637   // Most calls do not have an sret argument; check the first arg next.
3638   const ISD::ArgFlagsTy &Flags = Args[0].Flags;
3639   if (!Flags.isSRet() || Flags.isInReg())
3640     return false;
3641 
3642   // The MSVC ABI does not pop the sret.
3643   if (Subtarget.getTargetTriple().isOSMSVCRT())
3644     return false;
3645 
3646   // MCU targets don't pop the sret.
3647   if (Subtarget.isTargetMCU())
3648     return false;
3649 
3650   // Otherwise the callee pops the sret argument.
3651   return true;
3652 }
3653 
3654 /// Make a copy of an aggregate at the address specified by "Src" to the
3655 /// address "Dst", with size and alignment information specified by the byval
3656 /// parameter attribute. The copy will be passed as a byval function parameter.
3657 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
3658                                          SDValue Chain, ISD::ArgFlagsTy Flags,
3659                                          SelectionDAG &DAG, const SDLoc &dl) {
3660   SDValue SizeNode = DAG.getIntPtrConstant(Flags.getByValSize(), dl);
3661 
3662   return DAG.getMemcpy(
3663       Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
3664       /*isVolatile*/ false, /*AlwaysInline=*/true,
3665       /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
3666 }
3667 
3668 /// Return true if the calling convention is one that we can guarantee TCO for.
3669 static bool canGuaranteeTCO(CallingConv::ID CC) {
3670   return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3671           CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
3672           CC == CallingConv::HHVM || CC == CallingConv::Tail ||
3673           CC == CallingConv::SwiftTail);
3674 }
3675 
3676 /// Return true if we might ever do TCO for calls with this calling convention.
3677 static bool mayTailCallThisCC(CallingConv::ID CC) {
3678   switch (CC) {
3679   // C calling conventions:
3680   case CallingConv::C:
3681   case CallingConv::Win64:
3682   case CallingConv::X86_64_SysV:
3683   // Callee pop conventions:
3684   case CallingConv::X86_ThisCall:
3685   case CallingConv::X86_StdCall:
3686   case CallingConv::X86_VectorCall:
3687   case CallingConv::X86_FastCall:
3688   // Swift:
3689   case CallingConv::Swift:
3690     return true;
3691   default:
3692     return canGuaranteeTCO(CC);
3693   }
3694 }
3695 
3696 /// Return true if the function is being made into a tailcall target by
3697 /// changing its ABI.
3698 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
3699   return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) ||
3700          CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
3701 }
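// Roughly: tailcc and swifttailcc calls always get the ABI-changing
// guaranteed-TCO treatment, while fastcc, GHC, RegCall, HiPE and HHVM calls
// get it only when GuaranteedTailCallOpt (-tailcallopt) is enabled, per
// canGuaranteeTCO above.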
3702 
3703 bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3704   if (!CI->isTailCall())
3705     return false;
3706 
3707   CallingConv::ID CalleeCC = CI->getCallingConv();
3708   if (!mayTailCallThisCC(CalleeCC))
3709     return false;
3710 
3711   return true;
3712 }
3713 
3714 SDValue
3715 X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
3716                                     const SmallVectorImpl<ISD::InputArg> &Ins,
3717                                     const SDLoc &dl, SelectionDAG &DAG,
3718                                     const CCValAssign &VA,
3719                                     MachineFrameInfo &MFI, unsigned i) const {
3720   // Create the nodes corresponding to a load from this parameter slot.
3721   ISD::ArgFlagsTy Flags = Ins[i].Flags;
3722   bool AlwaysUseMutable = shouldGuaranteeTCO(
3723       CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
3724   bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
3725   EVT ValVT;
3726   MVT PtrVT = getPointerTy(DAG.getDataLayout());
3727 
3728   // If the value is passed by pointer, we have the address passed instead of
3729   // the value itself. There is no need to extend if the mask value and the
3730   // location share the same size.
3731   bool ExtendedInMem =
3732       VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
3733       VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
3734 
3735   if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
3736     ValVT = VA.getLocVT();
3737   else
3738     ValVT = VA.getValVT();
3739 
3740   // FIXME: For now, all byval parameter objects are marked mutable. This can be
3741   // changed with more analysis.
3742   // In case of tail call optimization, mark all arguments mutable, since they
3743   // could be overwritten by the lowering of arguments in case of a tail call.
3744   if (Flags.isByVal()) {
3745     unsigned Bytes = Flags.getByValSize();
3746     if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3747 
3748     // FIXME: For now, all byval parameter objects are marked as aliasing. This
3749     // can be improved with deeper analysis.
3750     int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3751                                    /*isAliased=*/true);
3752     return DAG.getFrameIndex(FI, PtrVT);
3753   }
3754 
3755   EVT ArgVT = Ins[i].ArgVT;
3756 
3757   // If this is a vector that has been split into multiple parts, and the
3758   // scalar size of the parts doesn't match the vector element size, we can't
3759   // elide the copy. The parts will have padding between them instead of being
3760   // packed like a vector.
3761   bool ScalarizedAndExtendedVector =
3762       ArgVT.isVector() && !VA.getLocVT().isVector() &&
3763       VA.getLocVT().getSizeInBits() != ArgVT.getScalarSizeInBits();
3764 
3765   // This is an argument in memory. We might be able to perform copy elision.
3766   // If the argument is passed directly in memory without any extension, then we
3767   // can perform copy elision. Large vector types, for example, may be passed
3768   // indirectly by pointer.
3769   if (Flags.isCopyElisionCandidate() &&
3770       VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem &&
3771       !ScalarizedAndExtendedVector) {
3772     SDValue PartAddr;
3773     if (Ins[i].PartOffset == 0) {
3774       // If this is a one-part value or the first part of a multi-part value,
3775       // create a stack object for the entire argument value type and return a
3776       // load from our portion of it. This assumes that if the first part of an
3777       // argument is in memory, the rest will also be in memory.
3778       int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3779                                      /*IsImmutable=*/false);
3780       PartAddr = DAG.getFrameIndex(FI, PtrVT);
3781       return DAG.getLoad(
3782           ValVT, dl, Chain, PartAddr,
3783           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3784     } else {
3785       // This is not the first piece of an argument in memory. See if there is
3786       // already a fixed stack object including this offset. If so, assume it
3787       // was created by the PartOffset == 0 branch above and create a load from
3788       // the appropriate offset into it.
3789       int64_t PartBegin = VA.getLocMemOffset();
3790       int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3791       int FI = MFI.getObjectIndexBegin();
3792       for (; MFI.isFixedObjectIndex(FI); ++FI) {
3793         int64_t ObjBegin = MFI.getObjectOffset(FI);
3794         int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3795         if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3796           break;
3797       }
3798       if (MFI.isFixedObjectIndex(FI)) {
3799         SDValue Addr =
3800             DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3801                         DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3802         return DAG.getLoad(
3803             ValVT, dl, Chain, Addr,
3804             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3805                                               Ins[i].PartOffset));
3806       }
3807     }
3808   }
3809 
3810   int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3811                                  VA.getLocMemOffset(), isImmutable);
3812 
3813   // Set SExt or ZExt flag.
3814   if (VA.getLocInfo() == CCValAssign::ZExt) {
3815     MFI.setObjectZExt(FI, true);
3816   } else if (VA.getLocInfo() == CCValAssign::SExt) {
3817     MFI.setObjectSExt(FI, true);
3818   }
3819 
3820   MaybeAlign Alignment;
3821   if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
3822       ValVT != MVT::f80)
3823     Alignment = MaybeAlign(4);
3824   SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3825   SDValue Val = DAG.getLoad(
3826       ValVT, dl, Chain, FIN,
3827       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
3828       Alignment);
3829   return ExtendedInMem
3830              ? (VA.getValVT().isVector()
3831                     ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3832                     : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3833              : Val;
3834 }
3835 
3836 // FIXME: Get this from tablegen.
3837 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3838                                                 const X86Subtarget &Subtarget) {
3839   assert(Subtarget.is64Bit());
3840 
3841   if (Subtarget.isCallingConvWin64(CallConv)) {
3842     static const MCPhysReg GPR64ArgRegsWin64[] = {
3843       X86::RCX, X86::RDX, X86::R8,  X86::R9
3844     };
3845     return ArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3846   }
3847 
3848   static const MCPhysReg GPR64ArgRegs64Bit[] = {
3849     X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3850   };
3851   return ArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3852 }
3853 
3854 // FIXME: Get this from tablegen.
3855 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3856                                                 CallingConv::ID CallConv,
3857                                                 const X86Subtarget &Subtarget) {
3858   assert(Subtarget.is64Bit());
3859   if (Subtarget.isCallingConvWin64(CallConv)) {
3860     // The XMM registers which might contain vararg parameters are shadowed
3861     // by their paired GPRs, so we only need to save the GPRs to their home
3862     // slots.
3863     // TODO: __vectorcall will change this.
3864     return std::nullopt;
3865   }
3866 
3867   bool isSoftFloat = Subtarget.useSoftFloat();
3868   if (isSoftFloat || !Subtarget.hasSSE1())
3869     // Kernel mode asks for SSE to be disabled, so there are no XMM argument
3870     // registers.
3871     return std::nullopt;
3872 
3873   static const MCPhysReg XMMArgRegs64Bit[] = {
3874     X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3875     X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3876   };
3877   return ArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
3878 }
3879 
3880 #ifndef NDEBUG
3881 static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3882   return llvm::is_sorted(
3883       ArgLocs, [](const CCValAssign &A, const CCValAssign &B) -> bool {
3884         return A.getValNo() < B.getValNo();
3885       });
3886 }
3887 #endif
3888 
3889 namespace {
3890 /// This is a helper class for lowering variable argument parameters.
3891 class VarArgsLoweringHelper {
3892 public:
3893   VarArgsLoweringHelper(X86MachineFunctionInfo *FuncInfo, const SDLoc &Loc,
3894                         SelectionDAG &DAG, const X86Subtarget &Subtarget,
3895                         CallingConv::ID CallConv, CCState &CCInfo)
3896       : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
3897         TheMachineFunction(DAG.getMachineFunction()),
3898         TheFunction(TheMachineFunction.getFunction()),
3899         FrameInfo(TheMachineFunction.getFrameInfo()),
3900         FrameLowering(*Subtarget.getFrameLowering()),
3901         TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
3902         CCInfo(CCInfo) {}
3903 
3904   // Lower variable argument parameters.
3905   void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);
3906 
3907 private:
3908   void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);
3909 
3910   void forwardMustTailParameters(SDValue &Chain);
3911 
3912   bool is64Bit() const { return Subtarget.is64Bit(); }
3913   bool isWin64() const { return Subtarget.isCallingConvWin64(CallConv); }
3914 
3915   X86MachineFunctionInfo *FuncInfo;
3916   const SDLoc &DL;
3917   SelectionDAG &DAG;
3918   const X86Subtarget &Subtarget;
3919   MachineFunction &TheMachineFunction;
3920   const Function &TheFunction;
3921   MachineFrameInfo &FrameInfo;
3922   const TargetFrameLowering &FrameLowering;
3923   const TargetLowering &TargLowering;
3924   CallingConv::ID CallConv;
3925   CCState &CCInfo;
3926 };
3927 } // namespace
3928 
3929 void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
3930     SDValue &Chain, unsigned StackSize) {
3931   // If the function takes a variable number of arguments, make a frame index
3932   // for the start of the first vararg value... for expansion of llvm.va_start.
3933   // We can skip this if there are no va_start calls.
3934   if (is64Bit() || (CallConv != CallingConv::X86_FastCall &&
3935                     CallConv != CallingConv::X86_ThisCall)) {
3936     FuncInfo->setVarArgsFrameIndex(
3937         FrameInfo.CreateFixedObject(1, StackSize, true));
3938   }
3939 
3940   // 64-bit calling conventions support varargs and register parameters, so we
3941   // have to do extra work to spill them in the prologue.
3942   if (is64Bit()) {
3943     // Find the first unallocated argument registers.
3944     ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3945     ArrayRef<MCPhysReg> ArgXMMs =
3946         get64BitArgumentXMMs(TheMachineFunction, CallConv, Subtarget);
3947     unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3948     unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3949 
3950     assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3951            "SSE register cannot be used when SSE is disabled!");
3952 
3953     if (isWin64()) {
3954       // Get to the caller-allocated home save location.  Add 8 to account
3955       // for the return address.
3956       int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
3957       FuncInfo->setRegSaveFrameIndex(
3958           FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3959       // Fixup to set vararg frame on shadow area (4 x i64).
3960       if (NumIntRegs < 4)
3961         FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3962     } else {
3963       // For X86-64, if there are vararg parameters that are passed via
3964       // registers, then we must store them to their spots on the stack so
3965       // they may be loaded by dereferencing the result of va_next.
3966       FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3967       FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3968       FuncInfo->setRegSaveFrameIndex(FrameInfo.CreateStackObject(
3969           ArgGPRs.size() * 8 + ArgXMMs.size() * 16, Align(16), false));
3970     }
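    // For reference: under the SysV AMD64 convention the register save area
    // created above holds the six integer argument registers (6 * 8 = 48
    // bytes) followed by the eight XMM argument registers (8 * 16 = 128
    // bytes); VarArgsGPOffset and VarArgsFPOffset index into it the same way
    // a va_list's gp_offset and fp_offset fields do.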
3971 
3972     // SDValues for the GPR registers that hold live input values.
3973     SmallVector<SDValue, 6> LiveGPRs;
3974     // SDValues for the XMM registers that hold live input values.
3975     SmallVector<SDValue, 8> LiveXMMRegs;
3976     SDValue ALVal; // If applicable, holds the SDValue for the %al register.
3977 
3978     // Gather all the live in physical registers.
3979     for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3980       Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
3981       LiveGPRs.push_back(DAG.getCopyFromReg(Chain, DL, GPR, MVT::i64));
3982     }
3983     const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
3984     if (!AvailableXmms.empty()) {
3985       Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
3986       ALVal = DAG.getCopyFromReg(Chain, DL, AL, MVT::i8);
3987       for (MCPhysReg Reg : AvailableXmms) {
3988         // The fast register allocator spills virtual registers at basic
3989         // block boundaries. That leads to uses of XMM registers
3990         // outside of the check for %al. Pass physical registers to
3991         // VASTART_SAVE_XMM_REGS to avoid unnecessary spilling.
3992         TheMachineFunction.getRegInfo().addLiveIn(Reg);
3993         LiveXMMRegs.push_back(DAG.getRegister(Reg, MVT::v4f32));
3994       }
3995     }
3996 
3997     // Store the integer parameter registers.
3998     SmallVector<SDValue, 8> MemOps;
3999     SDValue RSFIN =
4000         DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
4001                           TargLowering.getPointerTy(DAG.getDataLayout()));
4002     unsigned Offset = FuncInfo->getVarArgsGPOffset();
4003     for (SDValue Val : LiveGPRs) {
4004       SDValue FIN = DAG.getNode(ISD::ADD, DL,
4005                                 TargLowering.getPointerTy(DAG.getDataLayout()),
4006                                 RSFIN, DAG.getIntPtrConstant(Offset, DL));
4007       SDValue Store =
4008           DAG.getStore(Val.getValue(1), DL, Val, FIN,
4009                        MachinePointerInfo::getFixedStack(
4010                            DAG.getMachineFunction(),
4011                            FuncInfo->getRegSaveFrameIndex(), Offset));
4012       MemOps.push_back(Store);
4013       Offset += 8;
4014     }
4015 
4016     // Now store the XMM (fp + vector) parameter registers.
4017     if (!LiveXMMRegs.empty()) {
4018       SmallVector<SDValue, 12> SaveXMMOps;
4019       SaveXMMOps.push_back(Chain);
4020       SaveXMMOps.push_back(ALVal);
4021       SaveXMMOps.push_back(RSFIN);
4022       SaveXMMOps.push_back(
4023           DAG.getTargetConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32));
4024       llvm::append_range(SaveXMMOps, LiveXMMRegs);
4025       MachineMemOperand *StoreMMO =
4026           DAG.getMachineFunction().getMachineMemOperand(
4027               MachinePointerInfo::getFixedStack(
4028                   DAG.getMachineFunction(), FuncInfo->getRegSaveFrameIndex(),
4029                   Offset),
4030               MachineMemOperand::MOStore, 128, Align(16));
4031       MemOps.push_back(DAG.getMemIntrinsicNode(X86ISD::VASTART_SAVE_XMM_REGS,
4032                                                DL, DAG.getVTList(MVT::Other),
4033                                                SaveXMMOps, MVT::i8, StoreMMO));
4034     }
4035 
4036     if (!MemOps.empty())
4037       Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
4038   }
4039 }
4040 
4041 void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
4042   // Find the largest legal vector type.
4043   MVT VecVT = MVT::Other;
4044   // FIXME: Only some x86_32 calling conventions support AVX512.
4045   if (Subtarget.useAVX512Regs() &&
4046       (is64Bit() || (CallConv == CallingConv::X86_VectorCall ||
4047                      CallConv == CallingConv::Intel_OCL_BI)))
4048     VecVT = MVT::v16f32;
4049   else if (Subtarget.hasAVX())
4050     VecVT = MVT::v8f32;
4051   else if (Subtarget.hasSSE2())
4052     VecVT = MVT::v4f32;
4053 
4054   // We forward some GPRs and some vector types.
4055   SmallVector<MVT, 2> RegParmTypes;
4056   MVT IntVT = is64Bit() ? MVT::i64 : MVT::i32;
4057   RegParmTypes.push_back(IntVT);
4058   if (VecVT != MVT::Other)
4059     RegParmTypes.push_back(VecVT);
4060 
4061   // Compute the set of forwarded registers. The rest are scratch.
4062   SmallVectorImpl<ForwardedRegister> &Forwards =
4063       FuncInfo->getForwardedMustTailRegParms();
4064   CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
4065 
4066   // Forward AL for SysV x86_64 targets, since it is used for varargs.
4067   if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
4068     Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
4069     Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
4070   }
4071 
4072   // Copy all forwards from physical to virtual registers.
4073   for (ForwardedRegister &FR : Forwards) {
4074     // FIXME: Can we use a less constrained schedule?
4075     SDValue RegVal = DAG.getCopyFromReg(Chain, DL, FR.VReg, FR.VT);
4076     FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
4077         TargLowering.getRegClassFor(FR.VT));
4078     Chain = DAG.getCopyToReg(Chain, DL, FR.VReg, RegVal);
4079   }
4080 }
4081 
4082 void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
4083                                                    unsigned StackSize) {
4084   // Set FrameIndex to the 0xAAAAAAA value to mark the unset state.
4085   // If necessary, it will be set to the correct value later.
4086   FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
4087   FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
4088 
4089   if (FrameInfo.hasVAStart())
4090     createVarArgAreaAndStoreRegisters(Chain, StackSize);
4091 
4092   if (FrameInfo.hasMustTailInVarArgFunc())
4093     forwardMustTailParameters(Chain);
4094 }
4095 
4096 SDValue X86TargetLowering::LowerFormalArguments(
4097     SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
4098     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4099     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4100   MachineFunction &MF = DAG.getMachineFunction();
4101   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4102 
4103   const Function &F = MF.getFunction();
4104   if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
4105       F.getName() == "main")
4106     FuncInfo->setForceFramePointer(true);
4107 
4108   MachineFrameInfo &MFI = MF.getFrameInfo();
4109   bool Is64Bit = Subtarget.is64Bit();
4110   bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
4111 
4112   assert(
4113       !(IsVarArg && canGuaranteeTCO(CallConv)) &&
4114       "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
4115 
4116   // Assign locations to all of the incoming arguments.
4117   SmallVector<CCValAssign, 16> ArgLocs;
4118   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
4119 
4120   // Allocate shadow area for Win64.
4121   if (IsWin64)
4122     CCInfo.AllocateStack(32, Align(8));
4123 
4124   CCInfo.AnalyzeArguments(Ins, CC_X86);
4125 
4126   // In the vectorcall calling convention, a second pass is required for the
4127   // HVA types.
4128   if (CallingConv::X86_VectorCall == CallConv) {
4129     CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
4130   }
4131 
4132   // The next loop assumes that the locations are in the same order as the
4133   // input arguments.
4134   assert(isSortedByValueNo(ArgLocs) &&
4135          "Argument Location list must be sorted before lowering");
4136 
4137   SDValue ArgValue;
4138   for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
4139        ++I, ++InsIndex) {
4140     assert(InsIndex < Ins.size() && "Invalid Ins index");
4141     CCValAssign &VA = ArgLocs[I];
4142 
4143     if (VA.isRegLoc()) {
4144       EVT RegVT = VA.getLocVT();
4145       if (VA.needsCustom()) {
4146         assert(
4147             VA.getValVT() == MVT::v64i1 &&
4148             "Currently the only custom case is when we split v64i1 to 2 regs");
4149 
4150         // In the regcall calling convention, v64i1 values compiled for a
4151         // 32-bit architecture are split up into two registers.
4152         ArgValue =
4153             getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
4154       } else {
4155         const TargetRegisterClass *RC;
4156         if (RegVT == MVT::i8)
4157           RC = &X86::GR8RegClass;
4158         else if (RegVT == MVT::i16)
4159           RC = &X86::GR16RegClass;
4160         else if (RegVT == MVT::i32)
4161           RC = &X86::GR32RegClass;
4162         else if (Is64Bit && RegVT == MVT::i64)
4163           RC = &X86::GR64RegClass;
4164         else if (RegVT == MVT::f16)
4165           RC = Subtarget.hasAVX512() ? &X86::FR16XRegClass : &X86::FR16RegClass;
4166         else if (RegVT == MVT::f32)
4167           RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
4168         else if (RegVT == MVT::f64)
4169           RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
4170         else if (RegVT == MVT::f80)
4171           RC = &X86::RFP80RegClass;
4172         else if (RegVT == MVT::f128)
4173           RC = &X86::VR128RegClass;
4174         else if (RegVT.is512BitVector())
4175           RC = &X86::VR512RegClass;
4176         else if (RegVT.is256BitVector())
4177           RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
4178         else if (RegVT.is128BitVector())
4179           RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
4180         else if (RegVT == MVT::x86mmx)
4181           RC = &X86::VR64RegClass;
4182         else if (RegVT == MVT::v1i1)
4183           RC = &X86::VK1RegClass;
4184         else if (RegVT == MVT::v8i1)
4185           RC = &X86::VK8RegClass;
4186         else if (RegVT == MVT::v16i1)
4187           RC = &X86::VK16RegClass;
4188         else if (RegVT == MVT::v32i1)
4189           RC = &X86::VK32RegClass;
4190         else if (RegVT == MVT::v64i1)
4191           RC = &X86::VK64RegClass;
4192         else
4193           llvm_unreachable("Unknown argument type!");
4194 
4195         Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
4196         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
4197       }
4198 
4199       // If this is an 8 or 16-bit value, it is really passed promoted to 32
4200       // bits.  Insert an assert[sz]ext to capture this, then truncate to the
4201       // right size.
4202       if (VA.getLocInfo() == CCValAssign::SExt)
4203         ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
4204                                DAG.getValueType(VA.getValVT()));
4205       else if (VA.getLocInfo() == CCValAssign::ZExt)
4206         ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
4207                                DAG.getValueType(VA.getValVT()));
4208       else if (VA.getLocInfo() == CCValAssign::BCvt)
4209         ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
4210 
4211       if (VA.isExtInLoc()) {
4212         // Handle MMX values passed in XMM regs.
4213         if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
4214           ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
4215         else if (VA.getValVT().isVector() &&
4216                  VA.getValVT().getScalarType() == MVT::i1 &&
4217                  ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
4218                   (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
4219           // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
4220           ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
4221         } else
4222           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4223       }
4224     } else {
4225       assert(VA.isMemLoc());
4226       ArgValue =
4227           LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
4228     }
4229 
4230     // If value is passed via pointer - do a load.
4231     if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
4232       ArgValue =
4233           DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
4234 
4235     InVals.push_back(ArgValue);
4236   }
4237 
4238   for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
4239     if (Ins[I].Flags.isSwiftAsync()) {
4240       auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
4241       if (Subtarget.is64Bit())
4242         X86FI->setHasSwiftAsyncContext(true);
4243       else {
4244         int FI = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
4245         X86FI->setSwiftAsyncContextFrameIdx(FI);
4246         SDValue St = DAG.getStore(DAG.getEntryNode(), dl, InVals[I],
4247                                   DAG.getFrameIndex(FI, MVT::i32),
4248                                   MachinePointerInfo::getFixedStack(MF, FI));
4249         Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, St, Chain);
4250       }
4251     }
4252 
4253     // Swift calling convention does not require we copy the sret argument
4254     // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
4255     if (CallConv == CallingConv::Swift || CallConv == CallingConv::SwiftTail)
4256       continue;
4257 
4258     // All x86 ABIs require that for returning structs by value we copy the
4259     // sret argument into %rax/%eax (depending on ABI) for the return. Save
4260     // the argument into a virtual register so that we can access it from the
4261     // return points.
4262     if (Ins[I].Flags.isSRet()) {
4263       assert(!FuncInfo->getSRetReturnReg() &&
4264              "SRet return has already been set");
4265       MVT PtrTy = getPointerTy(DAG.getDataLayout());
4266       Register Reg =
4267           MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
4268       FuncInfo->setSRetReturnReg(Reg);
4269       SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
4270       Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
4271       break;
4272     }
4273   }
4274 
4275   unsigned StackSize = CCInfo.getNextStackOffset();
4276   // Align stack specially for tail calls.
4277   if (shouldGuaranteeTCO(CallConv,
4278                          MF.getTarget().Options.GuaranteedTailCallOpt))
4279     StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
4280 
4281   if (IsVarArg)
4282     VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
4283         .lowerVarArgsParameters(Chain, StackSize);
4284 
4285   // Some CCs need callee pop.
4286   if (X86::isCalleePop(CallConv, Is64Bit, IsVarArg,
4287                        MF.getTarget().Options.GuaranteedTailCallOpt)) {
4288     FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
4289   } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
4290     // X86 interrupts must pop the error code (and the alignment padding) if
4291     // present.
4292     FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
4293   } else {
4294     FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
4295     // If this is an sret function, the return should pop the hidden pointer.
4296     if (!canGuaranteeTCO(CallConv) && hasCalleePopSRet(Ins, Subtarget))
4297       FuncInfo->setBytesToPopOnReturn(4);
4298   }
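  // Concretely: callee-pop conventions (e.g. stdcall/fastcall/thiscall on
  // 32-bit x86) pop their whole argument area, an interrupt handler with an
  // error code pops that slot (4 bytes, or 16 on x86-64 with the alignment
  // padding), and a plain 32-bit sret function pops just the 4-byte hidden
  // struct-return pointer. This count ultimately becomes the immediate
  // operand of the RET emitted for the function.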
4299 
4300   if (!Is64Bit) {
4301     // RegSaveFrameIndex is X86-64 only.
4302     FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
4303   }
4304 
4305   FuncInfo->setArgumentStackSize(StackSize);
4306 
4307   if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
4308     EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
4309     if (Personality == EHPersonality::CoreCLR) {
4310       assert(Is64Bit);
4311       // TODO: Add a mechanism to frame lowering that will allow us to indicate
4312       // that we'd prefer this slot be allocated towards the bottom of the frame
4313       // (i.e. near the stack pointer after allocating the frame).  Every
4314       // funclet needs a copy of this slot in its (mostly empty) frame, and the
4315       // offset from the bottom of this and each funclet's frame must be the
4316       // same, so the size of funclets' (mostly empty) frames is dictated by
4317       // how far this slot is from the bottom (since they allocate just enough
4318       // space to accommodate holding this slot at the correct offset).
4319       int PSPSymFI = MFI.CreateStackObject(8, Align(8), /*isSpillSlot=*/false);
4320       EHInfo->PSPSymFrameIdx = PSPSymFI;
4321     }
4322   }
4323 
4324   if (CallConv == CallingConv::X86_RegCall ||
4325       F.hasFnAttribute("no_caller_saved_registers")) {
4326     MachineRegisterInfo &MRI = MF.getRegInfo();
4327     for (std::pair<Register, Register> Pair : MRI.liveins())
4328       MRI.disableCalleeSavedRegister(Pair.first);
4329   }
4330 
4331   return Chain;
4332 }
4333 
4334 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
4335                                             SDValue Arg, const SDLoc &dl,
4336                                             SelectionDAG &DAG,
4337                                             const CCValAssign &VA,
4338                                             ISD::ArgFlagsTy Flags,
4339                                             bool isByVal) const {
4340   unsigned LocMemOffset = VA.getLocMemOffset();
4341   SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
4342   PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4343                        StackPtr, PtrOff);
4344   if (isByVal)
4345     return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
4346 
4347   MaybeAlign Alignment;
4348   if (Subtarget.isTargetWindowsMSVC() && !Subtarget.is64Bit() &&
4349       Arg.getSimpleValueType() != MVT::f80)
4350     Alignment = MaybeAlign(4);
4351   return DAG.getStore(
4352       Chain, dl, Arg, PtrOff,
4353       MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset),
4354       Alignment);
4355 }
4356 
4357 /// Emit a load of return address if tail call
4358 /// optimization is performed and it is required.
4359 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
4360     SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
4361     bool Is64Bit, int FPDiff, const SDLoc &dl) const {
4362   // Adjust the Return address stack slot.
4363   EVT VT = getPointerTy(DAG.getDataLayout());
4364   OutRetAddr = getReturnAddressFrameIndex(DAG);
4365 
4366   // Load the "old" Return address.
4367   OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
4368   return SDValue(OutRetAddr.getNode(), 1);
4369 }
4370 
4371 /// Emit a store of the return address if tail call
4372 /// optimization is performed and it is required (FPDiff!=0).
4373 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
4374                                         SDValue Chain, SDValue RetAddrFrIdx,
4375                                         EVT PtrVT, unsigned SlotSize,
4376                                         int FPDiff, const SDLoc &dl) {
4377   // Store the return address to the appropriate stack slot.
4378   if (!FPDiff) return Chain;
4379   // Calculate the new stack slot for the return address.
4380   int NewReturnAddrFI =
4381     MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
4382                                          false);
4383   SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
4384   Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
4385                        MachinePointerInfo::getFixedStack(
4386                            DAG.getMachineFunction(), NewReturnAddrFI));
4387   return Chain;
4388 }
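// Net effect of the helper above: when the tail callee's argument area differs
// in size from the caller's (FPDiff != 0), the saved return address is written
// into a new fixed slot at FPDiff - SlotSize, keeping it correctly placed
// relative to the re-laid-out argument area when the jump is made.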
4389 
4390 /// Returns a vector_shuffle mask for a movs{s|d} or movd
4391 /// operation of the specified width.
4392 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
4393                        SDValue V2) {
4394   unsigned NumElems = VT.getVectorNumElements();
4395   SmallVector<int, 8> Mask;
4396   Mask.push_back(NumElems);
4397   for (unsigned i = 1; i != NumElems; ++i)
4398     Mask.push_back(i);
4399   return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
4400 }
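// Example: for MVT::v4f32 the mask built above is <4, 1, 2, 3>, i.e. lane 0 is
// taken from V2 and lanes 1-3 from V1, matching the movss/movsd "replace the
// low element" behaviour mentioned in the comment above.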
4401 
4402 SDValue
4403 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
4404                              SmallVectorImpl<SDValue> &InVals) const {
4405   SelectionDAG &DAG                     = CLI.DAG;
4406   SDLoc &dl                             = CLI.DL;
4407   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
4408   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
4409   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
4410   SDValue Chain                         = CLI.Chain;
4411   SDValue Callee                        = CLI.Callee;
4412   CallingConv::ID CallConv              = CLI.CallConv;
4413   bool &isTailCall                      = CLI.IsTailCall;
4414   bool isVarArg                         = CLI.IsVarArg;
4415   const auto *CB                        = CLI.CB;
4416 
4417   MachineFunction &MF = DAG.getMachineFunction();
4418   bool Is64Bit        = Subtarget.is64Bit();
4419   bool IsWin64        = Subtarget.isCallingConvWin64(CallConv);
4420   bool IsSibcall      = false;
4421   bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
4422       CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
4423   bool IsCalleePopSRet = !IsGuaranteeTCO && hasCalleePopSRet(Outs, Subtarget);
4424   X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
4425   bool HasNCSR = (CB && isa<CallInst>(CB) &&
4426                   CB->hasFnAttr("no_caller_saved_registers"));
4427   bool HasNoCfCheck = (CB && CB->doesNoCfCheck());
4428   bool IsIndirectCall = (CB && isa<CallInst>(CB) && CB->isIndirectCall());
4429   bool IsCFICall = IsIndirectCall && CLI.CFIType;
4430   const Module *M = MF.getMMI().getModule();
4431   Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
4432 
4433   MachineFunction::CallSiteInfo CSInfo;
4434   if (CallConv == CallingConv::X86_INTR)
4435     report_fatal_error("X86 interrupts may not be called directly");
4436 
4437   bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
4438   if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO && !IsMustTail) {
4439     // If we are using a GOT, disable tail calls to external symbols with
4440     // default visibility. Tail calling such a symbol requires using a GOT
4441     // relocation, which forces early binding of the symbol. This breaks code
4442     // that requires lazy function symbol resolution. Using musttail or
4443     // GuaranteedTailCallOpt will override this.
4444     GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4445     if (!G || (!G->getGlobal()->hasLocalLinkage() &&
4446                G->getGlobal()->hasDefaultVisibility()))
4447       isTailCall = false;
4448   }
4449 
4450   if (isTailCall && !IsMustTail) {
4451     // Check if it's really possible to do a tail call.
4452     isTailCall = IsEligibleForTailCallOptimization(
4453         Callee, CallConv, IsCalleePopSRet, isVarArg, CLI.RetTy, Outs, OutVals,
4454         Ins, DAG);
4455 
4456     // Sibcalls are automatically detected tailcalls which do not require
4457     // ABI changes.
4458     if (!IsGuaranteeTCO && isTailCall)
4459       IsSibcall = true;
4460 
4461     if (isTailCall)
4462       ++NumTailCalls;
4463   }
4464 
4465   if (IsMustTail && !isTailCall)
4466     report_fatal_error("failed to perform tail call elimination on a call "
4467                        "site marked musttail");
4468 
4469   assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
4470          "Var args not supported with calling convention fastcc, ghc or hipe");
4471 
4472   // Analyze operands of the call, assigning locations to each operand.
4473   SmallVector<CCValAssign, 16> ArgLocs;
4474   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
4475 
4476   // Allocate shadow area for Win64.
4477   if (IsWin64)
4478     CCInfo.AllocateStack(32, Align(8));
4479 
4480   CCInfo.AnalyzeArguments(Outs, CC_X86);
4481 
4482   // In the vectorcall calling convention, a second pass is required for the
4483   // HVA types.
4484   if (CallingConv::X86_VectorCall == CallConv) {
4485     CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
4486   }
4487 
4488   // Get a count of how many bytes are to be pushed on the stack.
4489   unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
4490   if (IsSibcall)
4491     // This is a sibcall. The memory operands are already available in our
4492     // caller's stack.
4493     NumBytes = 0;
4494   else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
4495     NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
4496 
4497   int FPDiff = 0;
4498   if (isTailCall &&
4499       shouldGuaranteeTCO(CallConv,
4500                          MF.getTarget().Options.GuaranteedTailCallOpt)) {
4501     // Lower arguments at fp - stackoffset + fpdiff.
4502     unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
4503 
4504     FPDiff = NumBytesCallerPushed - NumBytes;
4505 
4506     // Set the delta of movement of the returnaddr stackslot.
4507     // But only set if delta is greater than previous delta.
4508     if (FPDiff < X86Info->getTCReturnAddrDelta())
4509       X86Info->setTCReturnAddrDelta(FPDiff);
4510   }
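  // Note: a negative FPDiff means the callee needs more argument stack than the
  // caller provided, so the return address is reloaded and re-stored to an
  // adjusted slot via EmitTailCallLoadRetAddr/EmitTailCallStoreRetAddr below.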
4511 
4512   unsigned NumBytesToPush = NumBytes;
4513   unsigned NumBytesToPop = NumBytes;
4514 
4515   // If we have an inalloca argument, all stack space has already been allocated
4516   // for us and is right at the top of the stack.  We don't support multiple
4517   // arguments passed in memory when using inalloca.
4518   if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
4519     NumBytesToPush = 0;
4520     if (!ArgLocs.back().isMemLoc())
4521       report_fatal_error("cannot use inalloca attribute on a register "
4522                          "parameter");
4523     if (ArgLocs.back().getLocMemOffset() != 0)
4524       report_fatal_error("any parameter with the inalloca attribute must be "
4525                          "the only memory argument");
4526   } else if (CLI.IsPreallocated) {
4527     assert(ArgLocs.back().isMemLoc() &&
4528            "cannot use preallocated attribute on a register "
4529            "parameter");
4530     SmallVector<size_t, 4> PreallocatedOffsets;
4531     for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
4532       if (CLI.CB->paramHasAttr(i, Attribute::Preallocated)) {
4533         PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
4534       }
4535     }
4536     auto *MFI = DAG.getMachineFunction().getInfo<X86MachineFunctionInfo>();
4537     size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
4538     MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
4539     MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
4540     NumBytesToPush = 0;
4541   }
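  // In both the inalloca and preallocated cases NumBytesToPush is now 0: the
  // memory arguments already live in stack space that was set up before this
  // call sequence, so CALLSEQ_START below does not need to adjust the stack
  // for them.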
4542 
4543   if (!IsSibcall && !IsMustTail)
4544     Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
4545                                  NumBytes - NumBytesToPush, dl);
4546 
4547   SDValue RetAddrFrIdx;
4548   // Load return address for tail calls.
4549   if (isTailCall && FPDiff)
4550     Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
4551                                     Is64Bit, FPDiff, dl);
4552 
4553   SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
4554   SmallVector<SDValue, 8> MemOpChains;
4555   SDValue StackPtr;
4556 
4557   // The next loop assumes that the locations are in the same order as the
4558   // input arguments.
4559   assert(isSortedByValueNo(ArgLocs) &&
4560          "Argument Location list must be sorted before lowering");
4561 
4562   // Walk the register/memloc assignments, inserting copies/loads.  In the case
4563   // of tail call optimization, arguments are handled later.
4564   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4565   for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
4566        ++I, ++OutIndex) {
4567     assert(OutIndex < Outs.size() && "Invalid Out index");
4568     // Skip inalloca/preallocated arguments, they have already been written.
4569     ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
4570     if (Flags.isInAlloca() || Flags.isPreallocated())
4571       continue;
4572 
4573     CCValAssign &VA = ArgLocs[I];
4574     EVT RegVT = VA.getLocVT();
4575     SDValue Arg = OutVals[OutIndex];
4576     bool isByVal = Flags.isByVal();
4577 
4578     // Promote the value if needed.
4579     switch (VA.getLocInfo()) {
4580     default: llvm_unreachable("Unknown loc info!");
4581     case CCValAssign::Full: break;
4582     case CCValAssign::SExt:
4583       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
4584       break;
4585     case CCValAssign::ZExt:
4586       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
4587       break;
4588     case CCValAssign::AExt:
4589       if (Arg.getValueType().isVector() &&
4590           Arg.getValueType().getVectorElementType() == MVT::i1)
4591         Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
4592       else if (RegVT.is128BitVector()) {
4593         // Special case: passing MMX values in XMM registers.
4594         Arg = DAG.getBitcast(MVT::i64, Arg);
4595         Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
4596         Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
4597       } else
4598         Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
4599       break;
4600     case CCValAssign::BCvt:
4601       Arg = DAG.getBitcast(RegVT, Arg);
4602       break;
4603     case CCValAssign::Indirect: {
4604       if (isByVal) {
4605         // Memcpy the argument to a temporary stack slot to prevent
4606         // the caller from seeing any modifications the callee may make
4607         // as guaranteed by the `byval` attribute.
4608         int FrameIdx = MF.getFrameInfo().CreateStackObject(
4609             Flags.getByValSize(),
4610             std::max(Align(16), Flags.getNonZeroByValAlign()), false);
4611         SDValue StackSlot =
4612             DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
4613         Chain =
4614             CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
4615         // From now on treat this as a regular pointer
4616         Arg = StackSlot;
4617         isByVal = false;
4618       } else {
4619         // Store the argument.
4620         SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
4621         int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
4622         Chain = DAG.getStore(
4623             Chain, dl, Arg, SpillSlot,
4624             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4625         Arg = SpillSlot;
4626       }
4627       break;
4628     }
4629     }
4630 
4631     if (VA.needsCustom()) {
4632       assert(VA.getValVT() == MVT::v64i1 &&
4633              "Currently the only custom case is when we split v64i1 to 2 regs");
4634       // Split v64i1 value into two registers
4635       Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
4636     } else if (VA.isRegLoc()) {
4637       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4638       const TargetOptions &Options = DAG.getTarget().Options;
4639       if (Options.EmitCallSiteInfo)
4640         CSInfo.emplace_back(VA.getLocReg(), I);
4641       if (isVarArg && IsWin64) {
4642         // Win64 ABI requires argument XMM reg to be copied to the corresponding
4643         // shadow reg if callee is a varargs function.
4644         Register ShadowReg;
4645         switch (VA.getLocReg()) {
4646         case X86::XMM0: ShadowReg = X86::RCX; break;
4647         case X86::XMM1: ShadowReg = X86::RDX; break;
4648         case X86::XMM2: ShadowReg = X86::R8; break;
4649         case X86::XMM3: ShadowReg = X86::R9; break;
4650         }
4651         if (ShadowReg)
4652           RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
4653       }
4654     } else if (!IsSibcall && (!isTailCall || isByVal)) {
4655       assert(VA.isMemLoc());
4656       if (!StackPtr.getNode())
4657         StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4658                                       getPointerTy(DAG.getDataLayout()));
4659       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
4660                                              dl, DAG, VA, Flags, isByVal));
4661     }
4662   }
4663 
4664   if (!MemOpChains.empty())
4665     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4666 
4667   if (Subtarget.isPICStyleGOT()) {
4668     // ELF / PIC requires the GOT pointer to be in the EBX register before
4669     // function calls via the PLT (except for regcall).
4670     if (!isTailCall) {
4671       // An indirect call with the RegCall calling convention may use up all
4672       // the general registers, so it is not suitable to bind EBX as the GOT
4673       // address register; just let the register allocator handle it.
4674       if (CallConv != CallingConv::X86_RegCall)
4675         RegsToPass.push_back(std::make_pair(
4676           Register(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
4677                                           getPointerTy(DAG.getDataLayout()))));
4678     } else {
4679       // If we are tail calling and generating PIC/GOT style code load the
4680       // address of the callee into ECX. The value in ecx is used as target of
4681       // the tail jump. This is done to circumvent the ebx/callee-saved problem
4682       // for tail calls on PIC/GOT architectures. Normally we would just put the
4683       // address of GOT into ebx and then call target@PLT. But for tail calls
4684       // ebx would be restored (since ebx is callee saved) before jumping to the
4685       // target@PLT.
4686 
4687       // Note: The actual moving to ECX is done further down.
4688       GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4689       if (G && !G->getGlobal()->hasLocalLinkage() &&
4690           G->getGlobal()->hasDefaultVisibility())
4691         Callee = LowerGlobalAddress(Callee, DAG);
4692       else if (isa<ExternalSymbolSDNode>(Callee))
4693         Callee = LowerExternalSymbol(Callee, DAG);
4694     }
4695   }
4696 
4697   if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail &&
4698       (Subtarget.hasSSE1() || !M->getModuleFlag("SkipRaxSetup"))) {
4699     // From AMD64 ABI document:
4700     // For calls that may call functions that use varargs or stdargs
4701     // (prototype-less calls or calls to functions containing ellipsis (...) in
4702     // the declaration) %al is used as a hidden argument to specify the number
4703     // of SSE registers used. The contents of %al do not need to match exactly
4704     // the number of registers, but must be an upper bound on the number of SSE
4705     // registers used and is in the range 0 - 8 inclusive.
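    // Illustrative example: a variadic call that passes two doubles in XMM0 and
    // XMM1 would set %al to 2 here; any value from 2 through 8 would also
    // satisfy the ABI, since only an upper bound is required.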
4706 
4707     // Count the number of XMM registers allocated.
4708     static const MCPhysReg XMMArgRegs[] = {
4709       X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4710       X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
4711     };
4712     unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
4713     assert((Subtarget.hasSSE1() || !NumXMMRegs)
4714            && "SSE registers cannot be used when SSE is disabled");
4715     RegsToPass.push_back(std::make_pair(Register(X86::AL),
4716                                         DAG.getConstant(NumXMMRegs, dl,
4717                                                         MVT::i8)));
4718   }
4719 
4720   if (isVarArg && IsMustTail) {
4721     const auto &Forwards = X86Info->getForwardedMustTailRegParms();
4722     for (const auto &F : Forwards) {
4723       SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
4724       RegsToPass.push_back(std::make_pair(F.PReg, Val));
4725     }
4726   }
4727 
4728   // For tail calls lower the arguments to the 'real' stack slots.  Sibcalls
4729   // don't need this because the eligibility check rejects calls that require
4730   // shuffling arguments passed in memory.
4731   if (!IsSibcall && isTailCall) {
4732     // Force all the incoming stack arguments to be loaded from the stack
4733     // before any new outgoing arguments are stored to the stack, because the
4734     // outgoing stack slots may alias the incoming argument stack slots, and
4735     // the alias isn't otherwise explicit. This is slightly more conservative
4736     // than necessary, because it means that each store effectively depends
4737     // on every argument instead of just those arguments it would clobber.
4738     SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
4739 
4740     SmallVector<SDValue, 8> MemOpChains2;
4741     SDValue FIN;
4742     int FI = 0;
4743     for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
4744          ++I, ++OutsIndex) {
4745       CCValAssign &VA = ArgLocs[I];
4746 
4747       if (VA.isRegLoc()) {
4748         if (VA.needsCustom()) {
4749           assert((CallConv == CallingConv::X86_RegCall) &&
4750                  "Expecting custom case only in regcall calling convention");
4751           // This means that we are in a special case where one argument was
4752           // passed through two register locations - skip the next location.
4753           ++I;
4754         }
4755 
4756         continue;
4757       }
4758 
4759       assert(VA.isMemLoc());
4760       SDValue Arg = OutVals[OutsIndex];
4761       ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
4762       // Skip inalloca/preallocated arguments.  They don't require any work.
4763       if (Flags.isInAlloca() || Flags.isPreallocated())
4764         continue;
4765       // Create frame index.
4766       int32_t Offset = VA.getLocMemOffset()+FPDiff;
4767       uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
4768       FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4769       FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4770 
4771       if (Flags.isByVal()) {
4772         // Copy relative to framepointer.
4773         SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
4774         if (!StackPtr.getNode())
4775           StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4776                                         getPointerTy(DAG.getDataLayout()));
4777         Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4778                              StackPtr, Source);
4779 
4780         MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
4781                                                          ArgChain,
4782                                                          Flags, DAG, dl));
4783       } else {
4784         // Store relative to framepointer.
4785         MemOpChains2.push_back(DAG.getStore(
4786             ArgChain, dl, Arg, FIN,
4787             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4788       }
4789     }
4790 
4791     if (!MemOpChains2.empty())
4792       Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4793 
4794     // Store the return address to the appropriate stack slot.
4795     Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
4796                                      getPointerTy(DAG.getDataLayout()),
4797                                      RegInfo->getSlotSize(), FPDiff, dl);
4798   }
4799 
4800   // Build a sequence of copy-to-reg nodes chained together with token chain
4801   // and flag operands which copy the outgoing args into registers.
4802   SDValue InFlag;
4803   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4804     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4805                              RegsToPass[i].second, InFlag);
4806     InFlag = Chain.getValue(1);
4807   }
4808 
4809   if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
4810     assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
4811     // In the 64-bit large code model, we have to make all calls
4812     // through a register, since the call instruction's 32-bit
4813     // pc-relative offset may not be large enough to hold the whole
4814     // address.
4815   } else if (Callee->getOpcode() == ISD::GlobalAddress ||
4816              Callee->getOpcode() == ISD::ExternalSymbol) {
4817     // Lower direct calls to global addresses and external symbols. Setting
4818     // ForCall to true here has the effect of removing WrapperRIP when possible
4819     // to allow direct calls to be selected without first materializing the
4820     // address into a register.
4821     Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
4822   } else if (Subtarget.isTarget64BitILP32() &&
4823              Callee.getValueType() == MVT::i32) {
4824     // Zero-extend the 32-bit Callee address to 64 bits, per the x32 ABI.
4825     Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
4826   }
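  // At this point Callee is either a Target* node (global address or external
  // symbol) that the call can reference directly, or a pointer-typed value
  // (indirect calls and the 64-bit large code model) that will be selected as
  // a call through a register.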
4827 
4828   // Returns a chain & a flag for retval copy to use.
4829   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
4830   SmallVector<SDValue, 8> Ops;
4831 
4832   if (!IsSibcall && isTailCall && !IsMustTail) {
4833     Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, 0, InFlag, dl);
4834     InFlag = Chain.getValue(1);
4835   }
4836 
4837   Ops.push_back(Chain);
4838   Ops.push_back(Callee);
4839 
4840   if (isTailCall)
4841     Ops.push_back(DAG.getTargetConstant(FPDiff, dl, MVT::i32));
4842 
4843   // Add argument registers to the end of the list so that they are known live
4844   // into the call.
4845   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4846     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4847                                   RegsToPass[i].second.getValueType()));
4848 
4849   // Add a register mask operand representing the call-preserved registers.
4850   const uint32_t *Mask = [&]() {
4851     auto AdaptedCC = CallConv;
4852     // If HasNCSR is asserted (attribute NoCallerSavedRegisters exists),
4853     // use X86_INTR calling convention because it has the same CSR mask
4854     // (same preserved registers).
4855     if (HasNCSR)
4856       AdaptedCC = (CallingConv::ID)CallingConv::X86_INTR;
4857     // If NoCalleeSavedRegisters is requested, then use GHC since it happens
4858     // to use the CSR_NoRegs_RegMask.
4859     if (CB && CB->hasFnAttr("no_callee_saved_registers"))
4860       AdaptedCC = (CallingConv::ID)CallingConv::GHC;
4861     return RegInfo->getCallPreservedMask(MF, AdaptedCC);
4862   }();
4863   assert(Mask && "Missing call preserved mask for calling convention");
4864 
4865   // If this is an invoke in a 32-bit function using a funclet-based
4866   // personality, assume the function clobbers all registers. If an exception
4867   // is thrown, the runtime will not restore CSRs.
4868   // FIXME: Model this more precisely so that we can register allocate across
4869   // the normal edge and spill and fill across the exceptional edge.
4870   if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
4871     const Function &CallerFn = MF.getFunction();
4872     EHPersonality Pers =
4873         CallerFn.hasPersonalityFn()
4874             ? classifyEHPersonality(CallerFn.getPersonalityFn())
4875             : EHPersonality::Unknown;
4876     if (isFuncletEHPersonality(Pers))
4877       Mask = RegInfo->getNoPreservedMask();
4878   }
4879 
4880   // Define a new register mask from the existing mask.
4881   uint32_t *RegMask = nullptr;
4882 
4883   // In some calling conventions we need to remove the used physical registers
4884   // from the reg mask.
4885   if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4886     const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4887 
4888     // Allocate a new Reg Mask and copy Mask.
4889     RegMask = MF.allocateRegMask();
4890     unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4891     memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4892 
4893     // Make sure all sub registers of the argument registers are reset
4894     // in the RegMask.
4895     for (auto const &RegPair : RegsToPass)
4896       for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4897            SubRegs.isValid(); ++SubRegs)
4898         RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
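    // A set bit in a register mask means the register is preserved across the
    // call; clearing the bits for the argument registers (and their
    // subregisters) marks them as clobbered so later passes don't assume their
    // values survive the call.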
4899 
4900     // Create the RegMask Operand according to our updated mask.
4901     Ops.push_back(DAG.getRegisterMask(RegMask));
4902   } else {
4903     // Create the RegMask Operand according to the static mask.
4904     Ops.push_back(DAG.getRegisterMask(Mask));
4905   }
4906 
4907   if (InFlag.getNode())
4908     Ops.push_back(InFlag);
4909 
4910   if (isTailCall) {
4911     // We used to do:
4912     //// If this is the first return lowered for this function, add the regs
4913     //// to the liveout set for the function.
4914     // This isn't right, although it's probably harmless on x86; liveouts
4915     // should be computed from returns not tail calls.  Consider a void
4916     // function making a tail call to a function returning int.
4917     MF.getFrameInfo().setHasTailCall();
4918     SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4919 
4920     if (IsCFICall)
4921       Ret.getNode()->setCFIType(CLI.CFIType->getZExtValue());
4922 
4923     DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4924     return Ret;
4925   }
4926 
4927   if (HasNoCfCheck && IsCFProtectionSupported && IsIndirectCall) {
4928     Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4929   } else if (CLI.CB && objcarc::hasAttachedCallOpBundle(CLI.CB)) {
4930     // Calls with a "clang.arc.attachedcall" bundle are special. They should be
4931     // expanded to the call, directly followed by a special marker sequence and
4932     // a call to an ObjC library function. Use the CALL_RVMARKER to do that.
4933     assert(!isTailCall &&
4934            "tail calls cannot be marked with clang.arc.attachedcall");
4935     assert(Is64Bit && "clang.arc.attachedcall is only supported in 64bit mode");
4936 
4937     // Add a target global address for the retainRV/claimRV runtime function
4938     // just before the call target.
4939     Function *ARCFn = *objcarc::getAttachedARCFunction(CLI.CB);
4940     auto PtrVT = getPointerTy(DAG.getDataLayout());
4941     auto GA = DAG.getTargetGlobalAddress(ARCFn, dl, PtrVT);
4942     Ops.insert(Ops.begin() + 1, GA);
4943     Chain = DAG.getNode(X86ISD::CALL_RVMARKER, dl, NodeTys, Ops);
4944   } else {
4945     Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4946   }
4947 
4948   if (IsCFICall)
4949     Chain.getNode()->setCFIType(CLI.CFIType->getZExtValue());
4950 
4951   InFlag = Chain.getValue(1);
4952   DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
4953   DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4954 
4955   // Save heapallocsite metadata.
4956   if (CLI.CB)
4957     if (MDNode *HeapAlloc = CLI.CB->getMetadata("heapallocsite"))
4958       DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
4959 
4960   // Create the CALLSEQ_END node.
4961   unsigned NumBytesForCalleeToPop = 0; // Callee pops nothing.
4962   if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4963                        DAG.getTarget().Options.GuaranteedTailCallOpt))
4964     NumBytesForCalleeToPop = NumBytes;    // Callee pops everything
4965   else if (!canGuaranteeTCO(CallConv) && IsCalleePopSRet)
4966     // If this call passes a struct-return pointer, the callee
4967     // pops that struct pointer.
4968     NumBytesForCalleeToPop = 4;
4969 
4970   // Returns a flag for retval copy to use.
4971   if (!IsSibcall) {
4972     Chain = DAG.getCALLSEQ_END(Chain, NumBytesToPop, NumBytesForCalleeToPop,
4973                                InFlag, dl);
4974     InFlag = Chain.getValue(1);
4975   }
4976 
4977   // Handle result values, copying them out of physregs into vregs that we
4978   // return.
4979   return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4980                          InVals, RegMask);
4981 }
4982 
4983 //===----------------------------------------------------------------------===//
4984 //                Fast Calling Convention (tail call) implementation
4985 //===----------------------------------------------------------------------===//
4986 
4987 //  Like the stdcall convention, the callee cleans up the arguments, except
4988 //  that ECX is reserved for storing the tail-called function address. Only 2
4989 //  registers are free for argument passing (inreg). Tail call optimization is
4990 //  performed provided:
4991 //                * tailcallopt is enabled
4992 //                * caller/callee are fastcc
4993 //  On X86_64 architecture with GOT-style position independent code only local
4994 //  (within module) calls are supported at the moment.
4995 //  To keep the stack aligned according to the platform ABI, the function
4996 //  GetAlignedArgumentStackSize ensures that the argument delta is always a
4997 //  multiple of the stack alignment. (Dynamic linkers need this - Darwin's dyld
4998 //  for example.) If a tail-called callee has more arguments than the caller,
4999 //  the caller needs to make sure that there is room to move the RETADDR to. This is
5000 //  achieved by reserving an area the size of the argument delta right after the
5001 //  original RETADDR, but before the saved framepointer or the spilled registers
5002 //  e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
5003 //  stack layout:
5004 //    arg1
5005 //    arg2
5006 //    RETADDR
5007 //    [ new RETADDR
5008 //      move area ]
5009 //    (possible EBP)
5010 //    ESI
5011 //    EDI
5012 //    local1 ..
5013 
5014 /// Align the stack size, e.g. to 16n + 12, to satisfy a 16-byte alignment
5015 /// requirement.
5016 unsigned
5017 X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
5018                                                SelectionDAG &DAG) const {
5019   const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
5020   const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
5021   assert(StackSize % SlotSize == 0 &&
5022          "StackSize must be a multiple of SlotSize");
5023   return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
5024 }
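// Worked example (assuming 16-byte stack alignment and an 8-byte slot size):
// StackSize == 40 gives alignTo(48, 16) - 8 == 40, and StackSize == 48 gives
// alignTo(56, 16) - 8 == 56, so the result is always SlotSize short of a full
// alignment boundary (16n + 8 here, or 16n + 12 with 4-byte slots).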
5025 
5026 /// Return true if the given stack call argument is already available in the
5027 /// same position (relatively) in the caller's incoming argument stack.
5028 static
5029 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
5030                          MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
5031                          const X86InstrInfo *TII, const CCValAssign &VA) {
5032   unsigned Bytes = Arg.getValueSizeInBits() / 8;
5033 
5034   for (;;) {
5035     // Look through nodes that don't alter the bits of the incoming value.
5036     unsigned Op = Arg.getOpcode();
5037     if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
5038       Arg = Arg.getOperand(0);
5039       continue;
5040     }
5041     if (Op == ISD::TRUNCATE) {
5042       const SDValue &TruncInput = Arg.getOperand(0);
5043       if (TruncInput.getOpcode() == ISD::AssertZext &&
5044           cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
5045               Arg.getValueType()) {
5046         Arg = TruncInput.getOperand(0);
5047         continue;
5048       }
5049     }
5050     break;
5051   }
5052 
5053   int FI = INT_MAX;
5054   if (Arg.getOpcode() == ISD::CopyFromReg) {
5055     Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
5056     if (!VR.isVirtual())
5057       return false;
5058     MachineInstr *Def = MRI->getVRegDef(VR);
5059     if (!Def)
5060       return false;
5061     if (!Flags.isByVal()) {
5062       if (!TII->isLoadFromStackSlot(*Def, FI))
5063         return false;
5064     } else {
5065       unsigned Opcode = Def->getOpcode();
5066       if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
5067            Opcode == X86::LEA64_32r) &&
5068           Def->getOperand(1).isFI()) {
5069         FI = Def->getOperand(1).getIndex();
5070         Bytes = Flags.getByValSize();
5071       } else
5072         return false;
5073     }
5074   } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
5075     if (Flags.isByVal())
5076       // ByVal argument is passed in as a pointer but it's now being
5077       // dereferenced. e.g.
5078       // define @foo(%struct.X* %A) {
5079       //   tail call @bar(%struct.X* byval %A)
5080       // }
5081       return false;
5082     SDValue Ptr = Ld->getBasePtr();
5083     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
5084     if (!FINode)
5085       return false;
5086     FI = FINode->getIndex();
5087   } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
5088     FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
5089     FI = FINode->getIndex();
5090     Bytes = Flags.getByValSize();
5091   } else
5092     return false;
5093 
5094   assert(FI != INT_MAX);
5095   if (!MFI.isFixedObjectIndex(FI))
5096     return false;
5097 
5098   if (Offset != MFI.getObjectOffset(FI))
5099     return false;
5100 
5101   // If this is not byval, check that the argument stack object is immutable.
5102   // inalloca and argument copy elision can create mutable argument stack
5103   // objects. Byval objects can be mutated, but a byval call intends to pass the
5104   // mutated memory.
5105   if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
5106     return false;
5107 
5108   if (VA.getLocVT().getFixedSizeInBits() >
5109       Arg.getValueSizeInBits().getFixedValue()) {
5110     // If the argument location is wider than the argument type, check that any
5111     // extension flags match.
5112     if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
5113         Flags.isSExt() != MFI.isObjectSExt(FI)) {
5114       return false;
5115     }
5116   }
5117 
5118   return Bytes == MFI.getObjectSize(FI);
5119 }
5120 
5121 /// Check whether the call is eligible for tail call optimization. Targets
5122 /// that want to do tail call optimization should implement this function.
5123 bool X86TargetLowering::IsEligibleForTailCallOptimization(
5124     SDValue Callee, CallingConv::ID CalleeCC, bool IsCalleePopSRet,
5125     bool isVarArg, Type *RetTy, const SmallVectorImpl<ISD::OutputArg> &Outs,
5126     const SmallVectorImpl<SDValue> &OutVals,
5127     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
5128   if (!mayTailCallThisCC(CalleeCC))
5129     return false;
5130 
5131   // If -tailcallopt is specified, make fastcc functions tail-callable.
5132   MachineFunction &MF = DAG.getMachineFunction();
5133   const Function &CallerF = MF.getFunction();
5134 
5135   // If the function return type is x86_fp80 and the callee return type is not,
5136   // then the FP_EXTEND of the call result is not a nop. It's not safe to
5137   // perform a tailcall optimization here.
5138   if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
5139     return false;
5140 
5141   CallingConv::ID CallerCC = CallerF.getCallingConv();
5142   bool CCMatch = CallerCC == CalleeCC;
5143   bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
5144   bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
5145   bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
5146       CalleeCC == CallingConv::Tail || CalleeCC == CallingConv::SwiftTail;
5147 
5148   // Win64 functions have extra shadow space for argument homing. Don't do the
5149   // sibcall if the caller and callee have mismatched expectations for this
5150   // space.
5151   if (IsCalleeWin64 != IsCallerWin64)
5152     return false;
5153 
5154   if (IsGuaranteeTCO) {
5155     if (canGuaranteeTCO(CalleeCC) && CCMatch)
5156       return true;
5157     return false;
5158   }
5159 
5160   // Look for obvious safe cases to perform tail call optimization that do not
5161   // require ABI changes. This is what gcc calls sibcall.
5162 
5163   // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
5164   // emit a special epilogue.
5165   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
5166   if (RegInfo->hasStackRealignment(MF))
5167     return false;
5168 
5169   // Also avoid sibcall optimization if we're an sret return fn and the callee
5170   // is incompatible. See comment in LowerReturn about why hasStructRetAttr is
5171   // insufficient.
5172   if (MF.getInfo<X86MachineFunctionInfo>()->getSRetReturnReg()) {
5173     // For a compatible tail call the callee must return our sret pointer. So it
5174     // needs to be (a) an sret function itself and (b) we pass our sret as its
5175     // sret. Condition #b is harder to determine.
5176     return false;
5177   } else if (IsCalleePopSRet)
5178     // The callee pops an sret, so we cannot tail-call, as our caller doesn't
5179     // expect that.
5180     return false;
5181 
5182   // Do not sibcall optimize vararg calls unless all arguments are passed via
5183   // registers.
5184   LLVMContext &C = *DAG.getContext();
5185   if (isVarArg && !Outs.empty()) {
5186     // Optimizing for varargs on Win64 is unlikely to be safe without
5187     // additional testing.
5188     if (IsCalleeWin64 || IsCallerWin64)
5189       return false;
5190 
5191     SmallVector<CCValAssign, 16> ArgLocs;
5192     CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
5193 
5194     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
5195     for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
5196       if (!ArgLocs[i].isRegLoc())
5197         return false;
5198   }
5199 
5200   // If the call result is in ST0 / ST1, it needs to be popped off the x87
5201   // stack.  Therefore, if it's not used by the call it is not safe to optimize
5202   // this into a sibcall.
5203   bool Unused = false;
5204   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
5205     if (!Ins[i].Used) {
5206       Unused = true;
5207       break;
5208     }
5209   }
5210   if (Unused) {
5211     SmallVector<CCValAssign, 16> RVLocs;
5212     CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
5213     CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
5214     for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5215       CCValAssign &VA = RVLocs[i];
5216       if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
5217         return false;
5218     }
5219   }
5220 
5221   // Check that the call results are passed in the same way.
5222   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
5223                                   RetCC_X86, RetCC_X86))
5224     return false;
5225   // The callee has to preserve all registers the caller needs to preserve.
5226   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
5227   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
5228   if (!CCMatch) {
5229     const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
5230     if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
5231       return false;
5232   }
5233 
5234   unsigned StackArgsSize = 0;
5235 
5236   // If the callee takes no arguments then go on to check the results of the
5237   // call.
5238   if (!Outs.empty()) {
5239     // Check if stack adjustment is needed. For now, do not do this if any
5240     // argument is passed on the stack.
5241     SmallVector<CCValAssign, 16> ArgLocs;
5242     CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
5243 
5244     // Allocate shadow area for Win64
5245     if (IsCalleeWin64)
5246       CCInfo.AllocateStack(32, Align(8));
5247 
5248     CCInfo.AnalyzeCallOperands(Outs, CC_X86);
5249     StackArgsSize = CCInfo.getNextStackOffset();
5250 
5251     if (CCInfo.getNextStackOffset()) {
5252       // Check if the arguments are already laid out in the right way as
5253       // the caller's fixed stack objects.
5254       MachineFrameInfo &MFI = MF.getFrameInfo();
5255       const MachineRegisterInfo *MRI = &MF.getRegInfo();
5256       const X86InstrInfo *TII = Subtarget.getInstrInfo();
5257       for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5258         CCValAssign &VA = ArgLocs[i];
5259         SDValue Arg = OutVals[i];
5260         ISD::ArgFlagsTy Flags = Outs[i].Flags;
5261         if (VA.getLocInfo() == CCValAssign::Indirect)
5262           return false;
5263         if (!VA.isRegLoc()) {
5264           if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
5265                                    MFI, MRI, TII, VA))
5266             return false;
5267         }
5268       }
5269     }
5270 
5271     bool PositionIndependent = isPositionIndependent();
5272     // If the tailcall address may be in a register, then make sure it's
5273     // possible to register allocate for it. In 32-bit, the call address can
5274     // only target EAX, EDX, or ECX since the tail call must be scheduled after
5275     // callee-saved registers are restored. These happen to be the same
5276     // registers used to pass 'inreg' arguments so watch out for those.
5277     if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
5278                                   !isa<ExternalSymbolSDNode>(Callee)) ||
5279                                  PositionIndependent)) {
5280       unsigned NumInRegs = 0;
5281       // In PIC we need an extra register to formulate the address computation
5282       // for the callee.
5283       unsigned MaxInRegs = PositionIndependent ? 2 : 3;
5284 
5285       for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
5286         CCValAssign &VA = ArgLocs[i];
5287         if (!VA.isRegLoc())
5288           continue;
5289         Register Reg = VA.getLocReg();
5290         switch (Reg) {
5291         default: break;
5292         case X86::EAX: case X86::EDX: case X86::ECX:
5293           if (++NumInRegs == MaxInRegs)
5294             return false;
5295           break;
5296         }
5297       }
5298     }
5299 
5300     const MachineRegisterInfo &MRI = MF.getRegInfo();
5301     if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
5302       return false;
5303   }
5304 
5305   bool CalleeWillPop =
5306       X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
5307                        MF.getTarget().Options.GuaranteedTailCallOpt);
5308 
5309   if (unsigned BytesToPop =
5310           MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
5311     // If we have bytes to pop, the callee must pop them.
5312     bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
5313     if (!CalleePopMatches)
5314       return false;
5315   } else if (CalleeWillPop && StackArgsSize > 0) {
5316     // If we don't have bytes to pop, make sure the callee doesn't pop any.
5317     return false;
5318   }
5319 
5320   return true;
5321 }
5322 
5323 FastISel *
5324 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
5325                                   const TargetLibraryInfo *libInfo) const {
5326   return X86::createFastISel(funcInfo, libInfo);
5327 }
5328 
5329 //===----------------------------------------------------------------------===//
5330 //                           Other Lowering Hooks
5331 //===----------------------------------------------------------------------===//
5332 
5333 bool X86::mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
5334                       bool AssumeSingleUse) {
5335   if (!AssumeSingleUse && !Op.hasOneUse())
5336     return false;
5337   if (!ISD::isNormalLoad(Op.getNode()))
5338     return false;
5339 
5340   // If this is an unaligned vector, make sure the target supports folding it.
5341   auto *Ld = cast<LoadSDNode>(Op.getNode());
5342   if (!Subtarget.hasAVX() && !Subtarget.hasSSEUnalignedMem() &&
5343       Ld->getValueSizeInBits(0) == 128 && Ld->getAlign() < Align(16))
5344     return false;
5345 
5346   // TODO: If this is a non-temporal load and the target has an instruction
5347   //       for it, it should not be folded. See "useNonTemporalLoad()".
5348 
5349   return true;
5350 }
5351 
5352 bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
5353                                           const X86Subtarget &Subtarget,
5354                                           bool AssumeSingleUse) {
5355   assert(Subtarget.hasAVX() && "Expected AVX for broadcast from memory");
5356   if (!X86::mayFoldLoad(Op, Subtarget, AssumeSingleUse))
5357     return false;
5358 
5359   // We cannot replace a wide volatile load with a broadcast-from-memory,
5360   // because that would narrow the load, which isn't legal for volatiles.
5361   auto *Ld = cast<LoadSDNode>(Op.getNode());
5362   return !Ld->isVolatile() ||
5363          Ld->getValueSizeInBits(0) == EltVT.getScalarSizeInBits();
5364 }
5365 
5366 bool X86::mayFoldIntoStore(SDValue Op) {
5367   return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
5368 }
5369 
5370 bool X86::mayFoldIntoZeroExtend(SDValue Op) {
5371   if (Op.hasOneUse()) {
5372     unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
5373     return (ISD::ZERO_EXTEND == Opcode);
5374   }
5375   return false;
5376 }
5377 
5378 static bool isTargetShuffle(unsigned Opcode) {
5379   switch(Opcode) {
5380   default: return false;
5381   case X86ISD::BLENDI:
5382   case X86ISD::PSHUFB:
5383   case X86ISD::PSHUFD:
5384   case X86ISD::PSHUFHW:
5385   case X86ISD::PSHUFLW:
5386   case X86ISD::SHUFP:
5387   case X86ISD::INSERTPS:
5388   case X86ISD::EXTRQI:
5389   case X86ISD::INSERTQI:
5390   case X86ISD::VALIGN:
5391   case X86ISD::PALIGNR:
5392   case X86ISD::VSHLDQ:
5393   case X86ISD::VSRLDQ:
5394   case X86ISD::MOVLHPS:
5395   case X86ISD::MOVHLPS:
5396   case X86ISD::MOVSHDUP:
5397   case X86ISD::MOVSLDUP:
5398   case X86ISD::MOVDDUP:
5399   case X86ISD::MOVSS:
5400   case X86ISD::MOVSD:
5401   case X86ISD::MOVSH:
5402   case X86ISD::UNPCKL:
5403   case X86ISD::UNPCKH:
5404   case X86ISD::VBROADCAST:
5405   case X86ISD::VPERMILPI:
5406   case X86ISD::VPERMILPV:
5407   case X86ISD::VPERM2X128:
5408   case X86ISD::SHUF128:
5409   case X86ISD::VPERMIL2:
5410   case X86ISD::VPERMI:
5411   case X86ISD::VPPERM:
5412   case X86ISD::VPERMV:
5413   case X86ISD::VPERMV3:
5414   case X86ISD::VZEXT_MOVL:
5415     return true;
5416   }
5417 }
5418 
5419 static bool isTargetShuffleVariableMask(unsigned Opcode) {
5420   switch (Opcode) {
5421   default: return false;
5422   // Target Shuffles.
5423   case X86ISD::PSHUFB:
5424   case X86ISD::VPERMILPV:
5425   case X86ISD::VPERMIL2:
5426   case X86ISD::VPPERM:
5427   case X86ISD::VPERMV:
5428   case X86ISD::VPERMV3:
5429     return true;
5430   // 'Faux' Target Shuffles.
5431   case ISD::OR:
5432   case ISD::AND:
5433   case X86ISD::ANDNP:
5434     return true;
5435   }
5436 }
5437 
5438 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
5439   MachineFunction &MF = DAG.getMachineFunction();
5440   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
5441   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
5442   int ReturnAddrIndex = FuncInfo->getRAIndex();
5443 
5444   if (ReturnAddrIndex == 0) {
5445     // Set up a frame object for the return address.
5446     unsigned SlotSize = RegInfo->getSlotSize();
5447     ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
5448                                                           -(int64_t)SlotSize,
5449                                                           false);
5450     FuncInfo->setRAIndex(ReturnAddrIndex);
5451   }
5452 
5453   return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
5454 }
5455 
5456 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
5457                                        bool hasSymbolicDisplacement) {
5458   // The offset should fit into a 32-bit immediate field.
5459   if (!isInt<32>(Offset))
5460     return false;
5461 
5462   // If we don't have a symbolic displacement, we don't have any extra
5463   // restrictions.
5464   if (!hasSymbolicDisplacement)
5465     return true;
5466 
5467   // FIXME: Some tweaks might be needed for medium code model.
5468   if (M != CodeModel::Small && M != CodeModel::Kernel)
5469     return false;
5470 
5471   // For the small code model we assume that the latest object is 16MB below
5472   // the 31-bit boundary. We may also accept pretty large negative constants
5473   // knowing that all objects are in the positive half of the address space.
5474   if (M == CodeModel::Small && Offset < 16*1024*1024)
5475     return true;
5476 
5477   // For the kernel code model we know that all objects reside in the negative
5478   // half of the 32-bit address space. We must not accept negative offsets, since
5479   // they may be just off, but we may accept pretty large positive ones.
5480   if (M == CodeModel::Kernel && Offset >= 0)
5481     return true;
5482 
5483   return false;
5484 }
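// For example, with a symbolic displacement under the small code model an
// offset of 15MB (0x00F00000) is accepted while 32MB (0x02000000) is rejected;
// under the kernel code model any non-negative 32-bit offset is accepted.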
5485 
5486 /// Determines whether the callee is required to pop its own arguments.
5487 /// Callee pop is necessary to support tail calls.
5488 bool X86::isCalleePop(CallingConv::ID CallingConv,
5489                       bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
5490   // If GuaranteeTCO is true, we force some calls to be callee pop so that we
5491   // can guarantee TCO.
5492   if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
5493     return true;
5494 
5495   switch (CallingConv) {
5496   default:
5497     return false;
5498   case CallingConv::X86_StdCall:
5499   case CallingConv::X86_FastCall:
5500   case CallingConv::X86_ThisCall:
5501   case CallingConv::X86_VectorCall:
5502     return !is64Bit;
5503   }
5504 }
5505 
5506 /// Return true if the condition is a signed comparison operation.
5507 static bool isX86CCSigned(unsigned X86CC) {
5508   switch (X86CC) {
5509   default:
5510     llvm_unreachable("Invalid integer condition!");
5511   case X86::COND_E:
5512   case X86::COND_NE:
5513   case X86::COND_B:
5514   case X86::COND_A:
5515   case X86::COND_BE:
5516   case X86::COND_AE:
5517     return false;
5518   case X86::COND_G:
5519   case X86::COND_GE:
5520   case X86::COND_L:
5521   case X86::COND_LE:
5522     return true;
5523   }
5524 }
5525 
5526 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
5527   switch (SetCCOpcode) {
5528   default: llvm_unreachable("Invalid integer condition!");
5529   case ISD::SETEQ:  return X86::COND_E;
5530   case ISD::SETGT:  return X86::COND_G;
5531   case ISD::SETGE:  return X86::COND_GE;
5532   case ISD::SETLT:  return X86::COND_L;
5533   case ISD::SETLE:  return X86::COND_LE;
5534   case ISD::SETNE:  return X86::COND_NE;
5535   case ISD::SETULT: return X86::COND_B;
5536   case ISD::SETUGT: return X86::COND_A;
5537   case ISD::SETULE: return X86::COND_BE;
5538   case ISD::SETUGE: return X86::COND_AE;
5539   }
5540 }
5541 
5542 /// Do a one-to-one translation of a ISD::CondCode to the X86-specific
5543 /// condition code, returning the condition code and the LHS/RHS of the
5544 /// comparison to make.
5545 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
5546                                     bool isFP, SDValue &LHS, SDValue &RHS,
5547                                     SelectionDAG &DAG) {
5548   if (!isFP) {
5549     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
5550       if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
5551         // X > -1   -> X == 0, jump !sign.
5552         RHS = DAG.getConstant(0, DL, RHS.getValueType());
5553         return X86::COND_NS;
5554       }
5555       if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
5556         // X < 0   -> X == 0, jump on sign.
5557         return X86::COND_S;
5558       }
5559       if (SetCCOpcode == ISD::SETGE && RHSC->isZero()) {
5560         // X >= 0   -> X == 0, jump on !sign.
5561         return X86::COND_NS;
5562       }
5563       if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
5564         // X < 1   -> X <= 0
5565         RHS = DAG.getConstant(0, DL, RHS.getValueType());
5566         return X86::COND_LE;
5567       }
5568     }
5569 
5570     return TranslateIntegerX86CC(SetCCOpcode);
5571   }
5572 
5573   // First determine if it is required or is profitable to flip the operands.
5574 
5575   // If LHS is a foldable load, but RHS is not, flip the condition.
5576   if (ISD::isNON_EXTLoad(LHS.getNode()) &&
5577       !ISD::isNON_EXTLoad(RHS.getNode())) {
5578     SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
5579     std::swap(LHS, RHS);
5580   }
5581 
5582   switch (SetCCOpcode) {
5583   default: break;
5584   case ISD::SETOLT:
5585   case ISD::SETOLE:
5586   case ISD::SETUGT:
5587   case ISD::SETUGE:
5588     std::swap(LHS, RHS);
5589     break;
5590   }
5591 
5592   // On a floating point condition, the flags are set as follows:
5593   // ZF  PF  CF   op
5594   //  0 | 0 | 0 | X > Y
5595   //  0 | 0 | 1 | X < Y
5596   //  1 | 0 | 0 | X == Y
5597   //  1 | 1 | 1 | unordered
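  // For example, an ordered SETOLT is handled by swapping LHS/RHS above and
  // then using COND_A (CF == 0 and ZF == 0), which also excludes the unordered
  // case because unordered comparisons set ZF, PF and CF.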
5598   switch (SetCCOpcode) {
5599   default: llvm_unreachable("Condcode should be pre-legalized away");
5600   case ISD::SETUEQ:
5601   case ISD::SETEQ:   return X86::COND_E;
5602   case ISD::SETOLT:              // flipped
5603   case ISD::SETOGT:
5604   case ISD::SETGT:   return X86::COND_A;
5605   case ISD::SETOLE:              // flipped
5606   case ISD::SETOGE:
5607   case ISD::SETGE:   return X86::COND_AE;
5608   case ISD::SETUGT:              // flipped
5609   case ISD::SETULT:
5610   case ISD::SETLT:   return X86::COND_B;
5611   case ISD::SETUGE:              // flipped
5612   case ISD::SETULE:
5613   case ISD::SETLE:   return X86::COND_BE;
5614   case ISD::SETONE:
5615   case ISD::SETNE:   return X86::COND_NE;
5616   case ISD::SETUO:   return X86::COND_P;
5617   case ISD::SETO:    return X86::COND_NP;
5618   case ISD::SETOEQ:
5619   case ISD::SETUNE:  return X86::COND_INVALID;
5620   }
5621 }
5622 
5623 /// Is there a floating point cmov for the specific X86 condition code?
5624 /// The current x86 ISA includes the following FP cmov instructions:
5625 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
5626 static bool hasFPCMov(unsigned X86CC) {
5627   switch (X86CC) {
5628   default:
5629     return false;
5630   case X86::COND_B:
5631   case X86::COND_BE:
5632   case X86::COND_E:
5633   case X86::COND_P:
5634   case X86::COND_A:
5635   case X86::COND_AE:
5636   case X86::COND_NE:
5637   case X86::COND_NP:
5638     return true;
5639   }
5640 }
5641 
5642 static bool useVPTERNLOG(const X86Subtarget &Subtarget, MVT VT) {
5643   return Subtarget.hasVLX() || Subtarget.canExtendTo512DQ() ||
5644          VT.is512BitVector();
5645 }
5646 
5647 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
5648                                            const CallInst &I,
5649                                            MachineFunction &MF,
5650                                            unsigned Intrinsic) const {
5651   Info.flags = MachineMemOperand::MONone;
5652   Info.offset = 0;
5653 
5654   const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
5655   if (!IntrData) {
5656     switch (Intrinsic) {
5657     case Intrinsic::x86_aesenc128kl:
5658     case Intrinsic::x86_aesdec128kl:
5659       Info.opc = ISD::INTRINSIC_W_CHAIN;
5660       Info.ptrVal = I.getArgOperand(1);
5661       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
5662       Info.align = Align(1);
5663       Info.flags |= MachineMemOperand::MOLoad;
5664       return true;
5665     case Intrinsic::x86_aesenc256kl:
5666     case Intrinsic::x86_aesdec256kl:
5667       Info.opc = ISD::INTRINSIC_W_CHAIN;
5668       Info.ptrVal = I.getArgOperand(1);
5669       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
5670       Info.align = Align(1);
5671       Info.flags |= MachineMemOperand::MOLoad;
5672       return true;
5673     case Intrinsic::x86_aesencwide128kl:
5674     case Intrinsic::x86_aesdecwide128kl:
5675       Info.opc = ISD::INTRINSIC_W_CHAIN;
5676       Info.ptrVal = I.getArgOperand(0);
5677       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
5678       Info.align = Align(1);
5679       Info.flags |= MachineMemOperand::MOLoad;
5680       return true;
5681     case Intrinsic::x86_aesencwide256kl:
5682     case Intrinsic::x86_aesdecwide256kl:
5683       Info.opc = ISD::INTRINSIC_W_CHAIN;
5684       Info.ptrVal = I.getArgOperand(0);
5685       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
5686       Info.align = Align(1);
5687       Info.flags |= MachineMemOperand::MOLoad;
5688       return true;
5689     case Intrinsic::x86_cmpccxadd32:
5690     case Intrinsic::x86_cmpccxadd64:
5691     case Intrinsic::x86_atomic_bts:
5692     case Intrinsic::x86_atomic_btc:
5693     case Intrinsic::x86_atomic_btr: {
5694       Info.opc = ISD::INTRINSIC_W_CHAIN;
5695       Info.ptrVal = I.getArgOperand(0);
5696       unsigned Size = I.getType()->getScalarSizeInBits();
5697       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
5698       Info.align = Align(Size);
5699       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
5700                     MachineMemOperand::MOVolatile;
5701       return true;
5702     }
5703     case Intrinsic::x86_atomic_bts_rm:
5704     case Intrinsic::x86_atomic_btc_rm:
5705     case Intrinsic::x86_atomic_btr_rm: {
5706       Info.opc = ISD::INTRINSIC_W_CHAIN;
5707       Info.ptrVal = I.getArgOperand(0);
5708       unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
5709       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
5710       Info.align = Align(Size);
5711       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
5712                     MachineMemOperand::MOVolatile;
5713       return true;
5714     }
5715     case Intrinsic::x86_aadd32:
5716     case Intrinsic::x86_aadd64:
5717     case Intrinsic::x86_aand32:
5718     case Intrinsic::x86_aand64:
5719     case Intrinsic::x86_aor32:
5720     case Intrinsic::x86_aor64:
5721     case Intrinsic::x86_axor32:
5722     case Intrinsic::x86_axor64:
5723     case Intrinsic::x86_atomic_add_cc:
5724     case Intrinsic::x86_atomic_sub_cc:
5725     case Intrinsic::x86_atomic_or_cc:
5726     case Intrinsic::x86_atomic_and_cc:
5727     case Intrinsic::x86_atomic_xor_cc: {
5728       Info.opc = ISD::INTRINSIC_W_CHAIN;
5729       Info.ptrVal = I.getArgOperand(0);
5730       unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
5731       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
5732       Info.align = Align(Size);
5733       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
5734                     MachineMemOperand::MOVolatile;
5735       return true;
5736     }
5737     }
5738     return false;
5739   }
5740 
5741   switch (IntrData->Type) {
5742   case TRUNCATE_TO_MEM_VI8:
5743   case TRUNCATE_TO_MEM_VI16:
5744   case TRUNCATE_TO_MEM_VI32: {
5745     Info.opc = ISD::INTRINSIC_VOID;
5746     Info.ptrVal = I.getArgOperand(0);
5747     MVT VT  = MVT::getVT(I.getArgOperand(1)->getType());
5748     MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
5749     if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
5750       ScalarVT = MVT::i8;
5751     else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
5752       ScalarVT = MVT::i16;
5753     else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
5754       ScalarVT = MVT::i32;
5755 
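         // The truncating store writes the same number of elements as the source
         // vector, just at the narrower scalar width chosen above.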
5756     Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
5757     Info.align = Align(1);
5758     Info.flags |= MachineMemOperand::MOStore;
5759     break;
5760   }
5761   case GATHER:
5762   case GATHER_AVX2: {
5763     Info.opc = ISD::INTRINSIC_W_CHAIN;
5764     Info.ptrVal = nullptr;
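         // Gathers have no single base pointer to record, so describe the access
         // conservatively: the number of elements actually loaded is the smaller
         // of the data and index vector element counts.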
5765     MVT DataVT = MVT::getVT(I.getType());
5766     MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5767     unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5768                                 IndexVT.getVectorNumElements());
5769     Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5770     Info.align = Align(1);
5771     Info.flags |= MachineMemOperand::MOLoad;
5772     break;
5773   }
5774   case SCATTER: {
5775     Info.opc = ISD::INTRINSIC_VOID;
5776     Info.ptrVal = nullptr;
5777     MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
5778     MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5779     unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5780                                 IndexVT.getVectorNumElements());
5781     Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5782     Info.align = Align(1);
5783     Info.flags |= MachineMemOperand::MOStore;
5784     break;
5785   }
5786   default:
5787     return false;
5788   }
5789 
5790   return true;
5791 }
5792 
5793 /// Returns true if the target can instruction select the
5794 /// specified FP immediate natively. If false, the legalizer will
5795 /// materialize the FP immediate as a load from a constant pool.
5796 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5797                                      bool ForCodeSize) const {
5798   for (const APFloat &FPImm : LegalFPImmediates)
5799     if (Imm.bitwiseIsEqual(FPImm))
5800       return true;
5801   return false;
5802 }
5803 
5804 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
5805                                               ISD::LoadExtType ExtTy,
5806                                               EVT NewVT) const {
5807   assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
5808 
5809   // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
5810   // relocations must target a movq or addq instruction: don't let the load shrink.
5811   SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
5812   if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
5813     if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
5814       return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
5815 
5816   // If this is an (1) AVX vector load with (2) multiple uses and (3) all of
5817   // those uses are extracted directly into a store, then the extract + store
5818   // can be store-folded. Therefore, it's probably not worth splitting the load.
5819   EVT VT = Load->getValueType(0);
5820   if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
5821     for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
5822       // Skip uses of the chain value. Result 0 of the node is the load value.
5823       if (UI.getUse().getResNo() != 0)
5824         continue;
5825 
5826       // If this use is not an extract + store, it's probably worth splitting.
5827       if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
5828           UI->use_begin()->getOpcode() != ISD::STORE)
5829         return true;
5830     }
5831     // All non-chain uses are extract + store.
5832     return false;
5833   }
5834 
5835   return true;
5836 }
5837 
5838 /// Returns true if it is beneficial to convert a load of a constant
5839 /// to just the constant itself.
5840 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
5841                                                           Type *Ty) const {
5842   assert(Ty->isIntegerTy());
5843 
5844   unsigned BitSize = Ty->getPrimitiveSizeInBits();
5845   if (BitSize == 0 || BitSize > 64)
5846     return false;
5847   return true;
5848 }
5849 
5850 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
5851   // If we are using XMM registers in the ABI and the condition of the select is
5852   // a floating-point compare and we have blendv or conditional move, then it is
5853   // cheaper to select instead of doing a cross-register move and creating a
5854   // load that depends on the compare result.
5855   bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
5856   return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
5857 }
5858 
5859 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
5860   // TODO: It might be a win to ease or lift this restriction, but the generic
5861   // folds in DAGCombiner conflict with vector folds for an AVX512 target.
5862   if (VT.isVector() && Subtarget.hasAVX512())
5863     return false;
5864 
5865   return true;
5866 }
5867 
5868 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
5869                                                SDValue C) const {
5870   // TODO: We handle scalars using custom code, but generic combining could make
5871   // that unnecessary.
5872   APInt MulC;
5873   if (!ISD::isConstantSplatVector(C.getNode(), MulC))
5874     return false;
5875 
5876   // Find the type this will be legalized to. Otherwise we might prematurely
5877   // convert this to shl+add/sub and then still have to type legalize those ops.
5878   // Another choice would be to defer the decision for illegal types until
5879   // after type legalization. But constant splat vectors of i64 can't make it
5880   // through type legalization on 32-bit targets so we would need to special
5881   // case vXi64.
5882   while (getTypeAction(Context, VT) != TypeLegal)
5883     VT = getTypeToTransformTo(Context, VT);
5884 
5885   // If vector multiply is legal, assume that's faster than shl + add/sub.
5886   // Multiply is a complex op with higher latency and lower throughput in
5887   // most implementations; sub-vXi32 vector multiplies are always fast,
5888   // vXi32 must not have a slow PMULLD implementation, and anything larger
5889   // (vXi64) is always going to be slow.
5890   unsigned EltSizeInBits = VT.getScalarSizeInBits();
5891   if (isOperationLegal(ISD::MUL, VT) && EltSizeInBits <= 32 &&
5892       (EltSizeInBits != 32 || !Subtarget.isPMULLDSlow()))
5893     return false;
5894 
5895   // shl+add, shl+sub, shl+add+neg
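       // (MulC + 1) is a power of 2   ->  x * (2^N - 1) = (x << N) - x        (shl+sub)
       // (MulC - 1) is a power of 2   ->  x * (2^N + 1) = (x << N) + x        (shl+add)
       // (1 - MulC) is a power of 2   ->  x * (1 - 2^N) = x - (x << N)        (shl+sub)
       // -(MulC + 1) is a power of 2  ->  x * -(2^N + 1) = -((x << N) + x)    (shl+add+neg)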
5896   return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
5897          (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
5898 }
5899 
5900 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
5901                                                 unsigned Index) const {
5902   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
5903     return false;
5904 
5905   // Mask vectors support all subregister combinations and operations that
5906   // extract half of a vector.
5907   if (ResVT.getVectorElementType() == MVT::i1)
5908     return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
5909                           (Index == ResVT.getVectorNumElements()));
5910 
5911   return (Index % ResVT.getVectorNumElements()) == 0;
5912 }
5913 
5914 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
5915   unsigned Opc = VecOp.getOpcode();
5916 
5917   // Assume target opcodes can't be scalarized.
5918   // TODO - do we have any exceptions?
5919   if (Opc >= ISD::BUILTIN_OP_END)
5920     return false;
5921 
5922   // If the vector op is not supported, try to convert to scalar.
5923   EVT VecVT = VecOp.getValueType();
5924   if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
5925     return true;
5926 
5927   // If the vector op is supported, but the scalar op is not, the transform may
5928   // not be worthwhile.
5929   EVT ScalarVT = VecVT.getScalarType();
5930   return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
5931 }
5932 
5933 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
5934                                              bool) const {
5935   // TODO: Allow vectors?
5936   if (VT.isVector())
5937     return false;
5938   return VT.isSimple() || !isOperationExpand(Opcode, VT);
5939 }
5940 
5941 bool X86TargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
5942   // Speculate cttz only if we can directly use TZCNT or can promote to i32.
5943   return Subtarget.hasBMI() ||
5944          (!Ty->isVectorTy() && Ty->getScalarSizeInBits() < 32);
5945 }
5946 
5947 bool X86TargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
5948   // Speculate ctlz only if we can directly use LZCNT.
5949   return Subtarget.hasLZCNT();
5950 }
5951 
5952 bool X86TargetLowering::hasBitPreservingFPLogic(EVT VT) const {
5953   return VT == MVT::f32 || VT == MVT::f64 || VT.isVector();
5954 }
5955 
5956 bool X86TargetLowering::ShouldShrinkFPConstant(EVT VT) const {
5957   // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
5958   // expensive than a straight movsd. On the other hand, it's important to
5959   // shrink long double fp constant since fldt is very slow.
5960   return !Subtarget.hasSSE2() || VT == MVT::f80;
5961 }
5962 
5963 bool X86TargetLowering::isScalarFPTypeInSSEReg(EVT VT) const {
5964   return (VT == MVT::f64 && Subtarget.hasSSE2()) ||
5965          (VT == MVT::f32 && Subtarget.hasSSE1()) || VT == MVT::f16;
5966 }
5967 
5968 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
5969                                                 const SelectionDAG &DAG,
5970                                                 const MachineMemOperand &MMO) const {
5971   if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
5972       BitcastVT.getVectorElementType() == MVT::i1)
5973     return false;
5974 
5975   if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
5976     return false;
5977 
5978   // If both types are legal vectors, it's always ok to convert them.
5979   if (LoadVT.isVector() && BitcastVT.isVector() &&
5980       isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
5981     return true;
5982 
5983   return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
5984 }
5985 
5986 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5987                                          const MachineFunction &MF) const {
5988   // If the NoImplicitFloat attribute is set, do not merge to a size larger
5989   // than the widest integer register, since that would require FP/vector registers.
5990   bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);
5991 
5992   if (NoFloat) {
5993     unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
5994     return (MemVT.getSizeInBits() <= MaxIntSize);
5995   }
5996   // Make sure we don't merge to a size greater than our preferred vector
5997   // width.
5998   if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
5999     return false;
6000 
6001   return true;
6002 }
6003 
6004 bool X86TargetLowering::isCtlzFast() const {
6005   return Subtarget.hasFastLZCNT();
6006 }
6007 
6008 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
6009     const Instruction &AndI) const {
6010   return true;
6011 }
6012 
6013 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
6014   EVT VT = Y.getValueType();
6015 
6016   if (VT.isVector())
6017     return false;
6018 
6019   if (!Subtarget.hasBMI())
6020     return false;
6021 
6022   // There are only 32-bit and 64-bit forms for 'andn'.
6023   if (VT != MVT::i32 && VT != MVT::i64)
6024     return false;
6025 
6026   return !isa<ConstantSDNode>(Y);
6027 }
6028 
6029 bool X86TargetLowering::hasAndNot(SDValue Y) const {
6030   EVT VT = Y.getValueType();
6031 
6032   if (!VT.isVector())
6033     return hasAndNotCompare(Y);
6034 
6035   // Vector.
6036 
6037   if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
6038     return false;
6039 
6040   if (VT == MVT::v4i32)
6041     return true;
6042 
6043   return Subtarget.hasSSE2();
6044 }
6045 
6046 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
6047   return X.getValueType().isScalarInteger(); // 'bt'
6048 }
6049 
6050 bool X86TargetLowering::
6051     shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
6052         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
6053         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
6054         SelectionDAG &DAG) const {
6055   // Does the baseline recommend not performing the fold by default?
6056   if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
6057           X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
6058     return false;
6059   // For scalars this transform is always beneficial.
6060   if (X.getValueType().isScalarInteger())
6061     return true;
6062   // If all the shift amounts are identical, then the transform is beneficial even
6063   // with rudimentary SSE2 shifts.
6064   if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
6065     return true;
6066   // If we have AVX2 with its powerful shift operations, then it's also good.
6067   if (Subtarget.hasAVX2())
6068     return true;
6069   // Pre-AVX2 vector codegen for this pattern is best for the variant with 'shl'.
6070   return NewShiftOpcode == ISD::SHL;
6071 }
6072 
6073 bool X86TargetLowering::preferScalarizeSplat(unsigned Opc) const {
6074   return Opc != ISD::FP_EXTEND;
6075 }
6076 
6077 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
6078     const SDNode *N, CombineLevel Level) const {
6079   assert(((N->getOpcode() == ISD::SHL &&
6080            N->getOperand(0).getOpcode() == ISD::SRL) ||
6081           (N->getOpcode() == ISD::SRL &&
6082            N->getOperand(0).getOpcode() == ISD::SHL)) &&
6083          "Expected shift-shift mask");
6084   // TODO: Should we always create i64 masks? Or only folded immediates?
6085   EVT VT = N->getValueType(0);
6086   if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
6087       (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
6088     // Only fold if the shift values are equal - so it folds to AND.
6089     // TODO - we should fold if either is a non-uniform vector but we don't do
6090     // the fold for non-splats yet.
6091     return N->getOperand(1) == N->getOperand(0).getOperand(1);
6092   }
6093   return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
6094 }
6095 
6096 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
6097   EVT VT = Y.getValueType();
6098 
6099   // For vectors, we don't have a preference, but we probably want a mask.
6100   if (VT.isVector())
6101     return false;
6102 
6103   // 64-bit shifts on 32-bit targets produce really bad bloated code.
6104   if (VT == MVT::i64 && !Subtarget.is64Bit())
6105     return false;
6106 
6107   return true;
6108 }
6109 
6110 TargetLowering::ShiftLegalizationStrategy
6111 X86TargetLowering::preferredShiftLegalizationStrategy(
6112     SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const {
6113   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
6114       !Subtarget.isOSWindows())
6115     return ShiftLegalizationStrategy::LowerToLibcall;
6116   return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
6117                                                             ExpansionFactor);
6118 }
6119 
6120 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
6121   // Any legal vector type can be splatted more efficiently than
6122   // loading/spilling from memory.
6123   return isTypeLegal(VT);
6124 }
6125 
6126 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
6127   MVT VT = MVT::getIntegerVT(NumBits);
6128   if (isTypeLegal(VT))
6129     return VT;
6130 
6131   // PMOVMSKB can handle this.
6132   if (NumBits == 128 && isTypeLegal(MVT::v16i8))
6133     return MVT::v16i8;
6134 
6135   // VPMOVMSKB can handle this.
6136   if (NumBits == 256 && isTypeLegal(MVT::v32i8))
6137     return MVT::v32i8;
6138 
6139   // TODO: Allow 64-bit type for 32-bit target.
6140   // TODO: 512-bit types should be allowed, but make sure that those
6141   // cases are handled in combineVectorSizedSetCCEquality().
6142 
6143   return MVT::INVALID_SIMPLE_VALUE_TYPE;
6144 }
6145 
6146 /// Val is the undef sentinel value or equal to the specified value.
6147 static bool isUndefOrEqual(int Val, int CmpVal) {
6148   return ((Val == SM_SentinelUndef) || (Val == CmpVal));
6149 }
6150 
6151 /// Return true if every element in Mask is the undef sentinel value or equal to
6152 /// the specified value.
6153 static bool isUndefOrEqual(ArrayRef<int> Mask, int CmpVal) {
6154   return llvm::all_of(Mask, [CmpVal](int M) {
6155     return (M == SM_SentinelUndef) || (M == CmpVal);
6156   });
6157 }
6158 
6159 /// Val is either the undef or zero sentinel value.
6160 static bool isUndefOrZero(int Val) {
6161   return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
6162 }
6163 
6164 /// Return true if every element in Mask, beginning from position Pos and ending
6165 /// in Pos+Size is the undef sentinel value.
6166 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
6167   return llvm::all_of(Mask.slice(Pos, Size),
6168                       [](int M) { return M == SM_SentinelUndef; });
6169 }
6170 
6171 /// Return true if the mask creates a vector whose lower half is undefined.
6172 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
6173   unsigned NumElts = Mask.size();
6174   return isUndefInRange(Mask, 0, NumElts / 2);
6175 }
6176 
6177 /// Return true if the mask creates a vector whose upper half is undefined.
6178 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
6179   unsigned NumElts = Mask.size();
6180   return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
6181 }
6182 
6183 /// Return true if Val falls within the specified range [Low, Hi).
6184 static bool isInRange(int Val, int Low, int Hi) {
6185   return (Val >= Low && Val < Hi);
6186 }
6187 
6188 /// Return true if the value of any element in Mask falls within the specified
6189 /// range [Low, Hi).
6190 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
6191   return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
6192 }
6193 
6194 /// Return true if the value of any element in Mask is the zero sentinel value.
6195 static bool isAnyZero(ArrayRef<int> Mask) {
6196   return llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
6197 }
6198 
6199 /// Return true if the value of any element in Mask is the zero or undef
6200 /// sentinel values.
6201 static bool isAnyZeroOrUndef(ArrayRef<int> Mask) {
6202   return llvm::any_of(Mask, [](int M) {
6203     return M == SM_SentinelZero || M == SM_SentinelUndef;
6204   });
6205 }
6206 
6207 /// Return true if Val is undef or if its value falls within the
6208 /// specified range [Low, Hi).
6209 static bool isUndefOrInRange(int Val, int Low, int Hi) {
6210   return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
6211 }
6212 
6213 /// Return true if every element in Mask is undef or if its value
6214 /// falls within the specified range [Low, Hi).
6215 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
6216   return llvm::all_of(
6217       Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
6218 }
6219 
6220 /// Return true if Val is undef, zero or if its value falls within the
6221 /// specified range [Low, Hi).
6222 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
6223   return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
6224 }
6225 
6226 /// Return true if every element in Mask is undef, zero or if its value
6227 /// falls within the specified range [Low, Hi).
6228 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
6229   return llvm::all_of(
6230       Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
6231 }
6232 
6233 /// Return true if every element in Mask, beginning
6234 /// from position Pos and ending in Pos + Size, falls within the specified
6235 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
6236 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
6237                                        unsigned Size, int Low, int Step = 1) {
6238   for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
6239     if (!isUndefOrEqual(Mask[i], Low))
6240       return false;
6241   return true;
6242 }
6243 
6244 /// Return true if every element in Mask, beginning
6245 /// from position Pos and ending in Pos+Size, falls within the specified
6246 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step), or is undef or zero.
6247 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
6248                                              unsigned Size, int Low,
6249                                              int Step = 1) {
6250   for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
6251     if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
6252       return false;
6253   return true;
6254 }
6255 
6256 /// Return true if every element in Mask, beginning
6257 /// from position Pos and ending in Pos+Size is undef or is zero.
6258 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
6259                                  unsigned Size) {
6260   return llvm::all_of(Mask.slice(Pos, Size), isUndefOrZero);
6261 }
6262 
6263 /// Helper function to test whether a shuffle mask could be
6264 /// simplified by widening the elements being shuffled.
6265 ///
6266 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
6267 /// leaves it in an unspecified state.
6268 ///
6269 /// NOTE: This must handle normal vector shuffle masks and *target* vector
6270 /// shuffle masks. The latter have the special property of a '-2' representing
6271 /// a zero-ed lane of a vector.
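     ///
     /// For example, <0,1,2,3> widens to <0,1>, <-1,3,4,5> widens to <1,2>, and
     /// <0,2,1,3> cannot be widened because no element pair lines up.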
6272 static bool canWidenShuffleElements(ArrayRef<int> Mask,
6273                                     SmallVectorImpl<int> &WidenedMask) {
6274   WidenedMask.assign(Mask.size() / 2, 0);
6275   for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
6276     int M0 = Mask[i];
6277     int M1 = Mask[i + 1];
6278 
6279     // If both elements are undef, it's trivial.
6280     if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
6281       WidenedMask[i / 2] = SM_SentinelUndef;
6282       continue;
6283     }
6284 
6285     // Check for an undef mask and a mask value properly aligned to fit with
6286     // a pair of values. If we find such a case, use the non-undef mask's value.
6287     if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
6288       WidenedMask[i / 2] = M1 / 2;
6289       continue;
6290     }
6291     if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
6292       WidenedMask[i / 2] = M0 / 2;
6293       continue;
6294     }
6295 
6296     // When zeroing, we need to spread the zeroing across both lanes to widen.
6297     if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
6298       if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
6299           (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
6300         WidenedMask[i / 2] = SM_SentinelZero;
6301         continue;
6302       }
6303       return false;
6304     }
6305 
6306     // Finally check if the two mask values are adjacent and aligned with
6307     // a pair.
6308     if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
6309       WidenedMask[i / 2] = M0 / 2;
6310       continue;
6311     }
6312 
6313     // Otherwise we can't safely widen the elements used in this shuffle.
6314     return false;
6315   }
6316   assert(WidenedMask.size() == Mask.size() / 2 &&
6317          "Incorrect size of mask after widening the elements!");
6318 
6319   return true;
6320 }
6321 
6322 static bool canWidenShuffleElements(ArrayRef<int> Mask,
6323                                     const APInt &Zeroable,
6324                                     bool V2IsZero,
6325                                     SmallVectorImpl<int> &WidenedMask) {
6326   // Create an alternative mask with info about zeroable elements.
6327   // Here we do not set undef elements as zeroable.
6328   SmallVector<int, 64> ZeroableMask(Mask);
6329   if (V2IsZero) {
6330     assert(!Zeroable.isZero() && "V2's non-undef elements are used?!");
6331     for (int i = 0, Size = Mask.size(); i != Size; ++i)
6332       if (Mask[i] != SM_SentinelUndef && Zeroable[i])
6333         ZeroableMask[i] = SM_SentinelZero;
6334   }
6335   return canWidenShuffleElements(ZeroableMask, WidenedMask);
6336 }
6337 
6338 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
6339   SmallVector<int, 32> WidenedMask;
6340   return canWidenShuffleElements(Mask, WidenedMask);
6341 }
6342 
6343 // Attempt to narrow/widen a shuffle mask until it matches the target number of
6344 // elements.
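     // For example, scaling <0,2> to 4 elements gives <0,1,4,5>, while scaling
     // <0,1,6,7> down to 2 elements gives <0,3>.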
6345 static bool scaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts,
6346                                  SmallVectorImpl<int> &ScaledMask) {
6347   unsigned NumSrcElts = Mask.size();
6348   assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
6349          "Illegal shuffle scale factor");
6350 
6351   // Narrowing is guaranteed to work.
6352   if (NumDstElts >= NumSrcElts) {
6353     int Scale = NumDstElts / NumSrcElts;
6354     llvm::narrowShuffleMaskElts(Scale, Mask, ScaledMask);
6355     return true;
6356   }
6357 
6358   // We have to repeat the widening until we reach the target size, but we can
6359   // split out the first widening as it sets up ScaledMask for us.
6360   if (canWidenShuffleElements(Mask, ScaledMask)) {
6361     while (ScaledMask.size() > NumDstElts) {
6362       SmallVector<int, 16> WidenedMask;
6363       if (!canWidenShuffleElements(ScaledMask, WidenedMask))
6364         return false;
6365       ScaledMask = std::move(WidenedMask);
6366     }
6367     return true;
6368   }
6369 
6370   return false;
6371 }
6372 
6373 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
6374 bool X86::isZeroNode(SDValue Elt) {
6375   return isNullConstant(Elt) || isNullFPConstant(Elt);
6376 }
6377 
6378 // Build a vector of constants.
6379 // Use an UNDEF node if MaskElt == -1.
6380 // Split 64-bit constants in 32-bit mode.
6381 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
6382                               const SDLoc &dl, bool IsMask = false) {
6383 
6384   SmallVector<SDValue, 32>  Ops;
6385   bool Split = false;
6386 
6387   MVT ConstVecVT = VT;
6388   unsigned NumElts = VT.getVectorNumElements();
6389   bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
6390   if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
6391     ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
6392     Split = true;
6393   }
6394 
6395   MVT EltVT = ConstVecVT.getVectorElementType();
6396   for (unsigned i = 0; i < NumElts; ++i) {
6397     bool IsUndef = Values[i] < 0 && IsMask;
6398     SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
6399       DAG.getConstant(Values[i], dl, EltVT);
6400     Ops.push_back(OpNode);
6401     if (Split)
6402       Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
6403                     DAG.getConstant(0, dl, EltVT));
6404   }
6405   SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
6406   if (Split)
6407     ConstsNode = DAG.getBitcast(VT, ConstsNode);
6408   return ConstsNode;
6409 }
6410 
6411 static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
6412                               MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
6413   assert(Bits.size() == Undefs.getBitWidth() &&
6414          "Unequal constant and undef arrays");
6415   SmallVector<SDValue, 32> Ops;
6416   bool Split = false;
6417 
6418   MVT ConstVecVT = VT;
6419   unsigned NumElts = VT.getVectorNumElements();
6420   bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
6421   if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
6422     ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
6423     Split = true;
6424   }
6425 
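       // When splitting, each 64-bit constant is emitted as its low 32 bits
       // followed by its high 32 bits, and the wide build_vector is bitcast
       // back to VT at the end.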
6426   MVT EltVT = ConstVecVT.getVectorElementType();
6427   for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
6428     if (Undefs[i]) {
6429       Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
6430       continue;
6431     }
6432     const APInt &V = Bits[i];
6433     assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
6434     if (Split) {
6435       Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
6436       Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
6437     } else if (EltVT == MVT::f32) {
6438       APFloat FV(APFloat::IEEEsingle(), V);
6439       Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
6440     } else if (EltVT == MVT::f64) {
6441       APFloat FV(APFloat::IEEEdouble(), V);
6442       Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
6443     } else {
6444       Ops.push_back(DAG.getConstant(V, dl, EltVT));
6445     }
6446   }
6447 
6448   SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
6449   return DAG.getBitcast(VT, ConstsNode);
6450 }
6451 
6452 /// Returns a vector of specified type with all zero elements.
6453 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
6454                              SelectionDAG &DAG, const SDLoc &dl) {
6455   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
6456           VT.getVectorElementType() == MVT::i1) &&
6457          "Unexpected vector type");
6458 
6459   // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
6460   // type. This ensures they get CSE'd. But if the integer type is not
6461   // available, use a floating-point +0.0 instead.
6462   SDValue Vec;
6463   if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
6464     Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
6465   } else if (VT.isFloatingPoint()) {
6466     Vec = DAG.getConstantFP(+0.0, dl, VT);
6467   } else if (VT.getVectorElementType() == MVT::i1) {
6468     assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
6469            "Unexpected vector type");
6470     Vec = DAG.getConstant(0, dl, VT);
6471   } else {
6472     unsigned Num32BitElts = VT.getSizeInBits() / 32;
6473     Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
6474   }
6475   return DAG.getBitcast(VT, Vec);
6476 }
6477 
6478 // Helper to determine whether the ops are all extracted subvectors that come
6479 // from a single source. If we allow commutation they don't have to be in order (Lo/Hi).
6480 static SDValue getSplitVectorSrc(SDValue LHS, SDValue RHS, bool AllowCommute) {
6481   if (LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
6482       RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
6483       LHS.getValueType() != RHS.getValueType() ||
6484       LHS.getOperand(0) != RHS.getOperand(0))
6485     return SDValue();
6486 
6487   SDValue Src = LHS.getOperand(0);
6488   if (Src.getValueSizeInBits() != (LHS.getValueSizeInBits() * 2))
6489     return SDValue();
6490 
6491   unsigned NumElts = LHS.getValueType().getVectorNumElements();
6492   if ((LHS.getConstantOperandAPInt(1) == 0 &&
6493        RHS.getConstantOperandAPInt(1) == NumElts) ||
6494       (AllowCommute && RHS.getConstantOperandAPInt(1) == 0 &&
6495        LHS.getConstantOperandAPInt(1) == NumElts))
6496     return Src;
6497 
6498   return SDValue();
6499 }
6500 
6501 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
6502                                 const SDLoc &dl, unsigned vectorWidth) {
6503   EVT VT = Vec.getValueType();
6504   EVT ElVT = VT.getVectorElementType();
6505   unsigned Factor = VT.getSizeInBits() / vectorWidth;
6506   EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
6507                                   VT.getVectorNumElements() / Factor);
6508 
6509   // Extract the relevant vectorWidth bits.  Generate an EXTRACT_SUBVECTOR
6510   unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
6511   assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
6512 
6513   // This is the index of the first element of the vectorWidth-bit chunk
6514   // we want. Since ElemsPerChunk is a power of 2 we just need to clear bits.
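       // For example, with 128-bit chunks of a v8f32 source (4 elements per chunk),
       // indices 0..3 round down to 0 and 4..7 round down to 4.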
6515   IdxVal &= ~(ElemsPerChunk - 1);
6516 
6517   // If the input is a buildvector just emit a smaller one.
6518   if (Vec.getOpcode() == ISD::BUILD_VECTOR)
6519     return DAG.getBuildVector(ResultVT, dl,
6520                               Vec->ops().slice(IdxVal, ElemsPerChunk));
6521 
6522   SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
6523   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
6524 }
6525 
6526 /// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
6527 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
6528 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
6529 /// instructions or a simple subregister reference. Idx is an index in the
6530 /// 128 bits we want.  It need not be aligned to a 128-bit boundary.  That makes
6531 /// lowering EXTRACT_VECTOR_ELT operations easier.
6532 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
6533                                    SelectionDAG &DAG, const SDLoc &dl) {
6534   assert((Vec.getValueType().is256BitVector() ||
6535           Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
6536   return extractSubVector(Vec, IdxVal, DAG, dl, 128);
6537 }
6538 
6539 /// Generate a DAG to grab 256-bits from a 512-bit vector.
6540 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
6541                                    SelectionDAG &DAG, const SDLoc &dl) {
6542   assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
6543   return extractSubVector(Vec, IdxVal, DAG, dl, 256);
6544 }
6545 
6546 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
6547                                SelectionDAG &DAG, const SDLoc &dl,
6548                                unsigned vectorWidth) {
6549   assert((vectorWidth == 128 || vectorWidth == 256) &&
6550          "Unsupported vector width");
6551   // Inserting UNDEF just returns Result.
6552   if (Vec.isUndef())
6553     return Result;
6554   EVT VT = Vec.getValueType();
6555   EVT ElVT = VT.getVectorElementType();
6556   EVT ResultVT = Result.getValueType();
6557 
6558   // Insert the relevant vectorWidth bits.
6559   unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
6560   assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
6561 
6562   // This is the index of the first element of the vectorWidth-bit chunk
6563   // we want. Since ElemsPerChunk is a power of 2 we just need to clear bits.
6564   IdxVal &= ~(ElemsPerChunk - 1);
6565 
6566   SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
6567   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
6568 }
6569 
6570 /// Generate a DAG to put 128-bits into a vector > 128 bits.  This
6571 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
6572 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
6573 /// simple superregister reference.  Idx is an index in the 128 bits
6574 /// we want.  It need not be aligned to a 128-bit boundary.  That makes
6575 /// lowering INSERT_VECTOR_ELT operations easier.
6576 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
6577                                   SelectionDAG &DAG, const SDLoc &dl) {
6578   assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
6579   return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
6580 }
6581 
6582 /// Widen a vector to a larger size with the same scalar type, with the new
6583 /// elements either zero or undef.
6584 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
6585                               const X86Subtarget &Subtarget, SelectionDAG &DAG,
6586                               const SDLoc &dl) {
6587   assert(Vec.getValueSizeInBits().getFixedValue() < VT.getFixedSizeInBits() &&
6588          Vec.getValueType().getScalarType() == VT.getScalarType() &&
6589          "Unsupported vector widening type");
6590   SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
6591                                 : DAG.getUNDEF(VT);
6592   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
6593                      DAG.getIntPtrConstant(0, dl));
6594 }
6595 
6596 /// Widen a vector to a larger size with the same scalar type, with the new
6597 /// elements either zero or undef.
6598 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
6599                               const X86Subtarget &Subtarget, SelectionDAG &DAG,
6600                               const SDLoc &dl, unsigned WideSizeInBits) {
6601   assert(Vec.getValueSizeInBits() < WideSizeInBits &&
6602          (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
6603          "Unsupported vector widening type");
6604   unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
6605   MVT SVT = Vec.getSimpleValueType().getScalarType();
6606   MVT VT = MVT::getVectorVT(SVT, WideNumElts);
6607   return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
6608 }
6609 
6610 // Helper function to collect subvector ops that are concatenated together,
6611 // either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
6612 // The subvectors in Ops are guaranteed to have the same type.
6613 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
6614                              SelectionDAG &DAG) {
6615   assert(Ops.empty() && "Expected an empty ops vector");
6616 
6617   if (N->getOpcode() == ISD::CONCAT_VECTORS) {
6618     Ops.append(N->op_begin(), N->op_end());
6619     return true;
6620   }
6621 
6622   if (N->getOpcode() == ISD::INSERT_SUBVECTOR) {
6623     SDValue Src = N->getOperand(0);
6624     SDValue Sub = N->getOperand(1);
6625     const APInt &Idx = N->getConstantOperandAPInt(2);
6626     EVT VT = Src.getValueType();
6627     EVT SubVT = Sub.getValueType();
6628 
6629     // TODO - Handle more general insert_subvector chains.
6630     if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2)) {
6631       // insert_subvector(undef, x, lo)
6632       if (Idx == 0 && Src.isUndef()) {
6633         Ops.push_back(Sub);
6634         Ops.push_back(DAG.getUNDEF(SubVT));
6635         return true;
6636       }
6637       if (Idx == (VT.getVectorNumElements() / 2)) {
6638         // insert_subvector(insert_subvector(undef, x, lo), y, hi)
6639         if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
6640             Src.getOperand(1).getValueType() == SubVT &&
6641             isNullConstant(Src.getOperand(2))) {
6642           Ops.push_back(Src.getOperand(1));
6643           Ops.push_back(Sub);
6644           return true;
6645         }
6646         // insert_subvector(x, extract_subvector(x, lo), hi)
6647         if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6648             Sub.getOperand(0) == Src && isNullConstant(Sub.getOperand(1))) {
6649           Ops.append(2, Sub);
6650           return true;
6651         }
6652         // insert_subvector(undef, x, hi)
6653         if (Src.isUndef()) {
6654           Ops.push_back(DAG.getUNDEF(SubVT));
6655           Ops.push_back(Sub);
6656           return true;
6657         }
6658       }
6659     }
6660   }
6661 
6662   return false;
6663 }
6664 
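     /// Split a vector into two equal-sized halves, returned as a (Lo, Hi) pair.
     /// For a splat value with no undefs the (free) low half is returned twice.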
6665 static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
6666                                                const SDLoc &dl) {
6667   EVT VT = Op.getValueType();
6668   unsigned NumElems = VT.getVectorNumElements();
6669   unsigned SizeInBits = VT.getSizeInBits();
6670   assert((NumElems % 2) == 0 && (SizeInBits % 2) == 0 &&
6671          "Can't split odd sized vector");
6672 
6673   // If this is a splat value (with no-undefs) then use the lower subvector,
6674   // which should be a free extraction.
6675   SDValue Lo = extractSubVector(Op, 0, DAG, dl, SizeInBits / 2);
6676   if (DAG.isSplatValue(Op, /*AllowUndefs*/ false))
6677     return std::make_pair(Lo, Lo);
6678 
6679   SDValue Hi = extractSubVector(Op, NumElems / 2, DAG, dl, SizeInBits / 2);
6680   return std::make_pair(Lo, Hi);
6681 }
6682 
6683 /// Break an operation into 2 half-sized ops and then concatenate the results.
6684 static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG) {
6685   unsigned NumOps = Op.getNumOperands();
6686   EVT VT = Op.getValueType();
6687   SDLoc dl(Op);
6688 
6689   // Split each vector operand into Lo/Hi halves; pass scalars to both halves.
6690   SmallVector<SDValue> LoOps(NumOps, SDValue());
6691   SmallVector<SDValue> HiOps(NumOps, SDValue());
6692   for (unsigned I = 0; I != NumOps; ++I) {
6693     SDValue SrcOp = Op.getOperand(I);
6694     if (!SrcOp.getValueType().isVector()) {
6695       LoOps[I] = HiOps[I] = SrcOp;
6696       continue;
6697     }
6698     std::tie(LoOps[I], HiOps[I]) = splitVector(SrcOp, DAG, dl);
6699   }
6700 
6701   EVT LoVT, HiVT;
6702   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
6703   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
6704                      DAG.getNode(Op.getOpcode(), dl, LoVT, LoOps),
6705                      DAG.getNode(Op.getOpcode(), dl, HiVT, HiOps));
6706 }
6707 
6708 /// Break a unary integer operation into 2 half-sized ops and then
6709 /// concatenate the result back.
6710 static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
6711   // Make sure we only try to split 256/512-bit types to avoid creating
6712   // narrow vectors.
6713   EVT VT = Op.getValueType();
6714   (void)VT;
6715   assert((Op.getOperand(0).getValueType().is256BitVector() ||
6716           Op.getOperand(0).getValueType().is512BitVector()) &&
6717          (VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
6718   assert(Op.getOperand(0).getValueType().getVectorNumElements() ==
6719              VT.getVectorNumElements() &&
6720          "Unexpected VTs!");
6721   return splitVectorOp(Op, DAG);
6722 }
6723 
6724 /// Break a binary integer operation into 2 half-sized ops and then
6725 /// concatenate the result back.
6726 static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG) {
6727   // Assert that all the types match.
6728   EVT VT = Op.getValueType();
6729   (void)VT;
6730   assert(Op.getOperand(0).getValueType() == VT &&
6731          Op.getOperand(1).getValueType() == VT && "Unexpected VTs!");
6732   assert((VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
6733   return splitVectorOp(Op, DAG);
6734 }
6735 
6736 // Helper for splitting operands of an operation to a legal target size and
6737 // applying a function to each part.
6738 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
6739 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
6740 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
6741 // The argument Builder is a function that will be applied on each split part:
6742 // SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
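     // For example, a 512-bit operation on an AVX2-only target is split into two
     // 256-bit pieces, Builder is invoked on each piece, and the results are
     // re-concatenated with ISD::CONCAT_VECTORS.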
6743 template <typename F>
6744 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
6745                          const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
6746                          F Builder, bool CheckBWI = true) {
6747   assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
6748   unsigned NumSubs = 1;
6749   if ((CheckBWI && Subtarget.useBWIRegs()) ||
6750       (!CheckBWI && Subtarget.useAVX512Regs())) {
6751     if (VT.getSizeInBits() > 512) {
6752       NumSubs = VT.getSizeInBits() / 512;
6753       assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
6754     }
6755   } else if (Subtarget.hasAVX2()) {
6756     if (VT.getSizeInBits() > 256) {
6757       NumSubs = VT.getSizeInBits() / 256;
6758       assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
6759     }
6760   } else {
6761     if (VT.getSizeInBits() > 128) {
6762       NumSubs = VT.getSizeInBits() / 128;
6763       assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
6764     }
6765   }
6766 
6767   if (NumSubs == 1)
6768     return Builder(DAG, DL, Ops);
6769 
6770   SmallVector<SDValue, 4> Subs;
6771   for (unsigned i = 0; i != NumSubs; ++i) {
6772     SmallVector<SDValue, 2> SubOps;
6773     for (SDValue Op : Ops) {
6774       EVT OpVT = Op.getValueType();
6775       unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
6776       unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
6777       SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
6778     }
6779     Subs.push_back(Builder(DAG, DL, SubOps));
6780   }
6781   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
6782 }
6783 
6784 // Helper function that extends a non-512-bit vector op to 512-bits on non-VLX
6785 // targets.
6786 static SDValue getAVX512Node(unsigned Opcode, const SDLoc &DL, MVT VT,
6787                              ArrayRef<SDValue> Ops, SelectionDAG &DAG,
6788                              const X86Subtarget &Subtarget) {
6789   assert(Subtarget.hasAVX512() && "AVX512 target expected");
6790   MVT SVT = VT.getScalarType();
6791 
6792   // If we have a 32/64-bit splatted constant, splat it to DstTy to
6793   // encourage a foldable broadcast'd operand.
6794   auto MakeBroadcastOp = [&](SDValue Op, MVT OpVT, MVT DstVT) {
6795     unsigned OpEltSizeInBits = OpVT.getScalarSizeInBits();
6796     // AVX512 broadcasts 32/64-bit operands.
6797     // TODO: Support float once getAVX512Node is used by fp-ops.
6798     if (!OpVT.isInteger() || OpEltSizeInBits < 32 ||
6799         !DAG.getTargetLoweringInfo().isTypeLegal(SVT))
6800       return SDValue();
6801     // If we're not widening, don't bother if we're not bitcasting.
6802     if (OpVT == DstVT && Op.getOpcode() != ISD::BITCAST)
6803       return SDValue();
6804     if (auto *BV = dyn_cast<BuildVectorSDNode>(peekThroughBitcasts(Op))) {
6805       APInt SplatValue, SplatUndef;
6806       unsigned SplatBitSize;
6807       bool HasAnyUndefs;
6808       if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
6809                               HasAnyUndefs, OpEltSizeInBits) &&
6810           !HasAnyUndefs && SplatValue.getBitWidth() == OpEltSizeInBits)
6811         return DAG.getConstant(SplatValue, DL, DstVT);
6812     }
6813     return SDValue();
6814   };
6815 
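       // On targets without VLX these AVX512 ops are only available at 512 bits,
       // so widen the operands, perform the op at 512 bits, and extract the
       // original width again below.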
6816   bool Widen = !(Subtarget.hasVLX() || VT.is512BitVector());
6817 
6818   MVT DstVT = VT;
6819   if (Widen)
6820     DstVT = MVT::getVectorVT(SVT, 512 / SVT.getSizeInBits());
6821 
6822   // Canonicalize src operands.
6823   SmallVector<SDValue> SrcOps(Ops.begin(), Ops.end());
6824   for (SDValue &Op : SrcOps) {
6825     MVT OpVT = Op.getSimpleValueType();
6826     // Just pass through scalar operands.
6827     if (!OpVT.isVector())
6828       continue;
6829     assert(OpVT == VT && "Vector type mismatch");
6830 
6831     if (SDValue BroadcastOp = MakeBroadcastOp(Op, OpVT, DstVT)) {
6832       Op = BroadcastOp;
6833       continue;
6834     }
6835 
6836     // Just widen the subvector by inserting into an undef wide vector.
6837     if (Widen)
6838       Op = widenSubVector(Op, false, Subtarget, DAG, DL, 512);
6839   }
6840 
6841   SDValue Res = DAG.getNode(Opcode, DL, DstVT, SrcOps);
6842 
6843   // Perform the 512-bit op then extract the bottom subvector.
6844   if (Widen)
6845     Res = extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
6846   return Res;
6847 }
6848 
6849 /// Insert i1-subvector to i1-vector.
6850 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
6851                                 const X86Subtarget &Subtarget) {
6852 
6853   SDLoc dl(Op);
6854   SDValue Vec = Op.getOperand(0);
6855   SDValue SubVec = Op.getOperand(1);
6856   SDValue Idx = Op.getOperand(2);
6857   unsigned IdxVal = Op.getConstantOperandVal(2);
6858 
6859   // Inserting undef is a nop. We can just return the original vector.
6860   if (SubVec.isUndef())
6861     return Vec;
6862 
6863   if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
6864     return Op;
6865 
6866   MVT OpVT = Op.getSimpleValueType();
6867   unsigned NumElems = OpVT.getVectorNumElements();
6868   SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
6869 
6870   // Extend to natively supported kshift.
6871   MVT WideOpVT = OpVT;
6872   if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
6873     WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
6874 
6875   // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
6876   // if necessary.
6877   if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
6878     // May need to promote to a legal type.
6879     Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6880                      DAG.getConstant(0, dl, WideOpVT),
6881                      SubVec, Idx);
6882     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6883   }
6884 
6885   MVT SubVecVT = SubVec.getSimpleValueType();
6886   unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
6887   assert(IdxVal + SubVecNumElems <= NumElems &&
6888          IdxVal % SubVecVT.getSizeInBits() == 0 &&
6889          "Unexpected index value in INSERT_SUBVECTOR");
6890 
6891   SDValue Undef = DAG.getUNDEF(WideOpVT);
6892 
6893   if (IdxVal == 0) {
6894     // Zero the lower bits of Vec.
6895     SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
6896     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
6897                       ZeroIdx);
6898     Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
6899     Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
6900     // Merge them together, SubVec should be zero extended.
6901     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6902                          DAG.getConstant(0, dl, WideOpVT),
6903                          SubVec, ZeroIdx);
6904     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6905     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6906   }
6907 
6908   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6909                        Undef, SubVec, ZeroIdx);
6910 
6911   if (Vec.isUndef()) {
6912     assert(IdxVal != 0 && "Unexpected index");
6913     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6914                          DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6915     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6916   }
6917 
6918   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
6919     assert(IdxVal != 0 && "Unexpected index");
6920     // If upper elements of Vec are known undef, then just shift into place.
6921     if (llvm::all_of(Vec->ops().slice(IdxVal + SubVecNumElems),
6922                      [](SDValue V) { return V.isUndef(); })) {
6923       SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6924                            DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6925     } else {
6926       NumElems = WideOpVT.getVectorNumElements();
6927       unsigned ShiftLeft = NumElems - SubVecNumElems;
6928       unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
6929       SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6930                            DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6931       if (ShiftRight != 0)
6932         SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6933                              DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6934     }
6935     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6936   }
6937 
6938   // Simple case when we put the subvector in the upper part.
6939   if (IdxVal + SubVecNumElems == NumElems) {
6940     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6941                          DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6942     if (SubVecNumElems * 2 == NumElems) {
6943       // Special case, use legal zero extending insert_subvector. This allows
6944       // isel to optimize when bits are known zero.
6945       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
6946       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6947                         DAG.getConstant(0, dl, WideOpVT),
6948                         Vec, ZeroIdx);
6949     } else {
6950       // Otherwise use explicit shifts to zero the bits.
6951       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6952                         Undef, Vec, ZeroIdx);
6953       NumElems = WideOpVT.getVectorNumElements();
6954       SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
6955       Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
6956       Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
6957     }
6958     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6959     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6960   }
6961 
6962   // Inserting into the middle is more complicated.
6963 
6964   NumElems = WideOpVT.getVectorNumElements();
6965 
6966   // Widen the vector if needed.
6967   Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
6968 
6969   unsigned ShiftLeft = NumElems - SubVecNumElems;
6970   unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
6971 
6972   // Do an optimization for the most frequently used types.
6973   if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
6974     APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
6975     Mask0.flipAllBits();
6976     SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
6977     SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
6978     Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
6979     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6980                          DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6981     SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6982                          DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6983     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6984 
6985     // Reduce to original width if needed.
6986     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6987   }
6988 
6989   // Clear the upper bits of the subvector and move it to its insert position.
6990   SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6991                        DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6992   SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6993                        DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6994 
6995   // Isolate the bits below the insertion point.
6996   unsigned LowShift = NumElems - IdxVal;
6997   SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
6998                             DAG.getTargetConstant(LowShift, dl, MVT::i8));
6999   Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
7000                     DAG.getTargetConstant(LowShift, dl, MVT::i8));
7001 
7002   // Isolate the bits after the last inserted bit.
7003   unsigned HighShift = IdxVal + SubVecNumElems;
7004   SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
7005                              DAG.getTargetConstant(HighShift, dl, MVT::i8));
7006   High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
7007                      DAG.getTargetConstant(HighShift, dl, MVT::i8));
7008 
7009   // Now OR all 3 pieces together.
7010   Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
7011   SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
7012 
7013   // Reduce to original width if needed.
7014   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
7015 }
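// Worked example for the shift-based middle insertion above (illustrative
// values only, not taken from a test): with NumElems = 16, IdxVal = 4 and
// SubVecNumElems = 4 we get ShiftLeft = 12 and ShiftRight = 8. Low keeps mask
// bits [0,4), High keeps bits [8,16), and SubVec is shifted left by 12 and
// back right by 8 so its bits land in [4,8) with everything else cleared;
// ORing the three pieces produces the inserted k-register value.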
7016 
7017 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
7018                                 const SDLoc &dl) {
7019   assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
7020   EVT SubVT = V1.getValueType();
7021   EVT SubSVT = SubVT.getScalarType();
7022   unsigned SubNumElts = SubVT.getVectorNumElements();
7023   unsigned SubVectorWidth = SubVT.getSizeInBits();
7024   EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
7025   SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
7026   return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
7027 }
7028 
7029 /// Returns a vector of specified type with all bits set.
7030 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
7031 /// Then bitcast to their original type, ensuring they get CSE'd.
7032 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
7033   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
7034          "Expected a 128/256/512-bit vector type");
7035 
7036   APInt Ones = APInt::getAllOnes(32);
7037   unsigned NumElts = VT.getSizeInBits() / 32;
7038   SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
7039   return DAG.getBitcast(VT, Vec);
7040 }
7041 
7042 static SDValue getEXTEND_VECTOR_INREG(unsigned Opcode, const SDLoc &DL, EVT VT,
7043                                       SDValue In, SelectionDAG &DAG) {
7044   EVT InVT = In.getValueType();
7045   assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
7046   assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
7047           ISD::ZERO_EXTEND == Opcode) &&
7048          "Unknown extension opcode");
7049 
7050   // For 256-bit vectors, we only need the lower (128-bit) input half.
7051   // For 512-bit vectors, we only need the lower input half or quarter.
7052   if (InVT.getSizeInBits() > 128) {
7053     assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
7054            "Expected VTs to be the same size!");
7055     unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
7056     In = extractSubVector(In, 0, DAG, DL,
7057                           std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
7058     InVT = In.getValueType();
7059   }
7060 
7061   if (VT.getVectorNumElements() != InVT.getVectorNumElements())
7062     Opcode = DAG.getOpcode_EXTEND_VECTOR_INREG(Opcode);
7063 
7064   return DAG.getNode(Opcode, DL, VT, In);
7065 }
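// Illustrative sketch of the widening behaviour above (assumed types, not
// from a test): extending a 256-bit v32i8 input to v8i32 computes Scale = 4,
// keeps only the low 128 bits (v16i8), and switches to the
// *_EXTEND_VECTOR_INREG form because the element counts no longer match, e.g.
//   getEXTEND_VECTOR_INREG(ISD::ZERO_EXTEND, DL, MVT::v8i32, In /*v32i8*/, DAG)
//   // -> (zero_extend_vector_inreg (extract_subvector In, 0):v16i8):v8i32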
7066 
7067 // Match (xor X, -1) -> X.
7068 // Match extract_subvector(xor X, -1) -> extract_subvector(X).
7069 // Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
7070 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
7071   V = peekThroughBitcasts(V);
7072   if (V.getOpcode() == ISD::XOR &&
7073       (ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()) ||
7074        isAllOnesConstant(V.getOperand(1))))
7075     return V.getOperand(0);
7076   if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
7077       (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
7078     if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
7079       Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
7080       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
7081                          Not, V.getOperand(1));
7082     }
7083   }
7084   SmallVector<SDValue, 2> CatOps;
7085   if (collectConcatOps(V.getNode(), CatOps, DAG)) {
7086     for (SDValue &CatOp : CatOps) {
7087       SDValue NotCat = IsNOT(CatOp, DAG);
7088       if (!NotCat) return SDValue();
7089       CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
7090     }
7091     return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
7092   }
7093   return SDValue();
7094 }
7095 
7096 void llvm::createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask,
7097                                    bool Lo, bool Unary) {
7098   assert(VT.getScalarType().isSimple() && (VT.getSizeInBits() % 128) == 0 &&
7099          "Illegal vector type to unpack");
7100   assert(Mask.empty() && "Expected an empty shuffle mask vector");
7101   int NumElts = VT.getVectorNumElements();
7102   int NumEltsInLane = 128 / VT.getScalarSizeInBits();
7103   for (int i = 0; i < NumElts; ++i) {
7104     unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
7105     int Pos = (i % NumEltsInLane) / 2 + LaneStart;
7106     Pos += (Unary ? 0 : NumElts * (i % 2));
7107     Pos += (Lo ? 0 : NumEltsInLane / 2);
7108     Mask.push_back(Pos);
7109   }
7110 }
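// Worked example (illustrative, mirrors the PUNPCKLWD pattern):
//   SmallVector<int, 8> M;
//   createUnpackShuffleMask(MVT::v8i16, M, /*Lo=*/true, /*Unary=*/false);
//   // M == {0, 8, 1, 9, 2, 10, 3, 11}
// For 256/512-bit types the same pattern repeats independently per 128-bit
// lane.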
7111 
7112 /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
7113 /// imposed by AVX and specific to the unary pattern. Example:
7114 /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
7115 /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
7116 void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
7117                                    bool Lo) {
7118   assert(Mask.empty() && "Expected an empty shuffle mask vector");
7119   int NumElts = VT.getVectorNumElements();
7120   for (int i = 0; i < NumElts; ++i) {
7121     int Pos = i / 2;
7122     Pos += (Lo ? 0 : NumElts / 2);
7123     Mask.push_back(Pos);
7124   }
7125 }
7126 
7127 // Attempt to constant fold, else just create a VECTOR_SHUFFLE.
7128 static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
7129                                 SDValue V1, SDValue V2, ArrayRef<int> Mask) {
7130   if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) || V1.isUndef()) &&
7131       (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) || V2.isUndef())) {
7132     SmallVector<SDValue> Ops(Mask.size(), DAG.getUNDEF(VT.getScalarType()));
7133     for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
7134       int M = Mask[I];
7135       if (M < 0)
7136         continue;
7137       SDValue V = (M < NumElts) ? V1 : V2;
7138       if (V.isUndef())
7139         continue;
7140       Ops[I] = V.getOperand(M % NumElts);
7141     }
7142     return DAG.getBuildVector(VT, dl, Ops);
7143   }
7144 
7145   return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
7146 }
7147 
7148 /// Returns a vector_shuffle node for an unpackl operation.
7149 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
7150                           SDValue V1, SDValue V2) {
7151   SmallVector<int, 8> Mask;
7152   createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
7153   return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
7154 }
7155 
7156 /// Returns a vector_shuffle node for an unpackh operation.
7157 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
7158                           SDValue V1, SDValue V2) {
7159   SmallVector<int, 8> Mask;
7160   createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
7161   return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
7162 }
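// Illustrative use of the two helpers above (hypothetical operands A and B):
// for MVT::v4f32 they produce the classic UNPCKLPS/UNPCKHPS patterns, e.g.
//   getUnpackl(DAG, dl, MVT::v4f32, A, B); // shuffle mask <0, 4, 1, 5>
//   getUnpackh(DAG, dl, MVT::v4f32, A, B); // shuffle mask <2, 6, 3, 7>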
7163 
7164 /// Returns a node that packs the LHS + RHS nodes together at half width.
7165 /// May return X86ISD::PACKSS/PACKUS, packing the top/bottom half.
7166 /// TODO: Add subvector splitting if/when we have a need for it.
7167 static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
7168                        const SDLoc &dl, MVT VT, SDValue LHS, SDValue RHS,
7169                        bool PackHiHalf = false) {
7170   MVT OpVT = LHS.getSimpleValueType();
7171   unsigned EltSizeInBits = VT.getScalarSizeInBits();
7172   bool UsePackUS = Subtarget.hasSSE41() || EltSizeInBits == 8;
7173   assert(OpVT == RHS.getSimpleValueType() &&
7174          VT.getSizeInBits() == OpVT.getSizeInBits() &&
7175          (EltSizeInBits * 2) == OpVT.getScalarSizeInBits() &&
7176          "Unexpected PACK operand types");
7177   assert((EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) &&
7178          "Unexpected PACK result type");
7179 
7180   // Rely on vector shuffles for vXi64 -> vXi32 packing.
7181   if (EltSizeInBits == 32) {
7182     SmallVector<int> PackMask;
7183     int Offset = PackHiHalf ? 1 : 0;
7184     int NumElts = VT.getVectorNumElements();
7185     for (int I = 0; I != NumElts; I += 4) {
7186       PackMask.push_back(I + Offset);
7187       PackMask.push_back(I + Offset + 2);
7188       PackMask.push_back(I + Offset + NumElts);
7189       PackMask.push_back(I + Offset + NumElts + 2);
7190     }
7191     return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, LHS),
7192                                 DAG.getBitcast(VT, RHS), PackMask);
7193   }
7194 
7195   // See if we already have sufficient leading bits for PACKSS/PACKUS.
7196   if (!PackHiHalf) {
7197     if (UsePackUS &&
7198         DAG.computeKnownBits(LHS).countMaxActiveBits() <= EltSizeInBits &&
7199         DAG.computeKnownBits(RHS).countMaxActiveBits() <= EltSizeInBits)
7200       return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
7201 
7202     if (DAG.ComputeMaxSignificantBits(LHS) <= EltSizeInBits &&
7203         DAG.ComputeMaxSignificantBits(RHS) <= EltSizeInBits)
7204       return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
7205   }
7206 
7207   // Fallback to sign/zero extending the requested half and pack.
7208   SDValue Amt = DAG.getTargetConstant(EltSizeInBits, dl, MVT::i8);
7209   if (UsePackUS) {
7210     if (PackHiHalf) {
7211       LHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, LHS, Amt);
7212       RHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, RHS, Amt);
7213     } else {
7214       SDValue Mask = DAG.getConstant((1ULL << EltSizeInBits) - 1, dl, OpVT);
7215       LHS = DAG.getNode(ISD::AND, dl, OpVT, LHS, Mask);
7216       RHS = DAG.getNode(ISD::AND, dl, OpVT, RHS, Mask);
7217     }
7218     return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
7219   }
7220 
7221   if (!PackHiHalf) {
7222     LHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, LHS, Amt);
7223     RHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, RHS, Amt);
7224   }
7225   LHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, LHS, Amt);
7226   RHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, RHS, Amt);
7227   return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
7228 }
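// Schematic example for getPack (assumed operands, not from a test): packing
// two v8i16 values into v16i8 with PackHiHalf == false keeps the low byte of
// every 16-bit element; if the inputs are already known to fit in 8 bits this
// emits a single PACKUS, otherwise each 16-bit element is masked with 0xFF
// before the PACKUS:
//   getPack(DAG, Subtarget, dl, MVT::v16i8, LHS /*v8i16*/, RHS /*v8i16*/);
//   // result bytes 0..7  = low bytes of LHS elements 0..7
//   // result bytes 8..15 = low bytes of RHS elements 0..7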
7229 
7230 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
7231 /// This produces a shuffle where the low element of V2 is swizzled into the
7232 /// zero/undef vector, landing at element Idx.
7233 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
7234 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
7235                                            bool IsZero,
7236                                            const X86Subtarget &Subtarget,
7237                                            SelectionDAG &DAG) {
7238   MVT VT = V2.getSimpleValueType();
7239   SDValue V1 = IsZero
7240     ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
7241   int NumElems = VT.getVectorNumElements();
7242   SmallVector<int, 16> MaskVec(NumElems);
7243   for (int i = 0; i != NumElems; ++i)
7244     // If this is the insertion idx, put the low elt of V2 here.
7245     MaskVec[i] = (i == Idx) ? NumElems : i;
7246   return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
7247 }
7248 
7249 static const Constant *getTargetConstantFromBasePtr(SDValue Ptr) {
7250   if (Ptr.getOpcode() == X86ISD::Wrapper ||
7251       Ptr.getOpcode() == X86ISD::WrapperRIP)
7252     Ptr = Ptr.getOperand(0);
7253 
7254   auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
7255   if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
7256     return nullptr;
7257 
7258   return CNode->getConstVal();
7259 }
7260 
7261 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
7262   if (!Load || !ISD::isNormalLoad(Load))
7263     return nullptr;
7264   return getTargetConstantFromBasePtr(Load->getBasePtr());
7265 }
7266 
7267 static const Constant *getTargetConstantFromNode(SDValue Op) {
7268   Op = peekThroughBitcasts(Op);
7269   return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
7270 }
7271 
7272 const Constant *
7273 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
7274   assert(LD && "Unexpected null LoadSDNode");
7275   return getTargetConstantFromNode(LD);
7276 }
7277 
7278 // Extract raw constant bits from constant pools.
7279 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
7280                                           APInt &UndefElts,
7281                                           SmallVectorImpl<APInt> &EltBits,
7282                                           bool AllowWholeUndefs = true,
7283                                           bool AllowPartialUndefs = true) {
7284   assert(EltBits.empty() && "Expected an empty EltBits vector");
7285 
7286   Op = peekThroughBitcasts(Op);
7287 
7288   EVT VT = Op.getValueType();
7289   unsigned SizeInBits = VT.getSizeInBits();
7290   assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
7291   unsigned NumElts = SizeInBits / EltSizeInBits;
7292 
7293   // Bitcast a source array of element bits to the target size.
7294   auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
7295     unsigned NumSrcElts = UndefSrcElts.getBitWidth();
7296     unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
7297     assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
7298            "Constant bit sizes don't match");
7299 
7300     // Don't split if we don't allow undef bits.
7301     bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
7302     if (UndefSrcElts.getBoolValue() && !AllowUndefs)
7303       return false;
7304 
7305     // If we're already the right size, don't bother bitcasting.
7306     if (NumSrcElts == NumElts) {
7307       UndefElts = UndefSrcElts;
7308       EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
7309       return true;
7310     }
7311 
7312     // Extract all the undef/constant element data and pack into single bitsets.
7313     APInt UndefBits(SizeInBits, 0);
7314     APInt MaskBits(SizeInBits, 0);
7315 
7316     for (unsigned i = 0; i != NumSrcElts; ++i) {
7317       unsigned BitOffset = i * SrcEltSizeInBits;
7318       if (UndefSrcElts[i])
7319         UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
7320       MaskBits.insertBits(SrcEltBits[i], BitOffset);
7321     }
7322 
7323     // Split the undef/constant single bitset data into the target elements.
7324     UndefElts = APInt(NumElts, 0);
7325     EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
7326 
7327     for (unsigned i = 0; i != NumElts; ++i) {
7328       unsigned BitOffset = i * EltSizeInBits;
7329       APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
7330 
7331       // Only treat an element as UNDEF if all bits are UNDEF.
7332       if (UndefEltBits.isAllOnes()) {
7333         if (!AllowWholeUndefs)
7334           return false;
7335         UndefElts.setBit(i);
7336         continue;
7337       }
7338 
7339       // If only some bits are UNDEF then treat them as zero (or bail if not
7340       // supported).
7341       if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
7342         return false;
7343 
7344       EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
7345     }
7346     return true;
7347   };
7348 
7349   // Collect constant bits and insert into mask/undef bit masks.
7350   auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
7351                                 unsigned UndefBitIndex) {
7352     if (!Cst)
7353       return false;
7354     if (isa<UndefValue>(Cst)) {
7355       Undefs.setBit(UndefBitIndex);
7356       return true;
7357     }
7358     if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
7359       Mask = CInt->getValue();
7360       return true;
7361     }
7362     if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
7363       Mask = CFP->getValueAPF().bitcastToAPInt();
7364       return true;
7365     }
7366     return false;
7367   };
7368 
7369   // Handle UNDEFs.
7370   if (Op.isUndef()) {
7371     APInt UndefSrcElts = APInt::getAllOnes(NumElts);
7372     SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
7373     return CastBitData(UndefSrcElts, SrcEltBits);
7374   }
7375 
7376   // Extract scalar constant bits.
7377   if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
7378     APInt UndefSrcElts = APInt::getZero(1);
7379     SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
7380     return CastBitData(UndefSrcElts, SrcEltBits);
7381   }
7382   if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
7383     APInt UndefSrcElts = APInt::getZero(1);
7384     APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
7385     SmallVector<APInt, 64> SrcEltBits(1, RawBits);
7386     return CastBitData(UndefSrcElts, SrcEltBits);
7387   }
7388 
7389   // Extract constant bits from build vector.
7390   if (auto *BV = dyn_cast<BuildVectorSDNode>(Op)) {
7391     BitVector Undefs;
7392     SmallVector<APInt> SrcEltBits;
7393     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7394     if (BV->getConstantRawBits(true, SrcEltSizeInBits, SrcEltBits, Undefs)) {
7395       APInt UndefSrcElts = APInt::getZero(SrcEltBits.size());
7396       for (unsigned I = 0, E = SrcEltBits.size(); I != E; ++I)
7397         if (Undefs[I])
7398           UndefSrcElts.setBit(I);
7399       return CastBitData(UndefSrcElts, SrcEltBits);
7400     }
7401   }
7402 
7403   // Extract constant bits from constant pool vector.
7404   if (auto *Cst = getTargetConstantFromNode(Op)) {
7405     Type *CstTy = Cst->getType();
7406     unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
7407     if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
7408       return false;
7409 
7410     unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
7411     unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7412 
7413     APInt UndefSrcElts(NumSrcElts, 0);
7414     SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
7415     for (unsigned i = 0; i != NumSrcElts; ++i)
7416       if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
7417                                UndefSrcElts, i))
7418         return false;
7419 
7420     return CastBitData(UndefSrcElts, SrcEltBits);
7421   }
7422 
7423   // Extract constant bits from a broadcasted constant pool scalar.
7424   if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
7425       EltSizeInBits <= VT.getScalarSizeInBits()) {
7426     auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
7427     if (MemIntr->getMemoryVT().getScalarSizeInBits() != VT.getScalarSizeInBits())
7428       return false;
7429 
7430     SDValue Ptr = MemIntr->getBasePtr();
7431     if (const Constant *C = getTargetConstantFromBasePtr(Ptr)) {
7432       unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
7433       unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7434 
7435       APInt UndefSrcElts(NumSrcElts, 0);
7436       SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
7437       if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
7438         if (UndefSrcElts[0])
7439           UndefSrcElts.setBits(0, NumSrcElts);
7440         SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
7441         return CastBitData(UndefSrcElts, SrcEltBits);
7442       }
7443     }
7444   }
7445 
7446   // Extract constant bits from a subvector broadcast.
7447   if (Op.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
7448     auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
7449     SDValue Ptr = MemIntr->getBasePtr();
7450     // The source constant may be larger than the subvector broadcast, so
7451     // ensure we extract the correct subvector constants.
7452     if (const Constant *Cst = getTargetConstantFromBasePtr(Ptr)) {
7453       Type *CstTy = Cst->getType();
7454       unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
7455       unsigned SubVecSizeInBits = MemIntr->getMemoryVT().getStoreSizeInBits();
7456       if (!CstTy->isVectorTy() || (CstSizeInBits % SubVecSizeInBits) != 0 ||
7457           (SizeInBits % SubVecSizeInBits) != 0)
7458         return false;
7459       unsigned CstEltSizeInBits = CstTy->getScalarSizeInBits();
7460       unsigned NumSubElts = SubVecSizeInBits / CstEltSizeInBits;
7461       unsigned NumSubVecs = SizeInBits / SubVecSizeInBits;
7462       APInt UndefSubElts(NumSubElts, 0);
7463       SmallVector<APInt, 64> SubEltBits(NumSubElts * NumSubVecs,
7464                                         APInt(CstEltSizeInBits, 0));
7465       for (unsigned i = 0; i != NumSubElts; ++i) {
7466         if (!CollectConstantBits(Cst->getAggregateElement(i), SubEltBits[i],
7467                                  UndefSubElts, i))
7468           return false;
7469         for (unsigned j = 1; j != NumSubVecs; ++j)
7470           SubEltBits[i + (j * NumSubElts)] = SubEltBits[i];
7471       }
7472       UndefSubElts = APInt::getSplat(NumSubVecs * UndefSubElts.getBitWidth(),
7473                                      UndefSubElts);
7474       return CastBitData(UndefSubElts, SubEltBits);
7475     }
7476   }
7477 
7478   // Extract a rematerialized scalar constant insertion.
7479   if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
7480       Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
7481       isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
7482     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7483     unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
7484 
7485     APInt UndefSrcElts(NumSrcElts, 0);
7486     SmallVector<APInt, 64> SrcEltBits;
7487     auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
7488     SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
7489     SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
7490     return CastBitData(UndefSrcElts, SrcEltBits);
7491   }
7492 
7493   // Insert constant bits from base and sub-vector sources.
7494   if (Op.getOpcode() == ISD::INSERT_SUBVECTOR) {
7495     // If we bitcast to larger elements we might lose track of undefs - to be
7496     // safe, don't allow any.
7497     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
7498     bool AllowUndefs = EltSizeInBits >= SrcEltSizeInBits;
7499 
7500     APInt UndefSrcElts, UndefSubElts;
7501     SmallVector<APInt, 32> EltSrcBits, EltSubBits;
7502     if (getTargetConstantBitsFromNode(Op.getOperand(1), SrcEltSizeInBits,
7503                                       UndefSubElts, EltSubBits,
7504                                       AllowWholeUndefs && AllowUndefs,
7505                                       AllowPartialUndefs && AllowUndefs) &&
7506         getTargetConstantBitsFromNode(Op.getOperand(0), SrcEltSizeInBits,
7507                                       UndefSrcElts, EltSrcBits,
7508                                       AllowWholeUndefs && AllowUndefs,
7509                                       AllowPartialUndefs && AllowUndefs)) {
7510       unsigned BaseIdx = Op.getConstantOperandVal(2);
7511       UndefSrcElts.insertBits(UndefSubElts, BaseIdx);
7512       for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
7513         EltSrcBits[BaseIdx + i] = EltSubBits[i];
7514       return CastBitData(UndefSrcElts, EltSrcBits);
7515     }
7516   }
7517 
7518   // Extract constant bits from a subvector's source.
7519   if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
7520     // TODO - support extract_subvector through bitcasts.
7521     if (EltSizeInBits != VT.getScalarSizeInBits())
7522       return false;
7523 
7524     if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
7525                                       UndefElts, EltBits, AllowWholeUndefs,
7526                                       AllowPartialUndefs)) {
7527       EVT SrcVT = Op.getOperand(0).getValueType();
7528       unsigned NumSrcElts = SrcVT.getVectorNumElements();
7529       unsigned NumSubElts = VT.getVectorNumElements();
7530       unsigned BaseIdx = Op.getConstantOperandVal(1);
7531       UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
7532       if ((BaseIdx + NumSubElts) != NumSrcElts)
7533         EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
7534       if (BaseIdx != 0)
7535         EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
7536       return true;
7537     }
7538   }
7539 
7540   // Extract constant bits from shuffle node sources.
7541   if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
7542     // TODO - support shuffle through bitcasts.
7543     if (EltSizeInBits != VT.getScalarSizeInBits())
7544       return false;
7545 
7546     ArrayRef<int> Mask = SVN->getMask();
7547     if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
7548         llvm::any_of(Mask, [](int M) { return M < 0; }))
7549       return false;
7550 
7551     APInt UndefElts0, UndefElts1;
7552     SmallVector<APInt, 32> EltBits0, EltBits1;
7553     if (isAnyInRange(Mask, 0, NumElts) &&
7554         !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
7555                                        UndefElts0, EltBits0, AllowWholeUndefs,
7556                                        AllowPartialUndefs))
7557       return false;
7558     if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
7559         !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
7560                                        UndefElts1, EltBits1, AllowWholeUndefs,
7561                                        AllowPartialUndefs))
7562       return false;
7563 
7564     UndefElts = APInt::getZero(NumElts);
7565     for (int i = 0; i != (int)NumElts; ++i) {
7566       int M = Mask[i];
7567       if (M < 0) {
7568         UndefElts.setBit(i);
7569         EltBits.push_back(APInt::getZero(EltSizeInBits));
7570       } else if (M < (int)NumElts) {
7571         if (UndefElts0[M])
7572           UndefElts.setBit(i);
7573         EltBits.push_back(EltBits0[M]);
7574       } else {
7575         if (UndefElts1[M - NumElts])
7576           UndefElts.setBit(i);
7577         EltBits.push_back(EltBits1[M - NumElts]);
7578       }
7579     }
7580     return true;
7581   }
7582 
7583   return false;
7584 }
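// Worked example of the bit repacking performed by CastBitData above
// (illustrative constant, not from a test): asking for 16-bit elements from a
// v2i32 constant {0xAAAABBBB, 0xCCCCDDDD} yields
// EltBits == {0xBBBB, 0xAAAA, 0xDDDD, 0xCCCC}, i.e. element i of the result is
// bits [i*16, i*16+16) of the concatenated constant data.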
7585 
7586 namespace llvm {
7587 namespace X86 {
7588 bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) {
7589   APInt UndefElts;
7590   SmallVector<APInt, 16> EltBits;
7591   if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
7592                                     UndefElts, EltBits, true,
7593                                     AllowPartialUndefs)) {
7594     int SplatIndex = -1;
7595     for (int i = 0, e = EltBits.size(); i != e; ++i) {
7596       if (UndefElts[i])
7597         continue;
7598       if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
7599         SplatIndex = -1;
7600         break;
7601       }
7602       SplatIndex = i;
7603     }
7604     if (0 <= SplatIndex) {
7605       SplatVal = EltBits[SplatIndex];
7606       return true;
7607     }
7608   }
7609 
7610   return false;
7611 }
7612 } // namespace X86
7613 } // namespace llvm
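// Illustrative use of X86::isConstantSplat (hypothetical node V): for a
// BUILD_VECTOR <i32 42, i32 42, i32 undef, i32 42> the undef lane is skipped
// and the call succeeds with SplatVal == 42; it fails if two defined lanes
// disagree or if every lane is undef.
//   APInt Splat;
//   if (X86::isConstantSplat(V, Splat, /*AllowPartialUndefs=*/true)) { ... }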
7614 
7615 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
7616                                         unsigned MaskEltSizeInBits,
7617                                         SmallVectorImpl<uint64_t> &RawMask,
7618                                         APInt &UndefElts) {
7619   // Extract the raw target constant bits.
7620   SmallVector<APInt, 64> EltBits;
7621   if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
7622                                      EltBits, /* AllowWholeUndefs */ true,
7623                                      /* AllowPartialUndefs */ false))
7624     return false;
7625 
7626   // Insert the extracted elements into the mask.
7627   for (const APInt &Elt : EltBits)
7628     RawMask.push_back(Elt.getZExtValue());
7629 
7630   return true;
7631 }
7632 
7633 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
7634 /// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
7635 /// Note: This ignores saturation, so inputs must be checked first.
7636 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
7637                                   bool Unary, unsigned NumStages = 1) {
7638   assert(Mask.empty() && "Expected an empty shuffle mask vector");
7639   unsigned NumElts = VT.getVectorNumElements();
7640   unsigned NumLanes = VT.getSizeInBits() / 128;
7641   unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
7642   unsigned Offset = Unary ? 0 : NumElts;
7643   unsigned Repetitions = 1u << (NumStages - 1);
7644   unsigned Increment = 1u << NumStages;
7645   assert((NumEltsPerLane >> NumStages) > 0 && "Illegal packing compaction");
7646 
7647   for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
7648     for (unsigned Stage = 0; Stage != Repetitions; ++Stage) {
7649       for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
7650         Mask.push_back(Elt + (Lane * NumEltsPerLane));
7651       for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
7652         Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
7653     }
7654   }
7655 }
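// Worked example (illustrative): a single-stage binary pack of two v8i16
// inputs viewed as v16i8 takes every other byte from each source:
//   SmallVector<int, 16> M;
//   createPackShuffleMask(MVT::v16i8, M, /*Unary=*/false);
//   // M == {0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30}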
7656 
7657 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
7658 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
7659                                 APInt &DemandedLHS, APInt &DemandedRHS) {
7660   int NumLanes = VT.getSizeInBits() / 128;
7661   int NumElts = DemandedElts.getBitWidth();
7662   int NumInnerElts = NumElts / 2;
7663   int NumEltsPerLane = NumElts / NumLanes;
7664   int NumInnerEltsPerLane = NumInnerElts / NumLanes;
7665 
7666   DemandedLHS = APInt::getZero(NumInnerElts);
7667   DemandedRHS = APInt::getZero(NumInnerElts);
7668 
7669   // Map DemandedElts to the packed operands.
7670   for (int Lane = 0; Lane != NumLanes; ++Lane) {
7671     for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
7672       int OuterIdx = (Lane * NumEltsPerLane) + Elt;
7673       int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
7674       if (DemandedElts[OuterIdx])
7675         DemandedLHS.setBit(InnerIdx);
7676       if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
7677         DemandedRHS.setBit(InnerIdx);
7678     }
7679   }
7680 }
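// Schematic example (illustrative): for a v16i8 PACK result, demanding output
// elements 3 and 11 maps to element 3 of the LHS operand and element 3 of the
// RHS operand respectively, since each 128-bit lane of the result holds the
// packed LHS elements followed by the packed RHS elements.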
7681 
7682 // Split the demanded elts of a HADD/HSUB node between its operands.
7683 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
7684                                  APInt &DemandedLHS, APInt &DemandedRHS) {
7685   int NumLanes = VT.getSizeInBits() / 128;
7686   int NumElts = DemandedElts.getBitWidth();
7687   int NumEltsPerLane = NumElts / NumLanes;
7688   int HalfEltsPerLane = NumEltsPerLane / 2;
7689 
7690   DemandedLHS = APInt::getZero(NumElts);
7691   DemandedRHS = APInt::getZero(NumElts);
7692 
7693   // Map DemandedElts to the horizontal operands.
7694   for (int Idx = 0; Idx != NumElts; ++Idx) {
7695     if (!DemandedElts[Idx])
7696       continue;
7697     int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
7698     int LocalIdx = Idx % NumEltsPerLane;
7699     if (LocalIdx < HalfEltsPerLane) {
7700       DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
7701       DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
7702     } else {
7703       LocalIdx -= HalfEltsPerLane;
7704       DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
7705       DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
7706     }
7707   }
7708 }
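// Schematic example (illustrative): for a v8i32 HADD/HSUB, demanding result
// element 1 (lane 0, second low-half slot) demands LHS elements 2 and 3,
// while demanding result element 2 (first high-half slot) demands RHS
// elements 0 and 1; lane 1 follows the same pattern with indices offset by 4.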
7709 
7710 /// Calculates the shuffle mask corresponding to the target-specific opcode.
7711 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
7712 /// operands in \p Ops, and returns true.
7713 /// Sets \p IsUnary to true if only one source is used. Note that this will set
7714 /// IsUnary for shuffles which use a single input multiple times, and in those
7715 /// cases it will adjust the mask to only have indices within that single input.
7716 /// It is an error to call this with non-empty Mask/Ops vectors.
7717 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
7718                                  SmallVectorImpl<SDValue> &Ops,
7719                                  SmallVectorImpl<int> &Mask, bool &IsUnary) {
7720   unsigned NumElems = VT.getVectorNumElements();
7721   unsigned MaskEltSize = VT.getScalarSizeInBits();
7722   SmallVector<uint64_t, 32> RawMask;
7723   APInt RawUndefs;
7724   uint64_t ImmN;
7725 
7726   assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
7727   assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
7728 
7729   IsUnary = false;
7730   bool IsFakeUnary = false;
7731   switch (N->getOpcode()) {
7732   case X86ISD::BLENDI:
7733     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7734     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7735     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7736     DecodeBLENDMask(NumElems, ImmN, Mask);
7737     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7738     break;
7739   case X86ISD::SHUFP:
7740     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7741     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7742     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7743     DecodeSHUFPMask(NumElems, MaskEltSize, ImmN, Mask);
7744     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7745     break;
7746   case X86ISD::INSERTPS:
7747     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7748     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7749     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7750     DecodeINSERTPSMask(ImmN, Mask);
7751     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7752     break;
7753   case X86ISD::EXTRQI:
7754     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7755     if (isa<ConstantSDNode>(N->getOperand(1)) &&
7756         isa<ConstantSDNode>(N->getOperand(2))) {
7757       int BitLen = N->getConstantOperandVal(1);
7758       int BitIdx = N->getConstantOperandVal(2);
7759       DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
7760       IsUnary = true;
7761     }
7762     break;
7763   case X86ISD::INSERTQI:
7764     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7765     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7766     if (isa<ConstantSDNode>(N->getOperand(2)) &&
7767         isa<ConstantSDNode>(N->getOperand(3))) {
7768       int BitLen = N->getConstantOperandVal(2);
7769       int BitIdx = N->getConstantOperandVal(3);
7770       DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
7771       IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7772     }
7773     break;
7774   case X86ISD::UNPCKH:
7775     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7776     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7777     DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
7778     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7779     break;
7780   case X86ISD::UNPCKL:
7781     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7782     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7783     DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
7784     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7785     break;
7786   case X86ISD::MOVHLPS:
7787     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7788     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7789     DecodeMOVHLPSMask(NumElems, Mask);
7790     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7791     break;
7792   case X86ISD::MOVLHPS:
7793     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7794     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7795     DecodeMOVLHPSMask(NumElems, Mask);
7796     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7797     break;
7798   case X86ISD::VALIGN:
7799     assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
7800            "Only 32-bit and 64-bit elements are supported!");
7801     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7802     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7803     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7804     DecodeVALIGNMask(NumElems, ImmN, Mask);
7805     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7806     Ops.push_back(N->getOperand(1));
7807     Ops.push_back(N->getOperand(0));
7808     break;
7809   case X86ISD::PALIGNR:
7810     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7811     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7812     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7813     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7814     DecodePALIGNRMask(NumElems, ImmN, Mask);
7815     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7816     Ops.push_back(N->getOperand(1));
7817     Ops.push_back(N->getOperand(0));
7818     break;
7819   case X86ISD::VSHLDQ:
7820     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7821     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7822     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7823     DecodePSLLDQMask(NumElems, ImmN, Mask);
7824     IsUnary = true;
7825     break;
7826   case X86ISD::VSRLDQ:
7827     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7828     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7829     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7830     DecodePSRLDQMask(NumElems, ImmN, Mask);
7831     IsUnary = true;
7832     break;
7833   case X86ISD::PSHUFD:
7834   case X86ISD::VPERMILPI:
7835     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7836     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7837     DecodePSHUFMask(NumElems, MaskEltSize, ImmN, Mask);
7838     IsUnary = true;
7839     break;
7840   case X86ISD::PSHUFHW:
7841     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7842     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7843     DecodePSHUFHWMask(NumElems, ImmN, Mask);
7844     IsUnary = true;
7845     break;
7846   case X86ISD::PSHUFLW:
7847     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7848     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7849     DecodePSHUFLWMask(NumElems, ImmN, Mask);
7850     IsUnary = true;
7851     break;
7852   case X86ISD::VZEXT_MOVL:
7853     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7854     DecodeZeroMoveLowMask(NumElems, Mask);
7855     IsUnary = true;
7856     break;
7857   case X86ISD::VBROADCAST:
7858     // We only decode broadcasts of same-sized vectors; peeking through to
7859     // extracted subvectors is likely to cause hasOneUse issues with
7860     // SimplifyDemandedBits etc.
7861     if (N->getOperand(0).getValueType() == VT) {
7862       DecodeVectorBroadcast(NumElems, Mask);
7863       IsUnary = true;
7864       break;
7865     }
7866     return false;
7867   case X86ISD::VPERMILPV: {
7868     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7869     IsUnary = true;
7870     SDValue MaskNode = N->getOperand(1);
7871     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7872                                     RawUndefs)) {
7873       DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
7874       break;
7875     }
7876     return false;
7877   }
7878   case X86ISD::PSHUFB: {
7879     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
7880     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7881     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7882     IsUnary = true;
7883     SDValue MaskNode = N->getOperand(1);
7884     if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
7885       DecodePSHUFBMask(RawMask, RawUndefs, Mask);
7886       break;
7887     }
7888     return false;
7889   }
7890   case X86ISD::VPERMI:
7891     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7892     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7893     DecodeVPERMMask(NumElems, ImmN, Mask);
7894     IsUnary = true;
7895     break;
7896   case X86ISD::MOVSS:
7897   case X86ISD::MOVSD:
7898   case X86ISD::MOVSH:
7899     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7900     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7901     DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
7902     break;
7903   case X86ISD::VPERM2X128:
7904     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7905     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7906     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7907     DecodeVPERM2X128Mask(NumElems, ImmN, Mask);
7908     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7909     break;
7910   case X86ISD::SHUF128:
7911     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7912     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7913     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
7914     decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize, ImmN, Mask);
7915     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7916     break;
7917   case X86ISD::MOVSLDUP:
7918     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7919     DecodeMOVSLDUPMask(NumElems, Mask);
7920     IsUnary = true;
7921     break;
7922   case X86ISD::MOVSHDUP:
7923     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7924     DecodeMOVSHDUPMask(NumElems, Mask);
7925     IsUnary = true;
7926     break;
7927   case X86ISD::MOVDDUP:
7928     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7929     DecodeMOVDDUPMask(NumElems, Mask);
7930     IsUnary = true;
7931     break;
7932   case X86ISD::VPERMIL2: {
7933     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7934     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7935     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7936     SDValue MaskNode = N->getOperand(2);
7937     SDValue CtrlNode = N->getOperand(3);
7938     if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
7939       unsigned CtrlImm = CtrlOp->getZExtValue();
7940       if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7941                                       RawUndefs)) {
7942         DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
7943                             Mask);
7944         break;
7945       }
7946     }
7947     return false;
7948   }
7949   case X86ISD::VPPERM: {
7950     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7951     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7952     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7953     SDValue MaskNode = N->getOperand(2);
7954     if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
7955       DecodeVPPERMMask(RawMask, RawUndefs, Mask);
7956       break;
7957     }
7958     return false;
7959   }
7960   case X86ISD::VPERMV: {
7961     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7962     IsUnary = true;
7963     // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
7964     Ops.push_back(N->getOperand(1));
7965     SDValue MaskNode = N->getOperand(0);
7966     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7967                                     RawUndefs)) {
7968       DecodeVPERMVMask(RawMask, RawUndefs, Mask);
7969       break;
7970     }
7971     return false;
7972   }
7973   case X86ISD::VPERMV3: {
7974     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7975     assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
7976     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
7977     // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
7978     Ops.push_back(N->getOperand(0));
7979     Ops.push_back(N->getOperand(2));
7980     SDValue MaskNode = N->getOperand(1);
7981     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7982                                     RawUndefs)) {
7983       DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
7984       break;
7985     }
7986     return false;
7987   }
7988   default: llvm_unreachable("unknown target shuffle node");
7989   }
7990 
7991   // Empty mask indicates the decode failed.
7992   if (Mask.empty())
7993     return false;
7994 
7995   // Check if we're getting a shuffle mask with zero'd elements.
7996   if (!AllowSentinelZero && isAnyZero(Mask))
7997     return false;
7998 
7999   // If we have a fake unary shuffle, the shuffle mask is spread across two
8000   // inputs that are actually the same node. Re-map the mask to always point
8001   // into the first input.
8002   if (IsFakeUnary)
8003     for (int &M : Mask)
8004       if (M >= (int)Mask.size())
8005         M -= Mask.size();
8006 
8007   // If we didn't already add operands in the opcode-specific code, default to
8008   // adding 1 or 2 operands starting at 0.
8009   if (Ops.empty()) {
8010     Ops.push_back(N->getOperand(0));
8011     if (!IsUnary || IsFakeUnary)
8012       Ops.push_back(N->getOperand(1));
8013   }
8014 
8015   return true;
8016 }
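// Illustrative decode (hypothetical node): for an X86ISD::UNPCKL of two v4i32
// operands this produces Mask == {0, 4, 1, 5}; if both operands are the same
// node the shuffle is treated as fake-unary, the mask is remapped to
// {0, 0, 1, 1}, and the single input appears twice in Ops.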
8017 
8018 // Wrapper for getTargetShuffleMask that discards the IsUnary result.
8019 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
8020                                  SmallVectorImpl<SDValue> &Ops,
8021                                  SmallVectorImpl<int> &Mask) {
8022   bool IsUnary;
8023   return getTargetShuffleMask(N, VT, AllowSentinelZero, Ops, Mask, IsUnary);
8024 }
8025 
8026 /// Compute whether each element of a shuffle is zeroable.
8027 ///
8028 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
8029 /// Either it is an undef element in the shuffle mask, the element of the input
8030 /// referenced is undef, or the element of the input referenced is known to be
8031 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
8032 /// as many lanes with this technique as possible to simplify the remaining
8033 /// shuffle.
8034 static void computeZeroableShuffleElements(ArrayRef<int> Mask,
8035                                            SDValue V1, SDValue V2,
8036                                            APInt &KnownUndef, APInt &KnownZero) {
8037   int Size = Mask.size();
8038   KnownUndef = KnownZero = APInt::getZero(Size);
8039 
8040   V1 = peekThroughBitcasts(V1);
8041   V2 = peekThroughBitcasts(V2);
8042 
8043   bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
8044   bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
8045 
8046   int VectorSizeInBits = V1.getValueSizeInBits();
8047   int ScalarSizeInBits = VectorSizeInBits / Size;
8048   assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
8049 
8050   for (int i = 0; i < Size; ++i) {
8051     int M = Mask[i];
8052     // Handle the easy cases.
8053     if (M < 0) {
8054       KnownUndef.setBit(i);
8055       continue;
8056     }
8057     if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
8058       KnownZero.setBit(i);
8059       continue;
8060     }
8061 
8062     // Determine shuffle input and normalize the mask.
8063     SDValue V = M < Size ? V1 : V2;
8064     M %= Size;
8065 
8066     // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
8067     if (V.getOpcode() != ISD::BUILD_VECTOR)
8068       continue;
8069 
8070     // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
8071     // the (larger) source element must be UNDEF/ZERO.
8072     if ((Size % V.getNumOperands()) == 0) {
8073       int Scale = Size / V->getNumOperands();
8074       SDValue Op = V.getOperand(M / Scale);
8075       if (Op.isUndef())
8076         KnownUndef.setBit(i);
8077       if (X86::isZeroNode(Op))
8078         KnownZero.setBit(i);
8079       else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
8080         APInt Val = Cst->getAPIntValue();
8081         Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
8082         if (Val == 0)
8083           KnownZero.setBit(i);
8084       } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
8085         APInt Val = Cst->getValueAPF().bitcastToAPInt();
8086         Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
8087         if (Val == 0)
8088           KnownZero.setBit(i);
8089       }
8090       continue;
8091     }
8092 
8093     // If the BUILD_VECTOR has more elements, then all the (smaller) source
8094     // elements must be UNDEF or ZERO.
8095     if ((V.getNumOperands() % Size) == 0) {
8096       int Scale = V->getNumOperands() / Size;
8097       bool AllUndef = true;
8098       bool AllZero = true;
8099       for (int j = 0; j < Scale; ++j) {
8100         SDValue Op = V.getOperand((M * Scale) + j);
8101         AllUndef &= Op.isUndef();
8102         AllZero &= X86::isZeroNode(Op);
8103       }
8104       if (AllUndef)
8105         KnownUndef.setBit(i);
8106       if (AllZero)
8107         KnownZero.setBit(i);
8108       continue;
8109     }
8110   }
8111 }
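// Worked example (hypothetical inputs): shuffling
//   V1 = BUILD_VECTOR <i32 0, i32 7, i32 undef, i32 3>, V2 = all-zeros
// with Mask = <0, 2, 5, -1> marks element 0 as known zero (V1[0] is the zero
// constant), element 1 as known undef (V1[2] is undef), element 2 as known
// zero (it reads the all-zeros V2) and element 3 as known undef (sentinel).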
8112 
8113 /// Decode a target shuffle mask and inputs and see if any values are
8114 /// known to be undef or zero from their inputs.
8115 /// Returns true if the target shuffle mask was decoded.
8116 /// FIXME: Merge this with computeZeroableShuffleElements?
8117 static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
8118                                          SmallVectorImpl<SDValue> &Ops,
8119                                          APInt &KnownUndef, APInt &KnownZero) {
8120   bool IsUnary;
8121   if (!isTargetShuffle(N.getOpcode()))
8122     return false;
8123 
8124   MVT VT = N.getSimpleValueType();
8125   if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
8126     return false;
8127 
8128   int Size = Mask.size();
8129   SDValue V1 = Ops[0];
8130   SDValue V2 = IsUnary ? V1 : Ops[1];
8131   KnownUndef = KnownZero = APInt::getZero(Size);
8132 
8133   V1 = peekThroughBitcasts(V1);
8134   V2 = peekThroughBitcasts(V2);
8135 
8136   assert((VT.getSizeInBits() % Size) == 0 &&
8137          "Illegal split of shuffle value type");
8138   unsigned EltSizeInBits = VT.getSizeInBits() / Size;
8139 
8140   // Extract known constant input data.
8141   APInt UndefSrcElts[2];
8142   SmallVector<APInt, 32> SrcEltBits[2];
8143   bool IsSrcConstant[2] = {
8144       getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
8145                                     SrcEltBits[0], true, false),
8146       getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
8147                                     SrcEltBits[1], true, false)};
8148 
8149   for (int i = 0; i < Size; ++i) {
8150     int M = Mask[i];
8151 
8152     // Already decoded as SM_SentinelZero / SM_SentinelUndef.
8153     if (M < 0) {
8154       assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
8155       if (SM_SentinelUndef == M)
8156         KnownUndef.setBit(i);
8157       if (SM_SentinelZero == M)
8158         KnownZero.setBit(i);
8159       continue;
8160     }
8161 
8162     // Determine shuffle input and normalize the mask.
8163     unsigned SrcIdx = M / Size;
8164     SDValue V = M < Size ? V1 : V2;
8165     M %= Size;
8166 
8167     // We are referencing an UNDEF input.
8168     if (V.isUndef()) {
8169       KnownUndef.setBit(i);
8170       continue;
8171     }
8172 
8173     // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
8174     // TODO: We currently only set UNDEF for integer types - floats use the same
8175     // registers as vectors and many of the scalar folded loads rely on the
8176     // SCALAR_TO_VECTOR pattern.
8177     if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
8178         (Size % V.getValueType().getVectorNumElements()) == 0) {
8179       int Scale = Size / V.getValueType().getVectorNumElements();
8180       int Idx = M / Scale;
8181       if (Idx != 0 && !VT.isFloatingPoint())
8182         KnownUndef.setBit(i);
8183       else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
8184         KnownZero.setBit(i);
8185       continue;
8186     }
8187 
8188     // INSERT_SUBVECTOR - to widen vectors we often insert them into UNDEF
8189     // base vectors.
8190     if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
8191       SDValue Vec = V.getOperand(0);
8192       int NumVecElts = Vec.getValueType().getVectorNumElements();
8193       if (Vec.isUndef() && Size == NumVecElts) {
8194         int Idx = V.getConstantOperandVal(2);
8195         int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
8196         if (M < Idx || (Idx + NumSubElts) <= M)
8197           KnownUndef.setBit(i);
8198       }
8199       continue;
8200     }
8201 
8202     // Attempt to extract from the source's constant bits.
8203     if (IsSrcConstant[SrcIdx]) {
8204       if (UndefSrcElts[SrcIdx][M])
8205         KnownUndef.setBit(i);
8206       else if (SrcEltBits[SrcIdx][M] == 0)
8207         KnownZero.setBit(i);
8208     }
8209   }
8210 
8211   assert(VT.getVectorNumElements() == (unsigned)Size &&
8212          "Different mask size from vector size!");
8213   return true;
8214 }
8215 
8216 // Replace target shuffle mask elements with known undef/zero sentinels.
8217 static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
8218                                               const APInt &KnownUndef,
8219                                               const APInt &KnownZero,
8220                                               bool ResolveKnownZeros = true) {
8221   unsigned NumElts = Mask.size();
8222   assert(KnownUndef.getBitWidth() == NumElts &&
8223          KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
8224 
8225   for (unsigned i = 0; i != NumElts; ++i) {
8226     if (KnownUndef[i])
8227       Mask[i] = SM_SentinelUndef;
8228     else if (ResolveKnownZeros && KnownZero[i])
8229       Mask[i] = SM_SentinelZero;
8230   }
8231 }
8232 
8233 // Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
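// e.g. Mask <0, SM_SentinelUndef, SM_SentinelZero, 3> sets bit 1 of KnownUndef
// and bit 2 of KnownZero.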
8234 static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
8235                                               APInt &KnownUndef,
8236                                               APInt &KnownZero) {
8237   unsigned NumElts = Mask.size();
8238   KnownUndef = KnownZero = APInt::getZero(NumElts);
8239 
8240   for (unsigned i = 0; i != NumElts; ++i) {
8241     int M = Mask[i];
8242     if (SM_SentinelUndef == M)
8243       KnownUndef.setBit(i);
8244     if (SM_SentinelZero == M)
8245       KnownZero.setBit(i);
8246   }
8247 }
8248 
8249 // Attempt to create a shuffle mask from a VSELECT/BLENDV condition mask.
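// e.g. a v4i32 VSELECT whose condition constant is <-1, 0, -1, 0> produces the
// blend mask <0, 5, 2, 7> (all-ones selects from operand 1, zero selects from
// operand 2).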
8250 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
8251                                          SDValue Cond, bool IsBLENDV = false) {
8252   EVT CondVT = Cond.getValueType();
8253   unsigned EltSizeInBits = CondVT.getScalarSizeInBits();
8254   unsigned NumElts = CondVT.getVectorNumElements();
8255 
8256   APInt UndefElts;
8257   SmallVector<APInt, 32> EltBits;
8258   if (!getTargetConstantBitsFromNode(Cond, EltSizeInBits, UndefElts, EltBits,
8259                                      true, false))
8260     return false;
8261 
8262   Mask.resize(NumElts, SM_SentinelUndef);
8263 
8264   for (int i = 0; i != (int)NumElts; ++i) {
8265     Mask[i] = i;
8266     // Arbitrarily choose from the 2nd operand if the select condition element
8267     // is undef.
8268     // TODO: Can we do better by matching patterns such as even/odd?
8269     if (UndefElts[i] || (!IsBLENDV && EltBits[i].isZero()) ||
8270         (IsBLENDV && EltBits[i].isNonNegative()))
8271       Mask[i] += NumElts;
8272   }
8273 
8274   return true;
8275 }
8276 
8277 // Forward declaration (for getFauxShuffleMask recursive check).
8278 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8279                                    SmallVectorImpl<SDValue> &Inputs,
8280                                    SmallVectorImpl<int> &Mask,
8281                                    const SelectionDAG &DAG, unsigned Depth,
8282                                    bool ResolveKnownElts);
8283 
8284 // Attempt to decode ops that could be represented as a shuffle mask.
8285 // The decoded shuffle mask may contain a different number of elements than the
8286 // destination value type.
8287 // TODO: Merge into getTargetShuffleInputs()
8288 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
8289                                SmallVectorImpl<int> &Mask,
8290                                SmallVectorImpl<SDValue> &Ops,
8291                                const SelectionDAG &DAG, unsigned Depth,
8292                                bool ResolveKnownElts) {
8293   Mask.clear();
8294   Ops.clear();
8295 
8296   MVT VT = N.getSimpleValueType();
8297   unsigned NumElts = VT.getVectorNumElements();
8298   unsigned NumSizeInBits = VT.getSizeInBits();
8299   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
8300   if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
8301     return false;
8302   assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
8303   unsigned NumSizeInBytes = NumSizeInBits / 8;
8304   unsigned NumBytesPerElt = NumBitsPerElt / 8;
8305 
8306   unsigned Opcode = N.getOpcode();
8307   switch (Opcode) {
8308   case ISD::VECTOR_SHUFFLE: {
8309     // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
8310     ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
8311     if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
8312       Mask.append(ShuffleMask.begin(), ShuffleMask.end());
8313       Ops.push_back(N.getOperand(0));
8314       Ops.push_back(N.getOperand(1));
8315       return true;
8316     }
8317     return false;
8318   }
8319   case ISD::AND:
8320   case X86ISD::ANDNP: {
8321     // Attempt to decode as a per-byte mask.
8322     APInt UndefElts;
8323     SmallVector<APInt, 32> EltBits;
8324     SDValue N0 = N.getOperand(0);
8325     SDValue N1 = N.getOperand(1);
8326     bool IsAndN = (X86ISD::ANDNP == Opcode);
8327     uint64_t ZeroMask = IsAndN ? 255 : 0;
8328     if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
8329       return false;
8330     // We can't assume an undef src element gives an undef dst - the other src
8331     // might be zero.
8332     if (!UndefElts.isZero())
8333       return false;
8334     for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
8335       const APInt &ByteBits = EltBits[i];
8336       if (ByteBits != 0 && ByteBits != 255)
8337         return false;
8338       Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
8339     }
8340     Ops.push_back(IsAndN ? N1 : N0);
8341     return true;
8342   }
8343   case ISD::OR: {
8344     // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
8345     // is a valid shuffle index.
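    // e.g. if N0 is known zero in elements 1 and 3 and N1 is known zero in
    // elements 0 and 2, the OR decodes to the blend mask <0, 5, 2, 7> of N0
    // and N1.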
8346     SDValue N0 = peekThroughBitcasts(N.getOperand(0));
8347     SDValue N1 = peekThroughBitcasts(N.getOperand(1));
8348     if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
8349       return false;
8350 
8351     SmallVector<int, 64> SrcMask0, SrcMask1;
8352     SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
8353     APInt Demand0 = APInt::getAllOnes(N0.getValueType().getVectorNumElements());
8354     APInt Demand1 = APInt::getAllOnes(N1.getValueType().getVectorNumElements());
8355     if (!getTargetShuffleInputs(N0, Demand0, SrcInputs0, SrcMask0, DAG,
8356                                 Depth + 1, true) ||
8357         !getTargetShuffleInputs(N1, Demand1, SrcInputs1, SrcMask1, DAG,
8358                                 Depth + 1, true))
8359       return false;
8360 
8361     size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
8362     SmallVector<int, 64> Mask0, Mask1;
8363     narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
8364     narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
8365     for (int i = 0; i != (int)MaskSize; ++i) {
8366       // NOTE: Don't handle SM_SentinelUndef, as we can end up in infinite
8367       // loops converting between OR and BLEND shuffles due to
8368       // canWidenShuffleElements merging away undef elements, meaning we
8369       // fail to recognise the OR as the undef element isn't known zero.
8370       if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
8371         Mask.push_back(SM_SentinelZero);
8372       else if (Mask1[i] == SM_SentinelZero)
8373         Mask.push_back(i);
8374       else if (Mask0[i] == SM_SentinelZero)
8375         Mask.push_back(i + MaskSize);
8376       else
8377         return false;
8378     }
8379     Ops.push_back(N0);
8380     Ops.push_back(N1);
8381     return true;
8382   }
8383   case ISD::INSERT_SUBVECTOR: {
8384     SDValue Src = N.getOperand(0);
8385     SDValue Sub = N.getOperand(1);
8386     EVT SubVT = Sub.getValueType();
8387     unsigned NumSubElts = SubVT.getVectorNumElements();
8388     if (!N->isOnlyUserOf(Sub.getNode()))
8389       return false;
8390     uint64_t InsertIdx = N.getConstantOperandVal(2);
8391     // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
8392     if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
8393         Sub.getOperand(0).getValueType() == VT) {
8394       uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
8395       for (int i = 0; i != (int)NumElts; ++i)
8396         Mask.push_back(i);
8397       for (int i = 0; i != (int)NumSubElts; ++i)
8398         Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
8399       Ops.push_back(Src);
8400       Ops.push_back(Sub.getOperand(0));
8401       return true;
8402     }
8403     // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
8404     SmallVector<int, 64> SubMask;
8405     SmallVector<SDValue, 2> SubInputs;
8406     SDValue SubSrc = peekThroughOneUseBitcasts(Sub);
8407     EVT SubSrcVT = SubSrc.getValueType();
8408     if (!SubSrcVT.isVector())
8409       return false;
8410 
8411     APInt SubDemand = APInt::getAllOnes(SubSrcVT.getVectorNumElements());
8412     if (!getTargetShuffleInputs(SubSrc, SubDemand, SubInputs, SubMask, DAG,
8413                                 Depth + 1, ResolveKnownElts))
8414       return false;
8415 
8416     // Subvector shuffle inputs must not be larger than the subvector.
8417     if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) {
8418           return SubVT.getFixedSizeInBits() <
8419                  SubInput.getValueSizeInBits().getFixedValue();
8420         }))
8421       return false;
8422 
8423     if (SubMask.size() != NumSubElts) {
8424       assert(((SubMask.size() % NumSubElts) == 0 ||
8425               (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
8426       if ((NumSubElts % SubMask.size()) == 0) {
8427         int Scale = NumSubElts / SubMask.size();
8428         SmallVector<int, 64> ScaledSubMask;
8429         narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
8430         SubMask = ScaledSubMask;
8431       } else {
8432         int Scale = SubMask.size() / NumSubElts;
8433         NumSubElts = SubMask.size();
8434         NumElts *= Scale;
8435         InsertIdx *= Scale;
8436       }
8437     }
8438     Ops.push_back(Src);
8439     Ops.append(SubInputs.begin(), SubInputs.end());
8440     if (ISD::isBuildVectorAllZeros(Src.getNode()))
8441       Mask.append(NumElts, SM_SentinelZero);
8442     else
8443       for (int i = 0; i != (int)NumElts; ++i)
8444         Mask.push_back(i);
8445     for (int i = 0; i != (int)NumSubElts; ++i) {
8446       int M = SubMask[i];
8447       if (0 <= M) {
8448         int InputIdx = M / NumSubElts;
8449         M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
8450       }
8451       Mask[i + InsertIdx] = M;
8452     }
8453     return true;
8454   }
8455   case X86ISD::PINSRB:
8456   case X86ISD::PINSRW:
8457   case ISD::SCALAR_TO_VECTOR:
8458   case ISD::INSERT_VECTOR_ELT: {
8459     // Match against an insert_vector_elt/scalar_to_vector of an extract from a
8460     // vector, for matching src/dst vector types.
8461     SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
8462 
8463     unsigned DstIdx = 0;
8464     if (Opcode != ISD::SCALAR_TO_VECTOR) {
8465       // Check we have an in-range constant insertion index.
8466       if (!isa<ConstantSDNode>(N.getOperand(2)) ||
8467           N.getConstantOperandAPInt(2).uge(NumElts))
8468         return false;
8469       DstIdx = N.getConstantOperandVal(2);
8470 
8471       // Attempt to recognise an INSERT*(VEC, 0, DstIdx) shuffle pattern.
8472       if (X86::isZeroNode(Scl)) {
8473         Ops.push_back(N.getOperand(0));
8474         for (unsigned i = 0; i != NumElts; ++i)
8475           Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);
8476         return true;
8477       }
8478     }
8479 
8480     // Peek through trunc/aext/zext.
8481     // TODO: aext shouldn't require SM_SentinelZero padding.
8482     // TODO: handle shift of scalars.
8483     unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
8484     while (Scl.getOpcode() == ISD::TRUNCATE ||
8485            Scl.getOpcode() == ISD::ANY_EXTEND ||
8486            Scl.getOpcode() == ISD::ZERO_EXTEND) {
8487       Scl = Scl.getOperand(0);
8488       MinBitsPerElt =
8489           std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
8490     }
8491     if ((MinBitsPerElt % 8) != 0)
8492       return false;
8493 
8494     // Attempt to find the source vector the scalar was extracted from.
8495     SDValue SrcExtract;
8496     if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
8497          Scl.getOpcode() == X86ISD::PEXTRW ||
8498          Scl.getOpcode() == X86ISD::PEXTRB) &&
8499         Scl.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
8500       SrcExtract = Scl;
8501     }
8502     if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
8503       return false;
8504 
8505     SDValue SrcVec = SrcExtract.getOperand(0);
8506     EVT SrcVT = SrcVec.getValueType();
8507     if (!SrcVT.getScalarType().isByteSized())
8508       return false;
8509     unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
8510     unsigned SrcByte = SrcIdx * (SrcVT.getScalarSizeInBits() / 8);
8511     unsigned DstByte = DstIdx * NumBytesPerElt;
8512     MinBitsPerElt =
8513         std::min<unsigned>(MinBitsPerElt, SrcVT.getScalarSizeInBits());
8514 
8515     // Create 'identity' byte level shuffle mask and then add inserted bytes.
8516     if (Opcode == ISD::SCALAR_TO_VECTOR) {
8517       Ops.push_back(SrcVec);
8518       Mask.append(NumSizeInBytes, SM_SentinelUndef);
8519     } else {
8520       Ops.push_back(SrcVec);
8521       Ops.push_back(N.getOperand(0));
8522       for (int i = 0; i != (int)NumSizeInBytes; ++i)
8523         Mask.push_back(NumSizeInBytes + i);
8524     }
8525 
8526     unsigned MinBytesPerElts = MinBitsPerElt / 8;
8527     MinBytesPerElts = std::min(MinBytesPerElts, NumBytesPerElt);
8528     for (unsigned i = 0; i != MinBytesPerElts; ++i)
8529       Mask[DstByte + i] = SrcByte + i;
8530     for (unsigned i = MinBytesPerElts; i < NumBytesPerElt; ++i)
8531       Mask[DstByte + i] = SM_SentinelZero;
8532     return true;
8533   }
8534   case X86ISD::PACKSS:
8535   case X86ISD::PACKUS: {
8536     SDValue N0 = N.getOperand(0);
8537     SDValue N1 = N.getOperand(1);
8538     assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
8539            N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
8540            "Unexpected input value type");
8541 
8542     APInt EltsLHS, EltsRHS;
8543     getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
8544 
8545     // If we know input saturation won't happen (or we don't care for particular
8546     // lanes), we can treat this as a truncation shuffle.
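    // e.g. a 128-bit PACKUSWB whose inputs are already known to fit in 8 bits
    // acts as the byte shuffle <0,2,4,...,14,16,18,...,30> of the two (bitcast)
    // inputs, i.e. a plain truncation.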
8547     bool Offset0 = false, Offset1 = false;
8548     if (Opcode == X86ISD::PACKSS) {
8549       if ((!(N0.isUndef() || EltsLHS.isZero()) &&
8550            DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
8551           (!(N1.isUndef() || EltsRHS.isZero()) &&
8552            DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
8553         return false;
8554       // We can't easily fold ASHR into a shuffle, but if it was feeding a
8555       // PACKSS then it was likely being used for sign-extension for a
8556       // truncation, so just peek through and adjust the mask accordingly.
8557       if (N0.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N0.getNode()) &&
8558           N0.getConstantOperandAPInt(1) == NumBitsPerElt) {
8559         Offset0 = true;
8560         N0 = N0.getOperand(0);
8561       }
8562       if (N1.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N1.getNode()) &&
8563           N1.getConstantOperandAPInt(1) == NumBitsPerElt) {
8564         Offset1 = true;
8565         N1 = N1.getOperand(0);
8566       }
8567     } else {
8568       APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
8569       if ((!(N0.isUndef() || EltsLHS.isZero()) &&
8570            !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
8571           (!(N1.isUndef() || EltsRHS.isZero()) &&
8572            !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
8573         return false;
8574     }
8575 
8576     bool IsUnary = (N0 == N1);
8577 
8578     Ops.push_back(N0);
8579     if (!IsUnary)
8580       Ops.push_back(N1);
8581 
8582     createPackShuffleMask(VT, Mask, IsUnary);
8583 
8584     if (Offset0 || Offset1) {
8585       for (int &M : Mask)
8586         if ((Offset0 && isInRange(M, 0, NumElts)) ||
8587             (Offset1 && isInRange(M, NumElts, 2 * NumElts)))
8588           ++M;
8589     }
8590     return true;
8591   }
8592   case ISD::VSELECT:
8593   case X86ISD::BLENDV: {
8594     SDValue Cond = N.getOperand(0);
8595     if (createShuffleMaskFromVSELECT(Mask, Cond, Opcode == X86ISD::BLENDV)) {
8596       Ops.push_back(N.getOperand(1));
8597       Ops.push_back(N.getOperand(2));
8598       return true;
8599     }
8600     return false;
8601   }
8602   case X86ISD::VTRUNC: {
8603     SDValue Src = N.getOperand(0);
8604     EVT SrcVT = Src.getValueType();
8605     // Truncated source must be a simple vector.
8606     if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
8607         (SrcVT.getScalarSizeInBits() % 8) != 0)
8608       return false;
8609     unsigned NumSrcElts = SrcVT.getVectorNumElements();
8610     unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
8611     unsigned Scale = NumBitsPerSrcElt / NumBitsPerElt;
8612     assert((NumBitsPerSrcElt % NumBitsPerElt) == 0 && "Illegal truncation");
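    // e.g. a VTRUNC from v4i32 to v16i8 gives the mask <0, 4, 8, 12> followed
    // by 12 zero elements, as the remaining result elements are treated as
    // known zero.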
8613     for (unsigned i = 0; i != NumSrcElts; ++i)
8614       Mask.push_back(i * Scale);
8615     Mask.append(NumElts - NumSrcElts, SM_SentinelZero);
8616     Ops.push_back(Src);
8617     return true;
8618   }
8619   case X86ISD::VSHLI:
8620   case X86ISD::VSRLI: {
8621     uint64_t ShiftVal = N.getConstantOperandVal(1);
8622     // Out of range bit shifts are guaranteed to be zero.
8623     if (NumBitsPerElt <= ShiftVal) {
8624       Mask.append(NumElts, SM_SentinelZero);
8625       return true;
8626     }
8627 
8628     // We can only decode 'whole byte' bit shifts as shuffles.
8629     if ((ShiftVal % 8) != 0)
8630       break;
8631 
8632     uint64_t ByteShift = ShiftVal / 8;
8633     Ops.push_back(N.getOperand(0));
8634 
8635     // Clear mask to all zeros and insert the shifted byte indices.
8636     Mask.append(NumSizeInBytes, SM_SentinelZero);
8637 
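    // e.g. for a v2i64 VSHLI by 8 bits, each 8-byte element is shifted up by
    // one byte and byte 0 of each element becomes zero (little-endian order).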
8638     if (X86ISD::VSHLI == Opcode) {
8639       for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
8640         for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
8641           Mask[i + j] = i + j - ByteShift;
8642     } else {
8643       for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
8644         for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
8645           Mask[i + j - ByteShift] = i + j;
8646     }
8647     return true;
8648   }
8649   case X86ISD::VROTLI:
8650   case X86ISD::VROTRI: {
8651     // We can only decode 'whole byte' bit rotates as shuffles.
8652     uint64_t RotateVal = N.getConstantOperandAPInt(1).urem(NumBitsPerElt);
8653     if ((RotateVal % 8) != 0)
8654       return false;
8655     Ops.push_back(N.getOperand(0));
8656     int Offset = RotateVal / 8;
8657     Offset = (X86ISD::VROTLI == Opcode ? NumBytesPerElt - Offset : Offset);
8658     for (int i = 0; i != (int)NumElts; ++i) {
8659       int BaseIdx = i * NumBytesPerElt;
8660       for (int j = 0; j != (int)NumBytesPerElt; ++j) {
8661         Mask.push_back(BaseIdx + ((Offset + j) % NumBytesPerElt));
8662       }
8663     }
8664     return true;
8665   }
8666   case X86ISD::VBROADCAST: {
8667     SDValue Src = N.getOperand(0);
8668     if (!Src.getSimpleValueType().isVector()) {
8669       if (Src.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8670           !isNullConstant(Src.getOperand(1)) ||
8671           Src.getOperand(0).getValueType().getScalarType() !=
8672               VT.getScalarType())
8673         return false;
8674       Src = Src.getOperand(0);
8675     }
8676     Ops.push_back(Src);
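    // Broadcast of element 0, e.g. a v4f32 VBROADCAST yields the splat mask
    // <0, 0, 0, 0>.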
8677     Mask.append(NumElts, 0);
8678     return true;
8679   }
8680   case ISD::ZERO_EXTEND:
8681   case ISD::ANY_EXTEND:
8682   case ISD::ZERO_EXTEND_VECTOR_INREG:
8683   case ISD::ANY_EXTEND_VECTOR_INREG: {
8684     SDValue Src = N.getOperand(0);
8685     EVT SrcVT = Src.getValueType();
8686 
8687     // Extended source must be a simple vector.
8688     if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
8689         (SrcVT.getScalarSizeInBits() % 8) != 0)
8690       return false;
8691 
8692     bool IsAnyExtend =
8693         (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
8694     DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
8695                          IsAnyExtend, Mask);
8696     Ops.push_back(Src);
8697     return true;
8698   }
8699   }
8700 
8701   return false;
8702 }
8703 
8704 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
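/// e.g. if the mask never references the second input, that input is removed
/// and mask indices referring to later inputs are shifted down by one input
/// width; a repeated input is remapped onto its first occurrence.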
8705 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
8706                                               SmallVectorImpl<int> &Mask) {
8707   int MaskWidth = Mask.size();
8708   SmallVector<SDValue, 16> UsedInputs;
8709   for (int i = 0, e = Inputs.size(); i < e; ++i) {
8710     int lo = UsedInputs.size() * MaskWidth;
8711     int hi = lo + MaskWidth;
8712 
8713     // Strip UNDEF input usage.
8714     if (Inputs[i].isUndef())
8715       for (int &M : Mask)
8716         if ((lo <= M) && (M < hi))
8717           M = SM_SentinelUndef;
8718 
8719     // Check for unused inputs.
8720     if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
8721       for (int &M : Mask)
8722         if (lo <= M)
8723           M -= MaskWidth;
8724       continue;
8725     }
8726 
8727     // Check for repeated inputs.
8728     bool IsRepeat = false;
8729     for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
8730       if (UsedInputs[j] != Inputs[i])
8731         continue;
8732       for (int &M : Mask)
8733         if (lo <= M)
8734           M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
8735       IsRepeat = true;
8736       break;
8737     }
8738     if (IsRepeat)
8739       continue;
8740 
8741     UsedInputs.push_back(Inputs[i]);
8742   }
8743   Inputs = UsedInputs;
8744 }
8745 
8746 /// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
8747 /// and then sets the SM_SentinelUndef and SM_SentinelZero values.
8748 /// Returns true if the target shuffle mask was decoded.
8749 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8750                                    SmallVectorImpl<SDValue> &Inputs,
8751                                    SmallVectorImpl<int> &Mask,
8752                                    APInt &KnownUndef, APInt &KnownZero,
8753                                    const SelectionDAG &DAG, unsigned Depth,
8754                                    bool ResolveKnownElts) {
8755   if (Depth >= SelectionDAG::MaxRecursionDepth)
8756     return false; // Limit search depth.
8757 
8758   EVT VT = Op.getValueType();
8759   if (!VT.isSimple() || !VT.isVector())
8760     return false;
8761 
8762   if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
8763     if (ResolveKnownElts)
8764       resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
8765     return true;
8766   }
8767   if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
8768                          ResolveKnownElts)) {
8769     resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
8770     return true;
8771   }
8772   return false;
8773 }
8774 
8775 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
8776                                    SmallVectorImpl<SDValue> &Inputs,
8777                                    SmallVectorImpl<int> &Mask,
8778                                    const SelectionDAG &DAG, unsigned Depth,
8779                                    bool ResolveKnownElts) {
8780   APInt KnownUndef, KnownZero;
8781   return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
8782                                 KnownZero, DAG, Depth, ResolveKnownElts);
8783 }
8784 
8785 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
8786                                    SmallVectorImpl<int> &Mask,
8787                                    const SelectionDAG &DAG, unsigned Depth = 0,
8788                                    bool ResolveKnownElts = true) {
8789   EVT VT = Op.getValueType();
8790   if (!VT.isSimple() || !VT.isVector())
8791     return false;
8792 
8793   unsigned NumElts = Op.getValueType().getVectorNumElements();
8794   APInt DemandedElts = APInt::getAllOnes(NumElts);
8795   return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, DAG, Depth,
8796                                 ResolveKnownElts);
8797 }
8798 
8799 // Attempt to create a scalar/subvector broadcast from the base MemSDNode.
8800 static SDValue getBROADCAST_LOAD(unsigned Opcode, const SDLoc &DL, EVT VT,
8801                                  EVT MemVT, MemSDNode *Mem, unsigned Offset,
8802                                  SelectionDAG &DAG) {
8803   assert((Opcode == X86ISD::VBROADCAST_LOAD ||
8804           Opcode == X86ISD::SUBV_BROADCAST_LOAD) &&
8805          "Unknown broadcast load type");
8806 
8807   // Ensure this is a simple (non-atomic, non-volatile), temporal read memop.
8808   if (!Mem || !Mem->readMem() || !Mem->isSimple() || Mem->isNonTemporal())
8809     return SDValue();
8810 
8811   SDValue Ptr =
8812       DAG.getMemBasePlusOffset(Mem->getBasePtr(), TypeSize::Fixed(Offset), DL);
8813   SDVTList Tys = DAG.getVTList(VT, MVT::Other);
8814   SDValue Ops[] = {Mem->getChain(), Ptr};
8815   SDValue BcstLd = DAG.getMemIntrinsicNode(
8816       Opcode, DL, Tys, Ops, MemVT,
8817       DAG.getMachineFunction().getMachineMemOperand(
8818           Mem->getMemOperand(), Offset, MemVT.getStoreSize()));
8819   DAG.makeEquivalentMemoryOrdering(SDValue(Mem, 1), BcstLd.getValue(1));
8820   return BcstLd;
8821 }
8822 
8823 /// Returns the scalar element that will make up the i'th
8824 /// element of the result of the vector shuffle.
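/// e.g. for (vector_shuffle (build_vector a, b, c, d), undef, <2, 2, 3, 3>),
/// Index 0 resolves to the scalar 'c'.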
8825 static SDValue getShuffleScalarElt(SDValue Op, unsigned Index,
8826                                    SelectionDAG &DAG, unsigned Depth) {
8827   if (Depth >= SelectionDAG::MaxRecursionDepth)
8828     return SDValue(); // Limit search depth.
8829 
8830   EVT VT = Op.getValueType();
8831   unsigned Opcode = Op.getOpcode();
8832   unsigned NumElems = VT.getVectorNumElements();
8833 
8834   // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
8835   if (auto *SV = dyn_cast<ShuffleVectorSDNode>(Op)) {
8836     int Elt = SV->getMaskElt(Index);
8837 
8838     if (Elt < 0)
8839       return DAG.getUNDEF(VT.getVectorElementType());
8840 
8841     SDValue Src = (Elt < (int)NumElems) ? SV->getOperand(0) : SV->getOperand(1);
8842     return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
8843   }
8844 
8845   // Recurse into target specific vector shuffles to find scalars.
8846   if (isTargetShuffle(Opcode)) {
8847     MVT ShufVT = VT.getSimpleVT();
8848     MVT ShufSVT = ShufVT.getVectorElementType();
8849     int NumElems = (int)ShufVT.getVectorNumElements();
8850     SmallVector<int, 16> ShuffleMask;
8851     SmallVector<SDValue, 16> ShuffleOps;
8852     if (!getTargetShuffleMask(Op.getNode(), ShufVT, true, ShuffleOps,
8853                               ShuffleMask))
8854       return SDValue();
8855 
8856     int Elt = ShuffleMask[Index];
8857     if (Elt == SM_SentinelZero)
8858       return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(Op), ShufSVT)
8859                                  : DAG.getConstantFP(+0.0, SDLoc(Op), ShufSVT);
8860     if (Elt == SM_SentinelUndef)
8861       return DAG.getUNDEF(ShufSVT);
8862 
8863     assert(0 <= Elt && Elt < (2 * NumElems) && "Shuffle index out of range");
8864     SDValue Src = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
8865     return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
8866   }
8867 
8868   // Recurse into insert_subvector base/sub vector to find scalars.
8869   if (Opcode == ISD::INSERT_SUBVECTOR) {
8870     SDValue Vec = Op.getOperand(0);
8871     SDValue Sub = Op.getOperand(1);
8872     uint64_t SubIdx = Op.getConstantOperandVal(2);
8873     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
8874 
8875     if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
8876       return getShuffleScalarElt(Sub, Index - SubIdx, DAG, Depth + 1);
8877     return getShuffleScalarElt(Vec, Index, DAG, Depth + 1);
8878   }
8879 
8880   // Recurse into concat_vectors sub vector to find scalars.
8881   if (Opcode == ISD::CONCAT_VECTORS) {
8882     EVT SubVT = Op.getOperand(0).getValueType();
8883     unsigned NumSubElts = SubVT.getVectorNumElements();
8884     uint64_t SubIdx = Index / NumSubElts;
8885     uint64_t SubElt = Index % NumSubElts;
8886     return getShuffleScalarElt(Op.getOperand(SubIdx), SubElt, DAG, Depth + 1);
8887   }
8888 
8889   // Recurse into extract_subvector src vector to find scalars.
8890   if (Opcode == ISD::EXTRACT_SUBVECTOR) {
8891     SDValue Src = Op.getOperand(0);
8892     uint64_t SrcIdx = Op.getConstantOperandVal(1);
8893     return getShuffleScalarElt(Src, Index + SrcIdx, DAG, Depth + 1);
8894   }
8895 
8896   // We only peek through bitcasts of the same vector width.
8897   if (Opcode == ISD::BITCAST) {
8898     SDValue Src = Op.getOperand(0);
8899     EVT SrcVT = Src.getValueType();
8900     if (SrcVT.isVector() && SrcVT.getVectorNumElements() == NumElems)
8901       return getShuffleScalarElt(Src, Index, DAG, Depth + 1);
8902     return SDValue();
8903   }
8904 
8905   // Actual nodes that may contain scalar elements
8906 
8907   // For insert_vector_elt - either return the index matching scalar or recurse
8908   // into the base vector.
8909   if (Opcode == ISD::INSERT_VECTOR_ELT &&
8910       isa<ConstantSDNode>(Op.getOperand(2))) {
8911     if (Op.getConstantOperandAPInt(2) == Index)
8912       return Op.getOperand(1);
8913     return getShuffleScalarElt(Op.getOperand(0), Index, DAG, Depth + 1);
8914   }
8915 
8916   if (Opcode == ISD::SCALAR_TO_VECTOR)
8917     return (Index == 0) ? Op.getOperand(0)
8918                         : DAG.getUNDEF(VT.getVectorElementType());
8919 
8920   if (Opcode == ISD::BUILD_VECTOR)
8921     return Op.getOperand(Index);
8922 
8923   return SDValue();
8924 }
8925 
8926 // Use PINSRB/PINSRW/PINSRD to create a build vector.
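// e.g. a v8i16 build_vector with only elements 0 and 3 non-zero starts from a
// zero vector and performs two PINSRW insertions; starting from zero breaks the
// dependency on the destination register's previous value.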
8927 static SDValue LowerBuildVectorAsInsert(SDValue Op, const APInt &NonZeroMask,
8928                                         unsigned NumNonZero, unsigned NumZero,
8929                                         SelectionDAG &DAG,
8930                                         const X86Subtarget &Subtarget) {
8931   MVT VT = Op.getSimpleValueType();
8932   unsigned NumElts = VT.getVectorNumElements();
8933   assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
8934           ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
8935          "Illegal vector insertion");
8936 
8937   SDLoc dl(Op);
8938   SDValue V;
8939   bool First = true;
8940 
8941   for (unsigned i = 0; i < NumElts; ++i) {
8942     bool IsNonZero = NonZeroMask[i];
8943     if (!IsNonZero)
8944       continue;
8945 
8946     // If the build vector contains zeros or our first insertion is not the
8947     // first index, then insert into a zero vector to break any register
8948     // dependency; otherwise use SCALAR_TO_VECTOR.
8949     if (First) {
8950       First = false;
8951       if (NumZero || 0 != i)
8952         V = getZeroVector(VT, Subtarget, DAG, dl);
8953       else {
8954         assert(0 == i && "Expected insertion into zero-index");
8955         V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
8956         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
8957         V = DAG.getBitcast(VT, V);
8958         continue;
8959       }
8960     }
8961     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
8962                     DAG.getIntPtrConstant(i, dl));
8963   }
8964 
8965   return V;
8966 }
8967 
8968 /// Custom lower build_vector of v16i8.
8969 static SDValue LowerBuildVectorv16i8(SDValue Op, const APInt &NonZeroMask,
8970                                      unsigned NumNonZero, unsigned NumZero,
8971                                      SelectionDAG &DAG,
8972                                      const X86Subtarget &Subtarget) {
8973   if (NumNonZero > 8 && !Subtarget.hasSSE41())
8974     return SDValue();
8975 
8976   // SSE4.1 - use PINSRB to insert each byte directly.
8977   if (Subtarget.hasSSE41())
8978     return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
8979                                     Subtarget);
8980 
8981   SDLoc dl(Op);
8982   SDValue V;
8983 
8984   // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
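  // e.g. non-zero bytes at indices (2*i, 2*i+1) are combined as
  // (zext(lo) | (zext(hi) << 8)) and inserted as the single i16 element i.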
8985   for (unsigned i = 0; i < 16; i += 2) {
8986     bool ThisIsNonZero = NonZeroMask[i];
8987     bool NextIsNonZero = NonZeroMask[i + 1];
8988     if (!ThisIsNonZero && !NextIsNonZero)
8989       continue;
8990 
8991     // FIXME: Investigate combining the first 4 bytes as a i32 instead.
8992     SDValue Elt;
8993     if (ThisIsNonZero) {
8994       if (NumZero || NextIsNonZero)
8995         Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
8996       else
8997         Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
8998     }
8999 
9000     if (NextIsNonZero) {
9001       SDValue NextElt = Op.getOperand(i + 1);
9002       if (i == 0 && NumZero)
9003         NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
9004       else
9005         NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
9006       NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
9007                             DAG.getConstant(8, dl, MVT::i8));
9008       if (ThisIsNonZero)
9009         Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
9010       else
9011         Elt = NextElt;
9012     }
9013 
9014     // If our first insertion is not the first index or zeros are needed, then
9015     // insert into zero vector. Otherwise, use SCALAR_TO_VECTOR (leaves high
9016     // elements undefined).
9017     if (!V) {
9018       if (i != 0 || NumZero)
9019         V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
9020       else {
9021         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
9022         V = DAG.getBitcast(MVT::v8i16, V);
9023         continue;
9024       }
9025     }
9026     Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
9027     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
9028                     DAG.getIntPtrConstant(i / 2, dl));
9029   }
9030 
9031   return DAG.getBitcast(MVT::v16i8, V);
9032 }
9033 
9034 /// Custom lower build_vector of v8i16.
9035 static SDValue LowerBuildVectorv8i16(SDValue Op, const APInt &NonZeroMask,
9036                                      unsigned NumNonZero, unsigned NumZero,
9037                                      SelectionDAG &DAG,
9038                                      const X86Subtarget &Subtarget) {
9039   if (NumNonZero > 4 && !Subtarget.hasSSE41())
9040     return SDValue();
9041 
9042   // Use PINSRW to insert each element directly.
9043   return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
9044                                   Subtarget);
9045 }
9046 
9047 /// Custom lower build_vector of v4i32 or v4f32.
9048 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
9049                                      const X86Subtarget &Subtarget) {
9050   // If this is a splat of a pair of elements, use MOVDDUP (unless the target
9051   // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
9052   // Because we're creating a less complicated build vector here, we may enable
9053   // further folding of the MOVDDUP via shuffle transforms.
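  // e.g. an (a, b, a, b) build vector becomes MOVDDUP of the v2f64 bitcast of
  // the build vector (a, b, undef, undef).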
9054   if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
9055       Op.getOperand(0) == Op.getOperand(2) &&
9056       Op.getOperand(1) == Op.getOperand(3) &&
9057       Op.getOperand(0) != Op.getOperand(1)) {
9058     SDLoc DL(Op);
9059     MVT VT = Op.getSimpleValueType();
9060     MVT EltVT = VT.getVectorElementType();
9061     // Create a new build vector with the first 2 elements followed by undef
9062     // padding, bitcast to v2f64, duplicate, and bitcast back.
9063     SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
9064                        DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
9065     SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
9066     SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
9067     return DAG.getBitcast(VT, Dup);
9068   }
9069 
9070   // Find all zeroable elements.
9071   std::bitset<4> Zeroable, Undefs;
9072   for (int i = 0; i < 4; ++i) {
9073     SDValue Elt = Op.getOperand(i);
9074     Undefs[i] = Elt.isUndef();
9075     Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
9076   }
9077   assert(Zeroable.size() - Zeroable.count() > 1 &&
9078          "We expect at least two non-zero elements!");
9079 
9080   // We only know how to deal with build_vector nodes where elements are either
9081   // zeroable or extract_vector_elt with constant index.
9082   SDValue FirstNonZero;
9083   unsigned FirstNonZeroIdx;
9084   for (unsigned i = 0; i < 4; ++i) {
9085     if (Zeroable[i])
9086       continue;
9087     SDValue Elt = Op.getOperand(i);
9088     if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9089         !isa<ConstantSDNode>(Elt.getOperand(1)))
9090       return SDValue();
9091     // Make sure that this node is extracting from a 128-bit vector.
9092     MVT VT = Elt.getOperand(0).getSimpleValueType();
9093     if (!VT.is128BitVector())
9094       return SDValue();
9095     if (!FirstNonZero.getNode()) {
9096       FirstNonZero = Elt;
9097       FirstNonZeroIdx = i;
9098     }
9099   }
9100 
9101   assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
9102   SDValue V1 = FirstNonZero.getOperand(0);
9103   MVT VT = V1.getSimpleValueType();
9104 
9105   // See if this build_vector can be lowered as a blend with zero.
9106   SDValue Elt;
9107   unsigned EltMaskIdx, EltIdx;
9108   int Mask[4];
9109   for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
9110     if (Zeroable[EltIdx]) {
9111       // The zero vector will be on the right hand side.
9112       Mask[EltIdx] = EltIdx+4;
9113       continue;
9114     }
9115 
9116     Elt = Op->getOperand(EltIdx);
9117     // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
9118     EltMaskIdx = Elt.getConstantOperandVal(1);
9119     if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
9120       break;
9121     Mask[EltIdx] = EltIdx;
9122   }
9123 
9124   if (EltIdx == 4) {
9125     // Let the shuffle legalizer deal with blend operations.
9126     SDValue VZeroOrUndef = (Zeroable == Undefs)
9127                                ? DAG.getUNDEF(VT)
9128                                : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
9129     if (V1.getSimpleValueType() != VT)
9130       V1 = DAG.getBitcast(VT, V1);
9131     return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
9132   }
9133 
9134   // See if we can lower this build_vector to a INSERTPS.
9135   if (!Subtarget.hasSSE41())
9136     return SDValue();
9137 
9138   SDValue V2 = Elt.getOperand(0);
9139   if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
9140     V1 = SDValue();
9141 
9142   bool CanFold = true;
9143   for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
9144     if (Zeroable[i])
9145       continue;
9146 
9147     SDValue Current = Op->getOperand(i);
9148     SDValue SrcVector = Current->getOperand(0);
9149     if (!V1.getNode())
9150       V1 = SrcVector;
9151     CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
9152   }
9153 
9154   if (!CanFold)
9155     return SDValue();
9156 
9157   assert(V1.getNode() && "Expected at least two non-zero elements!");
9158   if (V1.getSimpleValueType() != MVT::v4f32)
9159     V1 = DAG.getBitcast(MVT::v4f32, V1);
9160   if (V2.getSimpleValueType() != MVT::v4f32)
9161     V2 = DAG.getBitcast(MVT::v4f32, V2);
9162 
9163   // Ok, we can emit an INSERTPS instruction.
9164   unsigned ZMask = Zeroable.to_ulong();
9165 
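  // The INSERTPS immediate encodes the source element in bits [7:6], the
  // destination element in bits [5:4] and the zeroed-element mask in bits [3:0].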
9166   unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
9167   assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
9168   SDLoc DL(Op);
9169   SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
9170                                DAG.getIntPtrConstant(InsertPSMask, DL, true));
9171   return DAG.getBitcast(VT, Result);
9172 }
9173 
9174 /// Return a vector logical shift node.
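/// e.g. getVShift(/*isLeft=*/true, MVT::v4i32, X, /*NumBits=*/32, ...) emits a
/// VSHLDQ (PSLLDQ) of the v16i8 bitcast of X by 4 bytes.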
9175 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
9176                          SelectionDAG &DAG, const TargetLowering &TLI,
9177                          const SDLoc &dl) {
9178   assert(VT.is128BitVector() && "Unknown type for VShift");
9179   MVT ShVT = MVT::v16i8;
9180   unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
9181   SrcOp = DAG.getBitcast(ShVT, SrcOp);
9182   assert(NumBits % 8 == 0 && "Only support byte sized shifts");
9183   SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
9184   return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
9185 }
9186 
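// Widen a scalar load into a vector load and splat the loaded element with a
// shuffle, absorbing a constant "base + offset" displacement into the shuffle
// mask where possible.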
9187 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
9188                                       SelectionDAG &DAG) {
9189 
9190   // Check if the scalar load can be widened into a vector load, and if the
9191   // address is "base + cst", see if the cst can be "absorbed" into
9192   // the shuffle mask.
9193   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
9194     SDValue Ptr = LD->getBasePtr();
9195     if (!ISD::isNormalLoad(LD) || !LD->isSimple())
9196       return SDValue();
9197     EVT PVT = LD->getValueType(0);
9198     if (PVT != MVT::i32 && PVT != MVT::f32)
9199       return SDValue();
9200 
9201     int FI = -1;
9202     int64_t Offset = 0;
9203     if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
9204       FI = FINode->getIndex();
9205       Offset = 0;
9206     } else if (DAG.isBaseWithConstantOffset(Ptr) &&
9207                isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
9208       FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
9209       Offset = Ptr.getConstantOperandVal(1);
9210       Ptr = Ptr.getOperand(0);
9211     } else {
9212       return SDValue();
9213     }
9214 
9215     // FIXME: 256-bit vector instructions don't require a strict alignment,
9216     // improve this code to support it better.
9217     Align RequiredAlign(VT.getSizeInBits() / 8);
9218     SDValue Chain = LD->getChain();
9219     // Make sure the stack object alignment is at least 16 or 32.
9220     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9221     MaybeAlign InferredAlign = DAG.InferPtrAlign(Ptr);
9222     if (!InferredAlign || *InferredAlign < RequiredAlign) {
9223       if (MFI.isFixedObjectIndex(FI)) {
9224         // Can't change the alignment. FIXME: It's possible to compute
9225         // the exact stack offset and reference FI + adjusted offset instead,
9226         // if someone *really* cares about this; that's the way to implement it.
9227         return SDValue();
9228       } else {
9229         MFI.setObjectAlignment(FI, RequiredAlign);
9230       }
9231     }
9232 
9233     // (Offset % 16 or 32) must be a multiple of 4. The address is then
9234     // Ptr + (Offset & ~15).
9235     if (Offset < 0)
9236       return SDValue();
9237     if ((Offset % RequiredAlign.value()) & 3)
9238       return SDValue();
9239     int64_t StartOffset = Offset & ~int64_t(RequiredAlign.value() - 1);
9240     if (StartOffset) {
9241       SDLoc DL(Ptr);
9242       Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
9243                         DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
9244     }
9245 
9246     int EltNo = (Offset - StartOffset) >> 2;
9247     unsigned NumElems = VT.getVectorNumElements();
9248 
9249     EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
9250     SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
9251                              LD->getPointerInfo().getWithOffset(StartOffset));
9252 
9253     SmallVector<int, 8> Mask(NumElems, EltNo);
9254 
9255     return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
9256   }
9257 
9258   return SDValue();
9259 }
9260 
9261 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
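// e.g. (srl (load i64 %p), 32) resolves to that load with ByteOffset 4, since
// the shifted-down high bytes live at byte offset 4 on this little-endian
// target.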
9262 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
9263   if (ISD::isNON_EXTLoad(Elt.getNode())) {
9264     auto *BaseLd = cast<LoadSDNode>(Elt);
9265     if (!BaseLd->isSimple())
9266       return false;
9267     Ld = BaseLd;
9268     ByteOffset = 0;
9269     return true;
9270   }
9271 
9272   switch (Elt.getOpcode()) {
9273   case ISD::BITCAST:
9274   case ISD::TRUNCATE:
9275   case ISD::SCALAR_TO_VECTOR:
9276     return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
9277   case ISD::SRL:
9278     if (auto *AmtC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
9279       uint64_t Amt = AmtC->getZExtValue();
9280       if ((Amt % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
9281         ByteOffset += Amt / 8;
9282         return true;
9283       }
9284     }
9285     break;
9286   case ISD::EXTRACT_VECTOR_ELT:
9287     if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
9288       SDValue Src = Elt.getOperand(0);
9289       unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
9290       unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
9291       if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
9292           findEltLoadSrc(Src, Ld, ByteOffset)) {
9293         uint64_t Idx = IdxC->getZExtValue();
9294         ByteOffset += Idx * (SrcSizeInBits / 8);
9295         return true;
9296       }
9297     }
9298     break;
9299   }
9300 
9301   return false;
9302 }
9303 
9304 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
9305 /// elements can be replaced by a single large load which has the same value as
9306 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
9307 ///
9308 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
9309 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
9310                                         const SDLoc &DL, SelectionDAG &DAG,
9311                                         const X86Subtarget &Subtarget,
9312                                         bool IsAfterLegalize) {
9313   if ((VT.getScalarSizeInBits() % 8) != 0)
9314     return SDValue();
9315 
9316   unsigned NumElems = Elts.size();
9317 
9318   int LastLoadedElt = -1;
9319   APInt LoadMask = APInt::getZero(NumElems);
9320   APInt ZeroMask = APInt::getZero(NumElems);
9321   APInt UndefMask = APInt::getZero(NumElems);
9322 
9323   SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
9324   SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
9325 
9326   // For each element in the initializer, see if we've found a load, zero or an
9327   // undef.
9328   for (unsigned i = 0; i < NumElems; ++i) {
9329     SDValue Elt = peekThroughBitcasts(Elts[i]);
9330     if (!Elt.getNode())
9331       return SDValue();
9332     if (Elt.isUndef()) {
9333       UndefMask.setBit(i);
9334       continue;
9335     }
9336     if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
9337       ZeroMask.setBit(i);
9338       continue;
9339     }
9340 
9341     // Each loaded element must be the correct fractional portion of the
9342     // requested vector load.
9343     unsigned EltSizeInBits = Elt.getValueSizeInBits();
9344     if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
9345       return SDValue();
9346 
9347     if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
9348       return SDValue();
9349     unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
9350     if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
9351       return SDValue();
9352 
9353     LoadMask.setBit(i);
9354     LastLoadedElt = i;
9355   }
9356   assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
9357           LoadMask.countPopulation()) == NumElems &&
9358          "Incomplete element masks");
9359 
9360   // Handle Special Cases - all undef or undef/zero.
9361   if (UndefMask.countPopulation() == NumElems)
9362     return DAG.getUNDEF(VT);
9363   if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
9364     return VT.isInteger() ? DAG.getConstant(0, DL, VT)
9365                           : DAG.getConstantFP(0.0, DL, VT);
9366 
9367   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9368   int FirstLoadedElt = LoadMask.countTrailingZeros();
9369   SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
9370   EVT EltBaseVT = EltBase.getValueType();
9371   assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
9372          "Register/Memory size mismatch");
9373   LoadSDNode *LDBase = Loads[FirstLoadedElt];
9374   assert(LDBase && "Did not find base load for merging consecutive loads");
9375   unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
9376   unsigned BaseSizeInBytes = BaseSizeInBits / 8;
9377   int NumLoadedElts = (1 + LastLoadedElt - FirstLoadedElt);
9378   int LoadSizeInBits = NumLoadedElts * BaseSizeInBits;
9379   assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
9380 
9381   // TODO: Support offsetting the base load.
9382   if (ByteOffsets[FirstLoadedElt] != 0)
9383     return SDValue();
9384 
9385   // Check to see if the element's load is consecutive to the base load
9386   // or offset from a previous (already checked) load.
9387   auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
9388     LoadSDNode *Ld = Loads[EltIdx];
9389     int64_t ByteOffset = ByteOffsets[EltIdx];
9390     if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
9391       int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
9392       return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
9393               Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
9394     }
9395     return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
9396                                               EltIdx - FirstLoadedElt);
9397   };
9398 
9399   // Consecutive loads can contain UNDEFs but not ZERO elements.
9400   // Consecutive loads with UNDEF and ZERO elements require an
9401   // additional shuffle stage to clear the ZERO elements.
9402   bool IsConsecutiveLoad = true;
9403   bool IsConsecutiveLoadWithZeros = true;
9404   for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
9405     if (LoadMask[i]) {
9406       if (!CheckConsecutiveLoad(LDBase, i)) {
9407         IsConsecutiveLoad = false;
9408         IsConsecutiveLoadWithZeros = false;
9409         break;
9410       }
9411     } else if (ZeroMask[i]) {
9412       IsConsecutiveLoad = false;
9413     }
9414   }
9415 
9416   auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
9417     auto MMOFlags = LDBase->getMemOperand()->getFlags();
9418     assert(LDBase->isSimple() &&
9419            "Cannot merge volatile or atomic loads.");
9420     SDValue NewLd =
9421         DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
9422                     LDBase->getPointerInfo(), LDBase->getOriginalAlign(),
9423                     MMOFlags);
9424     for (auto *LD : Loads)
9425       if (LD)
9426         DAG.makeEquivalentMemoryOrdering(LD, NewLd);
9427     return NewLd;
9428   };
9429 
9430   // Check if the base load is entirely dereferenceable.
9431   bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
9432       VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
9433 
9434   // LOAD - all consecutive load/undefs (must start/end with a load or be
9435   // entirely dereferenceable). If we have found an entire vector of loads and
9436   // undefs, then return a large load of the entire vector width starting at the
9437   // base pointer. If the vector contains zeros, then attempt to shuffle those
9438   // elements.
9439   if (FirstLoadedElt == 0 &&
9440       (NumLoadedElts == (int)NumElems || IsDereferenceable) &&
9441       (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
9442     if (IsAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
9443       return SDValue();
9444 
9445     // Don't create 256-bit non-temporal aligned loads without AVX2 as these
9446     // will lower to regular temporal loads and use the cache.
9447     if (LDBase->isNonTemporal() && LDBase->getAlign() >= Align(32) &&
9448         VT.is256BitVector() && !Subtarget.hasInt256())
9449       return SDValue();
9450 
9451     if (NumElems == 1)
9452       return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
9453 
9454     if (!ZeroMask)
9455       return CreateLoad(VT, LDBase);
9456 
9457     // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
9458     // vector and a zero vector to clear out the zero elements.
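    // For example: with VT = v4i32, Scale = 1 and Elts = { load, zero, load,
    // undef }, the ClearMask built below is { 0, 5, 2, -1 }, so lanes 0 and 2
    // come from the new wide load V, lane 1 is cleared from the zero vector Z,
    // and lane 3 stays undef.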
9459     if (!IsAfterLegalize && VT.isVector()) {
9460       unsigned NumMaskElts = VT.getVectorNumElements();
9461       if ((NumMaskElts % NumElems) == 0) {
9462         unsigned Scale = NumMaskElts / NumElems;
9463         SmallVector<int, 4> ClearMask(NumMaskElts, -1);
9464         for (unsigned i = 0; i < NumElems; ++i) {
9465           if (UndefMask[i])
9466             continue;
9467           int Offset = ZeroMask[i] ? NumMaskElts : 0;
9468           for (unsigned j = 0; j != Scale; ++j)
9469             ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
9470         }
9471         SDValue V = CreateLoad(VT, LDBase);
9472         SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
9473                                    : DAG.getConstantFP(0.0, DL, VT);
9474         return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
9475       }
9476     }
9477   }
9478 
9479   // If the upper half of a ymm/zmm load is undef then just load the lower half.
9480   if (VT.is256BitVector() || VT.is512BitVector()) {
9481     unsigned HalfNumElems = NumElems / 2;
9482     if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnes()) {
9483       EVT HalfVT =
9484           EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
9485       SDValue HalfLD =
9486           EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
9487                                    DAG, Subtarget, IsAfterLegalize);
9488       if (HalfLD)
9489         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
9490                            HalfLD, DAG.getIntPtrConstant(0, DL));
9491     }
9492   }
9493 
9494   // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
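  // For example, a v4i32 build_vector of the form
  //   (load i32 %p), (load i32 %p+4), 0, 0
  // has LoadSizeInBits == 64 and becomes
  //   (v4i32 (bitcast (v2i64 (X86ISD::VZEXT_LOAD<i64> %p))))
  // where the zero-extending load fills the upper lanes with zeros.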
9495   if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
9496       ((LoadSizeInBits == 16 && Subtarget.hasFP16()) || LoadSizeInBits == 32 ||
9497        LoadSizeInBits == 64) &&
9498       ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
9499     MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
9500                                       : MVT::getIntegerVT(LoadSizeInBits);
9501     MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
9502     // Allow v4f32 on SSE1-only targets.
9503     // FIXME: Add more isel patterns so we can just use VT directly.
9504     if (!Subtarget.hasSSE2() && VT == MVT::v4f32)
9505       VecVT = MVT::v4f32;
9506     if (TLI.isTypeLegal(VecVT)) {
9507       SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
9508       SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
9509       SDValue ResNode = DAG.getMemIntrinsicNode(
9510           X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, LDBase->getPointerInfo(),
9511           LDBase->getOriginalAlign(), MachineMemOperand::MOLoad);
9512       for (auto *LD : Loads)
9513         if (LD)
9514           DAG.makeEquivalentMemoryOrdering(LD, ResNode);
9515       return DAG.getBitcast(VT, ResNode);
9516     }
9517   }
9518 
9519   // BROADCAST - match the smallest possible repetition pattern, load that
9520   // scalar/subvector element and then broadcast to the entire vector.
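  // For example (assuming AVX2 on x86-64), a v8i32 build_vector repeating the
  // pair (load i32 %p), (load i32 %p+4) four times matches with SubElems == 2
  // and ScalarSize == 64, and lowers to
  //   (v8i32 (bitcast (v4i64 (X86ISD::VBROADCAST (i64 load %p)))))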
9521   if (ZeroMask.isZero() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
9522       (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
9523     for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
9524       unsigned RepeatSize = SubElems * BaseSizeInBits;
9525       unsigned ScalarSize = std::min(RepeatSize, 64u);
9526       if (!Subtarget.hasAVX2() && ScalarSize < 32)
9527         continue;
9528 
9529       // Don't attempt a 1:N subvector broadcast - it should be caught by
9530       // combineConcatVectorOps, otherwise it will cause infinite loops.
9531       if (RepeatSize > ScalarSize && SubElems == 1)
9532         continue;
9533 
9534       bool Match = true;
9535       SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
9536       for (unsigned i = 0; i != NumElems && Match; ++i) {
9537         if (!LoadMask[i])
9538           continue;
9539         SDValue Elt = peekThroughBitcasts(Elts[i]);
9540         if (RepeatedLoads[i % SubElems].isUndef())
9541           RepeatedLoads[i % SubElems] = Elt;
9542         else
9543           Match &= (RepeatedLoads[i % SubElems] == Elt);
9544       }
9545 
9546       // We must have loads at both ends of the repetition.
9547       Match &= !RepeatedLoads.front().isUndef();
9548       Match &= !RepeatedLoads.back().isUndef();
9549       if (!Match)
9550         continue;
9551 
9552       EVT RepeatVT =
9553           VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
9554               ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
9555               : EVT::getFloatingPointVT(ScalarSize);
9556       if (RepeatSize > ScalarSize)
9557         RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
9558                                     RepeatSize / ScalarSize);
9559       EVT BroadcastVT =
9560           EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
9561                            VT.getSizeInBits() / ScalarSize);
9562       if (TLI.isTypeLegal(BroadcastVT)) {
9563         if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
9564                 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, IsAfterLegalize)) {
9565           SDValue Broadcast = RepeatLoad;
9566           if (RepeatSize > ScalarSize) {
9567             while (Broadcast.getValueSizeInBits() < VT.getSizeInBits())
9568               Broadcast = concatSubVectors(Broadcast, Broadcast, DAG, DL);
9569           } else {
9570             if (!Subtarget.hasAVX2() &&
9571                 !X86::mayFoldLoadIntoBroadcastFromMem(
9572                     RepeatLoad, RepeatVT.getScalarType().getSimpleVT(),
9573                     Subtarget,
9574                     /*AssumeSingleUse=*/true))
9575               return SDValue();
9576             Broadcast =
9577                 DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, RepeatLoad);
9578           }
9579           return DAG.getBitcast(VT, Broadcast);
9580         }
9581       }
9582     }
9583   }
9584 
9585   return SDValue();
9586 }
9587 
9588 // Combine vector ops (shuffles etc.) that are equal to build_vector load1,
9589 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
9590 // are consecutive, non-overlapping, and in the right order.
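// For example, a v4i32 shuffle whose lanes resolve (via getShuffleScalarElt)
// to loads from %p, %p+4, %p+8 and %p+12 becomes a single (v4i32 (load %p)).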
9591 static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
9592                                          SelectionDAG &DAG,
9593                                          const X86Subtarget &Subtarget,
9594                                          bool IsAfterLegalize) {
9595   SmallVector<SDValue, 64> Elts;
9596   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9597     if (SDValue Elt = getShuffleScalarElt(Op, i, DAG, 0)) {
9598       Elts.push_back(Elt);
9599       continue;
9600     }
9601     return SDValue();
9602   }
9603   assert(Elts.size() == VT.getVectorNumElements());
9604   return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
9605                                   IsAfterLegalize);
9606 }
9607 
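// For example, given VT = v8i32, SplatBitSize = 64 and SplatValue =
// 0x0000000100000002, the helper below produces the constant vector
// <i32 2, i32 1> (bits are extracted starting from the least significant end).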
9608 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
9609                                    unsigned SplatBitSize, LLVMContext &C) {
9610   unsigned ScalarSize = VT.getScalarSizeInBits();
9611   unsigned NumElm = SplatBitSize / ScalarSize;
9612 
9613   SmallVector<Constant *, 32> ConstantVec;
9614   for (unsigned i = 0; i < NumElm; i++) {
9615     APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
9616     Constant *Const;
9617     if (VT.isFloatingPoint()) {
9618       if (ScalarSize == 16) {
9619         Const = ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
9620       } else if (ScalarSize == 32) {
9621         Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
9622       } else {
9623         assert(ScalarSize == 64 && "Unsupported floating point scalar size");
9624         Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
9625       }
9626     } else
9627       Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
9628     ConstantVec.push_back(Const);
9629   }
9630   return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
9631 }
9632 
9633 static bool isFoldableUseOfShuffle(SDNode *N) {
9634   for (auto *U : N->uses()) {
9635     unsigned Opc = U->getOpcode();
9636     // VPERMV/VPERMV3 shuffles can never fold their index operands.
9637     if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
9638       return false;
9639     if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
9640       return false;
9641     if (isTargetShuffle(Opc))
9642       return true;
9643     if (Opc == ISD::BITCAST) // Ignore bitcasts
9644       return isFoldableUseOfShuffle(U);
9645     if (N->hasOneUse()) {
9646       // TODO: There may be some general way to know if an SDNode can
9647       // be folded. We currently only know whether an MI is foldable.
9648       if (Opc == X86ISD::VPDPBUSD && U->getOperand(2).getNode() != N)
9649         return false;
9650       return true;
9651     }
9652   }
9653   return false;
9654 }
9655 
9656 /// Attempt to use the vbroadcast instruction to generate a splat value
9657 /// from a splat BUILD_VECTOR which uses:
9658 ///  a. A single scalar load, or a constant.
9659 ///  b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
9660 ///
9661 /// The VBROADCAST node is returned when a pattern is found,
9662 /// or SDValue() otherwise.
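/// For example, with AVX a splat such as
///   (v8f32 (build_vector (load f32 %p), (load f32 %p), ... x8))
/// becomes a single (v8f32 (X86ISD::VBROADCAST_LOAD %p)).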
9663 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
9664                                            const X86Subtarget &Subtarget,
9665                                            SelectionDAG &DAG) {
9666   // VBROADCAST requires AVX.
9667   // TODO: Splats could be generated for non-AVX CPUs using SSE
9668   // instructions, but there's less potential gain for only 128-bit vectors.
9669   if (!Subtarget.hasAVX())
9670     return SDValue();
9671 
9672   MVT VT = BVOp->getSimpleValueType(0);
9673   unsigned NumElts = VT.getVectorNumElements();
9674   SDLoc dl(BVOp);
9675 
9676   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
9677          "Unsupported vector type for broadcast.");
9678 
9679   // See if the build vector is a repeating sequence of scalars (inc. splat).
9680   SDValue Ld;
9681   BitVector UndefElements;
9682   SmallVector<SDValue, 16> Sequence;
9683   if (BVOp->getRepeatedSequence(Sequence, &UndefElements)) {
9684     assert((NumElts % Sequence.size()) == 0 && "Sequence doesn't fit.");
9685     if (Sequence.size() == 1)
9686       Ld = Sequence[0];
9687   }
9688 
9689   // Attempt to use VBROADCASTM
9690   // From this pattern:
9691   // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
9692   // b. t1 = (build_vector t0 t0)
9693   //
9694   // Create (VBROADCASTM v2i1 X)
9695   if (!Sequence.empty() && Subtarget.hasCDI()) {
9696     // If not a splat, are the upper sequence values zeroable?
9697     unsigned SeqLen = Sequence.size();
9698     bool UpperZeroOrUndef =
9699         SeqLen == 1 ||
9700         llvm::all_of(ArrayRef(Sequence).drop_front(), [](SDValue V) {
9701           return !V || V.isUndef() || isNullConstant(V);
9702         });
9703     SDValue Op0 = Sequence[0];
9704     if (UpperZeroOrUndef && ((Op0.getOpcode() == ISD::BITCAST) ||
9705                              (Op0.getOpcode() == ISD::ZERO_EXTEND &&
9706                               Op0.getOperand(0).getOpcode() == ISD::BITCAST))) {
9707       SDValue BOperand = Op0.getOpcode() == ISD::BITCAST
9708                              ? Op0.getOperand(0)
9709                              : Op0.getOperand(0).getOperand(0);
9710       MVT MaskVT = BOperand.getSimpleValueType();
9711       MVT EltType = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
9712       if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) ||  // for broadcastmb2q
9713           (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
9714         MVT BcstVT = MVT::getVectorVT(EltType, NumElts / SeqLen);
9715         if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
9716           unsigned Scale = 512 / VT.getSizeInBits();
9717           BcstVT = MVT::getVectorVT(EltType, Scale * (NumElts / SeqLen));
9718         }
9719         SDValue Bcst = DAG.getNode(X86ISD::VBROADCASTM, dl, BcstVT, BOperand);
9720         if (BcstVT.getSizeInBits() != VT.getSizeInBits())
9721           Bcst = extractSubVector(Bcst, 0, DAG, dl, VT.getSizeInBits());
9722         return DAG.getBitcast(VT, Bcst);
9723       }
9724     }
9725   }
9726 
9727   unsigned NumUndefElts = UndefElements.count();
9728   if (!Ld || (NumElts - NumUndefElts) <= 1) {
9729     APInt SplatValue, Undef;
9730     unsigned SplatBitSize;
9731     bool HasUndef;
9732     // Check if this is a repeated constant pattern suitable for broadcasting.
9733     if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
9734         SplatBitSize > VT.getScalarSizeInBits() &&
9735         SplatBitSize < VT.getSizeInBits()) {
9736       // Avoid replacing with broadcast when it's a use of a shuffle
9737       // instruction to preserve the present custom lowering of shuffles.
9738       if (isFoldableUseOfShuffle(BVOp))
9739         return SDValue();
9740       // Replace BUILD_VECTOR with a broadcast of the repeated constants.
9741       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9742       LLVMContext *Ctx = DAG.getContext();
9743       MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
9744       if (Subtarget.hasAVX()) {
9745         if (SplatBitSize == 32 || SplatBitSize == 64 ||
9746             (SplatBitSize < 32 && Subtarget.hasAVX2())) {
9747           // Splatted value can fit in one INTEGER constant in constant pool.
9748           // Load the constant and broadcast it.
9749           MVT CVT = MVT::getIntegerVT(SplatBitSize);
9750           Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
9751           Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
9752           SDValue CP = DAG.getConstantPool(C, PVT);
9753           unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
9754 
9755           Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
9756           SDVTList Tys =
9757               DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
9758           SDValue Ops[] = {DAG.getEntryNode(), CP};
9759           MachinePointerInfo MPI =
9760               MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9761           SDValue Brdcst = DAG.getMemIntrinsicNode(
9762               X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT, MPI, Alignment,
9763               MachineMemOperand::MOLoad);
9764           return DAG.getBitcast(VT, Brdcst);
9765         }
9766         if (SplatBitSize > 64) {
9767           // Load the vector of constants and broadcast it.
9768           Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
9769                                              *Ctx);
9770           SDValue VCP = DAG.getConstantPool(VecC, PVT);
9771           unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
9772           MVT VVT = MVT::getVectorVT(VT.getScalarType(), NumElm);
9773           Align Alignment = cast<ConstantPoolSDNode>(VCP)->getAlign();
9774           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9775           SDValue Ops[] = {DAG.getEntryNode(), VCP};
9776           MachinePointerInfo MPI =
9777               MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9778           return DAG.getMemIntrinsicNode(
9779               X86ISD::SUBV_BROADCAST_LOAD, dl, Tys, Ops, VVT, MPI, Alignment,
9780               MachineMemOperand::MOLoad);
9781         }
9782       }
9783     }
9784 
9785     // If we are moving a scalar into a vector (Ld must be set and all elements
9786     // but 1 are undef) and that operation is not obviously supported by
9787     // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
9788     // That's better than general shuffling and may eliminate a load to GPR and
9789     // move from scalar to vector register.
9790     if (!Ld || NumElts - NumUndefElts != 1)
9791       return SDValue();
9792     unsigned ScalarSize = Ld.getValueSizeInBits();
9793     if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
9794       return SDValue();
9795   }
9796 
9797   bool ConstSplatVal =
9798       (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
9799   bool IsLoad = ISD::isNormalLoad(Ld.getNode());
9800 
9801   // TODO: Handle broadcasts of non-constant sequences.
9802 
9803   // Make sure that all of the users of a non-constant load are from the
9804   // BUILD_VECTOR node.
9805   // FIXME: Is the use count needed for non-constant, non-load case?
9806   if (!ConstSplatVal && !IsLoad && !BVOp->isOnlyUserOf(Ld.getNode()))
9807     return SDValue();
9808 
9809   unsigned ScalarSize = Ld.getValueSizeInBits();
9810   bool IsGE256 = (VT.getSizeInBits() >= 256);
9811 
9812   // When optimizing for size, generate up to 5 extra bytes for a broadcast
9813   // instruction to save 8 or more bytes of constant pool data.
9814   // TODO: If multiple splats are generated to load the same constant,
9815   // it may be detrimental to overall size. There needs to be a way to detect
9816   // that condition to know if this is truly a size win.
9817   bool OptForSize = DAG.shouldOptForSize();
9818 
9819   // Handle broadcasting a single constant scalar from the constant pool
9820   // into a vector.
9821   // On Sandybridge (no AVX2), it is still better to load a constant vector
9822   // from the constant pool and not to broadcast it from a scalar.
9823   // But override that restriction when optimizing for size.
9824   // TODO: Check if splatting is recommended for other AVX-capable CPUs.
9825   if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
9826     EVT CVT = Ld.getValueType();
9827     assert(!CVT.isVector() && "Must not broadcast a vector type");
9828 
9829     // Splat f16, f32, i32, v4f64, v4i64 in all cases with AVX2.
9830     // For size optimization, also splat v2f64 and v2i64, and for size opt
9831     // with AVX2, also splat i8 and i16.
9832     // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
9833     if (ScalarSize == 32 ||
9834         (ScalarSize == 64 && (IsGE256 || Subtarget.hasVLX())) ||
9835         CVT == MVT::f16 ||
9836         (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
9837       const Constant *C = nullptr;
9838       if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
9839         C = CI->getConstantIntValue();
9840       else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
9841         C = CF->getConstantFPValue();
9842 
9843       assert(C && "Invalid constant type");
9844 
9845       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9846       SDValue CP =
9847           DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
9848       Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
9849 
9850       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9851       SDValue Ops[] = {DAG.getEntryNode(), CP};
9852       MachinePointerInfo MPI =
9853           MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
9854       return DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
9855                                      MPI, Alignment, MachineMemOperand::MOLoad);
9856     }
9857   }
9858 
9859   // Handle AVX2 in-register broadcasts.
9860   if (!IsLoad && Subtarget.hasInt256() &&
9861       (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
9862     return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
9863 
9864   // The scalar source must be a normal load.
9865   if (!IsLoad)
9866     return SDValue();
9867 
9868   // Make sure the non-chain result is only used by this build vector.
9869   if (!Ld->hasNUsesOfValue(NumElts - NumUndefElts, 0))
9870     return SDValue();
9871 
9872   if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
9873       (Subtarget.hasVLX() && ScalarSize == 64)) {
9874     auto *LN = cast<LoadSDNode>(Ld);
9875     SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9876     SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
9877     SDValue BCast =
9878         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
9879                                 LN->getMemoryVT(), LN->getMemOperand());
9880     DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
9881     return BCast;
9882   }
9883 
9884   // The integer check is needed for the 64-bit into 128-bit case so it doesn't
9885   // match double, since there is no vbroadcastsd xmm instruction.
9886   if (Subtarget.hasInt256() && Ld.getValueType().isInteger() &&
9887       (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) {
9888     auto *LN = cast<LoadSDNode>(Ld);
9889     SDVTList Tys = DAG.getVTList(VT, MVT::Other);
9890     SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
9891     SDValue BCast =
9892         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
9893                                 LN->getMemoryVT(), LN->getMemOperand());
9894     DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
9895     return BCast;
9896   }
9897 
9898   if (ScalarSize == 16 && Subtarget.hasFP16() && IsGE256)
9899     return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
9900 
9901   // Unsupported broadcast.
9902   return SDValue();
9903 }
9904 
9905 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
9906 /// underlying vector and index.
9907 ///
9908 /// Modifies \p ExtractedFromVec to the real vector and returns the real
9909 /// index.
9910 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
9911                                          SDValue ExtIdx) {
9912   int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
9913   if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
9914     return Idx;
9915 
9916   // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
9917   // lowered this:
9918   //   (extract_vector_elt (v8f32 %1), Constant<6>)
9919   // to:
9920   //   (extract_vector_elt (vector_shuffle<2,u,u,u>
9921   //                           (extract_subvector (v8f32 %0), Constant<4>),
9922   //                           undef)
9923   //                       Constant<0>)
9924   // In this case the vector is the extract_subvector expression and the index
9925   // is 2, as specified by the shuffle.
9926   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
9927   SDValue ShuffleVec = SVOp->getOperand(0);
9928   MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
9929   assert(ShuffleVecVT.getVectorElementType() ==
9930          ExtractedFromVec.getSimpleValueType().getVectorElementType());
9931 
9932   int ShuffleIdx = SVOp->getMaskElt(Idx);
9933   if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
9934     ExtractedFromVec = ShuffleVec;
9935     return ShuffleIdx;
9936   }
9937   return Idx;
9938 }
9939 
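// Build a vector that is mostly made of elements extracted from (at most two)
// other vectors: emit a vector_shuffle of the sources and then insert the
// remaining scalar operands. For example,
//   (v4i32 (build_vector (extract V,0), (extract V,1), %s, (extract V,3)))
// becomes
//   (insert_vector_elt (vector_shuffle<0,1,u,3> V, undef), %s, 2)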
9940 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
9941   MVT VT = Op.getSimpleValueType();
9942 
9943   // Skip if insert_vec_elt is not supported.
9944   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9945   if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
9946     return SDValue();
9947 
9948   SDLoc DL(Op);
9949   unsigned NumElems = Op.getNumOperands();
9950 
9951   SDValue VecIn1;
9952   SDValue VecIn2;
9953   SmallVector<unsigned, 4> InsertIndices;
9954   SmallVector<int, 8> Mask(NumElems, -1);
9955 
9956   for (unsigned i = 0; i != NumElems; ++i) {
9957     unsigned Opc = Op.getOperand(i).getOpcode();
9958 
9959     if (Opc == ISD::UNDEF)
9960       continue;
9961 
9962     if (Opc != ISD::EXTRACT_VECTOR_ELT) {
9963       // Quit if more than 1 element needs inserting.
9964       if (InsertIndices.size() > 1)
9965         return SDValue();
9966 
9967       InsertIndices.push_back(i);
9968       continue;
9969     }
9970 
9971     SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
9972     SDValue ExtIdx = Op.getOperand(i).getOperand(1);
9973 
9974     // Quit if non-constant index.
9975     if (!isa<ConstantSDNode>(ExtIdx))
9976       return SDValue();
9977     int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
9978 
9979     // Quit if extracted from vector of different type.
9980     if (ExtractedFromVec.getValueType() != VT)
9981       return SDValue();
9982 
9983     if (!VecIn1.getNode())
9984       VecIn1 = ExtractedFromVec;
9985     else if (VecIn1 != ExtractedFromVec) {
9986       if (!VecIn2.getNode())
9987         VecIn2 = ExtractedFromVec;
9988       else if (VecIn2 != ExtractedFromVec)
9989         // Quit if there are more than 2 vectors to shuffle.
9990         return SDValue();
9991     }
9992 
9993     if (ExtractedFromVec == VecIn1)
9994       Mask[i] = Idx;
9995     else if (ExtractedFromVec == VecIn2)
9996       Mask[i] = Idx + NumElems;
9997   }
9998 
9999   if (!VecIn1.getNode())
10000     return SDValue();
10001 
10002   VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
10003   SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
10004 
10005   for (unsigned Idx : InsertIndices)
10006     NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
10007                      DAG.getIntPtrConstant(Idx, DL));
10008 
10009   return NV;
10010 }
10011 
10012 // Lower BUILD_VECTOR operation for v8bf16, v16bf16 and v32bf16 types.
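// For example, (v8bf16 (build_vector a, b, ...)) is rebuilt as
//   (bitcast (v8i16 (build_vector (bitcast i16 a), (bitcast i16 b), ...)))
// so the element handling happens in the integer domain.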
10013 static SDValue LowerBUILD_VECTORvXbf16(SDValue Op, SelectionDAG &DAG,
10014                                        const X86Subtarget &Subtarget) {
10015   MVT VT = Op.getSimpleValueType();
10016   MVT IVT = VT.changeVectorElementTypeToInteger();
10017   SmallVector<SDValue, 16> NewOps;
10018   for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I)
10019     NewOps.push_back(DAG.getBitcast(MVT::i16, Op.getOperand(I)));
10020   SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(), IVT, NewOps);
10021   return DAG.getBitcast(VT, Res);
10022 }
10023 
10024 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
10025 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
10026                                      const X86Subtarget &Subtarget) {
10027 
10028   MVT VT = Op.getSimpleValueType();
10029   assert((VT.getVectorElementType() == MVT::i1) &&
10030          "Unexpected type in LowerBUILD_VECTORvXi1!");
10031 
10032   SDLoc dl(Op);
10033   if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
10034       ISD::isBuildVectorAllOnes(Op.getNode()))
10035     return Op;
10036 
10037   uint64_t Immediate = 0;
10038   SmallVector<unsigned, 16> NonConstIdx;
10039   bool IsSplat = true;
10040   bool HasConstElts = false;
10041   int SplatIdx = -1;
10042   for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
10043     SDValue In = Op.getOperand(idx);
10044     if (In.isUndef())
10045       continue;
10046     if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
10047       Immediate |= (InC->getZExtValue() & 0x1) << idx;
10048       HasConstElts = true;
10049     } else {
10050       NonConstIdx.push_back(idx);
10051     }
10052     if (SplatIdx < 0)
10053       SplatIdx = idx;
10054     else if (In != Op.getOperand(SplatIdx))
10055       IsSplat = false;
10056   }
10057 
10058   // For a splat, use (select i1 splat_elt, all-ones, all-zeroes).
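  // For example, a v16i1 splat of the i8 value %c becomes
  //   (v16i1 (bitcast (i16 (select (and %c, 1), -1, 0))))
  // so the whole mask is materialized with a scalar select (cmov).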
10059   if (IsSplat) {
10060     // The build_vector allows the scalar element to be larger than the vector
10061     // element type. We need to mask it to use as a condition unless we know
10062     // the upper bits are zero.
10063     // FIXME: Use computeKnownBits instead of checking specific opcode?
10064     SDValue Cond = Op.getOperand(SplatIdx);
10065     assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
10066     if (Cond.getOpcode() != ISD::SETCC)
10067       Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
10068                          DAG.getConstant(1, dl, MVT::i8));
10069 
10070     // Perform the select in the scalar domain so we can use cmov.
10071     if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
10072       SDValue Select = DAG.getSelect(dl, MVT::i32, Cond,
10073                                      DAG.getAllOnesConstant(dl, MVT::i32),
10074                                      DAG.getConstant(0, dl, MVT::i32));
10075       Select = DAG.getBitcast(MVT::v32i1, Select);
10076       return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Select, Select);
10077     } else {
10078       MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
10079       SDValue Select = DAG.getSelect(dl, ImmVT, Cond,
10080                                      DAG.getAllOnesConstant(dl, ImmVT),
10081                                      DAG.getConstant(0, dl, ImmVT));
10082       MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
10083       Select = DAG.getBitcast(VecVT, Select);
10084       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Select,
10085                          DAG.getIntPtrConstant(0, dl));
10086     }
10087   }
10088 
10089   // Insert the non-constant elements one by one.
10090   SDValue DstVec;
10091   if (HasConstElts) {
10092     if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
10093       SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
10094       SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
10095       ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
10096       ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
10097       DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
10098     } else {
10099       MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
10100       SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
10101       MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
10102       DstVec = DAG.getBitcast(VecVT, Imm);
10103       DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
10104                            DAG.getIntPtrConstant(0, dl));
10105     }
10106   } else
10107     DstVec = DAG.getUNDEF(VT);
10108 
10109   for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
10110     unsigned InsertIdx = NonConstIdx[i];
10111     DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
10112                          Op.getOperand(InsertIdx),
10113                          DAG.getIntPtrConstant(InsertIdx, dl));
10114   }
10115   return DstVec;
10116 }
10117 
10118 LLVM_ATTRIBUTE_UNUSED static bool isHorizOp(unsigned Opcode) {
10119   switch (Opcode) {
10120   case X86ISD::PACKSS:
10121   case X86ISD::PACKUS:
10122   case X86ISD::FHADD:
10123   case X86ISD::FHSUB:
10124   case X86ISD::HADD:
10125   case X86ISD::HSUB:
10126     return true;
10127   }
10128   return false;
10129 }
10130 
10131 /// This is a helper function of LowerToHorizontalOp().
10132 /// This function checks whether the input build_vector \p N implements a
10133 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
10134 /// may not match the layout of an x86 256-bit horizontal instruction.
10135 /// In other words, if this returns true, then some extraction/insertion will
10136 /// be required to produce a valid horizontal instruction.
10137 ///
10138 /// Parameter \p Opcode defines the kind of horizontal operation to match.
10139 /// For example, if \p Opcode is equal to ISD::ADD, then this function
10140 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
10141 /// is equal to ISD::SUB, then this function checks if this is a horizontal
10142 /// arithmetic sub.
10143 ///
10144 /// This function only analyzes elements of \p N whose indices are
10145 /// in range [BaseIdx, LastIdx).
10146 ///
10147 /// TODO: This function was originally used to match both real and fake partial
10148 /// horizontal operations, but the index-matching logic is incorrect for that.
10149 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
10150 /// code because it is only used for partial h-op matching now?
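///
/// For example, over indices [0, 4) of a v8i32 build_vector, the elements
///   A0+A1, A2+A3, B0+B1, B2+B3
/// match a single 128-bit HADD part, returning A in \p V0 and B in \p V1.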
10151 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
10152                                   SelectionDAG &DAG,
10153                                   unsigned BaseIdx, unsigned LastIdx,
10154                                   SDValue &V0, SDValue &V1) {
10155   EVT VT = N->getValueType(0);
10156   assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
10157   assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
10158   assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
10159          "Invalid Vector in input!");
10160 
10161   bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
10162   bool CanFold = true;
10163   unsigned ExpectedVExtractIdx = BaseIdx;
10164   unsigned NumElts = LastIdx - BaseIdx;
10165   V0 = DAG.getUNDEF(VT);
10166   V1 = DAG.getUNDEF(VT);
10167 
10168   // Check if N implements a horizontal binop.
10169   for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
10170     SDValue Op = N->getOperand(i + BaseIdx);
10171 
10172     // Skip UNDEFs.
10173     if (Op->isUndef()) {
10174       // Update the expected vector extract index.
10175       if (i * 2 == NumElts)
10176         ExpectedVExtractIdx = BaseIdx;
10177       ExpectedVExtractIdx += 2;
10178       continue;
10179     }
10180 
10181     CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
10182 
10183     if (!CanFold)
10184       break;
10185 
10186     SDValue Op0 = Op.getOperand(0);
10187     SDValue Op1 = Op.getOperand(1);
10188 
10189     // Try to match the following pattern:
10190     // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
10191     CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
10192         Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
10193         Op0.getOperand(0) == Op1.getOperand(0) &&
10194         isa<ConstantSDNode>(Op0.getOperand(1)) &&
10195         isa<ConstantSDNode>(Op1.getOperand(1)));
10196     if (!CanFold)
10197       break;
10198 
10199     unsigned I0 = Op0.getConstantOperandVal(1);
10200     unsigned I1 = Op1.getConstantOperandVal(1);
10201 
10202     if (i * 2 < NumElts) {
10203       if (V0.isUndef()) {
10204         V0 = Op0.getOperand(0);
10205         if (V0.getValueType() != VT)
10206           return false;
10207       }
10208     } else {
10209       if (V1.isUndef()) {
10210         V1 = Op0.getOperand(0);
10211         if (V1.getValueType() != VT)
10212           return false;
10213       }
10214       if (i * 2 == NumElts)
10215         ExpectedVExtractIdx = BaseIdx;
10216     }
10217 
10218     SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
10219     if (I0 == ExpectedVExtractIdx)
10220       CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
10221     else if (IsCommutable && I1 == ExpectedVExtractIdx) {
10222       // Try to match the following dag sequence:
10223       // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
10224       CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
10225     } else
10226       CanFold = false;
10227 
10228     ExpectedVExtractIdx += 2;
10229   }
10230 
10231   return CanFold;
10232 }
10233 
10234 /// Emit a sequence of two 128-bit horizontal add/sub followed by
10235 /// a concat_vector.
10236 ///
10237 /// This is a helper function of LowerToHorizontalOp().
10238 /// This function expects two 256-bit vectors called V0 and V1.
10239 /// At first, each vector is split into two separate 128-bit vectors.
10240 /// Then, the resulting 128-bit vectors are used to implement two
10241 /// horizontal binary operations.
10242 ///
10243 /// The kind of horizontal binary operation is defined by \p X86Opcode.
10244 ///
10245 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs to
10246 /// the two new horizontal binops.
10247 /// When Mode is set, the first horizontal binop dag node would take as input
10248 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
10249 /// horizontal binop dag node would take as input the lower 128-bit of V1
10250 /// and the upper 128-bit of V1.
10251 ///   Example:
10252 ///     HADD V0_LO, V0_HI
10253 ///     HADD V1_LO, V1_HI
10254 ///
10255 /// Otherwise, the first horizontal binop dag node takes as input the lower
10256 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
10257 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
10258 ///   Example:
10259 ///     HADD V0_LO, V1_LO
10260 ///     HADD V0_HI, V1_HI
10261 ///
10262 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
10263 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
10264 /// the upper 128-bits of the result.
10265 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
10266                                      const SDLoc &DL, SelectionDAG &DAG,
10267                                      unsigned X86Opcode, bool Mode,
10268                                      bool isUndefLO, bool isUndefHI) {
10269   MVT VT = V0.getSimpleValueType();
10270   assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
10271          "Invalid nodes in input!");
10272 
10273   unsigned NumElts = VT.getVectorNumElements();
10274   SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
10275   SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
10276   SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
10277   SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
10278   MVT NewVT = V0_LO.getSimpleValueType();
10279 
10280   SDValue LO = DAG.getUNDEF(NewVT);
10281   SDValue HI = DAG.getUNDEF(NewVT);
10282 
10283   if (Mode) {
10284     // Don't emit a horizontal binop if the result is expected to be UNDEF.
10285     if (!isUndefLO && !V0->isUndef())
10286       LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
10287     if (!isUndefHI && !V1->isUndef())
10288       HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
10289   } else {
10290     // Don't emit a horizontal binop if the result is expected to be UNDEF.
10291     if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
10292       LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
10293 
10294     if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
10295       HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
10296   }
10297 
10298   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
10299 }
10300 
10301 /// Returns true iff \p BV builds a vector with the result equivalent to
10302 /// the result of ADDSUB/SUBADD operation.
10303 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
10304 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
10305 /// \p Opnd0 and \p Opnd1.
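///
/// For example, a v4f32 build_vector of the form
///   (fsub A0,B0), (fadd A1,B1), (fsub A2,B2), (fadd A3,B3)
/// (with Ai/Bi extracted from vectors A and B) is recognized with
/// \p Opnd0 = A, \p Opnd1 = B and \p IsSubAdd = false.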
10306 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
10307                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
10308                              SDValue &Opnd0, SDValue &Opnd1,
10309                              unsigned &NumExtracts,
10310                              bool &IsSubAdd) {
10311 
10312   MVT VT = BV->getSimpleValueType(0);
10313   if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
10314     return false;
10315 
10316   unsigned NumElts = VT.getVectorNumElements();
10317   SDValue InVec0 = DAG.getUNDEF(VT);
10318   SDValue InVec1 = DAG.getUNDEF(VT);
10319 
10320   NumExtracts = 0;
10321 
10322   // Odd-numbered elements in the input build vector are obtained from
10323   // adding/subtracting two integer/float elements.
10324   // Even-numbered elements in the input build vector are obtained from
10325   // subtracting/adding two integer/float elements.
10326   unsigned Opc[2] = {0, 0};
10327   for (unsigned i = 0, e = NumElts; i != e; ++i) {
10328     SDValue Op = BV->getOperand(i);
10329 
10330     // Skip 'undef' values.
10331     unsigned Opcode = Op.getOpcode();
10332     if (Opcode == ISD::UNDEF)
10333       continue;
10334 
10335     // Early exit if we found an unexpected opcode.
10336     if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
10337       return false;
10338 
10339     SDValue Op0 = Op.getOperand(0);
10340     SDValue Op1 = Op.getOperand(1);
10341 
10342     // Try to match the following pattern:
10343     // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
10344     // Early exit if we cannot match that sequence.
10345     if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10346         Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10347         !isa<ConstantSDNode>(Op0.getOperand(1)) ||
10348         Op0.getOperand(1) != Op1.getOperand(1))
10349       return false;
10350 
10351     unsigned I0 = Op0.getConstantOperandVal(1);
10352     if (I0 != i)
10353       return false;
10354 
10355     // We found a valid add/sub node; make sure it's the same opcode as previous
10356     // elements for this parity.
10357     if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
10358       return false;
10359     Opc[i % 2] = Opcode;
10360 
10361     // Update InVec0 and InVec1.
10362     if (InVec0.isUndef()) {
10363       InVec0 = Op0.getOperand(0);
10364       if (InVec0.getSimpleValueType() != VT)
10365         return false;
10366     }
10367     if (InVec1.isUndef()) {
10368       InVec1 = Op1.getOperand(0);
10369       if (InVec1.getSimpleValueType() != VT)
10370         return false;
10371     }
10372 
10373     // Make sure that the operands of each add/sub node always
10374     // come from the same pair of vectors.
10375     if (InVec0 != Op0.getOperand(0)) {
10376       if (Opcode == ISD::FSUB)
10377         return false;
10378 
10379       // FADD is commutable. Try to commute the operands
10380       // and then test again.
10381       std::swap(Op0, Op1);
10382       if (InVec0 != Op0.getOperand(0))
10383         return false;
10384     }
10385 
10386     if (InVec1 != Op1.getOperand(0))
10387       return false;
10388 
10389     // Increment the number of extractions done.
10390     ++NumExtracts;
10391   }
10392 
10393   // Ensure we have found an opcode for both parities and that they are
10394   // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
10395   // inputs are undef.
10396   if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
10397       InVec0.isUndef() || InVec1.isUndef())
10398     return false;
10399 
10400   IsSubAdd = Opc[0] == ISD::FADD;
10401 
10402   Opnd0 = InVec0;
10403   Opnd1 = InVec1;
10404   return true;
10405 }
10406 
10407 /// Returns true if it is possible to fold MUL and an idiom that has already been
10408 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
10409 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
10410 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
10411 ///
10412 /// Prior to calling this function it should be known that there is some
10413 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
10414 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
10415 /// before replacement of such SDNode with ADDSUB operation. Thus the number
10416 /// of \p Opnd0 uses is expected to be equal to 2.
10417 /// For example, this function may be called for the following IR:
10418 ///    %AB = fmul fast <2 x double> %A, %B
10419 ///    %Sub = fsub fast <2 x double> %AB, %C
10420 ///    %Add = fadd fast <2 x double> %AB, %C
10421 ///    %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
10422 ///                            <2 x i32> <i32 0, i32 3>
10423 /// There is a def for %Addsub here, which potentially can be replaced by
10424 /// X86ISD::ADDSUB operation:
10425 ///    %Addsub = X86ISD::ADDSUB %AB, %C
10426 /// and such ADDSUB can further be replaced with FMADDSUB:
10427 ///    %Addsub = FMADDSUB %A, %B, %C.
10428 ///
10429 /// The main reason why this method is called before the replacement of the
10430 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
10431 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
10432 /// FMADDSUB is.
10433 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
10434                                  SelectionDAG &DAG,
10435                                  SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
10436                                  unsigned ExpectedUses) {
10437   if (Opnd0.getOpcode() != ISD::FMUL ||
10438       !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
10439     return false;
10440 
10441   // FIXME: These checks must match the similar ones in
10442   // DAGCombiner::visitFADDForFMACombine. It would be good to have one
10443   // function that would answer if it is Ok to fuse MUL + ADD to FMADD
10444   // or MUL + ADDSUB to FMADDSUB.
10445   const TargetOptions &Options = DAG.getTarget().Options;
10446   bool AllowFusion =
10447       (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
10448   if (!AllowFusion)
10449     return false;
10450 
10451   Opnd2 = Opnd1;
10452   Opnd1 = Opnd0.getOperand(1);
10453   Opnd0 = Opnd0.getOperand(0);
10454 
10455   return true;
10456 }
10457 
10458 /// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or
10459 /// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or
10460 /// X86ISD::FMSUBADD node accordingly.
10461 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
10462                                        const X86Subtarget &Subtarget,
10463                                        SelectionDAG &DAG) {
10464   SDValue Opnd0, Opnd1;
10465   unsigned NumExtracts;
10466   bool IsSubAdd;
10467   if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
10468                         IsSubAdd))
10469     return SDValue();
10470 
10471   MVT VT = BV->getSimpleValueType(0);
10472   SDLoc DL(BV);
10473 
10474   // Try to generate X86ISD::FMADDSUB node here.
10475   SDValue Opnd2;
10476   if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
10477     unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
10478     return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
10479   }
10480 
10481   // We only support ADDSUB.
10482   if (IsSubAdd)
10483     return SDValue();
10484 
10485   // There are no known X86 targets with 512-bit ADDSUB instructions!
10486   // Convert to blend(fsub,fadd).
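  // For example, for v8f64 the shuffle mask built below is
  //   { 0, 9, 2, 11, 4, 13, 6, 15 }
  // i.e. even lanes are taken from Sub and odd lanes from Add.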
10487   if (VT.is512BitVector()) {
10488     SmallVector<int> Mask;
10489     for (int I = 0, E = VT.getVectorNumElements(); I != E; I += 2) {
10490         Mask.push_back(I);
10491         Mask.push_back(I + E + 1);
10492     }
10493     SDValue Sub = DAG.getNode(ISD::FSUB, DL, VT, Opnd0, Opnd1);
10494     SDValue Add = DAG.getNode(ISD::FADD, DL, VT, Opnd0, Opnd1);
10495     return DAG.getVectorShuffle(VT, DL, Sub, Add, Mask);
10496   }
10497 
10498   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
10499 }
10500 
10501 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
10502                              unsigned &HOpcode, SDValue &V0, SDValue &V1) {
10503   // Initialize outputs to known values.
10504   MVT VT = BV->getSimpleValueType(0);
10505   HOpcode = ISD::DELETED_NODE;
10506   V0 = DAG.getUNDEF(VT);
10507   V1 = DAG.getUNDEF(VT);
10508 
10509   // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
10510   // half of the result is calculated independently from the 128-bit halves of
10511   // the inputs, so that makes the index-checking logic below more complicated.
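  // For example, a 256-bit (v8i32) HADD of A and B produces
  //   { A0+A1, A2+A3, B0+B1, B2+B3, A4+A5, A6+A7, B4+B5, B6+B7 }
  // rather than a fully linear pairwise reduction across both inputs.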
10512   unsigned NumElts = VT.getVectorNumElements();
10513   unsigned GenericOpcode = ISD::DELETED_NODE;
10514   unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
10515   unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
10516   unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
10517   for (unsigned i = 0; i != Num128BitChunks; ++i) {
10518     for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
10519       // Ignore undef elements.
10520       SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
10521       if (Op.isUndef())
10522         continue;
10523 
10524       // If there's an opcode mismatch, we're done.
10525       if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
10526         return false;
10527 
10528       // Initialize horizontal opcode.
10529       if (HOpcode == ISD::DELETED_NODE) {
10530         GenericOpcode = Op.getOpcode();
10531         switch (GenericOpcode) {
10532         case ISD::ADD: HOpcode = X86ISD::HADD; break;
10533         case ISD::SUB: HOpcode = X86ISD::HSUB; break;
10534         case ISD::FADD: HOpcode = X86ISD::FHADD; break;
10535         case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
10536         default: return false;
10537         }
10538       }
10539 
10540       SDValue Op0 = Op.getOperand(0);
10541       SDValue Op1 = Op.getOperand(1);
10542       if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10543           Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
10544           Op0.getOperand(0) != Op1.getOperand(0) ||
10545           !isa<ConstantSDNode>(Op0.getOperand(1)) ||
10546           !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
10547         return false;
10548 
10549       // The source vector is chosen based on which 64-bit half of the
10550       // destination vector is being calculated.
10551       if (j < NumEltsIn64Bits) {
10552         if (V0.isUndef())
10553           V0 = Op0.getOperand(0);
10554       } else {
10555         if (V1.isUndef())
10556           V1 = Op0.getOperand(0);
10557       }
10558 
10559       SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
10560       if (SourceVec != Op0.getOperand(0))
10561         return false;
10562 
10563       // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
10564       unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
10565       unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
10566       unsigned ExpectedIndex = i * NumEltsIn128Bits +
10567                                (j % NumEltsIn64Bits) * 2;
10568       if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
10569         continue;
10570 
10571       // If this is not a commutative op, this does not match.
10572       if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
10573         return false;
10574 
10575       // Addition is commutative, so try swapping the extract indexes.
10576       // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
10577       if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
10578         continue;
10579 
10580       // Extract indexes do not match horizontal requirement.
10581       return false;
10582     }
10583   }
10584   // We matched. Opcode and operands are returned by reference as arguments.
10585   return true;
10586 }
10587 
10588 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
10589                                     SelectionDAG &DAG, unsigned HOpcode,
10590                                     SDValue V0, SDValue V1) {
10591   // If either input vector is not the same size as the build vector,
10592   // extract/insert the low bits to the correct size.
10593   // This is free (examples: zmm --> xmm, xmm --> ymm).
10594   MVT VT = BV->getSimpleValueType(0);
10595   unsigned Width = VT.getSizeInBits();
10596   if (V0.getValueSizeInBits() > Width)
10597     V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
10598   else if (V0.getValueSizeInBits() < Width)
10599     V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
10600 
10601   if (V1.getValueSizeInBits() > Width)
10602     V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
10603   else if (V1.getValueSizeInBits() < Width)
10604     V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
10605 
10606   unsigned NumElts = VT.getVectorNumElements();
10607   APInt DemandedElts = APInt::getAllOnes(NumElts);
10608   for (unsigned i = 0; i != NumElts; ++i)
10609     if (BV->getOperand(i).isUndef())
10610       DemandedElts.clearBit(i);
10611 
10612   // If we don't need the upper xmm, then perform as a xmm hop.
10613   unsigned HalfNumElts = NumElts / 2;
10614   if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
10615     MVT HalfVT = VT.getHalfNumVectorElementsVT();
10616     V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
10617     V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
10618     SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
10619     return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
10620   }
10621 
10622   return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
10623 }
10624 
10625 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
10626 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
10627                                    const X86Subtarget &Subtarget,
10628                                    SelectionDAG &DAG) {
10629   // We need at least 2 non-undef elements to make this worthwhile by default.
10630   unsigned NumNonUndefs =
10631       count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
10632   if (NumNonUndefs < 2)
10633     return SDValue();
10634 
10635   // There are 4 sets of horizontal math operations distinguished by type:
10636   // int/FP at 128-bit/256-bit. Each type was introduced with a different
10637   // subtarget feature. Try to match those "native" patterns first.
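  // (SSE3 added HADDPS/HADDPD for v4f32/v2f64, SSSE3 added PHADDW/PHADDD for
  // v8i16/v4i32, AVX added the 256-bit FP forms and AVX2 the 256-bit integer
  // forms.)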
10638   MVT VT = BV->getSimpleValueType(0);
10639   if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
10640       ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
10641       ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
10642       ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
10643     unsigned HOpcode;
10644     SDValue V0, V1;
10645     if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
10646       return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
10647   }
10648 
10649   // Try harder to match 256-bit ops by using extract/concat.
10650   if (!Subtarget.hasAVX() || !VT.is256BitVector())
10651     return SDValue();
10652 
10653   // Count the UNDEF operands in each half of the input build_vector.
10654   unsigned NumElts = VT.getVectorNumElements();
10655   unsigned Half = NumElts / 2;
10656   unsigned NumUndefsLO = 0;
10657   unsigned NumUndefsHI = 0;
10658   for (unsigned i = 0, e = Half; i != e; ++i)
10659     if (BV->getOperand(i)->isUndef())
10660       NumUndefsLO++;
10661 
10662   for (unsigned i = Half, e = NumElts; i != e; ++i)
10663     if (BV->getOperand(i)->isUndef())
10664       NumUndefsHI++;
10665 
10666   SDLoc DL(BV);
10667   SDValue InVec0, InVec1;
10668   if (VT == MVT::v8i32 || VT == MVT::v16i16) {
10669     SDValue InVec2, InVec3;
10670     unsigned X86Opcode;
10671     bool CanFold = true;
10672 
10673     if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
10674         isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
10675                               InVec3) &&
10676         ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
10677         ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
10678       X86Opcode = X86ISD::HADD;
10679     else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
10680                                    InVec1) &&
10681              isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
10682                                    InVec3) &&
10683              ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
10684              ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
10685       X86Opcode = X86ISD::HSUB;
10686     else
10687       CanFold = false;
10688 
10689     if (CanFold) {
10690       // Do not try to expand this build_vector into a pair of horizontal
10691       // add/sub if we can emit a pair of scalar add/sub.
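      // (If a half has only one non-undef element, a scalar op plus an insert
      // is expected to be cheaper than a horizontal op.)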
10692       if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
10693         return SDValue();
10694 
10695       // Convert this build_vector into a pair of horizontal binops followed by
10696       // a concat vector. We must adjust the outputs from the partial horizontal
10697       // matching calls above to account for undefined vector halves.
10698       SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
10699       SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
10700       assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
10701       bool isUndefLO = NumUndefsLO == Half;
10702       bool isUndefHI = NumUndefsHI == Half;
10703       return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
10704                                    isUndefHI);
10705     }
10706   }
10707 
10708   if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
10709       VT == MVT::v16i16) {
10710     unsigned X86Opcode;
10711     if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
10712       X86Opcode = X86ISD::HADD;
10713     else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
10714                                    InVec1))
10715       X86Opcode = X86ISD::HSUB;
10716     else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
10717                                    InVec1))
10718       X86Opcode = X86ISD::FHADD;
10719     else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
10720                                    InVec1))
10721       X86Opcode = X86ISD::FHSUB;
10722     else
10723       return SDValue();
10724 
10725     // Don't try to expand this build_vector into a pair of horizontal add/sub
10726     // if we can simply emit a pair of scalar add/sub.
10727     if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
10728       return SDValue();
10729 
10730     // Convert this build_vector into two horizontal add/sub followed by
10731     // a concat vector.
10732     bool isUndefLO = NumUndefsLO == Half;
10733     bool isUndefHI = NumUndefsHI == Half;
10734     return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
10735                                  isUndefLO, isUndefHI);
10736   }
10737 
10738   return SDValue();
10739 }
10740 
10741 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
10742                           SelectionDAG &DAG);
10743 
10744 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
10745 /// one of their operands is constant, lower to a pair of BUILD_VECTORs and
10746 /// just apply the bit operation to the vectors.
10747 /// NOTE: It's not in our interest to start making a general purpose vectorizer
10748 /// from this, but enough scalar bit operations are created from the later
10749 /// legalization + scalarization stages to need basic support.
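/// For example (illustrative): (build_vector (and x, 1), (and y, 2)) can be
/// rebuilt as (and (build_vector x, y), (build_vector 1, 2)).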
10750 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
10751                                        const X86Subtarget &Subtarget,
10752                                        SelectionDAG &DAG) {
10753   SDLoc DL(Op);
10754   MVT VT = Op->getSimpleValueType(0);
10755   unsigned NumElems = VT.getVectorNumElements();
10756   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10757 
10758   // Check that all elements have the same opcode.
10759   // TODO: Should we allow UNDEFS and if so how many?
10760   unsigned Opcode = Op->getOperand(0).getOpcode();
10761   for (unsigned i = 1; i < NumElems; ++i)
10762     if (Opcode != Op->getOperand(i).getOpcode())
10763       return SDValue();
10764 
10765   // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
10766   bool IsShift = false;
10767   switch (Opcode) {
10768   default:
10769     return SDValue();
10770   case ISD::SHL:
10771   case ISD::SRL:
10772   case ISD::SRA:
10773     IsShift = true;
10774     break;
10775   case ISD::AND:
10776   case ISD::XOR:
10777   case ISD::OR:
10778     // Don't do this if the buildvector is a splat - we'd replace one
10779     // constant with an entire vector.
10780     if (Op->getSplatValue())
10781       return SDValue();
10782     if (!TLI.isOperationLegalOrPromote(Opcode, VT))
10783       return SDValue();
10784     break;
10785   }
10786 
10787   SmallVector<SDValue, 4> LHSElts, RHSElts;
10788   for (SDValue Elt : Op->ops()) {
10789     SDValue LHS = Elt.getOperand(0);
10790     SDValue RHS = Elt.getOperand(1);
10791 
10792     // We expect the canonicalized RHS operand to be the constant.
10793     if (!isa<ConstantSDNode>(RHS))
10794       return SDValue();
10795 
10796     // Extend shift amounts.
10797     if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
10798       if (!IsShift)
10799         return SDValue();
10800       RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
10801     }
10802 
10803     LHSElts.push_back(LHS);
10804     RHSElts.push_back(RHS);
10805   }
10806 
10807   // Limit to shifts by uniform immediates.
10808   // TODO: Only accept vXi8/vXi64 special cases?
10809   // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
10810   if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
10811     return SDValue();
10812 
10813   SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
10814   SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
10815   SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
10816 
10817   if (!IsShift)
10818     return Res;
10819 
10820   // Immediately lower the shift to ensure the constant build vector doesn't
10821   // get converted to a constant pool before the shift is lowered.
10822   return LowerShift(Res, Subtarget, DAG);
10823 }
10824 
10825 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
10826 /// functionality to do this, so it's all zeros, all ones, or some derivation
10827 /// that is cheap to calculate.
10828 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
10829                                          const X86Subtarget &Subtarget) {
10830   SDLoc DL(Op);
10831   MVT VT = Op.getSimpleValueType();
10832 
10833   // Vectors containing all zeros can be matched by pxor and xorps.
10834   if (ISD::isBuildVectorAllZeros(Op.getNode()))
10835     return Op;
10836 
10837   // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
10838   // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
10839   // vpcmpeqd on 256-bit vectors.
10840   if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
10841     if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
10842       return Op;
10843 
10844     return getOnesVector(VT, DAG, DL);
10845   }
10846 
10847   return SDValue();
10848 }
10849 
10850 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
10851 /// from a vector of source values and a vector of extraction indices.
10852 /// The vectors might be manipulated to match the type of the permute op.
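/// For example (illustrative): a v16i8 source vector with a v16i8 index vector
/// can be lowered directly to PSHUFB on SSSE3 targets.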
10853 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
10854                                      SDLoc &DL, SelectionDAG &DAG,
10855                                      const X86Subtarget &Subtarget) {
10856   MVT ShuffleVT = VT;
10857   EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
10858   unsigned NumElts = VT.getVectorNumElements();
10859   unsigned SizeInBits = VT.getSizeInBits();
10860 
10861   // Adjust IndicesVec to match VT size.
10862   assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
10863          "Illegal variable permute mask size");
10864   if (IndicesVec.getValueType().getVectorNumElements() > NumElts) {
10865     // Narrow/widen the indices vector to the correct size.
10866     if (IndicesVec.getValueSizeInBits() > SizeInBits)
10867       IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
10868                                     NumElts * VT.getScalarSizeInBits());
10869     else if (IndicesVec.getValueSizeInBits() < SizeInBits)
10870       IndicesVec = widenSubVector(IndicesVec, false, Subtarget, DAG,
10871                                   SDLoc(IndicesVec), SizeInBits);
10872     // Zero-extend the index elements within the vector.
10873     if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
10874       IndicesVec = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(IndicesVec),
10875                                IndicesVT, IndicesVec);
10876   }
10877   IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
10878 
10879   // Handle a SrcVec whose size doesn't match the VT size.
10880   if (SrcVec.getValueSizeInBits() != SizeInBits) {
10881     if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
10882       // Handle larger SrcVec by treating it as a larger permute.
10883       unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
10884       VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
10885       IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
10886       IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
10887                                   Subtarget, DAG, SDLoc(IndicesVec));
10888       SDValue NewSrcVec =
10889           createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
10890       if (NewSrcVec)
10891         return extractSubVector(NewSrcVec, 0, DAG, DL, SizeInBits);
10892       return SDValue();
10893     } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
10894       // Widen smaller SrcVec to match VT.
10895       SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
10896     } else
10897       return SDValue();
10898   }
10899 
10900   auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
10901     assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
10902     EVT SrcVT = Idx.getValueType();
10903     unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
10904     uint64_t IndexScale = 0;
10905     uint64_t IndexOffset = 0;
10906 
10907     // If we're scaling a smaller permute op, then we need to repeat the
10908     // indices, scaling and offsetting them as well.
10909     // e.g. v4i32 -> v16i8 (Scale = 4)
10910     // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
10911     // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
10912     for (uint64_t i = 0; i != Scale; ++i) {
10913       IndexScale |= Scale << (i * NumDstBits);
10914       IndexOffset |= i << (i * NumDstBits);
10915     }
10916 
10917     Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
10918                       DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
10919     Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
10920                       DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
10921     return Idx;
10922   };
10923 
10924   unsigned Opcode = 0;
10925   switch (VT.SimpleTy) {
10926   default:
10927     break;
10928   case MVT::v16i8:
10929     if (Subtarget.hasSSSE3())
10930       Opcode = X86ISD::PSHUFB;
10931     break;
10932   case MVT::v8i16:
10933     if (Subtarget.hasVLX() && Subtarget.hasBWI())
10934       Opcode = X86ISD::VPERMV;
10935     else if (Subtarget.hasSSSE3()) {
10936       Opcode = X86ISD::PSHUFB;
10937       ShuffleVT = MVT::v16i8;
10938     }
10939     break;
10940   case MVT::v4f32:
10941   case MVT::v4i32:
10942     if (Subtarget.hasAVX()) {
10943       Opcode = X86ISD::VPERMILPV;
10944       ShuffleVT = MVT::v4f32;
10945     } else if (Subtarget.hasSSSE3()) {
10946       Opcode = X86ISD::PSHUFB;
10947       ShuffleVT = MVT::v16i8;
10948     }
10949     break;
10950   case MVT::v2f64:
10951   case MVT::v2i64:
10952     if (Subtarget.hasAVX()) {
10953       // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
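      // (Doubling the index moves it into bit#1: index 0 stays 0, index 1
      // becomes 2.)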
10954       IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
10955       Opcode = X86ISD::VPERMILPV;
10956       ShuffleVT = MVT::v2f64;
10957     } else if (Subtarget.hasSSE41()) {
10958       // SSE41 can compare v2i64 - select between indices 0 and 1.
10959       return DAG.getSelectCC(
10960           DL, IndicesVec,
10961           getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
10962           DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
10963           DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
10964           ISD::CondCode::SETEQ);
10965     }
10966     break;
10967   case MVT::v32i8:
10968     if (Subtarget.hasVLX() && Subtarget.hasVBMI())
10969       Opcode = X86ISD::VPERMV;
10970     else if (Subtarget.hasXOP()) {
10971       SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
10972       SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
10973       SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
10974       SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
10975       return DAG.getNode(
10976           ISD::CONCAT_VECTORS, DL, VT,
10977           DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
10978           DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
10979     } else if (Subtarget.hasAVX()) {
10980       SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
10981       SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
10982       SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
10983       SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
10984       auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
10985                               ArrayRef<SDValue> Ops) {
10986         // Permute Lo and Hi and then select based on index range.
10987         // This works as PSHUFB uses bits[3:0] to permute elements and we don't
10988         // care about bit[7] as it's just an index vector.
10989         SDValue Idx = Ops[2];
10990         EVT VT = Idx.getValueType();
10991         return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
10992                                DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
10993                                DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
10994                                ISD::CondCode::SETGT);
10995       };
10996       SDValue Ops[] = {LoLo, HiHi, IndicesVec};
10997       return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
10998                               PSHUFBBuilder);
10999     }
11000     break;
11001   case MVT::v16i16:
11002     if (Subtarget.hasVLX() && Subtarget.hasBWI())
11003       Opcode = X86ISD::VPERMV;
11004     else if (Subtarget.hasAVX()) {
11005       // Scale to v32i8 and perform as v32i8.
11006       IndicesVec = ScaleIndices(IndicesVec, 2);
11007       return DAG.getBitcast(
11008           VT, createVariablePermute(
11009                   MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
11010                   DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
11011     }
11012     break;
11013   case MVT::v8f32:
11014   case MVT::v8i32:
11015     if (Subtarget.hasAVX2())
11016       Opcode = X86ISD::VPERMV;
11017     else if (Subtarget.hasAVX()) {
11018       SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
11019       SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
11020                                           {0, 1, 2, 3, 0, 1, 2, 3});
11021       SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
11022                                           {4, 5, 6, 7, 4, 5, 6, 7});
11023       if (Subtarget.hasXOP())
11024         return DAG.getBitcast(
11025             VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
11026                             IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
11027       // Permute Lo and Hi and then select based on index range.
11028       // This works as VPERMILPS only uses index bits[0:1] to permute elements.
11029       SDValue Res = DAG.getSelectCC(
11030           DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
11031           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
11032           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
11033           ISD::CondCode::SETGT);
11034       return DAG.getBitcast(VT, Res);
11035     }
11036     break;
11037   case MVT::v4i64:
11038   case MVT::v4f64:
11039     if (Subtarget.hasAVX512()) {
11040       if (!Subtarget.hasVLX()) {
11041         MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
11042         SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
11043                                 SDLoc(SrcVec));
11044         IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
11045                                     DAG, SDLoc(IndicesVec));
11046         SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
11047                                             DAG, Subtarget);
11048         return extract256BitVector(Res, 0, DAG, DL);
11049       }
11050       Opcode = X86ISD::VPERMV;
11051     } else if (Subtarget.hasAVX()) {
11052       SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
11053       SDValue LoLo =
11054           DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
11055       SDValue HiHi =
11056           DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
11057       // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
11058       IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
11059       if (Subtarget.hasXOP())
11060         return DAG.getBitcast(
11061             VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
11062                             IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
11063       // Permute Lo and Hi and then select based on index range.
11064       // This works as VPERMILPD only uses index bit[1] to permute elements.
11065       SDValue Res = DAG.getSelectCC(
11066           DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
11067           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
11068           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
11069           ISD::CondCode::SETGT);
11070       return DAG.getBitcast(VT, Res);
11071     }
11072     break;
11073   case MVT::v64i8:
11074     if (Subtarget.hasVBMI())
11075       Opcode = X86ISD::VPERMV;
11076     break;
11077   case MVT::v32i16:
11078     if (Subtarget.hasBWI())
11079       Opcode = X86ISD::VPERMV;
11080     break;
11081   case MVT::v16f32:
11082   case MVT::v16i32:
11083   case MVT::v8f64:
11084   case MVT::v8i64:
11085     if (Subtarget.hasAVX512())
11086       Opcode = X86ISD::VPERMV;
11087     break;
11088   }
11089   if (!Opcode)
11090     return SDValue();
11091 
11092   assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
11093          (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
11094          "Illegal variable permute shuffle type");
11095 
11096   uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
11097   if (Scale > 1)
11098     IndicesVec = ScaleIndices(IndicesVec, Scale);
11099 
11100   EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
11101   IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
11102 
11103   SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
11104   SDValue Res = Opcode == X86ISD::VPERMV
11105                     ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
11106                     : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
11107   return DAG.getBitcast(VT, Res);
11108 }
11109 
11110 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
11111 // reasoned to be a permutation of a vector by indices in a non-constant vector.
11112 // (build_vector (extract_elt V, (extract_elt I, 0)),
11113 //               (extract_elt V, (extract_elt I, 1)),
11114 //                    ...
11115 // ->
11116 // (vpermv I, V)
11117 //
11118 // TODO: Handle undefs
11119 // TODO: Utilize pshufb and zero mask blending to support more efficient
11120 // construction of vectors with constant-0 elements.
11121 static SDValue
11122 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
11123                                    const X86Subtarget &Subtarget) {
11124   SDValue SrcVec, IndicesVec;
11125   // Check for a match of the permute source vector and permute index elements.
11126   // This is done by checking that the i-th build_vector operand is of the form:
11127   // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
11128   for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
11129     SDValue Op = V.getOperand(Idx);
11130     if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11131       return SDValue();
11132 
11133     // If this is the first extract encountered in V, set the source vector,
11134     // otherwise verify the extract is from the previously defined source
11135     // vector.
11136     if (!SrcVec)
11137       SrcVec = Op.getOperand(0);
11138     else if (SrcVec != Op.getOperand(0))
11139       return SDValue();
11140     SDValue ExtractedIndex = Op->getOperand(1);
11141     // Peek through extends.
11142     if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
11143         ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
11144       ExtractedIndex = ExtractedIndex.getOperand(0);
11145     if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11146       return SDValue();
11147 
11148     // If this is the first extract from the index vector candidate, set the
11149     // indices vector, otherwise verify the extract is from the previously
11150     // defined indices vector.
11151     if (!IndicesVec)
11152       IndicesVec = ExtractedIndex.getOperand(0);
11153     else if (IndicesVec != ExtractedIndex.getOperand(0))
11154       return SDValue();
11155 
11156     auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
11157     if (!PermIdx || PermIdx->getAPIntValue() != Idx)
11158       return SDValue();
11159   }
11160 
11161   SDLoc DL(V);
11162   MVT VT = V.getSimpleValueType();
11163   return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
11164 }
11165 
11166 SDValue
11167 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
11168   SDLoc dl(Op);
11169 
11170   MVT VT = Op.getSimpleValueType();
11171   MVT EltVT = VT.getVectorElementType();
11172   MVT OpEltVT = Op.getOperand(0).getSimpleValueType();
11173   unsigned NumElems = Op.getNumOperands();
11174 
11175   // Generate vectors for predicate vectors.
11176   if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
11177     return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
11178 
11179   if (VT.getVectorElementType() == MVT::bf16 && Subtarget.hasBF16())
11180     return LowerBUILD_VECTORvXbf16(Op, DAG, Subtarget);
11181 
11182   if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
11183     return VectorConstant;
11184 
11185   unsigned EVTBits = EltVT.getSizeInBits();
11186   APInt UndefMask = APInt::getZero(NumElems);
11187   APInt FrozenUndefMask = APInt::getZero(NumElems);
11188   APInt ZeroMask = APInt::getZero(NumElems);
11189   APInt NonZeroMask = APInt::getZero(NumElems);
11190   bool IsAllConstants = true;
11191   SmallSet<SDValue, 8> Values;
11192   unsigned NumConstants = NumElems;
11193   for (unsigned i = 0; i < NumElems; ++i) {
11194     SDValue Elt = Op.getOperand(i);
11195     if (Elt.isUndef()) {
11196       UndefMask.setBit(i);
11197       continue;
11198     }
11199     if (Elt.getOpcode() == ISD::FREEZE && Elt.getOperand(0).isUndef()) {
11200       FrozenUndefMask.setBit(i);
11201       continue;
11202     }
11203     Values.insert(Elt);
11204     if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
11205       IsAllConstants = false;
11206       NumConstants--;
11207     }
11208     if (X86::isZeroNode(Elt)) {
11209       ZeroMask.setBit(i);
11210     } else {
11211       NonZeroMask.setBit(i);
11212     }
11213   }
11214 
11215   // All undef vector. Return an UNDEF.
11216   if (UndefMask.isAllOnes())
11217     return DAG.getUNDEF(VT);
11218 
11219   // If we have multiple FREEZE-UNDEF operands, we are likely going to end up
11220   // lowering into a suboptimal insertion sequence. Instead, thaw the UNDEF in
11221   // our source BUILD_VECTOR, create another FREEZE-UNDEF splat BUILD_VECTOR,
11222   // and blend the FREEZE-UNDEF operands back in.
11223   // FIXME: is this worthwhile even for a single FREEZE-UNDEF operand?
11224   if (unsigned NumFrozenUndefElts = FrozenUndefMask.countPopulation();
11225       NumFrozenUndefElts >= 2 && NumFrozenUndefElts < NumElems) {
11226     SmallVector<int, 16> BlendMask(NumElems, -1);
11227     SmallVector<SDValue, 16> Elts(NumElems, DAG.getUNDEF(OpEltVT));
11228     for (unsigned i = 0; i < NumElems; ++i) {
11229       if (UndefMask[i]) {
11230         BlendMask[i] = -1;
11231         continue;
11232       }
11233       BlendMask[i] = i;
11234       if (!FrozenUndefMask[i])
11235         Elts[i] = Op.getOperand(i);
11236       else
11237         BlendMask[i] += NumElems;
11238     }
11239     SDValue EltsBV = DAG.getBuildVector(VT, dl, Elts);
11240     SDValue FrozenUndefElt = DAG.getFreeze(DAG.getUNDEF(OpEltVT));
11241     SDValue FrozenUndefBV = DAG.getSplatBuildVector(VT, dl, FrozenUndefElt);
11242     return DAG.getVectorShuffle(VT, dl, EltsBV, FrozenUndefBV, BlendMask);
11243   }
11244 
11245   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
11246 
11247   // If the upper elts of a ymm/zmm are undef/zero then we might be better off
11248   // lowering to a smaller build vector and padding with undef/zero.
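  // For example (illustrative): a v8i32 build vector whose upper four operands
  // are all zero can be built as a v4i32 build vector and widened with zeros.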
11249   if ((VT.is256BitVector() || VT.is512BitVector()) &&
11250       !isFoldableUseOfShuffle(BV)) {
11251     unsigned UpperElems = NumElems / 2;
11252     APInt UndefOrZeroMask = UndefMask | ZeroMask;
11253     unsigned NumUpperUndefsOrZeros = UndefOrZeroMask.countLeadingOnes();
11254     if (NumUpperUndefsOrZeros >= UpperElems) {
11255       if (VT.is512BitVector() &&
11256           NumUpperUndefsOrZeros >= (NumElems - (NumElems / 4)))
11257         UpperElems = NumElems - (NumElems / 4);
11258       bool UndefUpper = UndefMask.countLeadingOnes() >= UpperElems;
11259       MVT LowerVT = MVT::getVectorVT(EltVT, NumElems - UpperElems);
11260       SDValue NewBV =
11261           DAG.getBuildVector(LowerVT, dl, Op->ops().drop_back(UpperElems));
11262       return widenSubVector(VT, NewBV, !UndefUpper, Subtarget, DAG, dl);
11263     }
11264   }
11265 
11266   if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
11267     return AddSub;
11268   if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
11269     return HorizontalOp;
11270   if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
11271     return Broadcast;
11272   if (SDValue BitOp = lowerBuildVectorToBitOp(BV, Subtarget, DAG))
11273     return BitOp;
11274 
11275   unsigned NumZero = ZeroMask.countPopulation();
11276   unsigned NumNonZero = NonZeroMask.countPopulation();
11277 
11278   // If we are inserting one variable into a vector of non-zero constants, try
11279   // to avoid loading each constant element as a scalar. Load the constants as a
11280   // vector and then insert the variable scalar element. If insertion is not
11281   // supported, fall back to a shuffle to get the scalar blended with the
11282   // constants. Insertion into a zero vector is handled as a special-case
11283   // somewhere below here.
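  // For example (illustrative): <C0, C1, x, C3> becomes a constant-pool load of
  // <C0, C1, undef, C3> followed by an insert of the variable x at index 2.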
11284   if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
11285       (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
11286        isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
11287     // Create an all-constant vector. The variable element in the old
11288     // build vector is replaced by undef in the constant vector. Save the
11289     // variable scalar element and its index for use in the insertelement.
11290     LLVMContext &Context = *DAG.getContext();
11291     Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
11292     SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
11293     SDValue VarElt;
11294     SDValue InsIndex;
11295     for (unsigned i = 0; i != NumElems; ++i) {
11296       SDValue Elt = Op.getOperand(i);
11297       if (auto *C = dyn_cast<ConstantSDNode>(Elt))
11298         ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
11299       else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
11300         ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
11301       else if (!Elt.isUndef()) {
11302         assert(!VarElt.getNode() && !InsIndex.getNode() &&
11303                "Expected one variable element in this vector");
11304         VarElt = Elt;
11305         InsIndex = DAG.getVectorIdxConstant(i, dl);
11306       }
11307     }
11308     Constant *CV = ConstantVector::get(ConstVecOps);
11309     SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
11310 
11311     // The constants we just created may not be legal (eg, floating point). We
11312     // must lower the vector right here because we can not guarantee that we'll
11313     // legalize it before loading it. This is also why we could not just create
11314     // a new build vector here. If the build vector contains illegal constants,
11315     // it could get split back up into a series of insert elements.
11316     // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
11317     SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
11318     MachineFunction &MF = DAG.getMachineFunction();
11319     MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
11320     SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
11321     unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
11322     unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
11323     if (InsertC < NumEltsInLow128Bits)
11324       return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
11325 
11326     // There's no good way to insert into the high elements of a >128-bit
11327     // vector, so use shuffles to avoid an extract/insert sequence.
11328     assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
11329     assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
11330     SmallVector<int, 8> ShuffleMask;
11331     unsigned NumElts = VT.getVectorNumElements();
11332     for (unsigned i = 0; i != NumElts; ++i)
11333       ShuffleMask.push_back(i == InsertC ? NumElts : i);
11334     SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
11335     return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
11336   }
11337 
11338   // Special case for single non-zero, non-undef, element.
11339   if (NumNonZero == 1) {
11340     unsigned Idx = NonZeroMask.countTrailingZeros();
11341     SDValue Item = Op.getOperand(Idx);
11342 
11343     // If we have a constant or non-constant insertion into the low element of
11344     // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
11345     // the rest of the elements.  This will be matched as movd/movq/movss/movsd
11346     // depending on what the source datatype is.
11347     if (Idx == 0) {
11348       if (NumZero == 0)
11349         return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
11350 
11351       if (EltVT == MVT::i32 || EltVT == MVT::f16 || EltVT == MVT::f32 ||
11352           EltVT == MVT::f64 || (EltVT == MVT::i64 && Subtarget.is64Bit()) ||
11353           (EltVT == MVT::i16 && Subtarget.hasFP16())) {
11354         assert((VT.is128BitVector() || VT.is256BitVector() ||
11355                 VT.is512BitVector()) &&
11356                "Expected an SSE value type!");
11357         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
11358         // Turn it into a MOVL (i.e. movsh, movss, movsd, movw or movd) to a
11359         // zero vector.
11360         return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
11361       }
11362 
11363       // We can't directly insert an i8 or i16 into a vector, so zero extend
11364       // it to i32 first.
11365       if (EltVT == MVT::i16 || EltVT == MVT::i8) {
11366         Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
11367         MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
11368         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
11369         Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
11370         return DAG.getBitcast(VT, Item);
11371       }
11372     }
11373 
11374     // Is it a vector logical left shift?
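    // (For example, building v2i64 <0, x> is the same as shifting
    // scalar_to_vector(x) left by half the vector width.)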
11375     if (NumElems == 2 && Idx == 1 &&
11376         X86::isZeroNode(Op.getOperand(0)) &&
11377         !X86::isZeroNode(Op.getOperand(1))) {
11378       unsigned NumBits = VT.getSizeInBits();
11379       return getVShift(true, VT,
11380                        DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
11381                                    VT, Op.getOperand(1)),
11382                        NumBits/2, DAG, *this, dl);
11383     }
11384 
11385     if (IsAllConstants) // Otherwise, it's better to do a constpool load.
11386       return SDValue();
11387 
11388     // Otherwise, if this is a vector with i32 or f32 elements, and the element
11389     // is a non-constant being inserted into an element other than the low one,
11390     // we can't use a constant pool load.  Instead, use SCALAR_TO_VECTOR (aka
11391     // movd/movss) to move this into the low element, then shuffle it into
11392     // place.
11393     if (EVTBits == 32) {
11394       Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
11395       return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
11396     }
11397   }
11398 
11399   // Splat is obviously ok. Let legalizer expand it to a shuffle.
11400   if (Values.size() == 1) {
11401     if (EVTBits == 32) {
11402       // Instead of a shuffle like this:
11403       // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
11404       // Check if it's possible to issue this instead.
11405       // shuffle (vload ptr), undef, <1, 1, 1, 1>
11406       unsigned Idx = NonZeroMask.countTrailingZeros();
11407       SDValue Item = Op.getOperand(Idx);
11408       if (Op.getNode()->isOnlyUserOf(Item.getNode()))
11409         return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
11410     }
11411     return SDValue();
11412   }
11413 
11414   // A vector full of immediates; various special cases are already
11415   // handled, so this is best done with a single constant-pool load.
11416   if (IsAllConstants)
11417     return SDValue();
11418 
11419   if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
11420       return V;
11421 
11422   // See if we can use a vector load to get all of the elements.
11423   {
11424     SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
11425     if (SDValue LD =
11426             EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
11427       return LD;
11428   }
11429 
11430   // If this is a splat of pairs of 32-bit elements, we can use a narrower
11431   // build_vector and broadcast it.
11432   // TODO: We could probably generalize this more.
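  // For example (illustrative): v8i32 <a, b, a, b, a, b, a, b> can be built as
  // v4i32 <a, b, undef, undef>, bitcast to v2i64 and broadcast to full width.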
11433   if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
11434     SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
11435                        DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
11436     auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
11437       // Make sure all the even/odd operands match.
11438       for (unsigned i = 2; i != NumElems; ++i)
11439         if (Ops[i % 2] != Op.getOperand(i))
11440           return false;
11441       return true;
11442     };
11443     if (CanSplat(Op, NumElems, Ops)) {
11444       MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
11445       MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
11446       // Create a new build vector and cast to v2i64/v2f64.
11447       SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
11448                                      DAG.getBuildVector(NarrowVT, dl, Ops));
11449       // Broadcast from v2i64/v2f64 and cast to final VT.
11450       MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems / 2);
11451       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
11452                                             NewBV));
11453     }
11454   }
11455 
11456   // For AVX-length vectors, build the individual 128-bit pieces and use
11457   // shuffles to put them in place.
11458   if (VT.getSizeInBits() > 128) {
11459     MVT HVT = MVT::getVectorVT(EltVT, NumElems / 2);
11460 
11461     // Build both the lower and upper subvector.
11462     SDValue Lower =
11463         DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
11464     SDValue Upper = DAG.getBuildVector(
11465         HVT, dl, Op->ops().slice(NumElems / 2, NumElems / 2));
11466 
11467     // Recreate the wider vector with the lower and upper part.
11468     return concatSubVectors(Lower, Upper, DAG, dl);
11469   }
11470 
11471   // Let legalizer expand 2-wide build_vectors.
11472   if (EVTBits == 64) {
11473     if (NumNonZero == 1) {
11474       // One half is zero or undef.
11475       unsigned Idx = NonZeroMask.countTrailingZeros();
11476       SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
11477                                Op.getOperand(Idx));
11478       return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
11479     }
11480     return SDValue();
11481   }
11482 
11483   // If element VT is < 32 bits, convert it to inserts into a zero vector.
11484   if (EVTBits == 8 && NumElems == 16)
11485     if (SDValue V = LowerBuildVectorv16i8(Op, NonZeroMask, NumNonZero, NumZero,
11486                                           DAG, Subtarget))
11487       return V;
11488 
11489   if (EltVT == MVT::i16 && NumElems == 8)
11490     if (SDValue V = LowerBuildVectorv8i16(Op, NonZeroMask, NumNonZero, NumZero,
11491                                           DAG, Subtarget))
11492       return V;
11493 
11494   // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
11495   if (EVTBits == 32 && NumElems == 4)
11496     if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
11497       return V;
11498 
11499   // If element VT is == 32 bits, turn it into a number of shuffles.
11500   if (NumElems == 4 && NumZero > 0) {
11501     SmallVector<SDValue, 8> Ops(NumElems);
11502     for (unsigned i = 0; i < 4; ++i) {
11503       bool isZero = !NonZeroMask[i];
11504       if (isZero)
11505         Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
11506       else
11507         Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
11508     }
11509 
11510     for (unsigned i = 0; i < 2; ++i) {
11511       switch (NonZeroMask.extractBitsAsZExtValue(2, i * 2)) {
11512         default: llvm_unreachable("Unexpected NonZero count");
11513         case 0:
11514           Ops[i] = Ops[i*2];  // Must be a zero vector.
11515           break;
11516         case 1:
11517           Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
11518           break;
11519         case 2:
11520           Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
11521           break;
11522         case 3:
11523           Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
11524           break;
11525       }
11526     }
11527 
11528     bool Reverse1 = NonZeroMask.extractBitsAsZExtValue(2, 0) == 2;
11529     bool Reverse2 = NonZeroMask.extractBitsAsZExtValue(2, 2) == 2;
11530     int MaskVec[] = {
11531       Reverse1 ? 1 : 0,
11532       Reverse1 ? 0 : 1,
11533       static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
11534       static_cast<int>(Reverse2 ? NumElems   : NumElems+1)
11535     };
11536     return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
11537   }
11538 
11539   assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
11540 
11541   // Check for a build vector from mostly shuffle plus few inserting.
11542   if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
11543     return Sh;
11544 
11545   // For SSE 4.1, use insertps to put the high elements into the low element.
11546   if (Subtarget.hasSSE41() && EltVT != MVT::f16) {
11547     SDValue Result;
11548     if (!Op.getOperand(0).isUndef())
11549       Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
11550     else
11551       Result = DAG.getUNDEF(VT);
11552 
11553     for (unsigned i = 1; i < NumElems; ++i) {
11554       if (Op.getOperand(i).isUndef()) continue;
11555       Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
11556                            Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
11557     }
11558     return Result;
11559   }
11560 
11561   // Otherwise, expand into a number of unpckl*, start by extending each of
11562   // our (non-undef) elements to the full vector width with the element in the
11563   // bottom slot of the vector (which generates no code for SSE).
11564   SmallVector<SDValue, 8> Ops(NumElems);
11565   for (unsigned i = 0; i < NumElems; ++i) {
11566     if (!Op.getOperand(i).isUndef())
11567       Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
11568     else
11569       Ops[i] = DAG.getUNDEF(VT);
11570   }
11571 
11572   // Next, we iteratively mix elements, e.g. for v4f32:
11573   //   Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
11574   //         : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
11575   //   Step 2: unpcklpd X, Y ==>    <3, 2, 1, 0>
11576   for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
11577     // Generate scaled UNPCKL shuffle mask.
11578     SmallVector<int, 16> Mask;
11579     for(unsigned i = 0; i != Scale; ++i)
11580       Mask.push_back(i);
11581     for (unsigned i = 0; i != Scale; ++i)
11582       Mask.push_back(NumElems+i);
11583     Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
11584 
11585     for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
11586       Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
11587   }
11588   return Ops[0];
11589 }
11590 
11591 // 256-bit AVX can use the vinsertf128 instruction
11592 // to create 256-bit vectors from two other 128-bit ones.
11593 // TODO: Detect subvector broadcast here instead of DAG combine?
11594 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
11595                                       const X86Subtarget &Subtarget) {
11596   SDLoc dl(Op);
11597   MVT ResVT = Op.getSimpleValueType();
11598 
11599   assert((ResVT.is256BitVector() ||
11600           ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
11601 
11602   unsigned NumOperands = Op.getNumOperands();
11603   unsigned NumFreezeUndef = 0;
11604   unsigned NumZero = 0;
11605   unsigned NumNonZero = 0;
11606   unsigned NonZeros = 0;
11607   for (unsigned i = 0; i != NumOperands; ++i) {
11608     SDValue SubVec = Op.getOperand(i);
11609     if (SubVec.isUndef())
11610       continue;
11611     if (ISD::isFreezeUndef(SubVec.getNode()) && SubVec.hasOneUse())
11612       ++NumFreezeUndef;
11613     else if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
11614       ++NumZero;
11615     else {
11616       assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
11617       NonZeros |= 1 << i;
11618       ++NumNonZero;
11619     }
11620   }
11621 
11622   // If we have more than 2 non-zeros, build each half separately.
11623   if (NumNonZero > 2) {
11624     MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
11625     ArrayRef<SDUse> Ops = Op->ops();
11626     SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11627                              Ops.slice(0, NumOperands/2));
11628     SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11629                              Ops.slice(NumOperands/2));
11630     return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
11631   }
11632 
11633   // Otherwise, build it up through insert_subvectors.
11634   SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
11635                         : (NumFreezeUndef ? DAG.getFreeze(DAG.getUNDEF(ResVT))
11636                                           : DAG.getUNDEF(ResVT));
11637 
11638   MVT SubVT = Op.getOperand(0).getSimpleValueType();
11639   unsigned NumSubElems = SubVT.getVectorNumElements();
11640   for (unsigned i = 0; i != NumOperands; ++i) {
11641     if ((NonZeros & (1 << i)) == 0)
11642       continue;
11643 
11644     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
11645                       Op.getOperand(i),
11646                       DAG.getIntPtrConstant(i * NumSubElems, dl));
11647   }
11648 
11649   return Vec;
11650 }
11651 
11652 // Returns true if the given node is a type promotion (by concatenating i1
11653 // zeros) of the result of a node that already zeros all upper bits of
11654 // k-register.
11655 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
11656 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
11657                                        const X86Subtarget &Subtarget,
11658                                        SelectionDAG & DAG) {
11659   SDLoc dl(Op);
11660   MVT ResVT = Op.getSimpleValueType();
11661   unsigned NumOperands = Op.getNumOperands();
11662 
11663   assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
11664          "Unexpected number of operands in CONCAT_VECTORS");
11665 
11666   uint64_t Zeros = 0;
11667   uint64_t NonZeros = 0;
11668   for (unsigned i = 0; i != NumOperands; ++i) {
11669     SDValue SubVec = Op.getOperand(i);
11670     if (SubVec.isUndef())
11671       continue;
11672     assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
11673     if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
11674       Zeros |= (uint64_t)1 << i;
11675     else
11676       NonZeros |= (uint64_t)1 << i;
11677   }
11678 
11679   unsigned NumElems = ResVT.getVectorNumElements();
11680 
11681   // If we are inserting non-zero vector and there are zeros in LSBs and undef
11682   // in the MSBs we need to emit a KSHIFTL. The generic lowering to
11683   // insert_subvector will give us two kshifts.
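  // For example (illustrative): concat_vectors of (v2i1 zero), (v2i1 X), undef,
  // undef can be lowered as a single KSHIFTL of X by 2.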
11684   if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
11685       Log2_64(NonZeros) != NumOperands - 1) {
11686     MVT ShiftVT = ResVT;
11687     if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
11688       ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
11689     unsigned Idx = Log2_64(NonZeros);
11690     SDValue SubVec = Op.getOperand(Idx);
11691     unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
11692     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
11693                          DAG.getUNDEF(ShiftVT), SubVec,
11694                          DAG.getIntPtrConstant(0, dl));
11695     Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
11696                      DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
11697     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
11698                        DAG.getIntPtrConstant(0, dl));
11699   }
11700 
11701   // If there are zero or one non-zeros we can handle this very simply.
11702   if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
11703     SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
11704     if (!NonZeros)
11705       return Vec;
11706     unsigned Idx = Log2_64(NonZeros);
11707     SDValue SubVec = Op.getOperand(Idx);
11708     unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
11709     return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
11710                        DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
11711   }
11712 
11713   if (NumOperands > 2) {
11714     MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
11715     ArrayRef<SDUse> Ops = Op->ops();
11716     SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11717                              Ops.slice(0, NumOperands/2));
11718     SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
11719                              Ops.slice(NumOperands/2));
11720     return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
11721   }
11722 
11723   assert(llvm::popcount(NonZeros) == 2 && "Simple cases not handled?");
11724 
11725   if (ResVT.getVectorNumElements() >= 16)
11726     return Op; // The operation is legal with KUNPCK
11727 
11728   SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
11729                             DAG.getUNDEF(ResVT), Op.getOperand(0),
11730                             DAG.getIntPtrConstant(0, dl));
11731   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
11732                      DAG.getIntPtrConstant(NumElems/2, dl));
11733 }
11734 
11735 static SDValue LowerCONCAT_VECTORS(SDValue Op,
11736                                    const X86Subtarget &Subtarget,
11737                                    SelectionDAG &DAG) {
11738   MVT VT = Op.getSimpleValueType();
11739   if (VT.getVectorElementType() == MVT::i1)
11740     return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
11741 
11742   assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
11743          (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
11744           Op.getNumOperands() == 4)));
11745 
11746   // AVX can use the vinsertf128 instruction to create 256-bit vectors
11747   // from two other 128-bit ones.
11748 
11749   // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
11750   return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
11751 }
11752 
11753 //===----------------------------------------------------------------------===//
11754 // Vector shuffle lowering
11755 //
11756 // This is an experimental code path for lowering vector shuffles on x86. It is
11757 // designed to handle arbitrary vector shuffles and blends, gracefully
11758 // degrading performance as necessary. It works hard to recognize idiomatic
11759 // shuffles and lower them to optimal instruction patterns without leaving
11760 // a framework that allows reasonably efficient handling of all vector shuffle
11761 // patterns.
11762 //===----------------------------------------------------------------------===//
11763 
11764 /// Tiny helper function to identify a no-op mask.
11765 ///
11766 /// This is a somewhat boring predicate function. It checks whether the mask
11767 /// array input, which is assumed to be a single-input shuffle mask of the kind
11768 /// used by the X86 shuffle instructions (not a fully general
11769 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
11770 /// in-place shuffle are 'no-op's.
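/// For example, <0, 1, undef, 3> is a no-op mask, while <1, 0, 2, 3> is not.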
11771 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
11772   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11773     assert(Mask[i] >= -1 && "Out of bound mask element!");
11774     if (Mask[i] >= 0 && Mask[i] != i)
11775       return false;
11776   }
11777   return true;
11778 }
11779 
11780 /// Test whether there are elements crossing LaneSizeInBits lanes in this
11781 /// shuffle mask.
11782 ///
11783 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
11784 /// and we routinely test for these.
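/// For example, with 128-bit lanes a v8i32 mask <3, 2, 1, 0, 7, 6, 5, 4> stays
/// within its lanes, while <4, 5, 6, 7, 0, 1, 2, 3> crosses them.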
11785 static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
11786                                       unsigned ScalarSizeInBits,
11787                                       ArrayRef<int> Mask) {
11788   assert(LaneSizeInBits && ScalarSizeInBits &&
11789          (LaneSizeInBits % ScalarSizeInBits) == 0 &&
11790          "Illegal shuffle lane size");
11791   int LaneSize = LaneSizeInBits / ScalarSizeInBits;
11792   int Size = Mask.size();
11793   for (int i = 0; i < Size; ++i)
11794     if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
11795       return true;
11796   return false;
11797 }
11798 
11799 /// Test whether there are elements crossing 128-bit lanes in this
11800 /// shuffle mask.
11801 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
11802   return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
11803 }
11804 
11805 /// Test whether elements in each LaneSizeInBits lane in this shuffle mask come
11806 /// from multiple lanes - this is different to isLaneCrossingShuffleMask to
11807 /// better support 'repeated mask + lane permute' style shuffles.
11808 static bool isMultiLaneShuffleMask(unsigned LaneSizeInBits,
11809                                    unsigned ScalarSizeInBits,
11810                                    ArrayRef<int> Mask) {
11811   assert(LaneSizeInBits && ScalarSizeInBits &&
11812          (LaneSizeInBits % ScalarSizeInBits) == 0 &&
11813          "Illegal shuffle lane size");
11814   int NumElts = Mask.size();
11815   int NumEltsPerLane = LaneSizeInBits / ScalarSizeInBits;
11816   int NumLanes = NumElts / NumEltsPerLane;
11817   if (NumLanes > 1) {
11818     for (int i = 0; i != NumLanes; ++i) {
11819       int SrcLane = -1;
11820       for (int j = 0; j != NumEltsPerLane; ++j) {
11821         int M = Mask[(i * NumEltsPerLane) + j];
11822         if (M < 0)
11823           continue;
11824         int Lane = (M % NumElts) / NumEltsPerLane;
11825         if (SrcLane >= 0 && SrcLane != Lane)
11826           return true;
11827         SrcLane = Lane;
11828       }
11829     }
11830   }
11831   return false;
11832 }
11833 
11834 /// Test whether a shuffle mask is equivalent within each sub-lane.
11835 ///
11836 /// This checks a shuffle mask to see if it is performing the same
11837 /// lane-relative shuffle in each sub-lane. This trivially implies
11838 /// that it is also not lane-crossing. It may however involve a blend from the
11839 /// same lane of a second vector.
11840 ///
11841 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
11842 /// non-trivial to compute in the face of undef lanes. The representation is
11843 /// suitable for use with existing 128-bit shuffles as entries from the second
11844 /// vector have been remapped to [LaneSize, 2*LaneSize).
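///
/// For example, the v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15> repeats within each
/// 128-bit lane and produces the RepeatedMask <0, 5, 2, 7> (second-vector
/// elements remapped to [4, 8)).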
11845 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
11846                                   ArrayRef<int> Mask,
11847                                   SmallVectorImpl<int> &RepeatedMask) {
11848   auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
11849   RepeatedMask.assign(LaneSize, -1);
11850   int Size = Mask.size();
11851   for (int i = 0; i < Size; ++i) {
11852     assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
11853     if (Mask[i] < 0)
11854       continue;
11855     if ((Mask[i] % Size) / LaneSize != i / LaneSize)
11856       // This entry crosses lanes, so there is no way to model this shuffle.
11857       return false;
11858 
11859     // Ok, handle the in-lane shuffles by detecting if and when they repeat.
11860     // Adjust second vector indices to start at LaneSize instead of Size.
11861     int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
11862                                 : Mask[i] % LaneSize + LaneSize;
11863     if (RepeatedMask[i % LaneSize] < 0)
11864       // This is the first non-undef entry in this slot of a 128-bit lane.
11865       RepeatedMask[i % LaneSize] = LocalM;
11866     else if (RepeatedMask[i % LaneSize] != LocalM)
11867       // Found a mismatch with the repeated mask.
11868       return false;
11869   }
11870   return true;
11871 }
11872 
11873 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
11874 static bool
11875 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
11876                                 SmallVectorImpl<int> &RepeatedMask) {
11877   return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
11878 }
11879 
11880 static bool
11881 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
11882   SmallVector<int, 32> RepeatedMask;
11883   return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
11884 }
11885 
11886 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
11887 static bool
11888 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
11889                                 SmallVectorImpl<int> &RepeatedMask) {
11890   return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
11891 }
11892 
11893 /// Test whether a target shuffle mask is equivalent within each sub-lane.
11894 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
11895 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits,
11896                                         unsigned EltSizeInBits,
11897                                         ArrayRef<int> Mask,
11898                                         SmallVectorImpl<int> &RepeatedMask) {
11899   int LaneSize = LaneSizeInBits / EltSizeInBits;
11900   RepeatedMask.assign(LaneSize, SM_SentinelUndef);
11901   int Size = Mask.size();
11902   for (int i = 0; i < Size; ++i) {
11903     assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
11904     if (Mask[i] == SM_SentinelUndef)
11905       continue;
11906     if (Mask[i] == SM_SentinelZero) {
11907       if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
11908         return false;
11909       RepeatedMask[i % LaneSize] = SM_SentinelZero;
11910       continue;
11911     }
11912     if ((Mask[i] % Size) / LaneSize != i / LaneSize)
11913       // This entry crosses lanes, so there is no way to model this shuffle.
11914       return false;
11915 
11916     // Handle the in-lane shuffles by detecting if and when they repeat. Adjust
11917     // later vector indices to start at multiples of LaneSize instead of Size.
11918     int LaneM = Mask[i] / Size;
11919     int LocalM = (Mask[i] % LaneSize) + (LaneM * LaneSize);
11920     if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
11921       // This is the first non-undef entry in this slot of a 128-bit lane.
11922       RepeatedMask[i % LaneSize] = LocalM;
11923     else if (RepeatedMask[i % LaneSize] != LocalM)
11924       // Found a mismatch with the repeated mask.
11925       return false;
11926   }
11927   return true;
11928 }
11929 
11930 /// Test whether a target shuffle mask is equivalent within each sub-lane.
11931 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
11932 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
11933                                         ArrayRef<int> Mask,
11934                                         SmallVectorImpl<int> &RepeatedMask) {
11935   return isRepeatedTargetShuffleMask(LaneSizeInBits, VT.getScalarSizeInBits(),
11936                                      Mask, RepeatedMask);
11937 }
11938 
11939 /// Checks whether the vector elements referenced by two shuffle masks are
11940 /// equivalent.
11941 static bool IsElementEquivalent(int MaskSize, SDValue Op, SDValue ExpectedOp,
11942                                 int Idx, int ExpectedIdx) {
11943   assert(0 <= Idx && Idx < MaskSize && 0 <= ExpectedIdx &&
11944          ExpectedIdx < MaskSize && "Out of range element index");
11945   if (!Op || !ExpectedOp || Op.getOpcode() != ExpectedOp.getOpcode())
11946     return false;
11947 
11948   switch (Op.getOpcode()) {
11949   case ISD::BUILD_VECTOR:
11950     // If the values are build vectors, we can look through them to find
11951     // equivalent inputs that make the shuffles equivalent.
11952     // TODO: Handle MaskSize != Op.getNumOperands()?
11953     if (MaskSize == (int)Op.getNumOperands() &&
11954         MaskSize == (int)ExpectedOp.getNumOperands())
11955       return Op.getOperand(Idx) == ExpectedOp.getOperand(ExpectedIdx);
11956     break;
11957   case X86ISD::VBROADCAST:
11958   case X86ISD::VBROADCAST_LOAD:
11959     // TODO: Handle MaskSize != Op.getValueType().getVectorNumElements()?
11960     return (Op == ExpectedOp &&
11961             (int)Op.getValueType().getVectorNumElements() == MaskSize);
11962   case X86ISD::HADD:
11963   case X86ISD::HSUB:
11964   case X86ISD::FHADD:
11965   case X86ISD::FHSUB:
11966   case X86ISD::PACKSS:
11967   case X86ISD::PACKUS:
11968     // HOP(X,X) can refer to the elt from the lower/upper half of a lane.
11969     // TODO: Handle MaskSize != NumElts?
11970     // TODO: Handle HOP(X,Y) vs HOP(Y,X) equivalence cases.
11971     if (Op == ExpectedOp && Op.getOperand(0) == Op.getOperand(1)) {
11972       MVT VT = Op.getSimpleValueType();
11973       int NumElts = VT.getVectorNumElements();
11974       if (MaskSize == NumElts) {
11975         int NumLanes = VT.getSizeInBits() / 128;
11976         int NumEltsPerLane = NumElts / NumLanes;
11977         int NumHalfEltsPerLane = NumEltsPerLane / 2;
11978         bool SameLane =
11979             (Idx / NumEltsPerLane) == (ExpectedIdx / NumEltsPerLane);
11980         bool SameElt =
11981             (Idx % NumHalfEltsPerLane) == (ExpectedIdx % NumHalfEltsPerLane);
11982         return SameLane && SameElt;
11983       }
11984     }
11985     break;
11986   }
11987 
11988   return false;
11989 }
11990 
11991 /// Checks whether a shuffle mask is equivalent to an explicit list of
11992 /// arguments.
11993 ///
11994 /// This is a fast way to test a shuffle mask against a fixed pattern:
11995 ///
11996 ///   if (isShuffleEquivalent(Mask, {3, 2, 1, 0})) { ... }
11997 ///
11998 /// It returns true if the mask is exactly as wide as the argument list, and
11999 /// each element of the mask is either -1 (signifying undef) or the value given
12000 /// in the argument.
12001 static bool isShuffleEquivalent(ArrayRef<int> Mask, ArrayRef<int> ExpectedMask,
12002                                 SDValue V1 = SDValue(),
12003                                 SDValue V2 = SDValue()) {
12004   int Size = Mask.size();
12005   if (Size != (int)ExpectedMask.size())
12006     return false;
12007 
12008   for (int i = 0; i < Size; ++i) {
12009     assert(Mask[i] >= -1 && "Out of bound mask element!");
12010     int MaskIdx = Mask[i];
12011     int ExpectedIdx = ExpectedMask[i];
12012     if (0 <= MaskIdx && MaskIdx != ExpectedIdx) {
12013       SDValue MaskV = MaskIdx < Size ? V1 : V2;
12014       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
12015       MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
12016       ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
12017       if (!IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
12018         return false;
12019     }
12020   }
12021   return true;
12022 }
12023 
12024 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
12025 ///
12026 /// The masks must be exactly the same width.
12027 ///
12028 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
12029 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
12030 ///
12031 /// SM_SentinelZero is accepted as a valid negative index but must match in
12032 /// both, or via a known bits test.
12033 static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
12034                                       ArrayRef<int> ExpectedMask,
12035                                       const SelectionDAG &DAG,
12036                                       SDValue V1 = SDValue(),
12037                                       SDValue V2 = SDValue()) {
12038   int Size = Mask.size();
12039   if (Size != (int)ExpectedMask.size())
12040     return false;
12041   assert(llvm::all_of(ExpectedMask,
12042                       [Size](int M) { return isInRange(M, 0, 2 * Size); }) &&
12043          "Illegal target shuffle mask");
12044 
12045   // Check for out-of-range target shuffle mask indices.
12046   if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
12047     return false;
12048 
12049   // Don't use V1/V2 if they're not the same size as the shuffle mask type.
12050   if (V1 && V1.getValueSizeInBits() != VT.getSizeInBits())
12051     V1 = SDValue();
12052   if (V2 && V2.getValueSizeInBits() != VT.getSizeInBits())
12053     V2 = SDValue();
12054 
12055   APInt ZeroV1 = APInt::getNullValue(Size);
12056   APInt ZeroV2 = APInt::getNullValue(Size);
12057 
12058   for (int i = 0; i < Size; ++i) {
12059     int MaskIdx = Mask[i];
12060     int ExpectedIdx = ExpectedMask[i];
12061     if (MaskIdx == SM_SentinelUndef || MaskIdx == ExpectedIdx)
12062       continue;
12063     if (MaskIdx == SM_SentinelZero) {
12064       // If we need this expected index to be a zero element, then update the
12065       // relevant zero mask and perform the known bits at the end to minimize
12066       // repeated computes.
12067       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
12068       if (ExpectedV &&
12069           Size == (int)ExpectedV.getValueType().getVectorNumElements()) {
12070         int BitIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
12071         APInt &ZeroMask = ExpectedIdx < Size ? ZeroV1 : ZeroV2;
12072         ZeroMask.setBit(BitIdx);
12073         continue;
12074       }
12075     }
12076     if (MaskIdx >= 0) {
12077       SDValue MaskV = MaskIdx < Size ? V1 : V2;
12078       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
12079       MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
12080       ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
12081       if (IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
12082         continue;
12083     }
12084     return false;
12085   }
12086   return (ZeroV1.isNullValue() || DAG.MaskedVectorIsZero(V1, ZeroV1)) &&
12087          (ZeroV2.isNullValue() || DAG.MaskedVectorIsZero(V2, ZeroV2));
12088 }
12089 
12090 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
12091 // instructions.
12092 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT,
12093                                   const SelectionDAG &DAG) {
12094   if (VT != MVT::v8i32 && VT != MVT::v8f32)
12095     return false;
12096 
12097   SmallVector<int, 8> Unpcklwd;
12098   createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
12099                           /* Unary = */ false);
12100   SmallVector<int, 8> Unpckhwd;
12101   createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
12102                           /* Unary = */ false);
12103   bool IsUnpackwdMask = (isTargetShuffleEquivalent(VT, Mask, Unpcklwd, DAG) ||
12104                          isTargetShuffleEquivalent(VT, Mask, Unpckhwd, DAG));
12105   return IsUnpackwdMask;
12106 }
12107 
12108 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask,
12109                                       const SelectionDAG &DAG) {
12110   // Create 128-bit vector type based on mask size.
12111   MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
12112   MVT VT = MVT::getVectorVT(EltVT, Mask.size());
12113 
12114   // We can't assume a canonical shuffle mask, so try the commuted version too.
12115   SmallVector<int, 4> CommutedMask(Mask);
12116   ShuffleVectorSDNode::commuteMask(CommutedMask);
12117 
12118   // Match any of unary/binary or low/high.
12119   for (unsigned i = 0; i != 4; ++i) {
12120     SmallVector<int, 16> UnpackMask;
12121     createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
12122     if (isTargetShuffleEquivalent(VT, Mask, UnpackMask, DAG) ||
12123         isTargetShuffleEquivalent(VT, CommutedMask, UnpackMask, DAG))
12124       return true;
12125   }
12126   return false;
12127 }
12128 
12129 /// Return true if a shuffle mask chooses elements identically in its top and
12130 /// bottom halves. For example, any splat mask has the same top and bottom
12131 /// halves. If an element is undefined in only one half of the mask, the halves
12132 /// are not considered identical.
12133 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
12134   assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
12135   unsigned HalfSize = Mask.size() / 2;
12136   for (unsigned i = 0; i != HalfSize; ++i) {
12137     if (Mask[i] != Mask[i + HalfSize])
12138       return false;
12139   }
12140   return true;
12141 }
12142 
12143 /// Get a 4-lane 8-bit shuffle immediate for a mask.
12144 ///
12145 /// This helper function produces an 8-bit shuffle immediate corresponding to
12146 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
12147 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
12148 /// example.
12149 ///
12150 /// NB: We rely heavily on "undef" masks preserving the input lane.
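///
/// For example, the mask <3, 1, 2, 0> encodes as 0x27 (binary 00'10'01'11):
/// two bits per lane, with the lane 0 selection in the lowest two bits.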
12151 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
12152   assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
12153   assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
12154   assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
12155   assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
12156   assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
12157 
12158   // If the mask only uses one non-undef element, then fully 'splat' it to
12159   // improve later broadcast matching.
12160   int FirstIndex = find_if(Mask, [](int M) { return M >= 0; }) - Mask.begin();
12161   assert(0 <= FirstIndex && FirstIndex < 4 && "All undef shuffle mask");
12162 
12163   int FirstElt = Mask[FirstIndex];
12164   if (all_of(Mask, [FirstElt](int M) { return M < 0 || M == FirstElt; }))
12165     return (FirstElt << 6) | (FirstElt << 4) | (FirstElt << 2) | FirstElt;
12166 
12167   unsigned Imm = 0;
12168   Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
12169   Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
12170   Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
12171   Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
12172   return Imm;
12173 }
12174 
12175 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
12176                                           SelectionDAG &DAG) {
12177   return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
12178 }
12179 
12180 // The shuffle result is of the form:
12181 // 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[] elements appear in
12182 // ascending order. Each element of Zeroable corresponds to a particular
12183 // element of Mask, as described in computeZeroableShuffleElements.
12184 //
12185 // The function looks for a sub-mask whose non-zero elements are in
12186 // increasing order and returns true if such a sub-mask exists.
12187 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
12188                                      ArrayRef<int> Mask, const EVT &VectorType,
12189                                      bool &IsZeroSideLeft) {
12190   int NextElement = -1;
12191   // Check if the Mask's nonzero elements are in increasing order.
12192   for (int i = 0, e = Mask.size(); i < e; i++) {
12193     // Checks if the mask's zero elements are built from only zeros.
12194     assert(Mask[i] >= -1 && "Out of bound mask element!");
12195     if (Mask[i] < 0)
12196       return false;
12197     if (Zeroable[i])
12198       continue;
12199     // Find the lowest non-zero element.
12200     if (NextElement < 0) {
12201       NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
12202       IsZeroSideLeft = NextElement != 0;
12203     }
12204     // Exit if the mask's non-zero elements are not in increasing order.
12205     if (NextElement != Mask[i])
12206       return false;
12207     NextElement++;
12208   }
12209   return true;
12210 }
12211 
12212 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
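///
/// For example, the single-input v8i16 mask <1, 0, 3, 2, 5, 4, 7, 6> becomes a
/// PSHUFB with the byte mask <2,3, 0,1, 6,7, 4,5, 10,11, 8,9, 14,15, 12,13>.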
12213 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
12214                                       ArrayRef<int> Mask, SDValue V1,
12215                                       SDValue V2, const APInt &Zeroable,
12216                                       const X86Subtarget &Subtarget,
12217                                       SelectionDAG &DAG) {
12218   int Size = Mask.size();
12219   int LaneSize = 128 / VT.getScalarSizeInBits();
12220   const int NumBytes = VT.getSizeInBits() / 8;
12221   const int NumEltBytes = VT.getScalarSizeInBits() / 8;
12222 
12223   assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
12224          (Subtarget.hasAVX2() && VT.is256BitVector()) ||
12225          (Subtarget.hasBWI() && VT.is512BitVector()));
12226 
12227   SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
12228   // Sign bit set in i8 mask means zero element.
12229   SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
12230 
12231   SDValue V;
12232   for (int i = 0; i < NumBytes; ++i) {
12233     int M = Mask[i / NumEltBytes];
12234     if (M < 0) {
12235       PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
12236       continue;
12237     }
12238     if (Zeroable[i / NumEltBytes]) {
12239       PSHUFBMask[i] = ZeroMask;
12240       continue;
12241     }
12242 
12243     // We can only use a single input of V1 or V2.
12244     SDValue SrcV = (M >= Size ? V2 : V1);
12245     if (V && V != SrcV)
12246       return SDValue();
12247     V = SrcV;
12248     M %= Size;
12249 
12250     // PSHUFB can't cross lanes, ensure this doesn't happen.
12251     if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
12252       return SDValue();
12253 
12254     M = M % LaneSize;
12255     M = M * NumEltBytes + (i % NumEltBytes);
12256     PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
12257   }
12258   assert(V && "Failed to find a source input");
12259 
12260   MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
12261   return DAG.getBitcast(
12262       VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
12263                       DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
12264 }
12265 
12266 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
12267                            const X86Subtarget &Subtarget, SelectionDAG &DAG,
12268                            const SDLoc &dl);
12269 
12270 // X86 has dedicated shuffle that can be lowered to VEXPAND
12271 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
12272                                     const APInt &Zeroable,
12273                                     ArrayRef<int> Mask, SDValue &V1,
12274                                     SDValue &V2, SelectionDAG &DAG,
12275                                     const X86Subtarget &Subtarget) {
12276   bool IsLeftZeroSide = true;
12277   if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
12278                                 IsLeftZeroSide))
12279     return SDValue();
12280   unsigned VEXPANDMask = (~Zeroable).getZExtValue();
12281   MVT IntegerType =
12282       MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
12283   SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
12284   unsigned NumElts = VT.getVectorNumElements();
12285   assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
12286          "Unexpected number of vector elements");
12287   SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
12288                               Subtarget, DAG, DL);
12289   SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
12290   SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
12291   return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
12292 }
12293 
12294 static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
12295                                   unsigned &UnpackOpcode, bool IsUnary,
12296                                   ArrayRef<int> TargetMask, const SDLoc &DL,
12297                                   SelectionDAG &DAG,
12298                                   const X86Subtarget &Subtarget) {
12299   int NumElts = VT.getVectorNumElements();
12300 
12301   bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
12302   for (int i = 0; i != NumElts; i += 2) {
12303     int M1 = TargetMask[i + 0];
12304     int M2 = TargetMask[i + 1];
12305     Undef1 &= (SM_SentinelUndef == M1);
12306     Undef2 &= (SM_SentinelUndef == M2);
12307     Zero1 &= isUndefOrZero(M1);
12308     Zero2 &= isUndefOrZero(M2);
12309   }
12310   assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
12311          "Zeroable shuffle detected");
12312 
12313   // Attempt to match the target mask against the unpack lo/hi mask patterns.
12314   SmallVector<int, 64> Unpckl, Unpckh;
12315   createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
12316   if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG, V1,
12317                                 (IsUnary ? V1 : V2))) {
12318     UnpackOpcode = X86ISD::UNPCKL;
12319     V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
12320     V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
12321     return true;
12322   }
12323 
12324   createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
12325   if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG, V1,
12326                                 (IsUnary ? V1 : V2))) {
12327     UnpackOpcode = X86ISD::UNPCKH;
12328     V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
12329     V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
12330     return true;
12331   }
12332 
12333   // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
12334   if (IsUnary && (Zero1 || Zero2)) {
12335     // Don't bother if we can blend instead.
12336     if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
12337         isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
12338       return false;
12339 
12340     bool MatchLo = true, MatchHi = true;
12341     for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
12342       int M = TargetMask[i];
12343 
12344       // Ignore if the input is known to be zero or the index is undef.
12345       if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
12346           (M == SM_SentinelUndef))
12347         continue;
12348 
12349       MatchLo &= (M == Unpckl[i]);
12350       MatchHi &= (M == Unpckh[i]);
12351     }
12352 
12353     if (MatchLo || MatchHi) {
12354       UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
12355       V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
12356       V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
12357       return true;
12358     }
12359   }
12360 
12361   // If a binary shuffle, commute and try again.
12362   if (!IsUnary) {
12363     ShuffleVectorSDNode::commuteMask(Unpckl);
12364     if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG)) {
12365       UnpackOpcode = X86ISD::UNPCKL;
12366       std::swap(V1, V2);
12367       return true;
12368     }
12369 
12370     ShuffleVectorSDNode::commuteMask(Unpckh);
12371     if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG)) {
12372       UnpackOpcode = X86ISD::UNPCKH;
12373       std::swap(V1, V2);
12374       return true;
12375     }
12376   }
12377 
12378   return false;
12379 }
12380 
12381 // X86 has dedicated unpack instructions that can handle specific blend
12382 // operations: UNPCKH and UNPCKL.
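// For example, for v4i32 the UNPCKL pattern interleaves the low halves of the
// two sources, <0, 4, 1, 5>, and UNPCKH interleaves the high halves,
// <2, 6, 3, 7>.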
12383 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
12384                                      ArrayRef<int> Mask, SDValue V1, SDValue V2,
12385                                      SelectionDAG &DAG) {
12386   SmallVector<int, 8> Unpckl;
12387   createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
12388   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
12389     return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
12390 
12391   SmallVector<int, 8> Unpckh;
12392   createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
12393   if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
12394     return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
12395 
12396   // Commute and try again.
12397   ShuffleVectorSDNode::commuteMask(Unpckl);
12398   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
12399     return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
12400 
12401   ShuffleVectorSDNode::commuteMask(Unpckh);
12402   if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
12403     return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
12404 
12405   return SDValue();
12406 }
12407 
12408 /// Check if the mask can be mapped to a preliminary shuffle (vperm 64-bit)
12409 /// followed by unpack 256-bit.
12410 static SDValue lowerShuffleWithUNPCK256(const SDLoc &DL, MVT VT,
12411                                         ArrayRef<int> Mask, SDValue V1,
12412                                         SDValue V2, SelectionDAG &DAG) {
12413   SmallVector<int, 32> Unpckl, Unpckh;
12414   createSplat2ShuffleMask(VT, Unpckl, /* Lo */ true);
12415   createSplat2ShuffleMask(VT, Unpckh, /* Lo */ false);
12416 
12417   unsigned UnpackOpcode;
12418   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
12419     UnpackOpcode = X86ISD::UNPCKL;
12420   else if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
12421     UnpackOpcode = X86ISD::UNPCKH;
12422   else
12423     return SDValue();
12424 
12425   // This is a "natural" unpack operation (rather than the 128-bit sectored
12426   // operation implemented by AVX). We need to rearrange 64-bit chunks of the
12427   // input in order to use the x86 instruction.
12428   V1 = DAG.getVectorShuffle(MVT::v4f64, DL, DAG.getBitcast(MVT::v4f64, V1),
12429                             DAG.getUNDEF(MVT::v4f64), {0, 2, 1, 3});
12430   V1 = DAG.getBitcast(VT, V1);
12431   return DAG.getNode(UnpackOpcode, DL, VT, V1, V1);
12432 }
12433 
12434 // Check if the mask can be mapped to a TRUNCATE or VTRUNC, truncating the
12435 // source into the lower elements and zeroing the upper elements.
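// For example, given the AVX512 feature checks below, the v16i8 mask
// <0, 2, 4, 6, 8, 10, 12, 14> with the upper eight elements zeroable matches a
// truncation from a v8i16 source.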
12436 static bool matchShuffleAsVTRUNC(MVT &SrcVT, MVT &DstVT, MVT VT,
12437                                  ArrayRef<int> Mask, const APInt &Zeroable,
12438                                  const X86Subtarget &Subtarget) {
12439   if (!VT.is512BitVector() && !Subtarget.hasVLX())
12440     return false;
12441 
12442   unsigned NumElts = Mask.size();
12443   unsigned EltSizeInBits = VT.getScalarSizeInBits();
12444   unsigned MaxScale = 64 / EltSizeInBits;
12445 
12446   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
12447     unsigned SrcEltBits = EltSizeInBits * Scale;
12448     if (SrcEltBits < 32 && !Subtarget.hasBWI())
12449       continue;
12450     unsigned NumSrcElts = NumElts / Scale;
12451     if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale))
12452       continue;
12453     unsigned UpperElts = NumElts - NumSrcElts;
12454     if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
12455       continue;
12456     SrcVT = MVT::getIntegerVT(EltSizeInBits * Scale);
12457     SrcVT = MVT::getVectorVT(SrcVT, NumSrcElts);
12458     DstVT = MVT::getIntegerVT(EltSizeInBits);
12459     if ((NumSrcElts * EltSizeInBits) >= 128) {
12460       // ISD::TRUNCATE
12461       DstVT = MVT::getVectorVT(DstVT, NumSrcElts);
12462     } else {
12463       // X86ISD::VTRUNC
12464       DstVT = MVT::getVectorVT(DstVT, 128 / EltSizeInBits);
12465     }
12466     return true;
12467   }
12468 
12469   return false;
12470 }
12471 
12472 // Helper to create TRUNCATE/VTRUNC nodes, optionally with zero/undef upper
12473 // element padding to the final DstVT.
12474 static SDValue getAVX512TruncNode(const SDLoc &DL, MVT DstVT, SDValue Src,
12475                                   const X86Subtarget &Subtarget,
12476                                   SelectionDAG &DAG, bool ZeroUppers) {
12477   MVT SrcVT = Src.getSimpleValueType();
12478   MVT DstSVT = DstVT.getScalarType();
12479   unsigned NumDstElts = DstVT.getVectorNumElements();
12480   unsigned NumSrcElts = SrcVT.getVectorNumElements();
12481   unsigned DstEltSizeInBits = DstVT.getScalarSizeInBits();
12482 
12483   if (!DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
12484     return SDValue();
12485 
12486   // Perform a direct ISD::TRUNCATE if possible.
12487   if (NumSrcElts == NumDstElts)
12488     return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Src);
12489 
12490   if (NumSrcElts > NumDstElts) {
12491     MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
12492     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
12493     return extractSubVector(Trunc, 0, DAG, DL, DstVT.getSizeInBits());
12494   }
12495 
12496   if ((NumSrcElts * DstEltSizeInBits) >= 128) {
12497     MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
12498     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
12499     return widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
12500                           DstVT.getSizeInBits());
12501   }
12502 
12503   // Non-VLX targets must truncate from a 512-bit type, so we need to
12504   // widen, truncate and then possibly extract the original subvector.
12505   if (!Subtarget.hasVLX() && !SrcVT.is512BitVector()) {
12506     SDValue NewSrc = widenSubVector(Src, ZeroUppers, Subtarget, DAG, DL, 512);
12507     return getAVX512TruncNode(DL, DstVT, NewSrc, Subtarget, DAG, ZeroUppers);
12508   }
12509 
12510   // Fallback to a X86ISD::VTRUNC, padding if necessary.
12511   MVT TruncVT = MVT::getVectorVT(DstSVT, 128 / DstEltSizeInBits);
12512   SDValue Trunc = DAG.getNode(X86ISD::VTRUNC, DL, TruncVT, Src);
12513   if (DstVT != TruncVT)
12514     Trunc = widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
12515                            DstVT.getSizeInBits());
12516   return Trunc;
12517 }
12518 
12519 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
12520 //
12521 // An example is the following:
12522 //
12523 // t0: ch = EntryToken
12524 //           t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
12525 //         t25: v4i32 = truncate t2
12526 //       t41: v8i16 = bitcast t25
12527 //       t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
12528 //       Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
12529 //     t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
12530 //   t18: v2i64 = bitcast t51
12531 //
12532 // One can just use a single vpmovdw instruction; without avx512vl we need to
12533 // use the zmm variant and extract the lower subvector, padding with zeroes.
12534 // TODO: Merge with lowerShuffleAsVTRUNC.
12535 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, MVT VT, SDValue V1,
12536                                      SDValue V2, ArrayRef<int> Mask,
12537                                      const APInt &Zeroable,
12538                                      const X86Subtarget &Subtarget,
12539                                      SelectionDAG &DAG) {
12540   assert((VT == MVT::v16i8 || VT == MVT::v8i16) && "Unexpected VTRUNC type");
12541   if (!Subtarget.hasAVX512())
12542     return SDValue();
12543 
12544   unsigned NumElts = VT.getVectorNumElements();
12545   unsigned EltSizeInBits = VT.getScalarSizeInBits();
12546   unsigned MaxScale = 64 / EltSizeInBits;
12547   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
12548     unsigned SrcEltBits = EltSizeInBits * Scale;
12549     unsigned NumSrcElts = NumElts / Scale;
12550     unsigned UpperElts = NumElts - NumSrcElts;
12551     if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale) ||
12552         !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
12553       continue;
12554 
12555     // Attempt to find a matching source truncation, but as a fallback VLX
12556     // cases can use the VPMOV directly.
12557     SDValue Src = peekThroughBitcasts(V1);
12558     if (Src.getOpcode() == ISD::TRUNCATE &&
12559         Src.getScalarValueSizeInBits() == SrcEltBits) {
12560       Src = Src.getOperand(0);
12561     } else if (Subtarget.hasVLX()) {
12562       MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
12563       MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
12564       Src = DAG.getBitcast(SrcVT, Src);
12565       // Don't do this if PACKSS/PACKUS could perform it cheaper.
12566       if (Scale == 2 &&
12567           ((DAG.ComputeNumSignBits(Src) > EltSizeInBits) ||
12568            (DAG.computeKnownBits(Src).countMinLeadingZeros() >= EltSizeInBits)))
12569         return SDValue();
12570     } else
12571       return SDValue();
12572 
12573     // VPMOVWB is only available with avx512bw.
12574     if (!Subtarget.hasBWI() && Src.getScalarValueSizeInBits() < 32)
12575       return SDValue();
12576 
12577     bool UndefUppers = isUndefInRange(Mask, NumSrcElts, UpperElts);
12578     return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
12579   }
12580 
12581   return SDValue();
12582 }
12583 
12584 // Attempt to match binary shuffle patterns as a truncate.
12585 static SDValue lowerShuffleAsVTRUNC(const SDLoc &DL, MVT VT, SDValue V1,
12586                                     SDValue V2, ArrayRef<int> Mask,
12587                                     const APInt &Zeroable,
12588                                     const X86Subtarget &Subtarget,
12589                                     SelectionDAG &DAG) {
12590   assert((VT.is128BitVector() || VT.is256BitVector()) &&
12591          "Unexpected VTRUNC type");
12592   if (!Subtarget.hasAVX512())
12593     return SDValue();
12594 
12595   unsigned NumElts = VT.getVectorNumElements();
12596   unsigned EltSizeInBits = VT.getScalarSizeInBits();
12597   unsigned MaxScale = 64 / EltSizeInBits;
12598   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
12599     // TODO: Support non-BWI VPMOVWB truncations?
12600     unsigned SrcEltBits = EltSizeInBits * Scale;
12601     if (SrcEltBits < 32 && !Subtarget.hasBWI())
12602       continue;
12603 
12604     // Match shuffle <Ofs,Ofs+Scale,Ofs+2*Scale,..,undef_or_zero,undef_or_zero>
12605     // Bail if the V2 elements are undef.
12606     unsigned NumHalfSrcElts = NumElts / Scale;
12607     unsigned NumSrcElts = 2 * NumHalfSrcElts;
12608     for (unsigned Offset = 0; Offset != Scale; ++Offset) {
12609       if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, Offset, Scale) ||
12610           isUndefInRange(Mask, NumHalfSrcElts, NumHalfSrcElts))
12611         continue;
12612 
12613       // The elements beyond the truncation must be undef/zero.
12614       unsigned UpperElts = NumElts - NumSrcElts;
12615       if (UpperElts > 0 &&
12616           !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
12617         continue;
12618       bool UndefUppers =
12619           UpperElts > 0 && isUndefInRange(Mask, NumSrcElts, UpperElts);
12620 
12621       // For offset truncations, ensure that the concat is cheap.
12622       if (Offset) {
12623         auto IsCheapConcat = [&](SDValue Lo, SDValue Hi) {
12624           if (Lo.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
12625               Hi.getOpcode() == ISD::EXTRACT_SUBVECTOR)
12626             return Lo.getOperand(0) == Hi.getOperand(0);
12627           if (ISD::isNormalLoad(Lo.getNode()) &&
12628               ISD::isNormalLoad(Hi.getNode())) {
12629             auto *LDLo = cast<LoadSDNode>(Lo);
12630             auto *LDHi = cast<LoadSDNode>(Hi);
12631             return DAG.areNonVolatileConsecutiveLoads(
12632                 LDHi, LDLo, Lo.getValueType().getStoreSize(), 1);
12633           }
12634           return false;
12635         };
12636         if (!IsCheapConcat(V1, V2))
12637           continue;
12638       }
12639 
12640       // As we're using both sources we need to concat them together
12641       // and truncate from the double-sized src.
12642       MVT ConcatVT = MVT::getVectorVT(VT.getScalarType(), NumElts * 2);
12643       SDValue Src = DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT, V1, V2);
12644 
12645       MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
12646       MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
12647       Src = DAG.getBitcast(SrcVT, Src);
12648 
12649       // Shift the offset'd elements into place for the truncation.
12650       // TODO: Use getTargetVShiftByConstNode.
12651       if (Offset)
12652         Src = DAG.getNode(
12653             X86ISD::VSRLI, DL, SrcVT, Src,
12654             DAG.getTargetConstant(Offset * EltSizeInBits, DL, MVT::i8));
12655 
12656       return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
12657     }
12658   }
12659 
12660   return SDValue();
12661 }
12662 
12663 /// Check whether a compaction lowering can be done by dropping even/odd
12664 /// elements and compute how many times even/odd elements must be dropped.
12665 ///
12666 /// This handles shuffles which take every Nth element where N is a power of
12667 /// two. Example shuffle masks:
12668 ///
12669 /// (even)
12670 ///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
12671 ///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
12672 ///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
12673 ///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
12674 ///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
12675 ///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
12676 ///
12677 /// (odd)
12678 ///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15,  0,  2,  4,  6,  8, 10, 12, 14
12679 ///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
12680 ///
12681 /// Any of these lanes can of course be undef.
12682 ///
12683 /// This routine only supports N <= 3.
12684 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
12685 /// for larger N.
12686 ///
12687 /// \returns N above, or the number of times even/odd elements must be dropped
12688 /// if there is such a number. Otherwise returns zero.
12689 static int canLowerByDroppingElements(ArrayRef<int> Mask, bool MatchEven,
12690                                       bool IsSingleInput) {
12691   // The modulus for the shuffle vector entries is based on whether this is
12692   // a single input or not.
12693   int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
12694   assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
12695          "We should only be called with masks with a power-of-2 size!");
12696 
12697   uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
12698   int Offset = MatchEven ? 0 : 1;
12699 
12700   // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
12701   // and 2^3 simultaneously. This is because we may have ambiguity with
12702   // partially undef inputs.
12703   bool ViableForN[3] = {true, true, true};
12704 
12705   for (int i = 0, e = Mask.size(); i < e; ++i) {
12706     // Ignore undef lanes, we'll optimistically collapse them to the pattern we
12707     // want.
12708     if (Mask[i] < 0)
12709       continue;
12710 
12711     bool IsAnyViable = false;
12712     for (unsigned j = 0; j != std::size(ViableForN); ++j)
12713       if (ViableForN[j]) {
12714         uint64_t N = j + 1;
12715 
12716         // The shuffle mask must be equal to (i * 2^N) % M.
12717         if ((uint64_t)(Mask[i] - Offset) == (((uint64_t)i << N) & ModMask))
12718           IsAnyViable = true;
12719         else
12720           ViableForN[j] = false;
12721       }
12722     // Early exit if we exhaust the possible powers of two.
12723     if (!IsAnyViable)
12724       break;
12725   }
12726 
12727   for (unsigned j = 0; j != std::size(ViableForN); ++j)
12728     if (ViableForN[j])
12729       return j + 1;
12730 
12731   // Return 0 as there is no viable power of two.
12732   return 0;
12733 }
12734 
12735 // X86 has dedicated pack instructions that can handle specific truncation
12736 // operations: PACKSS and PACKUS.
12737 // Checks for compaction shuffle masks if MaxStages > 1.
12738 // TODO: Add support for matching multiple PACKSS/PACKUS stages.
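// For example, the v16i8 compaction mask <0, 2, 4, ..., 30> matches a single
// binary pack of the two v8i16 sources, using PACKUS if the packed-away upper
// bits are known zero or PACKSS if they are known sign bits.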
12739 static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
12740                                  unsigned &PackOpcode, ArrayRef<int> TargetMask,
12741                                  const SelectionDAG &DAG,
12742                                  const X86Subtarget &Subtarget,
12743                                  unsigned MaxStages = 1) {
12744   unsigned NumElts = VT.getVectorNumElements();
12745   unsigned BitSize = VT.getScalarSizeInBits();
12746   assert(0 < MaxStages && MaxStages <= 3 && (BitSize << MaxStages) <= 64 &&
12747          "Illegal maximum compaction");
12748 
12749   auto MatchPACK = [&](SDValue N1, SDValue N2, MVT PackVT) {
12750     unsigned NumSrcBits = PackVT.getScalarSizeInBits();
12751     unsigned NumPackedBits = NumSrcBits - BitSize;
12752     N1 = peekThroughBitcasts(N1);
12753     N2 = peekThroughBitcasts(N2);
12754     unsigned NumBits1 = N1.getScalarValueSizeInBits();
12755     unsigned NumBits2 = N2.getScalarValueSizeInBits();
12756     bool IsZero1 = llvm::isNullOrNullSplat(N1, /*AllowUndefs*/ false);
12757     bool IsZero2 = llvm::isNullOrNullSplat(N2, /*AllowUndefs*/ false);
12758     if ((!N1.isUndef() && !IsZero1 && NumBits1 != NumSrcBits) ||
12759         (!N2.isUndef() && !IsZero2 && NumBits2 != NumSrcBits))
12760       return false;
12761     if (Subtarget.hasSSE41() || BitSize == 8) {
12762       APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
12763       if ((N1.isUndef() || IsZero1 || DAG.MaskedValueIsZero(N1, ZeroMask)) &&
12764           (N2.isUndef() || IsZero2 || DAG.MaskedValueIsZero(N2, ZeroMask))) {
12765         V1 = N1;
12766         V2 = N2;
12767         SrcVT = PackVT;
12768         PackOpcode = X86ISD::PACKUS;
12769         return true;
12770       }
12771     }
12772     bool IsAllOnes1 = llvm::isAllOnesOrAllOnesSplat(N1, /*AllowUndefs*/ false);
12773     bool IsAllOnes2 = llvm::isAllOnesOrAllOnesSplat(N2, /*AllowUndefs*/ false);
12774     if ((N1.isUndef() || IsZero1 || IsAllOnes1 ||
12775          DAG.ComputeNumSignBits(N1) > NumPackedBits) &&
12776         (N2.isUndef() || IsZero2 || IsAllOnes2 ||
12777          DAG.ComputeNumSignBits(N2) > NumPackedBits)) {
12778       V1 = N1;
12779       V2 = N2;
12780       SrcVT = PackVT;
12781       PackOpcode = X86ISD::PACKSS;
12782       return true;
12783     }
12784     return false;
12785   };
12786 
12787   // Attempt to match against wider and wider compaction patterns.
12788   for (unsigned NumStages = 1; NumStages <= MaxStages; ++NumStages) {
12789     MVT PackSVT = MVT::getIntegerVT(BitSize << NumStages);
12790     MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);
12791 
12792     // Try binary shuffle.
12793     SmallVector<int, 32> BinaryMask;
12794     createPackShuffleMask(VT, BinaryMask, false, NumStages);
12795     if (isTargetShuffleEquivalent(VT, TargetMask, BinaryMask, DAG, V1, V2))
12796       if (MatchPACK(V1, V2, PackVT))
12797         return true;
12798 
12799     // Try unary shuffle.
12800     SmallVector<int, 32> UnaryMask;
12801     createPackShuffleMask(VT, UnaryMask, true, NumStages);
12802     if (isTargetShuffleEquivalent(VT, TargetMask, UnaryMask, DAG, V1))
12803       if (MatchPACK(V1, V1, PackVT))
12804         return true;
12805   }
12806 
12807   return false;
12808 }
12809 
12810 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
12811                                     SDValue V1, SDValue V2, SelectionDAG &DAG,
12812                                     const X86Subtarget &Subtarget) {
12813   MVT PackVT;
12814   unsigned PackOpcode;
12815   unsigned SizeBits = VT.getSizeInBits();
12816   unsigned EltBits = VT.getScalarSizeInBits();
12817   unsigned MaxStages = Log2_32(64 / EltBits);
12818   if (!matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
12819                             Subtarget, MaxStages))
12820     return SDValue();
12821 
12822   unsigned CurrentEltBits = PackVT.getScalarSizeInBits();
12823   unsigned NumStages = Log2_32(CurrentEltBits / EltBits);
12824 
12825   // Don't lower multi-stage packs on AVX512; truncation is better.
12826   if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX())
12827     return SDValue();
12828 
12829   // Pack to the largest type possible:
12830   // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
12831   unsigned MaxPackBits = 16;
12832   if (CurrentEltBits > 16 &&
12833       (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41()))
12834     MaxPackBits = 32;
12835 
12836   // Repeatedly pack down to the target size.
12837   SDValue Res;
12838   for (unsigned i = 0; i != NumStages; ++i) {
12839     unsigned SrcEltBits = std::min(MaxPackBits, CurrentEltBits);
12840     unsigned NumSrcElts = SizeBits / SrcEltBits;
12841     MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
12842     MVT DstSVT = MVT::getIntegerVT(SrcEltBits / 2);
12843     MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
12844     MVT DstVT = MVT::getVectorVT(DstSVT, NumSrcElts * 2);
12845     Res = DAG.getNode(PackOpcode, DL, DstVT, DAG.getBitcast(SrcVT, V1),
12846                       DAG.getBitcast(SrcVT, V2));
12847     V1 = V2 = Res;
12848     CurrentEltBits /= 2;
12849   }
12850   assert(Res && Res.getValueType() == VT &&
12851          "Failed to lower compaction shuffle");
12852   return Res;
12853 }
12854 
12855 /// Try to emit a bitmask instruction for a shuffle.
12856 ///
12857 /// This handles cases where we can model a blend exactly as a bitmask due to
12858 /// one of the inputs being zeroable.
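///
/// For example, if elements 1 and 3 of the result are zeroable, the v4i32
/// blend <0, 5, 2, 7> of V1 with an all-zero V2 can be emitted as
/// V1 & <-1, 0, -1, 0>.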
12859 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
12860                                      SDValue V2, ArrayRef<int> Mask,
12861                                      const APInt &Zeroable,
12862                                      const X86Subtarget &Subtarget,
12863                                      SelectionDAG &DAG) {
12864   MVT MaskVT = VT;
12865   MVT EltVT = VT.getVectorElementType();
12866   SDValue Zero, AllOnes;
12867   // Use f64 if i64 isn't legal.
12868   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
12869     EltVT = MVT::f64;
12870     MaskVT = MVT::getVectorVT(EltVT, Mask.size());
12871   }
12872 
12873   MVT LogicVT = VT;
12874   if (EltVT == MVT::f32 || EltVT == MVT::f64) {
12875     Zero = DAG.getConstantFP(0.0, DL, EltVT);
12876     APFloat AllOnesValue =
12877         APFloat::getAllOnesValue(SelectionDAG::EVTToAPFloatSemantics(EltVT));
12878     AllOnes = DAG.getConstantFP(AllOnesValue, DL, EltVT);
12879     LogicVT =
12880         MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
12881   } else {
12882     Zero = DAG.getConstant(0, DL, EltVT);
12883     AllOnes = DAG.getAllOnesConstant(DL, EltVT);
12884   }
12885 
12886   SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
12887   SDValue V;
12888   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
12889     if (Zeroable[i])
12890       continue;
12891     if (Mask[i] % Size != i)
12892       return SDValue(); // Not a blend.
12893     if (!V)
12894       V = Mask[i] < Size ? V1 : V2;
12895     else if (V != (Mask[i] < Size ? V1 : V2))
12896       return SDValue(); // Can only let one input through the mask.
12897 
12898     VMaskOps[i] = AllOnes;
12899   }
12900   if (!V)
12901     return SDValue(); // No non-zeroable elements!
12902 
12903   SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
12904   VMask = DAG.getBitcast(LogicVT, VMask);
12905   V = DAG.getBitcast(LogicVT, V);
12906   SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
12907   return DAG.getBitcast(VT, And);
12908 }
12909 
12910 /// Try to emit a blend instruction for a shuffle using bit math.
12911 ///
12912 /// This is used as a fallback approach when first class blend instructions are
12913 /// unavailable. Currently it is only suitable for integer vectors, but could
12914 /// be generalized for floating point vectors if desirable.
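///
/// For example, the v4i32 blend mask <0, 5, 2, 7> becomes
/// (V1 & <-1, 0, -1, 0>) OR ANDNP(<-1, 0, -1, 0>, V2).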
12915 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
12916                                       SDValue V2, ArrayRef<int> Mask,
12917                                       SelectionDAG &DAG) {
12918   assert(VT.isInteger() && "Only supports integer vector types!");
12919   MVT EltVT = VT.getVectorElementType();
12920   SDValue Zero = DAG.getConstant(0, DL, EltVT);
12921   SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
12922   SmallVector<SDValue, 16> MaskOps;
12923   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
12924     if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
12925       return SDValue(); // Shuffled input!
12926     MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
12927   }
12928 
12929   SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
12930   V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
12931   V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
12932   return DAG.getNode(ISD::OR, DL, VT, V1, V2);
12933 }
12934 
12935 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
12936                                     SDValue PreservedSrc,
12937                                     const X86Subtarget &Subtarget,
12938                                     SelectionDAG &DAG);
12939 
12940 static bool matchShuffleAsBlend(SDValue V1, SDValue V2,
12941                                 MutableArrayRef<int> Mask,
12942                                 const APInt &Zeroable, bool &ForceV1Zero,
12943                                 bool &ForceV2Zero, uint64_t &BlendMask) {
12944   bool V1IsZeroOrUndef =
12945       V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
12946   bool V2IsZeroOrUndef =
12947       V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
12948 
12949   BlendMask = 0;
12950   ForceV1Zero = false, ForceV2Zero = false;
12951   assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
12952 
12953   // Attempt to generate the binary blend mask. If an input is zero then
12954   // we can use any lane.
12955   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
12956     int M = Mask[i];
12957     if (M == SM_SentinelUndef)
12958       continue;
12959     if (M == i ||
12960         (0 <= M && M < Size && IsElementEquivalent(Size, V1, V1, M, i))) {
12961       Mask[i] = i;
12962       continue;
12963     }
12964     if (M == (i + Size) ||
12965         (Size <= M && IsElementEquivalent(Size, V2, V2, M - Size, i))) {
12966       BlendMask |= 1ull << i;
12967       Mask[i] = i + Size;
12968       continue;
12969     }
12970     if (Zeroable[i]) {
12971       if (V1IsZeroOrUndef) {
12972         ForceV1Zero = true;
12973         Mask[i] = i;
12974         continue;
12975       }
12976       if (V2IsZeroOrUndef) {
12977         ForceV2Zero = true;
12978         BlendMask |= 1ull << i;
12979         Mask[i] = i + Size;
12980         continue;
12981       }
12982     }
12983     return false;
12984   }
12985   return true;
12986 }
12987 
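/// Widen a blend immediate so that each original mask bit covers Scale
/// adjacent output bits. For example, BlendMask = 0b0101 with Size = 4 and
/// Scale = 2 becomes 0b00110011.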
12988 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
12989                                             int Scale) {
12990   uint64_t ScaledMask = 0;
12991   for (int i = 0; i != Size; ++i)
12992     if (BlendMask & (1ull << i))
12993       ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
12994   return ScaledMask;
12995 }
12996 
12997 /// Try to emit a blend instruction for a shuffle.
12998 ///
12999 /// This doesn't do any checks for the availability of instructions for blending
13000 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
13001 /// be matched in the backend with the type given. What it does check for is
13002 /// that the shuffle mask is a blend, or convertible into a blend with zero.
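///
/// For example, a v8i16 shuffle with mask <0, 9, 2, 11, 4, 13, 6, 15> selects
/// the odd elements from V2 and maps to X86ISD::BLENDI with the immediate
/// 0b10101010.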
13003 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
13004                                    SDValue V2, ArrayRef<int> Original,
13005                                    const APInt &Zeroable,
13006                                    const X86Subtarget &Subtarget,
13007                                    SelectionDAG &DAG) {
13008   uint64_t BlendMask = 0;
13009   bool ForceV1Zero = false, ForceV2Zero = false;
13010   SmallVector<int, 64> Mask(Original);
13011   if (!matchShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
13012                            BlendMask))
13013     return SDValue();
13014 
13015   // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
13016   if (ForceV1Zero)
13017     V1 = getZeroVector(VT, Subtarget, DAG, DL);
13018   if (ForceV2Zero)
13019     V2 = getZeroVector(VT, Subtarget, DAG, DL);
13020 
13021   unsigned NumElts = VT.getVectorNumElements();
13022 
13023   switch (VT.SimpleTy) {
13024   case MVT::v4i64:
13025   case MVT::v8i32:
13026     assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
13027     [[fallthrough]];
13028   case MVT::v4f64:
13029   case MVT::v8f32:
13030     assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
13031     [[fallthrough]];
13032   case MVT::v2f64:
13033   case MVT::v2i64:
13034   case MVT::v4f32:
13035   case MVT::v4i32:
13036   case MVT::v8i16:
13037     assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
13038     return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
13039                        DAG.getTargetConstant(BlendMask, DL, MVT::i8));
13040   case MVT::v16i16: {
13041     assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
13042     SmallVector<int, 8> RepeatedMask;
13043     if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
13044       // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
13045       assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
13046       BlendMask = 0;
13047       for (int i = 0; i < 8; ++i)
13048         if (RepeatedMask[i] >= 8)
13049           BlendMask |= 1ull << i;
13050       return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
13051                          DAG.getTargetConstant(BlendMask, DL, MVT::i8));
13052     }
13053     // Use PBLENDW for lower/upper lanes and then blend lanes.
13054     // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
13055     // merge to VSELECT where useful.
13056     uint64_t LoMask = BlendMask & 0xFF;
13057     uint64_t HiMask = (BlendMask >> 8) & 0xFF;
13058     if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
13059       SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
13060                                DAG.getTargetConstant(LoMask, DL, MVT::i8));
13061       SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
13062                                DAG.getTargetConstant(HiMask, DL, MVT::i8));
13063       return DAG.getVectorShuffle(
13064           MVT::v16i16, DL, Lo, Hi,
13065           {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
13066     }
13067     [[fallthrough]];
13068   }
13069   case MVT::v32i8:
13070     assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
13071     [[fallthrough]];
13072   case MVT::v16i8: {
13073     assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
13074 
13075     // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
13076     if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
13077                                                Subtarget, DAG))
13078       return Masked;
13079 
13080     if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
13081       MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
13082       SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
13083       return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
13084     }
13085 
13086     // If we have VPTERNLOG, we can use that as a bit blend.
13087     if (Subtarget.hasVLX())
13088       if (SDValue BitBlend =
13089               lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
13090         return BitBlend;
13091 
13092     // Scale the blend by the number of bytes per element.
13093     int Scale = VT.getScalarSizeInBits() / 8;
13094 
13095     // This form of blend is always done on bytes. Compute the byte vector
13096     // type.
13097     MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
13098 
13099     // x86 allows load folding with blendvb from the 2nd source operand. But
13100     // we are still using LLVM select here (see comment below), so that's V1.
13101     // If V2 can be load-folded and V1 cannot be load-folded, then commute to
13102     // allow that load-folding possibility.
13103     if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
13104       ShuffleVectorSDNode::commuteMask(Mask);
13105       std::swap(V1, V2);
13106     }
13107 
13108     // Compute the VSELECT mask. Note that VSELECT is really confusing in the
13109     // mix of LLVM's code generator and the x86 backend. We tell the code
13110     // generator that boolean values in the elements of an x86 vector register
13111     // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
13112     // mapping a select to operand #1, and 'false' mapping to operand #2. The
13113     // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
13114     // of the element (the remaining are ignored) and 0 in that high bit would
13115     // mean operand #1 while 1 in the high bit would mean operand #2. So while
13116     // the LLVM model for boolean values in vector elements gets the relevant
13117     // bit set, it is set backwards and over constrained relative to x86's
13118     // actual model.
13119     SmallVector<SDValue, 32> VSELECTMask;
13120     for (int i = 0, Size = Mask.size(); i < Size; ++i)
13121       for (int j = 0; j < Scale; ++j)
13122         VSELECTMask.push_back(
13123             Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
13124                         : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
13125                                           MVT::i8));
13126 
13127     V1 = DAG.getBitcast(BlendVT, V1);
13128     V2 = DAG.getBitcast(BlendVT, V2);
13129     return DAG.getBitcast(
13130         VT,
13131         DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
13132                       V1, V2));
13133   }
13134   case MVT::v16f32:
13135   case MVT::v8f64:
13136   case MVT::v8i64:
13137   case MVT::v16i32:
13138   case MVT::v32i16:
13139   case MVT::v64i8: {
13140     // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
13141     bool OptForSize = DAG.shouldOptForSize();
13142     if (!OptForSize) {
13143       if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
13144                                                  Subtarget, DAG))
13145         return Masked;
13146     }
13147 
13148     // Otherwise load an immediate into a GPR, cast to k-register, and use a
13149     // masked move.
13150     MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
13151     SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
13152     return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
13153   }
13154   default:
13155     llvm_unreachable("Not a supported integer vector type!");
13156   }
13157 }
13158 
13159 /// Try to lower as a blend of elements from two inputs followed by
13160 /// a single-input permutation.
13161 ///
13162 /// This matches the pattern where we can blend elements from two inputs and
13163 /// then reduce the shuffle to a single-input permutation.
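///
/// For example, a v4i32 shuffle with mask <2, 4, 1, 7> can be rewritten as a
/// blend with mask <4, 1, 2, 7> followed by a single-input permute with mask
/// <2, 0, 1, 3>.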
13164 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
13165                                              SDValue V1, SDValue V2,
13166                                              ArrayRef<int> Mask,
13167                                              SelectionDAG &DAG,
13168                                              bool ImmBlends = false) {
13169   // We build up the blend mask while checking whether a blend is a viable way
13170   // to reduce the shuffle.
13171   SmallVector<int, 32> BlendMask(Mask.size(), -1);
13172   SmallVector<int, 32> PermuteMask(Mask.size(), -1);
13173 
13174   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
13175     if (Mask[i] < 0)
13176       continue;
13177 
13178     assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
13179 
13180     if (BlendMask[Mask[i] % Size] < 0)
13181       BlendMask[Mask[i] % Size] = Mask[i];
13182     else if (BlendMask[Mask[i] % Size] != Mask[i])
13183       return SDValue(); // Can't blend in the needed input!
13184 
13185     PermuteMask[i] = Mask[i] % Size;
13186   }
13187 
13188   // If only immediate blends, then bail if the blend mask can't be widened to
13189   // i16.
13190   unsigned EltSize = VT.getScalarSizeInBits();
13191   if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
13192     return SDValue();
13193 
13194   SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
13195   return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
13196 }
13197 
13198 /// Try to lower as an unpack of elements from two inputs followed by
13199 /// a single-input permutation.
13200 ///
13201 /// This matches the pattern where we can unpack elements from two inputs and
13202 /// then reduce the shuffle to a single-input (wider) permutation.
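///
/// For example, a v8i16 shuffle with mask <3, 11, 1, 9, 2, 10, 0, 8> only uses
/// the low halves of V1 and V2, so it can be lowered as UNPCKL(V1, V2)
/// followed by a single-input permute with mask <6, 7, 2, 3, 4, 5, 0, 1>.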
13203 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
13204                                              SDValue V1, SDValue V2,
13205                                              ArrayRef<int> Mask,
13206                                              SelectionDAG &DAG) {
13207   int NumElts = Mask.size();
13208   int NumLanes = VT.getSizeInBits() / 128;
13209   int NumLaneElts = NumElts / NumLanes;
13210   int NumHalfLaneElts = NumLaneElts / 2;
13211 
13212   bool MatchLo = true, MatchHi = true;
13213   SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
13214 
13215   // Determine UNPCKL/UNPCKH type and operand order.
13216   for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
13217     for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
13218       int M = Mask[Lane + Elt];
13219       if (M < 0)
13220         continue;
13221 
13222       SDValue &Op = Ops[Elt & 1];
13223       if (M < NumElts && (Op.isUndef() || Op == V1))
13224         Op = V1;
13225       else if (NumElts <= M && (Op.isUndef() || Op == V2))
13226         Op = V2;
13227       else
13228         return SDValue();
13229 
13230       int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
13231       MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
13232                  isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
13233       MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
13234                  isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
13235       if (!MatchLo && !MatchHi)
13236         return SDValue();
13237     }
13238   }
13239   assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
13240 
13241   // Now check that each pair of elts come from the same unpack pair
13242   // and set the permute mask based on each pair.
13243   // TODO - Investigate cases where we permute individual elements.
13244   SmallVector<int, 32> PermuteMask(NumElts, -1);
13245   for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
13246     for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
13247       int M0 = Mask[Lane + Elt + 0];
13248       int M1 = Mask[Lane + Elt + 1];
13249       if (0 <= M0 && 0 <= M1 &&
13250           (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
13251         return SDValue();
13252       if (0 <= M0)
13253         PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
13254       if (0 <= M1)
13255         PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
13256     }
13257   }
13258 
13259   unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
13260   SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
13261   return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
13262 }
13263 
13264 /// Try to lower a shuffle as a permute of the inputs followed by an
13265 /// UNPCK instruction.
13266 ///
13267 /// This specifically targets cases where we end up alternating between the
13268 /// two inputs, and so can permute them into something that feeds a single
13269 /// UNPCK instruction. Note that this routine only targets integer vectors
13270 /// because for floating point vectors we have a generalized SHUFPS lowering
13271 /// strategy that handles everything that doesn't *exactly* match an unpack,
13272 /// making this clever lowering unnecessary.
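///
/// For example, a v4i32 shuffle with mask <0, 2, 4, 6> can be lowered by
/// pre-shuffling each input with <0, 2, u, u> and then unpacking the two
/// results as v2i64 with UNPCKL.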
13273 static SDValue lowerShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
13274                                               SDValue V1, SDValue V2,
13275                                               ArrayRef<int> Mask,
13276                                               const X86Subtarget &Subtarget,
13277                                               SelectionDAG &DAG) {
13278   int Size = Mask.size();
13279   assert(Mask.size() >= 2 && "Single element masks are invalid.");
13280 
13281   // This routine only supports 128-bit integer dual input vectors.
13282   if (VT.isFloatingPoint() || !VT.is128BitVector() || V2.isUndef())
13283     return SDValue();
13284 
13285   int NumLoInputs =
13286       count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
13287   int NumHiInputs =
13288       count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
13289 
13290   bool UnpackLo = NumLoInputs >= NumHiInputs;
13291 
13292   auto TryUnpack = [&](int ScalarSize, int Scale) {
13293     SmallVector<int, 16> V1Mask((unsigned)Size, -1);
13294     SmallVector<int, 16> V2Mask((unsigned)Size, -1);
13295 
13296     for (int i = 0; i < Size; ++i) {
13297       if (Mask[i] < 0)
13298         continue;
13299 
13300       // Each element of the unpack contains Scale elements from this mask.
13301       int UnpackIdx = i / Scale;
13302 
13303       // We only handle the case where V1 feeds the first slots of the unpack.
13304       // We rely on canonicalization to ensure this is the case.
13305       if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
13306         return SDValue();
13307 
13308       // Setup the mask for this input. The indexing is tricky as we have to
13309       // handle the unpack stride.
13310       SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
13311       VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
13312           Mask[i] % Size;
13313     }
13314 
13315     // If we will have to shuffle both inputs to use the unpack, check whether
13316     // we can just unpack first and shuffle the result. If so, skip this unpack.
13317     if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
13318         !isNoopShuffleMask(V2Mask))
13319       return SDValue();
13320 
13321     // Shuffle the inputs into place.
13322     V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
13323     V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
13324 
13325     // Cast the inputs to the type we will use to unpack them.
13326     MVT UnpackVT =
13327         MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
13328     V1 = DAG.getBitcast(UnpackVT, V1);
13329     V2 = DAG.getBitcast(UnpackVT, V2);
13330 
13331     // Unpack the inputs and cast the result back to the desired type.
13332     return DAG.getBitcast(
13333         VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
13334                         UnpackVT, V1, V2));
13335   };
13336 
13337   // We try each unpack from the largest to the smallest to try and find one
13338   // that fits this mask.
13339   int OrigScalarSize = VT.getScalarSizeInBits();
13340   for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
13341     if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
13342       return Unpack;
13343 
13344   // If we're shuffling with a zero vector then we're better off not doing
13345   // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
13346   if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
13347       ISD::isBuildVectorAllZeros(V2.getNode()))
13348     return SDValue();
13349 
13350   // If none of the unpack-rooted lowerings worked (or were profitable) try an
13351   // initial unpack.
13352   if (NumLoInputs == 0 || NumHiInputs == 0) {
13353     assert((NumLoInputs > 0 || NumHiInputs > 0) &&
13354            "We have to have *some* inputs!");
13355     int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
13356 
13357     // FIXME: We could consider the total complexity of the permute of each
13358     // possible unpacking. Or at the least we should consider how many
13359     // half-crossings are created.
13360     // FIXME: We could consider commuting the unpacks.
13361 
13362     SmallVector<int, 32> PermMask((unsigned)Size, -1);
13363     for (int i = 0; i < Size; ++i) {
13364       if (Mask[i] < 0)
13365         continue;
13366 
13367       assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
13368 
13369       PermMask[i] =
13370           2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
13371     }
13372     return DAG.getVectorShuffle(
13373         VT, DL,
13374         DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL, DL, VT,
13375                     V1, V2),
13376         DAG.getUNDEF(VT), PermMask);
13377   }
13378 
13379   return SDValue();
13380 }
13381 
13382 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
13383 /// permuting the elements of the result in place.
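///
/// For example, a v8i16 shuffle with mask <0, 12, 1, 13, 2, 14, 3, 15> can
/// rotate the concatenation so that <V2[4..7], V1[0..3]> lands in a single
/// register and then apply the single-input permute <4, 0, 5, 1, 6, 2, 7, 3>.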
13384 static SDValue lowerShuffleAsByteRotateAndPermute(
13385     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13386     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13387   if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
13388       (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
13389       (VT.is512BitVector() && !Subtarget.hasBWI()))
13390     return SDValue();
13391 
13392   // We don't currently support lane crossing permutes.
13393   if (is128BitLaneCrossingShuffleMask(VT, Mask))
13394     return SDValue();
13395 
13396   int Scale = VT.getScalarSizeInBits() / 8;
13397   int NumLanes = VT.getSizeInBits() / 128;
13398   int NumElts = VT.getVectorNumElements();
13399   int NumEltsPerLane = NumElts / NumLanes;
13400 
13401   // Determine range of mask elts.
13402   bool Blend1 = true;
13403   bool Blend2 = true;
13404   std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
13405   std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
13406   for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
13407     for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
13408       int M = Mask[Lane + Elt];
13409       if (M < 0)
13410         continue;
13411       if (M < NumElts) {
13412         Blend1 &= (M == (Lane + Elt));
13413         assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
13414         M = M % NumEltsPerLane;
13415         Range1.first = std::min(Range1.first, M);
13416         Range1.second = std::max(Range1.second, M);
13417       } else {
13418         M -= NumElts;
13419         Blend2 &= (M == (Lane + Elt));
13420         assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
13421         M = M % NumEltsPerLane;
13422         Range2.first = std::min(Range2.first, M);
13423         Range2.second = std::max(Range2.second, M);
13424       }
13425     }
13426   }
13427 
13428   // Bail if we don't need both elements.
13429   // TODO - it might be worth doing this for unary shuffles if the permute
13430   // can be widened.
13431   if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
13432       !(0 <= Range2.first && Range2.second < NumEltsPerLane))
13433     return SDValue();
13434 
13435   if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
13436     return SDValue();
13437 
13438   // Rotate the 2 ops so we can access both ranges, then permute the result.
13439   auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
13440     MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
13441     SDValue Rotate = DAG.getBitcast(
13442         VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
13443                         DAG.getBitcast(ByteVT, Lo),
13444                         DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
13445     SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
13446     for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
13447       for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
13448         int M = Mask[Lane + Elt];
13449         if (M < 0)
13450           continue;
13451         if (M < NumElts)
13452           PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
13453         else
13454           PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
13455       }
13456     }
13457     return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
13458   };
13459 
13460   // Check if the ranges are small enough to rotate from either direction.
13461   if (Range2.second < Range1.first)
13462     return RotateAndPermute(V1, V2, Range1.first, 0);
13463   if (Range1.second < Range2.first)
13464     return RotateAndPermute(V2, V1, Range2.first, NumElts);
13465   return SDValue();
13466 }
13467 
13468 static bool isBroadcastShuffleMask(ArrayRef<int> Mask) {
13469   return isUndefOrEqual(Mask, 0);
13470 }
13471 
13472 static bool isNoopOrBroadcastShuffleMask(ArrayRef<int> Mask) {
13473   return isNoopShuffleMask(Mask) || isBroadcastShuffleMask(Mask);
13474 }
13475 
13476 /// Generic routine to decompose a shuffle and blend into independent
13477 /// blends and permutes.
13478 ///
13479 /// This matches the extremely common pattern for handling combined
13480 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
13481 /// operations. It will try to pick the best arrangement of shuffles and
13482 /// blends. For vXi8/vXi16 shuffles we may use unpack instead of blend.
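///
/// For example, given a v4i32 shuffle with mask <0, 7, 2, 5>, V1 already
/// supplies its elements in place, so this routine only shuffles V2 with
/// <u, 3, u, 1> and then blends the two with the final mask <0, 5, 2, 7>.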
13483 static SDValue lowerShuffleAsDecomposedShuffleMerge(
13484     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13485     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13486   int NumElts = Mask.size();
13487   int NumLanes = VT.getSizeInBits() / 128;
13488   int NumEltsPerLane = NumElts / NumLanes;
13489 
13490   // Shuffle the input elements into the desired positions in V1 and V2 and
13491   // unpack/blend them together.
13492   bool IsAlternating = true;
13493   SmallVector<int, 32> V1Mask(NumElts, -1);
13494   SmallVector<int, 32> V2Mask(NumElts, -1);
13495   SmallVector<int, 32> FinalMask(NumElts, -1);
13496   for (int i = 0; i < NumElts; ++i) {
13497     int M = Mask[i];
13498     if (M >= 0 && M < NumElts) {
13499       V1Mask[i] = M;
13500       FinalMask[i] = i;
13501       IsAlternating &= (i & 1) == 0;
13502     } else if (M >= NumElts) {
13503       V2Mask[i] = M - NumElts;
13504       FinalMask[i] = i + NumElts;
13505       IsAlternating &= (i & 1) == 1;
13506     }
13507   }
13508 
13509   // If we effectively demand only the 0'th element of \p Input (though not
13510   // necessarily only in the 0'th output position), then broadcast said input,
13511   // and change \p InputMask to be a no-op (identity) mask.
13512   auto canonicalizeBroadcastableInput = [DL, VT, &Subtarget,
13513                                          &DAG](SDValue &Input,
13514                                                MutableArrayRef<int> InputMask) {
13515     unsigned EltSizeInBits = Input.getScalarValueSizeInBits();
13516     if (!Subtarget.hasAVX2() && (!Subtarget.hasAVX() || EltSizeInBits < 32 ||
13517                                  !X86::mayFoldLoad(Input, Subtarget)))
13518       return;
13519     if (isNoopShuffleMask(InputMask))
13520       return;
13521     assert(isBroadcastShuffleMask(InputMask) &&
13522            "Expected to demand only the 0'th element.");
13523     Input = DAG.getNode(X86ISD::VBROADCAST, DL, VT, Input);
13524     for (auto I : enumerate(InputMask)) {
13525       int &InputMaskElt = I.value();
13526       if (InputMaskElt >= 0)
13527         InputMaskElt = I.index();
13528     }
13529   };
13530 
13531   // Currently, we may need to produce one shuffle per input, and blend results.
13532   // It is possible that the shuffle for one of the inputs is already a no-op.
13533   // See if we can simplify non-no-op shuffles into broadcasts,
13534   // which we consider to be strictly better than an arbitrary shuffle.
13535   if (isNoopOrBroadcastShuffleMask(V1Mask) &&
13536       isNoopOrBroadcastShuffleMask(V2Mask)) {
13537     canonicalizeBroadcastableInput(V1, V1Mask);
13538     canonicalizeBroadcastableInput(V2, V2Mask);
13539   }
13540 
13541   // Try to lower with the simpler initial blend/unpack/rotate strategies unless
13542   // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
13543   // the shuffle may be able to fold with a load or other benefit. However, when
13544   // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
13545   // pre-shuffle first is a better strategy.
13546   if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
13547     // Only prefer immediate blends to unpack/rotate.
13548     if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
13549                                                           DAG, true))
13550       return BlendPerm;
13551     if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
13552                                                            DAG))
13553       return UnpackPerm;
13554     if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
13555             DL, VT, V1, V2, Mask, Subtarget, DAG))
13556       return RotatePerm;
13557     // Unpack/rotate failed - try again with variable blends.
13558     if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
13559                                                           DAG))
13560       return BlendPerm;
13561     if (VT.getScalarSizeInBits() >= 32)
13562       if (SDValue PermUnpack = lowerShuffleAsPermuteAndUnpack(
13563               DL, VT, V1, V2, Mask, Subtarget, DAG))
13564         return PermUnpack;
13565   }
13566 
13567   // If the final mask is an alternating blend of vXi8/vXi16, convert to an
13568   // UNPCKL(SHUFFLE, SHUFFLE) pattern.
13569   // TODO: It doesn't have to be alternating - but each lane mustn't have more
13570   // than half the elements coming from each source.
13571   if (IsAlternating && VT.getScalarSizeInBits() < 32) {
13572     V1Mask.assign(NumElts, -1);
13573     V2Mask.assign(NumElts, -1);
13574     FinalMask.assign(NumElts, -1);
13575     for (int i = 0; i != NumElts; i += NumEltsPerLane)
13576       for (int j = 0; j != NumEltsPerLane; ++j) {
13577         int M = Mask[i + j];
13578         if (M >= 0 && M < NumElts) {
13579           V1Mask[i + (j / 2)] = M;
13580           FinalMask[i + j] = i + (j / 2);
13581         } else if (M >= NumElts) {
13582           V2Mask[i + (j / 2)] = M - NumElts;
13583           FinalMask[i + j] = i + (j / 2) + NumElts;
13584         }
13585       }
13586   }
13587 
13588   V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
13589   V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
13590   return DAG.getVectorShuffle(VT, DL, V1, V2, FinalMask);
13591 }
13592 
13593 /// Try to lower a vector shuffle as a bit rotation.
13594 ///
13595 /// Look for a repeated rotation pattern in each sub group.
13596 /// Returns an ISD::ROTL element rotation amount or -1 on failure.
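///
/// For example, with NumSubElts == 4 the v16i8 mask
/// <3, 0, 1, 2, 7, 4, 5, 6, 11, 8, 9, 10, 15, 12, 13, 14> rotates each i32
/// group left by one byte element, so this returns 1 (scaled to a bit amount
/// by the caller).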
13597 static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
13598   int NumElts = Mask.size();
13599   assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
13600 
13601   int RotateAmt = -1;
13602   for (int i = 0; i != NumElts; i += NumSubElts) {
13603     for (int j = 0; j != NumSubElts; ++j) {
13604       int M = Mask[i + j];
13605       if (M < 0)
13606         continue;
13607       if (!isInRange(M, i, i + NumSubElts))
13608         return -1;
13609       int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
13610       if (0 <= RotateAmt && Offset != RotateAmt)
13611         return -1;
13612       RotateAmt = Offset;
13613     }
13614   }
13615   return RotateAmt;
13616 }
13617 
13618 static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
13619                                    const X86Subtarget &Subtarget,
13620                                    ArrayRef<int> Mask) {
13621   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
13622   assert(EltSizeInBits < 64 && "Can't rotate 64-bit integers");
13623 
13624   // AVX512 only has vXi32/vXi64 rotates, so limit the rotation sub group size.
13625   int MinSubElts = Subtarget.hasAVX512() ? std::max(32 / EltSizeInBits, 2) : 2;
13626   int MaxSubElts = 64 / EltSizeInBits;
13627   for (int NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
13628     int RotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
13629     if (RotateAmt < 0)
13630       continue;
13631 
13632     int NumElts = Mask.size();
13633     MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
13634     RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
13635     return RotateAmt * EltSizeInBits;
13636   }
13637 
13638   return -1;
13639 }
13640 
13641 /// Lower shuffle using X86ISD::VROTLI rotations.
13642 static SDValue lowerShuffleAsBitRotate(const SDLoc &DL, MVT VT, SDValue V1,
13643                                        ArrayRef<int> Mask,
13644                                        const X86Subtarget &Subtarget,
13645                                        SelectionDAG &DAG) {
13646   // Only XOP + AVX512 targets have bit rotation instructions.
13647   // If we at least have SSSE3 (PSHUFB) then we shouldn't attempt to use this.
13648   bool IsLegal =
13649       (VT.is128BitVector() && Subtarget.hasXOP()) || Subtarget.hasAVX512();
13650   if (!IsLegal && Subtarget.hasSSE3())
13651     return SDValue();
13652 
13653   MVT RotateVT;
13654   int RotateAmt = matchShuffleAsBitRotate(RotateVT, VT.getScalarSizeInBits(),
13655                                           Subtarget, Mask);
13656   if (RotateAmt < 0)
13657     return SDValue();
13658 
13659   // For pre-SSSE3 targets, if we are shuffling vXi8 elts then ISD::ROTL,
13660   // expanded to OR(SRL,SHL), will be more efficient, but if they can
13661   // widen to vXi16 or more, then the existing lowering will be better.
13662   if (!IsLegal) {
13663     if ((RotateAmt % 16) == 0)
13664       return SDValue();
13665     // TODO: Use getTargetVShiftByConstNode.
13666     unsigned ShlAmt = RotateAmt;
13667     unsigned SrlAmt = RotateVT.getScalarSizeInBits() - RotateAmt;
13668     V1 = DAG.getBitcast(RotateVT, V1);
13669     SDValue SHL = DAG.getNode(X86ISD::VSHLI, DL, RotateVT, V1,
13670                               DAG.getTargetConstant(ShlAmt, DL, MVT::i8));
13671     SDValue SRL = DAG.getNode(X86ISD::VSRLI, DL, RotateVT, V1,
13672                               DAG.getTargetConstant(SrlAmt, DL, MVT::i8));
13673     SDValue Rot = DAG.getNode(ISD::OR, DL, RotateVT, SHL, SRL);
13674     return DAG.getBitcast(VT, Rot);
13675   }
13676 
13677   SDValue Rot =
13678       DAG.getNode(X86ISD::VROTLI, DL, RotateVT, DAG.getBitcast(RotateVT, V1),
13679                   DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
13680   return DAG.getBitcast(VT, Rot);
13681 }
13682 
13683 /// Try to match a vector shuffle as an element rotation.
13684 ///
13685 /// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
13686 static int matchShuffleAsElementRotate(SDValue &V1, SDValue &V2,
13687                                        ArrayRef<int> Mask) {
13688   int NumElts = Mask.size();
13689 
13690   // We need to detect various ways of spelling a rotation:
13691   //   [11, 12, 13, 14, 15,  0,  1,  2]
13692   //   [-1, 12, 13, 14, -1, -1,  1, -1]
13693   //   [-1, -1, -1, -1, -1, -1,  1,  2]
13694   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
13695   //   [-1,  4,  5,  6, -1, -1,  9, -1]
13696   //   [-1,  4,  5,  6, -1, -1, -1, -1]
13697   int Rotation = 0;
13698   SDValue Lo, Hi;
13699   for (int i = 0; i < NumElts; ++i) {
13700     int M = Mask[i];
13701     assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
13702            "Unexpected mask index.");
13703     if (M < 0)
13704       continue;
13705 
13706     // Determine where a rotated vector would have started.
13707     int StartIdx = i - (M % NumElts);
13708     if (StartIdx == 0)
13709       // The identity rotation isn't interesting, stop.
13710       return -1;
13711 
13712     // If we found the tail of a vector the rotation must be the missing
13713     // front. If we found the head of a vector, it must be how much of the
13714     // head.
13715     int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
13716 
13717     if (Rotation == 0)
13718       Rotation = CandidateRotation;
13719     else if (Rotation != CandidateRotation)
13720       // The rotations don't match, so we can't match this mask.
13721       return -1;
13722 
13723     // Compute which value this mask is pointing at.
13724     SDValue MaskV = M < NumElts ? V1 : V2;
13725 
13726     // Compute which of the two target values this index should be assigned
13727     // to. This reflects whether the high elements are remaining or the low
13728     // elements are remaining.
13729     SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
13730 
13731     // Either set up this value if we've not encountered it before, or check
13732     // that it remains consistent.
13733     if (!TargetV)
13734       TargetV = MaskV;
13735     else if (TargetV != MaskV)
13736       // This may be a rotation, but it pulls from the inputs in some
13737       // unsupported interleaving.
13738       return -1;
13739   }
13740 
13741   // Check that we successfully analyzed the mask, and normalize the results.
13742   assert(Rotation != 0 && "Failed to locate a viable rotation!");
13743   assert((Lo || Hi) && "Failed to find a rotated input vector!");
13744   if (!Lo)
13745     Lo = Hi;
13746   else if (!Hi)
13747     Hi = Lo;
13748 
13749   V1 = Lo;
13750   V2 = Hi;
13751 
13752   return Rotation;
13753 }
13754 
13755 /// Try to lower a vector shuffle as a byte rotation.
13756 ///
13757 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
13758 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
13759 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
13760 /// try to generically lower a vector shuffle through such a pattern. It
13761 /// does not check for the profitability of lowering either as PALIGNR or
13762 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
13763 /// This matches shuffle vectors that look like:
13764 ///
13765 ///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
13766 ///
13767 /// Essentially it concatenates V1 and V2, shifts right by some number of
13768 /// elements, and takes the low elements as the result. Note that while this is
13769 /// specified as a *right shift* because x86 is little-endian, it is a *left
13770 /// rotate* of the vector lanes.
13771 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
13772                                     ArrayRef<int> Mask) {
13773   // Don't accept any shuffles with zero elements.
13774   if (isAnyZero(Mask))
13775     return -1;
13776 
13777   // PALIGNR works on 128-bit lanes.
13778   SmallVector<int, 16> RepeatedMask;
13779   if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
13780     return -1;
13781 
13782   int Rotation = matchShuffleAsElementRotate(V1, V2, RepeatedMask);
13783   if (Rotation <= 0)
13784     return -1;
13785 
13786   // PALIGNR rotates bytes, so we need to scale the
13787   // rotation based on how many bytes are in the vector lane.
13788   int NumElts = RepeatedMask.size();
13789   int Scale = 16 / NumElts;
13790   return Rotation * Scale;
13791 }
13792 
13793 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
13794                                         SDValue V2, ArrayRef<int> Mask,
13795                                         const X86Subtarget &Subtarget,
13796                                         SelectionDAG &DAG) {
13797   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
13798 
13799   SDValue Lo = V1, Hi = V2;
13800   int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
13801   if (ByteRotation <= 0)
13802     return SDValue();
13803 
13804   // Cast the inputs to i8 vector of correct length to match PALIGNR or
13805   // PSLLDQ/PSRLDQ.
13806   MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
13807   Lo = DAG.getBitcast(ByteVT, Lo);
13808   Hi = DAG.getBitcast(ByteVT, Hi);
13809 
13810   // SSSE3 targets can use the palignr instruction.
13811   if (Subtarget.hasSSSE3()) {
13812     assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
13813            "512-bit PALIGNR requires BWI instructions");
13814     return DAG.getBitcast(
13815         VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
13816                         DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
13817   }
13818 
13819   assert(VT.is128BitVector() &&
13820          "Rotate-based lowering only supports 128-bit lowering!");
13821   assert(Mask.size() <= 16 &&
13822          "Can shuffle at most 16 bytes in a 128-bit vector!");
13823   assert(ByteVT == MVT::v16i8 &&
13824          "SSE2 rotate lowering only needed for v16i8!");
13825 
13826   // Default SSE2 implementation
13827   int LoByteShift = 16 - ByteRotation;
13828   int HiByteShift = ByteRotation;
13829 
13830   SDValue LoShift =
13831       DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
13832                   DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
13833   SDValue HiShift =
13834       DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
13835                   DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
13836   return DAG.getBitcast(VT,
13837                         DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
13838 }
13839 
13840 /// Try to lower a vector shuffle as a dword/qword rotation.
13841 ///
13842 /// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
13843 /// rotation of the concatenation of two vectors; this routine will
13844 /// try to generically lower a vector shuffle through such a pattern.
13845 ///
13846 /// Essentially it concatenates V1 and V2, shifts right by some number of
13847 /// elements, and takes the low elements as the result. Note that while this is
13848 /// specified as a *right shift* because x86 is little-endian, it is a *left
13849 /// rotate* of the vector lanes.
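///
/// For example, a v8i32 shuffle with mask <3, 4, 5, 6, 7, 8, 9, 10> takes the
/// concatenation of the two sources shifted right by 3 elements and therefore
/// maps to VALIGND with an immediate of 3.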
13850 static SDValue lowerShuffleAsVALIGN(const SDLoc &DL, MVT VT, SDValue V1,
13851                                     SDValue V2, ArrayRef<int> Mask,
13852                                     const X86Subtarget &Subtarget,
13853                                     SelectionDAG &DAG) {
13854   assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
13855          "Only 32-bit and 64-bit elements are supported!");
13856 
13857   // 128/256-bit vectors are only supported with VLX.
13858   assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
13859          && "VLX required for 128/256-bit vectors");
13860 
13861   SDValue Lo = V1, Hi = V2;
13862   int Rotation = matchShuffleAsElementRotate(Lo, Hi, Mask);
13863   if (Rotation <= 0)
13864     return SDValue();
13865 
13866   return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
13867                      DAG.getTargetConstant(Rotation, DL, MVT::i8));
13868 }
13869 
13870 /// Try to lower a vector shuffle as a byte shift sequence.
13871 static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
13872                                            SDValue V2, ArrayRef<int> Mask,
13873                                            const APInt &Zeroable,
13874                                            const X86Subtarget &Subtarget,
13875                                            SelectionDAG &DAG) {
13876   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
13877   assert(VT.is128BitVector() && "Only 128-bit vectors supported");
13878 
13879   // We need a shuffle that has zeros at one/both ends and a sequential
13880   // shuffle from one source within.
13881   unsigned ZeroLo = Zeroable.countTrailingOnes();
13882   unsigned ZeroHi = Zeroable.countLeadingOnes();
13883   if (!ZeroLo && !ZeroHi)
13884     return SDValue();
13885 
13886   unsigned NumElts = Mask.size();
13887   unsigned Len = NumElts - (ZeroLo + ZeroHi);
13888   if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
13889     return SDValue();
13890 
13891   unsigned Scale = VT.getScalarSizeInBits() / 8;
13892   ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
13893   if (!isUndefOrInRange(StubMask, 0, NumElts) &&
13894       !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
13895     return SDValue();
13896 
13897   SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
13898   Res = DAG.getBitcast(MVT::v16i8, Res);
13899 
13900   // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
13901   // inner sequential set of elements, possibly offset:
13902   // 01234567 --> zzzzzz01 --> 1zzzzzzz
13903   // 01234567 --> 4567zzzz --> zzzzz456
13904   // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
13905   if (ZeroLo == 0) {
13906     unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
13907     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
13908                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
13909     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
13910                       DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
13911   } else if (ZeroHi == 0) {
13912     unsigned Shift = Mask[ZeroLo] % NumElts;
13913     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
13914                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
13915     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
13916                       DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
13917   } else if (!Subtarget.hasSSSE3()) {
13918     // If we don't have PSHUFB then it's worth avoiding an AND constant mask
13919     // by performing 3 byte shifts. Shuffle combining can kick in above that.
13920     // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
13921     unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
13922     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
13923                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
13924     Shift += Mask[ZeroLo] % NumElts;
13925     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
13926                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
13927     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
13928                       DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
13929   } else
13930     return SDValue();
13931 
13932   return DAG.getBitcast(VT, Res);
13933 }
13934 
13935 /// Try to lower a vector shuffle as a bit shift (shifts in zeros).
13936 ///
13937 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
13938 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
13939 /// matches elements from one of the input vectors shuffled to the left or
13940 /// right with zeroable elements 'shifted in'. It handles both the strictly
13941 /// bit-wise element shifts and the byte shift across an entire 128-bit double
13942 /// quad word lane.
13943 ///
13944 /// PSLL : (little-endian) left bit shift.
13945 /// [ zz, 0, zz,  2 ]
13946 /// [ -1, 4, zz, -1 ]
13947 /// PSRL : (little-endian) right bit shift.
13948 /// [  1, zz,  3, zz]
13949 /// [ -1, -1,  7, zz]
13950 /// PSLLDQ : (little-endian) left byte shift
13951 /// [ zz,  0,  1,  2,  3,  4,  5,  6]
13952 /// [ zz, zz, -1, -1,  2,  3,  4, -1]
13953 /// [ zz, zz, zz, zz, zz, zz, -1,  1]
13954 /// PSRLDQ : (little-endian) right byte shift
13955 /// [  5, 6,  7, zz, zz, zz, zz, zz]
13956 /// [ -1, 5,  6,  7, zz, zz, zz, zz]
13957 /// [  1, 2, -1, -1, -1, -1, zz, zz]
13958 static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
13959                                unsigned ScalarSizeInBits, ArrayRef<int> Mask,
13960                                int MaskOffset, const APInt &Zeroable,
13961                                const X86Subtarget &Subtarget) {
13962   int Size = Mask.size();
13963   unsigned SizeInBits = Size * ScalarSizeInBits;
13964 
13965   auto CheckZeros = [&](int Shift, int Scale, bool Left) {
13966     for (int i = 0; i < Size; i += Scale)
13967       for (int j = 0; j < Shift; ++j)
13968         if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
13969           return false;
13970 
13971     return true;
13972   };
13973 
13974   auto MatchShift = [&](int Shift, int Scale, bool Left) {
13975     for (int i = 0; i != Size; i += Scale) {
13976       unsigned Pos = Left ? i + Shift : i;
13977       unsigned Low = Left ? i : i + Shift;
13978       unsigned Len = Scale - Shift;
13979       if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
13980         return -1;
13981     }
13982 
13983     int ShiftEltBits = ScalarSizeInBits * Scale;
13984     bool ByteShift = ShiftEltBits > 64;
13985     Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
13986                   : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
13987     int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
13988 
13989     // Normalize the scale for byte shifts to still produce an i64 element
13990     // type.
13991     Scale = ByteShift ? Scale / 2 : Scale;
13992 
13993     // We need to round trip through the appropriate type for the shift.
13994     MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
13995     ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
13996                         : MVT::getVectorVT(ShiftSVT, Size / Scale);
13997     return (int)ShiftAmt;
13998   };
13999 
14000   // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
14001   // keep doubling the size of the integer elements up to that. We can
14002   // then shift the elements of the integer vector by whole multiples of
14003   // their width within the elements of the larger integer vector. Test each
14004   // multiple to see if we can find a match with the moved element indices
14005   // and that the shifted in elements are all zeroable.
14006   unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
14007   for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
14008     for (int Shift = 1; Shift != Scale; ++Shift)
14009       for (bool Left : {true, false})
14010         if (CheckZeros(Shift, Scale, Left)) {
14011           int ShiftAmt = MatchShift(Shift, Scale, Left);
14012           if (0 < ShiftAmt)
14013             return ShiftAmt;
14014         }
14015 
14016   // no match
14017   return -1;
14018 }
14019 
14020 static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
14021                                    SDValue V2, ArrayRef<int> Mask,
14022                                    const APInt &Zeroable,
14023                                    const X86Subtarget &Subtarget,
14024                                    SelectionDAG &DAG) {
14025   int Size = Mask.size();
14026   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
14027 
14028   MVT ShiftVT;
14029   SDValue V = V1;
14030   unsigned Opcode;
14031 
14032   // Try to match shuffle against V1 shift.
14033   int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
14034                                      Mask, 0, Zeroable, Subtarget);
14035 
14036   // If V1 failed, try to match shuffle against V2 shift.
14037   if (ShiftAmt < 0) {
14038     ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
14039                                    Mask, Size, Zeroable, Subtarget);
14040     V = V2;
14041   }
14042 
14043   if (ShiftAmt < 0)
14044     return SDValue();
14045 
14046   assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
14047          "Illegal integer vector type");
14048   V = DAG.getBitcast(ShiftVT, V);
14049   V = DAG.getNode(Opcode, DL, ShiftVT, V,
14050                   DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
14051   return DAG.getBitcast(VT, V);
14052 }
14053 
14054 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
14055 // Remainder of lower half result is zero and upper half is all undef.
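// For example, a v16i8 mask of <4, 5, 6, zz, zz, zz, zz, zz, undef x 8>
// matches with Len = 3 and Idx = 4, i.e. BitLen = 24 and BitIdx = 32.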
14056 static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
14057                                 ArrayRef<int> Mask, uint64_t &BitLen,
14058                                 uint64_t &BitIdx, const APInt &Zeroable) {
14059   int Size = Mask.size();
14060   int HalfSize = Size / 2;
14061   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
14062   assert(!Zeroable.isAllOnes() && "Fully zeroable shuffle mask");
14063 
14064   // Upper half must be undefined.
14065   if (!isUndefUpperHalf(Mask))
14066     return false;
14067 
14068   // Determine the extraction length from the part of the
14069   // lower half that isn't zeroable.
14070   int Len = HalfSize;
14071   for (; Len > 0; --Len)
14072     if (!Zeroable[Len - 1])
14073       break;
14074   assert(Len > 0 && "Zeroable shuffle mask");
14075 
14076   // Attempt to match first Len sequential elements from the lower half.
14077   SDValue Src;
14078   int Idx = -1;
14079   for (int i = 0; i != Len; ++i) {
14080     int M = Mask[i];
14081     if (M == SM_SentinelUndef)
14082       continue;
14083     SDValue &V = (M < Size ? V1 : V2);
14084     M = M % Size;
14085 
14086     // The extracted elements must start at a valid index and all mask
14087     // elements must be in the lower half.
14088     if (i > M || M >= HalfSize)
14089       return false;
14090 
14091     if (Idx < 0 || (Src == V && Idx == (M - i))) {
14092       Src = V;
14093       Idx = M - i;
14094       continue;
14095     }
14096     return false;
14097   }
14098 
14099   if (!Src || Idx < 0)
14100     return false;
14101 
14102   assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
14103   BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
14104   BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
14105   V1 = Src;
14106   return true;
14107 }
14108 
14109 // INSERTQ: Extract lowest Len elements from lower half of second source and
14110 // insert over first source, starting at Idx.
14111 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
14112 static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
14113                                   ArrayRef<int> Mask, uint64_t &BitLen,
14114                                   uint64_t &BitIdx) {
14115   int Size = Mask.size();
14116   int HalfSize = Size / 2;
14117   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
14118 
14119   // Upper half must be undefined.
14120   if (!isUndefUpperHalf(Mask))
14121     return false;
14122 
14123   for (int Idx = 0; Idx != HalfSize; ++Idx) {
14124     SDValue Base;
14125 
14126     // Attempt to match first source from mask before insertion point.
14127     if (isUndefInRange(Mask, 0, Idx)) {
14128       /* EMPTY */
14129     } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
14130       Base = V1;
14131     } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
14132       Base = V2;
14133     } else {
14134       continue;
14135     }
14136 
14137     // Extend the extraction length looking to match both the insertion of
14138     // the second source and the remaining elements of the first.
14139     for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
14140       SDValue Insert;
14141       int Len = Hi - Idx;
14142 
14143       // Match insertion.
14144       if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
14145         Insert = V1;
14146       } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
14147         Insert = V2;
14148       } else {
14149         continue;
14150       }
14151 
14152       // Match the remaining elements of the lower half.
14153       if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
14154         /* EMPTY */
14155       } else if ((!Base || (Base == V1)) &&
14156                  isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
14157         Base = V1;
14158       } else if ((!Base || (Base == V2)) &&
14159                  isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
14160                                             Size + Hi)) {
14161         Base = V2;
14162       } else {
14163         continue;
14164       }
14165 
14166       BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
14167       BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
14168       V1 = Base;
14169       V2 = Insert;
14170       return true;
14171     }
14172   }
14173 
14174   return false;
14175 }
14176 
14177 /// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable, SelectionDAG &DAG) {
14181   uint64_t BitLen, BitIdx;
14182   if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
14183     return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
14184                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
14185                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
14186 
14187   if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
14188     return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
14189                        V2 ? V2 : DAG.getUNDEF(VT),
14190                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
14191                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
14192 
14193   return SDValue();
14194 }
14195 
/// Lower a vector shuffle as a zero or any extension.
///
/// Given a specific number of elements, element bit width, and extension
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget. The extended elements are consecutive and
/// can start from an offsetted element index in the input; to avoid excess
/// shuffling the offset must either be in the bottom lane or at the start
/// of a higher lane. All extended elements must be from the same lane.
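///
/// For example (hypothetical values): VT = v16i8, Scale = 4, Offset = 0 and
/// AnyExt = false asks for the low four bytes of InputV to become zero-extended
/// i32 elements; on SSE4.1 and later this collapses to a single
/// ZERO_EXTEND_VECTOR_INREG (a PMOVZXBD-style node), while pre-SSE4.1 targets
/// fall back to the unpack-with-zero sequence below.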
static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
    ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14208   assert(Scale > 1 && "Need a scale to extend.");
14209   int EltBits = VT.getScalarSizeInBits();
14210   int NumElements = VT.getVectorNumElements();
14211   int NumEltsPerLane = 128 / EltBits;
14212   int OffsetLane = Offset / NumEltsPerLane;
14213   assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
14214          "Only 8, 16, and 32 bit elements can be extended.");
14215   assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
14216   assert(0 <= Offset && "Extension offset must be positive.");
14217   assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
14218          "Extension offset must be in the first lane or start an upper lane.");
14219 
14220   // Check that an index is in same lane as the base offset.
14221   auto SafeOffset = [&](int Idx) {
14222     return OffsetLane == (Idx / NumEltsPerLane);
14223   };
14224 
14225   // Shift along an input so that the offset base moves to the first element.
14226   auto ShuffleOffset = [&](SDValue V) {
14227     if (!Offset)
14228       return V;
14229 
14230     SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
14231     for (int i = 0; i * Scale < NumElements; ++i) {
14232       int SrcIdx = i + Offset;
14233       ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
14234     }
14235     return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
14236   };
14237 
14238   // Found a valid a/zext mask! Try various lowering strategies based on the
14239   // input type and available ISA extensions.
14240   if (Subtarget.hasSSE41()) {
    // Not worth offsetting 128-bit vectors if scale == 2; a pattern using
    // PUNPCK will catch this in a later shuffle match.
14243     if (Offset && Scale == 2 && VT.is128BitVector())
14244       return SDValue();
14245     MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
14246                                  NumElements / Scale);
14247     InputV = ShuffleOffset(InputV);
14248     InputV = getEXTEND_VECTOR_INREG(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND,
14249                                     DL, ExtVT, InputV, DAG);
14250     return DAG.getBitcast(VT, InputV);
14251   }
14252 
14253   assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
14254 
14255   // For any extends we can cheat for larger element sizes and use shuffle
14256   // instructions that can fold with a load and/or copy.
14257   if (AnyExt && EltBits == 32) {
14258     int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
14259                          -1};
14260     return DAG.getBitcast(
14261         VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
14262                         DAG.getBitcast(MVT::v4i32, InputV),
14263                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
14264   }
14265   if (AnyExt && EltBits == 16 && Scale > 2) {
14266     int PSHUFDMask[4] = {Offset / 2, -1,
14267                          SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
14268     InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
14269                          DAG.getBitcast(MVT::v4i32, InputV),
14270                          getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
14271     int PSHUFWMask[4] = {1, -1, -1, -1};
14272     unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
14273     return DAG.getBitcast(
14274         VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
14275                         DAG.getBitcast(MVT::v8i16, InputV),
14276                         getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
14277   }
14278 
14279   // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
14280   // to 64-bits.
14281   if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
14282     assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
14283     assert(VT.is128BitVector() && "Unexpected vector width!");
14284 
14285     int LoIdx = Offset * EltBits;
14286     SDValue Lo = DAG.getBitcast(
14287         MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
14288                                 DAG.getTargetConstant(EltBits, DL, MVT::i8),
14289                                 DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
14290 
14291     if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
14292       return DAG.getBitcast(VT, Lo);
14293 
14294     int HiIdx = (Offset + 1) * EltBits;
14295     SDValue Hi = DAG.getBitcast(
14296         MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
14297                                 DAG.getTargetConstant(EltBits, DL, MVT::i8),
14298                                 DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
14299     return DAG.getBitcast(VT,
14300                           DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
14301   }
14302 
14303   // If this would require more than 2 unpack instructions to expand, use
14304   // pshufb when available. We can only use more than 2 unpack instructions
14305   // when zero extending i8 elements which also makes it easier to use pshufb.
14306   if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
14307     assert(NumElements == 16 && "Unexpected byte vector width!");
14308     SDValue PSHUFBMask[16];
14309     for (int i = 0; i < 16; ++i) {
14310       int Idx = Offset + (i / Scale);
14311       if ((i % Scale == 0 && SafeOffset(Idx))) {
14312         PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
14313         continue;
14314       }
14315       PSHUFBMask[i] =
14316           AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
14317     }
14318     InputV = DAG.getBitcast(MVT::v16i8, InputV);
14319     return DAG.getBitcast(
14320         VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
14321                         DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
14322   }
14323 
14324   // If we are extending from an offset, ensure we start on a boundary that
14325   // we can unpack from.
14326   int AlignToUnpack = Offset % (NumElements / Scale);
14327   if (AlignToUnpack) {
14328     SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
14329     for (int i = AlignToUnpack; i < NumElements; ++i)
14330       ShMask[i - AlignToUnpack] = i;
14331     InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
14332     Offset -= AlignToUnpack;
14333   }
14334 
14335   // Otherwise emit a sequence of unpacks.
14336   do {
14337     unsigned UnpackLoHi = X86ISD::UNPCKL;
14338     if (Offset >= (NumElements / 2)) {
14339       UnpackLoHi = X86ISD::UNPCKH;
14340       Offset -= (NumElements / 2);
14341     }
14342 
14343     MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
14344     SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
14345                          : getZeroVector(InputVT, Subtarget, DAG, DL);
14346     InputV = DAG.getBitcast(InputVT, InputV);
14347     InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
14348     Scale /= 2;
14349     EltBits *= 2;
14350     NumElements /= 2;
14351   } while (Scale > 1);
14352   return DAG.getBitcast(VT, InputV);
14353 }
14354 
14355 /// Try to lower a vector shuffle as a zero extension on any microarch.
14356 ///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering; it tries to aggressively
/// match this pattern. It will use all of the micro-architectural details it
/// can to emit an efficient lowering. It handles both blends with all-zero
/// inputs to explicitly zero-extend and undef lanes (sometimes undef due to
/// masking out later).
///
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
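///
/// As an illustration (hypothetical operands): a v4i32 shuffle of V1 against a
/// zero vector with Mask = <0, 4, 1, 4> has Zeroable = {1, 3}, so Lower(2)
/// recognizes a zero extension of V1's low two i32 elements to i64 with
/// Offset = 0, which the helper above emits as a single PMOVZXDQ-style
/// extension on SSE4.1.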
static SDValue lowerShuffleAsZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
14371   int Bits = VT.getSizeInBits();
14372   int NumLanes = Bits / 128;
14373   int NumElements = VT.getVectorNumElements();
14374   int NumEltsPerLane = NumElements / NumLanes;
14375   assert(VT.getScalarSizeInBits() <= 32 &&
14376          "Exceeds 32-bit integer zero extension limit");
14377   assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
14378 
14379   // Define a helper function to check a particular ext-scale and lower to it if
14380   // valid.
14381   auto Lower = [&](int Scale) -> SDValue {
14382     SDValue InputV;
14383     bool AnyExt = true;
14384     int Offset = 0;
14385     int Matches = 0;
14386     for (int i = 0; i < NumElements; ++i) {
14387       int M = Mask[i];
14388       if (M < 0)
14389         continue; // Valid anywhere but doesn't tell us anything.
14390       if (i % Scale != 0) {
        // Each of the extended elements needs to be zeroable.
14392         if (!Zeroable[i])
14393           return SDValue();
14394 
14395         // We no longer are in the anyext case.
14396         AnyExt = false;
14397         continue;
14398       }
14399 
14400       // Each of the base elements needs to be consecutive indices into the
14401       // same input vector.
14402       SDValue V = M < NumElements ? V1 : V2;
14403       M = M % NumElements;
14404       if (!InputV) {
14405         InputV = V;
14406         Offset = M - (i / Scale);
14407       } else if (InputV != V)
14408         return SDValue(); // Flip-flopping inputs.
14409 
14410       // Offset must start in the lowest 128-bit lane or at the start of an
14411       // upper lane.
14412       // FIXME: Is it ever worth allowing a negative base offset?
14413       if (!((0 <= Offset && Offset < NumEltsPerLane) ||
14414             (Offset % NumEltsPerLane) == 0))
14415         return SDValue();
14416 
14417       // If we are offsetting, all referenced entries must come from the same
14418       // lane.
14419       if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
14420         return SDValue();
14421 
14422       if ((M % NumElements) != (Offset + (i / Scale)))
14423         return SDValue(); // Non-consecutive strided elements.
14424       Matches++;
14425     }
14426 
14427     // If we fail to find an input, we have a zero-shuffle which should always
14428     // have already been handled.
14429     // FIXME: Maybe handle this here in case during blending we end up with one?
14430     if (!InputV)
14431       return SDValue();
14432 
    // If we are offsetting, don't extend if we only match a single input; we
    // can always do better by using a basic PSHUF or PUNPCK.
14435     if (Offset != 0 && Matches < 2)
14436       return SDValue();
14437 
14438     return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
14439                                                  InputV, Mask, Subtarget, DAG);
14440   };
14441 
14442   // The widest scale possible for extending is to a 64-bit integer.
14443   assert(Bits % 64 == 0 &&
14444          "The number of bits in a vector must be divisible by 64 on x86!");
14445   int NumExtElements = Bits / 64;
14446 
14447   // Each iteration, try extending the elements half as much, but into twice as
14448   // many elements.
14449   for (; NumExtElements < NumElements; NumExtElements *= 2) {
14450     assert(NumElements % NumExtElements == 0 &&
14451            "The input vector size must be divisible by the extended size.");
14452     if (SDValue V = Lower(NumElements / NumExtElements))
14453       return V;
14454   }
14455 
14456   // General extends failed, but 128-bit vectors may be able to use MOVQ.
14457   if (Bits != 128)
14458     return SDValue();
14459 
14460   // Returns one of the source operands if the shuffle can be reduced to a
14461   // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
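  // For example (hypothetical operands): a v4i32 shuffle of V1 against zero
  // with Mask = <0, 1, Z, Z> (upper half zeroable) reduces to VZEXT_MOVL on
  // v2i64, i.e. a single MOVQ that keeps the low 64 bits and zeroes the rest.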
14462   auto CanZExtLowHalf = [&]() {
14463     for (int i = NumElements / 2; i != NumElements; ++i)
14464       if (!Zeroable[i])
14465         return SDValue();
14466     if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
14467       return V1;
14468     if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
14469       return V2;
14470     return SDValue();
14471   };
14472 
14473   if (SDValue V = CanZExtLowHalf()) {
14474     V = DAG.getBitcast(MVT::v2i64, V);
14475     V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
14476     return DAG.getBitcast(VT, V);
14477   }
14478 
14479   // No viable ext lowering found.
14480   return SDValue();
14481 }
14482 
14483 /// Try to get a scalar value for a specific element of a vector.
14484 ///
14485 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
                                              SelectionDAG &DAG) {
14488   MVT VT = V.getSimpleValueType();
14489   MVT EltVT = VT.getVectorElementType();
14490   V = peekThroughBitcasts(V);
14491 
14492   // If the bitcasts shift the element size, we can't extract an equivalent
14493   // element from it.
14494   MVT NewVT = V.getSimpleValueType();
14495   if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
14496     return SDValue();
14497 
14498   if (V.getOpcode() == ISD::BUILD_VECTOR ||
14499       (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
14500     // Ensure the scalar operand is the same size as the destination.
14501     // FIXME: Add support for scalar truncation where possible.
14502     SDValue S = V.getOperand(Idx);
14503     if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
14504       return DAG.getBitcast(EltVT, S);
14505   }
14506 
14507   return SDValue();
14508 }
14509 
14510 /// Helper to test for a load that can be folded with x86 shuffles.
14511 ///
14512 /// This is particularly important because the set of instructions varies
14513 /// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
  return V->hasOneUse() &&
         ISD::isNON_EXTLoad(peekThroughOneUseBitcasts(V).getNode());
}
14518 
template<typename T>
static bool isSoftFP16(T VT, const X86Subtarget &Subtarget) {
  return VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16();
}
14523 
template<typename T>
bool X86TargetLowering::isSoftFP16(T VT) const {
  return ::isSoftFP16(VT, Subtarget);
}
14528 
14529 /// Try to lower insertion of a single element into a zero vector.
14530 ///
14531 /// This is a common pattern that we have especially efficient patterns to lower
14532 /// across all subtarget feature sets.
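///
/// For example (hypothetical operands): a v4f32 shuffle with Mask = <4, 1, 2, 3>
/// where V1 is not zeroable keeps V1 in place and inserts V2's low element over
/// V1's low element, which lowers to a single MOVSS.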
static SDValue lowerShuffleAsElementInsertion(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
14537   MVT ExtVT = VT;
14538   MVT EltVT = VT.getVectorElementType();
14539 
14540   if (isSoftFP16(EltVT, Subtarget))
14541     return SDValue();
14542 
14543   int V2Index =
14544       find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
14545       Mask.begin();
14546   bool IsV1Zeroable = true;
14547   for (int i = 0, Size = Mask.size(); i < Size; ++i)
14548     if (i != V2Index && !Zeroable[i]) {
14549       IsV1Zeroable = false;
14550       break;
14551     }
14552 
14553   // Check for a single input from a SCALAR_TO_VECTOR node.
14554   // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
14555   // all the smarts here sunk into that routine. However, the current
14556   // lowering of BUILD_VECTOR makes that nearly impossible until the old
14557   // vector shuffle lowering is dead.
14558   SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
14559                                                DAG);
14560   if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
14561     // We need to zext the scalar if it is smaller than an i32.
14562     V2S = DAG.getBitcast(EltVT, V2S);
14563     if (EltVT == MVT::i8 || (EltVT == MVT::i16 && !Subtarget.hasFP16())) {
14564       // Using zext to expand a narrow element won't work for non-zero
14565       // insertions.
14566       if (!IsV1Zeroable)
14567         return SDValue();
14568 
14569       // Zero-extend directly to i32.
14570       ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
14571       V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
14572     }
14573     V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
14574   } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
14575              EltVT == MVT::i16) {
14576     // Either not inserting from the low element of the input or the input
14577     // element size is too small to use VZEXT_MOVL to clear the high bits.
14578     return SDValue();
14579   }
14580 
14581   if (!IsV1Zeroable) {
14582     // If V1 can't be treated as a zero vector we have fewer options to lower
14583     // this. We can't support integer vectors or non-zero targets cheaply, and
14584     // the V1 elements can't be permuted in any way.
14585     assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
14586     if (!VT.isFloatingPoint() || V2Index != 0)
14587       return SDValue();
14588     SmallVector<int, 8> V1Mask(Mask);
14589     V1Mask[V2Index] = -1;
14590     if (!isNoopShuffleMask(V1Mask))
14591       return SDValue();
14592     if (!VT.is128BitVector())
14593       return SDValue();
14594 
14595     // Otherwise, use MOVSD, MOVSS or MOVSH.
14596     unsigned MovOpc = 0;
14597     if (EltVT == MVT::f16)
14598       MovOpc = X86ISD::MOVSH;
14599     else if (EltVT == MVT::f32)
14600       MovOpc = X86ISD::MOVSS;
14601     else if (EltVT == MVT::f64)
14602       MovOpc = X86ISD::MOVSD;
14603     else
14604       llvm_unreachable("Unsupported floating point element type to handle!");
14605     return DAG.getNode(MovOpc, DL, ExtVT, V1, V2);
14606   }
14607 
14608   // This lowering only works for the low element with floating point vectors.
14609   if (VT.isFloatingPoint() && V2Index != 0)
14610     return SDValue();
14611 
14612   V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
14613   if (ExtVT != VT)
14614     V2 = DAG.getBitcast(VT, V2);
14615 
14616   if (V2Index != 0) {
14617     // If we have 4 or fewer lanes we can cheaply shuffle the element into
14618     // the desired position. Otherwise it is more efficient to do a vector
14619     // shift left. We know that we can do a vector shift left because all
14620     // the inputs are zero.
14621     if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
14622       SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
14623       V2Shuffle[V2Index] = 0;
14624       V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
14625     } else {
14626       V2 = DAG.getBitcast(MVT::v16i8, V2);
14627       V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
14628                        DAG.getTargetConstant(
14629                            V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
14630       V2 = DAG.getBitcast(VT, V2);
14631     }
14632   }
14633   return V2;
14634 }
14635 
14636 /// Try to lower broadcast of a single - truncated - integer element,
14637 /// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
14638 ///
14639 /// This assumes we have AVX2.
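///
/// For example (hypothetical values): broadcasting element 3 of a v8i32 shuffle
/// whose source \p V0 is a v4i64 build_vector gives Scale = 2, so the scalar is
/// taken from V0's operand 1, shifted right by 32 bits, truncated to i32 and
/// then broadcast with VPBROADCASTD.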
static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
                                            int BroadcastIdx,
                                            const X86Subtarget &Subtarget,
                                            SelectionDAG &DAG) {
14644   assert(Subtarget.hasAVX2() &&
14645          "We can only lower integer broadcasts with AVX2!");
14646 
14647   MVT EltVT = VT.getVectorElementType();
14648   MVT V0VT = V0.getSimpleValueType();
14649 
14650   assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
14651   assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
14652 
14653   MVT V0EltVT = V0VT.getVectorElementType();
14654   if (!V0EltVT.isInteger())
14655     return SDValue();
14656 
14657   const unsigned EltSize = EltVT.getSizeInBits();
14658   const unsigned V0EltSize = V0EltVT.getSizeInBits();
14659 
14660   // This is only a truncation if the original element type is larger.
14661   if (V0EltSize <= EltSize)
14662     return SDValue();
14663 
14664   assert(((V0EltSize % EltSize) == 0) &&
14665          "Scalar type sizes must all be powers of 2 on x86!");
14666 
14667   const unsigned V0Opc = V0.getOpcode();
14668   const unsigned Scale = V0EltSize / EltSize;
14669   const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
14670 
14671   if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
14672       V0Opc != ISD::BUILD_VECTOR)
14673     return SDValue();
14674 
14675   SDValue Scalar = V0.getOperand(V0BroadcastIdx);
14676 
14677   // If we're extracting non-least-significant bits, shift so we can truncate.
14678   // Hopefully, we can fold away the trunc/srl/load into the broadcast.
14679   // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
14680   // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
14681   if (const int OffsetIdx = BroadcastIdx % Scale)
14682     Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
14683                          DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
14684 
14685   return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
14686                      DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
14687 }
14688 
14689 /// Test whether this can be lowered with a single SHUFPS instruction.
14690 ///
14691 /// This is used to disable more specialized lowerings when the shufps lowering
14692 /// will happen to be efficient.
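///
/// For example: Mask = <0, 1, 4, 4> needs only V1 in the low half and only V2
/// in the high half, so a single SHUFPS suffices; Mask = <0, 4, 1, 5> mixes
/// both inputs in the low half and is rejected.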
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
14694   // This routine only handles 128-bit shufps.
14695   assert(Mask.size() == 4 && "Unsupported mask size!");
14696   assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
14697   assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
14698   assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
14699   assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
14700 
14701   // To lower with a single SHUFPS we need to have the low half and high half
14702   // each requiring a single input.
14703   if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
14704     return false;
14705   if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
14706     return false;
14707 
14708   return true;
14709 }
14710 
14711 /// Test whether the specified input (0 or 1) is in-place blended by the
14712 /// given mask.
14713 ///
14714 /// This returns true if the elements from a particular input are already in the
14715 /// slot required by the given mask and require no permutation.
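///
/// For example, with Input = 0: Mask = <0, 5, 2, 7> keeps every V1 element in
/// its own slot and returns true, while Mask = <1, 5, 2, 7> moves V1's element
/// 1 into slot 0 and returns false.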
static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
14717   assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
14718   int Size = Mask.size();
14719   for (int i = 0; i < Size; ++i)
14720     if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
14721       return false;
14722 
14723   return true;
14724 }
14725 
14726 /// If we are extracting two 128-bit halves of a vector and shuffling the
14727 /// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
14728 /// multi-shuffle lowering.
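///
/// For example (hypothetical operands): shuffling the two v4f32 halves of a
/// v8f32 value X, i.e. shuf (extract X, 0), (extract X, 4), <0, 7, 2, 5>,
/// becomes extract (shuf X, undef, <0, 7, 2, 5, u, u, u, u>), 0, which a
/// single VPERMPS can handle instead of two extracts plus a two-input shuffle.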
static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
                                             SDValue N1, ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {
14732   MVT VT = N0.getSimpleValueType();
14733   assert((VT.is128BitVector() &&
14734           (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
14735          "VPERM* family of shuffles requires 32-bit or 64-bit elements");
14736 
14737   // Check that both sources are extracts of the same source vector.
14738   if (!N0.hasOneUse() || !N1.hasOneUse() ||
14739       N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
14740       N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
14741       N0.getOperand(0) != N1.getOperand(0))
14742     return SDValue();
14743 
14744   SDValue WideVec = N0.getOperand(0);
14745   MVT WideVT = WideVec.getSimpleValueType();
14746   if (!WideVT.is256BitVector())
14747     return SDValue();
14748 
14749   // Match extracts of each half of the wide source vector. Commute the shuffle
14750   // if the extract of the low half is N1.
14751   unsigned NumElts = VT.getVectorNumElements();
14752   SmallVector<int, 4> NewMask(Mask);
14753   const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
14754   const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
14755   if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
14756     ShuffleVectorSDNode::commuteMask(NewMask);
14757   else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
14758     return SDValue();
14759 
14760   // Final bailout: if the mask is simple, we are better off using an extract
14761   // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
14762   // because that avoids a constant load from memory.
14763   if (NumElts == 4 &&
14764       (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask, DAG)))
14765     return SDValue();
14766 
14767   // Extend the shuffle mask with undef elements.
14768   NewMask.append(NumElts, -1);
14769 
14770   // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
14771   SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
14772                                       NewMask);
14773   // This is free: ymm -> xmm.
14774   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
14775                      DAG.getIntPtrConstant(0, DL));
14776 }
14777 
14778 /// Try to lower broadcast of a single element.
14779 ///
14780 /// For convenience, this code also bundles all of the subtarget feature set
14781 /// filtering. While a little annoying to re-dispatch on type here, there isn't
14782 /// a convenient way to factor it out.
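///
/// For example (hypothetical operands, AVX): a v4f32 splat of element 0 where
/// V1 is a simple vector load is narrowed to a broadcast from memory
/// (VBROADCAST_LOAD of the f32 at the load address), often letting the wider
/// vector load be dropped entirely.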
static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
                                       SDValue V2, ArrayRef<int> Mask,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
14787   if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
14788         (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
14789         (Subtarget.hasAVX2() && VT.isInteger())))
14790     return SDValue();
14791 
14792   // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
14793   // we can only broadcast from a register with AVX2.
14794   unsigned NumEltBits = VT.getScalarSizeInBits();
14795   unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
14796                         ? X86ISD::MOVDDUP
14797                         : X86ISD::VBROADCAST;
14798   bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
14799 
14800   // Check that the mask is a broadcast.
14801   int BroadcastIdx = getSplatIndex(Mask);
14802   if (BroadcastIdx < 0)
14803     return SDValue();
14804   assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
14805                                             "a sorted mask where the broadcast "
14806                                             "comes from V1.");
14807 
14808   // Go up the chain of (vector) values to find a scalar load that we can
14809   // combine with the broadcast.
14810   // TODO: Combine this logic with findEltLoadSrc() used by
14811   //       EltsFromConsecutiveLoads().
14812   int BitOffset = BroadcastIdx * NumEltBits;
14813   SDValue V = V1;
14814   for (;;) {
14815     switch (V.getOpcode()) {
14816     case ISD::BITCAST: {
14817       V = V.getOperand(0);
14818       continue;
14819     }
14820     case ISD::CONCAT_VECTORS: {
14821       int OpBitWidth = V.getOperand(0).getValueSizeInBits();
14822       int OpIdx = BitOffset / OpBitWidth;
14823       V = V.getOperand(OpIdx);
14824       BitOffset %= OpBitWidth;
14825       continue;
14826     }
14827     case ISD::EXTRACT_SUBVECTOR: {
14828       // The extraction index adds to the existing offset.
14829       unsigned EltBitWidth = V.getScalarValueSizeInBits();
14830       unsigned Idx = V.getConstantOperandVal(1);
14831       unsigned BeginOffset = Idx * EltBitWidth;
14832       BitOffset += BeginOffset;
14833       V = V.getOperand(0);
14834       continue;
14835     }
14836     case ISD::INSERT_SUBVECTOR: {
14837       SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
14838       int EltBitWidth = VOuter.getScalarValueSizeInBits();
14839       int Idx = (int)V.getConstantOperandVal(2);
14840       int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
14841       int BeginOffset = Idx * EltBitWidth;
14842       int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
14843       if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
14844         BitOffset -= BeginOffset;
14845         V = VInner;
14846       } else {
14847         V = VOuter;
14848       }
14849       continue;
14850     }
14851     }
14852     break;
14853   }
14854   assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
14855   BroadcastIdx = BitOffset / NumEltBits;
14856 
14857   // Do we need to bitcast the source to retrieve the original broadcast index?
14858   bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
14859 
14860   // Check if this is a broadcast of a scalar. We special case lowering
14861   // for scalars so that we can more effectively fold with loads.
14862   // If the original value has a larger element type than the shuffle, the
14863   // broadcast element is in essence truncated. Make that explicit to ease
14864   // folding.
14865   if (BitCastSrc && VT.isInteger())
14866     if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
14867             DL, VT, V, BroadcastIdx, Subtarget, DAG))
14868       return TruncBroadcast;
14869 
14870   // Also check the simpler case, where we can directly reuse the scalar.
14871   if (!BitCastSrc &&
14872       ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
14873        (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
14874     V = V.getOperand(BroadcastIdx);
14875 
14876     // If we can't broadcast from a register, check that the input is a load.
14877     if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
14878       return SDValue();
14879   } else if (ISD::isNormalLoad(V.getNode()) &&
14880              cast<LoadSDNode>(V)->isSimple()) {
14881     // We do not check for one-use of the vector load because a broadcast load
14882     // is expected to be a win for code size, register pressure, and possibly
14883     // uops even if the original vector load is not eliminated.
14884 
14885     // Reduce the vector load and shuffle to a broadcasted scalar load.
14886     LoadSDNode *Ld = cast<LoadSDNode>(V);
14887     SDValue BaseAddr = Ld->getOperand(1);
14888     MVT SVT = VT.getScalarType();
14889     unsigned Offset = BroadcastIdx * SVT.getStoreSize();
14890     assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
14891     SDValue NewAddr =
14892         DAG.getMemBasePlusOffset(BaseAddr, TypeSize::Fixed(Offset), DL);
14893 
14894     // Directly form VBROADCAST_LOAD if we're using VBROADCAST opcode rather
14895     // than MOVDDUP.
14896     // FIXME: Should we add VBROADCAST_LOAD isel patterns for pre-AVX?
14897     if (Opcode == X86ISD::VBROADCAST) {
14898       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
14899       SDValue Ops[] = {Ld->getChain(), NewAddr};
14900       V = DAG.getMemIntrinsicNode(
14901           X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SVT,
14902           DAG.getMachineFunction().getMachineMemOperand(
14903               Ld->getMemOperand(), Offset, SVT.getStoreSize()));
14904       DAG.makeEquivalentMemoryOrdering(Ld, V);
14905       return DAG.getBitcast(VT, V);
14906     }
14907     assert(SVT == MVT::f64 && "Unexpected VT!");
14908     V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
14909                     DAG.getMachineFunction().getMachineMemOperand(
14910                         Ld->getMemOperand(), Offset, SVT.getStoreSize()));
14911     DAG.makeEquivalentMemoryOrdering(Ld, V);
14912   } else if (!BroadcastFromReg) {
14913     // We can't broadcast from a vector register.
14914     return SDValue();
14915   } else if (BitOffset != 0) {
14916     // We can only broadcast from the zero-element of a vector register,
14917     // but it can be advantageous to broadcast from the zero-element of a
14918     // subvector.
14919     if (!VT.is256BitVector() && !VT.is512BitVector())
14920       return SDValue();
14921 
14922     // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
14923     if (VT == MVT::v4f64 || VT == MVT::v4i64)
14924       return SDValue();
14925 
14926     // Only broadcast the zero-element of a 128-bit subvector.
14927     if ((BitOffset % 128) != 0)
14928       return SDValue();
14929 
14930     assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
14931            "Unexpected bit-offset");
14932     assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
14933            "Unexpected vector size");
14934     unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
14935     V = extract128BitVector(V, ExtractIdx, DAG, DL);
14936   }
14937 
14938   // On AVX we can use VBROADCAST directly for scalar sources.
14939   if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector()) {
14940     V = DAG.getBitcast(MVT::f64, V);
14941     if (Subtarget.hasAVX()) {
14942       V = DAG.getNode(X86ISD::VBROADCAST, DL, MVT::v2f64, V);
14943       return DAG.getBitcast(VT, V);
14944     }
14945     V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V);
14946   }
14947 
14948   // If this is a scalar, do the broadcast on this type and bitcast.
14949   if (!V.getValueType().isVector()) {
14950     assert(V.getScalarValueSizeInBits() == NumEltBits &&
14951            "Unexpected scalar size");
14952     MVT BroadcastVT = MVT::getVectorVT(V.getSimpleValueType(),
14953                                        VT.getVectorNumElements());
14954     return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
14955   }
14956 
14957   // We only support broadcasting from 128-bit vectors to minimize the
14958   // number of patterns we need to deal with in isel. So extract down to
14959   // 128-bits, removing as many bitcasts as possible.
14960   if (V.getValueSizeInBits() > 128)
14961     V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
14962 
14963   // Otherwise cast V to a vector with the same element type as VT, but
14964   // possibly narrower than VT. Then perform the broadcast.
14965   unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
14966   MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(), NumSrcElts);
14967   return DAG.getNode(Opcode, DL, VT, DAG.getBitcast(CastVT, V));
14968 }
14969 
14970 // Check for whether we can use INSERTPS to perform the shuffle. We only use
14971 // INSERTPS when the V1 elements are already in the correct locations
14972 // because otherwise we can just always use two SHUFPS instructions which
14973 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
14974 // perform INSERTPS if a single V1 element is out of place and all V2
14975 // elements are zeroable.
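// For example (hypothetical operands): a v4f32 shuffle with Mask = <0, 1, 6, 3>
// and nothing zeroable keeps V1 in place and inserts V2[2] into lane 2, giving
// InsertPSMask = (2 << 6) | (2 << 4) = 0xA0.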
static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
                                   unsigned &InsertPSMask,
                                   const APInt &Zeroable,
                                   ArrayRef<int> Mask, SelectionDAG &DAG) {
14980   assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
14981   assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
14982   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
14983 
14984   // Attempt to match INSERTPS with one element from VA or VB being
14985   // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
14986   // are updated.
14987   auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
14988                              ArrayRef<int> CandidateMask) {
14989     unsigned ZMask = 0;
14990     int VADstIndex = -1;
14991     int VBDstIndex = -1;
14992     bool VAUsedInPlace = false;
14993 
14994     for (int i = 0; i < 4; ++i) {
14995       // Synthesize a zero mask from the zeroable elements (includes undefs).
14996       if (Zeroable[i]) {
14997         ZMask |= 1 << i;
14998         continue;
14999       }
15000 
15001       // Flag if we use any VA inputs in place.
15002       if (i == CandidateMask[i]) {
15003         VAUsedInPlace = true;
15004         continue;
15005       }
15006 
15007       // We can only insert a single non-zeroable element.
15008       if (VADstIndex >= 0 || VBDstIndex >= 0)
15009         return false;
15010 
15011       if (CandidateMask[i] < 4) {
15012         // VA input out of place for insertion.
15013         VADstIndex = i;
15014       } else {
15015         // VB input for insertion.
15016         VBDstIndex = i;
15017       }
15018     }
15019 
15020     // Don't bother if we have no (non-zeroable) element for insertion.
15021     if (VADstIndex < 0 && VBDstIndex < 0)
15022       return false;
15023 
15024     // Determine element insertion src/dst indices. The src index is from the
15025     // start of the inserted vector, not the start of the concatenated vector.
15026     unsigned VBSrcIndex = 0;
15027     if (VADstIndex >= 0) {
15028       // If we have a VA input out of place, we use VA as the V2 element
15029       // insertion and don't use the original V2 at all.
15030       VBSrcIndex = CandidateMask[VADstIndex];
15031       VBDstIndex = VADstIndex;
15032       VB = VA;
15033     } else {
15034       VBSrcIndex = CandidateMask[VBDstIndex] - 4;
15035     }
15036 
15037     // If no V1 inputs are used in place, then the result is created only from
15038     // the zero mask and the V2 insertion - so remove V1 dependency.
15039     if (!VAUsedInPlace)
15040       VA = DAG.getUNDEF(MVT::v4f32);
15041 
15042     // Update V1, V2 and InsertPSMask accordingly.
15043     V1 = VA;
15044     V2 = VB;
15045 
15046     // Insert the V2 element into the desired position.
15047     InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
15048     assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
15049     return true;
15050   };
15051 
15052   if (matchAsInsertPS(V1, V2, Mask))
15053     return true;
15054 
15055   // Commute and try again.
15056   SmallVector<int, 4> CommutedMask(Mask);
15057   ShuffleVectorSDNode::commuteMask(CommutedMask);
15058   if (matchAsInsertPS(V2, V1, CommutedMask))
15059     return true;
15060 
15061   return false;
15062 }
15063 
static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
                                      ArrayRef<int> Mask, const APInt &Zeroable,
                                      SelectionDAG &DAG) {
15067   assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
15068   assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
15069 
15070   // Attempt to match the insertps pattern.
15071   unsigned InsertPSMask = 0;
15072   if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
15073     return SDValue();
15074 
15075   // Insert the V2 element into the desired position.
15076   return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
15077                      DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
15078 }
15079 
15080 /// Handle lowering of 2-lane 64-bit floating point shuffles.
15081 ///
15082 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
15083 /// support for floating point shuffles but not integer shuffles. These
15084 /// instructions will incur a domain crossing penalty on some chips though so
15085 /// it is better to avoid lowering through this for integer vectors where
15086 /// possible.
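///
/// For example: a single-input v2f64 shuffle with Mask = <1, 0> builds
/// SHUFPDMask = 1, which becomes VPERMILPD $1 on AVX (allowing a folded load)
/// or a SHUFPD of the input with itself otherwise.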
static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
15091   assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
15092   assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
15093   assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
15094 
15095   if (V2.isUndef()) {
15096     // Check for being able to broadcast a single element.
15097     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
15098                                                     Mask, Subtarget, DAG))
15099       return Broadcast;
15100 
15101     // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
15103     unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
15104 
15105     if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPD, which will allow folding a load
      // into the shuffle.
15108       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
15109                          DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
15110     }
15111 
15112     return DAG.getNode(
15113         X86ISD::SHUFP, DL, MVT::v2f64,
15114         Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
15115         Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
15116         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
15117   }
15118   assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
15119   assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
15120   assert(Mask[0] < 2 && "We sort V1 to be the first input.");
15121   assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
15122 
15123   if (Subtarget.hasAVX2())
15124     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
15125       return Extract;
15126 
15127   // When loading a scalar and then shuffling it into a vector we can often do
15128   // the insertion cheaply.
15129   if (SDValue Insertion = lowerShuffleAsElementInsertion(
15130           DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
15131     return Insertion;
15132   // Try inverting the insertion since for v2 masks it is easy to do and we
15133   // can't reliably sort the mask one way or the other.
15134   int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
15135                         Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
15136   if (SDValue Insertion = lowerShuffleAsElementInsertion(
15137           DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
15138     return Insertion;
15139 
15140   // Try to use one of the special instruction patterns to handle two common
15141   // blend patterns if a zero-blend above didn't work.
15142   if (isShuffleEquivalent(Mask, {0, 3}, V1, V2) ||
15143       isShuffleEquivalent(Mask, {1, 3}, V1, V2))
15144     if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
15145       // We can either use a special instruction to load over the low double or
15146       // to move just the low double.
15147       return DAG.getNode(
15148           X86ISD::MOVSD, DL, MVT::v2f64, V2,
15149           DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
15150 
15151   if (Subtarget.hasSSE41())
15152     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
15153                                             Zeroable, Subtarget, DAG))
15154       return Blend;
15155 
15156   // Use dedicated unpack instructions for masks that match their pattern.
15157   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
15158     return V;
15159 
15160   unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
15161   return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
15162                      DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
15163 }
15164 
15165 /// Handle lowering of 2-lane 64-bit integer shuffles.
15166 ///
15167 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
15168 /// the integer unit to minimize domain crossing penalties. However, for blends
15169 /// it falls back to the floating point shuffle operation with appropriate bit
15170 /// casting.
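///
/// For example: a single-input v2i64 shuffle with Mask = <1, 0> is rewritten as
/// the v4i32 mask <2, 3, 0, 1> and emitted as a single PSHUFD.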
static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
15175   assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
15176   assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
15177   assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
15178 
15179   if (V2.isUndef()) {
15180     // Check for being able to broadcast a single element.
15181     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
15182                                                     Mask, Subtarget, DAG))
15183       return Broadcast;
15184 
15185     // Straight shuffle of a single input vector. For everything from SSE2
15186     // onward this has a single fast instruction with no scary immediates.
15187     // We have to map the mask as it is actually a v4i32 shuffle instruction.
15188     V1 = DAG.getBitcast(MVT::v4i32, V1);
15189     int WidenedMask[4] = {Mask[0] < 0 ? -1 : (Mask[0] * 2),
15190                           Mask[0] < 0 ? -1 : ((Mask[0] * 2) + 1),
15191                           Mask[1] < 0 ? -1 : (Mask[1] * 2),
15192                           Mask[1] < 0 ? -1 : ((Mask[1] * 2) + 1)};
15193     return DAG.getBitcast(
15194         MVT::v2i64,
15195         DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
15196                     getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
15197   }
15198   assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
15199   assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
15200   assert(Mask[0] < 2 && "We sort V1 to be the first input.");
15201   assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
15202 
15203   if (Subtarget.hasAVX2())
15204     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
15205       return Extract;
15206 
15207   // Try to use shift instructions.
15208   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
15209                                           Zeroable, Subtarget, DAG))
15210     return Shift;
15211 
15212   // When loading a scalar and then shuffling it into a vector we can often do
15213   // the insertion cheaply.
15214   if (SDValue Insertion = lowerShuffleAsElementInsertion(
15215           DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
15216     return Insertion;
15217   // Try inverting the insertion since for v2 masks it is easy to do and we
15218   // can't reliably sort the mask one way or the other.
15219   int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
15220   if (SDValue Insertion = lowerShuffleAsElementInsertion(
15221           DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
15222     return Insertion;
15223 
15224   // We have different paths for blend lowering, but they all must use the
15225   // *exact* same predicate.
15226   bool IsBlendSupported = Subtarget.hasSSE41();
15227   if (IsBlendSupported)
15228     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
15229                                             Zeroable, Subtarget, DAG))
15230       return Blend;
15231 
15232   // Use dedicated unpack instructions for masks that match their pattern.
15233   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
15234     return V;
15235 
15236   // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
15238   if (Subtarget.hasSSSE3()) {
15239     if (Subtarget.hasVLX())
15240       if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v2i64, V1, V2, Mask,
15241                                                 Subtarget, DAG))
15242         return Rotate;
15243 
15244     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
15245                                                   Subtarget, DAG))
15246       return Rotate;
15247   }
15248 
15249   // If we have direct support for blends, we should lower by decomposing into
15250   // a permute. That will be faster than the domain cross.
15251   if (IsBlendSupported)
15252     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v2i64, V1, V2, Mask,
15253                                                 Subtarget, DAG);
15254 
15255   // We implement this with SHUFPD which is pretty lame because it will likely
15256   // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
15257   // However, all the alternatives are still more cycles and newer chips don't
15258   // have this problem. It would be really nice if x86 had better shuffles here.
15259   V1 = DAG.getBitcast(MVT::v2f64, V1);
15260   V2 = DAG.getBitcast(MVT::v2f64, V2);
15261   return DAG.getBitcast(MVT::v2i64,
15262                         DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
15263 }
15264 
15265 /// Lower a vector shuffle using the SHUFPS instruction.
15266 ///
15267 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
15268 /// It makes no assumptions about whether this is the *best* lowering, it simply
15269 /// uses it.
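///
/// For example: Mask = <0, 1, 4, 5> is the easy two-V2-element case; the mask
/// is rebased to <0, 1, 0, 1> with LowV = V1 and HighV = V2, producing
/// SHUFPS $0x44.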
static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
                                      ArrayRef<int> Mask, SDValue V1,
                                      SDValue V2, SelectionDAG &DAG) {
15273   SDValue LowV = V1, HighV = V2;
15274   SmallVector<int, 4> NewMask(Mask);
15275   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
15276 
15277   if (NumV2Elements == 1) {
15278     int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
15279 
15280     // Compute the index adjacent to V2Index and in the same half by toggling
15281     // the low bit.
15282     int V2AdjIndex = V2Index ^ 1;
15283 
15284     if (Mask[V2AdjIndex] < 0) {
15285       // Handles all the cases where we have a single V2 element and an undef.
15286       // This will only ever happen in the high lanes because we commute the
15287       // vector otherwise.
15288       if (V2Index < 2)
15289         std::swap(LowV, HighV);
15290       NewMask[V2Index] -= 4;
15291     } else {
15292       // Handle the case where the V2 element ends up adjacent to a V1 element.
15293       // To make this work, blend them together as the first step.
15294       int V1Index = V2AdjIndex;
15295       int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
15296       V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
15297                        getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
15298 
15299       // Now proceed to reconstruct the final blend as we have the necessary
15300       // high or low half formed.
15301       if (V2Index < 2) {
15302         LowV = V2;
15303         HighV = V1;
15304       } else {
15305         HighV = V2;
15306       }
15307       NewMask[V1Index] = 2; // We put the V1 element in V2[2].
15308       NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
15309     }
15310   } else if (NumV2Elements == 2) {
15311     if (Mask[0] < 4 && Mask[1] < 4) {
15312       // Handle the easy case where we have V1 in the low lanes and V2 in the
15313       // high lanes.
15314       NewMask[2] -= 4;
15315       NewMask[3] -= 4;
15316     } else if (Mask[2] < 4 && Mask[3] < 4) {
15317       // We also handle the reversed case because this utility may get called
15318       // when we detect a SHUFPS pattern but can't easily commute the shuffle to
15319       // arrange things in the right direction.
15320       NewMask[0] -= 4;
15321       NewMask[1] -= 4;
15322       HighV = V1;
15323       LowV = V2;
15324     } else {
15325       // We have a mixture of V1 and V2 in both low and high lanes. Rather than
15326       // trying to place elements directly, just blend them and set up the final
15327       // shuffle to place them.
15328 
15329       // The first two blend mask elements are for V1, the second two are for
15330       // V2.
15331       int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
15332                           Mask[2] < 4 ? Mask[2] : Mask[3],
15333                           (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
15334                           (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
15335       V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15336                        getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
15337 
15338       // Now we do a normal shuffle of V1 by giving V1 as both operands to
15339       // a blend.
15340       LowV = HighV = V1;
15341       NewMask[0] = Mask[0] < 4 ? 0 : 2;
15342       NewMask[1] = Mask[0] < 4 ? 2 : 0;
15343       NewMask[2] = Mask[2] < 4 ? 1 : 3;
15344       NewMask[3] = Mask[2] < 4 ? 3 : 1;
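      // For example, Mask {0, 4, 1, 5} would give BlendMask {0, 1, 0, 1}, so the
      // SHUFP blend produces [V1[0], V1[1], V2[0], V2[1]]; the final NewMask
      // {0, 2, 1, 3} then interleaves that into [V1[0], V2[0], V1[1], V2[1]].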
15345     }
15346   } else if (NumV2Elements == 3) {
15347     // Ideally canonicalizeShuffleMaskWithCommute should have caught this, but
15348     // we can get here via other paths (e.g. repeated mask matching) where we
15349     // don't want to do another round of lowerVECTOR_SHUFFLE.
15350     ShuffleVectorSDNode::commuteMask(NewMask);
15351     return lowerShuffleWithSHUFPS(DL, VT, NewMask, V2, V1, DAG);
15352   }
15353   return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
15354                      getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
15355 }
15356 
15357 /// Lower 4-lane 32-bit floating point shuffles.
15358 ///
15359 /// Uses instructions exclusively from the floating point unit to minimize
15360 /// domain crossing penalties, as these are sufficient to implement all v4f32
15361 /// shuffles.
15362 static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15363                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15364                                  const X86Subtarget &Subtarget,
15365                                  SelectionDAG &DAG) {
15366   assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
15367   assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
15368   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15369 
15370   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
15371 
15372   if (NumV2Elements == 0) {
15373     // Check for being able to broadcast a single element.
15374     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
15375                                                     Mask, Subtarget, DAG))
15376       return Broadcast;
15377 
15378     // Use even/odd duplicate instructions for masks that match their pattern.
15379     if (Subtarget.hasSSE3()) {
15380       if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
15381         return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
15382       if (isShuffleEquivalent(Mask, {1, 1, 3, 3}, V1, V2))
15383         return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
15384     }
15385 
15386     if (Subtarget.hasAVX()) {
15387       // If we have AVX, we can use VPERMILPS which will allow folding a load
15388       // into the shuffle.
15389       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
15390                          getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15391     }
15392 
15393     // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
15394     // in SSE1 because otherwise they are widened to v2f64 and never get here.
15395     if (!Subtarget.hasSSE2()) {
15396       if (isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2))
15397         return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
15398       if (isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1, V2))
15399         return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
15400     }
15401 
15402     // Otherwise, use a straight shuffle of a single input vector. We pass the
15403     // input vector to both operands to simulate this with a SHUFPS.
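    // For example, Mask {3, 1, 2, 0} is encoded as the immediate 0x27
    // (0b00'10'01'11), two bits selecting the source lane of each destination
    // element.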
15404     return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
15405                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15406   }
15407 
15408   if (Subtarget.hasAVX2())
15409     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
15410       return Extract;
15411 
15412   // There are special ways we can lower some single-element blends. However,
15413   // the custom lowerings for more complex single-element blends below are only
15414   // used if both this and BLENDPS fail to match, so restrict this to the case
15415   // where the V2 input targets element 0 of the mask -- that is the fast case
15416   // here.
15417   if (NumV2Elements == 1 && Mask[0] >= 4)
15418     if (SDValue V = lowerShuffleAsElementInsertion(
15419             DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
15420       return V;
15421 
15422   if (Subtarget.hasSSE41()) {
15423     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
15424                                             Zeroable, Subtarget, DAG))
15425       return Blend;
15426 
15427     // Use INSERTPS if we can complete the shuffle efficiently.
15428     if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
15429       return V;
15430 
15431     if (!isSingleSHUFPSMask(Mask))
15432       if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
15433                                                             V2, Mask, DAG))
15434         return BlendPerm;
15435   }
15436 
15437   // Use low/high mov instructions. These are only valid in SSE1 because
15438   // otherwise they are widened to v2f64 and never get here.
15439   if (!Subtarget.hasSSE2()) {
15440     if (isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2))
15441       return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
15442     if (isShuffleEquivalent(Mask, {2, 3, 6, 7}, V1, V2))
15443       return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
15444   }
15445 
15446   // Use dedicated unpack instructions for masks that match their pattern.
15447   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
15448     return V;
15449 
15450   // Otherwise fall back to a SHUFPS lowering strategy.
15451   return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
15452 }
15453 
15454 /// Lower 4-lane i32 vector shuffles.
15455 ///
15456 /// We try to handle these with integer-domain shuffles where we can, but for
15457 /// blends we use the floating point domain blend instructions.
15458 static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15459                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15460                                  const X86Subtarget &Subtarget,
15461                                  SelectionDAG &DAG) {
15462   assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
15463   assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
15464   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15465 
15466   // Whenever we can lower this as a zext, that instruction is strictly faster
15467   // than any alternative. It also allows us to fold memory operands into the
15468   // shuffle in many cases.
15469   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
15470                                                    Zeroable, Subtarget, DAG))
15471     return ZExt;
15472 
15473   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
15474 
15475   if (NumV2Elements == 0) {
15476     // Try to use broadcast unless the mask only has one non-undef element.
15477     if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
15478       if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
15479                                                       Mask, Subtarget, DAG))
15480         return Broadcast;
15481     }
15482 
15483     // Straight shuffle of a single input vector. For everything from SSE2
15484     // onward this has a single fast instruction with no scary immediates.
15485     // We coerce the shuffle pattern to be compatible with UNPCK instructions
15486     // but we aren't actually going to use the UNPCK instruction because doing
15487     // so prevents folding a load into this instruction or making a copy.
15488     const int UnpackLoMask[] = {0, 0, 1, 1};
15489     const int UnpackHiMask[] = {2, 2, 3, 3};
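    // For example, PSHUFD with the {0, 0, 1, 1} mask (immediate 0x50) produces
    // the same result as UNPCKLDQ of the vector with itself.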
15490     if (isShuffleEquivalent(Mask, {0, 0, 1, 1}, V1, V2))
15491       Mask = UnpackLoMask;
15492     else if (isShuffleEquivalent(Mask, {2, 2, 3, 3}, V1, V2))
15493       Mask = UnpackHiMask;
15494 
15495     return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
15496                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15497   }
15498 
15499   if (Subtarget.hasAVX2())
15500     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
15501       return Extract;
15502 
15503   // Try to use shift instructions.
15504   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
15505                                           Zeroable, Subtarget, DAG))
15506     return Shift;
15507 
15508   // There are special ways we can lower some single-element blends.
15509   if (NumV2Elements == 1)
15510     if (SDValue V = lowerShuffleAsElementInsertion(
15511             DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
15512       return V;
15513 
15514   // We have different paths for blend lowering, but they all must use the
15515   // *exact* same predicate.
15516   bool IsBlendSupported = Subtarget.hasSSE41();
15517   if (IsBlendSupported)
15518     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
15519                                             Zeroable, Subtarget, DAG))
15520       return Blend;
15521 
15522   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
15523                                              Zeroable, Subtarget, DAG))
15524     return Masked;
15525 
15526   // Use dedicated unpack instructions for masks that match their pattern.
15527   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
15528     return V;
15529 
15530   // Try to use byte rotation instructions.
15531   // It's more profitable for pre-SSSE3 targets to use shuffles/unpacks.
15532   if (Subtarget.hasSSSE3()) {
15533     if (Subtarget.hasVLX())
15534       if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i32, V1, V2, Mask,
15535                                                 Subtarget, DAG))
15536         return Rotate;
15537 
15538     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
15539                                                   Subtarget, DAG))
15540       return Rotate;
15541   }
15542 
15543   // Assume that a single SHUFPS is faster than an alternative sequence of
15544   // multiple instructions (even if the CPU has a domain penalty).
15545   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
15546   if (!isSingleSHUFPSMask(Mask)) {
15547     // If we have direct support for blends, we should lower by decomposing into
15548     // a permute. That will be faster than the domain cross.
15549     if (IsBlendSupported)
15550       return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i32, V1, V2, Mask,
15551                                                   Subtarget, DAG);
15552 
15553     // Try to lower by permuting the inputs into an unpack instruction.
15554     if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
15555                                                         Mask, Subtarget, DAG))
15556       return Unpack;
15557   }
15558 
15559   // We implement this with SHUFPS because it can blend from two vectors.
15560   // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
15561   // up the inputs, bypassing domain shift penalties that we would incur if we
15562   // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
15563   // relevant.
15564   SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
15565   SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
15566   SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
15567   return DAG.getBitcast(MVT::v4i32, ShufPS);
15568 }
15569 
15570 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
15571 /// shuffle lowering, and the most complex part.
15572 ///
15573 /// The lowering strategy is to try to form pairs of input lanes which are
15574 /// targeted at the same half of the final vector, and then use a dword shuffle
15575 /// to place them onto the right half, and finally unpack the paired lanes into
15576 /// their final position.
15577 ///
15578 /// The exact breakdown of how to form these dword pairs and align them on the
15579 /// correct sides is really tricky. See the comments within the function for
15580 /// more of the details.
15581 ///
15582 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
15583 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
15584 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
15585 /// vector, form the analogous 128-bit 8-element Mask.
15586 static SDValue lowerV8I16GeneralSingleInputShuffle(
15587     const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
15588     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15589   assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
15590   MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
15591 
15592   assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
15593   MutableArrayRef<int> LoMask = Mask.slice(0, 4);
15594   MutableArrayRef<int> HiMask = Mask.slice(4, 4);
15595 
15596   // Attempt to directly match PSHUFLW or PSHUFHW.
15597   if (isUndefOrInRange(LoMask, 0, 4) &&
15598       isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
15599     return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
15600                        getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
15601   }
15602   if (isUndefOrInRange(HiMask, 4, 8) &&
15603       isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
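    // Rebase the mask into the high half's own index space; for example,
    // HiMask {5, 4, 7, 6} becomes {1, 0, 3, 2} for the PSHUFHW immediate.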
15604     for (int i = 0; i != 4; ++i)
15605       HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
15606     return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
15607                        getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
15608   }
15609 
15610   SmallVector<int, 4> LoInputs;
15611   copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
15612   array_pod_sort(LoInputs.begin(), LoInputs.end());
15613   LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
15614   SmallVector<int, 4> HiInputs;
15615   copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
15616   array_pod_sort(HiInputs.begin(), HiInputs.end());
15617   HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
15618   int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
15619   int NumHToL = LoInputs.size() - NumLToL;
15620   int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
15621   int NumHToH = HiInputs.size() - NumLToH;
15622   MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
15623   MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
15624   MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
15625   MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
15626 
15627   // If we are shuffling values from one half - check how many different DWORD
15628   // pairs we need to create. If only 1 or 2 then we can perform this as a
15629   // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
15630   auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
15631                                ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
15632     V = DAG.getNode(ShufWOp, DL, VT, V,
15633                     getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
15634     V = DAG.getBitcast(PSHUFDVT, V);
15635     V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
15636                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
15637     return DAG.getBitcast(VT, V);
15638   };
15639 
15640   if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
15641     int PSHUFDMask[4] = { -1, -1, -1, -1 };
15642     SmallVector<std::pair<int, int>, 4> DWordPairs;
15643     int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
15644 
15645     // Collect the different DWORD pairs.
15646     for (int DWord = 0; DWord != 4; ++DWord) {
15647       int M0 = Mask[2 * DWord + 0];
15648       int M1 = Mask[2 * DWord + 1];
15649       M0 = (M0 >= 0 ? M0 % 4 : M0);
15650       M1 = (M1 >= 0 ? M1 % 4 : M1);
15651       if (M0 < 0 && M1 < 0)
15652         continue;
15653 
15654       bool Match = false;
15655       for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
15656         auto &DWordPair = DWordPairs[j];
15657         if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
15658             (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
15659           DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
15660           DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
15661           PSHUFDMask[DWord] = DOffset + j;
15662           Match = true;
15663           break;
15664         }
15665       }
15666       if (!Match) {
15667         PSHUFDMask[DWord] = DOffset + DWordPairs.size();
15668         DWordPairs.push_back(std::make_pair(M0, M1));
15669       }
15670     }
15671 
15672     if (DWordPairs.size() <= 2) {
15673       DWordPairs.resize(2, std::make_pair(-1, -1));
15674       int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
15675                               DWordPairs[1].first, DWordPairs[1].second};
15676       if ((NumHToL + NumHToH) == 0)
15677         return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
15678       if ((NumLToL + NumLToH) == 0)
15679         return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
15680     }
15681   }
15682 
15683   // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
15684   // such inputs we can swap two of the dwords across the half mark and end up
15685   // with <=2 inputs to each half in each half. Once there, we can fall through
15686   // to the generic code below. For example:
15687   //
15688   // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
15689   // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
15690   //
15691   // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
15692   // and an existing 2-into-2 on the other half. In this case we may have to
15693   // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
15694   // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
15695   // Fortunately, we don't have to handle anything but a 2-into-2 pattern
15696   // because any other situation (including a 3-into-1 or 1-into-3 in the other
15697   // half than the one we target for fixing) will be fixed when we re-enter this
15698   // path. We will also combine away any sequence of PSHUFD instructions that
15699   // result into a single instruction. Here is an example of the tricky case:
15700   //
15701   // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
15702   // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
15703   //
15704   // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
15705   //
15706   // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
15707   // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
15708   //
15709   // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
15710   // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
15711   //
15712   // The result is fine to be handled by the generic logic.
15713   auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
15714                           ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
15715                           int AOffset, int BOffset) {
15716     assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
15717            "Must call this with A having 3 or 1 inputs from the A half.");
15718     assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
15719            "Must call this with B having 1 or 3 inputs from the B half.");
15720     assert(AToAInputs.size() + BToAInputs.size() == 4 &&
15721            "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
15722 
15723     bool ThreeAInputs = AToAInputs.size() == 3;
15724 
15725     // Compute the index of the dword that holds the one non-input word in the
15726     // three-input half: take the sum of all four word indices in that half and
15727     // subtract the sum of the actual three inputs. The difference is the
15728     // remaining (non-input) slot, and its dword is the one we want.
15729     int ADWord = 0, BDWord = 0;
15730     int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
15731     int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
15732     int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
15733     ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
15734     int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
15735     int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
15736     int TripleNonInputIdx =
15737         TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
15738     TripleDWord = TripleNonInputIdx / 2;
15739 
15740     // We use xor with one to compute the adjacent DWord to whichever one the
15741     // OneInput is in.
15742     OneInputDWord = (OneInput / 2) ^ 1;
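    // For example, if OneInput is word 5, it lives in dword 2, so the adjacent
    // dword is dword 3.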
15743 
15744     // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
15745     // and BToA inputs. If there is also such a problem with the BToB and AToB
15746     // inputs, we don't necessarily try to fix it -- we'll recurse and see it in
15747     // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
15748     // is essential that we don't *create* a 3<-1, as then we might oscillate.
15749     if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
15750 
15751       // Compute how many inputs will be flipped by swapping these DWords. We
15752       // need to balance this to ensure we don't form a 3-1 shuffle in the
15753       // other half.
15754       int NumFlippedAToBInputs = llvm::count(AToBInputs, 2 * ADWord) +
15755                                  llvm::count(AToBInputs, 2 * ADWord + 1);
15756       int NumFlippedBToBInputs = llvm::count(BToBInputs, 2 * BDWord) +
15757                                  llvm::count(BToBInputs, 2 * BDWord + 1);
15758       if ((NumFlippedAToBInputs == 1 &&
15759            (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
15760           (NumFlippedBToBInputs == 1 &&
15761            (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
15762         // We choose whether to fix the A half or B half based on whether that
15763         // half has zero flipped inputs. At zero, we may not be able to fix it
15764         // with that half. We also bias towards fixing the B half because that
15765         // will more commonly be the high half, and we have to bias one way.
15766         auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
15767                                                        ArrayRef<int> Inputs) {
15768           int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
15769           bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
15770           // Determine whether the free index is in the flipped dword or the
15771           // unflipped dword based on where the pinned index is. We use this bit
15772           // in an xor to conditionally select the adjacent dword.
15773           int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
15774           bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
15775           if (IsFixIdxInput == IsFixFreeIdxInput)
15776             FixFreeIdx += 1;
15777           IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
15778           assert(IsFixIdxInput != IsFixFreeIdxInput &&
15779                  "We need to be changing the number of flipped inputs!");
15780           int PSHUFHalfMask[] = {0, 1, 2, 3};
15781           std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
15782           V = DAG.getNode(
15783               FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
15784               MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
15785               getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
15786 
15787           for (int &M : Mask)
15788             if (M >= 0 && M == FixIdx)
15789               M = FixFreeIdx;
15790             else if (M >= 0 && M == FixFreeIdx)
15791               M = FixIdx;
15792         };
15793         if (NumFlippedBToBInputs != 0) {
15794           int BPinnedIdx =
15795               BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
15796           FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
15797         } else {
15798           assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
15799           int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
15800           FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
15801         }
15802       }
15803     }
15804 
15805     int PSHUFDMask[] = {0, 1, 2, 3};
15806     PSHUFDMask[ADWord] = BDWord;
15807     PSHUFDMask[BDWord] = ADWord;
15808     V = DAG.getBitcast(
15809         VT,
15810         DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
15811                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
15812 
15813     // Adjust the mask to match the new locations of A and B.
15814     for (int &M : Mask)
15815       if (M >= 0 && M/2 == ADWord)
15816         M = 2 * BDWord + M % 2;
15817       else if (M >= 0 && M/2 == BDWord)
15818         M = 2 * ADWord + M % 2;
15819 
15820     // Recurse back into this routine to re-compute state now that this isn't
15821     // a 3 and 1 problem.
15822     return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
15823   };
15824   if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
15825     return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
15826   if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
15827     return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
15828 
15829   // At this point there are at most two inputs to the low and high halves from
15830   // each half. That means the inputs can always be grouped into dwords and
15831   // those dwords can then be moved to the correct half with a dword shuffle.
15832   // We use at most one low and one high word shuffle to collect these paired
15833   // inputs into dwords, and finally a dword shuffle to place them.
15834   int PSHUFLMask[4] = {-1, -1, -1, -1};
15835   int PSHUFHMask[4] = {-1, -1, -1, -1};
15836   int PSHUFDMask[4] = {-1, -1, -1, -1};
15837 
15838   // First fix the masks for all the inputs that are staying in their
15839   // original halves. This will then dictate the targets of the cross-half
15840   // shuffles.
15841   auto fixInPlaceInputs =
15842       [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
15843                     MutableArrayRef<int> SourceHalfMask,
15844                     MutableArrayRef<int> HalfMask, int HalfOffset) {
15845     if (InPlaceInputs.empty())
15846       return;
15847     if (InPlaceInputs.size() == 1) {
15848       SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
15849           InPlaceInputs[0] - HalfOffset;
15850       PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
15851       return;
15852     }
15853     if (IncomingInputs.empty()) {
15854       // Just fix all of the in place inputs.
15855       for (int Input : InPlaceInputs) {
15856         SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
15857         PSHUFDMask[Input / 2] = Input / 2;
15858       }
15859       return;
15860     }
15861 
15862     assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
15863     SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
15864         InPlaceInputs[0] - HalfOffset;
15865     // Put the second input next to the first so that they are packed into
15866     // a dword. We find the adjacent index by toggling the low bit.
15867     int AdjIndex = InPlaceInputs[0] ^ 1;
15868     SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
15869     std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
15870     PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
15871   };
15872   fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
15873   fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
15874 
15875   // Now gather the cross-half inputs and place them into a free dword of
15876   // their target half.
15877   // FIXME: This operation could almost certainly be simplified dramatically to
15878   // look more like the 3-1 fixing operation.
15879   auto moveInputsToRightHalf = [&PSHUFDMask](
15880       MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
15881       MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
15882       MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
15883       int DestOffset) {
15884     auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
15885       return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
15886     };
15887     auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
15888                                                int Word) {
15889       int LowWord = Word & ~1;
15890       int HighWord = Word | 1;
15891       return isWordClobbered(SourceHalfMask, LowWord) ||
15892              isWordClobbered(SourceHalfMask, HighWord);
15893     };
15894 
15895     if (IncomingInputs.empty())
15896       return;
15897 
15898     if (ExistingInputs.empty()) {
15899       // Map any dwords with inputs from them into the right half.
15900       for (int Input : IncomingInputs) {
15901         // If the source half mask maps over the inputs, turn those into
15902         // swaps and use the swapped lane.
15903         if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
15904           if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
15905             SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
15906                 Input - SourceOffset;
15907             // We have to swap the uses in our half mask in one sweep.
15908             for (int &M : HalfMask)
15909               if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
15910                 M = Input;
15911               else if (M == Input)
15912                 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
15913           } else {
15914             assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
15915                        Input - SourceOffset &&
15916                    "Previous placement doesn't match!");
15917           }
15918           // Note that this correctly re-maps both when we do a swap and when
15919           // we observe the other side of the swap above. We rely on that to
15920           // avoid swapping the members of the input list directly.
15921           Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
15922         }
15923 
15924         // Map the input's dword into the correct half.
15925         if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
15926           PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
15927         else
15928           assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
15929                      Input / 2 &&
15930                  "Previous placement doesn't match!");
15931       }
15932 
15933       // And just directly shift any other-half mask elements to be same-half
15934       // as we will have mirrored the dword containing the element into the
15935       // same position within that half.
15936       for (int &M : HalfMask)
15937         if (M >= SourceOffset && M < SourceOffset + 4) {
15938           M = M - SourceOffset + DestOffset;
15939           assert(M >= 0 && "This should never wrap below zero!");
15940         }
15941       return;
15942     }
15943 
15944     // Ensure we have the input in a viable dword of its current half. This
15945     // is particularly tricky because the original position may be clobbered
15946     // by inputs being moved and *staying* in that half.
15947     if (IncomingInputs.size() == 1) {
15948       if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
15949         int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
15950                          SourceOffset;
15951         SourceHalfMask[InputFixed - SourceOffset] =
15952             IncomingInputs[0] - SourceOffset;
15953         std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
15954                      InputFixed);
15955         IncomingInputs[0] = InputFixed;
15956       }
15957     } else if (IncomingInputs.size() == 2) {
15958       if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
15959           isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
15960         // We have two non-adjacent or clobbered inputs we need to extract from
15961         // the source half. To do this, we need to map them into some adjacent
15962         // dword slot in the source mask.
15963         int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
15964                               IncomingInputs[1] - SourceOffset};
15965 
15966         // If there is a free slot in the source half mask adjacent to one of
15967         // the inputs, place the other input in it. We use (Index XOR 1) to
15968         // compute an adjacent index.
15969         if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
15970             SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
15971           SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
15972           SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
15973           InputsFixed[1] = InputsFixed[0] ^ 1;
15974         } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
15975                    SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
15976           SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
15977           SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
15978           InputsFixed[0] = InputsFixed[1] ^ 1;
15979         } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
15980                    SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
15981           // The two inputs are in the same DWord but it is clobbered and the
15982           // adjacent DWord isn't used at all. Move both inputs to the free
15983           // slot.
15984           SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
15985           SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
15986           InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
15987           InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
15988         } else {
15989           // The only way we hit this point is if there is no clobbering
15990           // (because there are no off-half inputs to this half) and there is no
15991           // free slot adjacent to one of the inputs. In this case, we have to
15992           // swap an input with a non-input.
15993           for (int i = 0; i < 4; ++i)
15994             assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
15995                    "We can't handle any clobbers here!");
15996           assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
15997                  "Cannot have adjacent inputs here!");
15998 
15999           SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
16000           SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
16001 
16002           // We also have to update the final source mask in this case because
16003           // it may need to undo the above swap.
16004           for (int &M : FinalSourceHalfMask)
16005             if (M == (InputsFixed[0] ^ 1) + SourceOffset)
16006               M = InputsFixed[1] + SourceOffset;
16007             else if (M == InputsFixed[1] + SourceOffset)
16008               M = (InputsFixed[0] ^ 1) + SourceOffset;
16009 
16010           InputsFixed[1] = InputsFixed[0] ^ 1;
16011         }
16012 
16013         // Point everything at the fixed inputs.
16014         for (int &M : HalfMask)
16015           if (M == IncomingInputs[0])
16016             M = InputsFixed[0] + SourceOffset;
16017           else if (M == IncomingInputs[1])
16018             M = InputsFixed[1] + SourceOffset;
16019 
16020         IncomingInputs[0] = InputsFixed[0] + SourceOffset;
16021         IncomingInputs[1] = InputsFixed[1] + SourceOffset;
16022       }
16023     } else {
16024       llvm_unreachable("Unhandled input size!");
16025     }
16026 
16027     // Now hoist the DWord down to the right half.
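    // Use the first dword of the destination half if it is still free;
    // otherwise use the second (the assert below verifies it is free).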
16028     int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
16029     assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
16030     PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
16031     for (int &M : HalfMask)
16032       for (int Input : IncomingInputs)
16033         if (M == Input)
16034           M = FreeDWord * 2 + Input % 2;
16035   };
16036   moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
16037                         /*SourceOffset*/ 4, /*DestOffset*/ 0);
16038   moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
16039                         /*SourceOffset*/ 0, /*DestOffset*/ 4);
16040 
16041   // Now enact all the shuffles we've computed to move the inputs into their
16042   // target half.
16043   if (!isNoopShuffleMask(PSHUFLMask))
16044     V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
16045                     getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
16046   if (!isNoopShuffleMask(PSHUFHMask))
16047     V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
16048                     getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
16049   if (!isNoopShuffleMask(PSHUFDMask))
16050     V = DAG.getBitcast(
16051         VT,
16052         DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
16053                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16054 
16055   // At this point, each half should contain all its inputs, and we can then
16056   // just shuffle them into their final position.
16057   assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
16058          "Failed to lift all the high half inputs to the low mask!");
16059   assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
16060          "Failed to lift all the low half inputs to the high mask!");
16061 
16062   // Do a half shuffle for the low mask.
16063   if (!isNoopShuffleMask(LoMask))
16064     V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
16065                     getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
16066 
16067   // Do a half shuffle with the high mask after shifting its values down.
16068   for (int &M : HiMask)
16069     if (M >= 0)
16070       M -= 4;
16071   if (!isNoopShuffleMask(HiMask))
16072     V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
16073                     getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
16074 
16075   return V;
16076 }
16077 
16078 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
16079 /// blend if only one input is used.
16080 static SDValue lowerShuffleAsBlendOfPSHUFBs(
16081     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
16082     const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
16083   assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
16084          "Lane crossing shuffle masks not supported");
16085 
16086   int NumBytes = VT.getSizeInBits() / 8;
16087   int Size = Mask.size();
16088   int Scale = NumBytes / Size;
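  // For example, a 128-bit v8i16 shuffle has NumBytes == 16 and Size == 8, so
  // Scale == 2 and each mask element expands to two byte selectors.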
16089 
16090   SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
16091   SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
16092   V1InUse = false;
16093   V2InUse = false;
16094 
16095   for (int i = 0; i < NumBytes; ++i) {
16096     int M = Mask[i / Scale];
16097     if (M < 0)
16098       continue;
16099 
16100     const int ZeroMask = 0x80;
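    // PSHUFB zeroes a destination byte whenever bit 7 of its selector byte is
    // set, so 0x80 acts as a "select zero" value.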
16101     int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
16102     int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
16103     if (Zeroable[i / Scale])
16104       V1Idx = V2Idx = ZeroMask;
16105 
16106     V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
16107     V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
16108     V1InUse |= (ZeroMask != V1Idx);
16109     V2InUse |= (ZeroMask != V2Idx);
16110   }
16111 
16112   MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
16113   if (V1InUse)
16114     V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
16115                      DAG.getBuildVector(ShufVT, DL, V1Mask));
16116   if (V2InUse)
16117     V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
16118                      DAG.getBuildVector(ShufVT, DL, V2Mask));
16119 
16120   // If we need shuffled inputs from both, blend the two.
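  // The OR acts as the blend because every byte that one PSHUFB selects is
  // zeroed (via the 0x80 selector) in the other PSHUFB's result.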
16121   SDValue V;
16122   if (V1InUse && V2InUse)
16123     V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
16124   else
16125     V = V1InUse ? V1 : V2;
16126 
16127   // Cast the result back to the correct type.
16128   return DAG.getBitcast(VT, V);
16129 }
16130 
16131 /// Generic lowering of 8-lane i16 shuffles.
16132 ///
16133 /// This handles both single-input shuffles and combined shuffle/blends with
16134 /// two inputs. The single input shuffles are immediately delegated to
16135 /// a dedicated lowering routine.
16136 ///
16137 /// The blends are lowered in one of three fundamental ways. If there are few
16138 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
16139 /// of the input is significantly cheaper when lowered as an interleaving of
16140 /// the two inputs, try to interleave them. Otherwise, blend the low and high
16141 /// halves of the inputs separately (making them have relatively few inputs)
16142 /// and then concatenate them.
16143 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16144                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16145                                  const X86Subtarget &Subtarget,
16146                                  SelectionDAG &DAG) {
16147   assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
16148   assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
16149   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16150 
16151   // Whenever we can lower this as a zext, that instruction is strictly faster
16152   // than any alternative.
16153   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
16154                                                    Zeroable, Subtarget, DAG))
16155     return ZExt;
16156 
16157   // Try to lower using a truncation.
16158   if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
16159                                         Subtarget, DAG))
16160     return V;
16161 
16162   int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
16163 
16164   if (NumV2Inputs == 0) {
16165     // Try to use shift instructions.
16166     if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
16167                                             Zeroable, Subtarget, DAG))
16168       return Shift;
16169 
16170     // Check for being able to broadcast a single element.
16171     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
16172                                                     Mask, Subtarget, DAG))
16173       return Broadcast;
16174 
16175     // Try to use bit rotation instructions.
16176     if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v8i16, V1, Mask,
16177                                                  Subtarget, DAG))
16178       return Rotate;
16179 
16180     // Use dedicated unpack instructions for masks that match their pattern.
16181     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
16182       return V;
16183 
16184     // Use dedicated pack instructions for masks that match their pattern.
16185     if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
16186                                          Subtarget))
16187       return V;
16188 
16189     // Try to use byte rotation instructions.
16190     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
16191                                                   Subtarget, DAG))
16192       return Rotate;
16193 
16194     // Make a copy of the mask so it can be modified.
16195     SmallVector<int, 8> MutableMask(Mask);
16196     return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
16197                                                Subtarget, DAG);
16198   }
16199 
16200   assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
16201          "All single-input shuffles should be canonicalized to be V1-input "
16202          "shuffles.");
16203 
16204   // Try to use shift instructions.
16205   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
16206                                           Zeroable, Subtarget, DAG))
16207     return Shift;
16208 
16209   // See if we can use SSE4A Extraction / Insertion.
16210   if (Subtarget.hasSSE4A())
16211     if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
16212                                           Zeroable, DAG))
16213       return V;
16214 
16215   // There are special ways we can lower some single-element blends.
16216   if (NumV2Inputs == 1)
16217     if (SDValue V = lowerShuffleAsElementInsertion(
16218             DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16219       return V;
16220 
16221   // We have different paths for blend lowering, but they all must use the
16222   // *exact* same predicate.
16223   bool IsBlendSupported = Subtarget.hasSSE41();
16224   if (IsBlendSupported)
16225     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
16226                                             Zeroable, Subtarget, DAG))
16227       return Blend;
16228 
16229   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
16230                                              Zeroable, Subtarget, DAG))
16231     return Masked;
16232 
16233   // Use dedicated unpack instructions for masks that match their pattern.
16234   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
16235     return V;
16236 
16237   // Use dedicated pack instructions for masks that match their pattern.
16238   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
16239                                        Subtarget))
16240     return V;
16241 
16242   // Try to lower using a truncation.
16243   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
16244                                        Subtarget, DAG))
16245     return V;
16246 
16247   // Try to use byte rotation instructions.
16248   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
16249                                                 Subtarget, DAG))
16250     return Rotate;
16251 
16252   if (SDValue BitBlend =
16253           lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
16254     return BitBlend;
16255 
16256   // Try to use byte shift instructions to mask.
16257   if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
16258                                               Zeroable, Subtarget, DAG))
16259     return V;
16260 
16261   // Attempt to lower using compaction; SSE41 is necessary for PACKUSDW.
16262   // We could use SIGN_EXTEND_INREG+PACKSSDW for older targets but this seems to
16263   // be slower than a PSHUFLW+PSHUFHW+PSHUFD chain.
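  // For example, the mask <0, 2, 4, 6, 8, 10, 12, 14> keeps only the even
  // elements of the concatenated inputs (NumEvenDrops == 1); clearing the upper
  // word of each dword and applying PACKUSDW produces exactly that.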
16264   int NumEvenDrops = canLowerByDroppingElements(Mask, true, false);
16265   if ((NumEvenDrops == 1 || NumEvenDrops == 2) && Subtarget.hasSSE41() &&
16266       !Subtarget.hasVLX()) {
16267     // Check if this is part of a 256-bit vector truncation.
16268     if (NumEvenDrops == 2 && Subtarget.hasAVX2() &&
16269         peekThroughBitcasts(V1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
16270         peekThroughBitcasts(V2).getOpcode() == ISD::EXTRACT_SUBVECTOR) {
16271       SDValue V1V2 = concatSubVectors(V1, V2, DAG, DL);
16272       V1V2 = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1V2,
16273                          getZeroVector(MVT::v16i16, Subtarget, DAG, DL),
16274                          DAG.getTargetConstant(0xEE, DL, MVT::i8));
16275       V1V2 = DAG.getBitcast(MVT::v8i32, V1V2);
16276       V1 = extract128BitVector(V1V2, 0, DAG, DL);
16277       V2 = extract128BitVector(V1V2, 4, DAG, DL);
16278     } else {
16279       SmallVector<SDValue, 4> DWordClearOps(4,
16280                                             DAG.getConstant(0, DL, MVT::i32));
16281       for (unsigned i = 0; i != 4; i += 1 << (NumEvenDrops - 1))
16282         DWordClearOps[i] = DAG.getConstant(0xFFFF, DL, MVT::i32);
16283       SDValue DWordClearMask =
16284           DAG.getBuildVector(MVT::v4i32, DL, DWordClearOps);
16285       V1 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V1),
16286                        DWordClearMask);
16287       V2 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V2),
16288                        DWordClearMask);
16289     }
16290     // Now pack things back together.
16291     SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v8i16, V1, V2);
16292     if (NumEvenDrops == 2) {
16293       Result = DAG.getBitcast(MVT::v4i32, Result);
16294       Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v8i16, Result, Result);
16295     }
16296     return Result;
16297   }
16298 
16299   // When compacting odd (upper) elements, use PACKSS pre-SSE41.
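  // The arithmetic shift used pre-SSE41 keeps each dword within the signed
  // 16-bit range so PACKSSDW does not saturate; with SSE41 a logical shift
  // pairs with PACKUSDW's unsigned saturation instead.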
16300   int NumOddDrops = canLowerByDroppingElements(Mask, false, false);
16301   if (NumOddDrops == 1) {
16302     bool HasSSE41 = Subtarget.hasSSE41();
16303     V1 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
16304                      DAG.getBitcast(MVT::v4i32, V1),
16305                      DAG.getTargetConstant(16, DL, MVT::i8));
16306     V2 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
16307                      DAG.getBitcast(MVT::v4i32, V2),
16308                      DAG.getTargetConstant(16, DL, MVT::i8));
16309     return DAG.getNode(HasSSE41 ? X86ISD::PACKUS : X86ISD::PACKSS, DL,
16310                        MVT::v8i16, V1, V2);
16311   }
16312 
16313   // Try to lower by permuting the inputs into an unpack instruction.
16314   if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
16315                                                       Mask, Subtarget, DAG))
16316     return Unpack;
16317 
16318   // If we can't directly blend but can use PSHUFB, that will be better as it
16319   // can both shuffle and set up the inefficient blend.
16320   if (!IsBlendSupported && Subtarget.hasSSSE3()) {
16321     bool V1InUse, V2InUse;
16322     return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
16323                                         Zeroable, DAG, V1InUse, V2InUse);
16324   }
16325 
16326   // We can always bit-blend if we have to, so the fallback strategy is to
16327   // decompose into single-input permutes and blends/unpacks.
16328   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i16, V1, V2,
16329                                               Mask, Subtarget, DAG);
16330 }
16331 
16332 /// Lower 8-lane 16-bit floating point shuffles.
16333 static SDValue lowerV8F16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16334                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16335                                  const X86Subtarget &Subtarget,
16336                                  SelectionDAG &DAG) {
16337   assert(V1.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
16338   assert(V2.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
16339   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16340   int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });
16341 
16342   if (Subtarget.hasFP16()) {
16343     if (NumV2Elements == 0) {
16344       // Check for being able to broadcast a single element.
16345       if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f16, V1, V2,
16346                                                       Mask, Subtarget, DAG))
16347         return Broadcast;
16348     }
16349     if (NumV2Elements == 1 && Mask[0] >= 8)
16350       if (SDValue V = lowerShuffleAsElementInsertion(
16351               DL, MVT::v8f16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16352         return V;
16353   }
16354 
16355   V1 = DAG.getBitcast(MVT::v8i16, V1);
16356   V2 = DAG.getBitcast(MVT::v8i16, V2);
16357   return DAG.getBitcast(MVT::v8f16,
16358                         DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, Mask));
16359 }
16360 
16361 // Lowers a unary/binary shuffle as VPERMV/VPERMV3. For non-VLX targets,
16362 // sub-512-bit shuffles are padded to 512 bits for the shuffle and the
16363 // active subvector is then extracted.
16364 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
16365                                      ArrayRef<int> Mask, SDValue V1, SDValue V2,
16366                                      const X86Subtarget &Subtarget,
16367                                      SelectionDAG &DAG) {
16368   MVT MaskVT = VT.changeTypeToInteger();
16369   SDValue MaskNode;
16370   MVT ShuffleVT = VT;
16371   if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
16372     V1 = widenSubVector(V1, false, Subtarget, DAG, DL, 512);
16373     V2 = widenSubVector(V2, false, Subtarget, DAG, DL, 512);
16374     ShuffleVT = V1.getSimpleValueType();
16375 
16376     // Adjust mask to correct indices for the second input.
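          // After widening, the second operand's elements start at the widened
          // element count rather than at NumElts. For example (illustrative),
          // a v8i16 shuffle is padded to v32i16 (Scale = 4), so a mask entry
          // of 9 (element 1 of V2) becomes 9 + (4 - 1) * 8 = 33, i.e. element
          // 1 of the widened V2 in the VPERMV3 index space.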
16377     int NumElts = VT.getVectorNumElements();
16378     unsigned Scale = 512 / VT.getSizeInBits();
16379     SmallVector<int, 32> AdjustedMask(Mask);
16380     for (int &M : AdjustedMask)
16381       if (NumElts <= M)
16382         M += (Scale - 1) * NumElts;
16383     MaskNode = getConstVector(AdjustedMask, MaskVT, DAG, DL, true);
16384     MaskNode = widenSubVector(MaskNode, false, Subtarget, DAG, DL, 512);
16385   } else {
16386     MaskNode = getConstVector(Mask, MaskVT, DAG, DL, true);
16387   }
16388 
16389   SDValue Result;
16390   if (V2.isUndef())
16391     Result = DAG.getNode(X86ISD::VPERMV, DL, ShuffleVT, MaskNode, V1);
16392   else
16393     Result = DAG.getNode(X86ISD::VPERMV3, DL, ShuffleVT, V1, MaskNode, V2);
16394 
16395   if (VT != ShuffleVT)
16396     Result = extractSubVector(Result, 0, DAG, DL, VT.getSizeInBits());
16397 
16398   return Result;
16399 }
16400 
16401 /// Generic lowering of v16i8 shuffles.
16402 ///
16403 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
16404 /// detect any complexity-reducing interleaving. If that doesn't help, it uses
16405 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
16406 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
16407 /// back together.
16408 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16409                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16410                                  const X86Subtarget &Subtarget,
16411                                  SelectionDAG &DAG) {
16412   assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
16413   assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
16414   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16415 
16416   // Try to use shift instructions.
16417   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
16418                                           Zeroable, Subtarget, DAG))
16419     return Shift;
16420 
16421   // Try to use byte rotation instructions.
16422   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
16423                                                 Subtarget, DAG))
16424     return Rotate;
16425 
16426   // Use dedicated pack instructions for masks that match their pattern.
16427   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
16428                                        Subtarget))
16429     return V;
16430 
16431   // Try to use a zext lowering.
16432   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
16433                                                    Zeroable, Subtarget, DAG))
16434     return ZExt;
16435 
16436   // Try to lower using a truncation.
16437   if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
16438                                         Subtarget, DAG))
16439     return V;
16440 
16441   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
16442                                        Subtarget, DAG))
16443     return V;
16444 
16445   // See if we can use SSE4A Extraction / Insertion.
16446   if (Subtarget.hasSSE4A())
16447     if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
16448                                           Zeroable, DAG))
16449       return V;
16450 
16451   int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
16452 
16453   // For single-input shuffles, there are some nicer lowering tricks we can use.
16454   if (NumV2Elements == 0) {
16455     // Check for being able to broadcast a single element.
16456     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
16457                                                     Mask, Subtarget, DAG))
16458       return Broadcast;
16459 
16460     // Try to use bit rotation instructions.
16461     if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i8, V1, Mask,
16462                                                  Subtarget, DAG))
16463       return Rotate;
16464 
16465     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
16466       return V;
16467 
16468     // Check whether we can widen this to an i16 shuffle by duplicating bytes.
16469     // Notably, this handles splat and partial-splat shuffles more efficiently.
16470     // However, it only makes sense if the pre-duplication shuffle simplifies
16471     // things significantly. Currently, this means we need to be able to
16472     // express the pre-duplication shuffle as an i16 shuffle.
16473     //
16474     // FIXME: We should check for other patterns which can be widened into an
16475     // i16 shuffle as well.
16476     auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
16477       for (int i = 0; i < 16; i += 2)
16478         if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
16479           return false;
16480 
16481       return true;
16482     };
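          // For example (illustrative), canWidenViaDuplication accepts a mask
          // such as <1,1, 6,6, 3,-1, 7,7, ...> because every byte pair reads a
          // single (or undef) source byte, but rejects <0,1, ...> because that
          // pair needs two different bytes.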
16483     auto tryToWidenViaDuplication = [&]() -> SDValue {
16484       if (!canWidenViaDuplication(Mask))
16485         return SDValue();
16486       SmallVector<int, 4> LoInputs;
16487       copy_if(Mask, std::back_inserter(LoInputs),
16488               [](int M) { return M >= 0 && M < 8; });
16489       array_pod_sort(LoInputs.begin(), LoInputs.end());
16490       LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
16491                      LoInputs.end());
16492       SmallVector<int, 4> HiInputs;
16493       copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
16494       array_pod_sort(HiInputs.begin(), HiInputs.end());
16495       HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
16496                      HiInputs.end());
16497 
16498       bool TargetLo = LoInputs.size() >= HiInputs.size();
16499       ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
16500       ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
16501 
16502       int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
16503       SmallDenseMap<int, int, 8> LaneMap;
16504       for (int I : InPlaceInputs) {
16505         PreDupI16Shuffle[I/2] = I/2;
16506         LaneMap[I] = I;
16507       }
16508       int j = TargetLo ? 0 : 4, je = j + 4;
16509       for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
16510         // Check if j is already a shuffle of this input. This happens when
16511         // there are two adjacent bytes after we move the low one.
16512         if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
16513           // If we haven't yet mapped the input, search for a slot into which
16514           // we can map it.
16515           while (j < je && PreDupI16Shuffle[j] >= 0)
16516             ++j;
16517 
16518           if (j == je)
16519             // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
16520             return SDValue();
16521 
16522           // Map this input with the i16 shuffle.
16523           PreDupI16Shuffle[j] = MovingInputs[i] / 2;
16524         }
16525 
16526         // Update the lane map based on the mapping we ended up with.
16527         LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
16528       }
16529       V1 = DAG.getBitcast(
16530           MVT::v16i8,
16531           DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
16532                                DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
16533 
16534       // Unpack the bytes to form the i16s that will be shuffled into place.
16535       bool EvenInUse = false, OddInUse = false;
16536       for (int i = 0; i < 16; i += 2) {
16537         EvenInUse |= (Mask[i + 0] >= 0);
16538         OddInUse |= (Mask[i + 1] >= 0);
16539         if (EvenInUse && OddInUse)
16540           break;
16541       }
16542       V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
16543                        MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
16544                        OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
16545 
16546       int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
16547       for (int i = 0; i < 16; ++i)
16548         if (Mask[i] >= 0) {
16549           int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
16550           assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
16551           if (PostDupI16Shuffle[i / 2] < 0)
16552             PostDupI16Shuffle[i / 2] = MappedMask;
16553           else
16554             assert(PostDupI16Shuffle[i / 2] == MappedMask &&
16555                    "Conflicting entries in the original shuffle!");
16556         }
16557       return DAG.getBitcast(
16558           MVT::v16i8,
16559           DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
16560                                DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
16561     };
16562     if (SDValue V = tryToWidenViaDuplication())
16563       return V;
16564   }
16565 
16566   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
16567                                              Zeroable, Subtarget, DAG))
16568     return Masked;
16569 
16570   // Use dedicated unpack instructions for masks that match their pattern.
16571   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
16572     return V;
16573 
16574   // Try to use byte shift instructions to mask.
16575   if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
16576                                               Zeroable, Subtarget, DAG))
16577     return V;
16578 
16579   // Check for compaction patterns.
16580   bool IsSingleInput = V2.isUndef();
16581   int NumEvenDrops = canLowerByDroppingElements(Mask, true, IsSingleInput);
16582 
16583   // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
16584   // with PSHUFB. It is important to do this before we attempt to generate any
16585   // blends but after all of the single-input lowerings. If the single input
16586   // lowerings can find an instruction sequence that is faster than a PSHUFB, we
16587   // want to preserve that and we can DAG combine any longer sequences into
16588   // a PSHUFB in the end. But once we start blending from multiple inputs,
16589   // the complexity of DAG combining bad patterns back into PSHUFB is too high,
16590   // and there are *very* few patterns that would actually be faster than the
16591   // PSHUFB approach because of its ability to zero lanes.
16592   //
16593   // If the mask is a binary compaction, we can more efficiently perform this
16594   // as a PACKUS(AND(),AND()) - which is quicker than UNPACK(PSHUFB(),PSHUFB()).
16595   //
16596   // FIXME: The only exceptions to the above are blends which are exact
16597   // interleavings with direct instructions supporting them. We currently don't
16598   // handle those well here.
16599   if (Subtarget.hasSSSE3() && (IsSingleInput || NumEvenDrops != 1)) {
16600     bool V1InUse = false;
16601     bool V2InUse = false;
16602 
16603     SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
16604         DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
16605 
16606     // If both V1 and V2 are in use and we can use a direct blend or an unpack,
16607     // do so. This avoids using them to handle blends-with-zero which is
16608     // important as a single pshufb is significantly faster for that.
16609     if (V1InUse && V2InUse) {
16610       if (Subtarget.hasSSE41())
16611         if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
16612                                                 Zeroable, Subtarget, DAG))
16613           return Blend;
16614 
16615       // We can use an unpack to do the blending rather than an or in some
16616   // cases. Even though the or may be (very slightly) more efficient, we
16617   // prefer this lowering because there are common cases where part of
16618       // the complexity of the shuffles goes away when we do the final blend as
16619       // an unpack.
16620       // FIXME: It might be worth trying to detect if the unpack-feeding
16621       // shuffles will both be pshufb, in which case we shouldn't bother with
16622       // this.
16623       if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
16624               DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
16625         return Unpack;
16626 
16627       // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
16628       if (Subtarget.hasVBMI())
16629         return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, Subtarget,
16630                                      DAG);
16631 
16632       // If we have XOP we can use one VPPERM instead of multiple PSHUFBs.
16633       if (Subtarget.hasXOP()) {
16634         SDValue MaskNode = getConstVector(Mask, MVT::v16i8, DAG, DL, true);
16635         return DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, V1, V2, MaskNode);
16636       }
16637 
16638       // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
16639       // PALIGNR will be cheaper than the second PSHUFB+OR.
16640       if (SDValue V = lowerShuffleAsByteRotateAndPermute(
16641               DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
16642         return V;
16643     }
16644 
16645     return PSHUFB;
16646   }
16647 
16648   // There are special ways we can lower some single-element blends.
16649   if (NumV2Elements == 1)
16650     if (SDValue V = lowerShuffleAsElementInsertion(
16651             DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16652       return V;
16653 
16654   if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
16655     return Blend;
16656 
16657   // Check whether a compaction lowering can be done. This handles shuffles
16658   // which take every Nth element for some even N. See the helper function for
16659   // details.
16660   //
16661   // We special case these as they can be particularly efficiently handled with
16662   // the PACKUSWB instruction on x86 and they show up in common patterns of
16663   // rearranging bytes to truncate wide elements.
16664   if (NumEvenDrops) {
16665     // NumEvenDrops is the power of two stride of the elements. Another way of
16666     // thinking about it is that we need to drop the even elements this many
16667     // times to get the original input.
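          // For example (illustrative), NumEvenDrops == 2 corresponds to a
          // mask that keeps every 4th byte (<0, 4, 8, ...>): the word-clear
          // mask built below is <0x00FF, 0, 0x00FF, 0, ...>, so only one byte
          // of each 32-bit group survives before the PACKUS stages recombine
          // them.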
16668 
16669     // First we need to zero all the dropped bytes.
16670     assert(NumEvenDrops <= 3 &&
16671            "No support for dropping even elements more than 3 times.");
16672     SmallVector<SDValue, 8> WordClearOps(8, DAG.getConstant(0, DL, MVT::i16));
16673     for (unsigned i = 0; i != 8; i += 1 << (NumEvenDrops - 1))
16674       WordClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i16);
16675     SDValue WordClearMask = DAG.getBuildVector(MVT::v8i16, DL, WordClearOps);
16676     V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V1),
16677                      WordClearMask);
16678     if (!IsSingleInput)
16679       V2 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V2),
16680                        WordClearMask);
16681 
16682     // Now pack things back together.
16683     SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
16684                                  IsSingleInput ? V1 : V2);
16685     for (int i = 1; i < NumEvenDrops; ++i) {
16686       Result = DAG.getBitcast(MVT::v8i16, Result);
16687       Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
16688     }
16689     return Result;
16690   }
16691 
16692   int NumOddDrops = canLowerByDroppingElements(Mask, false, IsSingleInput);
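        // For NumOddDrops == 1 the mask keeps every other byte starting at
        // byte 1 (<1, 3, 5, ...>, illustrative); shifting each word right by 8
        // moves those high bytes into the low byte of each word so a single
        // PACKUS can gather them.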
16693   if (NumOddDrops == 1) {
16694     V1 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
16695                      DAG.getBitcast(MVT::v8i16, V1),
16696                      DAG.getTargetConstant(8, DL, MVT::i8));
16697     if (!IsSingleInput)
16698       V2 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
16699                        DAG.getBitcast(MVT::v8i16, V2),
16700                        DAG.getTargetConstant(8, DL, MVT::i8));
16701     return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
16702                        IsSingleInput ? V1 : V2);
16703   }
16704 
16705   // Handle multi-input cases by blending/unpacking single-input shuffles.
16706   if (NumV2Elements > 0)
16707     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v16i8, V1, V2, Mask,
16708                                                 Subtarget, DAG);
16709 
16710   // The fallback path for single-input shuffles widens this into two v8i16
16711   // vectors with unpacks, shuffles those, and then pulls them back together
16712   // with a pack.
16713   SDValue V = V1;
16714 
16715   std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
16716   std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
16717   for (int i = 0; i < 16; ++i)
16718     if (Mask[i] >= 0)
16719       (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
16720 
16721   SDValue VLoHalf, VHiHalf;
16722   // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
16723   // them out and avoid using UNPCK{L,H} to extract the elements of V as
16724   // i16s.
16725   if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
16726       none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
16727     // Use a mask to drop the high bytes.
16728     VLoHalf = DAG.getBitcast(MVT::v8i16, V);
16729     VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
16730                           DAG.getConstant(0x00FF, DL, MVT::v8i16));
16731 
16732     // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
16733     VHiHalf = DAG.getUNDEF(MVT::v8i16);
16734 
16735     // Squash the masks to point directly into VLoHalf.
16736     for (int &M : LoBlendMask)
16737       if (M >= 0)
16738         M /= 2;
16739     for (int &M : HiBlendMask)
16740       if (M >= 0)
16741         M /= 2;
16742   } else {
16743     // Otherwise just unpack the low half of V into VLoHalf and the high half into
16744     // VHiHalf so that we can blend them as i16s.
16745     SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
16746 
16747     VLoHalf = DAG.getBitcast(
16748         MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
16749     VHiHalf = DAG.getBitcast(
16750         MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
16751   }
16752 
16753   SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
16754   SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
16755 
16756   return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
16757 }
16758 
16759 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
16760 ///
16761 /// This routine breaks down the specific type of 128-bit shuffle and
16762 /// dispatches to the lowering routines accordingly.
16763 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
16764                                   MVT VT, SDValue V1, SDValue V2,
16765                                   const APInt &Zeroable,
16766                                   const X86Subtarget &Subtarget,
16767                                   SelectionDAG &DAG) {
16768   switch (VT.SimpleTy) {
16769   case MVT::v2i64:
16770     return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16771   case MVT::v2f64:
16772     return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16773   case MVT::v4i32:
16774     return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16775   case MVT::v4f32:
16776     return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16777   case MVT::v8i16:
16778     return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16779   case MVT::v8f16:
16780     return lowerV8F16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16781   case MVT::v16i8:
16782     return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16783 
16784   default:
16785     llvm_unreachable("Unimplemented!");
16786   }
16787 }
16788 
16789 /// Generic routine to split vector shuffle into half-sized shuffles.
16790 ///
16791 /// This routine just extracts two subvectors, shuffles them independently, and
16792 /// then concatenates them back together. This should work effectively with all
16793 /// AVX vector shuffle types.
16794 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
16795                                     SDValue V2, ArrayRef<int> Mask,
16796                                     SelectionDAG &DAG) {
16797   assert(VT.getSizeInBits() >= 256 &&
16798          "Only for 256-bit or wider vector shuffles!");
16799   assert(V1.getSimpleValueType() == VT && "Bad operand type!");
16800   assert(V2.getSimpleValueType() == VT && "Bad operand type!");
16801 
16802   ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
16803   ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
16804 
16805   int NumElements = VT.getVectorNumElements();
16806   int SplitNumElements = NumElements / 2;
16807   MVT ScalarVT = VT.getVectorElementType();
16808   MVT SplitVT = MVT::getVectorVT(ScalarVT, SplitNumElements);
16809 
16810   // Use splitVector/extractSubVector so that split build-vectors just build two
16811   // narrower build vectors. This helps shuffling with splats and zeros.
16812   auto SplitVector = [&](SDValue V) {
16813     SDValue LoV, HiV;
16814     std::tie(LoV, HiV) = splitVector(peekThroughBitcasts(V), DAG, DL);
16815     return std::make_pair(DAG.getBitcast(SplitVT, LoV),
16816                           DAG.getBitcast(SplitVT, HiV));
16817   };
16818 
16819   SDValue LoV1, HiV1, LoV2, HiV2;
16820   std::tie(LoV1, HiV1) = SplitVector(V1);
16821   std::tie(LoV2, HiV2) = SplitVector(V2);
16822 
16823   // Now create two 4-way blends of these half-width vectors.
16824   auto HalfBlend = [&](ArrayRef<int> HalfMask) {
16825     bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
16826     SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
16827     SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
16828     SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
16829     for (int i = 0; i < SplitNumElements; ++i) {
16830       int M = HalfMask[i];
16831       if (M >= NumElements) {
16832         if (M >= NumElements + SplitNumElements)
16833           UseHiV2 = true;
16834         else
16835           UseLoV2 = true;
16836         V2BlendMask[i] = M - NumElements;
16837         BlendMask[i] = SplitNumElements + i;
16838       } else if (M >= 0) {
16839         if (M >= SplitNumElements)
16840           UseHiV1 = true;
16841         else
16842           UseLoV1 = true;
16843         V1BlendMask[i] = M;
16844         BlendMask[i] = i;
16845       }
16846     }
16847 
16848     // Because the lowering happens after all combining takes place, we need to
16849     // manually combine these blend masks as much as possible so that we create
16850     // a minimal number of high-level vector shuffle nodes.
16851 
16852     // First try just blending the halves of V1 or V2.
16853     if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
16854       return DAG.getUNDEF(SplitVT);
16855     if (!UseLoV2 && !UseHiV2)
16856       return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
16857     if (!UseLoV1 && !UseHiV1)
16858       return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
16859 
16860     SDValue V1Blend, V2Blend;
16861     if (UseLoV1 && UseHiV1) {
16862       V1Blend =
16863         DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
16864     } else {
16865       // We only use half of V1 so map the usage down into the final blend mask.
16866       V1Blend = UseLoV1 ? LoV1 : HiV1;
16867       for (int i = 0; i < SplitNumElements; ++i)
16868         if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
16869           BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
16870     }
16871     if (UseLoV2 && UseHiV2) {
16872       V2Blend =
16873         DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
16874     } else {
16875       // We only use half of V2 so map the usage down into the final blend mask.
16876       V2Blend = UseLoV2 ? LoV2 : HiV2;
16877       for (int i = 0; i < SplitNumElements; ++i)
16878         if (BlendMask[i] >= SplitNumElements)
16879           BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
16880     }
16881     return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
16882   };
16883   SDValue Lo = HalfBlend(LoMask);
16884   SDValue Hi = HalfBlend(HiMask);
16885   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
16886 }
16887 
16888 /// Either split a vector in halves or decompose the shuffles and the
16889 /// blend/unpack.
16890 ///
16891 /// This is provided as a good fallback for many lowerings of non-single-input
16892 /// shuffles with more than one 128-bit lane. In those cases, we want to select
16893 /// between splitting the shuffle into 128-bit components and stitching those
16894 /// back together vs. extracting the single-input shuffles and blending those
16895 /// results.
16896 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
16897                                           SDValue V2, ArrayRef<int> Mask,
16898                                           const X86Subtarget &Subtarget,
16899                                           SelectionDAG &DAG) {
16900   assert(!V2.isUndef() && "This routine must not be used to lower single-input "
16901          "shuffles as it could then recurse on itself.");
16902   int Size = Mask.size();
16903 
16904   // If this can be modeled as a broadcast of two elements followed by a blend,
16905   // prefer that lowering. This is especially important because broadcasts can
16906   // often fold with memory operands.
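        // For example (illustrative), the v4f64 mask <0, 4, 0, 4> is a
        // broadcast of element 0 from each input followed by a blend, which is
        // why the decomposed lowering is preferred for it.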
16907   auto DoBothBroadcast = [&] {
16908     int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
16909     for (int M : Mask)
16910       if (M >= Size) {
16911         if (V2BroadcastIdx < 0)
16912           V2BroadcastIdx = M - Size;
16913         else if (M - Size != V2BroadcastIdx)
16914           return false;
16915       } else if (M >= 0) {
16916         if (V1BroadcastIdx < 0)
16917           V1BroadcastIdx = M;
16918         else if (M != V1BroadcastIdx)
16919           return false;
16920       }
16921     return true;
16922   };
16923   if (DoBothBroadcast())
16924     return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
16925                                                 DAG);
16926 
16927   // If the inputs all stem from a single 128-bit lane of each input, then we
16928   // split them rather than blending because the split will decompose to
16929   // unusually few instructions.
16930   int LaneCount = VT.getSizeInBits() / 128;
16931   int LaneSize = Size / LaneCount;
16932   SmallBitVector LaneInputs[2];
16933   LaneInputs[0].resize(LaneCount, false);
16934   LaneInputs[1].resize(LaneCount, false);
16935   for (int i = 0; i < Size; ++i)
16936     if (Mask[i] >= 0)
16937       LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
16938   if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
16939     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
16940 
16941   // Otherwise, just fall back to decomposed shuffles and a blend/unpack. This
16942   // requires that the decomposed single-input shuffles don't end up here.
16943   return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
16944                                               DAG);
16945 }
16946 
16947 // Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
16948 // TODO: Extend to support v8f32 (+ 512-bit shuffles).
16949 static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
16950                                                  SDValue V1, SDValue V2,
16951                                                  ArrayRef<int> Mask,
16952                                                  SelectionDAG &DAG) {
16953   assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");
16954 
16955   int LHSMask[4] = {-1, -1, -1, -1};
16956   int RHSMask[4] = {-1, -1, -1, -1};
16957   unsigned SHUFPMask = 0;
16958 
16959   // As SHUFPD uses a single LHS/RHS element per lane, we can always
16960   // perform the shuffle once the lanes have been shuffled in place.
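        // For example (illustrative), the v4f64 mask <1, 5, 2, 6> produces
        // LHSMask = <u, 1, 2, u>, RHSMask = <u, 5, 6, u> and SHUFPMask =
        // 0b0011: the low result lane takes the odd element of each shuffled
        // input's low lane, and the high result lane takes the even element of
        // the high lane.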
16961   for (int i = 0; i != 4; ++i) {
16962     int M = Mask[i];
16963     if (M < 0)
16964       continue;
16965     int LaneBase = i & ~1;
16966     auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
16967     LaneMask[LaneBase + (M & 1)] = M;
16968     SHUFPMask |= (M & 1) << i;
16969   }
16970 
16971   SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
16972   SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
16973   return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
16974                      DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
16975 }
16976 
16977 /// Lower a vector shuffle crossing multiple 128-bit lanes as
16978 /// a lane permutation followed by a per-lane permutation.
16979 ///
16980 /// This is mainly for cases where we can have non-repeating permutes
16981 /// in each lane.
16982 ///
16983 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
16984 /// we should investigate merging them.
16985 static SDValue lowerShuffleAsLanePermuteAndPermute(
16986     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
16987     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
16988   int NumElts = VT.getVectorNumElements();
16989   int NumLanes = VT.getSizeInBits() / 128;
16990   int NumEltsPerLane = NumElts / NumLanes;
16991   bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef();
16992 
16993   /// Attempts to find a sublane permute with the given size
16994   /// that gets all elements into their target lanes.
16995   ///
16996   /// If successful, fills CrossLaneMask and InLaneMask and returns the
16997   /// permuted result; on failure returns SDValue() and may clobber InLaneMask.
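        /// For example (illustrative), the v8f32 mask <5,4,6,7, 0,1,3,2> with
        /// whole 128-bit sublanes decomposes into the cross-lane swap
        /// <4,5,6,7, 0,1,2,3> followed by the in-lane permute <1,0,2,3, 4,5,7,6>.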
16998   auto getSublanePermute = [&](int NumSublanes) -> SDValue {
16999     int NumSublanesPerLane = NumSublanes / NumLanes;
17000     int NumEltsPerSublane = NumElts / NumSublanes;
17001 
17002     SmallVector<int, 16> CrossLaneMask;
17003     SmallVector<int, 16> InLaneMask(NumElts, SM_SentinelUndef);
17004     // CrossLaneMask but one entry == one sublane.
17005     SmallVector<int, 16> CrossLaneMaskLarge(NumSublanes, SM_SentinelUndef);
17006 
17007     for (int i = 0; i != NumElts; ++i) {
17008       int M = Mask[i];
17009       if (M < 0)
17010         continue;
17011 
17012       int SrcSublane = M / NumEltsPerSublane;
17013       int DstLane = i / NumEltsPerLane;
17014 
17015       // We only need to get the elements into the right lane, not sublane.
17016       // So search all sublanes that make up the destination lane.
17017       bool Found = false;
17018       int DstSubStart = DstLane * NumSublanesPerLane;
17019       int DstSubEnd = DstSubStart + NumSublanesPerLane;
17020       for (int DstSublane = DstSubStart; DstSublane < DstSubEnd; ++DstSublane) {
17021         if (!isUndefOrEqual(CrossLaneMaskLarge[DstSublane], SrcSublane))
17022           continue;
17023 
17024         Found = true;
17025         CrossLaneMaskLarge[DstSublane] = SrcSublane;
17026         int DstSublaneOffset = DstSublane * NumEltsPerSublane;
17027         InLaneMask[i] = DstSublaneOffset + M % NumEltsPerSublane;
17028         break;
17029       }
17030       if (!Found)
17031         return SDValue();
17032     }
17033 
17034     // Fill CrossLaneMask using CrossLaneMaskLarge.
17035     narrowShuffleMaskElts(NumEltsPerSublane, CrossLaneMaskLarge, CrossLaneMask);
17036 
17037     if (!CanUseSublanes) {
17038       // If we're only shuffling a single lowest lane and the rest are identity
17039       // then don't bother.
17040       // TODO - isShuffleMaskInputInPlace could be extended to something like
17041       // this.
17042       int NumIdentityLanes = 0;
17043       bool OnlyShuffleLowestLane = true;
17044       for (int i = 0; i != NumLanes; ++i) {
17045         int LaneOffset = i * NumEltsPerLane;
17046         if (isSequentialOrUndefInRange(InLaneMask, LaneOffset, NumEltsPerLane,
17047                                        i * NumEltsPerLane))
17048           NumIdentityLanes++;
17049         else if (CrossLaneMask[LaneOffset] != 0)
17050           OnlyShuffleLowestLane = false;
17051       }
17052       if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
17053         return SDValue();
17054     }
17055 
17056     // Avoid returning the same shuffle operation. For example,
17057     // t7: v16i16 = vector_shuffle<8,9,10,11,4,5,6,7,0,1,2,3,12,13,14,15> t5,
17058     //                             undef:v16i16
17059     if (CrossLaneMask == Mask || InLaneMask == Mask)
17060       return SDValue();
17061 
17062     SDValue CrossLane = DAG.getVectorShuffle(VT, DL, V1, V2, CrossLaneMask);
17063     return DAG.getVectorShuffle(VT, DL, CrossLane, DAG.getUNDEF(VT),
17064                                 InLaneMask);
17065   };
17066 
17067   // First attempt a solution with full lanes.
17068   if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes))
17069     return V;
17070 
17071   // The rest of the solutions use sublanes.
17072   if (!CanUseSublanes)
17073     return SDValue();
17074 
17075   // Then attempt a solution with 64-bit sublanes (vpermq).
17076   if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes * 2))
17077     return V;
17078 
17079   // If that doesn't work and we have fast variable cross-lane shuffle,
17080   // attempt 32-bit sublanes (vpermd).
17081   if (!Subtarget.hasFastVariableCrossLaneShuffle())
17082     return SDValue();
17083 
17084   return getSublanePermute(/*NumSublanes=*/NumLanes * 4);
17085 }
17086 
17087 /// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
17088 /// source with a lane permutation.
17089 ///
17090 /// This lowering strategy results in four instructions in the worst case for a
17091 /// single-input cross lane shuffle which is lower than any other fully general
17092 /// cross-lane shuffle strategy I'm aware of. Special cases for each particular
17093 /// shuffle pattern should be handled prior to trying this lowering.
17094 static SDValue lowerShuffleAsLanePermuteAndShuffle(
17095     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
17096     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
17097   // FIXME: This should probably be generalized for 512-bit vectors as well.
17098   assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
17099   int Size = Mask.size();
17100   int LaneSize = Size / 2;
17101 
17102   // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
17103   // Only do this if the elements aren't all from the lower lane,
17104   // otherwise we're (probably) better off doing a split.
17105   if (VT == MVT::v4f64 &&
17106       !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
17107     return lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG);
17108 
17109   // If there are only inputs from one 128-bit lane, splitting will in fact be
17110   // less expensive. The flags track whether the given lane contains an element
17111   // that crosses to another lane.
17112   bool AllLanes;
17113   if (!Subtarget.hasAVX2()) {
17114     bool LaneCrossing[2] = {false, false};
17115     for (int i = 0; i < Size; ++i)
17116       if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
17117         LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
17118     AllLanes = LaneCrossing[0] && LaneCrossing[1];
17119   } else {
17120     bool LaneUsed[2] = {false, false};
17121     for (int i = 0; i < Size; ++i)
17122       if (Mask[i] >= 0)
17123         LaneUsed[(Mask[i] % Size) / LaneSize] = true;
17124     AllLanes = LaneUsed[0] && LaneUsed[1];
17125   }
17126 
17127   // TODO - we could support shuffling V2 in the Flipped input.
17128   assert(V2.isUndef() &&
17129          "This last part of this routine only works on single input shuffles");
17130 
17131   SmallVector<int, 32> InLaneMask(Mask);
17132   for (int i = 0; i < Size; ++i) {
17133     int &M = InLaneMask[i];
17134     if (M < 0)
17135       continue;
17136     if (((M % Size) / LaneSize) != (i / LaneSize))
17137       M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
17138   }
17139   assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
17140          "In-lane shuffle mask expected");
17141 
17142   // If we're not using both lanes and the in-lane mask is not a repeating
17143   // mask, then we're better off splitting.
17144   if (!AllLanes && !is128BitLaneRepeatedShuffleMask(VT, InLaneMask))
17145     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
17146 
17147   // Flip the lanes, and shuffle the results which should now be in-lane.
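        // For example (illustrative), in a v8f32 shuffle a request for
        // Mask[1] == 6 was remapped above to InLaneMask[1] == 10, i.e. element
        // 2 of the second operand; after the lane flip, Flipped's element 2 is
        // the original element 6, so the final shuffle stays in-lane.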
17148   MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
17149   SDValue Flipped = DAG.getBitcast(PVT, V1);
17150   Flipped =
17151       DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
17152   Flipped = DAG.getBitcast(VT, Flipped);
17153   return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
17154 }
17155 
17156 /// Handle lowering 2-lane 128-bit shuffles.
17157 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
17158                                   SDValue V2, ArrayRef<int> Mask,
17159                                   const APInt &Zeroable,
17160                                   const X86Subtarget &Subtarget,
17161                                   SelectionDAG &DAG) {
17162   if (V2.isUndef()) {
17163     // Attempt to match VBROADCAST*128 subvector broadcast load.
17164     bool SplatLo = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1);
17165     bool SplatHi = isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1);
17166     if ((SplatLo || SplatHi) && !Subtarget.hasAVX512() && V1.hasOneUse() &&
17167         X86::mayFoldLoad(peekThroughOneUseBitcasts(V1), Subtarget)) {
17168       MVT MemVT = VT.getHalfNumVectorElementsVT();
17169       unsigned Ofs = SplatLo ? 0 : MemVT.getStoreSize();
17170       auto *Ld = cast<LoadSDNode>(peekThroughOneUseBitcasts(V1));
17171       if (SDValue BcstLd = getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, DL,
17172                                              VT, MemVT, Ld, Ofs, DAG))
17173         return BcstLd;
17174     }
17175 
17176     // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
17177     if (Subtarget.hasAVX2())
17178       return SDValue();
17179   }
17180 
17181   bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
17182 
17183   SmallVector<int, 4> WidenedMask;
17184   if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
17185     return SDValue();
17186 
17187   bool IsLowZero = (Zeroable & 0x3) == 0x3;
17188   bool IsHighZero = (Zeroable & 0xc) == 0xc;
17189 
17190   // Try to use an insert into a zero vector.
17191   if (WidenedMask[0] == 0 && IsHighZero) {
17192     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
17193     SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
17194                               DAG.getIntPtrConstant(0, DL));
17195     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
17196                        getZeroVector(VT, Subtarget, DAG, DL), LoV,
17197                        DAG.getIntPtrConstant(0, DL));
17198   }
17199 
17200   // TODO: If minimizing size and one of the inputs is a zero vector and the
17201   // zero vector has only one use, we could use a VPERM2X128 to save the
17202   // instruction bytes needed to explicitly generate the zero vector.
17203 
17204   // Blends are faster and handle all the non-lane-crossing cases.
17205   if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
17206                                           Subtarget, DAG))
17207     return Blend;
17208 
17209   // If either input operand is a zero vector, use VPERM2X128 because its mask
17210   // allows us to replace the zero input with an implicit zero.
17211   if (!IsLowZero && !IsHighZero) {
17212     // Check for patterns which can be matched with a single insert of a 128-bit
17213     // subvector.
17214     bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2);
17215     if (OnlyUsesV1 || isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2)) {
17216 
17217       // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
17218       // this will likely become vinsertf128 which can't fold a 256-bit memop.
17219       if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
17220         MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
17221         SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
17222                                      OnlyUsesV1 ? V1 : V2,
17223                                      DAG.getIntPtrConstant(0, DL));
17224         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
17225                            DAG.getIntPtrConstant(2, DL));
17226       }
17227     }
17228 
17229     // Try to use SHUF128 if possible.
17230     if (Subtarget.hasVLX()) {
17231       if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
17232         unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
17233                             ((WidenedMask[1] % 2) << 1);
17234         return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
17235                            DAG.getTargetConstant(PermMask, DL, MVT::i8));
17236       }
17237     }
17238   }
17239 
17240   // Otherwise form a 128-bit permutation. After accounting for undefs,
17241   // convert the 64-bit shuffle mask selection values into 128-bit
17242   // selection bits by dividing the indexes by 2 and shifting into positions
17243   // defined by a vperm2*128 instruction's immediate control byte.
17244 
17245   // The immediate permute control byte looks like this:
17246   //    [1:0] - select 128 bits from sources for low half of destination
17247   //    [2]   - ignore
17248   //    [3]   - zero low half of destination
17249   //    [5:4] - select 128 bits from sources for high half of destination
17250   //    [6]   - ignore
17251   //    [7]   - zero high half of destination
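        // For example (illustrative), the v4i64 mask <2, 3, 4, 5> widens to
        // <1, 2>, giving PermMask = (1 << 0) | (2 << 4) = 0x21: the high half
        // of V1 feeds the low half of the result and the low half of V2 feeds
        // the high half.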
17252 
17253   assert((WidenedMask[0] >= 0 || IsLowZero) &&
17254          (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
17255 
17256   unsigned PermMask = 0;
17257   PermMask |= IsLowZero  ? 0x08 : (WidenedMask[0] << 0);
17258   PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
17259 
17260   // Check the immediate mask and replace unused sources with undef.
17261   if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
17262     V1 = DAG.getUNDEF(VT);
17263   if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
17264     V2 = DAG.getUNDEF(VT);
17265 
17266   return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
17267                      DAG.getTargetConstant(PermMask, DL, MVT::i8));
17268 }
17269 
17270 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
17271 /// shuffling each lane.
17272 ///
17273 /// This attempts to create a repeated lane shuffle where each lane uses one
17274 /// or two of the lanes of the inputs. The lanes of the input vectors are
17275 /// shuffled in one or two independent shuffles to get the lanes into the
17276 /// position needed by the final shuffle.
17277 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
17278     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
17279     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
17280   assert(!V2.isUndef() && "This is only useful with multiple inputs.");
17281 
17282   if (is128BitLaneRepeatedShuffleMask(VT, Mask))
17283     return SDValue();
17284 
17285   int NumElts = Mask.size();
17286   int NumLanes = VT.getSizeInBits() / 128;
17287   int NumLaneElts = 128 / VT.getScalarSizeInBits();
17288   SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
17289   SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
17290 
17291   // First pass will try to fill in the RepeatMask from lanes that need two
17292   // sources.
17293   for (int Lane = 0; Lane != NumLanes; ++Lane) {
17294     int Srcs[2] = {-1, -1};
17295     SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
17296     for (int i = 0; i != NumLaneElts; ++i) {
17297       int M = Mask[(Lane * NumLaneElts) + i];
17298       if (M < 0)
17299         continue;
17300       // Determine which of the possible input lanes (NumLanes from each source)
17301       // this element comes from. Assign that as one of the sources for this
17302       // lane. We can assign up to 2 sources for this lane. If we run out of
17303       // sources we can't do anything.
17304       int LaneSrc = M / NumLaneElts;
17305       int Src;
17306       if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
17307         Src = 0;
17308       else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
17309         Src = 1;
17310       else
17311         return SDValue();
17312 
17313       Srcs[Src] = LaneSrc;
17314       InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
17315     }
17316 
17317     // If this lane has two sources, see if it fits with the repeat mask so far.
17318     if (Srcs[1] < 0)
17319       continue;
17320 
17321     LaneSrcs[Lane][0] = Srcs[0];
17322     LaneSrcs[Lane][1] = Srcs[1];
17323 
17324     auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
17325       assert(M1.size() == M2.size() && "Unexpected mask size");
17326       for (int i = 0, e = M1.size(); i != e; ++i)
17327         if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
17328           return false;
17329       return true;
17330     };
17331 
17332     auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
17333       assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
17334       for (int i = 0, e = MergedMask.size(); i != e; ++i) {
17335         int M = Mask[i];
17336         if (M < 0)
17337           continue;
17338         assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
17339                "Unexpected mask element");
17340         MergedMask[i] = M;
17341       }
17342     };
17343 
17344     if (MatchMasks(InLaneMask, RepeatMask)) {
17345       // Merge this lane mask into the final repeat mask.
17346       MergeMasks(InLaneMask, RepeatMask);
17347       continue;
17348     }
17349 
17350     // Didn't find a match. Swap the operands and try again.
17351     std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
17352     ShuffleVectorSDNode::commuteMask(InLaneMask);
17353 
17354     if (MatchMasks(InLaneMask, RepeatMask)) {
17355       // Merge this lane mask into the final repeat mask.
17356       MergeMasks(InLaneMask, RepeatMask);
17357       continue;
17358     }
17359 
17360     // Couldn't find a match with the operands in either order.
17361     return SDValue();
17362   }
17363 
17364   // Now handle any lanes with only one source.
17365   for (int Lane = 0; Lane != NumLanes; ++Lane) {
17366     // If this lane has already been processed, skip it.
17367     if (LaneSrcs[Lane][0] >= 0)
17368       continue;
17369 
17370     for (int i = 0; i != NumLaneElts; ++i) {
17371       int M = Mask[(Lane * NumLaneElts) + i];
17372       if (M < 0)
17373         continue;
17374 
17375       // If RepeatMask isn't defined yet we can define it ourselves.
17376       if (RepeatMask[i] < 0)
17377         RepeatMask[i] = M % NumLaneElts;
17378 
17379       if (RepeatMask[i] < NumElts) {
17380         if (RepeatMask[i] != M % NumLaneElts)
17381           return SDValue();
17382         LaneSrcs[Lane][0] = M / NumLaneElts;
17383       } else {
17384         if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
17385           return SDValue();
17386         LaneSrcs[Lane][1] = M / NumLaneElts;
17387       }
17388     }
17389 
17390     if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
17391       return SDValue();
17392   }
17393 
17394   SmallVector<int, 16> NewMask(NumElts, -1);
17395   for (int Lane = 0; Lane != NumLanes; ++Lane) {
17396     int Src = LaneSrcs[Lane][0];
17397     for (int i = 0; i != NumLaneElts; ++i) {
17398       int M = -1;
17399       if (Src >= 0)
17400         M = Src * NumLaneElts + i;
17401       NewMask[Lane * NumLaneElts + i] = M;
17402     }
17403   }
17404   SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17405   // Ensure we didn't get back the shuffle we started with.
17406   // FIXME: This is a hack to make up for some splat handling code in
17407   // getVectorShuffle.
17408   if (isa<ShuffleVectorSDNode>(NewV1) &&
17409       cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
17410     return SDValue();
17411 
17412   for (int Lane = 0; Lane != NumLanes; ++Lane) {
17413     int Src = LaneSrcs[Lane][1];
17414     for (int i = 0; i != NumLaneElts; ++i) {
17415       int M = -1;
17416       if (Src >= 0)
17417         M = Src * NumLaneElts + i;
17418       NewMask[Lane * NumLaneElts + i] = M;
17419     }
17420   }
17421   SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17422   // Ensure we didn't get back the shuffle we started with.
17423   // FIXME: This is a hack to make up for some splat handling code in
17424   // getVectorShuffle.
17425   if (isa<ShuffleVectorSDNode>(NewV2) &&
17426       cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
17427     return SDValue();
17428 
17429   for (int i = 0; i != NumElts; ++i) {
17430     NewMask[i] = RepeatMask[i % NumLaneElts];
17431     if (NewMask[i] < 0)
17432       continue;
17433 
17434     NewMask[i] += (i / NumLaneElts) * NumLaneElts;
17435   }
17436   return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
17437 }
17438 
17439 /// If the input shuffle mask results in a vector that is undefined in all upper
17440 /// or lower half elements and that mask accesses only 2 halves of the
17441 /// shuffle's operands, return true. A mask of half the width with mask indexes
17442 /// adjusted to access the extracted halves of the original shuffle operands is
17443 /// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
17444 /// lower half of each input operand is accessed.
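      /// For example (illustrative), the v8i32 mask <u,u,u,u, 0,1,8,9> has an
      /// undef lower half; its upper half reads only the low halves of both
      /// operands, so this returns HalfIdx1 = 0 (lower V1), HalfIdx2 = 2
      /// (lower V2) and HalfMask = <0,1,4,5>.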
17445 static bool
17446 getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
17447                    int &HalfIdx1, int &HalfIdx2) {
17448   assert((Mask.size() == HalfMask.size() * 2) &&
17449          "Expected input mask to be twice as long as output");
17450 
17451   // Exactly one half of the result must be undef to allow narrowing.
17452   bool UndefLower = isUndefLowerHalf(Mask);
17453   bool UndefUpper = isUndefUpperHalf(Mask);
17454   if (UndefLower == UndefUpper)
17455     return false;
17456 
17457   unsigned HalfNumElts = HalfMask.size();
17458   unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
17459   HalfIdx1 = -1;
17460   HalfIdx2 = -1;
17461   for (unsigned i = 0; i != HalfNumElts; ++i) {
17462     int M = Mask[i + MaskIndexOffset];
17463     if (M < 0) {
17464       HalfMask[i] = M;
17465       continue;
17466     }
17467 
17468     // Determine which of the 4 half vectors this element is from.
17469     // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
17470     int HalfIdx = M / HalfNumElts;
17471 
17472     // Determine the element index into its half vector source.
17473     int HalfElt = M % HalfNumElts;
17474 
17475     // We can shuffle with up to 2 half vectors, set the new 'half'
17476     // shuffle mask accordingly.
17477     if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
17478       HalfMask[i] = HalfElt;
17479       HalfIdx1 = HalfIdx;
17480       continue;
17481     }
17482     if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
17483       HalfMask[i] = HalfElt + HalfNumElts;
17484       HalfIdx2 = HalfIdx;
17485       continue;
17486     }
17487 
17488     // Too many half vectors referenced.
17489     return false;
17490   }
17491 
17492   return true;
17493 }
17494 
17495 /// Given the output values from getHalfShuffleMask(), create a half width
17496 /// shuffle of extracted vectors followed by an insert back to full width.
17497 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
17498                                      ArrayRef<int> HalfMask, int HalfIdx1,
17499                                      int HalfIdx2, bool UndefLower,
17500                                      SelectionDAG &DAG, bool UseConcat = false) {
17501   assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
17502   assert(V1.getValueType().isSimple() && "Expecting only simple types");
17503 
17504   MVT VT = V1.getSimpleValueType();
17505   MVT HalfVT = VT.getHalfNumVectorElementsVT();
17506   unsigned HalfNumElts = HalfVT.getVectorNumElements();
17507 
17508   auto getHalfVector = [&](int HalfIdx) {
17509     if (HalfIdx < 0)
17510       return DAG.getUNDEF(HalfVT);
17511     SDValue V = (HalfIdx < 2 ? V1 : V2);
17512     HalfIdx = (HalfIdx % 2) * HalfNumElts;
17513     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
17514                        DAG.getIntPtrConstant(HalfIdx, DL));
17515   };
17516 
17517   // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
17518   SDValue Half1 = getHalfVector(HalfIdx1);
17519   SDValue Half2 = getHalfVector(HalfIdx2);
17520   SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
17521   if (UseConcat) {
17522     SDValue Op0 = V;
17523     SDValue Op1 = DAG.getUNDEF(HalfVT);
17524     if (UndefLower)
17525       std::swap(Op0, Op1);
17526     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
17527   }
17528 
17529   unsigned Offset = UndefLower ? HalfNumElts : 0;
17530   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
17531                      DAG.getIntPtrConstant(Offset, DL));
17532 }
17533 
17534 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
17535 /// This allows for fast cases such as subvector extraction/insertion
17536 /// or shuffling smaller vector types which can lower more efficiently.
17537 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
17538                                          SDValue V2, ArrayRef<int> Mask,
17539                                          const X86Subtarget &Subtarget,
17540                                          SelectionDAG &DAG) {
17541   assert((VT.is256BitVector() || VT.is512BitVector()) &&
17542          "Expected 256-bit or 512-bit vector");
17543 
17544   bool UndefLower = isUndefLowerHalf(Mask);
17545   if (!UndefLower && !isUndefUpperHalf(Mask))
17546     return SDValue();
17547 
17548   assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
17549          "Completely undef shuffle mask should have been simplified already");
17550 
17551   // Upper half is undef and lower half is whole upper subvector.
17552   // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
17553   MVT HalfVT = VT.getHalfNumVectorElementsVT();
17554   unsigned HalfNumElts = HalfVT.getVectorNumElements();
17555   if (!UndefLower &&
17556       isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
17557     SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
17558                              DAG.getIntPtrConstant(HalfNumElts, DL));
17559     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
17560                        DAG.getIntPtrConstant(0, DL));
17561   }
17562 
17563   // Lower half is undef and upper half is whole lower subvector.
17564   // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
17565   if (UndefLower &&
17566       isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
17567     SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
17568                              DAG.getIntPtrConstant(0, DL));
17569     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
17570                        DAG.getIntPtrConstant(HalfNumElts, DL));
17571   }
17572 
17573   int HalfIdx1, HalfIdx2;
17574   SmallVector<int, 8> HalfMask(HalfNumElts);
17575   if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
17576     return SDValue();
17577 
17578   assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
17579 
17580   // Only shuffle the halves of the inputs when useful.
17581   unsigned NumLowerHalves =
17582       (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
17583   unsigned NumUpperHalves =
17584       (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
17585   assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
17586 
17587   // Determine the larger pattern of undef/halves, then decide if it's worth
17588   // splitting the shuffle based on subtarget capabilities and types.
17589   unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
17590   if (!UndefLower) {
17591     // XXXXuuuu: no insert is needed.
17592     // Always extract lowers when setting lower - these are all free subreg ops.
17593     if (NumUpperHalves == 0)
17594       return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
17595                                    UndefLower, DAG);
17596 
17597     if (NumUpperHalves == 1) {
17598       // AVX2 has efficient 32/64-bit element cross-lane shuffles.
17599       if (Subtarget.hasAVX2()) {
17600         // extract128 + vunpckhps/vshufps is better than vblend + vpermps.
17601         if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
17602             !is128BitUnpackShuffleMask(HalfMask, DAG) &&
17603             (!isSingleSHUFPSMask(HalfMask) ||
17604              Subtarget.hasFastVariableCrossLaneShuffle()))
17605           return SDValue();
17606         // If this is a unary shuffle (assume that the 2nd operand is
17607         // canonicalized to undef), then we can use vpermpd. Otherwise, we
17608         // are better off extracting the upper half of 1 operand and using a
17609         // narrow shuffle.
17610         if (EltWidth == 64 && V2.isUndef())
17611           return SDValue();
17612       }
17613       // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
17614       if (Subtarget.hasAVX512() && VT.is512BitVector())
17615         return SDValue();
17616       // Extract + narrow shuffle is better than the wide alternative.
17617       return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
17618                                    UndefLower, DAG);
17619     }
17620 
17621     // Don't extract both uppers, instead shuffle and then extract.
17622     assert(NumUpperHalves == 2 && "Half vector count went wrong");
17623     return SDValue();
17624   }
17625 
17626   // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
17627   if (NumUpperHalves == 0) {
17628     // AVX2 has efficient 64-bit element cross-lane shuffles.
17629     // TODO: Refine to account for unary shuffle, splat, and other masks?
17630     if (Subtarget.hasAVX2() && EltWidth == 64)
17631       return SDValue();
17632     // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
17633     if (Subtarget.hasAVX512() && VT.is512BitVector())
17634       return SDValue();
17635     // Narrow shuffle + insert is better than the wide alternative.
17636     return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
17637                                  UndefLower, DAG);
17638   }
17639 
17640   // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
17641   return SDValue();
17642 }
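
// For instance, a v8f32 shuffle with mask <u,u,u,u,0,1,8,9> has an undef lower
// half and only references the lower halves of V1 and V2 (HalfIdx1 = 0,
// HalfIdx2 = 2, so NumUpperHalves == 0). This routine then extracts both lower
// halves, shuffles them with the v4f32 mask <0,1,4,5>, and inserts the result
// into the upper half of an otherwise undef vector.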
17643 
17644 /// Handle case where shuffle sources are coming from the same 128-bit lane and
17645 /// every lane can be represented as the same repeating mask - allowing us to
17646 /// shuffle the sources with the repeating shuffle and then permute the result
17647 /// to the destination lanes.
17648 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
17649     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
17650     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
17651   int NumElts = VT.getVectorNumElements();
17652   int NumLanes = VT.getSizeInBits() / 128;
17653   int NumLaneElts = NumElts / NumLanes;
17654 
17655   // On AVX2 we may be able to just shuffle the lowest elements and then
17656   // broadcast the result.
17657   if (Subtarget.hasAVX2()) {
17658     for (unsigned BroadcastSize : {16, 32, 64}) {
17659       if (BroadcastSize <= VT.getScalarSizeInBits())
17660         continue;
17661       int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
17662 
17663       // Attempt to match a repeating pattern every NumBroadcastElts,
17664       // accounting for UNDEFs, but only if it references the lowest
17665       // 128-bit lane of the inputs.
17666       auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
17667         for (int i = 0; i != NumElts; i += NumBroadcastElts)
17668           for (int j = 0; j != NumBroadcastElts; ++j) {
17669             int M = Mask[i + j];
17670             if (M < 0)
17671               continue;
17672             int &R = RepeatMask[j];
17673             if (0 != ((M % NumElts) / NumLaneElts))
17674               return false;
17675             if (0 <= R && R != M)
17676               return false;
17677             R = M;
17678           }
17679         return true;
17680       };
17681 
17682       SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
17683       if (!FindRepeatingBroadcastMask(RepeatMask))
17684         continue;
17685 
17686       // Shuffle the (lowest) repeated elements in place for broadcast.
17687       SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
17688 
17689       // Shuffle the actual broadcast.
17690       SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
17691       for (int i = 0; i != NumElts; i += NumBroadcastElts)
17692         for (int j = 0; j != NumBroadcastElts; ++j)
17693           BroadcastMask[i + j] = j;
17694       return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
17695                                   BroadcastMask);
17696     }
17697   }
17698 
17699   // Bail if the shuffle mask doesn't cross 128-bit lanes.
17700   if (!is128BitLaneCrossingShuffleMask(VT, Mask))
17701     return SDValue();
17702 
17703   // Bail if we already have a repeated lane shuffle mask.
17704   if (is128BitLaneRepeatedShuffleMask(VT, Mask))
17705     return SDValue();
17706 
17707   // Helper to look for a repeated mask in each split sub-lane, and to check
17708   // that those sub-lanes can then be permuted into place.
17709   auto ShuffleSubLanes = [&](int SubLaneScale) {
17710     int NumSubLanes = NumLanes * SubLaneScale;
17711     int NumSubLaneElts = NumLaneElts / SubLaneScale;
17712 
17713     // Check that all the sources are coming from the same lane and see if we
17714     // can form a repeating shuffle mask (local to each sub-lane). At the same
17715     // time, determine the source sub-lane for each destination sub-lane.
17716     int TopSrcSubLane = -1;
17717     SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
17718     SmallVector<SmallVector<int, 8>> RepeatedSubLaneMasks(
17719         SubLaneScale,
17720         SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef));
17721 
17722     for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
17723       // Extract the sub-lane mask, check that it all comes from the same lane
17724       // and normalize the mask entries to come from the first lane.
17725       int SrcLane = -1;
17726       SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
17727       for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
17728         int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
17729         if (M < 0)
17730           continue;
17731         int Lane = (M % NumElts) / NumLaneElts;
17732         if ((0 <= SrcLane) && (SrcLane != Lane))
17733           return SDValue();
17734         SrcLane = Lane;
17735         int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
17736         SubLaneMask[Elt] = LocalM;
17737       }
17738 
17739       // Whole sub-lane is UNDEF.
17740       if (SrcLane < 0)
17741         continue;
17742 
17743       // Attempt to match against the candidate repeated sub-lane masks.
17744       for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
17745         auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
17746           for (int i = 0; i != NumSubLaneElts; ++i) {
17747             if (M1[i] < 0 || M2[i] < 0)
17748               continue;
17749             if (M1[i] != M2[i])
17750               return false;
17751           }
17752           return true;
17753         };
17754 
17755         auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
17756         if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
17757           continue;
17758 
17759         // Merge the sub-lane mask into the matching repeated sub-lane mask.
17760         for (int i = 0; i != NumSubLaneElts; ++i) {
17761           int M = SubLaneMask[i];
17762           if (M < 0)
17763             continue;
17764           assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
17765                  "Unexpected mask element");
17766           RepeatedSubLaneMask[i] = M;
17767         }
17768 
17769         // Track the top most source sub-lane - by setting the remaining to
17770         // UNDEF we can greatly simplify shuffle matching.
17771         int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
17772         TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
17773         Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
17774         break;
17775       }
17776 
17777       // Bail if we failed to find a matching repeated sub-lane mask.
17778       if (Dst2SrcSubLanes[DstSubLane] < 0)
17779         return SDValue();
17780     }
17781     assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
17782            "Unexpected source lane");
17783 
17784     // Create a repeating shuffle mask for the entire vector.
17785     SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
17786     for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
17787       int Lane = SubLane / SubLaneScale;
17788       auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
17789       for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
17790         int M = RepeatedSubLaneMask[Elt];
17791         if (M < 0)
17792           continue;
17793         int Idx = (SubLane * NumSubLaneElts) + Elt;
17794         RepeatedMask[Idx] = M + (Lane * NumLaneElts);
17795       }
17796     }
17797 
17798     // Shuffle each source sub-lane to its destination.
17799     SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
17800     for (int i = 0; i != NumElts; i += NumSubLaneElts) {
17801       int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
17802       if (SrcSubLane < 0)
17803         continue;
17804       for (int j = 0; j != NumSubLaneElts; ++j)
17805         SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
17806     }
17807 
17808     // Avoid returning the same shuffle operation.
17809     // v8i32 = vector_shuffle<0,1,4,5,2,3,6,7> t5, undef:v8i32
17810     if (RepeatedMask == Mask || SubLaneMask == Mask)
17811       return SDValue();
17812 
17813     SDValue RepeatedShuffle =
17814         DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
17815 
17816     return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
17817                                 SubLaneMask);
17818   };
17819 
17820   // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
17821   // (with PERMQ/PERMPD). On AVX2/AVX512BW targets, permuting 32-bit sub-lanes,
17822   // even with a variable shuffle, can be worth it for v32i8/v64i8 vectors.
17823   // Otherwise we can only permute whole 128-bit lanes.
17824   int MinSubLaneScale = 1, MaxSubLaneScale = 1;
17825   if (Subtarget.hasAVX2() && VT.is256BitVector()) {
17826     bool OnlyLowestElts = isUndefOrInRange(Mask, 0, NumLaneElts);
17827     MinSubLaneScale = 2;
17828     MaxSubLaneScale =
17829         (!OnlyLowestElts && V2.isUndef() && VT == MVT::v32i8) ? 4 : 2;
17830   }
17831   if (Subtarget.hasBWI() && VT == MVT::v64i8)
17832     MinSubLaneScale = MaxSubLaneScale = 4;
17833 
17834   for (int Scale = MinSubLaneScale; Scale <= MaxSubLaneScale; Scale *= 2)
17835     if (SDValue Shuffle = ShuffleSubLanes(Scale))
17836       return Shuffle;
17837 
17838   return SDValue();
17839 }
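
// A worked example: the v8f32 mask <1,0,3,2,1,0,3,2> crosses 128-bit lanes
// (the upper half reads the lower lane) but every 64-bit sub-lane repeats a
// pattern taken from the low lane, so with SubLaneScale == 2 this decomposes
// into an in-lane repeating shuffle followed by a sub-lane permute, roughly:
//   t = vector_shuffle<1,0,3,2,u,u,u,u> V1, undef
//   r = vector_shuffle<0,1,2,3,0,1,2,3> t, undef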
17840 
17841 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
17842                                    bool &ForceV1Zero, bool &ForceV2Zero,
17843                                    unsigned &ShuffleImm, ArrayRef<int> Mask,
17844                                    const APInt &Zeroable) {
17845   int NumElts = VT.getVectorNumElements();
17846   assert(VT.getScalarSizeInBits() == 64 &&
17847          (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
17848          "Unexpected data type for VSHUFPD");
17849   assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
17850          "Illegal shuffle mask");
17851 
17852   bool ZeroLane[2] = { true, true };
17853   for (int i = 0; i < NumElts; ++i)
17854     ZeroLane[i & 1] &= Zeroable[i];
17855 
17856   // Mask for V8F64: 0/1,  8/9,  2/3,  10/11, 4/5, ..
17857   // Mask for V4F64: 0/1,  4/5,  2/3,  6/7, ..
17858   ShuffleImm = 0;
17859   bool ShufpdMask = true;
17860   bool CommutableMask = true;
17861   for (int i = 0; i < NumElts; ++i) {
17862     if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
17863       continue;
17864     if (Mask[i] < 0)
17865       return false;
17866     int Val = (i & 6) + NumElts * (i & 1);
17867     int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
17868     if (Mask[i] < Val || Mask[i] > Val + 1)
17869       ShufpdMask = false;
17870     if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
17871       CommutableMask = false;
17872     ShuffleImm |= (Mask[i] % 2) << i;
17873   }
17874 
17875   if (!ShufpdMask && !CommutableMask)
17876     return false;
17877 
17878   if (!ShufpdMask && CommutableMask)
17879     std::swap(V1, V2);
17880 
17881   ForceV1Zero = ZeroLane[0];
17882   ForceV2Zero = ZeroLane[1];
17883   return true;
17884 }
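
// For example, the v4f64 mask <0,5,2,7> stays within each 128-bit lane and
// takes even result elements from V1 and odd result elements from V2, so it
// matches with ShuffleImm = (0%2) | (5%2)<<1 | (2%2)<<2 | (7%2)<<3 = 0xA,
// i.e. a single VSHUFPD of V1 and V2.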
17885 
17886 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
17887                                       SDValue V2, ArrayRef<int> Mask,
17888                                       const APInt &Zeroable,
17889                                       const X86Subtarget &Subtarget,
17890                                       SelectionDAG &DAG) {
17891   assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
17892          "Unexpected data type for VSHUFPD");
17893 
17894   unsigned Immediate = 0;
17895   bool ForceV1Zero = false, ForceV2Zero = false;
17896   if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
17897                               Mask, Zeroable))
17898     return SDValue();
17899 
17900   // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
17901   if (ForceV1Zero)
17902     V1 = getZeroVector(VT, Subtarget, DAG, DL);
17903   if (ForceV2Zero)
17904     V2 = getZeroVector(VT, Subtarget, DAG, DL);
17905 
17906   return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
17907                      DAG.getTargetConstant(Immediate, DL, MVT::i8));
17908 }
17909 
17910 // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
17911 // by zeroable elements in the remaining 24 elements. Turn this into two
17912 // vmovqb instructions shuffled together.
17913 static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
17914                                              SDValue V1, SDValue V2,
17915                                              ArrayRef<int> Mask,
17916                                              const APInt &Zeroable,
17917                                              SelectionDAG &DAG) {
17918   assert(VT == MVT::v32i8 && "Unexpected type!");
17919 
17920   // The first 8 indices should be every 8th element.
17921   if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
17922     return SDValue();
17923 
17924   // Remaining elements need to be zeroable.
17925   if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
17926     return SDValue();
17927 
17928   V1 = DAG.getBitcast(MVT::v4i64, V1);
17929   V2 = DAG.getBitcast(MVT::v4i64, V2);
17930 
17931   V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
17932   V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
17933 
17934   // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
17935   // the upper bits of the result using an unpckldq.
17936   SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
17937                                         { 0, 1, 2, 3, 16, 17, 18, 19,
17938                                           4, 5, 6, 7, 20, 21, 22, 23 });
17939   // Insert the unpckldq into a zero vector to widen to v32i8.
17940   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
17941                      DAG.getConstant(0, DL, MVT::v32i8), Unpack,
17942                      DAG.getIntPtrConstant(0, DL));
17943 }
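
// For example, with mask <0,8,16,24,32,40,48,56> followed by 24 zeroable
// elements, the two VTRUNCs leave bytes {0,8,16,24} of V1 and {32,40,48,56}
// of V2 in the low dwords of two v16i8 values (upper 12 bytes zero), the
// unpckldq-style shuffle interleaves those dwords into bytes 0-7 with zeros
// above, and the final insert returns a zero-extended v32i8.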
17944 
17945 // a = shuffle v1, v2, mask1    ; interleaving lower lanes of v1 and v2
17946 // b = shuffle v1, v2, mask2    ; interleaving higher lanes of v1 and v2
17947 //     =>
17948 // ul = unpckl v1, v2
17949 // uh = unpckh v1, v2
17950 // a = vperm ul, uh
17951 // b = vperm ul, uh
17952 //
17953 // Pattern-match interleave(256b v1, 256b v2) -> 512b v3 and lower it into unpck
17954 // and permute. We cannot directly match v3 because it is split into two
17955 // 256-bit vectors in earlier isel stages. Therefore, this function matches a
17956 // pair of 256-bit shuffles and makes sure the masks are consecutive.
17957 //
17958 // Once unpck and permute nodes are created, the permute corresponding to this
17959 // shuffle is returned, while the other permute replaces the other half of the
17960 // shuffle in the selection dag.
17961 static SDValue lowerShufflePairAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
17962                                                  SDValue V1, SDValue V2,
17963                                                  ArrayRef<int> Mask,
17964                                                  SelectionDAG &DAG) {
17965   if (VT != MVT::v8f32 && VT != MVT::v8i32 && VT != MVT::v16i16 &&
17966       VT != MVT::v32i8)
17967     return SDValue();
17968   // <B0, B1, B0+1, B1+1, ..., >
17969   auto IsInterleavingPattern = [&](ArrayRef<int> Mask, unsigned Begin0,
17970                                    unsigned Begin1) {
17971     size_t Size = Mask.size();
17972     assert(Size % 2 == 0 && "Expected even mask size");
17973     for (unsigned I = 0; I < Size; I += 2) {
17974       if (Mask[I] != (int)(Begin0 + I / 2) ||
17975           Mask[I + 1] != (int)(Begin1 + I / 2))
17976         return false;
17977     }
17978     return true;
17979   };
17980   // Check which half of the interleave this shuffle node is.
17981   int NumElts = VT.getVectorNumElements();
17982   size_t FirstQtr = NumElts / 2;
17983   size_t ThirdQtr = NumElts + NumElts / 2;
17984   bool IsFirstHalf = IsInterleavingPattern(Mask, 0, NumElts);
17985   bool IsSecondHalf = IsInterleavingPattern(Mask, FirstQtr, ThirdQtr);
17986   if (!IsFirstHalf && !IsSecondHalf)
17987     return SDValue();
17988 
17989   // Find the intersection between shuffle users of V1 and V2.
17990   SmallVector<SDNode *, 2> Shuffles;
17991   for (SDNode *User : V1->uses())
17992     if (User->getOpcode() == ISD::VECTOR_SHUFFLE && User->getOperand(0) == V1 &&
17993         User->getOperand(1) == V2)
17994       Shuffles.push_back(User);
17995   // Limit user size to two for now.
17996   if (Shuffles.size() != 2)
17997     return SDValue();
17998   // Work out which smaller shuffle is the first half of the 512-bit shuffle
17999   // and which is the second half.
17999   auto *SVN1 = cast<ShuffleVectorSDNode>(Shuffles[0]);
18000   auto *SVN2 = cast<ShuffleVectorSDNode>(Shuffles[1]);
18001   SDNode *FirstHalf;
18002   SDNode *SecondHalf;
18003   if (IsInterleavingPattern(SVN1->getMask(), 0, NumElts) &&
18004       IsInterleavingPattern(SVN2->getMask(), FirstQtr, ThirdQtr)) {
18005     FirstHalf = Shuffles[0];
18006     SecondHalf = Shuffles[1];
18007   } else if (IsInterleavingPattern(SVN1->getMask(), FirstQtr, ThirdQtr) &&
18008              IsInterleavingPattern(SVN2->getMask(), 0, NumElts)) {
18009     FirstHalf = Shuffles[1];
18010     SecondHalf = Shuffles[0];
18011   } else {
18012     return SDValue();
18013   }
18014   // Lower into unpck and perm. Return the perm of this shuffle and replace
18015   // the other.
18016   SDValue Unpckl = DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
18017   SDValue Unpckh = DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
18018   SDValue Perm1 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
18019                               DAG.getTargetConstant(0x20, DL, MVT::i8));
18020   SDValue Perm2 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
18021                               DAG.getTargetConstant(0x31, DL, MVT::i8));
18022   if (IsFirstHalf) {
18023     DAG.ReplaceAllUsesWith(SecondHalf, &Perm2);
18024     return Perm1;
18025   }
18026   DAG.ReplaceAllUsesWith(FirstHalf, &Perm1);
18027   return Perm2;
18028 }
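
// A worked v8i32 example: if this node has mask <0,8,1,9,2,10,3,11> and its
// sibling shuffle of the same V1/V2 has mask <4,12,5,13,6,14,7,15>, then with
//   ul = unpckl V1, V2   ; <a0,b0,a1,b1, a4,b4,a5,b5>
//   uh = unpckh V1, V2   ; <a2,b2,a3,b3, a6,b6,a7,b7>
// VPERM2X128(ul, uh, 0x20) reproduces the first half of the interleave and
// VPERM2X128(ul, uh, 0x31) reproduces the second half.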
18029 
18030 /// Handle lowering of 4-lane 64-bit floating point shuffles.
18031 ///
18032 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
18033 /// isn't available.
18034 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18035                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18036                                  const X86Subtarget &Subtarget,
18037                                  SelectionDAG &DAG) {
18038   assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
18039   assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
18040   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
18041 
18042   if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
18043                                      Subtarget, DAG))
18044     return V;
18045 
18046   if (V2.isUndef()) {
18047     // Check for being able to broadcast a single element.
18048     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
18049                                                     Mask, Subtarget, DAG))
18050       return Broadcast;
18051 
18052     // Use low duplicate instructions for masks that match their pattern.
18053     if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
18054       return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
18055 
18056     if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
18057       // Non-half-crossing single input shuffles can be lowered with an
18058       // interleaved permutation.
18059       unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
18060                               ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
18061       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
18062                          DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
18063     }
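    // For example, in the non-crossing case above, the mask <1,0,3,2> yields
    // VPERMILPMask = 0b0101 (0x5), i.e. a single VPERMILPD that swaps the two
    // doubles within each 128-bit lane.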
18064 
18065     // With AVX2 we have direct support for this permutation.
18066     if (Subtarget.hasAVX2())
18067       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
18068                          getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
18069 
18070     // Try to create an in-lane repeating shuffle mask and then shuffle the
18071     // results into the target lanes.
18072     if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18073             DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
18074       return V;
18075 
18076     // Try to permute the lanes and then use a per-lane permute.
18077     if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
18078                                                         Mask, DAG, Subtarget))
18079       return V;
18080 
18081     // Otherwise, fall back.
18082     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
18083                                                DAG, Subtarget);
18084   }
18085 
18086   // Use dedicated unpack instructions for masks that match their pattern.
18087   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
18088     return V;
18089 
18090   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
18091                                           Zeroable, Subtarget, DAG))
18092     return Blend;
18093 
18094   // Check if the blend happens to exactly fit that of SHUFPD.
18095   if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
18096                                           Zeroable, Subtarget, DAG))
18097     return Op;
18098 
18099   bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
18100   bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
18101 
18102   // If we have lane crossing shuffles AND they don't all come from the lower
18103   // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
18104   // TODO: Handle BUILD_VECTOR sources, which getVectorShuffle currently
18105   // canonicalizes to a blend of splat; that isn't necessary for this combine.
18106   if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
18107       !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
18108       (V1.getOpcode() != ISD::BUILD_VECTOR) &&
18109       (V2.getOpcode() != ISD::BUILD_VECTOR))
18110     return lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2, Mask, DAG);
18111 
18112   // If we have one input in place, then we can permute the other input and
18113   // blend the result.
18114   if (V1IsInPlace || V2IsInPlace)
18115     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
18116                                                 Subtarget, DAG);
18117 
18118   // Try to create an in-lane repeating shuffle mask and then shuffle the
18119   // results into the target lanes.
18120   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18121           DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
18122     return V;
18123 
18124   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18125   // shuffle. However, if we have AVX2 and either input is already in place,
18126   // we will be able to shuffle the other input even across lanes in a single
18127   // instruction, so skip this pattern.
18128   if (!(Subtarget.hasAVX2() && (V1IsInPlace || V2IsInPlace)))
18129     if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
18130             DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
18131       return V;
18132 
18133   // If we have VLX support, we can use VEXPAND.
18134   if (Subtarget.hasVLX())
18135     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
18136                                          DAG, Subtarget))
18137       return V;
18138 
18139   // If we have AVX2 then we always want to lower with a blend because at v4 we
18140   // can fully permute the elements.
18141   if (Subtarget.hasAVX2())
18142     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
18143                                                 Subtarget, DAG);
18144 
18145   // Otherwise fall back on generic lowering.
18146   return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
18147                                     Subtarget, DAG);
18148 }
18149 
18150 /// Handle lowering of 4-lane 64-bit integer shuffles.
18151 ///
18152 /// This routine is only called when we have AVX2 and thus a reasonable
18153 /// instruction set for v4i64 shuffling.
18154 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18155                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18156                                  const X86Subtarget &Subtarget,
18157                                  SelectionDAG &DAG) {
18158   assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
18159   assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
18160   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
18161   assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
18162 
18163   if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
18164                                      Subtarget, DAG))
18165     return V;
18166 
18167   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
18168                                           Zeroable, Subtarget, DAG))
18169     return Blend;
18170 
18171   // Check for being able to broadcast a single element.
18172   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
18173                                                   Subtarget, DAG))
18174     return Broadcast;
18175 
18176   if (V2.isUndef()) {
18177     // When the shuffle is repeated in both 128-bit lanes of the vector, we can
18178     // use lower-latency instructions that operate on both lanes at once.
18179     SmallVector<int, 2> RepeatedMask;
18180     if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
18181       SmallVector<int, 4> PSHUFDMask;
18182       narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
18183       return DAG.getBitcast(
18184           MVT::v4i64,
18185           DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
18186                       DAG.getBitcast(MVT::v8i32, V1),
18187                       getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
18188     }
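    // For example, in the lane-repeated case above, v4i64 <1,0,3,2> has the
    // 128-bit-lane repeated mask <1,0>, which, expressed on v8i32 elements,
    // becomes <2,3,0,1> and lowers to a single PSHUFD with immediate 0x4E,
    // swapping the two quadwords within each lane.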
18189 
18190     // AVX2 provides a direct instruction for permuting a single input across
18191     // lanes.
18192     return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
18193                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
18194   }
18195 
18196   // Try to use shift instructions.
18197   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
18198                                           Zeroable, Subtarget, DAG))
18199     return Shift;
18200 
18201   // If we have VLX support, we can use VALIGN or VEXPAND.
18202   if (Subtarget.hasVLX()) {
18203     if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i64, V1, V2, Mask,
18204                                               Subtarget, DAG))
18205       return Rotate;
18206 
18207     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
18208                                          DAG, Subtarget))
18209       return V;
18210   }
18211 
18212   // Try to use PALIGNR.
18213   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
18214                                                 Subtarget, DAG))
18215     return Rotate;
18216 
18217   // Use dedicated unpack instructions for masks that match their pattern.
18218   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
18219     return V;
18220 
18221   bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
18222   bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
18223 
18224   // If we have one input in place, then we can permute the other input and
18225   // blend the result.
18226   if (V1IsInPlace || V2IsInPlace)
18227     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
18228                                                 Subtarget, DAG);
18229 
18230   // Try to create an in-lane repeating shuffle mask and then shuffle the
18231   // results into the target lanes.
18232   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18233           DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
18234     return V;
18235 
18236   // Try to lower to PERMQ(BLENDD(V1,V2)).
18237   if (SDValue V =
18238           lowerShuffleAsBlendAndPermute(DL, MVT::v4i64, V1, V2, Mask, DAG))
18239     return V;
18240 
18241   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18242   // shuffle. However, if we have AVX2 and either input is already in place,
18243   // we will be able to shuffle the other input even across lanes in a single
18244   // instruction, so skip this pattern.
18245   if (!V1IsInPlace && !V2IsInPlace)
18246     if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18247             DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
18248       return Result;
18249 
18250   // Otherwise fall back on generic blend lowering.
18251   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
18252                                               Subtarget, DAG);
18253 }
18254 
18255 /// Handle lowering of 8-lane 32-bit floating point shuffles.
18256 ///
18257 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
18258 /// isn't available.
18259 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18260                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18261                                  const X86Subtarget &Subtarget,
18262                                  SelectionDAG &DAG) {
18263   assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
18264   assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
18265   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
18266 
18267   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
18268                                           Zeroable, Subtarget, DAG))
18269     return Blend;
18270 
18271   // Check for being able to broadcast a single element.
18272   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
18273                                                   Subtarget, DAG))
18274     return Broadcast;
18275 
18276   // If the shuffle mask is repeated in each 128-bit lane, we have many more
18277   // options to efficiently lower the shuffle.
18278   SmallVector<int, 4> RepeatedMask;
18279   if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
18280     assert(RepeatedMask.size() == 4 &&
18281            "Repeated masks must be half the mask width!");
18282 
18283     // Use even/odd duplicate instructions for masks that match their pattern.
18284     if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
18285       return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
18286     if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
18287       return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
18288 
18289     if (V2.isUndef())
18290       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
18291                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
18292 
18293     // Use dedicated unpack instructions for masks that match their pattern.
18294     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
18295       return V;
18296 
18297     // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
18298     // have already handled any direct blends.
18299     return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
18300   }
18301 
18302   // Try to create an in-lane repeating shuffle mask and then shuffle the
18303   // results into the target lanes.
18304   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18305           DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
18306     return V;
18307 
18308   // If we have a single input shuffle with different shuffle patterns in the
18309   // two 128-bit lanes use the variable mask to VPERMILPS.
18310   if (V2.isUndef()) {
18311     if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
18312       SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
18313       return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
18314     }
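    // For example, the non-crossing mask <1,0,3,2,7,5,6,4> is not repeated
    // across lanes, so the branch above lowers it as a VPERMILPS with the
    // constant control vector <1,0,3,2,7,5,6,4>; each control element selects
    // within its own 128-bit lane.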
18315     if (Subtarget.hasAVX2()) {
18316       SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
18317       return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
18318     }
18319     // Otherwise, fall back.
18320     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
18321                                                DAG, Subtarget);
18322   }
18323 
18324   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18325   // shuffle.
18326   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18327           DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
18328     return Result;
18329 
18330   // If we have VLX support, we can use VEXPAND.
18331   if (Subtarget.hasVLX())
18332     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
18333                                          DAG, Subtarget))
18334       return V;
18335 
18336   // Try to match an interleave of two v8f32s and lower them as unpck and
18337   // permutes using ymms. This needs to go before we try to split the vectors.
18338   //
18339   // TODO: Expand this to AVX1. Currently v8i32 is cast to v8f32 and hits
18340   // this path inadvertently.
18341   if (Subtarget.hasAVX2() && !Subtarget.hasAVX512())
18342     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8f32, V1, V2,
18343                                                       Mask, DAG))
18344       return V;
18345 
18346   // For non-AVX512, if the mask matches an in-lane 16-bit unpack pattern, try
18347   // to split, since after splitting we get more efficient code using
18348   // vpunpcklwd and vpunpckhwd than with vblend.
18349   if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32, DAG))
18350     return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, Subtarget,
18351                                       DAG);
18352 
18353   // If we have AVX2 then we always want to lower with a blend because at v8 we
18354   // can fully permute the elements.
18355   if (Subtarget.hasAVX2())
18356     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8f32, V1, V2, Mask,
18357                                                 Subtarget, DAG);
18358 
18359   // Otherwise fall back on generic lowering.
18360   return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
18361                                     Subtarget, DAG);
18362 }
18363 
18364 /// Handle lowering of 8-lane 32-bit integer shuffles.
18365 ///
18366 /// This routine is only called when we have AVX2 and thus a reasonable
18367 /// instruction set for v8i32 shuffling.
18368 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18369                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18370                                  const X86Subtarget &Subtarget,
18371                                  SelectionDAG &DAG) {
18372   assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
18373   assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
18374   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
18375   assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
18376 
18377   // Whenever we can lower this as a zext, that instruction is strictly faster
18378   // than any alternative. It also allows us to fold memory operands into the
18379   // shuffle in many cases.
18380   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
18381                                                    Zeroable, Subtarget, DAG))
18382     return ZExt;
18383 
18384   // Try to match an interleave of two v8i32s and lower them as unpck and
18385   // permutes using ymms. This needs to go before we try to split the vectors.
18386   if (!Subtarget.hasAVX512())
18387     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8i32, V1, V2,
18388                                                       Mask, DAG))
18389       return V;
18390 
18391   // For non-AVX512, if the mask matches an in-lane 16-bit unpack pattern, try
18392   // to split, since after splitting we get more efficient code with
18393   // vpunpcklwd and vpunpckhwd than with vblend.
18394   if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndef() &&
18395       !Subtarget.hasAVX512())
18396     return lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, Subtarget,
18397                                       DAG);
18398 
18399   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
18400                                           Zeroable, Subtarget, DAG))
18401     return Blend;
18402 
18403   // Check for being able to broadcast a single element.
18404   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
18405                                                   Subtarget, DAG))
18406     return Broadcast;
18407 
18408   // If the shuffle mask is repeated in each 128-bit lane we can use more
18409   // efficient instructions that mirror the shuffles across the two 128-bit
18410   // lanes.
18411   SmallVector<int, 4> RepeatedMask;
18412   bool Is128BitLaneRepeatedShuffle =
18413       is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
18414   if (Is128BitLaneRepeatedShuffle) {
18415     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
18416     if (V2.isUndef())
18417       return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
18418                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
18419 
18420     // Use dedicated unpack instructions for masks that match their pattern.
18421     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
18422       return V;
18423   }
18424 
18425   // Try to use shift instructions.
18426   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
18427                                           Zeroable, Subtarget, DAG))
18428     return Shift;
18429 
18430   // If we have VLX support, we can use VALIGN or EXPAND.
18431   if (Subtarget.hasVLX()) {
18432     if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i32, V1, V2, Mask,
18433                                               Subtarget, DAG))
18434       return Rotate;
18435 
18436     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
18437                                          DAG, Subtarget))
18438       return V;
18439   }
18440 
18441   // Try to use byte rotation instructions.
18442   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
18443                                                 Subtarget, DAG))
18444     return Rotate;
18445 
18446   // Try to create an in-lane repeating shuffle mask and then shuffle the
18447   // results into the target lanes.
18448   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18449           DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
18450     return V;
18451 
18452   if (V2.isUndef()) {
18453     // Try to produce a fixed cross-128-bit lane permute followed by unpack
18454     // because that should be faster than the variable permute alternatives.
18455     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, Mask, V1, V2, DAG))
18456       return V;
18457 
18458     // If the shuffle patterns aren't repeated but it's a single input, directly
18459     // generate a cross-lane VPERMD instruction.
18460     SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
18461     return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
18462   }
18463 
18464   // Assume that a single SHUFPS is faster than an alternative sequence of
18465   // multiple instructions (even if the CPU has a domain penalty).
18466   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
18467   if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
18468     SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
18469     SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
18470     SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
18471                                             CastV1, CastV2, DAG);
18472     return DAG.getBitcast(MVT::v8i32, ShufPS);
18473   }
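
  // For example, a per-128-bit-lane repeated mask such as <1,2,5,4> (first two
  // elements from V1, last two from V2) fits a single SHUFPS with immediate
  // 0x19 on the v8f32 bitcasts, which is shorter than the equivalent
  // integer-domain sequence.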
18474 
18475   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18476   // shuffle.
18477   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18478           DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
18479     return Result;
18480 
18481   // Otherwise fall back on generic blend lowering.
18482   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i32, V1, V2, Mask,
18483                                               Subtarget, DAG);
18484 }
18485 
18486 /// Handle lowering of 16-lane 16-bit integer shuffles.
18487 ///
18488 /// This routine is only called when we have AVX2 and thus a reasonable
18489 /// instruction set for v16i16 shuffling.
18490 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18491                                   const APInt &Zeroable, SDValue V1, SDValue V2,
18492                                   const X86Subtarget &Subtarget,
18493                                   SelectionDAG &DAG) {
18494   assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
18495   assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
18496   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
18497   assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
18498 
18499   // Whenever we can lower this as a zext, that instruction is strictly faster
18500   // than any alternative. It also allows us to fold memory operands into the
18501   // shuffle in many cases.
18502   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
18503           DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
18504     return ZExt;
18505 
18506   // Check for being able to broadcast a single element.
18507   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
18508                                                   Subtarget, DAG))
18509     return Broadcast;
18510 
18511   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
18512                                           Zeroable, Subtarget, DAG))
18513     return Blend;
18514 
18515   // Use dedicated unpack instructions for masks that match their pattern.
18516   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
18517     return V;
18518 
18519   // Use dedicated pack instructions for masks that match their pattern.
18520   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
18521                                        Subtarget))
18522     return V;
18523 
18524   // Try to lower using a truncation.
18525   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
18526                                        Subtarget, DAG))
18527     return V;
18528 
18529   // Try to use shift instructions.
18530   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
18531                                           Zeroable, Subtarget, DAG))
18532     return Shift;
18533 
18534   // Try to use byte rotation instructions.
18535   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
18536                                                 Subtarget, DAG))
18537     return Rotate;
18538 
18539   // Try to create an in-lane repeating shuffle mask and then shuffle the
18540   // results into the target lanes.
18541   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18542           DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
18543     return V;
18544 
18545   if (V2.isUndef()) {
18546     // Try to use bit rotation instructions.
18547     if (SDValue Rotate =
18548             lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
18549       return Rotate;
18550 
18551     // Try to produce a fixed cross-128-bit lane permute followed by unpack
18552     // because that should be faster than the variable permute alternatives.
18553     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v16i16, Mask, V1, V2, DAG))
18554       return V;
18555 
18556     // There are no generalized cross-lane shuffle operations available on i16
18557     // element types.
18558     if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
18559       if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
18560               DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
18561         return V;
18562 
18563       return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
18564                                                  DAG, Subtarget);
18565     }
18566 
18567     SmallVector<int, 8> RepeatedMask;
18568     if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
18569       // As this is a single-input shuffle, the repeated mask should be
18570       // a strictly valid v8i16 mask that we can pass through to the v8i16
18571       // lowering to handle even the v16 case.
18572       return lowerV8I16GeneralSingleInputShuffle(
18573           DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
18574     }
18575   }
18576 
18577   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
18578                                               Zeroable, Subtarget, DAG))
18579     return PSHUFB;
18580 
18581   // AVX512BW can lower to VPERMW (non-VLX will pad to v32i16).
18582   if (Subtarget.hasBWI())
18583     return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, Subtarget, DAG);
18584 
18585   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18586   // shuffle.
18587   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18588           DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
18589     return Result;
18590 
18591   // Try to permute the lanes and then use a per-lane permute.
18592   if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
18593           DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
18594     return V;
18595 
18596   // Try to match an interleave of two v16i16s and lower them as unpck and
18597   // permutes using ymms.
18598   if (!Subtarget.hasAVX512())
18599     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v16i16, V1, V2,
18600                                                       Mask, DAG))
18601       return V;
18602 
18603   // Otherwise fall back on generic lowering.
18604   return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
18605                                     Subtarget, DAG);
18606 }
18607 
18608 /// Handle lowering of 32-lane 8-bit integer shuffles.
18609 ///
18610 /// This routine is only called when we have AVX2 and thus a reasonable
18611 /// instruction set for v32i8 shuffling.
18612 static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18613                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18614                                  const X86Subtarget &Subtarget,
18615                                  SelectionDAG &DAG) {
18616   assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
18617   assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
18618   assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
18619   assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
18620 
18621   // Whenever we can lower this as a zext, that instruction is strictly faster
18622   // than any alternative. It also allows us to fold memory operands into the
18623   // shuffle in many cases.
18624   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
18625                                                    Zeroable, Subtarget, DAG))
18626     return ZExt;
18627 
18628   // Check for being able to broadcast a single element.
18629   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
18630                                                   Subtarget, DAG))
18631     return Broadcast;
18632 
18633   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
18634                                           Zeroable, Subtarget, DAG))
18635     return Blend;
18636 
18637   // Use dedicated unpack instructions for masks that match their pattern.
18638   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
18639     return V;
18640 
18641   // Use dedicated pack instructions for masks that match their pattern.
18642   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
18643                                        Subtarget))
18644     return V;
18645 
18646   // Try to lower using a truncation.
18647   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v32i8, V1, V2, Mask, Zeroable,
18648                                        Subtarget, DAG))
18649     return V;
18650 
18651   // Try to use shift instructions.
18652   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
18653                                           Zeroable, Subtarget, DAG))
18654     return Shift;
18655 
18656   // Try to use byte rotation instructions.
18657   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
18658                                                 Subtarget, DAG))
18659     return Rotate;
18660 
18661   // Try to use bit rotation instructions.
18662   if (V2.isUndef())
18663     if (SDValue Rotate =
18664             lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
18665       return Rotate;
18666 
18667   // Try to create an in-lane repeating shuffle mask and then shuffle the
18668   // results into the target lanes.
18669   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
18670           DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
18671     return V;
18672 
18673   // There are no generalized cross-lane shuffle operations available on i8
18674   // element types.
18675   if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
18676     // Try to produce a fixed cross-128-bit lane permute followed by unpack
18677     // because that should be faster than the variable permute alternatives.
18678     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, Mask, V1, V2, DAG))
18679       return V;
18680 
18681     if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
18682             DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
18683       return V;
18684 
18685     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
18686                                                DAG, Subtarget);
18687   }
18688 
18689   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
18690                                               Zeroable, Subtarget, DAG))
18691     return PSHUFB;
18692 
18693   // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
18694   if (Subtarget.hasVBMI())
18695     return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, Subtarget, DAG);
18696 
18697   // Try to simplify this by merging 128-bit lanes to enable a lane-based
18698   // shuffle.
18699   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
18700           DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
18701     return Result;
18702 
18703   // Try to permute the lanes and then use a per-lane permute.
18704   if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
18705           DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
18706     return V;
18707 
18708   // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
18709   // by zeroable elements in the remaining 24 elements. Turn this into two
18710   // vmovqb instructions shuffled together.
18711   if (Subtarget.hasVLX())
18712     if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
18713                                                   Mask, Zeroable, DAG))
18714       return V;
18715 
18716   // Try to match an interleave of two v32i8s and lower them as unpck and
18717   // permutes using ymms.
18718   if (!Subtarget.hasAVX512())
18719     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v32i8, V1, V2,
18720                                                       Mask, DAG))
18721       return V;
18722 
18723   // Otherwise fall back on generic lowering.
18724   return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
18725                                     Subtarget, DAG);
18726 }
18727 
18728 /// High-level routine to lower various 256-bit x86 vector shuffles.
18729 ///
18730 /// This routine either breaks down the specific type of a 256-bit x86 vector
18731 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
18732 /// together based on the available instructions.
18733 static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
18734                                   SDValue V1, SDValue V2, const APInt &Zeroable,
18735                                   const X86Subtarget &Subtarget,
18736                                   SelectionDAG &DAG) {
18737   // If we have a single input to the zero element, insert that into V1 if we
18738   // can do so cheaply.
18739   int NumElts = VT.getVectorNumElements();
18740   int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
18741 
18742   if (NumV2Elements == 1 && Mask[0] >= NumElts)
18743     if (SDValue Insertion = lowerShuffleAsElementInsertion(
18744             DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
18745       return Insertion;
18746 
18747   // Handle special cases where the lower or upper half is UNDEF.
18748   if (SDValue V =
18749           lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
18750     return V;
18751 
18752   // There is a really nice hard cut-over between AVX1 and AVX2 that means we
18753   // can check for those subtargets here and avoid much of the subtarget
18754   // querying in the per-vector-type lowering routines. With AVX1 we have
18755   // essentially *zero* ability to manipulate a 256-bit vector with integer
18756   // types. Since we'll use floating point types there eventually, just
18757   // immediately cast everything to a float and operate entirely in that domain.
18758   if (VT.isInteger() && !Subtarget.hasAVX2()) {
18759     int ElementBits = VT.getScalarSizeInBits();
18760     if (ElementBits < 32) {
18761       // No floating point type available, if we can't use the bit operations
18762       // for masking/blending then decompose into 128-bit vectors.
18763       if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
18764                                             Subtarget, DAG))
18765         return V;
18766       if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
18767         return V;
18768       return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
18769     }
18770 
18771     MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
18772                                 VT.getVectorNumElements());
18773     V1 = DAG.getBitcast(FpVT, V1);
18774     V2 = DAG.getBitcast(FpVT, V2);
18775     return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
18776   }
18777 
18778   if (VT == MVT::v16f16 || VT.getVectorElementType() == MVT::bf16) {
18779     MVT IVT = VT.changeVectorElementTypeToInteger();
18780     V1 = DAG.getBitcast(IVT, V1);
18781     V2 = DAG.getBitcast(IVT, V2);
18782     return DAG.getBitcast(VT, DAG.getVectorShuffle(IVT, DL, V1, V2, Mask));
18783   }
18784 
18785   switch (VT.SimpleTy) {
18786   case MVT::v4f64:
18787     return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18788   case MVT::v4i64:
18789     return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18790   case MVT::v8f32:
18791     return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18792   case MVT::v8i32:
18793     return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18794   case MVT::v16i16:
18795     return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18796   case MVT::v32i8:
18797     return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
18798 
18799   default:
18800     llvm_unreachable("Not a valid 256-bit x86 vector type!");
18801   }
18802 }
18803 
18804 /// Try to lower a vector shuffle as a shuffle of 128-bit subvectors.
18805 static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
18806                                   const APInt &Zeroable, SDValue V1, SDValue V2,
18807                                   const X86Subtarget &Subtarget,
18808                                   SelectionDAG &DAG) {
18809   assert(VT.getScalarSizeInBits() == 64 &&
18810          "Unexpected element type size for 128bit shuffle.");
18811 
18812   // Handling a 256-bit vector would require VLX, and the function
18813   // lowerV2X128VectorShuffle() is most probably a better solution for that case.
18814   assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
18815 
18816   // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
18817   SmallVector<int, 4> Widened128Mask;
18818   if (!canWidenShuffleElements(Mask, Widened128Mask))
18819     return SDValue();
18820   assert(Widened128Mask.size() == 4 && "Shuffle widening mismatch");
18821 
18822   // Try to use an insert into a zero vector.
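  // Zeroable has one bit per 64-bit element here: bits 4-7 set mean the upper
  // 256 bits of the result are zeroable, and bits 2-3 set mean only the low
  // 128 bits of V1 need to be kept (NumElts == 2 below).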
18823   if (Widened128Mask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
18824       (Widened128Mask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
18825     unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
18826     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
18827     SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
18828                               DAG.getIntPtrConstant(0, DL));
18829     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
18830                        getZeroVector(VT, Subtarget, DAG, DL), LoV,
18831                        DAG.getIntPtrConstant(0, DL));
18832   }
18833 
18834   // Check for patterns which can be matched with a single insert of a 256-bit
18835   // subvector.
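  // Mask {0,1,2,3,0,1,2,3} duplicates V1's low 256 bits into both halves,
  // while {0,1,2,3,8,9,10,11} keeps V1's low half and places V2's low 256
  // bits in the upper half; both reduce to a single 256-bit insert at index 4.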
18836   bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3}, V1, V2);
18837   if (OnlyUsesV1 ||
18838       isShuffleEquivalent(Mask, {0, 1, 2, 3, 8, 9, 10, 11}, V1, V2)) {
18839     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
18840     SDValue SubVec =
18841         DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, OnlyUsesV1 ? V1 : V2,
18842                     DAG.getIntPtrConstant(0, DL));
18843     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
18844                        DAG.getIntPtrConstant(4, DL));
18845   }
18846 
18847   // See if this is an insertion of the lower 128-bits of V2 into V1.
18848   bool IsInsert = true;
18849   int V2Index = -1;
18850   for (int i = 0; i < 4; ++i) {
18851     assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
18852     if (Widened128Mask[i] < 0)
18853       continue;
18854 
18855     // Make sure all V1 subvectors are in place.
18856     if (Widened128Mask[i] < 4) {
18857       if (Widened128Mask[i] != i) {
18858         IsInsert = false;
18859         break;
18860       }
18861     } else {
18862       // Make sure we only have a single V2 index and that it's the lowest 128 bits.
18863       if (V2Index >= 0 || Widened128Mask[i] != 4) {
18864         IsInsert = false;
18865         break;
18866       }
18867       V2Index = i;
18868     }
18869   }
18870   if (IsInsert && V2Index >= 0) {
18871     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
18872     SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
18873                                  DAG.getIntPtrConstant(0, DL));
18874     return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
18875   }
18876 
18877   // See if we can widen to a 256-bit lane shuffle. We're going to lose 128-lane
18878   // UNDEF info by lowering to X86ISD::SHUF128 anyway, so by widening where
18879   // possible we at least ensure the lanes stay sequential to help later
18880   // combines.
18881   SmallVector<int, 2> Widened256Mask;
18882   if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
18883     Widened128Mask.clear();
18884     narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
18885   }
18886 
18887   // Try to lower to vshuf64x2/vshuf32x4.
18888   SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
18889   unsigned PermMask = 0;
18890   // Ensure all elements came from the same Op.
18891   for (int i = 0; i < 4; ++i) {
18892     assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
18893     if (Widened128Mask[i] < 0)
18894       continue;
18895 
18896     SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
18897     unsigned OpIndex = i / 2;
18898     if (Ops[OpIndex].isUndef())
18899       Ops[OpIndex] = Op;
18900     else if (Ops[OpIndex] != Op)
18901       return SDValue();
18902 
18903     // Convert the 128-bit shuffle mask selection values into 128-bit selection
18904     // bits defined by a vshuf64x2 instruction's immediate control byte.
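    // For example, a widened mask <0, 1, 4, 5> selects V1's 128-bit lanes 0-1
    // and V2's lanes 0-1, producing PermMask = 0b01000100 (0x44).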
18905     PermMask |= (Widened128Mask[i] % 4) << (i * 2);
18906   }
18907 
18908   return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
18909                      DAG.getTargetConstant(PermMask, DL, MVT::i8));
18910 }
18911 
18912 /// Handle lowering of 8-lane 64-bit floating point shuffles.
18913 static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18914                                  const APInt &Zeroable, SDValue V1, SDValue V2,
18915                                  const X86Subtarget &Subtarget,
18916                                  SelectionDAG &DAG) {
18917   assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
18918   assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
18919   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
18920 
18921   if (V2.isUndef()) {
18922     // Use low duplicate instructions for masks that match their pattern.
18923     if (isShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1, V2))
18924       return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
18925 
18926     if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
18927       // Non-half-crossing single input shuffles can be lowered with an
18928       // interleaved permutation.
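      // Each immediate bit picks the low (0) or high (1) element within its
      // 128-bit lane, e.g. Mask = {1,0,3,2,5,4,7,6} gives 0b01010101 (0x55).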
18929       unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
18930                               ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
18931                               ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
18932                               ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
18933       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
18934                          DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
18935     }
18936 
18937     SmallVector<int, 4> RepeatedMask;
18938     if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
18939       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
18940                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
18941   }
18942 
18943   if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
18944                                            V2, Subtarget, DAG))
18945     return Shuf128;
18946 
18947   if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
18948     return Unpck;
18949 
18950   // Check if the blend happens to exactly fit the pattern of SHUFPD.
18951   if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
18952                                           Zeroable, Subtarget, DAG))
18953     return Op;
18954 
18955   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
18956                                        DAG, Subtarget))
18957     return V;
18958 
18959   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
18960                                           Zeroable, Subtarget, DAG))
18961     return Blend;
18962 
18963   return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, Subtarget, DAG);
18964 }
18965 
18966 /// Handle lowering of 16-lane 32-bit floating point shuffles.
18967 static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
18968                                   const APInt &Zeroable, SDValue V1, SDValue V2,
18969                                   const X86Subtarget &Subtarget,
18970                                   SelectionDAG &DAG) {
18971   assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
18972   assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
18973   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
18974 
18975   // If the shuffle mask is repeated in each 128-bit lane, we have many more
18976   // options to efficiently lower the shuffle.
18977   SmallVector<int, 4> RepeatedMask;
18978   if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
18979     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
18980 
18981     // Use even/odd duplicate instructions for masks that match their pattern.
18982     if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
18983       return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
18984     if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
18985       return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
18986 
18987     if (V2.isUndef())
18988       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
18989                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
18990 
18991     // Use dedicated unpack instructions for masks that match their pattern.
18992     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
18993       return V;
18994 
18995     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
18996                                             Zeroable, Subtarget, DAG))
18997       return Blend;
18998 
18999     // Otherwise, fall back to a SHUFPS sequence.
19000     return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
19001   }
19002 
19003   // Try to create an in-lane repeating shuffle mask and then shuffle the
19004   // results into the target lanes.
19005   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
19006           DL, MVT::v16f32, V1, V2, Mask, Subtarget, DAG))
19007     return V;
19008 
19009   // If we have a single input shuffle with different shuffle patterns in the
19010   // 128-bit lanes and no lane crossing, use a variable mask VPERMILPS.
19011   if (V2.isUndef() &&
19012       !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
19013     SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
19014     return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
19015   }
19016 
19017   // If we have AVX512F support, we can use VEXPAND.
19018   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
19019                                              V1, V2, DAG, Subtarget))
19020     return V;
19021 
19022   return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, Subtarget, DAG);
19023 }
19024 
19025 /// Handle lowering of 8-lane 64-bit integer shuffles.
19026 static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
19027                                  const APInt &Zeroable, SDValue V1, SDValue V2,
19028                                  const X86Subtarget &Subtarget,
19029                                  SelectionDAG &DAG) {
19030   assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
19031   assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
19032   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
19033 
19034   if (V2.isUndef()) {
19035     // When the shuffle is mirrored between the 128-bit lanes of the unit, we
19036     // can use lower latency instructions that will operate on all four
19037     // 128-bit lanes.
19038     SmallVector<int, 2> Repeated128Mask;
19039     if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
19040       SmallVector<int, 4> PSHUFDMask;
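      // Split each 64-bit mask element into two 32-bit elements for PSHUFD,
      // e.g. the repeated mask <1, 0> becomes <2, 3, 0, 1>.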
19041       narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
19042       return DAG.getBitcast(
19043           MVT::v8i64,
19044           DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
19045                       DAG.getBitcast(MVT::v16i32, V1),
19046                       getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
19047     }
19048 
19049     SmallVector<int, 4> Repeated256Mask;
19050     if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
19051       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
19052                          getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
19053   }
19054 
19055   if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
19056                                            V2, Subtarget, DAG))
19057     return Shuf128;
19058 
19059   // Try to use shift instructions.
19060   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
19061                                           Zeroable, Subtarget, DAG))
19062     return Shift;
19063 
19064   // Try to use VALIGN.
19065   if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i64, V1, V2, Mask,
19066                                             Subtarget, DAG))
19067     return Rotate;
19068 
19069   // Try to use PALIGNR.
19070   if (Subtarget.hasBWI())
19071     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
19072                                                   Subtarget, DAG))
19073       return Rotate;
19074 
19075   if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
19076     return Unpck;
19077 
19078   // If we have AVX512F support, we can use VEXPAND.
19079   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
19080                                        DAG, Subtarget))
19081     return V;
19082 
19083   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
19084                                           Zeroable, Subtarget, DAG))
19085     return Blend;
19086 
19087   return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, Subtarget, DAG);
19088 }
19089 
19090 /// Handle lowering of 16-lane 32-bit integer shuffles.
19091 static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
19092                                   const APInt &Zeroable, SDValue V1, SDValue V2,
19093                                   const X86Subtarget &Subtarget,
19094                                   SelectionDAG &DAG) {
19095   assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
19096   assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
19097   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
19098 
19099   // Whenever we can lower this as a zext, that instruction is strictly faster
19100   // than any alternative. It also allows us to fold memory operands into the
19101   // shuffle in many cases.
19102   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
19103           DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
19104     return ZExt;
19105 
19106   // If the shuffle mask is repeated in each 128-bit lane we can use more
19107   // efficient instructions that mirror the shuffles across the four 128-bit
19108   // lanes.
19109   SmallVector<int, 4> RepeatedMask;
19110   bool Is128BitLaneRepeatedShuffle =
19111       is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
19112   if (Is128BitLaneRepeatedShuffle) {
19113     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
19114     if (V2.isUndef())
19115       return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
19116                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
19117 
19118     // Use dedicated unpack instructions for masks that match their pattern.
19119     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
19120       return V;
19121   }
19122 
19123   // Try to use shift instructions.
19124   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
19125                                           Zeroable, Subtarget, DAG))
19126     return Shift;
19127 
19128   // Try to use VALIGN.
19129   if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v16i32, V1, V2, Mask,
19130                                             Subtarget, DAG))
19131     return Rotate;
19132 
19133   // Try to use byte rotation instructions.
19134   if (Subtarget.hasBWI())
19135     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
19136                                                   Subtarget, DAG))
19137       return Rotate;
19138 
19139   // Assume that a single SHUFPS is faster than using a permv shuffle.
19140   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
19141   if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
19142     SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
19143     SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
19144     SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
19145                                             CastV1, CastV2, DAG);
19146     return DAG.getBitcast(MVT::v16i32, ShufPS);
19147   }
19148 
19149   // Try to create an in-lane repeating shuffle mask and then shuffle the
19150   // results into the target lanes.
19151   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
19152           DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
19153     return V;
19154 
19155   // If we have AVX512F support, we can use VEXPAND.
19156   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
19157                                        DAG, Subtarget))
19158     return V;
19159 
19160   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
19161                                           Zeroable, Subtarget, DAG))
19162     return Blend;
19163 
19164   return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, Subtarget, DAG);
19165 }
19166 
19167 /// Handle lowering of 32-lane 16-bit integer shuffles.
19168 static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
19169                                   const APInt &Zeroable, SDValue V1, SDValue V2,
19170                                   const X86Subtarget &Subtarget,
19171                                   SelectionDAG &DAG) {
19172   assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
19173   assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
19174   assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
19175   assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
19176 
19177   // Whenever we can lower this as a zext, that instruction is strictly faster
19178   // than any alternative. It also allows us to fold memory operands into the
19179   // shuffle in many cases.
19180   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
19181           DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
19182     return ZExt;
19183 
19184   // Use dedicated unpack instructions for masks that match their pattern.
19185   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
19186     return V;
19187 
19188   // Use dedicated pack instructions for masks that match their pattern.
19189   if (SDValue V =
19190           lowerShuffleWithPACK(DL, MVT::v32i16, Mask, V1, V2, DAG, Subtarget))
19191     return V;
19192 
19193   // Try to use shift instructions.
19194   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
19195                                           Zeroable, Subtarget, DAG))
19196     return Shift;
19197 
19198   // Try to use byte rotation instructions.
19199   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
19200                                                 Subtarget, DAG))
19201     return Rotate;
19202 
19203   if (V2.isUndef()) {
19204     // Try to use bit rotation instructions.
19205     if (SDValue Rotate =
19206             lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
19207       return Rotate;
19208 
19209     SmallVector<int, 8> RepeatedMask;
19210     if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
19211       // As this is a single-input shuffle, the repeated mask should be
19212       // a strictly valid v8i16 mask that we can pass through to the v8i16
19213       // lowering to handle even the v32 case.
19214       return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v32i16, V1,
19215                                                  RepeatedMask, Subtarget, DAG);
19216     }
19217   }
19218 
19219   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
19220                                           Zeroable, Subtarget, DAG))
19221     return Blend;
19222 
19223   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
19224                                               Zeroable, Subtarget, DAG))
19225     return PSHUFB;
19226 
19227   return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, Subtarget, DAG);
19228 }
19229 
19230 /// Handle lowering of 64-lane 8-bit integer shuffles.
19231 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
19232                                  const APInt &Zeroable, SDValue V1, SDValue V2,
19233                                  const X86Subtarget &Subtarget,
19234                                  SelectionDAG &DAG) {
19235   assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
19236   assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
19237   assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
19238   assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
19239 
19240   // Whenever we can lower this as a zext, that instruction is strictly faster
19241   // than any alternative. It also allows us to fold memory operands into the
19242   // shuffle in many cases.
19243   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
19244           DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
19245     return ZExt;
19246 
19247   // Use dedicated unpack instructions for masks that match their pattern.
19248   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
19249     return V;
19250 
19251   // Use dedicated pack instructions for masks that match their pattern.
19252   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
19253                                        Subtarget))
19254     return V;
19255 
19256   // Try to use shift instructions.
19257   if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
19258                                           Zeroable, Subtarget, DAG))
19259     return Shift;
19260 
19261   // Try to use byte rotation instructions.
19262   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
19263                                                 Subtarget, DAG))
19264     return Rotate;
19265 
19266   // Try to use bit rotation instructions.
19267   if (V2.isUndef())
19268     if (SDValue Rotate =
19269             lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
19270       return Rotate;
19271 
19272   // Lower as AND if possible.
19273   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v64i8, V1, V2, Mask,
19274                                              Zeroable, Subtarget, DAG))
19275     return Masked;
19276 
19277   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
19278                                               Zeroable, Subtarget, DAG))
19279     return PSHUFB;
19280 
19281   // Try to create an in-lane repeating shuffle mask and then shuffle the
19282   // results into the target lanes.
19283   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
19284           DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
19285     return V;
19286 
19287   if (SDValue Result = lowerShuffleAsLanePermuteAndPermute(
19288           DL, MVT::v64i8, V1, V2, Mask, DAG, Subtarget))
19289     return Result;
19290 
19291   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
19292                                           Zeroable, Subtarget, DAG))
19293     return Blend;
19294 
19295   if (!is128BitLaneCrossingShuffleMask(MVT::v64i8, Mask)) {
19296     // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
19297     // PALIGNR will be cheaper than the second PSHUFB+OR.
19298     if (SDValue V = lowerShuffleAsByteRotateAndPermute(DL, MVT::v64i8, V1, V2,
19299                                                        Mask, Subtarget, DAG))
19300       return V;
19301 
19302     // If we can't directly blend but can use PSHUFB, that will be better as it
19303     // can both shuffle and set up the inefficient blend.
19304     bool V1InUse, V2InUse;
19305     return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v64i8, V1, V2, Mask, Zeroable,
19306                                         DAG, V1InUse, V2InUse);
19307   }
19308 
19309   // Try to simplify this by merging 128-bit lanes to enable a lane-based
19310   // shuffle.
19311   if (!V2.isUndef())
19312     if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
19313             DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
19314       return Result;
19315 
19316   // VBMI can use VPERMV/VPERMV3 byte shuffles.
19317   if (Subtarget.hasVBMI())
19318     return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, Subtarget, DAG);
19319 
19320   return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
19321 }
19322 
19323 /// High-level routine to lower various 512-bit x86 vector shuffles.
19324 ///
19325 /// This routine either breaks down the specific type of a 512-bit x86 vector
19326 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
19327 /// together based on the available instructions.
19328 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
19329                                   MVT VT, SDValue V1, SDValue V2,
19330                                   const APInt &Zeroable,
19331                                   const X86Subtarget &Subtarget,
19332                                   SelectionDAG &DAG) {
19333   assert(Subtarget.hasAVX512() &&
19334          "Cannot lower 512-bit vectors w/o basic ISA!");
19335 
19336   // If we have a single input to the zero element, insert that into V1 if we
19337   // can do so cheaply.
19338   int NumElts = Mask.size();
19339   int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
19340 
19341   if (NumV2Elements == 1 && Mask[0] >= NumElts)
19342     if (SDValue Insertion = lowerShuffleAsElementInsertion(
19343             DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
19344       return Insertion;
19345 
19346   // Handle special cases where the lower or upper half is UNDEF.
19347   if (SDValue V =
19348           lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
19349     return V;
19350 
19351   // Check for being able to broadcast a single element.
19352   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
19353                                                   Subtarget, DAG))
19354     return Broadcast;
19355 
19356   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI()) {
19357     // Try using bit ops for masking and blending before falling back to
19358     // splitting.
19359     if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
19360                                           Subtarget, DAG))
19361       return V;
19362     if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
19363       return V;
19364 
19365     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
19366   }
19367 
19368   if (VT == MVT::v32f16) {
19369     V1 = DAG.getBitcast(MVT::v32i16, V1);
19370     V2 = DAG.getBitcast(MVT::v32i16, V2);
19371     return DAG.getBitcast(MVT::v32f16,
19372                           DAG.getVectorShuffle(MVT::v32i16, DL, V1, V2, Mask));
19373   }
19374 
19375   // Dispatch to each element type for lowering. If we don't have support for
19376   // specific element type shuffles at 512 bits, immediately split them and
19377   // lower them. Each lowering routine of a given type is allowed to assume that
19378   // the requisite ISA extensions for that element type are available.
19379   switch (VT.SimpleTy) {
19380   case MVT::v8f64:
19381     return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19382   case MVT::v16f32:
19383     return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19384   case MVT::v8i64:
19385     return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19386   case MVT::v16i32:
19387     return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19388   case MVT::v32i16:
19389     return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19390   case MVT::v64i8:
19391     return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
19392 
19393   default:
19394     llvm_unreachable("Not a valid 512-bit x86 vector type!");
19395   }
19396 }
19397 
19398 static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
19399                                          MVT VT, SDValue V1, SDValue V2,
19400                                          const X86Subtarget &Subtarget,
19401                                          SelectionDAG &DAG) {
19402   // Shuffle should be unary.
19403   if (!V2.isUndef())
19404     return SDValue();
19405 
19406   int ShiftAmt = -1;
19407   int NumElts = Mask.size();
19408   for (int i = 0; i != NumElts; ++i) {
19409     int M = Mask[i];
19410     assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
19411            "Unexpected mask index.");
19412     if (M < 0)
19413       continue;
19414 
19415     // The first non-undef element determines our shift amount.
19416     if (ShiftAmt < 0) {
19417       ShiftAmt = M - i;
19418       // Need to be shifting right.
19419       if (ShiftAmt <= 0)
19420         return SDValue();
19421     }
19422     // All non-undef elements must shift by the same amount.
19423     if (ShiftAmt != M - i)
19424       return SDValue();
19425   }
19426   assert(ShiftAmt >= 0 && "All undef?");
19427 
19428   // Great we found a shift right.
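  // KSHIFT operates on whole mask registers: the narrowest supported form is
  // v8i1 (KSHIFTRB, requires DQI), otherwise v16i1 (KSHIFTRW), so widen
  // smaller mask types before shifting.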
19429   MVT WideVT = VT;
19430   if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
19431     WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
19432   SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
19433                             DAG.getUNDEF(WideVT), V1,
19434                             DAG.getIntPtrConstant(0, DL));
19435   Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
19436                     DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
19437   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19438                      DAG.getIntPtrConstant(0, DL));
19439 }
19440 
19441 // Determine if this shuffle can be implemented with a KSHIFT instruction.
19442 // Returns the shift amount if possible or -1 if not. This is a simplified
19443 // version of matchShuffleAsShift.
19444 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
19445                                     int MaskOffset, const APInt &Zeroable) {
19446   int Size = Mask.size();
19447 
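  // Check that the elements shifted in (the low positions for a left shift,
  // the high positions for a right shift) are all zeroable.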
19448   auto CheckZeros = [&](int Shift, bool Left) {
19449     for (int j = 0; j < Shift; ++j)
19450       if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
19451         return false;
19452 
19453     return true;
19454   };
19455 
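  // Check that the surviving elements form a sequential (or undef) run taken
  // from the candidate operand; MaskOffset selects between V1 and V2 indices.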
19456   auto MatchShift = [&](int Shift, bool Left) {
19457     unsigned Pos = Left ? Shift : 0;
19458     unsigned Low = Left ? 0 : Shift;
19459     unsigned Len = Size - Shift;
19460     return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
19461   };
19462 
19463   for (int Shift = 1; Shift != Size; ++Shift)
19464     for (bool Left : {true, false})
19465       if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
19466         Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
19467         return Shift;
19468       }
19469 
19470   return -1;
19471 }
19472 
19473 
19474 // Lower vXi1 vector shuffles.
19475 // There is no dedicated instruction on AVX-512 that shuffles the masks.
19476 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
19477 // vector, shuffle that, and then truncate it back.
19478 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
19479                                 MVT VT, SDValue V1, SDValue V2,
19480                                 const APInt &Zeroable,
19481                                 const X86Subtarget &Subtarget,
19482                                 SelectionDAG &DAG) {
19483   assert(Subtarget.hasAVX512() &&
19484          "Cannot lower 512-bit vectors w/o basic ISA!");
19485 
19486   int NumElts = Mask.size();
19487 
19488   // Try to recognize shuffles that are just padding a subvector with zeros.
19489   int SubvecElts = 0;
19490   int Src = -1;
19491   for (int i = 0; i != NumElts; ++i) {
19492     if (Mask[i] >= 0) {
19493       // Grab the source from the first valid mask. All subsequent elements need
19494       // to use this same source.
19495       if (Src < 0)
19496         Src = Mask[i] / NumElts;
19497       if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
19498         break;
19499     }
19500 
19501     ++SubvecElts;
19502   }
19503   assert(SubvecElts != NumElts && "Identity shuffle?");
19504 
19505   // Clip to a power of 2.
19506   SubvecElts = PowerOf2Floor(SubvecElts);
19507 
19508   // Make sure the number of zeroable bits at the top at least covers the bits
19509   // not covered by the subvector.
19510   if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
19511     assert(Src >= 0 && "Expected a source!");
19512     MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
19513     SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
19514                                   Src == 0 ? V1 : V2,
19515                                   DAG.getIntPtrConstant(0, DL));
19516     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
19517                        DAG.getConstant(0, DL, VT),
19518                        Extract, DAG.getIntPtrConstant(0, DL));
19519   }
19520 
19521   // Try a simple shift right with undef elements. Later we'll try with zeros.
19522   if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
19523                                                 DAG))
19524     return Shift;
19525 
19526   // Try to match KSHIFTs.
19527   unsigned Offset = 0;
19528   for (SDValue V : { V1, V2 }) {
19529     unsigned Opcode;
19530     int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
19531     if (ShiftAmt >= 0) {
19532       MVT WideVT = VT;
19533       if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
19534         WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
19535       SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
19536                                 DAG.getUNDEF(WideVT), V,
19537                                 DAG.getIntPtrConstant(0, DL));
19538       // Widened right shifts need two shifts to ensure we shift in zeroes.
19539       if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
19540         int WideElts = WideVT.getVectorNumElements();
19541         // Shift left to put the original vector in the MSBs of the new size.
19542         Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
19543                           DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
19544         // Increase the shift amount to account for the left shift.
19545         ShiftAmt += WideElts - NumElts;
19546       }
19547 
19548       Res = DAG.getNode(Opcode, DL, WideVT, Res,
19549                         DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
19550       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19551                          DAG.getIntPtrConstant(0, DL));
19552     }
19553     Offset += NumElts; // Increment for next iteration.
19554   }
19555 
19556   // If we're broadcasting a SETCC result, try to broadcast the ops instead.
19557   // TODO: What other unary shuffles would benefit from this?
19558   if (isBroadcastShuffleMask(Mask) && V1.getOpcode() == ISD::SETCC &&
19559       V1->hasOneUse()) {
19560     SDValue Op0 = V1.getOperand(0);
19561     SDValue Op1 = V1.getOperand(1);
19562     ISD::CondCode CC = cast<CondCodeSDNode>(V1.getOperand(2))->get();
19563     EVT OpVT = Op0.getValueType();
19564     return DAG.getSetCC(
19565         DL, VT, DAG.getVectorShuffle(OpVT, DL, Op0, DAG.getUNDEF(OpVT), Mask),
19566         DAG.getVectorShuffle(OpVT, DL, Op1, DAG.getUNDEF(OpVT), Mask), CC);
19567   }
19568 
19569   MVT ExtVT;
19570   switch (VT.SimpleTy) {
19571   default:
19572     llvm_unreachable("Expected a vector of i1 elements");
19573   case MVT::v2i1:
19574     ExtVT = MVT::v2i64;
19575     break;
19576   case MVT::v4i1:
19577     ExtVT = MVT::v4i32;
19578     break;
19579   case MVT::v8i1:
19580     // Take 512-bit type, more shuffles on KNL. If we have VLX use a 256-bit
19581     // shuffle.
19582     ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
19583     break;
19584   case MVT::v16i1:
19585     // Take 512-bit type, unless we are avoiding 512-bit types and have the
19586     // 256-bit operation available.
19587     ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
19588     break;
19589   case MVT::v32i1:
19590     // Take 512-bit type, unless we are avoiding 512-bit types and have the
19591     // 256-bit operation available.
19592     assert(Subtarget.hasBWI() && "Expected AVX512BW support");
19593     ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
19594     break;
19595   case MVT::v64i1:
19596     // Fall back to scalarization. FIXME: We can do better if the shuffle
19597     // can be partitioned cleanly.
19598     if (!Subtarget.useBWIRegs())
19599       return SDValue();
19600     ExtVT = MVT::v64i8;
19601     break;
19602   }
19603 
19604   V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
19605   V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
19606 
19607   SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
19608   // i1 was sign-extended, so we can convert back to a mask by comparing against zero.
19609   int NumElems = VT.getVectorNumElements();
19610   if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
19611       (Subtarget.hasDQI() && (NumElems < 32)))
19612     return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
19613                        Shuffle, ISD::SETGT);
19614 
19615   return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
19616 }
19617 
19618 /// Helper function that returns true if the shuffle mask should be
19619 /// commuted to improve canonicalization.
19620 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
19621   int NumElements = Mask.size();
19622 
19623   int NumV1Elements = 0, NumV2Elements = 0;
19624   for (int M : Mask)
19625     if (M < 0)
19626       continue;
19627     else if (M < NumElements)
19628       ++NumV1Elements;
19629     else
19630       ++NumV2Elements;
19631 
19632   // Commute the shuffle as needed such that more elements come from V1 than
19633   // V2. This allows us to match the shuffle pattern strictly on how many
19634   // elements come from V1 without handling the symmetric cases.
19635   if (NumV2Elements > NumV1Elements)
19636     return true;
19637 
19638   assert(NumV1Elements > 0 && "No V1 indices");
19639 
19640   if (NumV2Elements == 0)
19641     return false;
19642 
19643   // When the number of V1 and V2 elements are the same, try to minimize the
19644   // number of uses of V2 in the low half of the vector. When that is tied,
19645   // ensure that the sum of indices for V1 is equal to or lower than the sum of
19646   // indices for V2. When those are equal, try to ensure that the number of odd
19647   // indices for V1 is lower than the number of odd indices for V2.
19648   if (NumV1Elements == NumV2Elements) {
19649     int LowV1Elements = 0, LowV2Elements = 0;
19650     for (int M : Mask.slice(0, NumElements / 2))
19651       if (M >= NumElements)
19652         ++LowV2Elements;
19653       else if (M >= 0)
19654         ++LowV1Elements;
19655     if (LowV2Elements > LowV1Elements)
19656       return true;
19657     if (LowV2Elements == LowV1Elements) {
19658       int SumV1Indices = 0, SumV2Indices = 0;
19659       for (int i = 0, Size = Mask.size(); i < Size; ++i)
19660         if (Mask[i] >= NumElements)
19661           SumV2Indices += i;
19662         else if (Mask[i] >= 0)
19663           SumV1Indices += i;
19664       if (SumV2Indices < SumV1Indices)
19665         return true;
19666       if (SumV2Indices == SumV1Indices) {
19667         int NumV1OddIndices = 0, NumV2OddIndices = 0;
19668         for (int i = 0, Size = Mask.size(); i < Size; ++i)
19669           if (Mask[i] >= NumElements)
19670             NumV2OddIndices += i % 2;
19671           else if (Mask[i] >= 0)
19672             NumV1OddIndices += i % 2;
19673         if (NumV2OddIndices < NumV1OddIndices)
19674           return true;
19675       }
19676     }
19677   }
19678 
19679   return false;
19680 }
19681 
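// Returns true if one of the shuffle operands is a single-use ADD/SUB/AND/XOR
// that AVX-512 could execute as a masked (predicated) operation; the caller
// below uses this to skip shuffle-mask widening for such shuffles.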
19682 static bool canCombineAsMaskOperation(SDValue V1, SDValue V2,
19683                                       const X86Subtarget &Subtarget) {
19684   if (!Subtarget.hasAVX512())
19685     return false;
19686 
19687   MVT VT = V1.getSimpleValueType().getScalarType();
19688   if ((VT == MVT::i16 || VT == MVT::i8) && !Subtarget.hasBWI())
19689     return false;
19690 
19691   // i8 is better widened to i16, because there is PBLENDW for vXi16
19692   // when the vector bit size is 128 or 256.
19693   if (VT == MVT::i8 && V1.getSimpleValueType().getSizeInBits() < 512)
19694     return false;
19695 
19696   auto HasMaskOperation = [&](SDValue V) {
19697     // TODO: Currently we only check a limited set of opcodes. We could
19698     // probably extend this to all binary operations by checking TLI.isBinOp().
19699     switch (V->getOpcode()) {
19700     default:
19701       return false;
19702     case ISD::ADD:
19703     case ISD::SUB:
19704     case ISD::AND:
19705     case ISD::XOR:
19706       break;
19707     }
19708     if (!V->hasOneUse())
19709       return false;
19710 
19711     return true;
19712   };
19713 
19714   if (HasMaskOperation(V1) || HasMaskOperation(V2))
19715     return true;
19716 
19717   return false;
19718 }
19719 
19720 // Forward declaration.
19721 static SDValue canonicalizeShuffleMaskWithHorizOp(
19722     MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
19723     unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
19724     const X86Subtarget &Subtarget);
19725 
19726 /// Top-level lowering for x86 vector shuffles.
19727 ///
19728 /// This handles decomposition, canonicalization, and lowering of all x86
19729 /// vector shuffles. Most of the specific lowering strategies are encapsulated
19730 /// above in helper routines. The canonicalization attempts to widen shuffles
19731 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
19732 /// s.t. only one of the two inputs needs to be tested, etc.
19733 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
19734                                    SelectionDAG &DAG) {
19735   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
19736   ArrayRef<int> OrigMask = SVOp->getMask();
19737   SDValue V1 = Op.getOperand(0);
19738   SDValue V2 = Op.getOperand(1);
19739   MVT VT = Op.getSimpleValueType();
19740   int NumElements = VT.getVectorNumElements();
19741   SDLoc DL(Op);
19742   bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
19743 
19744   assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
19745          "Can't lower MMX shuffles");
19746 
19747   bool V1IsUndef = V1.isUndef();
19748   bool V2IsUndef = V2.isUndef();
19749   if (V1IsUndef && V2IsUndef)
19750     return DAG.getUNDEF(VT);
19751 
19752   // When we create a shuffle node we put the UNDEF node as the second operand,
19753   // but in some cases the first operand may be transformed to UNDEF.
19754   // In this case we should just commute the node.
19755   if (V1IsUndef)
19756     return DAG.getCommutedVectorShuffle(*SVOp);
19757 
19758   // Check for non-undef masks pointing at an undef vector and make the masks
19759   // undef as well. This makes it easier to match the shuffle based solely on
19760   // the mask.
19761   if (V2IsUndef &&
19762       any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
19763     SmallVector<int, 8> NewMask(OrigMask);
19764     for (int &M : NewMask)
19765       if (M >= NumElements)
19766         M = -1;
19767     return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
19768   }
19769 
19770   // Check for illegal shuffle mask element index values.
19771   int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
19772   (void)MaskUpperLimit;
19773   assert(llvm::all_of(OrigMask,
19774                       [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
19775          "Out of bounds shuffle index");
19776 
19777   // We actually see shuffles that are entirely re-arrangements of a set of
19778   // zero inputs. This mostly happens while decomposing complex shuffles into
19779   // simple ones. Directly lower these as a buildvector of zeros.
19780   APInt KnownUndef, KnownZero;
19781   computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
19782 
19783   APInt Zeroable = KnownUndef | KnownZero;
19784   if (Zeroable.isAllOnes())
19785     return getZeroVector(VT, Subtarget, DAG, DL);
19786 
19787   bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
19788 
19789   // Try to collapse shuffles into using a vector type with fewer elements but
19790   // wider element types. We cap this to not form integers or floating point
19791   // elements wider than 64 bits. It does not seem beneficial to form i128
19792   // integers to handle flipping the low and high halves of AVX 256-bit vectors.
19793   SmallVector<int, 16> WidenedMask;
19794   if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
19795       !canCombineAsMaskOperation(V1, V2, Subtarget) &&
19796       canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
19797     // Shuffle mask widening should not interfere with a broadcast opportunity
19798     // by obfuscating the operands with bitcasts.
19799     // TODO: Avoid lowering directly from this top-level function: make this
19800     // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
19801     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
19802                                                     Subtarget, DAG))
19803       return Broadcast;
19804 
19805     MVT NewEltVT = VT.isFloatingPoint()
19806                        ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
19807                        : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
19808     int NewNumElts = NumElements / 2;
19809     MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
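    // e.g. v16i8 widens to v8i16 and v8f32 widens to v4f64.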
19810     // Make sure that the new vector type is legal. For example, v2f64 isn't
19811     // legal on SSE1.
19812     if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
19813       if (V2IsZero) {
19814         // Modify the new Mask to take all zeros from the all-zero vector.
19815         // Choose indices that are blend-friendly.
19816         bool UsedZeroVector = false;
19817         assert(is_contained(WidenedMask, SM_SentinelZero) &&
19818                "V2's non-undef elements are used?!");
19819         for (int i = 0; i != NewNumElts; ++i)
19820           if (WidenedMask[i] == SM_SentinelZero) {
19821             WidenedMask[i] = i + NewNumElts;
19822             UsedZeroVector = true;
19823           }
19824         // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
19825         // some elements to be undef.
19826         if (UsedZeroVector)
19827           V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
19828       }
19829       V1 = DAG.getBitcast(NewVT, V1);
19830       V2 = DAG.getBitcast(NewVT, V2);
19831       return DAG.getBitcast(
19832           VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
19833     }
19834   }
19835 
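  // Work on local copies of the operands and mask; the canonicalization steps
  // below may rewrite them in place.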
19836   SmallVector<SDValue> Ops = {V1, V2};
19837   SmallVector<int> Mask(OrigMask);
19838 
19839   // Canonicalize the shuffle with any horizontal ops inputs.
19840   // NOTE: This may update Ops and Mask.
19841   if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
19842           Ops, Mask, VT.getSizeInBits(), DL, DAG, Subtarget))
19843     return DAG.getBitcast(VT, HOp);
19844 
19845   V1 = DAG.getBitcast(VT, Ops[0]);
19846   V2 = DAG.getBitcast(VT, Ops[1]);
19847   assert(NumElements == (int)Mask.size() &&
19848          "canonicalizeShuffleMaskWithHorizOp "
19849          "shouldn't alter the shuffle mask size");
19850 
19851   // Commute the shuffle if it will improve canonicalization.
19852   if (canonicalizeShuffleMaskWithCommute(Mask)) {
19853     ShuffleVectorSDNode::commuteMask(Mask);
19854     std::swap(V1, V2);
19855   }
19856 
19857   // For each vector width, delegate to a specialized lowering routine.
19858   if (VT.is128BitVector())
19859     return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
19860 
19861   if (VT.is256BitVector())
19862     return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
19863 
19864   if (VT.is512BitVector())
19865     return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
19866 
19867   if (Is1BitVector)
19868     return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
19869 
19870   llvm_unreachable("Unimplemented!");
19871 }
19872 
19873 /// Try to lower a VSELECT instruction to a vector shuffle.
19874 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
19875                                            const X86Subtarget &Subtarget,
19876                                            SelectionDAG &DAG) {
19877   SDValue Cond = Op.getOperand(0);
19878   SDValue LHS = Op.getOperand(1);
19879   SDValue RHS = Op.getOperand(2);
19880   MVT VT = Op.getSimpleValueType();
19881 
19882   // Only non-legal VSELECTs reach this lowering; convert those into generic
19883   // shuffles and reuse the shuffle lowering path for blends.
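  // For example, (vselect <-1,0,-1,0>, %lhs, %rhs) becomes
  // shuffle %lhs, %rhs, <0,5,2,7>.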
19884   if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
19885     SmallVector<int, 32> Mask;
19886     if (createShuffleMaskFromVSELECT(Mask, Cond))
19887       return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
19888   }
19889 
19890   return SDValue();
19891 }
19892 
19893 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
19894   SDValue Cond = Op.getOperand(0);
19895   SDValue LHS = Op.getOperand(1);
19896   SDValue RHS = Op.getOperand(2);
19897 
19898   SDLoc dl(Op);
19899   MVT VT = Op.getSimpleValueType();
19900   if (isSoftFP16(VT)) {
19901     MVT NVT = VT.changeVectorElementTypeToInteger();
19902     return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, dl, NVT, Cond,
19903                                           DAG.getBitcast(NVT, LHS),
19904                                           DAG.getBitcast(NVT, RHS)));
19905   }
19906 
19907   // A vselect where all conditions and data are constants can be optimized into
19908   // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
19909   if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
19910       ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
19911       ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
19912     return SDValue();
19913 
19914   // Try to lower this to a blend-style vector shuffle. This can handle all
19915   // constant condition cases.
19916   if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
19917     return BlendOp;
19918 
19919   // If this VSELECT has a vector of i1 as a mask, it will be directly matched
19920   // with patterns on the mask registers on AVX-512.
19921   MVT CondVT = Cond.getSimpleValueType();
19922   unsigned CondEltSize = Cond.getScalarValueSizeInBits();
19923   if (CondEltSize == 1)
19924     return Op;
19925 
19926   // Variable blends are only legal from SSE4.1 onward.
19927   if (!Subtarget.hasSSE41())
19928     return SDValue();
19929 
19930   unsigned EltSize = VT.getScalarSizeInBits();
19931   unsigned NumElts = VT.getVectorNumElements();
19932 
19933   // Expand v32i16/v64i8 without BWI.
19934   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
19935     return SDValue();
19936 
19937   // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
19938   // into an i1 condition so that we can use the mask-based 512-bit blend
19939   // instructions.
19940   if (VT.getSizeInBits() == 512) {
19941     // Build a mask by testing the condition against zero.
19942     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
19943     SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
19944                                 DAG.getConstant(0, dl, CondVT),
19945                                 ISD::SETNE);
19946     // Now return a new VSELECT using the mask.
19947     return DAG.getSelect(dl, VT, Mask, LHS, RHS);
19948   }
19949 
19950   // SEXT/TRUNC cases where the mask doesn't match the destination size.
19951   if (CondEltSize != EltSize) {
19952     // If we don't have a sign splat, rely on the expansion.
19953     if (CondEltSize != DAG.ComputeNumSignBits(Cond))
19954       return SDValue();
19955 
19956     MVT NewCondSVT = MVT::getIntegerVT(EltSize);
19957     MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
19958     Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
19959     return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
19960   }
19961 
19962   // Only some types will be legal on some subtargets. If we can emit a legal
19963   // VSELECT-matching blend, return Op, but if we need to expand, return
19964   // a null value.
19965   switch (VT.SimpleTy) {
19966   default:
19967     // Most of the vector types have blends past SSE4.1.
19968     return Op;
19969 
19970   case MVT::v32i8:
19971     // The byte blends for AVX vectors were introduced only in AVX2.
19972     if (Subtarget.hasAVX2())
19973       return Op;
19974 
19975     return SDValue();
19976 
19977   case MVT::v8i16:
19978   case MVT::v16i16: {
19979     // Bitcast everything to the vXi8 type and use a vXi8 vselect.
19980     MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
19981     Cond = DAG.getBitcast(CastVT, Cond);
19982     LHS = DAG.getBitcast(CastVT, LHS);
19983     RHS = DAG.getBitcast(CastVT, RHS);
19984     SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
19985     return DAG.getBitcast(VT, Select);
19986   }
19987   }
19988 }
19989 
19990 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
19991   MVT VT = Op.getSimpleValueType();
19992   SDValue Vec = Op.getOperand(0);
19993   SDValue Idx = Op.getOperand(1);
19994   assert(isa<ConstantSDNode>(Idx) && "Constant index expected");
19995   SDLoc dl(Op);
19996 
19997   if (!Vec.getSimpleValueType().is128BitVector())
19998     return SDValue();
19999 
20000   if (VT.getSizeInBits() == 8) {
20001     // If IdxVal is 0, it's cheaper to do a move instead of a pextrb, unless
20002     // we're going to zero extend the register or fold the store.
20003     if (llvm::isNullConstant(Idx) && !X86::mayFoldIntoZeroExtend(Op) &&
20004         !X86::mayFoldIntoStore(Op))
20005       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
20006                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20007                                      DAG.getBitcast(MVT::v4i32, Vec), Idx));
20008 
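    // Otherwise extract the byte with PEXTRB into a GPR32 and truncate the
    // result back down to i8.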
20009     unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
20010     SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
20011                                   DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20012     return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
20013   }
20014 
20015   if (VT == MVT::f32) {
20016     // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
20017     // the result back to an FR32 register. It's only worth matching if the
20018     // result has a single use which is a store or a bitcast to i32.  And in
20019     // the case of a store, it's not worth it if the index is a constant 0,
20020     // because a MOVSSmr can be used instead, which is smaller and faster.
20021     if (!Op.hasOneUse())
20022       return SDValue();
20023     SDNode *User = *Op.getNode()->use_begin();
20024     if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) &&
20025         (User->getOpcode() != ISD::BITCAST ||
20026          User->getValueType(0) != MVT::i32))
20027       return SDValue();
20028     SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20029                                   DAG.getBitcast(MVT::v4i32, Vec), Idx);
20030     return DAG.getBitcast(MVT::f32, Extract);
20031   }
20032 
20033   if (VT == MVT::i32 || VT == MVT::i64)
20034     return Op;
20035 
20036   return SDValue();
20037 }
20038 
20039 /// Extract one bit from mask vector, like v16i1 or v8i1.
20040 /// AVX-512 feature.
20041 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
20042                                         const X86Subtarget &Subtarget) {
20043   SDValue Vec = Op.getOperand(0);
20044   SDLoc dl(Vec);
20045   MVT VecVT = Vec.getSimpleValueType();
20046   SDValue Idx = Op.getOperand(1);
20047   auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
20048   MVT EltVT = Op.getSimpleValueType();
20049 
20050   assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
20051          "Unexpected vector type in ExtractBitFromMaskVector");
20052 
20053   // A variable index can't be handled in mask registers;
20054   // extend the vector to VR512/VR128.
20055   if (!IdxC) {
20056     unsigned NumElts = VecVT.getVectorNumElements();
20057     // Extending v8i1/v16i1 to 512-bit gets better performance on KNL
20058     // than extending to 128/256-bit.
20059     MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
20060     MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
20061     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
20062     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
20063     return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
20064   }
20065 
20066   unsigned IdxVal = IdxC->getZExtValue();
20067   if (IdxVal == 0) // the operation is legal
20068     return Op;
20069 
20070   // Extend to natively supported kshift.
20071   unsigned NumElems = VecVT.getVectorNumElements();
20072   MVT WideVecVT = VecVT;
20073   if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
20074     WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
20075     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
20076                       DAG.getUNDEF(WideVecVT), Vec,
20077                       DAG.getIntPtrConstant(0, dl));
20078   }
20079 
20080   // Use kshiftr instruction to move to the lower element.
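  // For example, extracting bit 5 shifts the mask right by 5 and then reads
  // lane 0 of the result.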
20081   Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
20082                     DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20083 
20084   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
20085                      DAG.getIntPtrConstant(0, dl));
20086 }
20087 
20088 SDValue
20089 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
20090                                            SelectionDAG &DAG) const {
20091   SDLoc dl(Op);
20092   SDValue Vec = Op.getOperand(0);
20093   MVT VecVT = Vec.getSimpleValueType();
20094   SDValue Idx = Op.getOperand(1);
20095   auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
20096 
20097   if (VecVT.getVectorElementType() == MVT::i1)
20098     return ExtractBitFromMaskVector(Op, DAG, Subtarget);
20099 
20100   if (!IdxC) {
20101     // It's more profitable to go through memory (1 cycle throughput)
20102     // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
20103     // The IACA tool was used to get the performance estimates
20104     // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
20105     //
20106     // example : extractelement <16 x i8> %a, i32 %i
20107     //
20108     // Block Throughput: 3.00 Cycles
20109     // Throughput Bottleneck: Port5
20110     //
20111     // | Num Of |   Ports pressure in cycles  |    |
20112     // |  Uops  |  0  - DV  |  5  |  6  |  7  |    |
20113     // ---------------------------------------------
20114     // |   1    |           | 1.0 |     |     | CP | vmovd xmm1, edi
20115     // |   1    |           | 1.0 |     |     | CP | vpshufb xmm0, xmm0, xmm1
20116     // |   2    | 1.0       | 1.0 |     |     | CP | vpextrb eax, xmm0, 0x0
20117     // Total Num Of Uops: 4
20118     //
20119     //
20120     // Block Throughput: 1.00 Cycles
20121     // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
20122     //
20123     // |    |  Ports pressure in cycles   |  |
20124     // |Uops| 1 | 2 - D  |3 -  D  | 4 | 5 |  |
20125     // ---------------------------------------------------------
20126     // |2^  |   | 0.5    | 0.5    |1.0|   |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
20127     // |1   |0.5|        |        |   |0.5|  | lea rax, ptr [rsp-0x18]
20128     // |1   |   |0.5, 0.5|0.5, 0.5|   |   |CP| mov al, byte ptr [rdi+rax*1]
20129     // Total Num Of Uops: 4
20130 
20131     return SDValue();
20132   }
20133 
20134   unsigned IdxVal = IdxC->getZExtValue();
20135 
20136   // If this is a 256-bit or 512-bit vector result, first extract the 128-bit
20137   // subvector and then extract the element from that 128-bit vector.
20138   if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
20139     // Get the 128-bit vector.
20140     Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
20141     MVT EltVT = VecVT.getVectorElementType();
20142 
20143     unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
20144     assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
20145 
20146     // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
20147     // this can be done with a mask.
20148     IdxVal &= ElemsPerChunk - 1;
20149     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
20150                        DAG.getIntPtrConstant(IdxVal, dl));
20151   }
20152 
20153   assert(VecVT.is128BitVector() && "Unexpected vector length");
20154 
20155   MVT VT = Op.getSimpleValueType();
20156 
20157   if (VT == MVT::i16) {
20158     // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
20159     // we're going to zero extend the register or fold the store (SSE41 only).
20160     if (IdxVal == 0 && !X86::mayFoldIntoZeroExtend(Op) &&
20161         !(Subtarget.hasSSE41() && X86::mayFoldIntoStore(Op))) {
20162       if (Subtarget.hasFP16())
20163         return Op;
20164 
20165       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
20166                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20167                                      DAG.getBitcast(MVT::v4i32, Vec), Idx));
20168     }
20169 
20170     SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, Vec,
20171                                   DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20172     return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
20173   }
20174 
20175   if (Subtarget.hasSSE41())
20176     if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
20177       return Res;
20178 
20179   // TODO: We only extract a single element from v16i8; we can probably afford
20180   // to be more aggressive here before using the default approach of spilling to
20181   // stack.
20182   if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
20183     // Extract either the lowest i32 or any i16, and extract the sub-byte.
20184     int DWordIdx = IdxVal / 4;
20185     if (DWordIdx == 0) {
20186       SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
20187                                 DAG.getBitcast(MVT::v4i32, Vec),
20188                                 DAG.getIntPtrConstant(DWordIdx, dl));
20189       int ShiftVal = (IdxVal % 4) * 8;
20190       if (ShiftVal != 0)
20191         Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
20192                           DAG.getConstant(ShiftVal, dl, MVT::i8));
20193       return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20194     }
20195 
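    // Otherwise extract the containing i16 element and shift the requested
    // byte into the low 8 bits.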
20196     int WordIdx = IdxVal / 2;
20197     SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
20198                               DAG.getBitcast(MVT::v8i16, Vec),
20199                               DAG.getIntPtrConstant(WordIdx, dl));
20200     int ShiftVal = (IdxVal % 2) * 8;
20201     if (ShiftVal != 0)
20202       Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
20203                         DAG.getConstant(ShiftVal, dl, MVT::i8));
20204     return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20205   }
20206 
20207   if (VT == MVT::f16 || VT.getSizeInBits() == 32) {
20208     if (IdxVal == 0)
20209       return Op;
20210 
20211     // Shuffle the element to the lowest element, then movss or movsh.
20212     SmallVector<int, 8> Mask(VecVT.getVectorNumElements(), -1);
20213     Mask[0] = static_cast<int>(IdxVal);
20214     Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
20215     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
20216                        DAG.getIntPtrConstant(0, dl));
20217   }
20218 
20219   if (VT.getSizeInBits() == 64) {
20220     // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
20221     // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
20222     //        to match extract_elt for f64.
20223     if (IdxVal == 0)
20224       return Op;
20225 
20226     // UNPCKHPD the element to the lowest double word, then movsd.
20227     // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
20228     // to a f64mem, the whole operation is folded into a single MOVHPDmr.
20229     int Mask[2] = { 1, -1 };
20230     Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
20231     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
20232                        DAG.getIntPtrConstant(0, dl));
20233   }
20234 
20235   return SDValue();
20236 }
20237 
20238 /// Insert one bit to mask vector, like v16i1 or v8i1.
20239 /// AVX-512 feature.
20240 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
20241                                      const X86Subtarget &Subtarget) {
20242   SDLoc dl(Op);
20243   SDValue Vec = Op.getOperand(0);
20244   SDValue Elt = Op.getOperand(1);
20245   SDValue Idx = Op.getOperand(2);
20246   MVT VecVT = Vec.getSimpleValueType();
20247 
20248   if (!isa<ConstantSDNode>(Idx)) {
20249     // Non-constant index. Extend the source and destination,
20250     // insert the element and then truncate the result.
20251     unsigned NumElts = VecVT.getVectorNumElements();
20252     MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
20253     MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
20254     SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
20255       DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
20256       DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
20257     return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
20258   }
20259 
20260   // Copy into a k-register, extract to v1i1 and insert_subvector.
20261   SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
20262   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec, Idx);
20263 }
20264 
20265 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
20266                                                   SelectionDAG &DAG) const {
20267   MVT VT = Op.getSimpleValueType();
20268   MVT EltVT = VT.getVectorElementType();
20269   unsigned NumElts = VT.getVectorNumElements();
20270   unsigned EltSizeInBits = EltVT.getScalarSizeInBits();
20271 
20272   if (EltVT == MVT::i1)
20273     return InsertBitToMaskVector(Op, DAG, Subtarget);
20274 
20275   SDLoc dl(Op);
20276   SDValue N0 = Op.getOperand(0);
20277   SDValue N1 = Op.getOperand(1);
20278   SDValue N2 = Op.getOperand(2);
20279   auto *N2C = dyn_cast<ConstantSDNode>(N2);
20280 
20281   if (!N2C) {
20282     // For variable insertion indices we're usually better off spilling to the
20283     // stack, but AVX512 can use a variable compare+select by comparing against
20284     // all possible vector indices, and FP insertion has less gpr->simd traffic.
20285     if (!(Subtarget.hasBWI() ||
20286           (Subtarget.hasAVX512() && EltSizeInBits >= 32) ||
20287           (Subtarget.hasSSE41() && (EltVT == MVT::f32 || EltVT == MVT::f64))))
20288       return SDValue();
20289 
20290     MVT IdxSVT = MVT::getIntegerVT(EltSizeInBits);
20291     MVT IdxVT = MVT::getVectorVT(IdxSVT, NumElts);
20292     if (!isTypeLegal(IdxSVT) || !isTypeLegal(IdxVT))
20293       return SDValue();
20294 
20295     SDValue IdxExt = DAG.getZExtOrTrunc(N2, dl, IdxSVT);
20296     SDValue IdxSplat = DAG.getSplatBuildVector(IdxVT, dl, IdxExt);
20297     SDValue EltSplat = DAG.getSplatBuildVector(VT, dl, N1);
20298 
20299     SmallVector<SDValue, 16> RawIndices;
20300     for (unsigned I = 0; I != NumElts; ++I)
20301       RawIndices.push_back(DAG.getConstant(I, dl, IdxSVT));
20302     SDValue Indices = DAG.getBuildVector(IdxVT, dl, RawIndices);
20303 
20304     // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
20305     return DAG.getSelectCC(dl, IdxSplat, Indices, EltSplat, N0,
20306                            ISD::CondCode::SETEQ);
20307   }
20308 
20309   if (N2C->getAPIntValue().uge(NumElts))
20310     return SDValue();
20311   uint64_t IdxVal = N2C->getZExtValue();
20312 
20313   bool IsZeroElt = X86::isZeroNode(N1);
20314   bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
20315 
20316   if (IsZeroElt || IsAllOnesElt) {
20317     // Lower insertion of v16i8/v32i8/v16i16 -1 elts as an 'OR' blend.
20318     // We don't deal with i8 0 since it appears to be handled elsewhere.
20319     if (IsAllOnesElt &&
20320         ((VT == MVT::v16i8 && !Subtarget.hasSSE41()) ||
20321          ((VT == MVT::v32i8 || VT == MVT::v16i16) && !Subtarget.hasInt256()))) {
20322       SDValue ZeroCst = DAG.getConstant(0, dl, VT.getScalarType());
20323       SDValue OnesCst = DAG.getAllOnesConstant(dl, VT.getScalarType());
20324       SmallVector<SDValue, 8> CstVectorElts(NumElts, ZeroCst);
20325       CstVectorElts[IdxVal] = OnesCst;
20326       SDValue CstVector = DAG.getBuildVector(VT, dl, CstVectorElts);
20327       return DAG.getNode(ISD::OR, dl, VT, N0, CstVector);
20328     }
20329     // See if we can do this more efficiently with a blend shuffle with a
20330     // rematerializable vector.
20331     if (Subtarget.hasSSE41() &&
20332         (EltSizeInBits >= 16 || (IsZeroElt && !VT.is128BitVector()))) {
20333       SmallVector<int, 8> BlendMask;
20334       for (unsigned i = 0; i != NumElts; ++i)
20335         BlendMask.push_back(i == IdxVal ? i + NumElts : i);
20336       SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
20337                                     : getOnesVector(VT, DAG, dl);
20338       return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
20339     }
20340   }
20341 
20342   // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
20343   // into that, and then insert the subvector back into the result.
20344   if (VT.is256BitVector() || VT.is512BitVector()) {
20345     // With a 256-bit vector, we can insert into the zero element efficiently
20346     // using a blend if we have AVX or AVX2 and the right data type.
20347     if (VT.is256BitVector() && IdxVal == 0) {
20348       // TODO: It is worthwhile to cast integer to floating point and back
20349       // and incur a domain crossing penalty if that's what we'll end up
20350       // doing anyway after extracting to a 128-bit vector.
20351       if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
20352           (Subtarget.hasAVX2() && (EltVT == MVT::i32 || EltVT == MVT::i64))) {
20353         SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
20354         return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
20355                            DAG.getTargetConstant(1, dl, MVT::i8));
20356       }
20357     }
20358 
20359     unsigned NumEltsIn128 = 128 / EltSizeInBits;
20360     assert(isPowerOf2_32(NumEltsIn128) &&
20361            "Vectors will always have power-of-two number of elements.");
20362 
20363     // If we are not inserting into the low 128-bit vector chunk,
20364     // then prefer the broadcast+blend sequence.
20365     // FIXME: relax the profitability check iff all N1 uses are insertions.
20366     if (IdxVal >= NumEltsIn128 &&
20367         ((Subtarget.hasAVX2() && EltSizeInBits != 8) ||
20368          (Subtarget.hasAVX() && (EltSizeInBits >= 32) &&
20369           X86::mayFoldLoad(N1, Subtarget)))) {
20370       SDValue N1SplatVec = DAG.getSplatBuildVector(VT, dl, N1);
20371       SmallVector<int, 8> BlendMask;
20372       for (unsigned i = 0; i != NumElts; ++i)
20373         BlendMask.push_back(i == IdxVal ? i + NumElts : i);
20374       return DAG.getVectorShuffle(VT, dl, N0, N1SplatVec, BlendMask);
20375     }
20376 
20377     // Get the desired 128-bit vector chunk.
20378     SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
20379 
20380     // Insert the element into the desired chunk.
20381     // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
20382     unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
20383 
20384     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
20385                     DAG.getIntPtrConstant(IdxIn128, dl));
20386 
20387     // Insert the changed part back into the bigger vector
20388     return insert128BitVector(N0, V, IdxVal, DAG, dl);
20389   }
20390   assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
20391 
20392   // This will be just movw/movd/movq/movsh/movss/movsd.
20393   if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode())) {
20394     if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
20395         EltVT == MVT::f16 || EltVT == MVT::i64) {
20396       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
20397       return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
20398     }
20399 
20400     // We can't directly insert an i8 or i16 into a vector, so zero extend
20401     // it to i32 first.
20402     if (EltVT == MVT::i16 || EltVT == MVT::i8) {
20403       N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, N1);
20404       MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
20405       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, N1);
20406       N1 = getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
20407       return DAG.getBitcast(VT, N1);
20408     }
20409   }
20410 
20411   // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
20412   // argument. SSE41 is required for pinsrb.
20413   if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
20414     unsigned Opc;
20415     if (VT == MVT::v8i16) {
20416       assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
20417       Opc = X86ISD::PINSRW;
20418     } else {
20419       assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
20420       assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
20421       Opc = X86ISD::PINSRB;
20422     }
20423 
20424     assert(N1.getValueType() != MVT::i32 && "Unexpected VT");
20425     N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
20426     N2 = DAG.getTargetConstant(IdxVal, dl, MVT::i8);
20427     return DAG.getNode(Opc, dl, VT, N0, N1, N2);
20428   }
20429 
20430   if (Subtarget.hasSSE41()) {
20431     if (EltVT == MVT::f32) {
20432       // Bits [7:6] of the constant are the source select. This will always be
20433       //   zero here. The DAG Combiner may combine an extract_elt index into
20434       //   these bits. For example (insert (extract, 3), 2) could be matched by
20435       //   putting the '3' into bits [7:6] of X86ISD::INSERTPS.
20436       // Bits [5:4] of the constant are the destination select. This is the
20437       //   value of the incoming immediate.
20438       // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
20439       //   combine either bitwise AND or insert of float 0.0 to set these bits.
20440 
20441       bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
20442       if (IdxVal == 0 && (!MinSize || !X86::mayFoldLoad(N1, Subtarget))) {
20443         // If this is an insertion of 32-bits into the low 32-bits of
20444         // a vector, we prefer to generate a blend with immediate rather
20445         // than an insertps. Blends are simpler operations in hardware and so
20446         // will always have equal or better performance than insertps.
20447         // But if optimizing for size and there's a load folding opportunity,
20448         // generate insertps because blendps does not have a 32-bit memory
20449         // operand form.
20450         N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
20451         return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
20452                            DAG.getTargetConstant(1, dl, MVT::i8));
20453       }
20454       // Create this as a scalar to vector.
20455       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
20456       return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
20457                          DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
20458     }
20459 
20460     // PINSR* works with constant index.
20461     if (EltVT == MVT::i32 || EltVT == MVT::i64)
20462       return Op;
20463   }
20464 
20465   return SDValue();
20466 }
20467 
20468 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
20469                                      SelectionDAG &DAG) {
20470   SDLoc dl(Op);
20471   MVT OpVT = Op.getSimpleValueType();
20472 
20473   // It's always cheaper to replace a xor+movd with xorps, and it simplifies
20474   // further combines.
20475   if (X86::isZeroNode(Op.getOperand(0)))
20476     return getZeroVector(OpVT, Subtarget, DAG, dl);
20477 
20478   // If this is a wider than 128-bit vector result, first insert into a 128-bit
20479   // vector and then insert that into the full-width vector.
20480   if (!OpVT.is128BitVector()) {
20481     // Insert into a 128-bit vector.
20482     unsigned SizeFactor = OpVT.getSizeInBits() / 128;
20483     MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
20484                                  OpVT.getVectorNumElements() / SizeFactor);
20485 
20486     Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
20487 
20488     // Insert the 128-bit vector.
20489     return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
20490   }
20491   assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
20492          "Expected an SSE type!");
20493 
20494   // Pass through a v4i32 or v8i16 SCALAR_TO_VECTOR as that's what we use in
20495   // tblgen.
20496   if (OpVT == MVT::v4i32 || (OpVT == MVT::v8i16 && Subtarget.hasFP16()))
20497     return Op;
20498 
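  // Widen the scalar to i32, emit a v4i32 SCALAR_TO_VECTOR, and bitcast the
  // result back to the requested type (v16i8, or v8i16 without FP16).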
20499   SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
20500   return DAG.getBitcast(
20501       OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
20502 }
20503 
20504 // Lower a node with an INSERT_SUBVECTOR opcode.  This may result in a
20505 // simple superregister reference or explicit instructions to insert
20506 // the upper bits of a vector.
20507 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
20508                                      SelectionDAG &DAG) {
20509   assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
20510 
20511   return insert1BitVector(Op, DAG, Subtarget);
20512 }
20513 
20514 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
20515                                       SelectionDAG &DAG) {
20516   assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
20517          "Only vXi1 extract_subvectors need custom lowering");
20518 
20519   SDLoc dl(Op);
20520   SDValue Vec = Op.getOperand(0);
20521   uint64_t IdxVal = Op.getConstantOperandVal(1);
20522 
20523   if (IdxVal == 0) // the operation is legal
20524     return Op;
20525 
20526   MVT VecVT = Vec.getSimpleValueType();
20527   unsigned NumElems = VecVT.getVectorNumElements();
20528 
20529   // Extend to natively supported kshift.
20530   MVT WideVecVT = VecVT;
20531   if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
20532     WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
20533     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
20534                       DAG.getUNDEF(WideVecVT), Vec,
20535                       DAG.getIntPtrConstant(0, dl));
20536   }
20537 
20538   // Shift to the LSB.
20539   Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
20540                     DAG.getTargetConstant(IdxVal, dl, MVT::i8));
20541 
20542   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
20543                      DAG.getIntPtrConstant(0, dl));
20544 }
20545 
20546 // Returns the appropriate wrapper opcode for a global reference.
20547 unsigned X86TargetLowering::getGlobalWrapperKind(
20548     const GlobalValue *GV, const unsigned char OpFlags) const {
20549   // References to absolute symbols are never PC-relative.
20550   if (GV && GV->isAbsoluteSymbolRef())
20551     return X86ISD::Wrapper;
20552 
20553   CodeModel::Model M = getTargetMachine().getCodeModel();
20554   if (Subtarget.isPICStyleRIPRel() &&
20555       (M == CodeModel::Small || M == CodeModel::Kernel))
20556     return X86ISD::WrapperRIP;
20557 
20558   // In the medium model, functions can always be referenced RIP-relatively,
20559   // since they must be within 2GiB. This is also possible in non-PIC mode, and
20560   // shorter than the 64-bit absolute immediate that would otherwise be emitted.
20561   if (M == CodeModel::Medium && isa_and_nonnull<Function>(GV))
20562     return X86ISD::WrapperRIP;
20563 
20564   // GOTPCREL references must always use RIP.
20565   if (OpFlags == X86II::MO_GOTPCREL || OpFlags == X86II::MO_GOTPCREL_NORELAX)
20566     return X86ISD::WrapperRIP;
20567 
20568   return X86ISD::Wrapper;
20569 }
20570 
20571 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
20572 // their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is
20573 // one of the above-mentioned nodes. It has to be wrapped because otherwise
20574 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
20575 // be used to form an addressing mode. These wrapped nodes will be selected
20576 // into MOV32ri.
20577 SDValue
20578 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
20579   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
20580 
20581   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
20582   // global base reg.
20583   unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
20584 
20585   auto PtrVT = getPointerTy(DAG.getDataLayout());
20586   SDValue Result = DAG.getTargetConstantPool(
20587       CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
20588   SDLoc DL(CP);
20589   Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
20590   // With PIC, the address is actually $g + Offset.
20591   if (OpFlag) {
20592     Result =
20593         DAG.getNode(ISD::ADD, DL, PtrVT,
20594                     DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
20595   }
20596 
20597   return Result;
20598 }
20599 
20600 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
20601   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
20602 
20603   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
20604   // global base reg.
20605   unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
20606 
20607   auto PtrVT = getPointerTy(DAG.getDataLayout());
20608   SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
20609   SDLoc DL(JT);
20610   Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
20611 
20612   // With PIC, the address is actually $g + Offset.
20613   if (OpFlag)
20614     Result =
20615         DAG.getNode(ISD::ADD, DL, PtrVT,
20616                     DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
20617 
20618   return Result;
20619 }
20620 
20621 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
20622                                                SelectionDAG &DAG) const {
20623   return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
20624 }
20625 
20626 SDValue
20627 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
20628   // Create the TargetBlockAddress node.
20629   unsigned char OpFlags =
20630     Subtarget.classifyBlockAddressReference();
20631   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
20632   int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
20633   SDLoc dl(Op);
20634   auto PtrVT = getPointerTy(DAG.getDataLayout());
20635   SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
20636   Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);
20637 
20638   // With PIC, the address is actually $g + Offset.
20639   if (isGlobalRelativeToPICBase(OpFlags)) {
20640     Result = DAG.getNode(ISD::ADD, dl, PtrVT,
20641                          DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
20642   }
20643 
20644   return Result;
20645 }
20646 
20647 /// Creates target global address or external symbol nodes for calls or
20648 /// other uses.
20649 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
20650                                                  bool ForCall) const {
20651   // Unpack the global address or external symbol.
20652   const SDLoc &dl = SDLoc(Op);
20653   const GlobalValue *GV = nullptr;
20654   int64_t Offset = 0;
20655   const char *ExternalSym = nullptr;
20656   if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
20657     GV = G->getGlobal();
20658     Offset = G->getOffset();
20659   } else {
20660     const auto *ES = cast<ExternalSymbolSDNode>(Op);
20661     ExternalSym = ES->getSymbol();
20662   }
20663 
20664   // Calculate some flags for address lowering.
20665   const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
20666   unsigned char OpFlags;
20667   if (ForCall)
20668     OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
20669   else
20670     OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
20671   bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
20672   bool NeedsLoad = isGlobalStubReference(OpFlags);
20673 
20674   CodeModel::Model M = DAG.getTarget().getCodeModel();
20675   auto PtrVT = getPointerTy(DAG.getDataLayout());
20676   SDValue Result;
20677 
20678   if (GV) {
20679     // Create a target global address if this is a global. If possible, fold the
20680     // offset into the global address reference. Otherwise, ADD it on later.
20681     // Suppress the folding if Offset is negative: movl foo-1, %eax is not
20682     // allowed because if the address of foo is 0, the ELF R_X86_64_32
20683     // relocation will compute to a negative value, which is invalid.
20684     int64_t GlobalOffset = 0;
20685     if (OpFlags == X86II::MO_NO_FLAG && Offset >= 0 &&
20686         X86::isOffsetSuitableForCodeModel(Offset, M, true)) {
20687       std::swap(GlobalOffset, Offset);
20688     }
20689     Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
20690   } else {
20691     // If this is not a global address, this must be an external symbol.
20692     Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
20693   }
20694 
20695   // If this is a direct call, avoid the wrapper if we don't need to do any
20696   // loads or adds. This allows SDAG ISel to match direct calls.
20697   if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
20698     return Result;
20699 
20700   Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
20701 
20702   // With PIC, the address is actually $g + Offset.
20703   if (HasPICReg) {
20704     Result = DAG.getNode(ISD::ADD, dl, PtrVT,
20705                          DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
20706   }
20707 
20708   // For globals that require a load from a stub to get the address, emit the
20709   // load.
20710   if (NeedsLoad)
20711     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
20712                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
20713 
20714   // If there was a non-zero offset that we didn't fold, create an explicit
20715   // addition for it.
20716   if (Offset != 0)
20717     Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
20718                          DAG.getConstant(Offset, dl, PtrVT));
20719 
20720   return Result;
20721 }
20722 
20723 SDValue
20724 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
20725   return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
20726 }
20727 
20728 static SDValue
20729 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
20730            SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
20731            unsigned char OperandFlags, bool LocalDynamic = false) {
20732   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
20733   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
20734   SDLoc dl(GA);
20735   SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
20736                                            GA->getValueType(0),
20737                                            GA->getOffset(),
20738                                            OperandFlags);
20739 
20740   X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
20741                                            : X86ISD::TLSADDR;
20742 
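  // If incoming glue was provided (the 32-bit models glue this to the copy of
  // the PIC base into EBX), thread it through the TLS call node.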
20743   if (InFlag) {
20744     SDValue Ops[] = { Chain,  TGA, *InFlag };
20745     Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
20746   } else {
20747     SDValue Ops[]  = { Chain, TGA };
20748     Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
20749   }
20750 
20751   // TLSADDR is codegen'ed as a call. Inform MFI that the function has calls.
20752   MFI.setAdjustsStack(true);
20753   MFI.setHasCalls(true);
20754 
20755   SDValue Flag = Chain.getValue(1);
20756   return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
20757 }
20758 
20759 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
20760 static SDValue
20761 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20762                                 const EVT PtrVT) {
20763   SDValue InFlag;
20764   SDLoc dl(GA);  // ? function entry point might be better
20765   SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
20766                                    DAG.getNode(X86ISD::GlobalBaseReg,
20767                                                SDLoc(), PtrVT), InFlag);
20768   InFlag = Chain.getValue(1);
20769 
20770   return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
20771 }
20772 
20773 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit LP64
20774 static SDValue
20775 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20776                                 const EVT PtrVT) {
20777   return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
20778                     X86::RAX, X86II::MO_TLSGD);
20779 }
20780 
20781 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit ILP32
20782 static SDValue
20783 LowerToTLSGeneralDynamicModelX32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20784                                  const EVT PtrVT) {
20785   return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
20786                     X86::EAX, X86II::MO_TLSGD);
20787 }
20788 
20789 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
20790                                            SelectionDAG &DAG, const EVT PtrVT,
20791                                            bool Is64Bit, bool Is64BitLP64) {
20792   SDLoc dl(GA);
20793 
20794   // Get the start address of the TLS block for this module.
20795   X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
20796       .getInfo<X86MachineFunctionInfo>();
20797   MFI->incNumLocalDynamicTLSAccesses();
20798 
20799   SDValue Base;
20800   if (Is64Bit) {
20801     unsigned ReturnReg = Is64BitLP64 ? X86::RAX : X86::EAX;
20802     Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, ReturnReg,
20803                       X86II::MO_TLSLD, /*LocalDynamic=*/true);
20804   } else {
20805     SDValue InFlag;
20806     SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
20807         DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
20808     InFlag = Chain.getValue(1);
20809     Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
20810                       X86II::MO_TLSLDM, /*LocalDynamic=*/true);
20811   }
20812 
20813   // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
20814   // of Base.
20815 
20816   // Build x@dtpoff.
20817   unsigned char OperandFlags = X86II::MO_DTPOFF;
20818   unsigned WrapperKind = X86ISD::Wrapper;
20819   SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
20820                                            GA->getValueType(0),
20821                                            GA->getOffset(), OperandFlags);
20822   SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
20823 
20824   // Add x@dtpoff with the base.
20825   return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
20826 }
20827 
20828 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
20829 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
20830                                    const EVT PtrVT, TLSModel::Model model,
20831                                    bool is64Bit, bool isPIC) {
20832   SDLoc dl(GA);
20833 
20834   // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
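  // Address space 256 selects the %gs segment and 257 selects %fs for the
  // load below.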
20835   Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
20836                                                          is64Bit ? 257 : 256));
20837 
20838   SDValue ThreadPointer =
20839       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
20840                   MachinePointerInfo(Ptr));
20841 
20842   unsigned char OperandFlags = 0;
20843   // Most TLS accesses are not RIP relative, even on x86-64.  One exception is
20844   // initialexec.
20845   unsigned WrapperKind = X86ISD::Wrapper;
20846   if (model == TLSModel::LocalExec) {
20847     OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
20848   } else if (model == TLSModel::InitialExec) {
20849     if (is64Bit) {
20850       OperandFlags = X86II::MO_GOTTPOFF;
20851       WrapperKind = X86ISD::WrapperRIP;
20852     } else {
20853       OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
20854     }
20855   } else {
20856     llvm_unreachable("Unexpected model");
20857   }
20858 
20859   // emit "addl x@ntpoff,%eax" (local exec)
20860   // or "addl x@indntpoff,%eax" (initial exec)
20861   // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
20862   SDValue TGA =
20863       DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
20864                                  GA->getOffset(), OperandFlags);
20865   SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
20866 
20867   if (model == TLSModel::InitialExec) {
20868     if (isPIC && !is64Bit) {
20869       Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
20870                            DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
20871                            Offset);
20872     }
20873 
20874     Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
20875                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
20876   }
20877 
20878   // The address of the thread local variable is the add of the thread
20879   // pointer with the offset of the variable.
20880   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
20881 }
20882 
20883 SDValue
20884 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
20885 
20886   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
20887 
20888   if (DAG.getTarget().useEmulatedTLS())
20889     return LowerToTLSEmulatedModel(GA, DAG);
20890 
20891   const GlobalValue *GV = GA->getGlobal();
20892   auto PtrVT = getPointerTy(DAG.getDataLayout());
20893   bool PositionIndependent = isPositionIndependent();
20894 
20895   if (Subtarget.isTargetELF()) {
20896     TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
20897     switch (model) {
20898       case TLSModel::GeneralDynamic:
20899         if (Subtarget.is64Bit()) {
20900           if (Subtarget.isTarget64BitLP64())
20901             return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
20902           return LowerToTLSGeneralDynamicModelX32(GA, DAG, PtrVT);
20903         }
20904         return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
20905       case TLSModel::LocalDynamic:
20906         return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT, Subtarget.is64Bit(),
20907                                            Subtarget.isTarget64BitLP64());
20908       case TLSModel::InitialExec:
20909       case TLSModel::LocalExec:
20910         return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
20911                                    PositionIndependent);
20912     }
20913     llvm_unreachable("Unknown TLS model.");
20914   }
20915 
20916   if (Subtarget.isTargetDarwin()) {
20917     // Darwin only has one model of TLS.  Lower to that.
20918     unsigned char OpFlag = 0;
20919     unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
20920                            X86ISD::WrapperRIP : X86ISD::Wrapper;
20921 
20922     // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
20923     // global base reg.
20924     bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
20925     if (PIC32)
20926       OpFlag = X86II::MO_TLVP_PIC_BASE;
20927     else
20928       OpFlag = X86II::MO_TLVP;
20929     SDLoc DL(Op);
20930     SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
20931                                                 GA->getValueType(0),
20932                                                 GA->getOffset(), OpFlag);
20933     SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
20934 
20935     // With PIC32, the address is actually $g + Offset.
20936     if (PIC32)
20937       Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
20938                            DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
20939                            Offset);
20940 
20941     // Lowering the machine ISD node will make sure everything is in the right
20942     // location.
20943     SDValue Chain = DAG.getEntryNode();
20944     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
20945     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
20946     SDValue Args[] = { Chain, Offset };
20947     Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
20948     Chain = DAG.getCALLSEQ_END(Chain, 0, 0, Chain.getValue(1), DL);
20949 
20950     // TLSCALL is codegen'ed as a call. Inform MFI that the function has calls.
20951     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
20952     MFI.setAdjustsStack(true);
20953 
20954     // And our return value (tls address) is in the standard call return value
20955     // location.
20956     unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
20957     return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
20958   }
20959 
20960   if (Subtarget.isOSWindows()) {
20961     // Just use the implicit TLS architecture
20962     // Need to generate something similar to:
20963     //   mov     rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
20964     //                                  ; from TEB
20965     //   mov     ecx, dword [rel _tls_index]: Load index (from C runtime)
20966     //   mov     rcx, qword [rdx+rcx*8]
20967     //   mov     eax, .tls$:tlsvar
20968     //   [rax+rcx] contains the address
20969     // Windows 64bit: gs:0x58
20970     // Windows 32bit: fs:__tls_array
20971 
20972     SDLoc dl(GA);
20973     SDValue Chain = DAG.getEntryNode();
20974 
20975     // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
20976     // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
20977     // use its literal value of 0x2C.
20978     Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
20979                                         ? Type::getInt8PtrTy(*DAG.getContext(),
20980                                                              256)
20981                                         : Type::getInt32PtrTy(*DAG.getContext(),
20982                                                               257));
20983 
20984     SDValue TlsArray = Subtarget.is64Bit()
20985                            ? DAG.getIntPtrConstant(0x58, dl)
20986                            : (Subtarget.isTargetWindowsGNU()
20987                                   ? DAG.getIntPtrConstant(0x2C, dl)
20988                                   : DAG.getExternalSymbol("_tls_array", PtrVT));
20989 
20990     SDValue ThreadPointer =
20991         DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
20992 
20993     SDValue res;
20994     if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
20995       res = ThreadPointer;
20996     } else {
20997       // Load the _tls_index variable
20998       SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
20999       if (Subtarget.is64Bit())
21000         IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
21001                              MachinePointerInfo(), MVT::i32);
21002       else
21003         IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
21004 
21005       const DataLayout &DL = DAG.getDataLayout();
21006       SDValue Scale =
21007           DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
21008       IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
21009 
21010       res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
21011     }
21012 
21013     res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
21014 
21015     // Get the offset of start of .tls section
21016     SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
21017                                              GA->getValueType(0),
21018                                              GA->getOffset(), X86II::MO_SECREL);
21019     SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
21020 
21021     // The address of the thread local variable is the add of the thread
21022     // pointer with the offset of the variable.
21023     return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
21024   }
21025 
21026   llvm_unreachable("TLS not implemented for this target.");
21027 }
21028 
21029 /// Lower SRA_PARTS and friends, which return two i32 values
21030 /// and take a 2 x i32 value to shift plus a shift amount.
21031 /// TODO: Can this be moved to general expansion code?
21032 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
21033   SDValue Lo, Hi;
21034   DAG.getTargetLoweringInfo().expandShiftParts(Op.getNode(), Lo, Hi, DAG);
21035   return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
21036 }
21037 
21038 // Try to use a packed vector operation to handle i64 on 32-bit targets when
21039 // AVX512DQ is enabled.
21040 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
21041                                         const X86Subtarget &Subtarget) {
21042   assert((Op.getOpcode() == ISD::SINT_TO_FP ||
21043           Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
21044           Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
21045           Op.getOpcode() == ISD::UINT_TO_FP) &&
21046          "Unexpected opcode!");
21047   bool IsStrict = Op->isStrictFPOpcode();
21048   unsigned OpNo = IsStrict ? 1 : 0;
21049   SDValue Src = Op.getOperand(OpNo);
21050   MVT SrcVT = Src.getSimpleValueType();
21051   MVT VT = Op.getSimpleValueType();
21052 
21053   if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
21054       (VT != MVT::f32 && VT != MVT::f64))
21055     return SDValue();
21056 
21057   // Pack the i64 into a vector, do the operation and extract.
21058 
21059   // Use a 256-bit source so the result is 128 bits for the f32 case.
21060   unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
21061   MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
21062   MVT VecVT = MVT::getVectorVT(VT, NumElts);
21063 
21064   SDLoc dl(Op);
21065   SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
21066   if (IsStrict) {
21067     SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
21068                                  {Op.getOperand(0), InVec});
21069     SDValue Chain = CvtVec.getValue(1);
21070     SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
21071                                 DAG.getIntPtrConstant(0, dl));
21072     return DAG.getMergeValues({Value, Chain}, dl);
21073   }
21074 
21075   SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
21076 
21077   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
21078                      DAG.getIntPtrConstant(0, dl));
21079 }
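// A rough sketch of the rewrite above (illustrative only; the exact nodes
// depend on VLX and the destination type). For an f64 destination with VLX,
// an i64 source on a 32-bit target becomes approximately:
//   %v   = scalar_to_vector i64 %x        ; v4i64, upper lanes undefined
//   %cvt = sint_to_fp %v                  ; v4f64, e.g. vcvtqq2pd
//   %res = extract_vector_elt %cvt, 0
// so the i64 never has to be materialized in a GPR pair.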
21080 
21081 // Try to use a packed vector operation to handle i64 on 32-bit targets.
21082 static SDValue LowerI64IntToFP16(SDValue Op, SelectionDAG &DAG,
21083                                  const X86Subtarget &Subtarget) {
21084   assert((Op.getOpcode() == ISD::SINT_TO_FP ||
21085           Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
21086           Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
21087           Op.getOpcode() == ISD::UINT_TO_FP) &&
21088          "Unexpected opcode!");
21089   bool IsStrict = Op->isStrictFPOpcode();
21090   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21091   MVT SrcVT = Src.getSimpleValueType();
21092   MVT VT = Op.getSimpleValueType();
21093 
21094   if (SrcVT != MVT::i64 || Subtarget.is64Bit() || VT != MVT::f16)
21095     return SDValue();
21096 
21097   // Pack the i64 into a vector, do the operation and extract.
21098 
21099   assert(Subtarget.hasFP16() && "Expected FP16");
21100 
21101   SDLoc dl(Op);
21102   SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
21103   if (IsStrict) {
21104     SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {MVT::v2f16, MVT::Other},
21105                                  {Op.getOperand(0), InVec});
21106     SDValue Chain = CvtVec.getValue(1);
21107     SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
21108                                 DAG.getIntPtrConstant(0, dl));
21109     return DAG.getMergeValues({Value, Chain}, dl);
21110   }
21111 
21112   SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, MVT::v2f16, InVec);
21113 
21114   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
21115                      DAG.getIntPtrConstant(0, dl));
21116 }
21117 
21118 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
21119                           const X86Subtarget &Subtarget) {
21120   switch (Opcode) {
21121     case ISD::SINT_TO_FP:
21122       // TODO: Handle wider types with AVX/AVX512.
21123       if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
21124         return false;
21125       // CVTDQ2PS or (V)CVTDQ2PD
21126       return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
21127 
21128     case ISD::UINT_TO_FP:
21129       // TODO: Handle wider types and i64 elements.
21130       if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
21131         return false;
21132       // VCVTUDQ2PS or VCVTUDQ2PD
21133       return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
21134 
21135     default:
21136       return false;
21137   }
21138 }
21139 
21140 /// Given a scalar cast operation that is extracted from a vector, try to
21141 /// vectorize the cast op followed by extraction. This will avoid an expensive
21142 /// round-trip between XMM and GPR.
21143 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
21144                                       const X86Subtarget &Subtarget) {
21145   // TODO: This could be enhanced to handle smaller integer types by peeking
21146   // through an extend.
21147   SDValue Extract = Cast.getOperand(0);
21148   MVT DestVT = Cast.getSimpleValueType();
21149   if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21150       !isa<ConstantSDNode>(Extract.getOperand(1)))
21151     return SDValue();
21152 
21153   // See if we have a 128-bit vector cast op for this type of cast.
21154   SDValue VecOp = Extract.getOperand(0);
21155   MVT FromVT = VecOp.getSimpleValueType();
21156   unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
21157   MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
21158   MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
21159   if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
21160     return SDValue();
21161 
21162   // If we are extracting from a non-zero element, first shuffle the source
21163   // vector to allow extracting from element zero.
21164   SDLoc DL(Cast);
21165   if (!isNullConstant(Extract.getOperand(1))) {
21166     SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
21167     Mask[0] = Extract.getConstantOperandVal(1);
21168     VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
21169   }
21170   // If the source vector is wider than 128-bits, extract the low part. Do not
21171   // create an unnecessarily wide vector cast op.
21172   if (FromVT != Vec128VT)
21173     VecOp = extract128BitVector(VecOp, 0, DAG, DL);
21174 
21175   // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
21176   // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
21177   SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
21178   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
21179                      DAG.getIntPtrConstant(0, DL));
21180 }
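// Illustrative example (assuming SSE2 and a v4i32 source): a scalar cast such
// as
//   (f32 (sint_to_fp (extract_vector_elt v4i32:%v, 2)))
// is turned into roughly
//   (extract_vector_elt (v4f32 sint_to_fp (shuffle %v, undef, <2,u,u,u>)), 0)
// which keeps the whole computation in XMM registers.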
21181 
21182 /// Given a scalar cast to FP with a cast to integer operand (almost an ftrunc),
21183 /// try to vectorize the cast ops. This will avoid an expensive round-trip
21184 /// between XMM and GPR.
21185 static SDValue lowerFPToIntToFP(SDValue CastToFP, SelectionDAG &DAG,
21186                                 const X86Subtarget &Subtarget) {
21187   // TODO: Allow FP_TO_UINT.
21188   SDValue CastToInt = CastToFP.getOperand(0);
21189   MVT VT = CastToFP.getSimpleValueType();
21190   if (CastToInt.getOpcode() != ISD::FP_TO_SINT || VT.isVector())
21191     return SDValue();
21192 
21193   MVT IntVT = CastToInt.getSimpleValueType();
21194   SDValue X = CastToInt.getOperand(0);
21195   MVT SrcVT = X.getSimpleValueType();
21196   if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
21197     return SDValue();
21198 
21199   // See if we have 128-bit vector cast instructions for this type of cast.
21200   // We need cvttps2dq/cvttpd2dq and cvtdq2ps/cvtdq2pd.
21201   if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) ||
21202       IntVT != MVT::i32)
21203     return SDValue();
21204 
21205   unsigned SrcSize = SrcVT.getSizeInBits();
21206   unsigned IntSize = IntVT.getSizeInBits();
21207   unsigned VTSize = VT.getSizeInBits();
21208   MVT VecSrcVT = MVT::getVectorVT(SrcVT, 128 / SrcSize);
21209   MVT VecIntVT = MVT::getVectorVT(IntVT, 128 / IntSize);
21210   MVT VecVT = MVT::getVectorVT(VT, 128 / VTSize);
21211 
21212   // We need target-specific opcodes if this is v2f64 -> v4i32 -> v2f64.
21213   unsigned ToIntOpcode =
21214       SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
21215   unsigned ToFPOpcode =
21216       IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;
21217 
21218   // sint_to_fp (fp_to_sint X) --> extelt (sint_to_fp (fp_to_sint (s2v X))), 0
21219   //
21220   // We are not defining the high elements (for example, zero them) because
21221   // that could nullify any performance advantage that we hoped to gain from
21222   // this vector op hack. We do not expect any adverse effects (like denorm
21223   // penalties) with cast ops.
21224   SDLoc DL(CastToFP);
21225   SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
21226   SDValue VecX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, X);
21227   SDValue VCastToInt = DAG.getNode(ToIntOpcode, DL, VecIntVT, VecX);
21228   SDValue VCastToFP = DAG.getNode(ToFPOpcode, DL, VecVT, VCastToInt);
21229   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VCastToFP, ZeroIdx);
21230 }
21231 
21232 static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
21233                                     const X86Subtarget &Subtarget) {
21234   SDLoc DL(Op);
21235   bool IsStrict = Op->isStrictFPOpcode();
21236   MVT VT = Op->getSimpleValueType(0);
21237   SDValue Src = Op->getOperand(IsStrict ? 1 : 0);
21238 
21239   if (Subtarget.hasDQI()) {
21240     assert(!Subtarget.hasVLX() && "Unexpected features");
21241 
21242     assert((Src.getSimpleValueType() == MVT::v2i64 ||
21243             Src.getSimpleValueType() == MVT::v4i64) &&
21244            "Unsupported custom type");
21245 
21246     // With AVX512DQ but not VLX, we need to widen to get a 512-bit result type.
21247     assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
21248            "Unexpected VT!");
21249     MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
21250 
21251     // Need to concat with zero vector for strict fp to avoid spurious
21252     // exceptions.
21253     SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
21254                            : DAG.getUNDEF(MVT::v8i64);
21255     Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
21256                       DAG.getIntPtrConstant(0, DL));
21257     SDValue Res, Chain;
21258     if (IsStrict) {
21259       Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
21260                         {Op->getOperand(0), Src});
21261       Chain = Res.getValue(1);
21262     } else {
21263       Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
21264     }
21265 
21266     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
21267                       DAG.getIntPtrConstant(0, DL));
21268 
21269     if (IsStrict)
21270       return DAG.getMergeValues({Res, Chain}, DL);
21271     return Res;
21272   }
21273 
21274   bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
21275                   Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
21276   if (VT != MVT::v4f32 || IsSigned)
21277     return SDValue();
21278 
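  // The rest of this function handles unsigned v4i64 -> v4f32 without DQI.
  // Roughly: inputs with the sign bit set are halved with the shifted-out low
  // bit ORed back in (a round-to-odd style trick so the later doubling still
  // rounds correctly), converted element by element as signed i64 -> f32, and
  // then doubled with an fadd; inputs that already fit in a signed i64 are
  // converted directly.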
21279   SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
21280   SDValue One  = DAG.getConstant(1, DL, MVT::v4i64);
21281   SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
21282                              DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
21283                              DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
21284   SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
21285   SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
21286   SmallVector<SDValue, 4> SignCvts(4);
21287   SmallVector<SDValue, 4> Chains(4);
21288   for (int i = 0; i != 4; ++i) {
21289     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
21290                               DAG.getIntPtrConstant(i, DL));
21291     if (IsStrict) {
21292       SignCvts[i] =
21293           DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
21294                       {Op.getOperand(0), Elt});
21295       Chains[i] = SignCvts[i].getValue(1);
21296     } else {
21297       SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Elt);
21298     }
21299   }
21300   SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);
21301 
21302   SDValue Slow, Chain;
21303   if (IsStrict) {
21304     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
21305     Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
21306                        {Chain, SignCvt, SignCvt});
21307     Chain = Slow.getValue(1);
21308   } else {
21309     Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
21310   }
21311 
21312   IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
21313   SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);
21314 
21315   if (IsStrict)
21316     return DAG.getMergeValues({Cvt, Chain}, DL);
21317 
21318   return Cvt;
21319 }
21320 
21321 static SDValue promoteXINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
21322   bool IsStrict = Op->isStrictFPOpcode();
21323   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21324   SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
21325   MVT VT = Op.getSimpleValueType();
21326   MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
21327   SDLoc dl(Op);
21328 
21329   SDValue Rnd = DAG.getIntPtrConstant(0, dl);
21330   if (IsStrict)
21331     return DAG.getNode(
21332         ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
21333         {Chain,
21334          DAG.getNode(Op.getOpcode(), dl, {NVT, MVT::Other}, {Chain, Src}),
21335          Rnd});
21336   return DAG.getNode(ISD::FP_ROUND, dl, VT,
21337                      DAG.getNode(Op.getOpcode(), dl, NVT, Src), Rnd);
21338 }
21339 
21340 static bool isLegalConversion(MVT VT, bool IsSigned,
21341                               const X86Subtarget &Subtarget) {
21342   if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
21343     return true;
21344   if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
21345     return true;
21346   if (Subtarget.hasVLX() && (VT == MVT::v4i32 || VT == MVT::v8i32))
21347     return true;
21348   if (Subtarget.useAVX512Regs()) {
21349     if (VT == MVT::v16i32)
21350       return true;
21351     if (VT == MVT::v8i64 && Subtarget.hasDQI())
21352       return true;
21353   }
21354   if (Subtarget.hasDQI() && Subtarget.hasVLX() &&
21355       (VT == MVT::v2i64 || VT == MVT::v4i64))
21356     return true;
21357   return false;
21358 }
21359 
21360 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
21361                                            SelectionDAG &DAG) const {
21362   bool IsStrict = Op->isStrictFPOpcode();
21363   unsigned OpNo = IsStrict ? 1 : 0;
21364   SDValue Src = Op.getOperand(OpNo);
21365   SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
21366   MVT SrcVT = Src.getSimpleValueType();
21367   MVT VT = Op.getSimpleValueType();
21368   SDLoc dl(Op);
21369 
21370   if (isSoftFP16(VT))
21371     return promoteXINT_TO_FP(Op, DAG);
21372   else if (isLegalConversion(SrcVT, true, Subtarget))
21373     return Op;
21374 
21375   if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
21376     return LowerWin64_INT128_TO_FP(Op, DAG);
21377 
21378   if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
21379     return Extract;
21380 
21381   if (SDValue R = lowerFPToIntToFP(Op, DAG, Subtarget))
21382     return R;
21383 
21384   if (SrcVT.isVector()) {
21385     if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
21386       // Note: Since v2f64 is a legal type, we don't need to zero extend the
21387       // source for strict FP.
21388       if (IsStrict)
21389         return DAG.getNode(
21390             X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
21391             {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
21392                                 DAG.getUNDEF(SrcVT))});
21393       return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
21394                          DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
21395                                      DAG.getUNDEF(SrcVT)));
21396     }
21397     if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
21398       return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
21399 
21400     return SDValue();
21401   }
21402 
21403   assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
21404          "Unknown SINT_TO_FP to lower!");
21405 
21406   bool UseSSEReg = isScalarFPTypeInSSEReg(VT);
21407 
21408   // These are really Legal; return the operand so the caller accepts it as
21409   // Legal.
21410   if (SrcVT == MVT::i32 && UseSSEReg)
21411     return Op;
21412   if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
21413     return Op;
21414 
21415   if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
21416     return V;
21417   if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
21418     return V;
21419 
21420   // SSE doesn't have an i16 conversion so we need to promote.
21421   if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
21422     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
21423     if (IsStrict)
21424       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
21425                          {Chain, Ext});
21426 
21427     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
21428   }
21429 
21430   if (VT == MVT::f128 || !Subtarget.hasX87())
21431     return SDValue();
21432 
21433   SDValue ValueToStore = Src;
21434   if (SrcVT == MVT::i64 && Subtarget.hasSSE2() && !Subtarget.is64Bit())
21435     // Bitcasting to f64 here allows us to do a single 64-bit store from
21436     // an SSE register, avoiding the store forwarding penalty that would come
21437     // with two 32-bit stores.
21438     ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
21439 
21440   unsigned Size = SrcVT.getStoreSize();
21441   Align Alignment(Size);
21442   MachineFunction &MF = DAG.getMachineFunction();
21443   auto PtrVT = getPointerTy(MF.getDataLayout());
21444   int SSFI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false);
21445   MachinePointerInfo MPI =
21446       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
21447   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
21448   Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Alignment);
21449   std::pair<SDValue, SDValue> Tmp =
21450       BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Alignment, DAG);
21451 
21452   if (IsStrict)
21453     return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
21454 
21455   return Tmp.first;
21456 }
21457 
21458 std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
21459     EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
21460     MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {
21461   // Build the FILD
21462   SDVTList Tys;
21463   bool useSSE = isScalarFPTypeInSSEReg(DstVT);
21464   if (useSSE)
21465     Tys = DAG.getVTList(MVT::f80, MVT::Other);
21466   else
21467     Tys = DAG.getVTList(DstVT, MVT::Other);
21468 
21469   SDValue FILDOps[] = {Chain, Pointer};
21470   SDValue Result =
21471       DAG.getMemIntrinsicNode(X86ISD::FILD, DL, Tys, FILDOps, SrcVT, PtrInfo,
21472                               Alignment, MachineMemOperand::MOLoad);
21473   Chain = Result.getValue(1);
21474 
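  // When the destination is an SSE type, the x87 FILD result (kept as f80
  // above) cannot be copied to an XMM register directly, so it is spilled to a
  // stack slot with FST and reloaded as DstVT below.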
21475   if (useSSE) {
21476     MachineFunction &MF = DAG.getMachineFunction();
21477     unsigned SSFISize = DstVT.getStoreSize();
21478     int SSFI =
21479         MF.getFrameInfo().CreateStackObject(SSFISize, Align(SSFISize), false);
21480     auto PtrVT = getPointerTy(MF.getDataLayout());
21481     SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
21482     Tys = DAG.getVTList(MVT::Other);
21483     SDValue FSTOps[] = {Chain, Result, StackSlot};
21484     MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
21485         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
21486         MachineMemOperand::MOStore, SSFISize, Align(SSFISize));
21487 
21488     Chain =
21489         DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps, DstVT, StoreMMO);
21490     Result = DAG.getLoad(
21491         DstVT, DL, Chain, StackSlot,
21492         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
21493     Chain = Result.getValue(1);
21494   }
21495 
21496   return { Result, Chain };
21497 }
21498 
21499 /// Horizontal vector math instructions may be slower than normal math with
21500 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
21501 /// implementation, and likely shuffle complexity of the alternate sequence.
21502 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
21503                                   const X86Subtarget &Subtarget) {
21504   bool IsOptimizingSize = DAG.shouldOptForSize();
21505   bool HasFastHOps = Subtarget.hasFastHorizontalOps();
21506   return !IsSingleSource || IsOptimizingSize || HasFastHOps;
21507 }
21508 
21509 /// 64-bit unsigned integer to double expansion.
21510 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
21511                                    const X86Subtarget &Subtarget) {
21512   // We can't use this algorithm for strict FP: it produces -0.0 instead of +0.0
21513   // when converting 0 while rounding toward negative infinity. Under strict FP
21514   // the caller falls back to Expand when i64 is legal, or uses FILD in 32-bit mode.
21515   assert(!Op->isStrictFPOpcode() && "Expected non-strict uint_to_fp!");
21516   // This algorithm is not obvious. Here is what we're trying to output:
21517   /*
21518      movq       %rax,  %xmm0
21519      punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
21520      subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
21521      #ifdef __SSE3__
21522        haddpd   %xmm0, %xmm0
21523      #else
21524        pshufd   $0x4e, %xmm0, %xmm1
21525        addpd    %xmm1, %xmm0
21526      #endif
21527   */
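  // Why the magic constants work (a sketch): after the punpckldq the register
  // holds two doubles whose bit patterns are 0x43300000'lo and 0x45300000'hi,
  // i.e. d0 = 2^52 + lo and d1 = 2^84 + hi * 2^32, both exact. Subtracting
  // c1 = { 2^52, 2^84 } leaves { lo, hi * 2^32 }, and the final horizontal add
  // computes lo + hi * 2^32 with a single rounding, which is the input value
  // rounded to double.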
21528 
21529   SDLoc dl(Op);
21530   LLVMContext *Context = DAG.getContext();
21531 
21532   // Build some magic constants.
21533   static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
21534   Constant *C0 = ConstantDataVector::get(*Context, CV0);
21535   auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
21536   SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, Align(16));
21537 
21538   SmallVector<Constant*,2> CV1;
21539   CV1.push_back(
21540     ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
21541                                       APInt(64, 0x4330000000000000ULL))));
21542   CV1.push_back(
21543     ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
21544                                       APInt(64, 0x4530000000000000ULL))));
21545   Constant *C1 = ConstantVector::get(CV1);
21546   SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, Align(16));
21547 
21548   // Load the 64-bit value into an XMM register.
21549   SDValue XR1 =
21550       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(0));
21551   SDValue CLod0 = DAG.getLoad(
21552       MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
21553       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
21554   SDValue Unpck1 =
21555       getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
21556 
21557   SDValue CLod1 = DAG.getLoad(
21558       MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
21559       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
21560   SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
21561   // TODO: Are there any fast-math-flags to propagate here?
21562   SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
21563   SDValue Result;
21564 
21565   if (Subtarget.hasSSE3() &&
21566       shouldUseHorizontalOp(true, DAG, Subtarget)) {
21567     Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
21568   } else {
21569     SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
21570     Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
21571   }
21572   Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
21573                        DAG.getIntPtrConstant(0, dl));
21574   return Result;
21575 }
21576 
21577 /// 32-bit unsigned integer to float expansion.
21578 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
21579                                    const X86Subtarget &Subtarget) {
21580   unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
21581   SDLoc dl(Op);
21582   // FP constant to bias correct the final result.
21583   SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
21584                                    MVT::f64);
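  // 0x4330000000000000 is the bit pattern of 2^52. OR'ing the zero-extended
  // u32 into the low mantissa bits below produces the double 2^52 + x exactly,
  // so subtracting the same bias recovers x; only a final f64 -> f32 round (if
  // the destination is f32) can lose precision.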
21585 
21586   // Load the 32-bit value into an XMM register.
21587   SDValue Load =
21588       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));
21589 
21590   // Zero out the upper parts of the register.
21591   Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
21592 
21593   // Or the load with the bias.
21594   SDValue Or = DAG.getNode(
21595       ISD::OR, dl, MVT::v2i64,
21596       DAG.getBitcast(MVT::v2i64, Load),
21597       DAG.getBitcast(MVT::v2i64,
21598                      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
21599   Or =
21600       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
21601                   DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
21602 
21603   if (Op.getNode()->isStrictFPOpcode()) {
21604     // Subtract the bias.
21605     // TODO: Are there any fast-math-flags to propagate here?
21606     SDValue Chain = Op.getOperand(0);
21607     SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
21608                               {Chain, Or, Bias});
21609 
21610     if (Op.getValueType() == Sub.getValueType())
21611       return Sub;
21612 
21613     // Handle final rounding.
21614     std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
21615         Sub, Sub.getValue(1), dl, Op.getSimpleValueType());
21616 
21617     return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
21618   }
21619 
21620   // Subtract the bias.
21621   // TODO: Are there any fast-math-flags to propagate here?
21622   SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
21623 
21624   // Handle final rounding.
21625   return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
21626 }
21627 
21628 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
21629                                      const X86Subtarget &Subtarget,
21630                                      const SDLoc &DL) {
21631   if (Op.getSimpleValueType() != MVT::v2f64)
21632     return SDValue();
21633 
21634   bool IsStrict = Op->isStrictFPOpcode();
21635 
21636   SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
21637   assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
21638 
21639   if (Subtarget.hasAVX512()) {
21640     if (!Subtarget.hasVLX()) {
21641       // Let generic type legalization widen this.
21642       if (!IsStrict)
21643         return SDValue();
21644       // Otherwise pad the integer input with 0s and widen the operation.
21645       N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
21646                        DAG.getConstant(0, DL, MVT::v2i32));
21647       SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
21648                                 {Op.getOperand(0), N0});
21649       SDValue Chain = Res.getValue(1);
21650       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
21651                         DAG.getIntPtrConstant(0, DL));
21652       return DAG.getMergeValues({Res, Chain}, DL);
21653     }
21654 
21655     // Legalize to v4i32 type.
21656     N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
21657                      DAG.getUNDEF(MVT::v2i32));
21658     if (IsStrict)
21659       return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
21660                          {Op.getOperand(0), N0});
21661     return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
21662   }
21663 
21664   // Zero extend to 2i64, OR with the floating point representation of 2^52.
21665   // This gives us the floating point equivalent of 2^52 + the i32 integer
21666   // since double has 52-bits of mantissa. Then subtract 2^52 in floating
21667   // point leaving just our i32 integers in double format.
21668   SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
21669   SDValue VBias =
21670       DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), DL, MVT::v2f64);
21671   SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
21672                            DAG.getBitcast(MVT::v2i64, VBias));
21673   Or = DAG.getBitcast(MVT::v2f64, Or);
21674 
21675   if (IsStrict)
21676     return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
21677                        {Op.getOperand(0), Or, VBias});
21678   return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
21679 }
21680 
21681 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
21682                                      const X86Subtarget &Subtarget) {
21683   SDLoc DL(Op);
21684   bool IsStrict = Op->isStrictFPOpcode();
21685   SDValue V = Op->getOperand(IsStrict ? 1 : 0);
21686   MVT VecIntVT = V.getSimpleValueType();
21687   assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
21688          "Unsupported custom type");
21689 
21690   if (Subtarget.hasAVX512()) {
21691     // With AVX512 but not VLX, we need to widen to get a 512-bit result type.
21692     assert(!Subtarget.hasVLX() && "Unexpected features");
21693     MVT VT = Op->getSimpleValueType(0);
21694 
21695     // v8i32->v8f64 is legal with AVX512 so just return it.
21696     if (VT == MVT::v8f64)
21697       return Op;
21698 
21699     assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
21700            "Unexpected VT!");
21701     MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
21702     MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
21703     // Need to concat with zero vector for strict fp to avoid spurious
21704     // exceptions.
21705     SDValue Tmp =
21706         IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
21707     V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
21708                     DAG.getIntPtrConstant(0, DL));
21709     SDValue Res, Chain;
21710     if (IsStrict) {
21711       Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
21712                         {Op->getOperand(0), V});
21713       Chain = Res.getValue(1);
21714     } else {
21715       Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
21716     }
21717 
21718     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
21719                       DAG.getIntPtrConstant(0, DL));
21720 
21721     if (IsStrict)
21722       return DAG.getMergeValues({Res, Chain}, DL);
21723     return Res;
21724   }
21725 
21726   if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
21727       Op->getSimpleValueType(0) == MVT::v4f64) {
21728     SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
21729     Constant *Bias = ConstantFP::get(
21730         *DAG.getContext(),
21731         APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
21732     auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
21733     SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, Align(8));
21734     SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
21735     SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
21736     SDValue VBias = DAG.getMemIntrinsicNode(
21737         X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
21738         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(8),
21739         MachineMemOperand::MOLoad);
21740 
21741     SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
21742                              DAG.getBitcast(MVT::v4i64, VBias));
21743     Or = DAG.getBitcast(MVT::v4f64, Or);
21744 
21745     if (IsStrict)
21746       return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
21747                          {Op.getOperand(0), Or, VBias});
21748     return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
21749   }
21750 
21751   // The algorithm is the following:
21752   // #ifdef __SSE4_1__
21753   //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
21754   //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
21755   //                                 (uint4) 0x53000000, 0xaa);
21756   // #else
21757   //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
21758   //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
21759   // #endif
21760   //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
21761   //     return (float4) lo + fhi;
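  // A sketch of the arithmetic above: 0x4b000000 is 2^23 as a float, so
  // lo = 2^23 + (v & 0xffff) exactly, and 0x53000000 is 2^39, so
  // hi = 2^39 + (v >> 16) * 2^16 exactly. Then
  //   fhi = hi - (2^39 + 2^23) = (v >> 16) * 2^16 - 2^23   (exact)
  //   lo + fhi = (v & 0xffff) + (v >> 16) * 2^16 = v, rounded once to float.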
21762 
21763   bool Is128 = VecIntVT == MVT::v4i32;
21764   MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
21765   // If we are converting to something other than the supported type, e.g., to
21766   // v4f64, abort early.
21767   if (VecFloatVT != Op->getSimpleValueType(0))
21768     return SDValue();
21769 
21770   // In the #ifdef/#else code, we have in common:
21771   // - The vector of constants:
21772   // -- 0x4b000000
21773   // -- 0x53000000
21774   // - A shift:
21775   // -- v >> 16
21776 
21777   // Create the splat vector for 0x4b000000.
21778   SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
21779   // Create the splat vector for 0x53000000.
21780   SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
21781 
21782   // Create the right shift.
21783   SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
21784   SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
21785 
21786   SDValue Low, High;
21787   if (Subtarget.hasSSE41()) {
21788     MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
21789     //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
21790     SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
21791     SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
21792     // Low will be bitcasted right away, so do not bother bitcasting back to its
21793     // original type.
21794     Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
21795                       VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
21796     //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
21797     //                                 (uint4) 0x53000000, 0xaa);
21798     SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
21799     SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
21800     // High will be bitcasted right away, so do not bother bitcasting back to
21801     // its original type.
21802     High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
21803                        VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
21804   } else {
21805     SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
21806     //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
21807     SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
21808     Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
21809 
21810     //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
21811     High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
21812   }
21813 
21814   // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
21815   SDValue VecCstFSub = DAG.getConstantFP(
21816       APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
21817 
21818   //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
21819   // NOTE: By using fsub of a positive constant instead of fadd of a negative
21820   // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
21821   // enabled. See PR24512.
21822   SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
21823   // TODO: Are there any fast-math-flags to propagate here?
21824   //     (float4) lo;
21825   SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
21826   //     return (float4) lo + fhi;
21827   if (IsStrict) {
21828     SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
21829                                 {Op.getOperand(0), HighBitcast, VecCstFSub});
21830     return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
21831                        {FHigh.getValue(1), LowBitcast, FHigh});
21832   }
21833 
21834   SDValue FHigh =
21835       DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
21836   return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
21837 }
21838 
21839 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
21840                                    const X86Subtarget &Subtarget) {
21841   unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
21842   SDValue N0 = Op.getOperand(OpNo);
21843   MVT SrcVT = N0.getSimpleValueType();
21844   SDLoc dl(Op);
21845 
21846   switch (SrcVT.SimpleTy) {
21847   default:
21848     llvm_unreachable("Custom UINT_TO_FP is not supported!");
21849   case MVT::v2i32:
21850     return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
21851   case MVT::v4i32:
21852   case MVT::v8i32:
21853     return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
21854   case MVT::v2i64:
21855   case MVT::v4i64:
21856     return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
21857   }
21858 }
21859 
21860 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
21861                                            SelectionDAG &DAG) const {
21862   bool IsStrict = Op->isStrictFPOpcode();
21863   unsigned OpNo = IsStrict ? 1 : 0;
21864   SDValue Src = Op.getOperand(OpNo);
21865   SDLoc dl(Op);
21866   auto PtrVT = getPointerTy(DAG.getDataLayout());
21867   MVT SrcVT = Src.getSimpleValueType();
21868   MVT DstVT = Op->getSimpleValueType(0);
21869   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
21870 
21871   // Bail out when we don't have native conversion instructions.
21872   if (DstVT == MVT::f128)
21873     return SDValue();
21874 
21875   if (isSoftFP16(DstVT))
21876     return promoteXINT_TO_FP(Op, DAG);
21877   else if (isLegalConversion(SrcVT, false, Subtarget))
21878     return Op;
21879 
21880   if (DstVT.isVector())
21881     return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
21882 
21883   if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
21884     return LowerWin64_INT128_TO_FP(Op, DAG);
21885 
21886   if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
21887     return Extract;
21888 
21889   if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
21890       (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
21891     // Conversions from unsigned i32 to f32/f64 are legal,
21892     // using VCVTUSI2SS/SD.  Same for i64 in 64-bit mode.
21893     return Op;
21894   }
21895 
21896   // Promote i32 to i64 and use a signed conversion on 64-bit targets.
21897   if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
21898     Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
21899     if (IsStrict)
21900       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
21901                          {Chain, Src});
21902     return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
21903   }
21904 
21905   if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
21906     return V;
21907   if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
21908     return V;
21909 
21910   // The transform for i64->f64 isn't correct for 0 when rounding to negative
21911   // infinity. It produces -0.0, so disable under strictfp.
21912   if (SrcVT == MVT::i64 && DstVT == MVT::f64 && Subtarget.hasSSE2() &&
21913       !IsStrict)
21914     return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
21915   // The transform for i32->f64/f32 isn't correct for 0 when rounding to
21916   // negative infinity, so disable it under strictfp and use FILD instead.
21917   if (SrcVT == MVT::i32 && Subtarget.hasSSE2() && DstVT != MVT::f80 &&
21918       !IsStrict)
21919     return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
21920   if (Subtarget.is64Bit() && SrcVT == MVT::i64 &&
21921       (DstVT == MVT::f32 || DstVT == MVT::f64))
21922     return SDValue();
21923 
21924   // Make a 64-bit buffer, and use it to build an FILD.
21925   SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64, 8);
21926   int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
21927   Align SlotAlign(8);
21928   MachinePointerInfo MPI =
21929     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
21930   if (SrcVT == MVT::i32) {
21931     SDValue OffsetSlot =
21932         DAG.getMemBasePlusOffset(StackSlot, TypeSize::Fixed(4), dl);
21933     SDValue Store1 = DAG.getStore(Chain, dl, Src, StackSlot, MPI, SlotAlign);
21934     SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
21935                                   OffsetSlot, MPI.getWithOffset(4), SlotAlign);
21936     std::pair<SDValue, SDValue> Tmp =
21937         BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, SlotAlign, DAG);
21938     if (IsStrict)
21939       return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
21940 
21941     return Tmp.first;
21942   }
21943 
21944   assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
21945   SDValue ValueToStore = Src;
21946   if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
21947     // Bitcasting to f64 here allows us to do a single 64-bit store from
21948     // an SSE register, avoiding the store forwarding penalty that would come
21949     // with two 32-bit stores.
21950     ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
21951   }
21952   SDValue Store =
21953       DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, SlotAlign);
21954   // For i64 source, we need to add the appropriate power of 2 if the input
21955   // was negative. We must be careful to do the computation in x87 extended
21956   // precision, not in SSE.
21957   SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
21958   SDValue Ops[] = { Store, StackSlot };
21959   SDValue Fild =
21960       DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, MVT::i64, MPI,
21961                               SlotAlign, MachineMemOperand::MOLoad);
21962   Chain = Fild.getValue(1);
21963 
21964 
21965   // Check whether the sign bit is set.
21966   SDValue SignSet = DAG.getSetCC(
21967       dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
21968       Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
21969 
21970   // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
21971   APInt FF(64, 0x5F80000000000000ULL);
21972   SDValue FudgePtr = DAG.getConstantPool(
21973       ConstantInt::get(*DAG.getContext(), FF), PtrVT);
21974   Align CPAlignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlign();
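  // 0x5F800000 is the bit pattern of 2^64 as an f32, so on x86's little-endian
  // layout the pool entry is effectively the pair { 0.0f, 2^64f }. The select
  // below picks offset 4 (2^64) when the sign bit of the source was set and
  // offset 0 (0.0) otherwise; adding that fudge to the signed FILD result
  // yields the intended unsigned value.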
21975 
21976   // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
21977   SDValue Zero = DAG.getIntPtrConstant(0, dl);
21978   SDValue Four = DAG.getIntPtrConstant(4, dl);
21979   SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
21980   FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
21981 
21982   // Load the value out, extending it from f32 to f80.
21983   SDValue Fudge = DAG.getExtLoad(
21984       ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
21985       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
21986       CPAlignment);
21987   Chain = Fudge.getValue(1);
21988   // Extend everything to 80 bits to force it to be done on x87.
21989   // TODO: Are there any fast-math-flags to propagate here?
21990   if (IsStrict) {
21991     unsigned Opc = ISD::STRICT_FADD;
21992     // Windows needs the precision control changed to 80bits around this add.
21993     if (Subtarget.isOSWindows() && DstVT == MVT::f32)
21994       Opc = X86ISD::STRICT_FP80_ADD;
21995 
21996     SDValue Add =
21997         DAG.getNode(Opc, dl, {MVT::f80, MVT::Other}, {Chain, Fild, Fudge});
21998     // STRICT_FP_ROUND can't handle equal types.
21999     if (DstVT == MVT::f80)
22000       return Add;
22001     return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
22002                        {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
22003   }
22004   unsigned Opc = ISD::FADD;
22005   // Windows needs the precision control changed to 80bits around this add.
22006   if (Subtarget.isOSWindows() && DstVT == MVT::f32)
22007     Opc = X86ISD::FP80_ADD;
22008 
22009   SDValue Add = DAG.getNode(Opc, dl, MVT::f80, Fild, Fudge);
22010   return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
22011                      DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
22012 }
22013 
22014 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
22015 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
22016 // just return an SDValue().
22017 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
22018 // to i16, i32 or i64, and we lower it to a legal sequence and return the
22019 // result.
22020 SDValue
22021 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
22022                                    bool IsSigned, SDValue &Chain) const {
22023   bool IsStrict = Op->isStrictFPOpcode();
22024   SDLoc DL(Op);
22025 
22026   EVT DstTy = Op.getValueType();
22027   SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
22028   EVT TheVT = Value.getValueType();
22029   auto PtrVT = getPointerTy(DAG.getDataLayout());
22030 
22031   if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
22032     // f16 must be promoted before using the lowering in this routine.
22033     // fp128 does not use this lowering.
22034     return SDValue();
22035   }
22036 
22037   // If using FIST to compute an unsigned i64, we'll need some fixup
22038   // to handle values above the maximum signed i64.  A FIST is always
22039   // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
22040   bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
22041 
22042   // FIXME: This does not generate an invalid exception if the input does not
22043   // fit in i32. PR44019
22044   if (!IsSigned && DstTy != MVT::i64) {
22045     // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
22046     // The low 32 bits of the fist result will have the correct uint32 result.
22047     assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
22048     DstTy = MVT::i64;
22049   }
22050 
22051   assert(DstTy.getSimpleVT() <= MVT::i64 &&
22052          DstTy.getSimpleVT() >= MVT::i16 &&
22053          "Unknown FP_TO_INT to lower!");
22054 
22055   // We lower FP->int64 into FISTP64 followed by a load from a temporary
22056   // stack slot.
22057   MachineFunction &MF = DAG.getMachineFunction();
22058   unsigned MemSize = DstTy.getStoreSize();
22059   int SSFI =
22060       MF.getFrameInfo().CreateStackObject(MemSize, Align(MemSize), false);
22061   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
22062 
22063   Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
22064 
22065   SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
22066 
22067   if (UnsignedFixup) {
22068     //
22069     // Conversion to unsigned i64 is implemented with a select,
22070     // depending on whether the source value fits in the range
22071     // of a signed i64.  Let Thresh be the FP equivalent of
22072     // 0x8000000000000000ULL.
22073     //
22074     //  Adjust = (Value >= Thresh) ? 0x8000000000000000 : 0;
22075     //  FltOfs = (Value >= Thresh) ? Thresh : 0.0;
22076     //  FistSrc = (Value - FltOfs);
22077     //  Fist-to-mem64 FistSrc
22078     //  Add Adjust (0 or 0x800...0ULL) to the 64-bit result, which is equivalent
22079     //  to XOR'ing the result with Adjust.
22080     //
22081     // Being a power of 2, Thresh is exactly representable in all FP formats.
22082     // For X87 we'd like to use the smallest FP type for this constant, but
22083     // for DAG type consistency we have to match the FP operand type.
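    // Worked example: for Value = 3 * 2^62 (too large for a signed i64), Cmp
    // is true, so FltOfs = Thresh = 2^63 and Adjust = 0x8000000000000000.
    // FistSrc = 2^62, the FIST stores 0x4000000000000000, and XOR'ing with
    // Adjust gives 0xC000000000000000, which is 3 * 2^62 as an unsigned i64.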
22084 
22085     APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
22086     LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
22087     bool LosesInfo = false;
22088     if (TheVT == MVT::f64)
22089       // The rounding mode is irrelevant as the conversion should be exact.
22090       Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
22091                               &LosesInfo);
22092     else if (TheVT == MVT::f80)
22093       Status = Thresh.convert(APFloat::x87DoubleExtended(),
22094                               APFloat::rmNearestTiesToEven, &LosesInfo);
22095 
22096     assert(Status == APFloat::opOK && !LosesInfo &&
22097            "FP conversion should have been exact");
22098 
22099     SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
22100 
22101     EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
22102                                    *DAG.getContext(), TheVT);
22103     SDValue Cmp;
22104     if (IsStrict) {
22105       Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE, Chain,
22106                          /*IsSignaling*/ true);
22107       Chain = Cmp.getValue(1);
22108     } else {
22109       Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE);
22110     }
22111 
22112     // Our preferred lowering of
22113     //
22114     // (Value >= Thresh) ? 0x8000000000000000ULL : 0
22115     //
22116     // is
22117     //
22118     // (Value >= Thresh) << 63
22119     //
22120     // but since we can get here after LegalOperations, DAGCombine might do the
22121     // wrong thing if we create a select. So, directly create the preferred
22122     // version.
22123     SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Cmp);
22124     SDValue Const63 = DAG.getConstant(63, DL, MVT::i8);
22125     Adjust = DAG.getNode(ISD::SHL, DL, MVT::i64, Zext, Const63);
22126 
22127     SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp, ThreshVal,
22128                                    DAG.getConstantFP(0.0, DL, TheVT));
22129 
22130     if (IsStrict) {
22131       Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
22132                           { Chain, Value, FltOfs });
22133       Chain = Value.getValue(1);
22134     } else
22135       Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
22136   }
22137 
22138   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
22139 
22140   // FIXME This causes a redundant load/store if the SSE-class value is already
22141   // in memory, such as if it is on the callstack.
22142   if (isScalarFPTypeInSSEReg(TheVT)) {
22143     assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
22144     Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
22145     SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
22146     SDValue Ops[] = { Chain, StackSlot };
22147 
22148     unsigned FLDSize = TheVT.getStoreSize();
22149     assert(FLDSize <= MemSize && "Stack slot not big enough");
22150     MachineMemOperand *MMO = MF.getMachineMemOperand(
22151         MPI, MachineMemOperand::MOLoad, FLDSize, Align(FLDSize));
22152     Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
22153     Chain = Value.getValue(1);
22154   }
22155 
22156   // Build the FP_TO_INT*_IN_MEM
22157   MachineMemOperand *MMO = MF.getMachineMemOperand(
22158       MPI, MachineMemOperand::MOStore, MemSize, Align(MemSize));
22159   SDValue Ops[] = { Chain, Value, StackSlot };
22160   SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
22161                                          DAG.getVTList(MVT::Other),
22162                                          Ops, DstTy, MMO);
22163 
22164   SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
22165   Chain = Res.getValue(1);
22166 
22167   // If we need an unsigned fixup, XOR the result with adjust.
22168   if (UnsignedFixup)
22169     Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
22170 
22171   return Res;
22172 }
22173 
22174 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
22175                               const X86Subtarget &Subtarget) {
22176   MVT VT = Op.getSimpleValueType();
22177   SDValue In = Op.getOperand(0);
22178   MVT InVT = In.getSimpleValueType();
22179   SDLoc dl(Op);
22180   unsigned Opc = Op.getOpcode();
22181 
22182   assert(VT.isVector() && InVT.isVector() && "Expected vector type");
22183   assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
22184          "Unexpected extension opcode");
22185   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
22186          "Expected same number of elements");
22187   assert((VT.getVectorElementType() == MVT::i16 ||
22188           VT.getVectorElementType() == MVT::i32 ||
22189           VT.getVectorElementType() == MVT::i64) &&
22190          "Unexpected element type");
22191   assert((InVT.getVectorElementType() == MVT::i8 ||
22192           InVT.getVectorElementType() == MVT::i16 ||
22193           InVT.getVectorElementType() == MVT::i32) &&
22194          "Unexpected element type");
22195 
22196   unsigned ExtendInVecOpc = DAG.getOpcode_EXTEND_VECTOR_INREG(Opc);
22197 
22198   if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
22199     assert(InVT == MVT::v32i8 && "Unexpected VT!");
22200     return splitVectorIntUnary(Op, DAG);
22201   }
22202 
22203   if (Subtarget.hasInt256())
22204     return Op;
22205 
22206   // Optimize vectors in AVX mode:
22207   //
22208   //   v8i16 -> v8i32
22209   //   Use vpmovzwd for 4 lower elements  v8i16 -> v4i32.
22210   //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
22211   //   Concat upper and lower parts.
22212   //
22213   //   v4i32 -> v4i64
22214   //   Use vpmovzdq for 4 lower elements  v4i32 -> v2i64.
22215   //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
22216   //   Concat upper and lower parts.
22217   //
22218   MVT HalfVT = VT.getHalfNumVectorElementsVT();
22219   SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
22220 
22221   // Short-circuit if we can determine that each 128-bit half is the same value.
22222   // Otherwise, this is difficult to match and optimize.
22223   if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
22224     if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
22225       return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
22226 
22227   SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
22228   SDValue Undef = DAG.getUNDEF(InVT);
22229   bool NeedZero = Opc == ISD::ZERO_EXTEND;
22230   SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
22231   OpHi = DAG.getBitcast(HalfVT, OpHi);
22232 
22233   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
22234 }
22235 
22236 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
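// For example, a ZERO_EXTEND of v16i1 to v16i8 becomes two v8i1 -> v8i16
// extends of the low and high halves, a v16i16 concat, and a final truncate
// back to v16i8.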
22237 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
22238                                    const SDLoc &dl, SelectionDAG &DAG) {
22239   assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
22240   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
22241                            DAG.getIntPtrConstant(0, dl));
22242   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
22243                            DAG.getIntPtrConstant(8, dl));
22244   Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
22245   Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
22246   SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
22247   return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
22248 }
22249 
22250 static  SDValue LowerZERO_EXTEND_Mask(SDValue Op,
22251                                       const X86Subtarget &Subtarget,
22252                                       SelectionDAG &DAG) {
22253   MVT VT = Op->getSimpleValueType(0);
22254   SDValue In = Op->getOperand(0);
22255   MVT InVT = In.getSimpleValueType();
22256   assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
22257   SDLoc DL(Op);
22258   unsigned NumElts = VT.getVectorNumElements();
22259 
22260   // For all vectors except vXi8, we can just emit a sign_extend and a shift.
22261   // This avoids a constant pool load.
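  // For example, a zero_extend of v8i1 to v8i32 becomes
  // (srl (sign_extend X to v8i32), 31).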
22262   if (VT.getVectorElementType() != MVT::i8) {
22263     SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
22264     return DAG.getNode(ISD::SRL, DL, VT, Extend,
22265                        DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
22266   }
22267 
22268   // Extend VT if BWI is not supported.
22269   MVT ExtVT = VT;
22270   if (!Subtarget.hasBWI()) {
22271     // If v16i32 is to be avoided, we'll need to split and concatenate.
22272     if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
22273       return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
22274 
22275     ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
22276   }
22277 
22278   // Widen to 512-bits if VLX is not supported.
22279   MVT WideVT = ExtVT;
22280   if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
22281     NumElts *= 512 / ExtVT.getSizeInBits();
22282     InVT = MVT::getVectorVT(MVT::i1, NumElts);
22283     In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
22284                      In, DAG.getIntPtrConstant(0, DL));
22285     WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
22286                               NumElts);
22287   }
22288 
22289   SDValue One = DAG.getConstant(1, DL, WideVT);
22290   SDValue Zero = DAG.getConstant(0, DL, WideVT);
22291 
22292   SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
22293 
22294   // Truncate if we had to extend above.
22295   if (VT != ExtVT) {
22296     WideVT = MVT::getVectorVT(MVT::i8, NumElts);
22297     SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
22298   }
22299 
22300   // Extract back to 128/256-bit if we widened.
22301   if (WideVT != VT)
22302     SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
22303                               DAG.getIntPtrConstant(0, DL));
22304 
22305   return SelectedVal;
22306 }
22307 
22308 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
22309                                 SelectionDAG &DAG) {
22310   SDValue In = Op.getOperand(0);
22311   MVT SVT = In.getSimpleValueType();
22312 
22313   if (SVT.getVectorElementType() == MVT::i1)
22314     return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
22315 
22316   assert(Subtarget.hasAVX() && "Expected AVX support");
22317   return LowerAVXExtend(Op, DAG, Subtarget);
22318 }
22319 
22320 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
22321 /// It makes use of the fact that vectors with enough leading sign/zero bits
22322 /// prevent the PACKSS/PACKUS from saturating the results.
22323 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
22324 /// within each 128-bit lane.
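/// For example, a v8i32 -> v8i16 truncation bitcasts the two 128-bit halves of
/// the source and emits a single PACKSSDW/PACKUSDW of them; wider sources are
/// packed recursively, one halving at a time.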
22325 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
22326                                       const SDLoc &DL, SelectionDAG &DAG,
22327                                       const X86Subtarget &Subtarget) {
22328   assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
22329          "Unexpected PACK opcode");
22330   assert(DstVT.isVector() && "VT not a vector?");
22331 
22332   // Requires SSE2 for PACKSS (SSE41 PACKUSDW is handled below).
22333   if (!Subtarget.hasSSE2())
22334     return SDValue();
22335 
22336   EVT SrcVT = In.getValueType();
22337 
22338   // No truncation required, we might get here due to recursive calls.
22339   if (SrcVT == DstVT)
22340     return In;
22341 
22342   // We only support vector truncation to 64bits or greater from a
22343   // 128bits or greater source.
22344   unsigned DstSizeInBits = DstVT.getSizeInBits();
22345   unsigned SrcSizeInBits = SrcVT.getSizeInBits();
22346   if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
22347     return SDValue();
22348 
22349   unsigned NumElems = SrcVT.getVectorNumElements();
22350   if (!isPowerOf2_32(NumElems))
22351     return SDValue();
22352 
22353   LLVMContext &Ctx = *DAG.getContext();
22354   assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
22355   assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
22356 
22357   EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
22358 
22359   // Pack to the largest type possible:
22360   // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
22361   EVT InVT = MVT::i16, OutVT = MVT::i8;
22362   if (SrcVT.getScalarSizeInBits() > 16 &&
22363       (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
22364     InVT = MVT::i32;
22365     OutVT = MVT::i16;
22366   }
22367 
22368   // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
22369   if (SrcVT.is128BitVector()) {
22370     InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
22371     OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
22372     In = DAG.getBitcast(InVT, In);
22373     SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, DAG.getUNDEF(InVT));
22374     Res = extractSubVector(Res, 0, DAG, DL, 64);
22375     return DAG.getBitcast(DstVT, Res);
22376   }
22377 
22378   // Split lower/upper subvectors.
22379   SDValue Lo, Hi;
22380   std::tie(Lo, Hi) = splitVector(In, DAG, DL);
22381 
22382   unsigned SubSizeInBits = SrcSizeInBits / 2;
22383   InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
22384   OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
22385 
22386   // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
22387   if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
22388     Lo = DAG.getBitcast(InVT, Lo);
22389     Hi = DAG.getBitcast(InVT, Hi);
22390     SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
22391     return DAG.getBitcast(DstVT, Res);
22392   }
22393 
22394   // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
22395   // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
22396   if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
22397     Lo = DAG.getBitcast(InVT, Lo);
22398     Hi = DAG.getBitcast(InVT, Hi);
22399     SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
22400 
22401     // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
22402     // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
22403     // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
22404     SmallVector<int, 64> Mask;
22405     int Scale = 64 / OutVT.getScalarSizeInBits();
22406     narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
22407     Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
22408 
22409     if (DstVT.is256BitVector())
22410       return DAG.getBitcast(DstVT, Res);
22411 
22412     // If 512bit -> 128bit truncate another stage.
22413     EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
22414     Res = DAG.getBitcast(PackedVT, Res);
22415     return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
22416   }
22417 
22418   // Recursively pack lower/upper subvectors, concat result and pack again.
22419   assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
22420   EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
22421   Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
22422   Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
22423 
22424   PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
22425   SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
22426   return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
22427 }
22428 
22429 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
22430                                   const X86Subtarget &Subtarget) {
22431 
22432   SDLoc DL(Op);
22433   MVT VT = Op.getSimpleValueType();
22434   SDValue In = Op.getOperand(0);
22435   MVT InVT = In.getSimpleValueType();
22436 
22437   assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
22438 
22439   // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
22440   unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
22441   if (InVT.getScalarSizeInBits() <= 16) {
22442     if (Subtarget.hasBWI()) {
22443       // legal, will go to VPMOVB2M, VPMOVW2M
22444       if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
22445         // We need to shift to get the lsb into sign position.
22446         // Shifting packed bytes is not supported natively, so bitcast to words.
22447         MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
22448         In = DAG.getNode(ISD::SHL, DL, ExtVT,
22449                          DAG.getBitcast(ExtVT, In),
22450                          DAG.getConstant(ShiftInx, DL, ExtVT));
22451         In = DAG.getBitcast(InVT, In);
22452       }
22453       return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
22454                           In, ISD::SETGT);
22455     }
22456     // Use TESTD/Q, extend the vector to packed dword/qword.
22457     assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
22458            "Unexpected vector type.");
22459     unsigned NumElts = InVT.getVectorNumElements();
22460     assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
22461     // We need to change to a wider element type that we have support for.
22462     // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
22463     // For 16 element vectors we extend to v16i32 unless we are explicitly
22464     // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
22465     // we need to split into two 8 element vectors which we can extend to v8i32,
22466     // truncate and concat the results. There's an additional complication if
22467     // the original type is v16i8. In that case we can't split the v16i8
22468     // directly, so we need to shuffle high elements to low and use
22469     // sign_extend_vector_inreg.
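    // For example, a v16i8 source with 512-bit vectors disabled becomes two
    // v8i32 sign extensions (the high half is shuffled down first), two v8i1
    // truncates and a v16i1 concat.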
22470     if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
22471       SDValue Lo, Hi;
22472       if (InVT == MVT::v16i8) {
22473         Lo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, In);
22474         Hi = DAG.getVectorShuffle(
22475             InVT, DL, In, In,
22476             {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
22477         Hi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, Hi);
22478       } else {
22479         assert(InVT == MVT::v16i16 && "Unexpected VT!");
22480         Lo = extract128BitVector(In, 0, DAG, DL);
22481         Hi = extract128BitVector(In, 8, DAG, DL);
22482       }
22483       // We're split now, just emit two truncates and a concat. The two
22484       // truncates will trigger legalization to come back to this function.
22485       Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
22486       Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
22487       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
22488     }
22489     // We either have 8 elements or we're allowed to use 512-bit vectors.
22490     // If we have VLX, we want to use the narrowest vector that can get the
22491     // job done so we use vXi32.
22492     MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
22493     MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
22494     In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
22495     InVT = ExtVT;
22496     ShiftInx = InVT.getScalarSizeInBits() - 1;
22497   }
22498 
22499   if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
22500     // We need to shift to get the lsb into sign position.
22501     In = DAG.getNode(ISD::SHL, DL, InVT, In,
22502                      DAG.getConstant(ShiftInx, DL, InVT));
22503   }
22504   // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
22505   if (Subtarget.hasDQI())
22506     return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
22507   return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
22508 }
22509 
22510 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
22511   SDLoc DL(Op);
22512   MVT VT = Op.getSimpleValueType();
22513   SDValue In = Op.getOperand(0);
22514   MVT InVT = In.getSimpleValueType();
22515   unsigned InNumEltBits = InVT.getScalarSizeInBits();
22516 
22517   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
22518          "Invalid TRUNCATE operation");
22519 
22520   // If we're called by the type legalizer, handle a few cases.
22521   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22522   if (!TLI.isTypeLegal(InVT)) {
22523     if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
22524         VT.is128BitVector()) {
22525       assert((InVT == MVT::v16i64 || Subtarget.hasVLX()) &&
22526              "Unexpected subtarget!");
22527       // The default behavior is to truncate one step, concatenate, and then
22528       // truncate the remainder. We'd rather produce two 64-bit results and
22529       // concatenate those.
22530       SDValue Lo, Hi;
22531       std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
22532 
22533       EVT LoVT, HiVT;
22534       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
22535 
22536       Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
22537       Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
22538       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
22539     }
22540 
22541     // Otherwise let default legalization handle it.
22542     return SDValue();
22543   }
22544 
22545   if (VT.getVectorElementType() == MVT::i1)
22546     return LowerTruncateVecI1(Op, DAG, Subtarget);
22547 
22548   // vpmovqb/w/d, vpmovdb/w, vpmovwb
22549   if (Subtarget.hasAVX512()) {
22550     if (InVT == MVT::v32i16 && !Subtarget.hasBWI()) {
22551       assert(VT == MVT::v32i8 && "Unexpected VT!");
22552       return splitVectorIntUnary(Op, DAG);
22553     }
22554 
22555     // Word to byte is only legal with BWI. Otherwise we have to promote to v16i32
22556     // and then truncate that. But we should only do that if we haven't been
22557     // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
22558     // handled by isel patterns.
22559     if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
22560         Subtarget.canExtendTo512DQ())
22561       return Op;
22562   }
22563 
22564   unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
22565   unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
22566 
22567   // Truncate with PACKUS if we are truncating a vector with leading zero bits
22568   // that extend all the way to the packed/truncated value.
22569   // Pre-SSE41 we can only use PACKUSWB.
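  // For example, a v8i32 -> v8i16 truncate can use PACKUSDW on SSE4.1 targets
  // if the upper 16 bits of every source element are known to be zero.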
22570   KnownBits Known = DAG.computeKnownBits(In);
22571   if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
22572     if (SDValue V =
22573             truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
22574       return V;
22575 
22576   // Truncate with PACKSS if we are truncating a vector with sign-bits that
22577   // extend all the way to the packed/truncated value.
22578   if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
22579     if (SDValue V =
22580             truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
22581       return V;
22582 
22583   // Handle truncation of V256 to V128 using shuffles.
22584   assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
22585 
22586   if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
22587     // On AVX2, v4i64 -> v4i32 becomes VPERMD.
22588     if (Subtarget.hasInt256()) {
22589       static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
22590       In = DAG.getBitcast(MVT::v8i32, In);
22591       In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
22592       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
22593                          DAG.getIntPtrConstant(0, DL));
22594     }
22595 
22596     SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
22597                                DAG.getIntPtrConstant(0, DL));
22598     SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
22599                                DAG.getIntPtrConstant(2, DL));
22600     static const int ShufMask[] = {0, 2, 4, 6};
22601     return DAG.getVectorShuffle(VT, DL, DAG.getBitcast(MVT::v4i32, OpLo),
22602                                 DAG.getBitcast(MVT::v4i32, OpHi), ShufMask);
22603   }
22604 
22605   if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
22606     // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
22607     if (Subtarget.hasInt256()) {
22608       // The PSHUFB mask:
22609       static const int ShufMask1[] = { 0,  1,  4,  5,  8,  9, 12, 13,
22610                                       -1, -1, -1, -1, -1, -1, -1, -1,
22611                                       16, 17, 20, 21, 24, 25, 28, 29,
22612                                       -1, -1, -1, -1, -1, -1, -1, -1 };
22613       In = DAG.getBitcast(MVT::v32i8, In);
22614       In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
22615       In = DAG.getBitcast(MVT::v4i64, In);
22616 
22617       static const int ShufMask2[] = {0, 2, -1, -1};
22618       In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
22619       In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
22620                        DAG.getIntPtrConstant(0, DL));
22621       return DAG.getBitcast(MVT::v8i16, In);
22622     }
22623 
22624     SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
22625                                DAG.getIntPtrConstant(0, DL));
22626     SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
22627                                DAG.getIntPtrConstant(4, DL));
22628 
22629     // The PSHUFB mask:
22630     static const int ShufMask1[] = {0, 2, 4, 6, -1, -1, -1, -1};
22631 
22632     OpLo = DAG.getBitcast(MVT::v8i16, OpLo);
22633     OpHi = DAG.getBitcast(MVT::v8i16, OpHi);
22634 
22635     OpLo = DAG.getVectorShuffle(MVT::v8i16, DL, OpLo, OpLo, ShufMask1);
22636     OpHi = DAG.getVectorShuffle(MVT::v8i16, DL, OpHi, OpHi, ShufMask1);
22637 
22638     OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
22639     OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
22640 
22641     // The MOVLHPS Mask:
22642     static const int ShufMask2[] = {0, 1, 4, 5};
22643     SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
22644     return DAG.getBitcast(MVT::v8i16, res);
22645   }
22646 
22647   if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
22648     // Use an AND to zero the upper bits for PACKUS.
22649     In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
22650 
22651     SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
22652                                DAG.getIntPtrConstant(0, DL));
22653     SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
22654                                DAG.getIntPtrConstant(8, DL));
22655     return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
22656   }
22657 
22658   llvm_unreachable("All 256->128 cases should have been handled above!");
22659 }
22660 
22661 // We can leverage the specific way the "cvttps2dq/cvttpd2dq" instruction
22662 // behaves on out of range inputs to generate optimized conversions.
22663 static SDValue expandFP_TO_UINT_SSE(MVT VT, SDValue Src, const SDLoc &dl,
22664                                     SelectionDAG &DAG,
22665                                     const X86Subtarget &Subtarget) {
22666   MVT SrcVT = Src.getSimpleValueType();
22667   unsigned DstBits = VT.getScalarSizeInBits();
22668   assert(DstBits == 32 && "expandFP_TO_UINT_SSE - only vXi32 supported");
22669 
22670   // Calculate the converted result for values in the range 0 to
22671   // 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
22672   SDValue Small = DAG.getNode(X86ISD::CVTTP2SI, dl, VT, Src);
22673   SDValue Big =
22674       DAG.getNode(X86ISD::CVTTP2SI, dl, VT,
22675                   DAG.getNode(ISD::FSUB, dl, SrcVT, Src,
22676                               DAG.getConstantFP(2147483648.0f, dl, SrcVT)));
22677 
22678   // The "CVTTP2SI" instruction conveniently sets the sign bit if
22679   // and only if the value was out of range. So we can use that
22680   // as our indicator that we would rather use "Big" instead of "Small".
22681   //
22682   // Use "Small" if "IsOverflown" has all bits cleared
22683   // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
22684 
22685   // AVX1 can't use the signsplat masking for 256-bit vectors - we have to
22686   // use the slightly slower blendv select instead.
22687   if (VT == MVT::v8i32 && !Subtarget.hasAVX2()) {
22688     SDValue Overflow = DAG.getNode(ISD::OR, dl, VT, Small, Big);
22689     return DAG.getNode(X86ISD::BLENDV, dl, VT, Small, Overflow, Small);
22690   }
22691 
22692   SDValue IsOverflown =
22693       DAG.getNode(X86ISD::VSRAI, dl, VT, Small,
22694                   DAG.getTargetConstant(DstBits - 1, dl, MVT::i8));
22695   return DAG.getNode(ISD::OR, dl, VT, Small,
22696                      DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
22697 }
22698 
22699 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
22700   bool IsStrict = Op->isStrictFPOpcode();
22701   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
22702                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
22703   MVT VT = Op->getSimpleValueType(0);
22704   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
22705   SDValue Chain = IsStrict ? Op->getOperand(0) : SDValue();
22706   MVT SrcVT = Src.getSimpleValueType();
22707   SDLoc dl(Op);
22708 
22709   SDValue Res;
22710   if (isSoftFP16(SrcVT)) {
22711     MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
22712     if (IsStrict)
22713       return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
22714                          {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
22715                                              {NVT, MVT::Other}, {Chain, Src})});
22716     return DAG.getNode(Op.getOpcode(), dl, VT,
22717                        DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
22718   } else if (isTypeLegal(SrcVT) && isLegalConversion(VT, IsSigned, Subtarget)) {
22719     return Op;
22720   }
22721 
22722   if (VT.isVector()) {
22723     if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
22724       MVT ResVT = MVT::v4i32;
22725       MVT TruncVT = MVT::v4i1;
22726       unsigned Opc;
22727       if (IsStrict)
22728         Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
22729       else
22730         Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
22731 
22732       if (!IsSigned && !Subtarget.hasVLX()) {
22733         assert(Subtarget.useAVX512Regs() && "Unexpected features!");
22734         // Widen to 512-bits.
22735         ResVT = MVT::v8i32;
22736         TruncVT = MVT::v8i1;
22737         Opc = Op.getOpcode();
22738         // Need to concat with zero vector for strict fp to avoid spurious
22739         // exceptions.
22740         // TODO: Should we just do this for non-strict as well?
22741         SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
22742                                : DAG.getUNDEF(MVT::v8f64);
22743         Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
22744                           DAG.getIntPtrConstant(0, dl));
22745       }
22746       if (IsStrict) {
22747         Res = DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Chain, Src});
22748         Chain = Res.getValue(1);
22749       } else {
22750         Res = DAG.getNode(Opc, dl, ResVT, Src);
22751       }
22752 
22753       Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
22754       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
22755                         DAG.getIntPtrConstant(0, dl));
22756       if (IsStrict)
22757         return DAG.getMergeValues({Res, Chain}, dl);
22758       return Res;
22759     }
22760 
22761     if (Subtarget.hasFP16() && SrcVT.getVectorElementType() == MVT::f16) {
22762       if (VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16)
22763         return Op;
22764 
22765       MVT ResVT = VT;
22766       MVT EleVT = VT.getVectorElementType();
22767       if (EleVT != MVT::i64)
22768         ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
22769 
22770       if (SrcVT != MVT::v8f16) {
22771         SDValue Tmp =
22772             IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
22773         SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
22774         Ops[0] = Src;
22775         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
22776       }
22777 
22778       if (IsStrict) {
22779         Res = DAG.getNode(IsSigned ? X86ISD::STRICT_CVTTP2SI
22780                                    : X86ISD::STRICT_CVTTP2UI,
22781                           dl, {ResVT, MVT::Other}, {Chain, Src});
22782         Chain = Res.getValue(1);
22783       } else {
22784         Res = DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl,
22785                           ResVT, Src);
22786       }
22787 
22788       // TODO: Need to add exception check code for strict FP.
22789       if (EleVT.getSizeInBits() < 16) {
22790         ResVT = MVT::getVectorVT(EleVT, 8);
22791         Res = DAG.getNode(ISD::TRUNCATE, dl, ResVT, Res);
22792       }
22793 
22794       if (ResVT != VT)
22795         Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
22796                           DAG.getIntPtrConstant(0, dl));
22797 
22798       if (IsStrict)
22799         return DAG.getMergeValues({Res, Chain}, dl);
22800       return Res;
22801     }
22802 
22803     if (VT == MVT::v8i16 && (SrcVT == MVT::v8f32 || SrcVT == MVT::v8f64)) {
22804       if (IsStrict) {
22805         Res = DAG.getNode(IsSigned ? ISD::STRICT_FP_TO_SINT
22806                                    : ISD::STRICT_FP_TO_UINT,
22807                           dl, {MVT::v8i32, MVT::Other}, {Chain, Src});
22808         Chain = Res.getValue(1);
22809       } else {
22810         Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl,
22811                           MVT::v8i32, Src);
22812       }
22813 
22814       // TODO: Need to add exception check code for strict FP.
22815       Res = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i16, Res);
22816 
22817       if (IsStrict)
22818         return DAG.getMergeValues({Res, Chain}, dl);
22819       return Res;
22820     }
22821 
22822     // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
22823     if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
22824       assert(!IsSigned && "Expected unsigned conversion!");
22825       assert(Subtarget.useAVX512Regs() && "Requires avx512f");
22826       return Op;
22827     }
22828 
22829     // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
22830     if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
22831         (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32) &&
22832         Subtarget.useAVX512Regs()) {
22833       assert(!IsSigned && "Expected unsigned conversion!");
22834       assert(!Subtarget.hasVLX() && "Unexpected features!");
22835       MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
22836       MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
22837       // Need to concat with zero vector for strict fp to avoid spurious
22838       // exceptions.
22839       // TODO: Should we just do this for non-strict as well?
22840       SDValue Tmp =
22841           IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
22842       Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
22843                         DAG.getIntPtrConstant(0, dl));
22844 
22845       if (IsStrict) {
22846         Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
22847                           {Chain, Src});
22848         Chain = Res.getValue(1);
22849       } else {
22850         Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
22851       }
22852 
22853       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
22854                         DAG.getIntPtrConstant(0, dl));
22855 
22856       if (IsStrict)
22857         return DAG.getMergeValues({Res, Chain}, dl);
22858       return Res;
22859     }
22860 
22861     // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
22862     if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
22863         (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32) &&
22864         Subtarget.useAVX512Regs() && Subtarget.hasDQI()) {
22865       assert(!Subtarget.hasVLX() && "Unexpected features!");
22866       MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
22867       // Need to concat with zero vector for strict fp to avoid spurious
22868       // exceptions.
22869       // TODO: Should we just do this for non-strict as well?
22870       SDValue Tmp =
22871           IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
22872       Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
22873                         DAG.getIntPtrConstant(0, dl));
22874 
22875       if (IsStrict) {
22876         Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
22877                           {Chain, Src});
22878         Chain = Res.getValue(1);
22879       } else {
22880         Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
22881       }
22882 
22883       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
22884                         DAG.getIntPtrConstant(0, dl));
22885 
22886       if (IsStrict)
22887         return DAG.getMergeValues({Res, Chain}, dl);
22888       return Res;
22889     }
22890 
22891     if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
22892       if (!Subtarget.hasVLX()) {
22893         // Non-strict nodes without VLX can be widened to v4f32->v4i64 by the type
22894         // legalizer and then widened again by vector op legalization.
22895         if (!IsStrict)
22896           return SDValue();
22897 
22898         SDValue Zero = DAG.getConstantFP(0.0, dl, MVT::v2f32);
22899         SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f32,
22900                                   {Src, Zero, Zero, Zero});
22901         Tmp = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
22902                           {Chain, Tmp});
22903         SDValue Chain = Tmp.getValue(1);
22904         Tmp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Tmp,
22905                           DAG.getIntPtrConstant(0, dl));
22906         return DAG.getMergeValues({Tmp, Chain}, dl);
22907       }
22908 
22909       assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
22910       SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
22911                                 DAG.getUNDEF(MVT::v2f32));
22912       if (IsStrict) {
22913         unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
22914                                 : X86ISD::STRICT_CVTTP2UI;
22915         return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
22916       }
22917       unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
22918       return DAG.getNode(Opc, dl, VT, Tmp);
22919     }
22920 
22921     // Generate optimized instructions for pre-AVX512 unsigned conversions from
22922     // vXf32/vXf64 to vXi32.
22923     if ((VT == MVT::v4i32 && SrcVT == MVT::v4f32) ||
22924         (VT == MVT::v4i32 && SrcVT == MVT::v4f64) ||
22925         (VT == MVT::v8i32 && SrcVT == MVT::v8f32)) {
22926       assert(!IsSigned && "Expected unsigned conversion!");
22927       return expandFP_TO_UINT_SSE(VT, Src, dl, DAG, Subtarget);
22928     }
22929 
22930     return SDValue();
22931   }
22932 
22933   assert(!VT.isVector());
22934 
22935   bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
22936 
22937   if (!IsSigned && UseSSEReg) {
22938     // Conversions from f32/f64 with AVX512 should be legal.
22939     if (Subtarget.hasAVX512())
22940       return Op;
22941 
22942     // We can leverage the specific way the "cvttss2si/cvttsd2si" instruction
22943     // behaves on out of range inputs to generate optimized conversions.
22944     if (!IsStrict && ((VT == MVT::i32 && !Subtarget.is64Bit()) ||
22945                       (VT == MVT::i64 && Subtarget.is64Bit()))) {
22946       unsigned DstBits = VT.getScalarSizeInBits();
22947       APInt UIntLimit = APInt::getSignMask(DstBits);
22948       SDValue FloatOffset = DAG.getNode(ISD::UINT_TO_FP, dl, SrcVT,
22949                                         DAG.getConstant(UIntLimit, dl, VT));
22950       MVT SrcVecVT = MVT::getVectorVT(SrcVT, 128 / SrcVT.getScalarSizeInBits());
22951 
22952       // Calculate the converted result for values in the range:
22953       // (i32) 0 to 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
22954       // (i64) 0 to 2^63-1 ("Small") and from 2^63 to 2^64-1 ("Big").
22955       SDValue Small =
22956           DAG.getNode(X86ISD::CVTTS2SI, dl, VT,
22957                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT, Src));
22958       SDValue Big = DAG.getNode(
22959           X86ISD::CVTTS2SI, dl, VT,
22960           DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT,
22961                       DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FloatOffset)));
22962 
22963       // The "CVTTS2SI" instruction conveniently sets the sign bit if
22964       // and only if the value was out of range. So we can use that
22965       // as our indicator that we would rather use "Big" instead of "Small".
22966       //
22967       // Use "Small" if "IsOverflown" has all bits cleared
22968       // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
22969       SDValue IsOverflown = DAG.getNode(
22970           ISD::SRA, dl, VT, Small, DAG.getConstant(DstBits - 1, dl, MVT::i8));
22971       return DAG.getNode(ISD::OR, dl, VT, Small,
22972                          DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
22973     }
22974 
22975     // Use default expansion for i64.
22976     if (VT == MVT::i64)
22977       return SDValue();
22978 
22979     assert(VT == MVT::i32 && "Unexpected VT!");
22980 
22981     // Promote i32 to i64 and use a signed operation on 64-bit targets.
22982     // FIXME: This does not generate an invalid exception if the input does not
22983     // fit in i32. PR44019
22984     if (Subtarget.is64Bit()) {
22985       if (IsStrict) {
22986         Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i64, MVT::Other},
22987                           {Chain, Src});
22988         Chain = Res.getValue(1);
22989       } else
22990         Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
22991 
22992       Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
22993       if (IsStrict)
22994         return DAG.getMergeValues({Res, Chain}, dl);
22995       return Res;
22996     }
22997 
22998     // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
22999     // use fisttp which will be handled later.
23000     if (!Subtarget.hasSSE3())
23001       return SDValue();
23002   }
23003 
23004   // Promote i16 to i32 if we can use an SSE operation or the type is f128.
23005   // FIXME: This does not generate an invalid exception if the input does not
23006   // fit in i16. PR44019
23007   if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
23008     assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
23009     if (IsStrict) {
23010       Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i32, MVT::Other},
23011                         {Chain, Src});
23012       Chain = Res.getValue(1);
23013     } else
23014       Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
23015 
23016     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
23017     if (IsStrict)
23018       return DAG.getMergeValues({Res, Chain}, dl);
23019     return Res;
23020   }
23021 
23022   // If this is a FP_TO_SINT using SSEReg we're done.
23023   if (UseSSEReg && IsSigned)
23024     return Op;
23025 
23026   // fp128 needs to use a libcall.
23027   if (SrcVT == MVT::f128) {
23028     RTLIB::Libcall LC;
23029     if (IsSigned)
23030       LC = RTLIB::getFPTOSINT(SrcVT, VT);
23031     else
23032       LC = RTLIB::getFPTOUINT(SrcVT, VT);
23033 
23034     MakeLibCallOptions CallOptions;
23035     std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
23036                                                   SDLoc(Op), Chain);
23037 
23038     if (IsStrict)
23039       return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
23040 
23041     return Tmp.first;
23042   }
23043 
23044   // Fall back to X87.
23045   if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
23046     if (IsStrict)
23047       return DAG.getMergeValues({V, Chain}, dl);
23048     return V;
23049   }
23050 
23051   llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
23052 }
23053 
23054 SDValue X86TargetLowering::LowerLRINT_LLRINT(SDValue Op,
23055                                              SelectionDAG &DAG) const {
23056   SDValue Src = Op.getOperand(0);
23057   MVT SrcVT = Src.getSimpleValueType();
23058 
23059   if (SrcVT == MVT::f16)
23060     return SDValue();
23061 
23062   // If the source is in an SSE register, the node is Legal.
23063   if (isScalarFPTypeInSSEReg(SrcVT))
23064     return Op;
23065 
23066   return LRINT_LLRINTHelper(Op.getNode(), DAG);
23067 }
23068 
23069 SDValue X86TargetLowering::LRINT_LLRINTHelper(SDNode *N,
23070                                               SelectionDAG &DAG) const {
23071   EVT DstVT = N->getValueType(0);
23072   SDValue Src = N->getOperand(0);
23073   EVT SrcVT = Src.getValueType();
23074 
23075   if (SrcVT != MVT::f32 && SrcVT != MVT::f64 && SrcVT != MVT::f80) {
23076     // f16 must be promoted before using the lowering in this routine.
23077     // fp128 does not use this lowering.
23078     return SDValue();
23079   }
23080 
23081   SDLoc DL(N);
23082   SDValue Chain = DAG.getEntryNode();
23083 
23084   bool UseSSE = isScalarFPTypeInSSEReg(SrcVT);
23085 
23086   // If we're converting from SSE, the stack slot needs to hold both types.
23087   // Otherwise it only needs to hold the DstVT.
23088   EVT OtherVT = UseSSE ? SrcVT : DstVT;
23089   SDValue StackPtr = DAG.CreateStackTemporary(DstVT, OtherVT);
23090   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
23091   MachinePointerInfo MPI =
23092       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
23093 
23094   if (UseSSE) {
23095     assert(DstVT == MVT::i64 && "Invalid LRINT/LLRINT to lower!");
23096     Chain = DAG.getStore(Chain, DL, Src, StackPtr, MPI);
23097     SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
23098     SDValue Ops[] = { Chain, StackPtr };
23099 
23100     Src = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, SrcVT, MPI,
23101                                   /*Align*/ std::nullopt,
23102                                   MachineMemOperand::MOLoad);
23103     Chain = Src.getValue(1);
23104   }
23105 
23106   SDValue StoreOps[] = { Chain, Src, StackPtr };
23107   Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL, DAG.getVTList(MVT::Other),
23108                                   StoreOps, DstVT, MPI, /*Align*/ std::nullopt,
23109                                   MachineMemOperand::MOStore);
23110 
23111   return DAG.getLoad(DstVT, DL, Chain, StackPtr, MPI);
23112 }
23113 
23114 SDValue
23115 X86TargetLowering::LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const {
23116   // This is based on the TargetLowering::expandFP_TO_INT_SAT implementation,
23117   // but making use of X86 specifics to produce better instruction sequences.
23118   SDNode *Node = Op.getNode();
23119   bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
23120   unsigned FpToIntOpcode = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
23121   SDLoc dl(SDValue(Node, 0));
23122   SDValue Src = Node->getOperand(0);
23123 
23124   // There are three types involved here: SrcVT is the source floating point
23125   // type, DstVT is the type of the result, and TmpVT is the result of the
23126   // intermediate FP_TO_*INT operation we'll use (which may be a promotion of
23127   // DstVT).
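  // For example, an i16 fptosi_sat from f32 has DstVT == i16 but uses
  // TmpVT == i32 for the intermediate FP_TO_SINT.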
23128   EVT SrcVT = Src.getValueType();
23129   EVT DstVT = Node->getValueType(0);
23130   EVT TmpVT = DstVT;
23131 
23132   // This code is only for floats and doubles. Fall back to generic code for
23133   // anything else.
23134   if (!isScalarFPTypeInSSEReg(SrcVT) || isSoftFP16(SrcVT))
23135     return SDValue();
23136 
23137   EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
23138   unsigned SatWidth = SatVT.getScalarSizeInBits();
23139   unsigned DstWidth = DstVT.getScalarSizeInBits();
23140   unsigned TmpWidth = TmpVT.getScalarSizeInBits();
23141   assert(SatWidth <= DstWidth && SatWidth <= TmpWidth &&
23142          "Expected saturation width smaller than result width");
23143 
23144   // Promote result of FP_TO_*INT to at least 32 bits.
23145   if (TmpWidth < 32) {
23146     TmpVT = MVT::i32;
23147     TmpWidth = 32;
23148   }
23149 
23150   // Promote unsigned 32-bit conversions to 64-bit, because that allows us to
23151   // use a native signed conversion instead.
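  // For example, on a 64-bit target a 32-bit fptoui_sat uses TmpVT == i64 and
  // a native signed FP_TO_SINT.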
23152   if (SatWidth == 32 && !IsSigned && Subtarget.is64Bit()) {
23153     TmpVT = MVT::i64;
23154     TmpWidth = 64;
23155   }
23156 
23157   // If the saturation width is smaller than the size of the temporary result,
23158   // we can always use signed conversion, which is native.
23159   if (SatWidth < TmpWidth)
23160     FpToIntOpcode = ISD::FP_TO_SINT;
23161 
23162   // Determine minimum and maximum integer values and their corresponding
23163   // floating-point values.
23164   APInt MinInt, MaxInt;
23165   if (IsSigned) {
23166     MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
23167     MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
23168   } else {
23169     MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
23170     MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
23171   }
23172 
23173   APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
23174   APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));
23175 
23176   APFloat::opStatus MinStatus = MinFloat.convertFromAPInt(
23177     MinInt, IsSigned, APFloat::rmTowardZero);
23178   APFloat::opStatus MaxStatus = MaxFloat.convertFromAPInt(
23179     MaxInt, IsSigned, APFloat::rmTowardZero);
23180   bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact)
23181                           && !(MaxStatus & APFloat::opStatus::opInexact);
23182 
23183   SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
23184   SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);
23185 
23186   // If the integer bounds are exactly representable as floats, emit a
23187   // min+max+fptoi sequence. Otherwise use comparisons and selects.
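  // For example, for f64 -> i32 both bounds are exact and the clamp sequence
  // is used, while for a signed f32 -> i32 the upper bound 2^31-1 rounds down
  // to 2147483520.0f, so the compare/select path below is taken.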
23188   if (AreExactFloatBounds) {
23189     if (DstVT != TmpVT) {
23190       // Clamp by MinFloat from below. If Src is NaN, propagate NaN.
23191       SDValue MinClamped = DAG.getNode(
23192         X86ISD::FMAX, dl, SrcVT, MinFloatNode, Src);
23193       // Clamp by MaxFloat from above. If Src is NaN, propagate NaN.
23194       SDValue BothClamped = DAG.getNode(
23195         X86ISD::FMIN, dl, SrcVT, MaxFloatNode, MinClamped);
23196       // Convert clamped value to integer.
23197       SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, BothClamped);
23198 
23199       // NaN will become INDVAL, with the top bit set and the rest zero.
23200       // Truncation will discard the top bit, resulting in zero.
23201       return DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
23202     }
23203 
23204     // Clamp by MinFloat from below. If Src is NaN, the result is MinFloat.
23205     SDValue MinClamped = DAG.getNode(
23206       X86ISD::FMAX, dl, SrcVT, Src, MinFloatNode);
23207     // Clamp by MaxFloat from above. NaN cannot occur.
23208     SDValue BothClamped = DAG.getNode(
23209       X86ISD::FMINC, dl, SrcVT, MinClamped, MaxFloatNode);
23210     // Convert clamped value to integer.
23211     SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, DstVT, BothClamped);
23212 
23213     if (!IsSigned) {
23214       // In the unsigned case we're done, because we mapped NaN to MinFloat,
23215       // which is zero.
23216       return FpToInt;
23217     }
23218 
23219     // Otherwise, select zero if Src is NaN.
23220     SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
23221     return DAG.getSelectCC(
23222       dl, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
23223   }
23224 
23225   SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
23226   SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);
23227 
23228   // Result of direct conversion, which may be selected away.
23229   SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, Src);
23230 
23231   if (DstVT != TmpVT) {
23232     // NaN will become INDVAL, with the top bit set and the rest zero.
23233     // Truncation will discard the top bit, resulting in zero.
23234     FpToInt = DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
23235   }
23236 
23237   SDValue Select = FpToInt;
23238   // For signed conversions where we saturate to the same size as the
23239   // result type of the fptoi instructions, INDVAL coincides with integer
23240   // minimum, so we don't need to explicitly check it.
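  // For example, for a plain f32 -> i32 signed saturation cvttss2si already
  // returns INT32_MIN for NaNs and for inputs below the range, so that value
  // does not need to be selected explicitly.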
23241   if (!IsSigned || SatWidth != TmpVT.getScalarSizeInBits()) {
23242     // If Src ULT MinFloat, select MinInt. In particular, this also selects
23243     // MinInt if Src is NaN.
23244     Select = DAG.getSelectCC(
23245       dl, Src, MinFloatNode, MinIntNode, Select, ISD::CondCode::SETULT);
23246   }
23247 
23248   // If Src OGT MaxFloat, select MaxInt.
23249   Select = DAG.getSelectCC(
23250     dl, Src, MaxFloatNode, MaxIntNode, Select, ISD::CondCode::SETOGT);
23251 
23252   // In the unsigned case we are done, because we mapped NaN to MinInt, which
23253   // is already zero. The promoted case was already handled above.
23254   if (!IsSigned || DstVT != TmpVT) {
23255     return Select;
23256   }
23257 
23258   // Otherwise, select 0 if Src is NaN.
23259   SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
23260   return DAG.getSelectCC(
23261     dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
23262 }
23263 
23264 SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
23265   bool IsStrict = Op->isStrictFPOpcode();
23266 
23267   SDLoc DL(Op);
23268   MVT VT = Op.getSimpleValueType();
23269   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
23270   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
23271   MVT SVT = In.getSimpleValueType();
23272 
23273   // Let f16->f80 get lowered to a libcall, except for darwin, where we should
23274   // lower it to an fp_extend via f32 (as only f16<>f32 libcalls are available)
23275   if (VT == MVT::f128 || (SVT == MVT::f16 && VT == MVT::f80 &&
23276                           !Subtarget.getTargetTriple().isOSDarwin()))
23277     return SDValue();
23278 
23279   if (SVT == MVT::f16) {
23280     if (Subtarget.hasFP16())
23281       return Op;
23282 
23283     if (VT != MVT::f32) {
23284       if (IsStrict)
23285         return DAG.getNode(
23286             ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other},
23287             {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, DL,
23288                                 {MVT::f32, MVT::Other}, {Chain, In})});
23289 
23290       return DAG.getNode(ISD::FP_EXTEND, DL, VT,
23291                          DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, In));
23292     }
23293 
23294     if (!Subtarget.hasF16C()) {
23295       if (!Subtarget.getTargetTriple().isOSDarwin())
23296         return SDValue();
23297 
23298       assert(VT == MVT::f32 && SVT == MVT::f16 && "unexpected extend libcall");
23299 
23300       // Need a libcall, but ABI for f16 is soft-float on MacOS.
23301       TargetLowering::CallLoweringInfo CLI(DAG);
23302       Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
23303 
23304       In = DAG.getBitcast(MVT::i16, In);
23305       TargetLowering::ArgListTy Args;
23306       TargetLowering::ArgListEntry Entry;
23307       Entry.Node = In;
23308       Entry.Ty = EVT(MVT::i16).getTypeForEVT(*DAG.getContext());
23309       Entry.IsSExt = false;
23310       Entry.IsZExt = true;
23311       Args.push_back(Entry);
23312 
23313       SDValue Callee = DAG.getExternalSymbol(
23314           getLibcallName(RTLIB::FPEXT_F16_F32),
23315           getPointerTy(DAG.getDataLayout()));
23316       CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
23317           CallingConv::C, EVT(VT).getTypeForEVT(*DAG.getContext()), Callee,
23318           std::move(Args));
23319 
23320       SDValue Res;
23321       std::tie(Res,Chain) = LowerCallTo(CLI);
23322       if (IsStrict)
23323         Res = DAG.getMergeValues({Res, Chain}, DL);
23324 
23325       return Res;
23326     }
23327 
23328     In = DAG.getBitcast(MVT::i16, In);
23329     In = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v8i16,
23330                      getZeroVector(MVT::v8i16, Subtarget, DAG, DL), In,
23331                      DAG.getIntPtrConstant(0, DL));
23332     SDValue Res;
23333     if (IsStrict) {
23334       Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, DL, {MVT::v4f32, MVT::Other},
23335                         {Chain, In});
23336       Chain = Res.getValue(1);
23337     } else {
23338       Res = DAG.getNode(X86ISD::CVTPH2PS, DL, MVT::v4f32, In,
23339                         DAG.getTargetConstant(4, DL, MVT::i32));
23340     }
23341     Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Res,
23342                       DAG.getIntPtrConstant(0, DL));
23343     if (IsStrict)
23344       return DAG.getMergeValues({Res, Chain}, DL);
23345     return Res;
23346   }
23347 
23348   if (!SVT.isVector())
23349     return Op;
23350 
23351   if (SVT.getVectorElementType() == MVT::f16) {
23352     assert(Subtarget.hasF16C() && "Unexpected features!");
23353     if (SVT == MVT::v2f16)
23354       In = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f16, In,
23355                        DAG.getUNDEF(MVT::v2f16));
23356     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f16, In,
23357                               DAG.getUNDEF(MVT::v4f16));
23358     if (IsStrict)
23359       return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
23360                          {Op->getOperand(0), Res});
23361     return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
23362   } else if (VT == MVT::v4f64 || VT == MVT::v8f64) {
23363     return Op;
23364   }
23365 
23366   assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
23367 
23368   SDValue Res =
23369       DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
23370   if (IsStrict)
23371     return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
23372                        {Op->getOperand(0), Res});
23373   return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
23374 }
23375 
23376 SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
23377   bool IsStrict = Op->isStrictFPOpcode();
23378 
23379   SDLoc DL(Op);
23380   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
23381   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
23382   MVT VT = Op.getSimpleValueType();
23383   MVT SVT = In.getSimpleValueType();
23384 
23385   if (SVT == MVT::f128 || (VT == MVT::f16 && SVT == MVT::f80))
23386     return SDValue();
23387 
23388   if (VT == MVT::f16 && (SVT == MVT::f64 || SVT == MVT::f32) &&
23389       !Subtarget.hasFP16() && (SVT == MVT::f64 || !Subtarget.hasF16C())) {
23390     if (!Subtarget.getTargetTriple().isOSDarwin())
23391       return SDValue();
23392 
23393     // We need a libcall but the ABI for f16 libcalls on MacOS is soft.
23394     TargetLowering::CallLoweringInfo CLI(DAG);
23395     Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
23396 
23397     TargetLowering::ArgListTy Args;
23398     TargetLowering::ArgListEntry Entry;
23399     Entry.Node = In;
23400     Entry.Ty = EVT(SVT).getTypeForEVT(*DAG.getContext());
23401     Entry.IsSExt = false;
23402     Entry.IsZExt = true;
23403     Args.push_back(Entry);
23404 
23405     SDValue Callee = DAG.getExternalSymbol(
23406         getLibcallName(SVT == MVT::f64 ? RTLIB::FPROUND_F64_F16
23407                                        : RTLIB::FPROUND_F32_F16),
23408         getPointerTy(DAG.getDataLayout()));
23409     CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
23410         CallingConv::C, EVT(MVT::i16).getTypeForEVT(*DAG.getContext()), Callee,
23411         std::move(Args));
23412 
23413     SDValue Res;
23414     std::tie(Res, Chain) = LowerCallTo(CLI);
23415 
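          // The libcall returns the half result as an i16 (per the soft f16 ABI
          // above); bitcast it back to f16.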
23416     Res = DAG.getBitcast(MVT::f16, Res);
23417 
23418     if (IsStrict)
23419       Res = DAG.getMergeValues({Res, Chain}, DL);
23420 
23421     return Res;
23422   }
23423 
23424   if (VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16()) {
23425     if (!Subtarget.hasF16C() || SVT.getScalarType() != MVT::f32)
23426       return SDValue();
23427 
23428     if (VT.isVector())
23429       return Op;
23430 
23431     SDValue Res;
23432     SDValue Rnd = DAG.getTargetConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, DL,
23433                                         MVT::i32);
23434     if (IsStrict) {
23435       Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4f32,
23436                         DAG.getConstantFP(0, DL, MVT::v4f32), In,
23437                         DAG.getIntPtrConstant(0, DL));
23438       Res = DAG.getNode(X86ISD::STRICT_CVTPS2PH, DL, {MVT::v8i16, MVT::Other},
23439                         {Chain, Res, Rnd});
23440       Chain = Res.getValue(1);
23441     } else {
23442       // FIXME: Should we use zeros for upper elements for non-strict?
23443       Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, In);
23444       Res = DAG.getNode(X86ISD::CVTPS2PH, DL, MVT::v8i16, Res, Rnd);
23445     }
23446 
23447     Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i16, Res,
23448                       DAG.getIntPtrConstant(0, DL));
23449     Res = DAG.getBitcast(MVT::f16, Res);
23450 
23451     if (IsStrict)
23452       return DAG.getMergeValues({Res, Chain}, DL);
23453 
23454     return Res;
23455   }
23456 
23457   return Op;
23458 }
23459 
23460 static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {
23461   bool IsStrict = Op->isStrictFPOpcode();
23462   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
23463   assert(Src.getValueType() == MVT::i16 && Op.getValueType() == MVT::f32 &&
23464          "Unexpected VT!");
23465 
23466   SDLoc dl(Op);
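        // Widen the i16 bit pattern into a v8i16 with zeroed upper elements,
        // convert the low half with (V)CVTPH2PS and extract the f32 from lane 0.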
23467   SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16,
23468                             DAG.getConstant(0, dl, MVT::v8i16), Src,
23469                             DAG.getIntPtrConstant(0, dl));
23470 
23471   SDValue Chain;
23472   if (IsStrict) {
23473     Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {MVT::v4f32, MVT::Other},
23474                       {Op.getOperand(0), Res});
23475     Chain = Res.getValue(1);
23476   } else {
23477     Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
23478   }
23479 
23480   Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
23481                     DAG.getIntPtrConstant(0, dl));
23482 
23483   if (IsStrict)
23484     return DAG.getMergeValues({Res, Chain}, dl);
23485 
23486   return Res;
23487 }
23488 
23489 static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {
23490   bool IsStrict = Op->isStrictFPOpcode();
23491   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
23492   assert(Src.getValueType() == MVT::f32 && Op.getValueType() == MVT::i16 &&
23493          "Unexpected VT!");
23494 
23495   SDLoc dl(Op);
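        // Convert via (V)CVTPS2PH on a widened v4f32 and extract the low i16.
        // The rounding-control immediate 4 selects the current MXCSR rounding
        // mode (X86::STATIC_ROUNDING::CUR_DIRECTION).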
23496   SDValue Res, Chain;
23497   if (IsStrict) {
23498     Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4f32,
23499                       DAG.getConstantFP(0, dl, MVT::v4f32), Src,
23500                       DAG.getIntPtrConstant(0, dl));
23501     Res = DAG.getNode(
23502         X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
23503         {Op.getOperand(0), Res, DAG.getTargetConstant(4, dl, MVT::i32)});
23504     Chain = Res.getValue(1);
23505   } else {
23506     // FIXME: Should we use zeros for upper elements for non-strict?
23507     Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, Src);
23508     Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
23509                       DAG.getTargetConstant(4, dl, MVT::i32));
23510   }
23511 
23512   Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Res,
23513                     DAG.getIntPtrConstant(0, dl));
23514 
23515   if (IsStrict)
23516     return DAG.getMergeValues({Res, Chain}, dl);
23517 
23518   return Res;
23519 }
23520 
23521 SDValue X86TargetLowering::LowerFP_TO_BF16(SDValue Op,
23522                                            SelectionDAG &DAG) const {
23523   SDLoc DL(Op);
23524   MakeLibCallOptions CallOptions;
23525   RTLIB::Libcall LC =
23526       RTLIB::getFPROUND(Op.getOperand(0).getValueType(), MVT::bf16);
23527   SDValue Res =
23528       makeLibCall(DAG, LC, MVT::f32, Op.getOperand(0), CallOptions, DL).first;
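        // The libcall returns its result as an f32; bitcast to i32 and truncate
        // to recover the 16-bit bf16 pattern from the low bits.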
23529   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i16,
23530                      DAG.getBitcast(MVT::i32, Res));
23531 }
23532 
23533 /// Depending on uarch and/or optimizing for size, we might prefer to use a
23534 /// vector operation in place of the typical scalar operation.
23535 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
23536                                          const X86Subtarget &Subtarget) {
23537   // If both operands have other uses, this is probably not profitable.
23538   SDValue LHS = Op.getOperand(0);
23539   SDValue RHS = Op.getOperand(1);
23540   if (!LHS.hasOneUse() && !RHS.hasOneUse())
23541     return Op;
23542 
23543   // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
23544   bool IsFP = Op.getSimpleValueType().isFloatingPoint();
23545   if (IsFP && !Subtarget.hasSSE3())
23546     return Op;
23547   if (!IsFP && !Subtarget.hasSSSE3())
23548     return Op;
23549 
23550   // Extract from a common vector.
23551   if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
23552       RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
23553       LHS.getOperand(0) != RHS.getOperand(0) ||
23554       !isa<ConstantSDNode>(LHS.getOperand(1)) ||
23555       !isa<ConstantSDNode>(RHS.getOperand(1)) ||
23556       !shouldUseHorizontalOp(true, DAG, Subtarget))
23557     return Op;
23558 
23559   // Allow commuted 'hadd' ops.
23560   // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
23561   unsigned HOpcode;
23562   switch (Op.getOpcode()) {
23563     case ISD::ADD: HOpcode = X86ISD::HADD; break;
23564     case ISD::SUB: HOpcode = X86ISD::HSUB; break;
23565     case ISD::FADD: HOpcode = X86ISD::FHADD; break;
23566     case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
23567     default:
23568       llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
23569   }
23570   unsigned LExtIndex = LHS.getConstantOperandVal(1);
23571   unsigned RExtIndex = RHS.getConstantOperandVal(1);
23572   if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
23573       (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
23574     std::swap(LExtIndex, RExtIndex);
23575 
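        // The extract indices must form an adjacent even/odd pair (0/1, 2/3, ...)
        // so that a single lane of the horizontal op produces the result.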
23576   if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
23577     return Op;
23578 
23579   SDValue X = LHS.getOperand(0);
23580   EVT VecVT = X.getValueType();
23581   unsigned BitWidth = VecVT.getSizeInBits();
23582   unsigned NumLanes = BitWidth / 128;
23583   unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
23584   assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
23585          "Not expecting illegal vector widths here");
23586 
23587   // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
23588   // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
23589   SDLoc DL(Op);
23590   if (BitWidth == 256 || BitWidth == 512) {
23591     unsigned LaneIdx = LExtIndex / NumEltsPerLane;
23592     X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
23593     LExtIndex %= NumEltsPerLane;
23594   }
23595 
23596   // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
23597   // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
23598   // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
23599   // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
23600   SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
23601   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
23602                      DAG.getIntPtrConstant(LExtIndex / 2, DL));
23603 }
23604 
23605 /// Depending on uarch and/or optimizing for size, we might prefer to use a
23606 /// vector operation in place of the typical scalar operation.
23607 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
23608   assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
23609          "Only expecting float/double");
23610   return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
23611 }
23612 
23613 /// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
23614 /// This mode isn't supported in hardware on X86. But as long as we aren't
23615 /// compiling with trapping math, we can emulate this with
23616 /// trunc(X + copysign(nextafter(0.5, 0.0), X)).
23617 static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {
23618   SDValue N0 = Op.getOperand(0);
23619   SDLoc dl(Op);
23620   MVT VT = Op.getSimpleValueType();
23621 
23622   // N0 += copysign(nextafter(0.5, 0.0), N0)
23623   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
23624   bool Ignored;
23625   APFloat Point5Pred = APFloat(0.5f);
23626   Point5Pred.convert(Sem, APFloat::rmNearestTiesToEven, &Ignored);
23627   Point5Pred.next(/*nextDown*/true);
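        // For f32 this is 0x3EFFFFFF (~0.49999997), the largest float strictly
        // less than 0.5.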
23628 
23629   SDValue Adder = DAG.getNode(ISD::FCOPYSIGN, dl, VT,
23630                               DAG.getConstantFP(Point5Pred, dl, VT), N0);
23631   N0 = DAG.getNode(ISD::FADD, dl, VT, N0, Adder);
23632 
23633   // Truncate the result to remove fraction.
23634   return DAG.getNode(ISD::FTRUNC, dl, VT, N0);
23635 }
23636 
23637 /// The only differences between FABS and FNEG are the mask and the logic op.
23638 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
23639 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
23640   assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
23641          "Wrong opcode for lowering FABS or FNEG.");
23642 
23643   bool IsFABS = (Op.getOpcode() == ISD::FABS);
23644 
23645   // If this is a FABS and it has an FNEG user, bail out to fold the combination
23646   // into an FNABS. We'll lower the FABS after that if it is still in use.
23647   if (IsFABS)
23648     for (SDNode *User : Op->uses())
23649       if (User->getOpcode() == ISD::FNEG)
23650         return Op;
23651 
23652   SDLoc dl(Op);
23653   MVT VT = Op.getSimpleValueType();
23654 
23655   bool IsF128 = (VT == MVT::f128);
23656   assert(VT.isFloatingPoint() && VT != MVT::f80 &&
23657          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
23658          "Unexpected type in LowerFABSorFNEG");
23659 
23660   // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
23661   // decide if we should generate a 16-byte constant mask when we only need 4 or
23662   // 8 bytes for the scalar case.
23663 
23664   // There are no scalar bitwise logical SSE/AVX instructions, so we
23665   // generate a 16-byte vector constant and logic op even for the scalar case.
23666   // Using a 16-byte mask allows folding the load of the mask with
23667   // the logic op, so it can save (~4 bytes) on code size.
23668   bool IsFakeVector = !VT.isVector() && !IsF128;
23669   MVT LogicVT = VT;
23670   if (IsFakeVector)
23671     LogicVT = (VT == MVT::f64)   ? MVT::v2f64
23672               : (VT == MVT::f32) ? MVT::v4f32
23673                                  : MVT::v8f16;
23674 
23675   unsigned EltBits = VT.getScalarSizeInBits();
23676   // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
23677   APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
23678                            APInt::getSignMask(EltBits);
23679   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
23680   SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
23681 
23682   SDValue Op0 = Op.getOperand(0);
23683   bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
23684   unsigned LogicOp = IsFABS  ? X86ISD::FAND :
23685                      IsFNABS ? X86ISD::FOR  :
23686                                X86ISD::FXOR;
23687   SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
23688 
23689   if (VT.isVector() || IsF128)
23690     return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
23691 
23692   // For the scalar case extend to a 128-bit vector, perform the logic op,
23693   // and extract the scalar result back out.
23694   Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
23695   SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
23696   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
23697                      DAG.getIntPtrConstant(0, dl));
23698 }
23699 
23700 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
23701   SDValue Mag = Op.getOperand(0);
23702   SDValue Sign = Op.getOperand(1);
23703   SDLoc dl(Op);
23704 
23705   // If the sign operand is smaller, extend it first.
23706   MVT VT = Op.getSimpleValueType();
23707   if (Sign.getSimpleValueType().bitsLT(VT))
23708     Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
23709 
23710   // And if it is bigger, shrink it first.
23711   if (Sign.getSimpleValueType().bitsGT(VT))
23712     Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign,
23713                        DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
23714 
23715   // At this point the operands and the result should have the same
23716   // type, and that won't be f80 since that is not custom lowered.
23717   bool IsF128 = (VT == MVT::f128);
23718   assert(VT.isFloatingPoint() && VT != MVT::f80 &&
23719          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
23720          "Unexpected type in LowerFCOPYSIGN");
23721 
23722   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
23723 
23724   // Perform all scalar logic operations as 16-byte vectors because there are no
23725   // scalar FP logic instructions in SSE.
23726   // TODO: This isn't necessary. If we used scalar types, we might avoid some
23727   // unnecessary splats, but we might miss load folding opportunities. Should
23728   // this decision be based on OptimizeForSize?
23729   bool IsFakeVector = !VT.isVector() && !IsF128;
23730   MVT LogicVT = VT;
23731   if (IsFakeVector)
23732     LogicVT = (VT == MVT::f64)   ? MVT::v2f64
23733               : (VT == MVT::f32) ? MVT::v4f32
23734                                  : MVT::v8f16;
23735 
23736   // The mask constants are automatically splatted for vector types.
23737   unsigned EltSizeInBits = VT.getScalarSizeInBits();
23738   SDValue SignMask = DAG.getConstantFP(
23739       APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
23740   SDValue MagMask = DAG.getConstantFP(
23741       APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
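        // e.g. for f64: SignMask = 0x8000000000000000, MagMask = 0x7fffffffffffffff.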
23742 
23743   // First, clear all bits but the sign bit from the second operand (sign).
23744   if (IsFakeVector)
23745     Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
23746   SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
23747 
23748   // Next, clear the sign bit from the first operand (magnitude).
23749   // TODO: If we had general constant folding for FP logic ops, this check
23750   // wouldn't be necessary.
23751   SDValue MagBits;
23752   if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
23753     APFloat APF = Op0CN->getValueAPF();
23754     APF.clearSign();
23755     MagBits = DAG.getConstantFP(APF, dl, LogicVT);
23756   } else {
23757     // If the magnitude operand wasn't a constant, we need to AND out the sign.
23758     if (IsFakeVector)
23759       Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
23760     MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
23761   }
23762 
23763   // OR the magnitude value with the sign bit.
23764   SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
23765   return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
23766                                           DAG.getIntPtrConstant(0, dl));
23767 }
23768 
23769 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
23770   SDValue N0 = Op.getOperand(0);
23771   SDLoc dl(Op);
23772   MVT VT = Op.getSimpleValueType();
23773 
23774   MVT OpVT = N0.getSimpleValueType();
23775   assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
23776          "Unexpected type for FGETSIGN");
23777 
23778   // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
23779   MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
23780   SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
23781   Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
23782   Res = DAG.getZExtOrTrunc(Res, dl, VT);
23783   Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
23784   return Res;
23785 }
23786 
23787 /// Helper for attempting to create a X86ISD::BT node.
23788 static SDValue getBT(SDValue Src, SDValue BitNo, const SDLoc &DL, SelectionDAG &DAG) {
23789   // If Src is i8, promote it to i32 with any_extend.  There is no i8 BT
23790   // instruction.  Since the shift amount is in-range-or-undefined, we know
23791   // that doing a bittest on the i32 value is ok.  We extend to i32 because
23792   // the encoding for the i16 version is larger than the i32 version.
23793   // Also promote i16 to i32 for performance / code size reasons.
23794   if (Src.getValueType().getScalarSizeInBits() < 32)
23795     Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
23796 
23797   // No legal type found, give up.
23798   if (!DAG.getTargetLoweringInfo().isTypeLegal(Src.getValueType()))
23799     return SDValue();
23800 
23801   // See if we can use the 32-bit instruction instead of the 64-bit one for a
23802   // shorter encoding. Since the former takes the modulo 32 of BitNo and the
23803   // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
23804   // known to be zero.
23805   if (Src.getValueType() == MVT::i64 &&
23806       DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
23807     Src = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
23808 
23809   // If the operand types disagree, extend the shift amount to match.  Since
23810   // BT ignores high bits (like shifts) we can use anyextend.
23811   if (Src.getValueType() != BitNo.getValueType()) {
23812     // Peek through a mask/modulo operation.
23813     // TODO: DAGCombine fails to do this as it just checks isTruncateFree, but
23814     // we probably need a better IsDesirableToPromoteOp to handle this as well.
23815     if (BitNo.getOpcode() == ISD::AND && BitNo->hasOneUse())
23816       BitNo = DAG.getNode(ISD::AND, DL, Src.getValueType(),
23817                           DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
23818                                       BitNo.getOperand(0)),
23819                           DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
23820                                       BitNo.getOperand(1)));
23821     else
23822       BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
23823   }
23824 
23825   return DAG.getNode(X86ISD::BT, DL, MVT::i32, Src, BitNo);
23826 }
23827 
23828 /// Helper for creating a X86ISD::SETCC node.
23829 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
23830                         SelectionDAG &DAG) {
23831   return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
23832                      DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
23833 }
23834 
23835 /// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
23836 /// style scalarized (associative) reduction patterns. Partial reductions
23837 /// are supported when the pointer SrcMask is non-null.
23838 /// TODO - move this to SelectionDAG?
23839 static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
23840                                  SmallVectorImpl<SDValue> &SrcOps,
23841                                  SmallVectorImpl<APInt> *SrcMask = nullptr) {
23842   SmallVector<SDValue, 8> Opnds;
23843   DenseMap<SDValue, APInt> SrcOpMap;
23844   EVT VT = MVT::Other;
23845 
23846   // Recognize a special case where a vector is cast into a wide integer to
23847   // test all 0s.
23848   assert(Op.getOpcode() == unsigned(BinOp) &&
23849          "Unexpected bit reduction opcode");
23850   Opnds.push_back(Op.getOperand(0));
23851   Opnds.push_back(Op.getOperand(1));
23852 
23853   for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
23854     SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
23855     // BFS traverse all BinOp operands.
23856     if (I->getOpcode() == unsigned(BinOp)) {
23857       Opnds.push_back(I->getOperand(0));
23858       Opnds.push_back(I->getOperand(1));
23859       // Re-evaluate the number of nodes to be traversed.
23860       e += 2; // 2 more nodes (LHS and RHS) are pushed.
23861       continue;
23862     }
23863 
23864     // Quit if this is not an EXTRACT_VECTOR_ELT.
23865     if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23866       return false;
23867 
23868     // Quit if the extract does not use a constant index.
23869     auto *Idx = dyn_cast<ConstantSDNode>(I->getOperand(1));
23870     if (!Idx)
23871       return false;
23872 
23873     SDValue Src = I->getOperand(0);
23874     DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
23875     if (M == SrcOpMap.end()) {
23876       VT = Src.getValueType();
23877       // Quit if not the same type.
23878       if (!SrcOpMap.empty() && VT != SrcOpMap.begin()->first.getValueType())
23879         return false;
23880       unsigned NumElts = VT.getVectorNumElements();
23881       APInt EltCount = APInt::getZero(NumElts);
23882       M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
23883       SrcOps.push_back(Src);
23884     }
23885 
23886     // Quit if element already used.
23887     unsigned CIdx = Idx->getZExtValue();
23888     if (M->second[CIdx])
23889       return false;
23890     M->second.setBit(CIdx);
23891   }
23892 
23893   if (SrcMask) {
23894     // Collect the source partial masks.
23895     for (SDValue &SrcOp : SrcOps)
23896       SrcMask->push_back(SrcOpMap[SrcOp]);
23897   } else {
23898     // Quit if not all elements are used.
23899     for (const auto &I : SrcOpMap)
23900       if (!I.second.isAllOnes())
23901         return false;
23902   }
23903 
23904   return true;
23905 }
23906 
23907 // Helper function for comparing all bits of a vector against zero.
23908 static SDValue LowerVectorAllZero(const SDLoc &DL, SDValue V, ISD::CondCode CC,
23909                                   const APInt &Mask,
23910                                   const X86Subtarget &Subtarget,
23911                                   SelectionDAG &DAG, X86::CondCode &X86CC) {
23912   EVT VT = V.getValueType();
23913   unsigned ScalarSize = VT.getScalarSizeInBits();
23914   if (Mask.getBitWidth() != ScalarSize) {
23915     assert(ScalarSize == 1 && "Element Mask vs Vector bitwidth mismatch");
23916     return SDValue();
23917   }
23918 
23919   assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
23920   X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);
23921 
23922   auto MaskBits = [&](SDValue Src) {
23923     if (Mask.isAllOnes())
23924       return Src;
23925     EVT SrcVT = Src.getValueType();
23926     SDValue MaskValue = DAG.getConstant(Mask, DL, SrcVT);
23927     return DAG.getNode(ISD::AND, DL, SrcVT, Src, MaskValue);
23928   };
23929 
23930   // For sub-128-bit vector, cast to (legal) integer and compare with zero.
23931   if (VT.getSizeInBits() < 128) {
23932     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
23933     if (!DAG.getTargetLoweringInfo().isTypeLegal(IntVT))
23934       return SDValue();
23935     return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
23936                        DAG.getBitcast(IntVT, MaskBits(V)),
23937                        DAG.getConstant(0, DL, IntVT));
23938   }
23939 
23940   // Quit if not splittable to 128/256-bit vector.
23941   if (!isPowerOf2_32(VT.getSizeInBits()))
23942     return SDValue();
23943 
23944   // Split down to 128/256-bit vector.
23945   unsigned TestSize = Subtarget.hasAVX() ? 256 : 128;
23946   while (VT.getSizeInBits() > TestSize) {
23947     auto Split = DAG.SplitVector(V, DL);
23948     VT = Split.first.getValueType();
23949     V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
23950   }
23951 
23952   bool UsePTEST = Subtarget.hasSSE41();
23953   if (UsePTEST) {
23954     MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
23955     V = DAG.getBitcast(TestVT, MaskBits(V));
23956     return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, V, V);
23957   }
23958 
23959   // Without PTEST, a masked v2i64 or-reduction is not faster than
23960   // scalarization.
23961   if (!Mask.isAllOnes() && VT.getScalarSizeInBits() > 32)
23962     return SDValue();
23963 
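        // Compare each byte against zero and combine with MOVMSK: the source is
        // all-zero iff all 16 mask bits are set (the mask equals 0xFFFF).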
23964   V = DAG.getBitcast(MVT::v16i8, MaskBits(V));
23965   V = DAG.getNode(X86ISD::PCMPEQ, DL, MVT::v16i8, V,
23966                   getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
23967   V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
23968   return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
23969                      DAG.getConstant(0xFFFF, DL, MVT::i32));
23970 }
23971 
23972 // Check whether an OR'd reduction tree is PTEST-able, or if we can fall back to
23973 // CMP(MOVMSK(PCMPEQB(X,0))).
23974 static SDValue MatchVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
23975                                       const SDLoc &DL,
23976                                       const X86Subtarget &Subtarget,
23977                                       SelectionDAG &DAG, SDValue &X86CC) {
23978   assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
23979 
23980   if (!Subtarget.hasSSE2() || !Op->hasOneUse())
23981     return SDValue();
23982 
23983   // Check whether we're masking/truncating an OR-reduction result, in which
23984   // case track the masked bits.
23985   APInt Mask = APInt::getAllOnes(Op.getScalarValueSizeInBits());
23986   switch (Op.getOpcode()) {
23987   case ISD::TRUNCATE: {
23988     SDValue Src = Op.getOperand(0);
23989     Mask = APInt::getLowBitsSet(Src.getScalarValueSizeInBits(),
23990                                 Op.getScalarValueSizeInBits());
23991     Op = Src;
23992     break;
23993   }
23994   case ISD::AND: {
23995     if (auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
23996       Mask = Cst->getAPIntValue();
23997       Op = Op.getOperand(0);
23998     }
23999     break;
24000   }
24001   }
24002 
24003   SmallVector<SDValue, 8> VecIns;
24004   if (Op.getOpcode() == ISD::OR && matchScalarReduction(Op, ISD::OR, VecIns)) {
24005     EVT VT = VecIns[0].getValueType();
24006     assert(llvm::all_of(VecIns,
24007                         [VT](SDValue V) { return VT == V.getValueType(); }) &&
24008            "Reduction source vector mismatch");
24009 
24010     // Quit if less than 128-bits or not splittable to 128/256-bit vector.
24011     if (VT.getSizeInBits() < 128 || !isPowerOf2_32(VT.getSizeInBits()))
24012       return SDValue();
24013 
24014     // If more than one full vector is evaluated, OR them first before PTEST.
24015     for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1;
24016          Slot += 2, e += 1) {
24017       // Each iteration will OR 2 nodes and append the result until there is
24018       // only 1 node left, i.e. the final OR'd value of all vectors.
24019       SDValue LHS = VecIns[Slot];
24020       SDValue RHS = VecIns[Slot + 1];
24021       VecIns.push_back(DAG.getNode(ISD::OR, DL, VT, LHS, RHS));
24022     }
24023 
24024     X86::CondCode CCode;
24025     if (SDValue V = LowerVectorAllZero(DL, VecIns.back(), CC, Mask, Subtarget,
24026                                        DAG, CCode)) {
24027       X86CC = DAG.getTargetConstant(CCode, DL, MVT::i8);
24028       return V;
24029     }
24030   }
24031 
24032   if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
24033     ISD::NodeType BinOp;
24034     if (SDValue Match =
24035             DAG.matchBinOpReduction(Op.getNode(), BinOp, {ISD::OR})) {
24036       X86::CondCode CCode;
24037       if (SDValue V =
24038               LowerVectorAllZero(DL, Match, CC, Mask, Subtarget, DAG, CCode)) {
24039         X86CC = DAG.getTargetConstant(CCode, DL, MVT::i8);
24040         return V;
24041       }
24042     }
24043   }
24044 
24045   return SDValue();
24046 }
24047 
24048 /// Return true if \c Op has a use that doesn't just read flags.
24049 static bool hasNonFlagsUse(SDValue Op) {
24050   for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
24051        ++UI) {
24052     SDNode *User = *UI;
24053     unsigned UOpNo = UI.getOperandNo();
24054     if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
24055       // Look past the truncate.
24056       UOpNo = User->use_begin().getOperandNo();
24057       User = *User->use_begin();
24058     }
24059 
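          // BRCOND, SETCC, and the condition operand of a SELECT are treated as
          // flag-only reads.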
24060     if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
24061         !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
24062       return true;
24063   }
24064   return false;
24065 }
24066 
24067 // Transform to an x86-specific ALU node with flags if there is a chance of
24068 // using an RMW op or only the flags are used. Otherwise, leave
24069 // the node alone and emit a 'cmp' or 'test' instruction.
24070 static bool isProfitableToUseFlagOp(SDValue Op) {
24071   for (SDNode *U : Op->uses())
24072     if (U->getOpcode() != ISD::CopyToReg &&
24073         U->getOpcode() != ISD::SETCC &&
24074         U->getOpcode() != ISD::STORE)
24075       return false;
24076 
24077   return true;
24078 }
24079 
24080 /// Emit nodes that will be selected as "test Op0,Op0", or something
24081 /// equivalent.
24082 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
24083                         SelectionDAG &DAG, const X86Subtarget &Subtarget) {
24084   // CF and OF aren't always set the way we want. Determine which
24085   // of these we need.
24086   bool NeedCF = false;
24087   bool NeedOF = false;
24088   switch (X86CC) {
24089   default: break;
24090   case X86::COND_A: case X86::COND_AE:
24091   case X86::COND_B: case X86::COND_BE:
24092     NeedCF = true;
24093     break;
24094   case X86::COND_G: case X86::COND_GE:
24095   case X86::COND_L: case X86::COND_LE:
24096   case X86::COND_O: case X86::COND_NO: {
24097     // Check if we really need to set the
24098     // Overflow flag. If NoSignedWrap is present
24099     // that is not actually needed.
24100     switch (Op->getOpcode()) {
24101     case ISD::ADD:
24102     case ISD::SUB:
24103     case ISD::MUL:
24104     case ISD::SHL:
24105       if (Op.getNode()->getFlags().hasNoSignedWrap())
24106         break;
24107       [[fallthrough]];
24108     default:
24109       NeedOF = true;
24110       break;
24111     }
24112     break;
24113   }
24114   }
24115   // See if we can use the EFLAGS value from the operand instead of
24116   // doing a separate TEST. TEST always sets OF and CF to 0, so unless
24117   // we prove that the arithmetic won't overflow, we can't use OF or CF.
24118   if (Op.getResNo() != 0 || NeedOF || NeedCF) {
24119     // Emit a CMP with 0, which is the TEST pattern.
24120     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
24121                        DAG.getConstant(0, dl, Op.getValueType()));
24122   }
24123   unsigned Opcode = 0;
24124   unsigned NumOperands = 0;
24125 
24126   SDValue ArithOp = Op;
24127 
24128   // NOTE: In the code below we use ArithOp to hold the arithmetic operation,
24129   // which may be the result of a cast.  We use the variable 'Op', the
24130   // un-cast value, when we check for possible users.
24131   switch (ArithOp.getOpcode()) {
24132   case ISD::AND:
24133     // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
24134     // because a TEST instruction will be better.
24135     if (!hasNonFlagsUse(Op))
24136       break;
24137 
24138     [[fallthrough]];
24139   case ISD::ADD:
24140   case ISD::SUB:
24141   case ISD::OR:
24142   case ISD::XOR:
24143     if (!isProfitableToUseFlagOp(Op))
24144       break;
24145 
24146     // Otherwise use a regular EFLAGS-setting instruction.
24147     switch (ArithOp.getOpcode()) {
24148     default: llvm_unreachable("unexpected operator!");
24149     case ISD::ADD: Opcode = X86ISD::ADD; break;
24150     case ISD::SUB: Opcode = X86ISD::SUB; break;
24151     case ISD::XOR: Opcode = X86ISD::XOR; break;
24152     case ISD::AND: Opcode = X86ISD::AND; break;
24153     case ISD::OR:  Opcode = X86ISD::OR;  break;
24154     }
24155 
24156     NumOperands = 2;
24157     break;
24158   case X86ISD::ADD:
24159   case X86ISD::SUB:
24160   case X86ISD::OR:
24161   case X86ISD::XOR:
24162   case X86ISD::AND:
24163     return SDValue(Op.getNode(), 1);
24164   case ISD::SSUBO:
24165   case ISD::USUBO: {
24166     // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
24167     SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
24168     return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
24169                        Op->getOperand(1)).getValue(1);
24170   }
24171   default:
24172     break;
24173   }
24174 
24175   if (Opcode == 0) {
24176     // Emit a CMP with 0, which is the TEST pattern.
24177     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
24178                        DAG.getConstant(0, dl, Op.getValueType()));
24179   }
24180   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
24181   SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
24182 
24183   SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
24184   DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
24185   return SDValue(New.getNode(), 1);
24186 }
24187 
24188 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
24189 /// equivalent.
24190 static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
24191                        const SDLoc &dl, SelectionDAG &DAG,
24192                        const X86Subtarget &Subtarget) {
24193   if (isNullConstant(Op1))
24194     return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
24195 
24196   EVT CmpVT = Op0.getValueType();
24197 
24198   assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
24199           CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
24200 
24201   // Only promote the compare up to i32 if it is a 16-bit operation
24202   // with an immediate.  16-bit immediates are to be avoided.
24203   if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
24204       !DAG.getMachineFunction().getFunction().hasMinSize()) {
24205     ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
24206     ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
24207     // Don't do this if the immediate can fit in 8-bits.
24208     if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
24209         (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
24210       unsigned ExtendOp =
24211           isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
24212       if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
24213         // For equality comparisons try to use SIGN_EXTEND if the input was
24214         // truncate from something with enough sign bits.
24215         if (Op0.getOpcode() == ISD::TRUNCATE) {
24216           if (DAG.ComputeMaxSignificantBits(Op0.getOperand(0)) <= 16)
24217             ExtendOp = ISD::SIGN_EXTEND;
24218         } else if (Op1.getOpcode() == ISD::TRUNCATE) {
24219           if (DAG.ComputeMaxSignificantBits(Op1.getOperand(0)) <= 16)
24220             ExtendOp = ISD::SIGN_EXTEND;
24221         }
24222       }
24223 
24224       CmpVT = MVT::i32;
24225       Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
24226       Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
24227     }
24228   }
24229 
24230   // Try to shrink i64 compares if the input has enough zero bits.
24231   // FIXME: Do this for non-constant compares for constant on LHS?
24232   if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
24233       Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
24234       cast<ConstantSDNode>(Op1)->getAPIntValue().getActiveBits() <= 32 &&
24235       DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
24236     CmpVT = MVT::i32;
24237     Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
24238     Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
24239   }
24240 
24241   // 0-x == y --> x+y == 0
24242   // 0-x != y --> x+y != 0
24243   if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op0.getOperand(0)) &&
24244       Op0.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
24245     SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
24246     SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(1), Op1);
24247     return Add.getValue(1);
24248   }
24249 
24250   // x == 0-y --> x+y == 0
24251   // x != 0-y --> x+y != 0
24252   if (Op1.getOpcode() == ISD::SUB && isNullConstant(Op1.getOperand(0)) &&
24253       Op1.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
24254     SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
24255     SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0, Op1.getOperand(1));
24256     return Add.getValue(1);
24257   }
24258 
24259   // Use SUB instead of CMP to enable CSE between SUB and CMP.
24260   SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
24261   SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
24262   return Sub.getValue(1);
24263 }
24264 
24265 /// Check if replacement of SQRT with RSQRT should be disabled.
24266 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
24267   EVT VT = Op.getValueType();
24268 
24269   // We don't need to replace SQRT with RSQRT for half type.
24270   if (VT.getScalarType() == MVT::f16)
24271     return true;
24272 
24273   // We never want to use both SQRT and RSQRT instructions for the same input.
24274   if (DAG.doesNodeExist(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
24275     return false;
24276 
24277   if (VT.isVector())
24278     return Subtarget.hasFastVectorFSQRT();
24279   return Subtarget.hasFastScalarFSQRT();
24280 }
24281 
24282 /// The minimum architected relative accuracy is 2^-12. We need one
24283 /// Newton-Raphson step to have a good float result (24 bits of precision).
24284 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
24285                                            SelectionDAG &DAG, int Enabled,
24286                                            int &RefinementSteps,
24287                                            bool &UseOneConstNR,
24288                                            bool Reciprocal) const {
24289   SDLoc DL(Op);
24290   EVT VT = Op.getValueType();
24291 
24292   // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
24293   // It is likely not profitable to do this for f64 because a double-precision
24294   // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
24295   // instructions: convert to single, rsqrtss, convert back to double, refine
24296   // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
24297   // along with FMA, this could be a throughput win.
24298   // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
24299   // after legalize types.
24300   if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
24301       (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
24302       (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
24303       (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
24304       (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
24305     if (RefinementSteps == ReciprocalEstimate::Unspecified)
24306       RefinementSteps = 1;
24307 
24308     UseOneConstNR = false;
24309     // There is no FSQRT for 512-bits, but there is RSQRT14.
24310     unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
24311     SDValue Estimate = DAG.getNode(Opcode, DL, VT, Op);
24312     if (RefinementSteps == 0 && !Reciprocal)
24313       Estimate = DAG.getNode(ISD::FMUL, DL, VT, Op, Estimate);
24314     return Estimate;
24315   }
24316 
24317   if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
24318       Subtarget.hasFP16()) {
24319     assert(Reciprocal && "Don't replace SQRT with RSQRT for half type");
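          // RSQRT14* provides ~2^-14 relative accuracy, which already exceeds the
          // 11-bit f16 precision, so no refinement step is needed.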
24320     if (RefinementSteps == ReciprocalEstimate::Unspecified)
24321       RefinementSteps = 0;
24322 
24323     if (VT == MVT::f16) {
24324       SDValue Zero = DAG.getIntPtrConstant(0, DL);
24325       SDValue Undef = DAG.getUNDEF(MVT::v8f16);
24326       Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
24327       Op = DAG.getNode(X86ISD::RSQRT14S, DL, MVT::v8f16, Undef, Op);
24328       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
24329     }
24330 
24331     return DAG.getNode(X86ISD::RSQRT14, DL, VT, Op);
24332   }
24333   return SDValue();
24334 }
24335 
24336 /// The minimum architected relative accuracy is 2^-12. We need one
24337 /// Newton-Raphson step to have a good float result (24 bits of precision).
24338 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
24339                                             int Enabled,
24340                                             int &RefinementSteps) const {
24341   SDLoc DL(Op);
24342   EVT VT = Op.getValueType();
24343 
24344   // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
24345   // It is likely not profitable to do this for f64 because a double-precision
24346   // reciprocal estimate with refinement on x86 prior to FMA requires
24347   // 15 instructions: convert to single, rcpss, convert back to double, refine
24348   // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
24349   // along with FMA, this could be a throughput win.
24350 
24351   if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
24352       (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
24353       (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
24354       (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
24355     // Enable estimate codegen with 1 refinement step for vector division.
24356     // Scalar division estimates are disabled because they break too much
24357     // real-world code. These defaults are intended to match GCC behavior.
24358     if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
24359       return SDValue();
24360 
24361     if (RefinementSteps == ReciprocalEstimate::Unspecified)
24362       RefinementSteps = 1;
24363 
24364     // There is no FRCP for 512-bits, but there is RCP14.
24365     unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
24366     return DAG.getNode(Opcode, DL, VT, Op);
24367   }
24368 
24369   if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
24370       Subtarget.hasFP16()) {
24371     if (RefinementSteps == ReciprocalEstimate::Unspecified)
24372       RefinementSteps = 0;
24373 
24374     if (VT == MVT::f16) {
24375       SDValue Zero = DAG.getIntPtrConstant(0, DL);
24376       SDValue Undef = DAG.getUNDEF(MVT::v8f16);
24377       Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
24378       Op = DAG.getNode(X86ISD::RCP14S, DL, MVT::v8f16, Undef, Op);
24379       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
24380     }
24381 
24382     return DAG.getNode(X86ISD::RCP14, DL, VT, Op);
24383   }
24384   return SDValue();
24385 }
24386 
24387 /// If we have at least two divisions that use the same divisor, convert to
24388 /// multiplication by a reciprocal. This may need to be adjusted for a given
24389 /// CPU if a division's cost is not at least twice the cost of a multiplication.
24390 /// This is because we still need one division to calculate the reciprocal and
24391 /// then we need two multiplies by that reciprocal as replacements for the
24392 /// original divisions.
24393 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
24394   return 2;
24395 }
24396 
24397 SDValue
24398 X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
24399                                  SelectionDAG &DAG,
24400                                  SmallVectorImpl<SDNode *> &Created) const {
24401   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
24402   if (isIntDivCheap(N->getValueType(0), Attr))
24403     return SDValue(N,0); // Lower SDIV as SDIV
24404 
24405   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
24406          "Unexpected divisor!");
24407 
24408   // Only perform this transform if CMOV is supported otherwise the select
24409   // below will become a branch.
24410   if (!Subtarget.canUseCMOV())
24411     return SDValue();
24412 
24413   // fold (sdiv X, pow2)
24414   EVT VT = N->getValueType(0);
24415   // FIXME: Support i8.
24416   if (VT != MVT::i16 && VT != MVT::i32 &&
24417       !(Subtarget.is64Bit() && VT == MVT::i64))
24418     return SDValue();
24419 
24420   unsigned Lg2 = Divisor.countTrailingZeros();
24421 
24422   // If the divisor is 2 or -2, the default expansion is better.
24423   if (Lg2 == 1)
24424     return SDValue();
24425 
24426   SDLoc DL(N);
24427   SDValue N0 = N->getOperand(0);
24428   SDValue Zero = DAG.getConstant(0, DL, VT);
24429   APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
24430   SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);
24431 
24432   // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
24433   SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
24434   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
24435   SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
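        // e.g. for X sdiv 8: CMov = (X < 0) ? X + 7 : X, and the result is
        // CMov >> 3 (arithmetic), negated below if the divisor was -8.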
24436 
24437   Created.push_back(Cmp.getNode());
24438   Created.push_back(Add.getNode());
24439   Created.push_back(CMov.getNode());
24440 
24441   // Divide by pow2.
24442   SDValue SRA =
24443       DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i8));
24444 
24445   // If we're dividing by a positive value, we're done.  Otherwise, we must
24446   // negate the result.
24447   if (Divisor.isNonNegative())
24448     return SRA;
24449 
24450   Created.push_back(SRA.getNode());
24451   return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
24452 }
24453 
24454 /// Result of 'and' is compared against zero. Change to a BT node if possible.
24455 /// Returns the BT node and the condition code needed to use it.
24456 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
24457                             SelectionDAG &DAG, X86::CondCode &X86CC) {
24458   assert(And.getOpcode() == ISD::AND && "Expected AND node!");
24459   SDValue Op0 = And.getOperand(0);
24460   SDValue Op1 = And.getOperand(1);
24461   if (Op0.getOpcode() == ISD::TRUNCATE)
24462     Op0 = Op0.getOperand(0);
24463   if (Op1.getOpcode() == ISD::TRUNCATE)
24464     Op1 = Op1.getOperand(0);
24465 
24466   SDValue Src, BitNo;
24467   if (Op1.getOpcode() == ISD::SHL)
24468     std::swap(Op0, Op1);
24469   if (Op0.getOpcode() == ISD::SHL) {
24470     if (isOneConstant(Op0.getOperand(0))) {
24471       // If we looked past a truncate, check that it's only truncating away
24472       // known zeros.
24473       unsigned BitWidth = Op0.getValueSizeInBits();
24474       unsigned AndBitWidth = And.getValueSizeInBits();
24475       if (BitWidth > AndBitWidth) {
24476         KnownBits Known = DAG.computeKnownBits(Op0);
24477         if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
24478           return SDValue();
24479       }
24480       Src = Op1;
24481       BitNo = Op0.getOperand(1);
24482     }
24483   } else if (Op1.getOpcode() == ISD::Constant) {
24484     ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
24485     uint64_t AndRHSVal = AndRHS->getZExtValue();
24486     SDValue AndLHS = Op0;
24487 
24488     if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
24489       Src = AndLHS.getOperand(0);
24490       BitNo = AndLHS.getOperand(1);
24491     } else {
24492       // Use BT if the immediate can't be encoded in a TEST instruction or we
24493       // are optimizing for size and the immediate won't fit in a byte.
24494       bool OptForSize = DAG.shouldOptForSize();
24495       if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
24496           isPowerOf2_64(AndRHSVal)) {
24497         Src = AndLHS;
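              // AndRHSVal is a power of 2 here, so this is simply the index of
              // its set bit.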
24498         BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
24499                                 Src.getValueType());
24500       }
24501     }
24502   }
24503 
24504   // No patterns found, give up.
24505   if (!Src.getNode())
24506     return SDValue();
24507 
24508   // Remove any bit flip.
24509   if (isBitwiseNot(Src)) {
24510     Src = Src.getOperand(0);
24511     CC = CC == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ;
24512   }
24513 
24514   // Attempt to create the X86ISD::BT node.
24515   if (SDValue BT = getBT(Src, BitNo, dl, DAG)) {
24516     X86CC = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
24517     return BT;
24518   }
24519 
24520   return SDValue();
24521 }
24522 
24523 // Check if pre-AVX condcode can be performed by a single FCMP op.
24524 static bool cheapX86FSETCC_SSE(ISD::CondCode SetCCOpcode) {
24525   return (SetCCOpcode != ISD::SETONE) && (SetCCOpcode != ISD::SETUEQ);
24526 }
24527 
24528 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
24529 /// CMPs.
24530 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
24531                                    SDValue &Op1, bool &IsAlwaysSignaling) {
24532   unsigned SSECC;
24533   bool Swap = false;
24534 
24535   // SSE Condition code mapping:
24536   //  0 - EQ
24537   //  1 - LT
24538   //  2 - LE
24539   //  3 - UNORD
24540   //  4 - NEQ
24541   //  5 - NLT
24542   //  6 - NLE
24543   //  7 - ORD
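        //  8 - EQ_UQ  (AVX-encoded predicate; see cheapX86FSETCC_SSE)
        // 12 - NEQ_OQ (AVX-encoded predicate; see cheapX86FSETCC_SSE)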
24544   switch (SetCCOpcode) {
24545   default: llvm_unreachable("Unexpected SETCC condition");
24546   case ISD::SETOEQ:
24547   case ISD::SETEQ:  SSECC = 0; break;
24548   case ISD::SETOGT:
24549   case ISD::SETGT:  Swap = true; [[fallthrough]];
24550   case ISD::SETLT:
24551   case ISD::SETOLT: SSECC = 1; break;
24552   case ISD::SETOGE:
24553   case ISD::SETGE:  Swap = true; [[fallthrough]];
24554   case ISD::SETLE:
24555   case ISD::SETOLE: SSECC = 2; break;
24556   case ISD::SETUO:  SSECC = 3; break;
24557   case ISD::SETUNE:
24558   case ISD::SETNE:  SSECC = 4; break;
24559   case ISD::SETULE: Swap = true; [[fallthrough]];
24560   case ISD::SETUGE: SSECC = 5; break;
24561   case ISD::SETULT: Swap = true; [[fallthrough]];
24562   case ISD::SETUGT: SSECC = 6; break;
24563   case ISD::SETO:   SSECC = 7; break;
24564   case ISD::SETUEQ: SSECC = 8; break;
24565   case ISD::SETONE: SSECC = 12; break;
24566   }
24567   if (Swap)
24568     std::swap(Op0, Op1);
24569 
24570   switch (SetCCOpcode) {
24571   default:
24572     IsAlwaysSignaling = true;
24573     break;
24574   case ISD::SETEQ:
24575   case ISD::SETOEQ:
24576   case ISD::SETUEQ:
24577   case ISD::SETNE:
24578   case ISD::SETONE:
24579   case ISD::SETUNE:
24580   case ISD::SETO:
24581   case ISD::SETUO:
24582     IsAlwaysSignaling = false;
24583     break;
24584   }
24585 
24586   return SSECC;
24587 }
24588 
24589 /// Break a 256-bit integer VSETCC into two new 128-bit ones and then
24590 /// concatenate the result back.
24591 static SDValue splitIntVSETCC(EVT VT, SDValue LHS, SDValue RHS,
24592                               ISD::CondCode Cond, SelectionDAG &DAG,
24593                               const SDLoc &dl) {
24594   assert(VT.isInteger() && VT == LHS.getValueType() &&
24595          VT == RHS.getValueType() && "Unsupported VTs!");
24596 
24597   SDValue CC = DAG.getCondCode(Cond);
24598 
24599   // Extract the LHS Lo/Hi vectors
24600   SDValue LHS1, LHS2;
24601   std::tie(LHS1, LHS2) = splitVector(LHS, DAG, dl);
24602 
24603   // Extract the RHS Lo/Hi vectors
24604   SDValue RHS1, RHS2;
24605   std::tie(RHS1, RHS2) = splitVector(RHS, DAG, dl);
24606 
24607   // Issue the operation on the smaller types and concatenate the result back
24608   EVT LoVT, HiVT;
24609   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
24610   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
24611                      DAG.getNode(ISD::SETCC, dl, LoVT, LHS1, RHS1, CC),
24612                      DAG.getNode(ISD::SETCC, dl, HiVT, LHS2, RHS2, CC));
24613 }
24614 
24615 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
24616 
24617   SDValue Op0 = Op.getOperand(0);
24618   SDValue Op1 = Op.getOperand(1);
24619   SDValue CC = Op.getOperand(2);
24620   MVT VT = Op.getSimpleValueType();
24621   SDLoc dl(Op);
24622 
24623   assert(VT.getVectorElementType() == MVT::i1 &&
24624          "Cannot set masked compare for this operation");
24625 
24626   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
24627 
24628   // Prefer SETGT over SETLT.
24629   if (SetCCOpcode == ISD::SETLT) {
24630     SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
24631     std::swap(Op0, Op1);
24632   }
24633 
24634   return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
24635 }
24636 
24637 /// Given a buildvector constant, return a new vector constant with each element
24638 /// incremented or decremented. If incrementing or decrementing would result in
24639 /// unsigned overflow or underflow or this is not a simple vector constant,
24640 /// return an empty value.
24641 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
24642   auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
24643   if (!BV)
24644     return SDValue();
24645 
24646   MVT VT = V.getSimpleValueType();
24647   MVT EltVT = VT.getVectorElementType();
24648   unsigned NumElts = VT.getVectorNumElements();
24649   SmallVector<SDValue, 8> NewVecC;
24650   SDLoc DL(V);
24651   for (unsigned i = 0; i < NumElts; ++i) {
24652     auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
24653     if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
24654       return SDValue();
24655 
24656     // Avoid overflow/underflow.
24657     const APInt &EltC = Elt->getAPIntValue();
24658     if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isZero()))
24659       return SDValue();
24660 
24661     NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
24662   }
24663 
24664   return DAG.getBuildVector(VT, DL, NewVecC);
24665 }
24666 
24667 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
24668 /// Op0 u<= Op1:
24669 ///   t = psubus Op0, Op1
24670 ///   pcmpeq t, <0..0>
24671 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
24672                                     ISD::CondCode Cond, const SDLoc &dl,
24673                                     const X86Subtarget &Subtarget,
24674                                     SelectionDAG &DAG) {
24675   if (!Subtarget.hasSSE2())
24676     return SDValue();
24677 
24678   MVT VET = VT.getVectorElementType();
24679   if (VET != MVT::i8 && VET != MVT::i16)
24680     return SDValue();
24681 
24682   switch (Cond) {
24683   default:
24684     return SDValue();
24685   case ISD::SETULT: {
24686     // If the comparison is against a constant, we can turn this into a
24687     // setule. With psubus, setule does not require a swap. This is
24688     // beneficial because the constant in the register is no longer
24689     // clobbered as the destination, so it can be hoisted out of a loop.
24690     // Only do this pre-AVX since vpcmp* is no longer destructive.
24691     if (Subtarget.hasAVX())
24692       return SDValue();
24693     SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
24694     if (!ULEOp1)
24695       return SDValue();
24696     Op1 = ULEOp1;
24697     break;
24698   }
24699   case ISD::SETUGT: {
24700     // If the comparison is against a constant, we can turn this into a setuge.
24701     // This is beneficial because materializing a constant 0 for the PCMPEQ is
24702     // probably cheaper than XOR+PCMPGT using 2 different vector constants:
24703     // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
24704     SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
24705     if (!UGEOp1)
24706       return SDValue();
24707     Op1 = Op0;
24708     Op0 = UGEOp1;
24709     break;
24710   }
24711   // Psubus is better than flip-sign because it requires no inversion.
24712   case ISD::SETUGE:
24713     std::swap(Op0, Op1);
24714     break;
24715   case ISD::SETULE:
24716     break;
24717   }
24718 
24719   SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
24720   return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
24721                      DAG.getConstant(0, dl, VT));
24722 }
24723 
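/// Lower a vector SETCC (including STRICT_FSETCC/STRICT_FSETCCS). Floating
/// point compares are emitted as CMPP/CMPM nodes; integer compares are lowered
/// via AVX-512 mask compares, XOP VPCOM, min/max, PSUBUS or PCMPEQ/PCMPGT
/// sequences depending on the subtarget.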
24724 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
24725                            SelectionDAG &DAG) {
24726   bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
24727                   Op.getOpcode() == ISD::STRICT_FSETCCS;
24728   SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
24729   SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
24730   SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
24731   MVT VT = Op->getSimpleValueType(0);
24732   ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
24733   bool isFP = Op1.getSimpleValueType().isFloatingPoint();
24734   SDLoc dl(Op);
24735 
24736   if (isFP) {
24737     MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
24738     assert(EltVT == MVT::f16 || EltVT == MVT::f32 || EltVT == MVT::f64);
24739     if (isSoftFP16(EltVT, Subtarget))
24740       return SDValue();
24741 
24742     bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
24743     SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
24744 
24745     // If we have a strict compare with a vXi1 result and the input is 128/256
24746     // bits, we can't use a masked compare unless we have VLX. If we use a wider
24747     // compare like we do for non-strict, we might trigger spurious exceptions
24748     // from the upper elements. Instead emit an AVX compare and convert to a mask.
24749     unsigned Opc;
24750     if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1 &&
24751         (!IsStrict || Subtarget.hasVLX() ||
24752          Op0.getSimpleValueType().is512BitVector())) {
24753 #ifndef NDEBUG
24754       unsigned Num = VT.getVectorNumElements();
24755       assert(Num <= 16 || (Num == 32 && EltVT == MVT::f16));
24756 #endif
24757       Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
24758     } else {
24759       Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
24760       // The SSE/AVX packed FP comparison nodes are defined with a
24761       // floating-point vector result that matches the operand type. This allows
24762       // them to work with an SSE1 target (integer vector types are not legal).
24763       VT = Op0.getSimpleValueType();
24764     }
24765 
24766     SDValue Cmp;
24767     bool IsAlwaysSignaling;
24768     unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
24769     if (!Subtarget.hasAVX()) {
24770       // TODO: We could use the following steps to handle a quiet compare with
24771       // signaling encodings.
24772       // 1. Get ordered masks from a quiet ISD::SETO
24773       // 2. Use the masks to mask potential unordered elements in operand A, B
24774       // 3. Get the compare results of masked A, B
24775       // 4. Calculate the final result using the mask and the result from 3
24776       // But currently, we just fall back to scalar operations.
24777       if (IsStrict && IsAlwaysSignaling && !IsSignaling)
24778         return SDValue();
24779 
24780       // Insert an extra signaling instruction to raise exception.
24781       if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
24782         SDValue SignalCmp = DAG.getNode(
24783             Opc, dl, {VT, MVT::Other},
24784             {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
24785         // FIXME: It seems we need to update the flags of all new strict nodes.
24786         // Otherwise, mayRaiseFPException in MI will return false due to
24787         // NoFPExcept = false by default. However, I didn't find it in other
24788         // patches.
24789         SignalCmp->setFlags(Op->getFlags());
24790         Chain = SignalCmp.getValue(1);
24791       }
24792 
24793       // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
24794       // emit two comparisons and a logic op to tie them together.
24795       if (!cheapX86FSETCC_SSE(Cond)) {
24796         // LLVM predicate is SETUEQ or SETONE.
24797         unsigned CC0, CC1;
24798         unsigned CombineOpc;
24799         if (Cond == ISD::SETUEQ) {
24800           CC0 = 3; // UNORD
24801           CC1 = 0; // EQ
24802           CombineOpc = X86ISD::FOR;
24803         } else {
24804           assert(Cond == ISD::SETONE);
24805           CC0 = 7; // ORD
24806           CC1 = 4; // NEQ
24807           CombineOpc = X86ISD::FAND;
24808         }
24809 
24810         SDValue Cmp0, Cmp1;
24811         if (IsStrict) {
24812           Cmp0 = DAG.getNode(
24813               Opc, dl, {VT, MVT::Other},
24814               {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
24815           Cmp1 = DAG.getNode(
24816               Opc, dl, {VT, MVT::Other},
24817               {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
24818           Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
24819                               Cmp1.getValue(1));
24820         } else {
24821           Cmp0 = DAG.getNode(
24822               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
24823           Cmp1 = DAG.getNode(
24824               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
24825         }
24826         Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
24827       } else {
24828         if (IsStrict) {
24829           Cmp = DAG.getNode(
24830               Opc, dl, {VT, MVT::Other},
24831               {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
24832           Chain = Cmp.getValue(1);
24833         } else
24834           Cmp = DAG.getNode(
24835               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
24836       }
24837     } else {
24838       // Handle all other FP comparisons here.
24839       if (IsStrict) {
24840         // Flip already-signaling CCs before setting bit 4 of the AVX CC.
24841         SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
24842         Cmp = DAG.getNode(
24843             Opc, dl, {VT, MVT::Other},
24844             {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
24845         Chain = Cmp.getValue(1);
24846       } else
24847         Cmp = DAG.getNode(
24848             Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
24849     }
24850 
24851     if (VT.getFixedSizeInBits() >
24852         Op.getSimpleValueType().getFixedSizeInBits()) {
24853       // We emitted a compare with an XMM/YMM result. Finish converting to a
24854       // mask register using a vptestm.
24855       EVT CastVT = EVT(VT).changeVectorElementTypeToInteger();
24856       Cmp = DAG.getBitcast(CastVT, Cmp);
24857       Cmp = DAG.getSetCC(dl, Op.getSimpleValueType(), Cmp,
24858                          DAG.getConstant(0, dl, CastVT), ISD::SETNE);
24859     } else {
24860       // If this is SSE/AVX CMPP, bitcast the result back to integer to match
24861       // the result type of SETCC. The bitcast is expected to be optimized
24862       // away during combining/isel.
24863       Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
24864     }
24865 
24866     if (IsStrict)
24867       return DAG.getMergeValues({Cmp, Chain}, dl);
24868 
24869     return Cmp;
24870   }
24871 
24872   assert(!IsStrict && "Strict SETCC only handles FP operands.");
24873 
24874   MVT VTOp0 = Op0.getSimpleValueType();
24875   (void)VTOp0;
24876   assert(VTOp0 == Op1.getSimpleValueType() &&
24877          "Expected operands with same type!");
24878   assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
24879          "Invalid number of packed elements for source and destination!");
24880 
24881   // The non-AVX512 code below works under the assumption that source and
24882   // destination types are the same.
24883   assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
24884          "Value types for source and destination must be the same!");
24885 
24886   // The result is boolean, but operands are int/float
24887   if (VT.getVectorElementType() == MVT::i1) {
24888     // In the AVX-512 architecture setcc returns a mask with i1 elements,
24889     // but there is no compare instruction for i8 and i16 elements in KNL.
24890     assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
24891            "Unexpected operand type");
24892     return LowerIntVSETCC_AVX512(Op, DAG);
24893   }
24894 
24895   // Lower using XOP integer comparisons.
24896   if (VT.is128BitVector() && Subtarget.hasXOP()) {
24897     // Translate compare code to XOP PCOM compare mode.
24898     unsigned CmpMode = 0;
24899     switch (Cond) {
24900     default: llvm_unreachable("Unexpected SETCC condition");
24901     case ISD::SETULT:
24902     case ISD::SETLT: CmpMode = 0x00; break;
24903     case ISD::SETULE:
24904     case ISD::SETLE: CmpMode = 0x01; break;
24905     case ISD::SETUGT:
24906     case ISD::SETGT: CmpMode = 0x02; break;
24907     case ISD::SETUGE:
24908     case ISD::SETGE: CmpMode = 0x03; break;
24909     case ISD::SETEQ: CmpMode = 0x04; break;
24910     case ISD::SETNE: CmpMode = 0x05; break;
24911     }
24912 
24913     // Are we comparing unsigned or signed integers?
24914     unsigned Opc =
24915         ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
24916 
24917     return DAG.getNode(Opc, dl, VT, Op0, Op1,
24918                        DAG.getTargetConstant(CmpMode, dl, MVT::i8));
24919   }
24920 
24921   // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
24922   // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
24923   if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
24924     SDValue BC0 = peekThroughBitcasts(Op0);
24925     if (BC0.getOpcode() == ISD::AND) {
24926       APInt UndefElts;
24927       SmallVector<APInt, 64> EltBits;
24928       if (getTargetConstantBitsFromNode(BC0.getOperand(1),
24929                                         VT.getScalarSizeInBits(), UndefElts,
24930                                         EltBits, false, false)) {
24931         if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
24932           Cond = ISD::SETEQ;
24933           Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
24934         }
24935       }
24936     }
24937   }
24938 
24939   // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
24940   if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
24941       Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
24942     ConstantSDNode *C1 = isConstOrConstSplat(Op1);
24943     if (C1 && C1->getAPIntValue().isPowerOf2()) {
24944       unsigned BitWidth = VT.getScalarSizeInBits();
24945       unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
24946 
24947       SDValue Result = Op0.getOperand(0);
24948       Result = DAG.getNode(ISD::SHL, dl, VT, Result,
24949                            DAG.getConstant(ShiftAmt, dl, VT));
24950       Result = DAG.getNode(ISD::SRA, dl, VT, Result,
24951                            DAG.getConstant(BitWidth - 1, dl, VT));
24952       return Result;
24953     }
24954   }
24955 
24956   // Break 256-bit integer vector compare into smaller ones.
24957   if (VT.is256BitVector() && !Subtarget.hasInt256())
24958     return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
24959 
24960   // Break 512-bit integer vector compare into smaller ones.
24961   // TODO: Try harder to use VPCMPx + VPMOV2x?
24962   if (VT.is512BitVector())
24963     return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
24964 
24965   // If we have a limit constant, try to form PCMPGT (signed cmp) to avoid
24966   // not-of-PCMPEQ:
24967   // X != INT_MIN --> X >s INT_MIN
24968   // X != INT_MAX --> X <s INT_MAX --> INT_MAX >s X
24969   // +X != 0 --> +X >s 0
24970   APInt ConstValue;
24971   if (Cond == ISD::SETNE &&
24972       ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
24973     if (ConstValue.isMinSignedValue())
24974       Cond = ISD::SETGT;
24975     else if (ConstValue.isMaxSignedValue())
24976       Cond = ISD::SETLT;
24977     else if (ConstValue.isZero() && DAG.SignBitIsZero(Op0))
24978       Cond = ISD::SETGT;
24979   }
24980 
24981   // If both operands are known non-negative, then an unsigned compare is the
24982   // same as a signed compare and there's no need to flip signbits.
24983   // TODO: We could check for more general simplifications here since we're
24984   // computing known bits.
24985   bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
24986                    !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
24987 
24988   // Special case: Use min/max operations for unsigned compares.
24989   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24990   if (ISD::isUnsignedIntSetCC(Cond) &&
24991       (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
24992       TLI.isOperationLegal(ISD::UMIN, VT)) {
24993     // If we have a constant operand, increment/decrement it and change the
24994     // condition to avoid an invert.
24995     if (Cond == ISD::SETUGT) {
24996       // X > C --> X >= (C+1) --> X == umax(X, C+1)
24997       if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
24998         Op1 = UGTOp1;
24999         Cond = ISD::SETUGE;
25000       }
25001     }
25002     if (Cond == ISD::SETULT) {
25003       // X < C --> X <= (C-1) --> X == umin(X, C-1)
25004       if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
25005         Op1 = ULTOp1;
25006         Cond = ISD::SETULE;
25007       }
25008     }
25009     bool Invert = false;
25010     unsigned Opc;
25011     switch (Cond) {
25012     default: llvm_unreachable("Unexpected condition code");
25013     case ISD::SETUGT: Invert = true; [[fallthrough]];
25014     case ISD::SETULE: Opc = ISD::UMIN; break;
25015     case ISD::SETULT: Invert = true; [[fallthrough]];
25016     case ISD::SETUGE: Opc = ISD::UMAX; break;
25017     }
25018 
25019     SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
25020     Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
25021 
25022     // If the logical-not of the result is required, perform that now.
25023     if (Invert)
25024       Result = DAG.getNOT(dl, Result, VT);
25025 
25026     return Result;
25027   }
25028 
25029   // Try to use SUBUS and PCMPEQ.
25030   if (FlipSigns)
25031     if (SDValue V =
25032             LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
25033       return V;
25034 
25035   // We are handling one of the integer comparisons here. Since SSE only has
25036   // GT and EQ comparisons for integers, swapping operands and multiple
25037   // operations may be required for some comparisons.
25038   unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
25039                                                             : X86ISD::PCMPGT;
25040   bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
25041               Cond == ISD::SETGE || Cond == ISD::SETUGE;
25042   bool Invert = Cond == ISD::SETNE ||
25043                 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
25044 
25045   if (Swap)
25046     std::swap(Op0, Op1);
25047 
25048   // Check that the operation in question is available (most are plain SSE2,
25049   // but PCMPGTQ and PCMPEQQ have different requirements).
25050   if (VT == MVT::v2i64) {
25051     if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
25052       assert(Subtarget.hasSSE2() && "Don't know how to lower!");
25053 
25054       // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
25055       // the odd elements over the even elements.
25056       if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
25057         Op0 = DAG.getConstant(0, dl, MVT::v4i32);
25058         Op1 = DAG.getBitcast(MVT::v4i32, Op1);
25059 
25060         SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
25061         static const int MaskHi[] = { 1, 1, 3, 3 };
25062         SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
25063 
25064         return DAG.getBitcast(VT, Result);
25065       }
25066 
25067       if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
25068         Op0 = DAG.getBitcast(MVT::v4i32, Op0);
25069         Op1 = DAG.getConstant(-1, dl, MVT::v4i32);
25070 
25071         SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
25072         static const int MaskHi[] = { 1, 1, 3, 3 };
25073         SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
25074 
25075         return DAG.getBitcast(VT, Result);
25076       }
25077 
25078       // Since SSE has no unsigned integer comparisons, we need to flip the sign
25079       // bits of the inputs before performing those operations. The lower
25080       // compare is always unsigned.
25081       SDValue SB = DAG.getConstant(FlipSigns ? 0x8000000080000000ULL
25082                                              : 0x0000000080000000ULL,
25083                                    dl, MVT::v2i64);
25084 
25085       Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
25086       Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
25087 
25088       // Cast everything to the right type.
25089       Op0 = DAG.getBitcast(MVT::v4i32, Op0);
25090       Op1 = DAG.getBitcast(MVT::v4i32, Op1);
25091 
25092       // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
25093       SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
25094       SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
25095 
25096       // Create masks for only the low parts/high parts of the 64 bit integers.
25097       static const int MaskHi[] = { 1, 1, 3, 3 };
25098       static const int MaskLo[] = { 0, 0, 2, 2 };
25099       SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
25100       SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
25101       SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
25102 
25103       SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
25104       Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
25105 
25106       if (Invert)
25107         Result = DAG.getNOT(dl, Result, MVT::v4i32);
25108 
25109       return DAG.getBitcast(VT, Result);
25110     }
25111 
25112     if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
25113       // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
25114       // pcmpeqd + pshufd + pand.
25115       assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
25116 
25117       // First cast everything to the right type.
25118       Op0 = DAG.getBitcast(MVT::v4i32, Op0);
25119       Op1 = DAG.getBitcast(MVT::v4i32, Op1);
25120 
25121       // Do the compare.
25122       SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
25123 
25124       // Make sure the lower and upper halves are both all-ones.
25125       static const int Mask[] = { 1, 0, 3, 2 };
25126       SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
25127       Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
25128 
25129       if (Invert)
25130         Result = DAG.getNOT(dl, Result, MVT::v4i32);
25131 
25132       return DAG.getBitcast(VT, Result);
25133     }
25134   }
25135 
25136   // Since SSE has no unsigned integer comparisons, we need to flip the sign
25137   // bits of the inputs before performing those operations.
25138   if (FlipSigns) {
25139     MVT EltVT = VT.getVectorElementType();
25140     SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
25141                                  VT);
25142     Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
25143     Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
25144   }
25145 
25146   SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
25147 
25148   // If the logical-not of the result is required, perform that now.
25149   if (Invert)
25150     Result = DAG.getNOT(dl, Result, VT);
25151 
25152   return Result;
25153 }
25154 
25155 // Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
25156 static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
25157                               const SDLoc &dl, SelectionDAG &DAG,
25158                               const X86Subtarget &Subtarget,
25159                               SDValue &X86CC) {
25160   // Only support equality comparisons.
25161   if (CC != ISD::SETEQ && CC != ISD::SETNE)
25162     return SDValue();
25163 
25164   // Must be a bitcast from vXi1.
25165   if (Op0.getOpcode() != ISD::BITCAST)
25166     return SDValue();
25167 
25168   Op0 = Op0.getOperand(0);
25169   MVT VT = Op0.getSimpleValueType();
25170   if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
25171       !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
25172       !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
25173     return SDValue();
25174 
25175   X86::CondCode X86Cond;
25176   if (isNullConstant(Op1)) {
25177     X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
25178   } else if (isAllOnesConstant(Op1)) {
25179     // C flag is set for all ones.
25180     X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
25181   } else
25182     return SDValue();
25183 
25184   // If the input is an AND, we can combine its operands into the KTEST.
25185   bool KTestable = false;
25186   if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
25187     KTestable = true;
25188   if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
25189     KTestable = true;
25190   if (!isNullConstant(Op1))
25191     KTestable = false;
25192   if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
25193     SDValue LHS = Op0.getOperand(0);
25194     SDValue RHS = Op0.getOperand(1);
25195     X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
25196     return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
25197   }
25198 
25199   // If the input is an OR, we can combine its operands into the KORTEST.
25200   SDValue LHS = Op0;
25201   SDValue RHS = Op0;
25202   if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
25203     LHS = Op0.getOperand(0);
25204     RHS = Op0.getOperand(1);
25205   }
25206 
25207   X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
25208   return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
25209 }
25210 
25211 /// Emit flags for the given setcc condition and operands. Also returns the
25212 /// corresponding X86 condition code constant in X86CC.
25213 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
25214                                              ISD::CondCode CC, const SDLoc &dl,
25215                                              SelectionDAG &DAG,
25216                                              SDValue &X86CC) const {
25217   // Optimize to BT if possible.
25218   // Lower (X & (1 << N)) == 0 to BT(X, N).
25219   // Lower ((X >>u N) & 1) != 0 to BT(X, N).
25220   // Lower ((X >>s N) & 1) != 0 to BT(X, N).
25221   if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
25222       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
25223     X86::CondCode X86CondCode;
25224     if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CondCode)) {
25225       X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
25226       return BT;
25227     }
25228   }
25229 
25230   // Try to use PTEST/PMOVMSKB for a tree of ORs equality-compared with 0.
25231   // TODO: We could do AND tree with all 1s as well by using the C flag.
25232   if (isNullConstant(Op1) && (CC == ISD::SETEQ || CC == ISD::SETNE))
25233     if (SDValue CmpZ =
25234             MatchVectorAllZeroTest(Op0, CC, dl, Subtarget, DAG, X86CC))
25235       return CmpZ;
25236 
25237   // Try to lower using KORTEST or KTEST.
25238   if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
25239     return Test;
25240 
25241   // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms of
25242   // these.
25243   if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
25244       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
25245     // If the input is a setcc, then reuse the input setcc or use a new one with
25246     // the inverted condition.
25247     if (Op0.getOpcode() == X86ISD::SETCC) {
25248       bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
25249 
25250       X86CC = Op0.getOperand(0);
25251       if (Invert) {
25252         X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
25253         CCode = X86::GetOppositeBranchCondition(CCode);
25254         X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
25255       }
25256 
25257       return Op0.getOperand(1);
25258     }
25259   }
25260 
25261   // Try to use the carry flag from the add in place of a separate CMP for:
25262   // (seteq (add X, -1), -1). Similar for setne.
25263   if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
25264       Op0.getOperand(1) == Op1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
25265     if (isProfitableToUseFlagOp(Op0)) {
25266       SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
25267 
25268       SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
25269                                 Op0.getOperand(1));
25270       DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
25271       X86::CondCode CCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
25272       X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
25273       return SDValue(New.getNode(), 1);
25274     }
25275   }
25276 
25277   X86::CondCode CondCode =
25278       TranslateX86CC(CC, dl, /*IsFP*/ false, Op0, Op1, DAG);
25279   assert(CondCode != X86::COND_INVALID && "Unexpected condition code!");
25280 
25281   SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget);
25282   X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
25283   return EFLAGS;
25284 }
25285 
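/// Lower a scalar SETCC (including the strict FP variants). f128 operands are
/// softened first, integer compares are emitted via emitFlagsForSetcc, and FP
/// compares become (STRICT_)FCMP feeding an X86ISD::SETCC node.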
25286 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
25287 
25288   bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
25289                   Op.getOpcode() == ISD::STRICT_FSETCCS;
25290   MVT VT = Op->getSimpleValueType(0);
25291 
25292   if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
25293 
25294   assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
25295   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
25296   SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
25297   SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
25298   SDLoc dl(Op);
25299   ISD::CondCode CC =
25300       cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
25301 
25302   if (isSoftFP16(Op0.getValueType()))
25303     return SDValue();
25304 
25305   // Handle f128 first, since one possible outcome is a normal integer
25306   // comparison which gets handled by emitFlagsForSetcc.
25307   if (Op0.getValueType() == MVT::f128) {
25308     softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
25309                         Op.getOpcode() == ISD::STRICT_FSETCCS);
25310 
25311     // If softenSetCCOperands returned a scalar, use it.
25312     if (!Op1.getNode()) {
25313       assert(Op0.getValueType() == Op.getValueType() &&
25314              "Unexpected setcc expansion!");
25315       if (IsStrict)
25316         return DAG.getMergeValues({Op0, Chain}, dl);
25317       return Op0;
25318     }
25319   }
25320 
25321   if (Op0.getSimpleValueType().isInteger()) {
25322     // Attempt to canonicalize SGT/UGT -> SGE/UGE compares with a constant, which
25323     // reduces the number of EFLAGS bit reads (the GE conditions don't read ZF);
25324     // this may translate to fewer uops depending on the uarch implementation. The
25325     // equivalent for SLE/ULE -> SLT/ULT isn't likely to happen as we already
25326     // canonicalize to that CondCode.
25327     // NOTE: Only do this if incrementing the constant doesn't increase the bit
25328     // encoding size - so it must either already be an i8 or i32 immediate, or it
25329     // shrinks down to that. We don't do this for any i64's to avoid additional
25330     // constant materializations.
25331     // TODO: Can we move this to TranslateX86CC to handle jumps/branches too?
25332     if (auto *Op1C = dyn_cast<ConstantSDNode>(Op1)) {
25333       const APInt &Op1Val = Op1C->getAPIntValue();
25334       if (!Op1Val.isZero()) {
25335         // Ensure the constant+1 doesn't overflow.
25336         if ((CC == ISD::CondCode::SETGT && !Op1Val.isMaxSignedValue()) ||
25337             (CC == ISD::CondCode::SETUGT && !Op1Val.isMaxValue())) {
25338           APInt Op1ValPlusOne = Op1Val + 1;
25339           if (Op1ValPlusOne.isSignedIntN(32) &&
25340               (!Op1Val.isSignedIntN(8) || Op1ValPlusOne.isSignedIntN(8))) {
25341             Op1 = DAG.getConstant(Op1ValPlusOne, dl, Op0.getValueType());
25342             CC = CC == ISD::CondCode::SETGT ? ISD::CondCode::SETGE
25343                                             : ISD::CondCode::SETUGE;
25344           }
25345         }
25346       }
25347     }
25348 
25349     SDValue X86CC;
25350     SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
25351     SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
25352     return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
25353   }
25354 
25355   // Handle floating point.
25356   X86::CondCode CondCode = TranslateX86CC(CC, dl, /*IsFP*/ true, Op0, Op1, DAG);
25357   if (CondCode == X86::COND_INVALID)
25358     return SDValue();
25359 
25360   SDValue EFLAGS;
25361   if (IsStrict) {
25362     bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
25363     EFLAGS =
25364         DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
25365                     dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
25366     Chain = EFLAGS.getValue(1);
25367   } else {
25368     EFLAGS = DAG.getNode(X86ISD::FCMP, dl, MVT::i32, Op0, Op1);
25369   }
25370 
25371   SDValue X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
25372   SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
25373   return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
25374 }
25375 
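/// Lower SETCCCARRY by materializing the incoming carry (via an ADD of -1) and
/// feeding it into an SBB whose EFLAGS result drives the final SETCC.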
25376 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
25377   SDValue LHS = Op.getOperand(0);
25378   SDValue RHS = Op.getOperand(1);
25379   SDValue Carry = Op.getOperand(2);
25380   SDValue Cond = Op.getOperand(3);
25381   SDLoc DL(Op);
25382 
25383   assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
25384   X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
25385 
25386   // Recreate the carry if needed.
25387   EVT CarryVT = Carry.getValueType();
25388   Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
25389                       Carry, DAG.getAllOnesConstant(DL, CarryVT));
25390 
25391   SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
25392   SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
25393   return getSETCC(CC, Cmp.getValue(1), DL, DAG);
25394 }
25395 
25396 // This function returns three things: the arithmetic computation itself
25397 // (Value), an EFLAGS result (Overflow), and a condition code (Cond).  The
25398 // flag and the condition code define the case in which the arithmetic
25399 // computation overflows.
25400 static std::pair<SDValue, SDValue>
25401 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
25402   assert(Op.getResNo() == 0 && "Unexpected result number!");
25403   SDValue Value, Overflow;
25404   SDValue LHS = Op.getOperand(0);
25405   SDValue RHS = Op.getOperand(1);
25406   unsigned BaseOp = 0;
25407   SDLoc DL(Op);
25408   switch (Op.getOpcode()) {
25409   default: llvm_unreachable("Unknown ovf instruction!");
25410   case ISD::SADDO:
25411     BaseOp = X86ISD::ADD;
25412     Cond = X86::COND_O;
25413     break;
25414   case ISD::UADDO:
25415     BaseOp = X86ISD::ADD;
25416     Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
25417     break;
25418   case ISD::SSUBO:
25419     BaseOp = X86ISD::SUB;
25420     Cond = X86::COND_O;
25421     break;
25422   case ISD::USUBO:
25423     BaseOp = X86ISD::SUB;
25424     Cond = X86::COND_B;
25425     break;
25426   case ISD::SMULO:
25427     BaseOp = X86ISD::SMUL;
25428     Cond = X86::COND_O;
25429     break;
25430   case ISD::UMULO:
25431     BaseOp = X86ISD::UMUL;
25432     Cond = X86::COND_O;
25433     break;
25434   }
25435 
25436   if (BaseOp) {
25437     // Also sets EFLAGS.
25438     SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
25439     Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
25440     Overflow = Value.getValue(1);
25441   }
25442 
25443   return std::make_pair(Value, Overflow);
25444 }
25445 
25446 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
25447   // Lower the "add/sub/mul with overflow" instruction into a regular instruction
25448   // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
25449   // looks for this combo and may remove the "setcc" instruction if the "setcc"
25450   // has only one use.
25451   SDLoc DL(Op);
25452   X86::CondCode Cond;
25453   SDValue Value, Overflow;
25454   std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
25455 
25456   SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
25457   assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
25458   return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
25459 }
25460 
25461 /// Return true if opcode is a X86 logical comparison.
25462 static bool isX86LogicalCmp(SDValue Op) {
25463   unsigned Opc = Op.getOpcode();
25464   if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
25465       Opc == X86ISD::FCMP)
25466     return true;
25467   if (Op.getResNo() == 1 &&
25468       (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
25469        Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
25470        Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
25471     return true;
25472 
25473   return false;
25474 }
25475 
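/// Return true if V is a truncate whose discarded high bits are known to be
/// zero in the source operand.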
25476 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
25477   if (V.getOpcode() != ISD::TRUNCATE)
25478     return false;
25479 
25480   SDValue VOp0 = V.getOperand(0);
25481   unsigned InBits = VOp0.getValueSizeInBits();
25482   unsigned Bits = V.getValueSizeInBits();
25483   return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
25484 }
25485 
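/// Lower ISD::SELECT. Scalar FP selects are turned into CMP/AND/ANDN/OR or
/// VBLENDV sequences (or AVX-512 masked moves); other selects are lowered to
/// X86ISD::CMOV driven by an EFLAGS-producing condition.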
25486 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
25487   bool AddTest = true;
25488   SDValue Cond  = Op.getOperand(0);
25489   SDValue Op1 = Op.getOperand(1);
25490   SDValue Op2 = Op.getOperand(2);
25491   SDLoc DL(Op);
25492   MVT VT = Op1.getSimpleValueType();
25493   SDValue CC;
25494 
25495   if (isSoftFP16(VT)) {
25496     MVT NVT = VT.changeTypeToInteger();
25497     return DAG.getBitcast(VT, DAG.getNode(ISD::SELECT, DL, NVT, Cond,
25498                                           DAG.getBitcast(NVT, Op1),
25499                                           DAG.getBitcast(NVT, Op2)));
25500   }
25501 
25502   // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
25503   // are available or VBLENDV if AVX is available.
25504   // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
25505   if (Cond.getOpcode() == ISD::SETCC && isScalarFPTypeInSSEReg(VT) &&
25506       VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
25507     SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
25508     bool IsAlwaysSignaling;
25509     unsigned SSECC =
25510         translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
25511                            CondOp0, CondOp1, IsAlwaysSignaling);
25512 
25513     if (Subtarget.hasAVX512()) {
25514       SDValue Cmp =
25515           DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
25516                       DAG.getTargetConstant(SSECC, DL, MVT::i8));
25517       assert(!VT.isVector() && "Not a scalar type?");
25518       return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
25519     }
25520 
25521     if (SSECC < 8 || Subtarget.hasAVX()) {
25522       SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
25523                                 DAG.getTargetConstant(SSECC, DL, MVT::i8));
25524 
25525       // If we have AVX, we can use a variable vector select (VBLENDV) instead
25526       // of 3 logic instructions for size savings and potentially speed.
25527       // Unfortunately, there is no scalar form of VBLENDV.
25528 
25529       // If either operand is a +0.0 constant, don't try this. We can expect to
25530       // optimize away at least one of the logic instructions later in that
25531       // case, so that sequence would be faster than a variable blend.
25532 
25533       // BLENDV was introduced with SSE 4.1, but the 2-register form implicitly
25534       // uses XMM0 as the selection register. That may need just as many
25535       // instructions as the AND/ANDN/OR sequence due to register moves, so
25536       // don't bother.
25537       if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
25538           !isNullFPConstant(Op2)) {
25539         // Convert to vectors, do a VSELECT, and convert back to scalar.
25540         // All of the conversions should be optimized away.
25541         MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
25542         SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
25543         SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
25544         SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
25545 
25546         MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
25547         VCmp = DAG.getBitcast(VCmpVT, VCmp);
25548 
25549         SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
25550 
25551         return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
25552                            VSel, DAG.getIntPtrConstant(0, DL));
25553       }
25554       SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
25555       SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
25556       return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
25557     }
25558   }
25559 
25560   // AVX512 fallback is to lower selects of scalar floats to masked moves.
25561   if (isScalarFPTypeInSSEReg(VT) && Subtarget.hasAVX512()) {
25562     SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
25563     return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
25564   }
25565 
25566   if (Cond.getOpcode() == ISD::SETCC &&
25567       !isSoftFP16(Cond.getOperand(0).getSimpleValueType())) {
25568     if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
25569       Cond = NewCond;
25570       // If the condition was updated, it's possible that the operands of the
25571       // select were also updated (for example, EmitTest has a RAUW). Refresh
25572       // the local references to the select operands in case they got stale.
25573       Op1 = Op.getOperand(1);
25574       Op2 = Op.getOperand(2);
25575     }
25576   }
25577 
25578   // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
25579   // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
25580   // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
25581   // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
25582   // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
25583   // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
25584   // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
25585   // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
25586   if (Cond.getOpcode() == X86ISD::SETCC &&
25587       Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
25588       isNullConstant(Cond.getOperand(1).getOperand(1))) {
25589     SDValue Cmp = Cond.getOperand(1);
25590     SDValue CmpOp0 = Cmp.getOperand(0);
25591     unsigned CondCode = Cond.getConstantOperandVal(0);
25592 
25593     // Special handling for __builtin_ffs(X) - 1 pattern which looks like
25594     // (select (seteq X, 0), -1, (cttz_zero_undef X)). Disable the special
25595     // handling to keep the CMP with 0. This should be removed by
25596     // optimizeCompareInst by using the flags from the BSR/TZCNT used for the
25597     // cttz_zero_undef.
25598     auto MatchFFSMinus1 = [&](SDValue Op1, SDValue Op2) {
25599       return (Op1.getOpcode() == ISD::CTTZ_ZERO_UNDEF && Op1.hasOneUse() &&
25600               Op1.getOperand(0) == CmpOp0 && isAllOnesConstant(Op2));
25601     };
25602     if (Subtarget.canUseCMOV() && (VT == MVT::i32 || VT == MVT::i64) &&
25603         ((CondCode == X86::COND_NE && MatchFFSMinus1(Op1, Op2)) ||
25604          (CondCode == X86::COND_E && MatchFFSMinus1(Op2, Op1)))) {
25605       // Keep Cmp.
25606     } else if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
25607         (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
25608       SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
25609       SDVTList CmpVTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
25610 
25611       // 'X - 1' sets the carry flag if X == 0.
25612       // '0 - X' sets the carry flag if X != 0.
25613       // Convert the carry flag to a -1/0 mask with sbb:
25614       // select (X != 0), -1, Y --> 0 - X; or (sbb), Y
25615       // select (X == 0), Y, -1 --> 0 - X; or (sbb), Y
25616       // select (X != 0), Y, -1 --> X - 1; or (sbb), Y
25617       // select (X == 0), -1, Y --> X - 1; or (sbb), Y
25618       SDValue Sub;
25619       if (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE)) {
25620         SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
25621         Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
25622       } else {
25623         SDValue One = DAG.getConstant(1, DL, CmpOp0.getValueType());
25624         Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, CmpOp0, One);
25625       }
25626       SDValue SBB = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
25627                                 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
25628                                 Sub.getValue(1));
25629       return DAG.getNode(ISD::OR, DL, VT, SBB, Y);
25630     } else if (!Subtarget.canUseCMOV() && CondCode == X86::COND_E &&
25631                CmpOp0.getOpcode() == ISD::AND &&
25632                isOneConstant(CmpOp0.getOperand(1))) {
25633       SDValue Src1, Src2;
25634       // Returns true if Op2 is an XOR or OR operator and one of its operands
25635       // is equal to Op1:
25636       // ( a , a op b) || ( b , a op b)
25637       auto isOrXorPattern = [&]() {
25638         if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
25639             (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
25640           Src1 =
25641               Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
25642           Src2 = Op1;
25643           return true;
25644         }
25645         return false;
25646       };
25647 
25648       if (isOrXorPattern()) {
25649         SDValue Neg;
25650         unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
25651         // We need a mask of all zeros or all ones with the same size as the
25652         // other operands.
25653         if (CmpSz > VT.getSizeInBits())
25654           Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
25655         else if (CmpSz < VT.getSizeInBits())
25656           Neg = DAG.getNode(ISD::AND, DL, VT,
25657               DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
25658               DAG.getConstant(1, DL, VT));
25659         else
25660           Neg = CmpOp0;
25661         SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
25662                                    Neg); // -(and (x, 0x1))
25663         SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
25664         return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2);  // And Op y
25665       }
25666     } else if ((VT == MVT::i32 || VT == MVT::i64) && isNullConstant(Op2) &&
25667                Cmp.getNode()->hasOneUse() && (CmpOp0 == Op1) &&
25668                ((CondCode == X86::COND_S) ||                    // smin(x, 0)
25669                 (CondCode == X86::COND_G && hasAndNot(Op1)))) { // smax(x, 0)
25670       // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
25671       //
25672       // If the comparison is testing for a positive value, we have to invert
25673       // the sign bit mask, so only do that transform if the target has a
25674       // bitwise 'and not' instruction (the invert is free).
25675       // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
25676       unsigned ShCt = VT.getSizeInBits() - 1;
25677       SDValue ShiftAmt = DAG.getConstant(ShCt, DL, VT);
25678       SDValue Shift = DAG.getNode(ISD::SRA, DL, VT, Op1, ShiftAmt);
25679       if (CondCode == X86::COND_G)
25680         Shift = DAG.getNOT(DL, Shift, VT);
25681       return DAG.getNode(ISD::AND, DL, VT, Shift, Op1);
25682     }
25683   }
25684 
25685   // Look past (and (setcc_carry (cmp ...)), 1).
25686   if (Cond.getOpcode() == ISD::AND &&
25687       Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
25688       isOneConstant(Cond.getOperand(1)))
25689     Cond = Cond.getOperand(0);
25690 
25691   // If condition flag is set by a X86ISD::CMP, then use it as the condition
25692   // setting operand in place of the X86ISD::SETCC.
25693   unsigned CondOpcode = Cond.getOpcode();
25694   if (CondOpcode == X86ISD::SETCC ||
25695       CondOpcode == X86ISD::SETCC_CARRY) {
25696     CC = Cond.getOperand(0);
25697 
25698     SDValue Cmp = Cond.getOperand(1);
25699     bool IllegalFPCMov = false;
25700     if (VT.isFloatingPoint() && !VT.isVector() &&
25701         !isScalarFPTypeInSSEReg(VT) && Subtarget.canUseCMOV())  // FPStack?
25702       IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
25703 
25704     if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
25705         Cmp.getOpcode() == X86ISD::BT) { // FIXME
25706       Cond = Cmp;
25707       AddTest = false;
25708     }
25709   } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
25710              CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
25711              CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
25712     SDValue Value;
25713     X86::CondCode X86Cond;
25714     std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
25715 
25716     CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
25717     AddTest = false;
25718   }
25719 
25720   if (AddTest) {
25721     // Look past the truncate if the high bits are known zero.
25722     if (isTruncWithZeroHighBitsInput(Cond, DAG))
25723       Cond = Cond.getOperand(0);
25724 
25725     // We know the result of AND is compared against zero. Try to match
25726     // it to BT.
25727     if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
25728       X86::CondCode X86CondCode;
25729       if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, X86CondCode)) {
25730         CC = DAG.getTargetConstant(X86CondCode, DL, MVT::i8);
25731         Cond = BT;
25732         AddTest = false;
25733       }
25734     }
25735   }
25736 
25737   if (AddTest) {
25738     CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
25739     Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
25740   }
25741 
25742   // a <  b ? -1 :  0 -> RES = ~setcc_carry
25743   // a <  b ?  0 : -1 -> RES = setcc_carry
25744   // a >= b ? -1 :  0 -> RES = setcc_carry
25745   // a >= b ?  0 : -1 -> RES = ~setcc_carry
25746   if (Cond.getOpcode() == X86ISD::SUB) {
25747     unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
25748 
25749     if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
25750         (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
25751         (isNullConstant(Op1) || isNullConstant(Op2))) {
25752       SDValue Res =
25753           DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
25754                       DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
25755       if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
25756         return DAG.getNOT(DL, Res, Res.getValueType());
25757       return Res;
25758     }
25759   }
25760 
25761   // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
25762   // widen the cmov and push the truncate through. This avoids introducing a new
25763   // branch during isel and doesn't add any extensions.
25764   if (Op.getValueType() == MVT::i8 &&
25765       Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
25766     SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
25767     if (T1.getValueType() == T2.getValueType() &&
25768         // Exclude CopyFromReg to avoid partial register stalls.
25769         T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
25770       SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
25771                                  CC, Cond);
25772       return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
25773     }
25774   }
25775 
25776   // Or finally, promote i8 cmovs if we have CMOV,
25777   //                 or i16 cmovs if it won't prevent folding a load.
25778   // FIXME: we should not limit promotion of i8 case to only when the CMOV is
25779   //        legal, but EmitLoweredSelect() cannot deal with these extensions
25780   //        being inserted between two CMOV's. (in i16 case too TBN)
25781   //        https://bugs.llvm.org/show_bug.cgi?id=40974
25782   if ((Op.getValueType() == MVT::i8 && Subtarget.canUseCMOV()) ||
25783       (Op.getValueType() == MVT::i16 && !X86::mayFoldLoad(Op1, Subtarget) &&
25784        !X86::mayFoldLoad(Op2, Subtarget))) {
25785     Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
25786     Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
25787     SDValue Ops[] = { Op2, Op1, CC, Cond };
25788     SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
25789     return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
25790   }
25791 
25792   // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
25793   // condition is true.
25794   SDValue Ops[] = { Op2, Op1, CC, Cond };
25795   return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
25796 }
25797 
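/// Lower a sign/zero/any extend whose source is a vXi1 mask. The mask is
/// widened to a legal mask width if needed, extended either directly or via a
/// vselect of all-ones/zero, then truncated/extracted back to the result type.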
25798 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
25799                                      const X86Subtarget &Subtarget,
25800                                      SelectionDAG &DAG) {
25801   MVT VT = Op->getSimpleValueType(0);
25802   SDValue In = Op->getOperand(0);
25803   MVT InVT = In.getSimpleValueType();
25804   assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
25805   MVT VTElt = VT.getVectorElementType();
25806   SDLoc dl(Op);
25807 
25808   unsigned NumElts = VT.getVectorNumElements();
25809 
25810   // Extend VT if the scalar type is i8/i16 and BWI is not supported.
25811   MVT ExtVT = VT;
25812   if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
25813     // If v16i32 is to be avoided, we'll need to split and concatenate.
25814     if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
25815       return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
25816 
25817     ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
25818   }
25819 
25820   // Widen to 512-bits if VLX is not supported.
25821   MVT WideVT = ExtVT;
25822   if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
25823     NumElts *= 512 / ExtVT.getSizeInBits();
25824     InVT = MVT::getVectorVT(MVT::i1, NumElts);
25825     In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
25826                      In, DAG.getIntPtrConstant(0, dl));
25827     WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
25828   }
25829 
25830   SDValue V;
25831   MVT WideEltVT = WideVT.getVectorElementType();
25832   if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
25833       (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
25834     V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
25835   } else {
25836     SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
25837     SDValue Zero = DAG.getConstant(0, dl, WideVT);
25838     V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
25839   }
25840 
25841   // Truncate if we had to extend i16/i8 above.
25842   if (VT != ExtVT) {
25843     WideVT = MVT::getVectorVT(VTElt, NumElts);
25844     V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
25845   }
25846 
25847   // Extract back to 128/256-bit if we widened.
25848   if (WideVT != VT)
25849     V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
25850                     DAG.getIntPtrConstant(0, dl));
25851 
25852   return V;
25853 }
25854 
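/// Lower a vector ANY_EXTEND: vXi1 sources reuse the mask extension path,
/// everything else goes through the generic AVX extend lowering.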
25855 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
25856                                SelectionDAG &DAG) {
25857   SDValue In = Op->getOperand(0);
25858   MVT InVT = In.getSimpleValueType();
25859 
25860   if (InVT.getVectorElementType() == MVT::i1)
25861     return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
25862 
25863   assert(Subtarget.hasAVX() && "Expected AVX support");
25864   return LowerAVXExtend(Op, DAG, Subtarget);
25865 }
25866 
25867 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
25868 // For sign extend this needs to handle all vector sizes and SSE4.1 and
25869 // non-SSE4.1 targets. For zero extend this should only handle inputs of
25870 // MVT::v64i8 when BWI is not supported, but AVX512 is.
25871 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
25872                                         const X86Subtarget &Subtarget,
25873                                         SelectionDAG &DAG) {
25874   SDValue In = Op->getOperand(0);
25875   MVT VT = Op->getSimpleValueType(0);
25876   MVT InVT = In.getSimpleValueType();
25877 
25878   MVT SVT = VT.getVectorElementType();
25879   MVT InSVT = InVT.getVectorElementType();
25880   assert(SVT.getFixedSizeInBits() > InSVT.getFixedSizeInBits());
25881 
25882   if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
25883     return SDValue();
25884   if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
25885     return SDValue();
25886   if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
25887       !(VT.is256BitVector() && Subtarget.hasAVX()) &&
25888       !(VT.is512BitVector() && Subtarget.hasAVX512()))
25889     return SDValue();
25890 
25891   SDLoc dl(Op);
25892   unsigned Opc = Op.getOpcode();
25893   unsigned NumElts = VT.getVectorNumElements();
25894 
25895   // For 256-bit vectors, we only need the lower (128-bit) half of the input.
25896   // For 512-bit vectors, we need 128-bits or 256-bits.
25897   if (InVT.getSizeInBits() > 128) {
25898     // Input needs to be at least the same number of elements as output, and
25899     // at least 128-bits.
25900     int InSize = InSVT.getSizeInBits() * NumElts;
25901     In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
25902     InVT = In.getSimpleValueType();
25903   }
25904 
25905   // On SSE41 targets, 128-bit results can use the pmov[sz]x* instructions
25906   // directly, so they are legal and shouldn't occur here. AVX2/AVX512 pmovsx*
25907   // instructions still need to be handled here for 256/512-bit results.
25908   if (Subtarget.hasInt256()) {
25909     assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
25910 
25911     if (InVT.getVectorNumElements() != NumElts)
25912       return DAG.getNode(Op.getOpcode(), dl, VT, In);
25913 
25914     // FIXME: Apparently we create inreg operations that could be regular
25915     // extends.
25916     unsigned ExtOpc =
25917         Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
25918                                              : ISD::ZERO_EXTEND;
25919     return DAG.getNode(ExtOpc, dl, VT, In);
25920   }
25921 
25922   // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
25923   if (Subtarget.hasAVX()) {
25924     assert(VT.is256BitVector() && "256-bit vector expected");
25925     MVT HalfVT = VT.getHalfNumVectorElementsVT();
25926     int HalfNumElts = HalfVT.getVectorNumElements();
25927 
25928     unsigned NumSrcElts = InVT.getVectorNumElements();
25929     SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
25930     for (int i = 0; i != HalfNumElts; ++i)
25931       HiMask[i] = HalfNumElts + i;
25932 
25933     SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
25934     SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
25935     Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
25936     return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
25937   }
25938 
25939   // We should only get here for sign extend.
25940   assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
25941   assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
25942 
25943   // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
25944   SDValue Curr = In;
25945   SDValue SignExt = Curr;
25946 
25947   // As SRAI is only available on i16/i32 types, we expand only up to i32
25948   // and handle i64 separately.
25949   if (InVT != MVT::v4i32) {
25950     MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
25951 
25952     unsigned DestWidth = DestVT.getScalarSizeInBits();
25953     unsigned Scale = DestWidth / InSVT.getSizeInBits();
25954 
25955     unsigned InNumElts = InVT.getVectorNumElements();
25956     unsigned DestElts = DestVT.getVectorNumElements();
25957 
25958     // Build a shuffle mask that takes each input element and places it in the
25959     // MSBs of the new element size.
25960     SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
25961     for (unsigned i = 0; i != DestElts; ++i)
25962       Mask[i * Scale + (Scale - 1)] = i;
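    // For example, sign-extending v16i8 -> v4i32 gives Scale = 4 and
    // DestElts = 4, so the mask is <u,u,u,0,u,u,u,1,u,u,u,2,u,u,u,3>, placing
    // each input byte in the MSBs of a 32-bit lane before the shift below.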
25963 
25964     Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
25965     Curr = DAG.getBitcast(DestVT, Curr);
25966 
25967     unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
25968     SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
25969                           DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
25970   }
25971 
25972   if (VT == MVT::v2i64) {
25973     assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
25974     SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
25975     SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
25976     SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
25977     SignExt = DAG.getBitcast(VT, SignExt);
25978   }
25979 
25980   return SignExt;
25981 }
25982 
25983 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
25984                                 SelectionDAG &DAG) {
25985   MVT VT = Op->getSimpleValueType(0);
25986   SDValue In = Op->getOperand(0);
25987   MVT InVT = In.getSimpleValueType();
25988   SDLoc dl(Op);
25989 
25990   if (InVT.getVectorElementType() == MVT::i1)
25991     return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
25992 
25993   assert(VT.isVector() && InVT.isVector() && "Expected vector type");
25994   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
25995          "Expected same number of elements");
25996   assert((VT.getVectorElementType() == MVT::i16 ||
25997           VT.getVectorElementType() == MVT::i32 ||
25998           VT.getVectorElementType() == MVT::i64) &&
25999          "Unexpected element type");
26000   assert((InVT.getVectorElementType() == MVT::i8 ||
26001           InVT.getVectorElementType() == MVT::i16 ||
26002           InVT.getVectorElementType() == MVT::i32) &&
26003          "Unexpected element type");
26004 
26005   if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
26006     assert(InVT == MVT::v32i8 && "Unexpected VT!");
26007     return splitVectorIntUnary(Op, DAG);
26008   }
26009 
26010   if (Subtarget.hasInt256())
26011     return Op;
26012 
26013   // Optimize vectors in AVX mode:
26014   // Sign extend v8i16 to v8i32 and
26015   //             v4i32 to v4i64.
26016   //
26017   // Divide the input vector into two parts
26018   // (for v4i32 the high shuffle mask will be {2, 3, -1, -1}),
26019   // use vpmovsx instructions to extend v4i32 -> v2i64 and v8i16 -> v4i32,
26020   // then concat the vectors back to the original VT.
26021   MVT HalfVT = VT.getHalfNumVectorElementsVT();
26022   SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
26023 
26024   unsigned NumElems = InVT.getVectorNumElements();
26025   SmallVector<int,8> ShufMask(NumElems, -1);
26026   for (unsigned i = 0; i != NumElems/2; ++i)
26027     ShufMask[i] = i + NumElems/2;
26028 
26029   SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
26030   OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
26031 
26032   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
26033 }
26034 
26035 /// Change a vector store into a pair of half-size vector stores.
26036 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
26037   SDValue StoredVal = Store->getValue();
26038   assert((StoredVal.getValueType().is256BitVector() ||
26039           StoredVal.getValueType().is512BitVector()) &&
26040          "Expecting 256/512-bit op");
26041 
26042   // Splitting volatile memory ops is not allowed unless the operation was not
26043   // legal to begin with. Assume the input store is legal (this transform is
26044   // only used for targets with AVX). Note: It is possible that we have an
26045   // illegal type like v2i128, and so we could allow splitting a volatile store
26046   // in that case if that is important.
26047   if (!Store->isSimple())
26048     return SDValue();
26049 
26050   SDLoc DL(Store);
26051   SDValue Value0, Value1;
26052   std::tie(Value0, Value1) = splitVector(StoredVal, DAG, DL);
26053   unsigned HalfOffset = Value0.getValueType().getStoreSize();
26054   SDValue Ptr0 = Store->getBasePtr();
26055   SDValue Ptr1 =
26056       DAG.getMemBasePlusOffset(Ptr0, TypeSize::Fixed(HalfOffset), DL);
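  // For example, a 256-bit store is split into two 128-bit stores at byte
  // offsets 0 and 16 from the original base pointer.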
26057   SDValue Ch0 =
26058       DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
26059                    Store->getOriginalAlign(),
26060                    Store->getMemOperand()->getFlags());
26061   SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
26062                              Store->getPointerInfo().getWithOffset(HalfOffset),
26063                              Store->getOriginalAlign(),
26064                              Store->getMemOperand()->getFlags());
26065   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
26066 }
26067 
26068 /// Scalarize a vector store, bitcasting to TargetVT to determine the scalar
26069 /// type.
26070 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
26071                                     SelectionDAG &DAG) {
26072   SDValue StoredVal = Store->getValue();
26073   assert(StoreVT.is128BitVector() &&
26074          StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
26075   StoredVal = DAG.getBitcast(StoreVT, StoredVal);
26076 
26077   // Splitting volatile memory ops is not allowed unless the operation was not
26078   // legal to begin with. We are assuming the input op is legal (this transform
26079   // is only used for targets with AVX).
26080   if (!Store->isSimple())
26081     return SDValue();
26082 
26083   MVT StoreSVT = StoreVT.getScalarType();
26084   unsigned NumElems = StoreVT.getVectorNumElements();
26085   unsigned ScalarSize = StoreSVT.getStoreSize();
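  // For example, StoreVT = v2i64 produces two 8-byte scalar stores at byte
  // offsets 0 and 8 from the base pointer.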
26086 
26087   SDLoc DL(Store);
26088   SmallVector<SDValue, 4> Stores;
26089   for (unsigned i = 0; i != NumElems; ++i) {
26090     unsigned Offset = i * ScalarSize;
26091     SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(),
26092                                            TypeSize::Fixed(Offset), DL);
26093     SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
26094                               DAG.getIntPtrConstant(i, DL));
26095     SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
26096                               Store->getPointerInfo().getWithOffset(Offset),
26097                               Store->getOriginalAlign(),
26098                               Store->getMemOperand()->getFlags());
26099     Stores.push_back(Ch);
26100   }
26101   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
26102 }
26103 
26104 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
26105                           SelectionDAG &DAG) {
26106   StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
26107   SDLoc dl(St);
26108   SDValue StoredVal = St->getValue();
26109 
26110   // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
26111   if (StoredVal.getValueType().isVector() &&
26112       StoredVal.getValueType().getVectorElementType() == MVT::i1) {
26113     unsigned NumElts = StoredVal.getValueType().getVectorNumElements();
26114     assert(NumElts <= 8 && "Unexpected VT");
26115     assert(!St->isTruncatingStore() && "Expected non-truncating store");
26116     assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
26117            "Expected AVX512F without AVX512DQI");
26118 
26119     // We must pad with zeros to ensure we store zeroes to any unused bits.
26120     StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
26121                             DAG.getUNDEF(MVT::v16i1), StoredVal,
26122                             DAG.getIntPtrConstant(0, dl));
26123     StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
26124     StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
26125     // Make sure we store zeros in the extra bits.
26126     if (NumElts < 8)
26127       StoredVal = DAG.getZeroExtendInReg(
26128           StoredVal, dl, EVT::getIntegerVT(*DAG.getContext(), NumElts));
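    // For example, a v4i1 value is stored as an i8 whose low 4 bits hold the
    // mask elements and whose upper 4 bits are zero.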
26129 
26130     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
26131                         St->getPointerInfo(), St->getOriginalAlign(),
26132                         St->getMemOperand()->getFlags());
26133   }
26134 
26135   if (St->isTruncatingStore())
26136     return SDValue();
26137 
26138   // If this is a 256-bit store of concatenated ops, we are better off splitting
26139   // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
26140   // and each half can execute independently. Some cores would split the op into
26141   // halves anyway, so the concat (vinsertf128) is purely an extra op.
26142   MVT StoreVT = StoredVal.getSimpleValueType();
26143   if (StoreVT.is256BitVector() ||
26144       ((StoreVT == MVT::v32i16 || StoreVT == MVT::v64i8) &&
26145        !Subtarget.hasBWI())) {
26146     SmallVector<SDValue, 4> CatOps;
26147     if (StoredVal.hasOneUse() &&
26148         collectConcatOps(StoredVal.getNode(), CatOps, DAG))
26149       return splitVectorStore(St, DAG);
26150     return SDValue();
26151   }
26152 
26153   if (StoreVT.is32BitVector())
26154     return SDValue();
26155 
26156   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26157   assert(StoreVT.is64BitVector() && "Unexpected VT");
26158   assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
26159              TargetLowering::TypeWidenVector &&
26160          "Unexpected type action!");
26161 
26162   EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
26163   StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
26164                           DAG.getUNDEF(StoreVT));
26165 
26166   if (Subtarget.hasSSE2()) {
26167     // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
26168     // and store it.
26169     MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
26170     MVT CastVT = MVT::getVectorVT(StVT, 2);
26171     StoredVal = DAG.getBitcast(CastVT, StoredVal);
26172     StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
26173                             DAG.getIntPtrConstant(0, dl));
26174 
26175     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
26176                         St->getPointerInfo(), St->getOriginalAlign(),
26177                         St->getMemOperand()->getFlags());
26178   }
26179   assert(Subtarget.hasSSE1() && "Expected SSE");
26180   SDVTList Tys = DAG.getVTList(MVT::Other);
26181   SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
26182   return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
26183                                  St->getMemOperand());
26184 }
26185 
26186 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
26187 // may emit an illegal shuffle but the expansion is still better than scalar
26188 // code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
26189 // we'll emit a shuffle and an arithmetic shift.
26190 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
26191 // TODO: It is possible to support ZExt by zeroing the undef values during
26192 // the shuffle phase or after the shuffle.
26193 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
26194                          SelectionDAG &DAG) {
26195   MVT RegVT = Op.getSimpleValueType();
26196   assert(RegVT.isVector() && "We only custom lower vector loads.");
26197   assert(RegVT.isInteger() &&
26198          "We only custom lower integer vector loads.");
26199 
26200   LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
26201   SDLoc dl(Ld);
26202 
26203   // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
26204   if (RegVT.getVectorElementType() == MVT::i1) {
26205     assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
26206     assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
26207     assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
26208            "Expected AVX512F without AVX512DQI");
26209 
26210     SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
26211                                 Ld->getPointerInfo(), Ld->getOriginalAlign(),
26212                                 Ld->getMemOperand()->getFlags());
26213 
26214     // Replace chain users with the new chain.
26215     assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
26216 
26217     SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
26218     Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
26219                       DAG.getBitcast(MVT::v16i1, Val),
26220                       DAG.getIntPtrConstant(0, dl));
26221     return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
26222   }
26223 
26224   return SDValue();
26225 }
26226 
26227 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
26228 /// each of which has no other use apart from the AND / OR.
26229 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
26230   Opc = Op.getOpcode();
26231   if (Opc != ISD::OR && Opc != ISD::AND)
26232     return false;
26233   return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
26234           Op.getOperand(0).hasOneUse() &&
26235           Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
26236           Op.getOperand(1).hasOneUse());
26237 }
26238 
26239 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
26240   SDValue Chain = Op.getOperand(0);
26241   SDValue Cond  = Op.getOperand(1);
26242   SDValue Dest  = Op.getOperand(2);
26243   SDLoc dl(Op);
26244 
26245   // Bail out when we don't have native compare instructions.
26246   if (Cond.getOpcode() == ISD::SETCC &&
26247       Cond.getOperand(0).getValueType() != MVT::f128 &&
26248       !isSoftFP16(Cond.getOperand(0).getValueType())) {
26249     SDValue LHS = Cond.getOperand(0);
26250     SDValue RHS = Cond.getOperand(1);
26251     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
26252 
26253     // Special case for
26254     // setcc([su]{add,sub,mul}o == 0)
26255     // setcc([su]{add,sub,mul}o != 1)
26256     if (ISD::isOverflowIntrOpRes(LHS) &&
26257         (CC == ISD::SETEQ || CC == ISD::SETNE) &&
26258         (isNullConstant(RHS) || isOneConstant(RHS))) {
26259       SDValue Value, Overflow;
26260       X86::CondCode X86Cond;
26261       std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, LHS.getValue(0), DAG);
26262 
26263       if ((CC == ISD::SETEQ) == isNullConstant(RHS))
26264         X86Cond = X86::GetOppositeBranchCondition(X86Cond);
26265 
26266       SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
26267       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26268                          Overflow);
26269     }
26270 
26271     if (LHS.getSimpleValueType().isInteger()) {
26272       SDValue CCVal;
26273       SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, CC, SDLoc(Cond), DAG, CCVal);
26274       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26275                          EFLAGS);
26276     }
26277 
26278     if (CC == ISD::SETOEQ) {
26279       // For FCMP_OEQ, we can emit
26280       // two branches instead of an explicit AND instruction with a
26281       // separate test. However, we only do this if this block doesn't
26282       // have a fall-through edge, because this requires an explicit
26283       // jmp when the condition is false.
26284       if (Op.getNode()->hasOneUse()) {
26285         SDNode *User = *Op.getNode()->use_begin();
26286         // Look for an unconditional branch following this conditional branch.
26287         // We need this because we need to reverse the successors in order
26288         // to implement FCMP_OEQ.
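        // In effect we emit "jne FalseBB; jp FalseBB" and fall through to the
        // original destination only when the compare is ordered and equal.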
26289         if (User->getOpcode() == ISD::BR) {
26290           SDValue FalseBB = User->getOperand(1);
26291           SDNode *NewBR =
26292             DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
26293           assert(NewBR == User);
26294           (void)NewBR;
26295           Dest = FalseBB;
26296 
26297           SDValue Cmp =
26298               DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
26299           SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
26300           Chain = DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest,
26301                               CCVal, Cmp);
26302           CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
26303           return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26304                              Cmp);
26305         }
26306       }
26307     } else if (CC == ISD::SETUNE) {
26308       // For FCMP_UNE, we can emit
26309       // two branches instead of an explicit OR instruction with a
26310       // separate test.
26311       SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
26312       SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
26313       Chain =
26314           DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Cmp);
26315       CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
26316       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26317                          Cmp);
26318     } else {
26319       X86::CondCode X86Cond =
26320           TranslateX86CC(CC, dl, /*IsFP*/ true, LHS, RHS, DAG);
26321       SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
26322       SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
26323       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26324                          Cmp);
26325     }
26326   }
26327 
26328   if (ISD::isOverflowIntrOpRes(Cond)) {
26329     SDValue Value, Overflow;
26330     X86::CondCode X86Cond;
26331     std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
26332 
26333     SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
26334     return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26335                        Overflow);
26336   }
26337 
26338   // Look past the truncate if the high bits are known zero.
26339   if (isTruncWithZeroHighBitsInput(Cond, DAG))
26340     Cond = Cond.getOperand(0);
26341 
26342   EVT CondVT = Cond.getValueType();
26343 
26344   // Add an AND with 1 if we don't already have one.
26345   if (!(Cond.getOpcode() == ISD::AND && isOneConstant(Cond.getOperand(1))))
26346     Cond =
26347         DAG.getNode(ISD::AND, dl, CondVT, Cond, DAG.getConstant(1, dl, CondVT));
26348 
26349   SDValue LHS = Cond;
26350   SDValue RHS = DAG.getConstant(0, dl, CondVT);
26351 
26352   SDValue CCVal;
26353   SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, ISD::SETNE, dl, DAG, CCVal);
26354   return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
26355                      EFLAGS);
26356 }
26357 
26358 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
26359 // Calls to _alloca are needed to probe the stack when allocating more than 4k
26360 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
26361 // that the guard pages used by the OS virtual memory manager are allocated in
26362 // correct sequence.
26363 SDValue
26364 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
26365                                            SelectionDAG &DAG) const {
26366   MachineFunction &MF = DAG.getMachineFunction();
26367   bool SplitStack = MF.shouldSplitStack();
26368   bool EmitStackProbeCall = hasStackProbeSymbol(MF);
26369   bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
26370                SplitStack || EmitStackProbeCall;
26371   SDLoc dl(Op);
26372 
26373   // Get the inputs.
26374   SDNode *Node = Op.getNode();
26375   SDValue Chain = Op.getOperand(0);
26376   SDValue Size  = Op.getOperand(1);
26377   MaybeAlign Alignment(Op.getConstantOperandVal(2));
26378   EVT VT = Node->getValueType(0);
26379 
26380   // Chain the dynamic stack allocation so that it doesn't modify the stack
26381   // pointer when other instructions are using the stack.
26382   Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
26383 
26384   bool Is64Bit = Subtarget.is64Bit();
26385   MVT SPTy = getPointerTy(DAG.getDataLayout());
26386 
26387   SDValue Result;
26388   if (!Lower) {
26389     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26390     Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
26391     assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
26392                     " not tell us which reg is the stack pointer!");
26393 
26394     const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
26395     const Align StackAlign = TFI.getStackAlign();
26396     if (hasInlineStackProbe(MF)) {
26397       MachineRegisterInfo &MRI = MF.getRegInfo();
26398 
26399       const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
26400       Register Vreg = MRI.createVirtualRegister(AddrRegClass);
26401       Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
26402       Result = DAG.getNode(X86ISD::PROBED_ALLOCA, dl, SPTy, Chain,
26403                            DAG.getRegister(Vreg, SPTy));
26404     } else {
26405       SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
26406       Chain = SP.getValue(1);
26407       Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
26408     }
26409     if (Alignment && *Alignment > StackAlign)
26410       Result =
26411           DAG.getNode(ISD::AND, dl, VT, Result,
26412                       DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
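    // For example, a 32-byte alignment request masks the new stack pointer
    // with ~31, rounding it down to a 32-byte boundary.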
26413     Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
26414   } else if (SplitStack) {
26415     MachineRegisterInfo &MRI = MF.getRegInfo();
26416 
26417     if (Is64Bit) {
26418       // The 64-bit implementation of segmented stacks needs to clobber both r10
26419       // and r11. This makes it impossible to use it along with nested parameters.
26420       const Function &F = MF.getFunction();
26421       for (const auto &A : F.args()) {
26422         if (A.hasNestAttr())
26423           report_fatal_error("Cannot use segmented stacks with functions that "
26424                              "have nested arguments.");
26425       }
26426     }
26427 
26428     const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
26429     Register Vreg = MRI.createVirtualRegister(AddrRegClass);
26430     Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
26431     Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
26432                                 DAG.getRegister(Vreg, SPTy));
26433   } else {
26434     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
26435     Chain = DAG.getNode(X86ISD::DYN_ALLOCA, dl, NodeTys, Chain, Size);
26436     MF.getInfo<X86MachineFunctionInfo>()->setHasDynAlloca(true);
26437 
26438     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
26439     Register SPReg = RegInfo->getStackRegister();
26440     SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
26441     Chain = SP.getValue(1);
26442 
26443     if (Alignment) {
26444       SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
26445                        DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
26446       Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
26447     }
26448 
26449     Result = SP;
26450   }
26451 
26452   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
26453 
26454   SDValue Ops[2] = {Result, Chain};
26455   return DAG.getMergeValues(Ops, dl);
26456 }
26457 
26458 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
26459   MachineFunction &MF = DAG.getMachineFunction();
26460   auto PtrVT = getPointerTy(MF.getDataLayout());
26461   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
26462 
26463   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
26464   SDLoc DL(Op);
26465 
26466   if (!Subtarget.is64Bit() ||
26467       Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
26468     // vastart just stores the address of the VarArgsFrameIndex slot into the
26469     // memory location argument.
26470     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
26471     return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
26472                         MachinePointerInfo(SV));
26473   }
26474 
26475   // __va_list_tag:
26476   //   gp_offset         (0 - 6 * 8)
26477   //   fp_offset         (48 - 48 + 8 * 16)
26478   //   overflow_arg_area (point to parameters coming in memory).
26479   //   reg_save_area
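  // On LP64 targets these fields are stored below at byte offsets 0, 4, 8 and
  // 16 respectively; on x32 the reg_save_area pointer lands at offset 12.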
26480   SmallVector<SDValue, 8> MemOps;
26481   SDValue FIN = Op.getOperand(1);
26482   // Store gp_offset
26483   SDValue Store = DAG.getStore(
26484       Op.getOperand(0), DL,
26485       DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
26486       MachinePointerInfo(SV));
26487   MemOps.push_back(Store);
26488 
26489   // Store fp_offset
26490   FIN = DAG.getMemBasePlusOffset(FIN, TypeSize::Fixed(4), DL);
26491   Store = DAG.getStore(
26492       Op.getOperand(0), DL,
26493       DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
26494       MachinePointerInfo(SV, 4));
26495   MemOps.push_back(Store);
26496 
26497   // Store ptr to overflow_arg_area
26498   FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
26499   SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
26500   Store =
26501       DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
26502   MemOps.push_back(Store);
26503 
26504   // Store ptr to reg_save_area.
26505   FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
26506       Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
26507   SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
26508   Store = DAG.getStore(
26509       Op.getOperand(0), DL, RSFIN, FIN,
26510       MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
26511   MemOps.push_back(Store);
26512   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
26513 }
26514 
26515 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
26516   assert(Subtarget.is64Bit() &&
26517          "LowerVAARG only handles 64-bit va_arg!");
26518   assert(Op.getNumOperands() == 4);
26519 
26520   MachineFunction &MF = DAG.getMachineFunction();
26521   if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
26522     // The Win64 ABI uses char* instead of a structure.
26523     return DAG.expandVAArg(Op.getNode());
26524 
26525   SDValue Chain = Op.getOperand(0);
26526   SDValue SrcPtr = Op.getOperand(1);
26527   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
26528   unsigned Align = Op.getConstantOperandVal(3);
26529   SDLoc dl(Op);
26530 
26531   EVT ArgVT = Op.getNode()->getValueType(0);
26532   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
26533   uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
26534   uint8_t ArgMode;
26535 
26536   // Decide which area this value should be read from.
26537   // TODO: Implement the AMD64 ABI in its entirety. This simple
26538   // selection mechanism works only for the basic types.
26539   assert(ArgVT != MVT::f80 && "va_arg for f80 not yet implemented");
26540   if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
26541     ArgMode = 2;  // Argument passed in XMM register. Use fp_offset.
26542   } else {
26543     assert(ArgVT.isInteger() && ArgSize <= 32 /*bytes*/ &&
26544            "Unhandled argument type in LowerVAARG");
26545     ArgMode = 1;  // Argument passed in GPR64 register(s). Use gp_offset.
26546   }
26547 
26548   if (ArgMode == 2) {
26549     // Make sure using fp_offset makes sense.
26550     assert(!Subtarget.useSoftFloat() &&
26551            !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
26552            Subtarget.hasSSE1());
26553   }
26554 
26555   // Insert VAARG node into the DAG
26556   // VAARG returns two values: Variable Argument Address, Chain
26557   SDValue InstOps[] = {Chain, SrcPtr,
26558                        DAG.getTargetConstant(ArgSize, dl, MVT::i32),
26559                        DAG.getTargetConstant(ArgMode, dl, MVT::i8),
26560                        DAG.getTargetConstant(Align, dl, MVT::i32)};
26561   SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
26562   SDValue VAARG = DAG.getMemIntrinsicNode(
26563       Subtarget.isTarget64BitLP64() ? X86ISD::VAARG_64 : X86ISD::VAARG_X32, dl,
26564       VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
26565       /*Alignment=*/std::nullopt,
26566       MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
26567   Chain = VAARG.getValue(1);
26568 
26569   // Load the next argument and return it
26570   return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
26571 }
26572 
26573 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
26574                            SelectionDAG &DAG) {
26575   // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
26576   // where a va_list is still an i8*.
26577   assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
26578   if (Subtarget.isCallingConvWin64(
26579         DAG.getMachineFunction().getFunction().getCallingConv()))
26580     // Probably a Win64 va_copy.
26581     return DAG.expandVACopy(Op.getNode());
26582 
26583   SDValue Chain = Op.getOperand(0);
26584   SDValue DstPtr = Op.getOperand(1);
26585   SDValue SrcPtr = Op.getOperand(2);
26586   const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
26587   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
26588   SDLoc DL(Op);
26589 
26590   return DAG.getMemcpy(
26591       Chain, DL, DstPtr, SrcPtr,
26592       DAG.getIntPtrConstant(Subtarget.isTarget64BitLP64() ? 24 : 16, DL),
26593       Align(Subtarget.isTarget64BitLP64() ? 8 : 4), /*isVolatile*/ false, false,
26594       false, MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
26595 }
26596 
26597 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
26598 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
26599   switch (Opc) {
26600   case ISD::SHL:
26601   case X86ISD::VSHL:
26602   case X86ISD::VSHLI:
26603     return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
26604   case ISD::SRL:
26605   case X86ISD::VSRL:
26606   case X86ISD::VSRLI:
26607     return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
26608   case ISD::SRA:
26609   case X86ISD::VSRA:
26610   case X86ISD::VSRAI:
26611     return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
26612   }
26613   llvm_unreachable("Unknown target vector shift node");
26614 }
26615 
26616 /// Handle vector element shifts where the shift amount is a constant.
26617 /// Takes immediate version of shift as input.
26618 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
26619                                           SDValue SrcOp, uint64_t ShiftAmt,
26620                                           SelectionDAG &DAG) {
26621   MVT ElementType = VT.getVectorElementType();
26622 
26623   // Bitcast the source vector to the output type; this is mainly necessary
26624   // for vXi8/vXi64 shifts.
26625   if (VT != SrcOp.getSimpleValueType())
26626     SrcOp = DAG.getBitcast(VT, SrcOp);
26627 
26628   // Fold this packed shift into its first operand if ShiftAmt is 0.
26629   if (ShiftAmt == 0)
26630     return SrcOp;
26631 
26632   // Check for ShiftAmt >= element width
26633   if (ShiftAmt >= ElementType.getSizeInBits()) {
26634     if (Opc == X86ISD::VSRAI)
26635       ShiftAmt = ElementType.getSizeInBits() - 1;
26636     else
26637       return DAG.getConstant(0, dl, VT);
26638   }
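  // For example, a v4i32 logical shift by 32 or more folds to zero above,
  // while an arithmetic shift is clamped to 31 so it still smears the sign
  // bit across the element.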
26639 
26640   assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
26641          && "Unknown target vector shift-by-constant node");
26642 
26643   // Fold this packed vector shift into a build vector if SrcOp is a
26644   // vector of Constants or UNDEFs.
26645   if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
26646     unsigned ShiftOpc;
26647     switch (Opc) {
26648     default: llvm_unreachable("Unknown opcode!");
26649     case X86ISD::VSHLI:
26650       ShiftOpc = ISD::SHL;
26651       break;
26652     case X86ISD::VSRLI:
26653       ShiftOpc = ISD::SRL;
26654       break;
26655     case X86ISD::VSRAI:
26656       ShiftOpc = ISD::SRA;
26657       break;
26658     }
26659 
26660     SDValue Amt = DAG.getConstant(ShiftAmt, dl, VT);
26661     if (SDValue C = DAG.FoldConstantArithmetic(ShiftOpc, dl, VT, {SrcOp, Amt}))
26662       return C;
26663   }
26664 
26665   return DAG.getNode(Opc, dl, VT, SrcOp,
26666                      DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
26667 }
26668 
26669 /// Handle vector element shifts by a splat shift amount
26670 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
26671                                    SDValue SrcOp, SDValue ShAmt, int ShAmtIdx,
26672                                    const X86Subtarget &Subtarget,
26673                                    SelectionDAG &DAG) {
26674   MVT AmtVT = ShAmt.getSimpleValueType();
26675   assert(AmtVT.isVector() && "Vector shift type mismatch");
26676   assert(0 <= ShAmtIdx && ShAmtIdx < (int)AmtVT.getVectorNumElements() &&
26677          "Illegal vector splat index");
26678 
26679   // Move the splat element to the bottom element.
26680   if (ShAmtIdx != 0) {
26681     SmallVector<int> Mask(AmtVT.getVectorNumElements(), -1);
26682     Mask[0] = ShAmtIdx;
26683     ShAmt = DAG.getVectorShuffle(AmtVT, dl, ShAmt, DAG.getUNDEF(AmtVT), Mask);
26684   }
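  // For example, if the splat amount is element 2 of a v4i32, the mask
  // <2,-1,-1,-1> above moves it down to element 0.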
26685 
26686   // Peek through any zext node if we can get back to a 128-bit source.
26687   if (AmtVT.getScalarSizeInBits() == 64 &&
26688       (ShAmt.getOpcode() == ISD::ZERO_EXTEND ||
26689        ShAmt.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) &&
26690       ShAmt.getOperand(0).getValueType().isSimple() &&
26691       ShAmt.getOperand(0).getValueType().is128BitVector()) {
26692     ShAmt = ShAmt.getOperand(0);
26693     AmtVT = ShAmt.getSimpleValueType();
26694   }
26695 
26696   // See if we can mask off the upper elements using the existing source node.
26697   // The shift uses the entire lower 64-bits of the amount vector, so no need to
26698   // do this for vXi64 types.
26699   bool IsMasked = false;
26700   if (AmtVT.getScalarSizeInBits() < 64) {
26701     if (ShAmt.getOpcode() == ISD::BUILD_VECTOR ||
26702         ShAmt.getOpcode() == ISD::SCALAR_TO_VECTOR) {
26703       // If the shift amount has come from a scalar, then zero-extend the scalar
26704       // before moving to the vector.
26705       ShAmt = DAG.getZExtOrTrunc(ShAmt.getOperand(0), dl, MVT::i32);
26706       ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
26707       ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, ShAmt);
26708       AmtVT = MVT::v4i32;
26709       IsMasked = true;
26710     } else if (ShAmt.getOpcode() == ISD::AND) {
26711       // See if the shift amount is already masked (e.g. for rotation modulo),
26712       // then we can zero-extend it by setting all the other mask elements to
26713       // zero.
26714       SmallVector<SDValue> MaskElts(
26715           AmtVT.getVectorNumElements(),
26716           DAG.getConstant(0, dl, AmtVT.getScalarType()));
26717       MaskElts[0] = DAG.getAllOnesConstant(dl, AmtVT.getScalarType());
26718       SDValue Mask = DAG.getBuildVector(AmtVT, dl, MaskElts);
26719       if ((Mask = DAG.FoldConstantArithmetic(ISD::AND, dl, AmtVT,
26720                                              {ShAmt.getOperand(1), Mask}))) {
26721         ShAmt = DAG.getNode(ISD::AND, dl, AmtVT, ShAmt.getOperand(0), Mask);
26722         IsMasked = true;
26723       }
26724     }
26725   }
26726 
26727   // Extract if the shift amount vector is larger than 128-bits.
26728   if (AmtVT.getSizeInBits() > 128) {
26729     ShAmt = extract128BitVector(ShAmt, 0, DAG, dl);
26730     AmtVT = ShAmt.getSimpleValueType();
26731   }
26732 
26733   // Zero-extend bottom element to v2i64 vector type, either by extension or
26734   // shuffle masking.
26735   if (!IsMasked && AmtVT.getScalarSizeInBits() < 64) {
26736     if (AmtVT == MVT::v4i32 && (ShAmt.getOpcode() == X86ISD::VBROADCAST ||
26737                                 ShAmt.getOpcode() == X86ISD::VBROADCAST_LOAD)) {
26738       ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, SDLoc(ShAmt), MVT::v4i32, ShAmt);
26739     } else if (Subtarget.hasSSE41()) {
26740       ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
26741                           MVT::v2i64, ShAmt);
26742     } else {
26743       SDValue ByteShift = DAG.getTargetConstant(
26744           (128 - AmtVT.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
26745       ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
26746       ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
26747                           ByteShift);
26748       ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
26749                           ByteShift);
26750     }
26751   }
26752 
26753   // Change opcode to non-immediate version.
26754   Opc = getTargetVShiftUniformOpcode(Opc, true);
26755 
26756   // The return type has to be a 128-bit type with the same element
26757   // type as the input type.
26758   MVT EltVT = VT.getVectorElementType();
26759   MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
26760 
26761   ShAmt = DAG.getBitcast(ShVT, ShAmt);
26762   return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
26763 }
26764 
26765 /// Return Mask with the necessary casting or extending
26766 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
26767 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
26768                            const X86Subtarget &Subtarget, SelectionDAG &DAG,
26769                            const SDLoc &dl) {
26770 
26771   if (isAllOnesConstant(Mask))
26772     return DAG.getConstant(1, dl, MaskVT);
26773   if (X86::isZeroNode(Mask))
26774     return DAG.getConstant(0, dl, MaskVT);
26775 
26776   assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
26777 
26778   if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
26779     assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
26780     assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
26781     // In 32-bit mode a bitcast of i64 is illegal, so extend/split it instead.
26782     SDValue Lo, Hi;
26783     Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
26784                         DAG.getConstant(0, dl, MVT::i32));
26785     Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
26786                         DAG.getConstant(1, dl, MVT::i32));
26787 
26788     Lo = DAG.getBitcast(MVT::v32i1, Lo);
26789     Hi = DAG.getBitcast(MVT::v32i1, Hi);
26790 
26791     return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
26792   } else {
26793     MVT BitcastVT = MVT::getVectorVT(MVT::i1,
26794                                      Mask.getSimpleValueType().getSizeInBits());
26795     // When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
26796     // are extracted by EXTRACT_SUBVECTOR.
26797     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
26798                        DAG.getBitcast(BitcastVT, Mask),
26799                        DAG.getIntPtrConstant(0, dl));
26800   }
26801 }
26802 
26803 /// Return (and \p Op, \p Mask) for compare instructions or
26804 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
26805 /// necessary casting or extending for \p Mask when lowering masking intrinsics
26806 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
26807                                     SDValue PreservedSrc,
26808                                     const X86Subtarget &Subtarget,
26809                                     SelectionDAG &DAG) {
26810   MVT VT = Op.getSimpleValueType();
26811   MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
26812   unsigned OpcodeSelect = ISD::VSELECT;
26813   SDLoc dl(Op);
26814 
26815   if (isAllOnesConstant(Mask))
26816     return Op;
26817 
26818   SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
26819 
26820   if (PreservedSrc.isUndef())
26821     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
26822   return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
26823 }
26824 
26825 /// Creates an SDNode for a predicated scalar operation.
26826 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
26827 /// The mask is coming as MVT::i8 and it should be transformed
26828 /// to MVT::v1i1 while lowering masking intrinsics.
26829 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
26830 /// "X86select" instead of "vselect". We just can't create the "vselect" node
26831 /// for a scalar instruction.
26832 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
26833                                     SDValue PreservedSrc,
26834                                     const X86Subtarget &Subtarget,
26835                                     SelectionDAG &DAG) {
26836 
26837   if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
26838     if (MaskConst->getZExtValue() & 0x1)
26839       return Op;
26840 
26841   MVT VT = Op.getSimpleValueType();
26842   SDLoc dl(Op);
26843 
26844   assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
26845   SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
26846                               DAG.getBitcast(MVT::v8i1, Mask),
26847                               DAG.getIntPtrConstant(0, dl));
26848   if (Op.getOpcode() == X86ISD::FSETCCM ||
26849       Op.getOpcode() == X86ISD::FSETCCM_SAE ||
26850       Op.getOpcode() == X86ISD::VFPCLASSS)
26851     return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
26852 
26853   if (PreservedSrc.isUndef())
26854     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
26855   return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
26856 }
26857 
26858 static int getSEHRegistrationNodeSize(const Function *Fn) {
26859   if (!Fn->hasPersonalityFn())
26860     report_fatal_error(
26861         "querying registration node size for function without personality");
26862   // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
26863   // WinEHStatePass for the full struct definition.
26864   switch (classifyEHPersonality(Fn->getPersonalityFn())) {
26865   case EHPersonality::MSVC_X86SEH: return 24;
26866   case EHPersonality::MSVC_CXX: return 16;
26867   default: break;
26868   }
26869   report_fatal_error(
26870       "can only recover FP for 32-bit MSVC EH personality functions");
26871 }
26872 
26873 /// When the MSVC runtime transfers control to us, either to an outlined
26874 /// function or when returning to a parent frame after catching an exception, we
26875 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
26876 /// Here's the math:
26877 ///   RegNodeBase = EntryEBP - RegNodeSize
26878 ///   ParentFP = RegNodeBase - ParentFrameOffset
26879 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
26880 /// subtracting the offset (negative on x86) takes us back to the parent FP.
26881 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
26882                                    SDValue EntryEBP) {
26883   MachineFunction &MF = DAG.getMachineFunction();
26884   SDLoc dl;
26885 
26886   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26887   MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
26888 
26889   // It's possible that the parent function no longer has a personality function
26890   // if the exceptional code was optimized away, in which case we just return
26891   // the incoming EBP.
26892   if (!Fn->hasPersonalityFn())
26893     return EntryEBP;
26894 
26895   // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
26896   // registration, or the .set_setframe offset.
26897   MCSymbol *OffsetSym =
26898       MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
26899           GlobalValue::dropLLVMManglingEscape(Fn->getName()));
26900   SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
26901   SDValue ParentFrameOffset =
26902       DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
26903 
26904   // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
26905   // prologue to RBP in the parent function.
26906   const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
26907   if (Subtarget.is64Bit())
26908     return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
26909 
26910   int RegNodeSize = getSEHRegistrationNodeSize(Fn);
26911   // RegNodeBase = EntryEBP - RegNodeSize
26912   // ParentFP = RegNodeBase - ParentFrameOffset
26913   SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
26914                                     DAG.getConstant(RegNodeSize, dl, PtrVT));
26915   return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
26916 }
26917 
26918 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
26919                                                    SelectionDAG &DAG) const {
26920   // Helper to detect if the operand is CUR_DIRECTION rounding mode.
26921   auto isRoundModeCurDirection = [](SDValue Rnd) {
26922     if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
26923       return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
26924 
26925     return false;
26926   };
26927   auto isRoundModeSAE = [](SDValue Rnd) {
26928     if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
26929       unsigned RC = C->getZExtValue();
26930       if (RC & X86::STATIC_ROUNDING::NO_EXC) {
26931         // Clear the NO_EXC bit and check remaining bits.
26932         RC ^= X86::STATIC_ROUNDING::NO_EXC;
26933         // As a convenience we allow either no other bits to be set or,
26934         // explicitly, the current-direction rounding mode.
26935         return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
26936       }
26937     }
26938 
26939     return false;
26940   };
26941   auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
26942     if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
26943       RC = C->getZExtValue();
26944       if (RC & X86::STATIC_ROUNDING::NO_EXC) {
26945         // Clear the NO_EXC bit and check remaining bits.
26946         RC ^= X86::STATIC_ROUNDING::NO_EXC;
26947         return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
26948                RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
26949                RC == X86::STATIC_ROUNDING::TO_POS_INF ||
26950                RC == X86::STATIC_ROUNDING::TO_ZERO;
26951       }
26952     }
26953 
26954     return false;
26955   };
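  // For example, a rounding operand of TO_NEG_INF | NO_EXC is accepted by
  // isRoundModeSAEToX (returning RC = TO_NEG_INF), while CUR_DIRECTION | NO_EXC
  // is only accepted by isRoundModeSAE.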
26956 
26957   SDLoc dl(Op);
26958   unsigned IntNo = Op.getConstantOperandVal(0);
26959   MVT VT = Op.getSimpleValueType();
26960   const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
26961 
26962   // Propagate flags from original node to transformed node(s).
26963   SelectionDAG::FlagInserter FlagsInserter(DAG, Op->getFlags());
26964 
26965   if (IntrData) {
26966     switch(IntrData->Type) {
26967     case INTR_TYPE_1OP: {
26968       // We specify 2 possible opcodes for intrinsics with rounding modes.
26969       // First, we check if the intrinsic may have non-default rounding mode,
26970       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
26971       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
26972       if (IntrWithRoundingModeOpcode != 0) {
26973         SDValue Rnd = Op.getOperand(2);
26974         unsigned RC = 0;
26975         if (isRoundModeSAEToX(Rnd, RC))
26976           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
26977                              Op.getOperand(1),
26978                              DAG.getTargetConstant(RC, dl, MVT::i32));
26979         if (!isRoundModeCurDirection(Rnd))
26980           return SDValue();
26981       }
26982       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
26983                          Op.getOperand(1));
26984     }
26985     case INTR_TYPE_1OP_SAE: {
26986       SDValue Sae = Op.getOperand(2);
26987 
26988       unsigned Opc;
26989       if (isRoundModeCurDirection(Sae))
26990         Opc = IntrData->Opc0;
26991       else if (isRoundModeSAE(Sae))
26992         Opc = IntrData->Opc1;
26993       else
26994         return SDValue();
26995 
26996       return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
26997     }
26998     case INTR_TYPE_2OP: {
26999       SDValue Src2 = Op.getOperand(2);
27000 
27001       // We specify 2 possible opcodes for intrinsics with rounding modes.
27002       // First, we check if the intrinsic may have non-default rounding mode,
27003       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
27004       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27005       if (IntrWithRoundingModeOpcode != 0) {
27006         SDValue Rnd = Op.getOperand(3);
27007         unsigned RC = 0;
27008         if (isRoundModeSAEToX(Rnd, RC))
27009           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
27010                              Op.getOperand(1), Src2,
27011                              DAG.getTargetConstant(RC, dl, MVT::i32));
27012         if (!isRoundModeCurDirection(Rnd))
27013           return SDValue();
27014       }
27015 
27016       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27017                          Op.getOperand(1), Src2);
27018     }
27019     case INTR_TYPE_2OP_SAE: {
27020       SDValue Sae = Op.getOperand(3);
27021 
27022       unsigned Opc;
27023       if (isRoundModeCurDirection(Sae))
27024         Opc = IntrData->Opc0;
27025       else if (isRoundModeSAE(Sae))
27026         Opc = IntrData->Opc1;
27027       else
27028         return SDValue();
27029 
27030       return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
27031                          Op.getOperand(2));
27032     }
27033     case INTR_TYPE_3OP:
27034     case INTR_TYPE_3OP_IMM8: {
27035       SDValue Src1 = Op.getOperand(1);
27036       SDValue Src2 = Op.getOperand(2);
27037       SDValue Src3 = Op.getOperand(3);
27038 
27039       if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
27040           Src3.getValueType() != MVT::i8) {
27041         Src3 = DAG.getTargetConstant(
27042             cast<ConstantSDNode>(Src3)->getZExtValue() & 0xff, dl, MVT::i8);
27043       }
27044 
27045       // We specify 2 possible opcodes for intrinsics with rounding modes.
27046       // First, we check if the intrinsic may have non-default rounding mode,
27047       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
27048       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27049       if (IntrWithRoundingModeOpcode != 0) {
27050         SDValue Rnd = Op.getOperand(4);
27051         unsigned RC = 0;
27052         if (isRoundModeSAEToX(Rnd, RC))
27053           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
27054                              Src1, Src2, Src3,
27055                              DAG.getTargetConstant(RC, dl, MVT::i32));
27056         if (!isRoundModeCurDirection(Rnd))
27057           return SDValue();
27058       }
27059 
27060       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27061                          {Src1, Src2, Src3});
27062     }
27063     case INTR_TYPE_4OP_IMM8: {
27064       assert(Op.getOperand(4)->getOpcode() == ISD::TargetConstant);
27065       SDValue Src4 = Op.getOperand(4);
27066       if (Src4.getValueType() != MVT::i8) {
27067         Src4 = DAG.getTargetConstant(
27068             cast<ConstantSDNode>(Src4)->getZExtValue() & 0xff, dl, MVT::i8);
27069       }
27070 
27071       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27072                          Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
27073                          Src4);
27074     }
27075     case INTR_TYPE_1OP_MASK: {
27076       SDValue Src = Op.getOperand(1);
27077       SDValue PassThru = Op.getOperand(2);
27078       SDValue Mask = Op.getOperand(3);
27079       // We add rounding mode to the Node when
27080       //   - RC Opcode is specified and
27081       //   - RC is not "current direction".
27082       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27083       if (IntrWithRoundingModeOpcode != 0) {
27084         SDValue Rnd = Op.getOperand(4);
27085         unsigned RC = 0;
27086         if (isRoundModeSAEToX(Rnd, RC))
27087           return getVectorMaskingNode(
27088               DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
27089                           Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
27090               Mask, PassThru, Subtarget, DAG);
27091         if (!isRoundModeCurDirection(Rnd))
27092           return SDValue();
27093       }
27094       return getVectorMaskingNode(
27095           DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
27096           Subtarget, DAG);
27097     }
27098     case INTR_TYPE_1OP_MASK_SAE: {
27099       SDValue Src = Op.getOperand(1);
27100       SDValue PassThru = Op.getOperand(2);
27101       SDValue Mask = Op.getOperand(3);
27102       SDValue Rnd = Op.getOperand(4);
27103 
27104       unsigned Opc;
27105       if (isRoundModeCurDirection(Rnd))
27106         Opc = IntrData->Opc0;
27107       else if (isRoundModeSAE(Rnd))
27108         Opc = IntrData->Opc1;
27109       else
27110         return SDValue();
27111 
27112       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
27113                                   Subtarget, DAG);
27114     }
27115     case INTR_TYPE_SCALAR_MASK: {
27116       SDValue Src1 = Op.getOperand(1);
27117       SDValue Src2 = Op.getOperand(2);
27118       SDValue passThru = Op.getOperand(3);
27119       SDValue Mask = Op.getOperand(4);
27120       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
27121       // There are 2 kinds of intrinsics in this group:
27122       // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
27123       // (2) With rounding mode and sae - 7 operands.
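      // Illustrative layouts assumed by the checks below (HasRounding case):
      //   (1) (id, src1, src2, passthru, mask, rnd/sae)   - 6 operands
      //   (2) (id, src1, src2, passthru, mask, imm, sae)  - 7 operands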
27124       bool HasRounding = IntrWithRoundingModeOpcode != 0;
27125       if (Op.getNumOperands() == (5U + HasRounding)) {
27126         if (HasRounding) {
27127           SDValue Rnd = Op.getOperand(5);
27128           unsigned RC = 0;
27129           if (isRoundModeSAEToX(Rnd, RC))
27130             return getScalarMaskingNode(
27131                 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
27132                             DAG.getTargetConstant(RC, dl, MVT::i32)),
27133                 Mask, passThru, Subtarget, DAG);
27134           if (!isRoundModeCurDirection(Rnd))
27135             return SDValue();
27136         }
27137         return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
27138                                                 Src2),
27139                                     Mask, passThru, Subtarget, DAG);
27140       }
27141 
27142       assert(Op.getNumOperands() == (6U + HasRounding) &&
27143              "Unexpected intrinsic form");
27144       SDValue RoundingMode = Op.getOperand(5);
27145       unsigned Opc = IntrData->Opc0;
27146       if (HasRounding) {
27147         SDValue Sae = Op.getOperand(6);
27148         if (isRoundModeSAE(Sae))
27149           Opc = IntrWithRoundingModeOpcode;
27150         else if (!isRoundModeCurDirection(Sae))
27151           return SDValue();
27152       }
27153       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
27154                                               Src2, RoundingMode),
27155                                   Mask, passThru, Subtarget, DAG);
27156     }
27157     case INTR_TYPE_SCALAR_MASK_RND: {
27158       SDValue Src1 = Op.getOperand(1);
27159       SDValue Src2 = Op.getOperand(2);
27160       SDValue passThru = Op.getOperand(3);
27161       SDValue Mask = Op.getOperand(4);
27162       SDValue Rnd = Op.getOperand(5);
27163 
27164       SDValue NewOp;
27165       unsigned RC = 0;
27166       if (isRoundModeCurDirection(Rnd))
27167         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
27168       else if (isRoundModeSAEToX(Rnd, RC))
27169         NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
27170                             DAG.getTargetConstant(RC, dl, MVT::i32));
27171       else
27172         return SDValue();
27173 
27174       return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
27175     }
27176     case INTR_TYPE_SCALAR_MASK_SAE: {
27177       SDValue Src1 = Op.getOperand(1);
27178       SDValue Src2 = Op.getOperand(2);
27179       SDValue passThru = Op.getOperand(3);
27180       SDValue Mask = Op.getOperand(4);
27181       SDValue Sae = Op.getOperand(5);
27182       unsigned Opc;
27183       if (isRoundModeCurDirection(Sae))
27184         Opc = IntrData->Opc0;
27185       else if (isRoundModeSAE(Sae))
27186         Opc = IntrData->Opc1;
27187       else
27188         return SDValue();
27189 
27190       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
27191                                   Mask, passThru, Subtarget, DAG);
27192     }
27193     case INTR_TYPE_2OP_MASK: {
27194       SDValue Src1 = Op.getOperand(1);
27195       SDValue Src2 = Op.getOperand(2);
27196       SDValue PassThru = Op.getOperand(3);
27197       SDValue Mask = Op.getOperand(4);
27198       SDValue NewOp;
27199       if (IntrData->Opc1 != 0) {
27200         SDValue Rnd = Op.getOperand(5);
27201         unsigned RC = 0;
27202         if (isRoundModeSAEToX(Rnd, RC))
27203           NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
27204                               DAG.getTargetConstant(RC, dl, MVT::i32));
27205         else if (!isRoundModeCurDirection(Rnd))
27206           return SDValue();
27207       }
27208       if (!NewOp)
27209         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
27210       return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
27211     }
27212     case INTR_TYPE_2OP_MASK_SAE: {
27213       SDValue Src1 = Op.getOperand(1);
27214       SDValue Src2 = Op.getOperand(2);
27215       SDValue PassThru = Op.getOperand(3);
27216       SDValue Mask = Op.getOperand(4);
27217 
27218       unsigned Opc = IntrData->Opc0;
27219       if (IntrData->Opc1 != 0) {
27220         SDValue Sae = Op.getOperand(5);
27221         if (isRoundModeSAE(Sae))
27222           Opc = IntrData->Opc1;
27223         else if (!isRoundModeCurDirection(Sae))
27224           return SDValue();
27225       }
27226 
27227       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
27228                                   Mask, PassThru, Subtarget, DAG);
27229     }
27230     case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
27231       SDValue Src1 = Op.getOperand(1);
27232       SDValue Src2 = Op.getOperand(2);
27233       SDValue Src3 = Op.getOperand(3);
27234       SDValue PassThru = Op.getOperand(4);
27235       SDValue Mask = Op.getOperand(5);
27236       SDValue Sae = Op.getOperand(6);
27237       unsigned Opc;
27238       if (isRoundModeCurDirection(Sae))
27239         Opc = IntrData->Opc0;
27240       else if (isRoundModeSAE(Sae))
27241         Opc = IntrData->Opc1;
27242       else
27243         return SDValue();
27244 
27245       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
27246                                   Mask, PassThru, Subtarget, DAG);
27247     }
27248     case INTR_TYPE_3OP_MASK_SAE: {
27249       SDValue Src1 = Op.getOperand(1);
27250       SDValue Src2 = Op.getOperand(2);
27251       SDValue Src3 = Op.getOperand(3);
27252       SDValue PassThru = Op.getOperand(4);
27253       SDValue Mask = Op.getOperand(5);
27254 
27255       unsigned Opc = IntrData->Opc0;
27256       if (IntrData->Opc1 != 0) {
27257         SDValue Sae = Op.getOperand(6);
27258         if (isRoundModeSAE(Sae))
27259           Opc = IntrData->Opc1;
27260         else if (!isRoundModeCurDirection(Sae))
27261           return SDValue();
27262       }
27263       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
27264                                   Mask, PassThru, Subtarget, DAG);
27265     }
27266     case BLENDV: {
27267       SDValue Src1 = Op.getOperand(1);
27268       SDValue Src2 = Op.getOperand(2);
27269       SDValue Src3 = Op.getOperand(3);
27270 
27271       EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
27272       Src3 = DAG.getBitcast(MaskVT, Src3);
27273 
27274       // Reverse the operands to match VSELECT order.
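      // (blendv picks from Src2 where the sign bit of the mask element is set,
      // which after the swap matches the (Cond, TrueOp, FalseOp) order.)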
27275       return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
27276     }
27277     case VPERM_2OP: {
27278       SDValue Src1 = Op.getOperand(1);
27279       SDValue Src2 = Op.getOperand(2);
27280 
27281       // Swap Src1 and Src2 in the node creation
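      // (The permute node is assumed to expect the index operand first, while
      // the intrinsic takes the data operand first.)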
27282       return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
27283     }
27284     case CFMA_OP_MASKZ:
27285     case CFMA_OP_MASK: {
27286       SDValue Src1 = Op.getOperand(1);
27287       SDValue Src2 = Op.getOperand(2);
27288       SDValue Src3 = Op.getOperand(3);
27289       SDValue Mask = Op.getOperand(4);
27290       MVT VT = Op.getSimpleValueType();
27291 
27292       SDValue PassThru = Src3;
27293       if (IntrData->Type == CFMA_OP_MASKZ)
27294         PassThru = getZeroVector(VT, Subtarget, DAG, dl);
27295 
27296       // We add rounding mode to the Node when
27297       //   - RC Opcode is specified and
27298       //   - RC is not "current direction".
27299       SDValue NewOp;
27300       if (IntrData->Opc1 != 0) {
27301         SDValue Rnd = Op.getOperand(5);
27302         unsigned RC = 0;
27303         if (isRoundModeSAEToX(Rnd, RC))
27304           NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2, Src3,
27305                               DAG.getTargetConstant(RC, dl, MVT::i32));
27306         else if (!isRoundModeCurDirection(Rnd))
27307           return SDValue();
27308       }
27309       if (!NewOp)
27310         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, Src3);
27311       return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
27312     }
27313     case IFMA_OP:
27314       // NOTE: We need to swizzle the operands to pass the multiply operands
27315       // first.
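      // i.e. intrinsic operands (1, 2, 3) are passed to the node as (2, 3, 1),
      // so the addend ends up last.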
27316       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27317                          Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
27318     case FPCLASSS: {
27319       SDValue Src1 = Op.getOperand(1);
27320       SDValue Imm = Op.getOperand(2);
27321       SDValue Mask = Op.getOperand(3);
27322       SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
27323       SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
27324                                                  Subtarget, DAG);
27325       // Need to fill with zeros to ensure the bitcast will produce zeroes
27326       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
27327       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
27328                                 DAG.getConstant(0, dl, MVT::v8i1),
27329                                 FPclassMask, DAG.getIntPtrConstant(0, dl));
27330       return DAG.getBitcast(MVT::i8, Ins);
27331     }
27332 
27333     case CMP_MASK_CC: {
27334       MVT MaskVT = Op.getSimpleValueType();
27335       SDValue CC = Op.getOperand(3);
27336       SDValue Mask = Op.getOperand(4);
27337       // We specify 2 possible opcodes for intrinsics with rounding modes.
27338       // First, we check if the intrinsic may have non-default rounding mode,
27339       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
27340       if (IntrData->Opc1 != 0) {
27341         SDValue Sae = Op.getOperand(5);
27342         if (isRoundModeSAE(Sae))
27343           return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
27344                              Op.getOperand(2), CC, Mask, Sae);
27345         if (!isRoundModeCurDirection(Sae))
27346           return SDValue();
27347       }
27348       // Default rounding mode.
27349       return DAG.getNode(IntrData->Opc0, dl, MaskVT,
27350                          {Op.getOperand(1), Op.getOperand(2), CC, Mask});
27351     }
27352     case CMP_MASK_SCALAR_CC: {
27353       SDValue Src1 = Op.getOperand(1);
27354       SDValue Src2 = Op.getOperand(2);
27355       SDValue CC = Op.getOperand(3);
27356       SDValue Mask = Op.getOperand(4);
27357 
27358       SDValue Cmp;
27359       if (IntrData->Opc1 != 0) {
27360         SDValue Sae = Op.getOperand(5);
27361         if (isRoundModeSAE(Sae))
27362           Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
27363         else if (!isRoundModeCurDirection(Sae))
27364           return SDValue();
27365       }
27366       // Default rounding mode.
27367       if (!Cmp.getNode())
27368         Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
27369 
27370       SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
27371                                              Subtarget, DAG);
27372       // Need to fill with zeros to ensure the bitcast will produce zeroes
27373       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
27374       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
27375                                 DAG.getConstant(0, dl, MVT::v8i1),
27376                                 CmpMask, DAG.getIntPtrConstant(0, dl));
27377       return DAG.getBitcast(MVT::i8, Ins);
27378     }
27379     case COMI: { // Comparison intrinsics
27380       ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
27381       SDValue LHS = Op.getOperand(1);
27382       SDValue RHS = Op.getOperand(2);
27383       // Some conditions require the operands to be swapped.
27384       if (CC == ISD::SETLT || CC == ISD::SETLE)
27385         std::swap(LHS, RHS);
27386 
27387       SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
27388       SDValue SetCC;
27389       switch (CC) {
27390       case ISD::SETEQ: { // (ZF = 1 and PF = 0)
27391         SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
27392         SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
27393         SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
27394         break;
27395       }
27396       case ISD::SETNE: { // (ZF = 0 or PF = 1)
27397         SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
27398         SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
27399         SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
27400         break;
27401       }
27402       case ISD::SETGT: // (CF = 0 and ZF = 0)
27403       case ISD::SETLT: { // Condition opposite to GT. Operands swapped above.
27404         SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
27405         break;
27406       }
27407       case ISD::SETGE: // CF = 0
27408       case ISD::SETLE: // Condition opposite to GE. Operands swapped above.
27409         SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
27410         break;
27411       default:
27412         llvm_unreachable("Unexpected illegal condition!");
27413       }
27414       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
27415     }
27416     case COMI_RM: { // Comparison intrinsics with Sae
27417       SDValue LHS = Op.getOperand(1);
27418       SDValue RHS = Op.getOperand(2);
27419       unsigned CondVal = Op.getConstantOperandVal(3);
27420       SDValue Sae = Op.getOperand(4);
27421 
27422       SDValue FCmp;
27423       if (isRoundModeCurDirection(Sae))
27424         FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
27425                            DAG.getTargetConstant(CondVal, dl, MVT::i8));
27426       else if (isRoundModeSAE(Sae))
27427         FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
27428                            DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
27429       else
27430         return SDValue();
27431       // Need to fill with zeros to ensure the bitcast will produce zeroes
27432       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
27433       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
27434                                 DAG.getConstant(0, dl, MVT::v16i1),
27435                                 FCmp, DAG.getIntPtrConstant(0, dl));
27436       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
27437                          DAG.getBitcast(MVT::i16, Ins));
27438     }
27439     case VSHIFT: {
27440       SDValue SrcOp = Op.getOperand(1);
27441       SDValue ShAmt = Op.getOperand(2);
27442       assert(ShAmt.getValueType() == MVT::i32 &&
27443              "Unexpected VSHIFT amount type");
27444 
27445       // Catch shift-by-constant.
27446       if (auto *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
27447         return getTargetVShiftByConstNode(IntrData->Opc0, dl,
27448                                           Op.getSimpleValueType(), SrcOp,
27449                                           CShAmt->getZExtValue(), DAG);
27450 
27451       ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
27452       return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
27453                                  SrcOp, ShAmt, 0, Subtarget, DAG);
27454     }
27455     case COMPRESS_EXPAND_IN_REG: {
27456       SDValue Mask = Op.getOperand(3);
27457       SDValue DataToCompress = Op.getOperand(1);
27458       SDValue PassThru = Op.getOperand(2);
27459       if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
27460         return Op.getOperand(1);
27461 
27462       // Avoid false dependency.
27463       if (PassThru.isUndef())
27464         PassThru = getZeroVector(VT, Subtarget, DAG, dl);
27465 
27466       return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
27467                          Mask);
27468     }
27469     case FIXUPIMM:
27470     case FIXUPIMM_MASKZ: {
27471       SDValue Src1 = Op.getOperand(1);
27472       SDValue Src2 = Op.getOperand(2);
27473       SDValue Src3 = Op.getOperand(3);
27474       SDValue Imm = Op.getOperand(4);
27475       SDValue Mask = Op.getOperand(5);
27476       SDValue Passthru = (IntrData->Type == FIXUPIMM)
27477                              ? Src1
27478                              : getZeroVector(VT, Subtarget, DAG, dl);
27479 
27480       unsigned Opc = IntrData->Opc0;
27481       if (IntrData->Opc1 != 0) {
27482         SDValue Sae = Op.getOperand(6);
27483         if (isRoundModeSAE(Sae))
27484           Opc = IntrData->Opc1;
27485         else if (!isRoundModeCurDirection(Sae))
27486           return SDValue();
27487       }
27488 
27489       SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
27490 
27491       if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
27492         return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
27493 
27494       return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
27495     }
27496     case ROUNDP: {
27497       assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
27498       // Clear the upper bits of the rounding immediate so that the legacy
27499       // intrinsic can't trigger the scaling behavior of VRNDSCALE.
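      // (In the VRNDSCALE immediate, the bits above [3:0] select the scale,
      // i.e. the number of fraction bits to keep, hence the mask with 0xf.)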
27500       auto Round = cast<ConstantSDNode>(Op.getOperand(2));
27501       SDValue RoundingMode =
27502           DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
27503       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27504                          Op.getOperand(1), RoundingMode);
27505     }
27506     case ROUNDS: {
27507       assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
27508       // Clear the upper bits of the rounding immediate so that the legacy
27509       // intrinsic can't trigger the scaling behavior of VRNDSCALE.
27510       auto Round = cast<ConstantSDNode>(Op.getOperand(3));
27511       SDValue RoundingMode =
27512           DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
27513       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27514                          Op.getOperand(1), Op.getOperand(2), RoundingMode);
27515     }
27516     case BEXTRI: {
27517       assert(IntrData->Opc0 == X86ISD::BEXTRI && "Unexpected opcode");
27518 
27519       uint64_t Imm = Op.getConstantOperandVal(2);
27520       SDValue Control = DAG.getTargetConstant(Imm & 0xffff, dl,
27521                                               Op.getValueType());
27522       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
27523                          Op.getOperand(1), Control);
27524     }
27525     // ADC/ADCX/SBB
27526     case ADX: {
27527       SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
27528       SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
27529 
27530       SDValue Res;
27531       // If the carry in is zero, then we should just use ADD/SUB instead of
27532       // ADC/SBB.
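      // (For a nonzero carry-in, CF is rematerialized below by adding -1 to
      // the carry-in byte, which produces a carry exactly when it is nonzero.)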
27533       if (isNullConstant(Op.getOperand(1))) {
27534         Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
27535                           Op.getOperand(3));
27536       } else {
27537         SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
27538                                     DAG.getConstant(-1, dl, MVT::i8));
27539         Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
27540                           Op.getOperand(3), GenCF.getValue(1));
27541       }
27542       SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
27543       SDValue Results[] = { SetCC, Res };
27544       return DAG.getMergeValues(Results, dl);
27545     }
27546     case CVTPD2PS_MASK:
27547     case CVTPD2DQ_MASK:
27548     case CVTQQ2PS_MASK:
27549     case TRUNCATE_TO_REG: {
27550       SDValue Src = Op.getOperand(1);
27551       SDValue PassThru = Op.getOperand(2);
27552       SDValue Mask = Op.getOperand(3);
27553 
27554       if (isAllOnesConstant(Mask))
27555         return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
27556 
27557       MVT SrcVT = Src.getSimpleValueType();
27558       MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
27559       Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27560       return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
27561                          {Src, PassThru, Mask});
27562     }
27563     case CVTPS2PH_MASK: {
27564       SDValue Src = Op.getOperand(1);
27565       SDValue Rnd = Op.getOperand(2);
27566       SDValue PassThru = Op.getOperand(3);
27567       SDValue Mask = Op.getOperand(4);
27568 
27569       unsigned RC = 0;
27570       unsigned Opc = IntrData->Opc0;
27571       bool SAE = Src.getValueType().is512BitVector() &&
27572                  (isRoundModeSAEToX(Rnd, RC) || isRoundModeSAE(Rnd));
27573       if (SAE) {
27574         Opc = X86ISD::CVTPS2PH_SAE;
27575         Rnd = DAG.getTargetConstant(RC, dl, MVT::i32);
27576       }
27577 
27578       if (isAllOnesConstant(Mask))
27579         return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd);
27580 
27581       if (SAE)
27582         Opc = X86ISD::MCVTPS2PH_SAE;
27583       else
27584         Opc = IntrData->Opc1;
27585       MVT SrcVT = Src.getSimpleValueType();
27586       MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
27587       Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27588       return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd, PassThru, Mask);
27589     }
27590     case CVTNEPS2BF16_MASK: {
27591       SDValue Src = Op.getOperand(1);
27592       SDValue PassThru = Op.getOperand(2);
27593       SDValue Mask = Op.getOperand(3);
27594 
27595       if (ISD::isBuildVectorAllOnes(Mask.getNode()))
27596         return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
27597 
27598       // Break false dependency.
27599       if (PassThru.isUndef())
27600         PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
27601 
27602       return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
27603                          Mask);
27604     }
27605     default:
27606       break;
27607     }
27608   }
27609 
27610   switch (IntNo) {
27611   default: return SDValue();    // Don't custom lower most intrinsics.
27612 
27613   // ptest and testp intrinsics. The intrinsics these come from are designed to
27614   // return an integer value, not just an instruction, so lower them to the
27615   // ptest or testp pattern and a setcc for the result.
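  // For example (illustrative): ptestz lowers to (PTEST a, b) plus a setcc on
  // ZF, while the vtest*/ktest* variants use TESTP/KTEST, as selected below.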
27616   case Intrinsic::x86_avx512_ktestc_b:
27617   case Intrinsic::x86_avx512_ktestc_w:
27618   case Intrinsic::x86_avx512_ktestc_d:
27619   case Intrinsic::x86_avx512_ktestc_q:
27620   case Intrinsic::x86_avx512_ktestz_b:
27621   case Intrinsic::x86_avx512_ktestz_w:
27622   case Intrinsic::x86_avx512_ktestz_d:
27623   case Intrinsic::x86_avx512_ktestz_q:
27624   case Intrinsic::x86_sse41_ptestz:
27625   case Intrinsic::x86_sse41_ptestc:
27626   case Intrinsic::x86_sse41_ptestnzc:
27627   case Intrinsic::x86_avx_ptestz_256:
27628   case Intrinsic::x86_avx_ptestc_256:
27629   case Intrinsic::x86_avx_ptestnzc_256:
27630   case Intrinsic::x86_avx_vtestz_ps:
27631   case Intrinsic::x86_avx_vtestc_ps:
27632   case Intrinsic::x86_avx_vtestnzc_ps:
27633   case Intrinsic::x86_avx_vtestz_pd:
27634   case Intrinsic::x86_avx_vtestc_pd:
27635   case Intrinsic::x86_avx_vtestnzc_pd:
27636   case Intrinsic::x86_avx_vtestz_ps_256:
27637   case Intrinsic::x86_avx_vtestc_ps_256:
27638   case Intrinsic::x86_avx_vtestnzc_ps_256:
27639   case Intrinsic::x86_avx_vtestz_pd_256:
27640   case Intrinsic::x86_avx_vtestc_pd_256:
27641   case Intrinsic::x86_avx_vtestnzc_pd_256: {
27642     unsigned TestOpc = X86ISD::PTEST;
27643     X86::CondCode X86CC;
27644     switch (IntNo) {
27645     default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
27646     case Intrinsic::x86_avx512_ktestc_b:
27647     case Intrinsic::x86_avx512_ktestc_w:
27648     case Intrinsic::x86_avx512_ktestc_d:
27649     case Intrinsic::x86_avx512_ktestc_q:
27650       // CF = 1
27651       TestOpc = X86ISD::KTEST;
27652       X86CC = X86::COND_B;
27653       break;
27654     case Intrinsic::x86_avx512_ktestz_b:
27655     case Intrinsic::x86_avx512_ktestz_w:
27656     case Intrinsic::x86_avx512_ktestz_d:
27657     case Intrinsic::x86_avx512_ktestz_q:
27658       TestOpc = X86ISD::KTEST;
27659       X86CC = X86::COND_E;
27660       break;
27661     case Intrinsic::x86_avx_vtestz_ps:
27662     case Intrinsic::x86_avx_vtestz_pd:
27663     case Intrinsic::x86_avx_vtestz_ps_256:
27664     case Intrinsic::x86_avx_vtestz_pd_256:
27665       TestOpc = X86ISD::TESTP;
27666       [[fallthrough]];
27667     case Intrinsic::x86_sse41_ptestz:
27668     case Intrinsic::x86_avx_ptestz_256:
27669       // ZF = 1
27670       X86CC = X86::COND_E;
27671       break;
27672     case Intrinsic::x86_avx_vtestc_ps:
27673     case Intrinsic::x86_avx_vtestc_pd:
27674     case Intrinsic::x86_avx_vtestc_ps_256:
27675     case Intrinsic::x86_avx_vtestc_pd_256:
27676       TestOpc = X86ISD::TESTP;
27677       [[fallthrough]];
27678     case Intrinsic::x86_sse41_ptestc:
27679     case Intrinsic::x86_avx_ptestc_256:
27680       // CF = 1
27681       X86CC = X86::COND_B;
27682       break;
27683     case Intrinsic::x86_avx_vtestnzc_ps:
27684     case Intrinsic::x86_avx_vtestnzc_pd:
27685     case Intrinsic::x86_avx_vtestnzc_ps_256:
27686     case Intrinsic::x86_avx_vtestnzc_pd_256:
27687       TestOpc = X86ISD::TESTP;
27688       [[fallthrough]];
27689     case Intrinsic::x86_sse41_ptestnzc:
27690     case Intrinsic::x86_avx_ptestnzc_256:
27691       // ZF and CF = 0
27692       X86CC = X86::COND_A;
27693       break;
27694     }
27695 
27696     SDValue LHS = Op.getOperand(1);
27697     SDValue RHS = Op.getOperand(2);
27698     SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
27699     SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
27700     return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
27701   }
27702 
27703   case Intrinsic::x86_sse42_pcmpistria128:
27704   case Intrinsic::x86_sse42_pcmpestria128:
27705   case Intrinsic::x86_sse42_pcmpistric128:
27706   case Intrinsic::x86_sse42_pcmpestric128:
27707   case Intrinsic::x86_sse42_pcmpistrio128:
27708   case Intrinsic::x86_sse42_pcmpestrio128:
27709   case Intrinsic::x86_sse42_pcmpistris128:
27710   case Intrinsic::x86_sse42_pcmpestris128:
27711   case Intrinsic::x86_sse42_pcmpistriz128:
27712   case Intrinsic::x86_sse42_pcmpestriz128: {
27713     unsigned Opcode;
27714     X86::CondCode X86CC;
27715     switch (IntNo) {
27716     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
27717     case Intrinsic::x86_sse42_pcmpistria128:
27718       Opcode = X86ISD::PCMPISTR;
27719       X86CC = X86::COND_A;
27720       break;
27721     case Intrinsic::x86_sse42_pcmpestria128:
27722       Opcode = X86ISD::PCMPESTR;
27723       X86CC = X86::COND_A;
27724       break;
27725     case Intrinsic::x86_sse42_pcmpistric128:
27726       Opcode = X86ISD::PCMPISTR;
27727       X86CC = X86::COND_B;
27728       break;
27729     case Intrinsic::x86_sse42_pcmpestric128:
27730       Opcode = X86ISD::PCMPESTR;
27731       X86CC = X86::COND_B;
27732       break;
27733     case Intrinsic::x86_sse42_pcmpistrio128:
27734       Opcode = X86ISD::PCMPISTR;
27735       X86CC = X86::COND_O;
27736       break;
27737     case Intrinsic::x86_sse42_pcmpestrio128:
27738       Opcode = X86ISD::PCMPESTR;
27739       X86CC = X86::COND_O;
27740       break;
27741     case Intrinsic::x86_sse42_pcmpistris128:
27742       Opcode = X86ISD::PCMPISTR;
27743       X86CC = X86::COND_S;
27744       break;
27745     case Intrinsic::x86_sse42_pcmpestris128:
27746       Opcode = X86ISD::PCMPESTR;
27747       X86CC = X86::COND_S;
27748       break;
27749     case Intrinsic::x86_sse42_pcmpistriz128:
27750       Opcode = X86ISD::PCMPISTR;
27751       X86CC = X86::COND_E;
27752       break;
27753     case Intrinsic::x86_sse42_pcmpestriz128:
27754       Opcode = X86ISD::PCMPESTR;
27755       X86CC = X86::COND_E;
27756       break;
27757     }
27758     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
27759     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
27760     SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
27761     SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
27762     return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
27763   }
27764 
27765   case Intrinsic::x86_sse42_pcmpistri128:
27766   case Intrinsic::x86_sse42_pcmpestri128: {
27767     unsigned Opcode;
27768     if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
27769       Opcode = X86ISD::PCMPISTR;
27770     else
27771       Opcode = X86ISD::PCMPESTR;
27772 
27773     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
27774     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
27775     return DAG.getNode(Opcode, dl, VTs, NewOps);
27776   }
27777 
27778   case Intrinsic::x86_sse42_pcmpistrm128:
27779   case Intrinsic::x86_sse42_pcmpestrm128: {
27780     unsigned Opcode;
27781     if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
27782       Opcode = X86ISD::PCMPISTR;
27783     else
27784       Opcode = X86ISD::PCMPESTR;
27785 
27786     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
27787     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
27788     return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
27789   }
27790 
27791   case Intrinsic::eh_sjlj_lsda: {
27792     MachineFunction &MF = DAG.getMachineFunction();
27793     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27794     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
27795     auto &Context = MF.getMMI().getContext();
27796     MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
27797                                             Twine(MF.getFunctionNumber()));
27798     return DAG.getNode(getGlobalWrapperKind(), dl, VT,
27799                        DAG.getMCSymbol(S, PtrVT));
27800   }
27801 
27802   case Intrinsic::x86_seh_lsda: {
27803     // Compute the symbol for the LSDA. We know it'll get emitted later.
27804     MachineFunction &MF = DAG.getMachineFunction();
27805     SDValue Op1 = Op.getOperand(1);
27806     auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
27807     MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
27808         GlobalValue::dropLLVMManglingEscape(Fn->getName()));
27809 
27810     // Generate a simple absolute symbol reference. This intrinsic is only
27811     // supported on 32-bit Windows, which isn't PIC.
27812     SDValue Result = DAG.getMCSymbol(LSDASym, VT);
27813     return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
27814   }
27815 
27816   case Intrinsic::eh_recoverfp: {
27817     SDValue FnOp = Op.getOperand(1);
27818     SDValue IncomingFPOp = Op.getOperand(2);
27819     GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
27820     auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
27821     if (!Fn)
27822       report_fatal_error(
27823           "llvm.eh.recoverfp must take a function as the first argument");
27824     return recoverFramePointer(DAG, Fn, IncomingFPOp);
27825   }
27826 
27827   case Intrinsic::localaddress: {
27828     // Returns one of the stack, base, or frame pointer registers, depending on
27829     // which is used to reference local variables.
27830     MachineFunction &MF = DAG.getMachineFunction();
27831     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27832     unsigned Reg;
27833     if (RegInfo->hasBasePointer(MF))
27834       Reg = RegInfo->getBaseRegister();
27835     else { // Handles the SP or FP case.
27836       bool CantUseFP = RegInfo->hasStackRealignment(MF);
27837       if (CantUseFP)
27838         Reg = RegInfo->getPtrSizedStackRegister(MF);
27839       else
27840         Reg = RegInfo->getPtrSizedFrameRegister(MF);
27841     }
27842     return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
27843   }
27844   case Intrinsic::x86_avx512_vp2intersect_q_512:
27845   case Intrinsic::x86_avx512_vp2intersect_q_256:
27846   case Intrinsic::x86_avx512_vp2intersect_q_128:
27847   case Intrinsic::x86_avx512_vp2intersect_d_512:
27848   case Intrinsic::x86_avx512_vp2intersect_d_256:
27849   case Intrinsic::x86_avx512_vp2intersect_d_128: {
27850     MVT MaskVT = Op.getSimpleValueType();
27851 
27852     SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
27853     SDLoc DL(Op);
27854 
27855     SDValue Operation =
27856         DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
27857                     Op->getOperand(1), Op->getOperand(2));
27858 
27859     SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
27860                                                  MaskVT, Operation);
27861     SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
27862                                                  MaskVT, Operation);
27863     return DAG.getMergeValues({Result0, Result1}, DL);
27864   }
27865   case Intrinsic::x86_mmx_pslli_w:
27866   case Intrinsic::x86_mmx_pslli_d:
27867   case Intrinsic::x86_mmx_pslli_q:
27868   case Intrinsic::x86_mmx_psrli_w:
27869   case Intrinsic::x86_mmx_psrli_d:
27870   case Intrinsic::x86_mmx_psrli_q:
27871   case Intrinsic::x86_mmx_psrai_w:
27872   case Intrinsic::x86_mmx_psrai_d: {
27873     SDLoc DL(Op);
27874     SDValue ShAmt = Op.getOperand(2);
27875     // If the argument is a constant, convert it to a target constant.
27876     if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
27877       // Clamp out of bounds shift amounts since they will otherwise be masked
27878       // to 8 bits, which may make them no longer out of bounds.
27879       unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
27880       if (ShiftAmount == 0)
27881         return Op.getOperand(1);
27882 
27883       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
27884                          Op.getOperand(0), Op.getOperand(1),
27885                          DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
27886     }
27887 
27888     unsigned NewIntrinsic;
27889     switch (IntNo) {
27890     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
27891     case Intrinsic::x86_mmx_pslli_w:
27892       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
27893       break;
27894     case Intrinsic::x86_mmx_pslli_d:
27895       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
27896       break;
27897     case Intrinsic::x86_mmx_pslli_q:
27898       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
27899       break;
27900     case Intrinsic::x86_mmx_psrli_w:
27901       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
27902       break;
27903     case Intrinsic::x86_mmx_psrli_d:
27904       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
27905       break;
27906     case Intrinsic::x86_mmx_psrli_q:
27907       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
27908       break;
27909     case Intrinsic::x86_mmx_psrai_w:
27910       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
27911       break;
27912     case Intrinsic::x86_mmx_psrai_d:
27913       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
27914       break;
27915     }
27916 
27917     // The vector shift intrinsics with scalars use 32-bit shift amounts but
27918     // the sse2/mmx shift instructions read 64 bits. Copy the 32 bits to an
27919     // MMX register.
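    // For example, a variable pslli_w amount is moved into an MMX register via
    // MMX_MOVW2D and the call is rewritten to the psll_w form selected above.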
27920     ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
27921     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
27922                        DAG.getTargetConstant(NewIntrinsic, DL,
27923                                              getPointerTy(DAG.getDataLayout())),
27924                        Op.getOperand(1), ShAmt);
27925   }
27926   case Intrinsic::thread_pointer: {
27927     if (Subtarget.isTargetELF()) {
27928       SDLoc dl(Op);
27929       EVT PtrVT = getPointerTy(DAG.getDataLayout());
27930       // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
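      // (The segment register is encoded as an address space, X86AS::FS or
      // X86AS::GS, on the i8 pointer type used for the zero-offset load.)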
27931       Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(
27932           *DAG.getContext(), Subtarget.is64Bit() ? X86AS::FS : X86AS::GS));
27933       return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
27934                          DAG.getIntPtrConstant(0, dl), MachinePointerInfo(Ptr));
27935     }
27936     report_fatal_error(
27937         "Target OS doesn't support __builtin_thread_pointer() yet.");
27938   }
27939   }
27940 }
27941 
27942 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
27943                                  SDValue Src, SDValue Mask, SDValue Base,
27944                                  SDValue Index, SDValue ScaleOp, SDValue Chain,
27945                                  const X86Subtarget &Subtarget) {
27946   SDLoc dl(Op);
27947   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
27948   // Scale must be constant.
27949   if (!C)
27950     return SDValue();
27951   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27952   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
27953                                         TLI.getPointerTy(DAG.getDataLayout()));
27954   EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
27955   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
27956   // If source is undef or we know it won't be used, use a zero vector
27957   // to break register dependency.
27958   // TODO: use undef instead and let BreakFalseDeps deal with it?
27959   if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
27960     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
27961 
27962   // Cast mask to an integer type.
27963   Mask = DAG.getBitcast(MaskVT, Mask);
27964 
27965   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
27966 
27967   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
27968   SDValue Res =
27969       DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
27970                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
27971   return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
27972 }
27973 
27974 static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
27975                              SDValue Src, SDValue Mask, SDValue Base,
27976                              SDValue Index, SDValue ScaleOp, SDValue Chain,
27977                              const X86Subtarget &Subtarget) {
27978   MVT VT = Op.getSimpleValueType();
27979   SDLoc dl(Op);
27980   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
27981   // Scale must be constant.
27982   if (!C)
27983     return SDValue();
27984   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27985   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
27986                                         TLI.getPointerTy(DAG.getDataLayout()));
27987   unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
27988                               VT.getVectorNumElements());
27989   MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
27990 
27991   // We support two versions of the gather intrinsics. One with scalar mask and
27992   // one with vXi1 mask. Convert scalar to vXi1 if necessary.
27993   if (Mask.getValueType() != MaskVT)
27994     Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27995 
27996   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
27997   // If source is undef or we know it won't be used, use a zero vector
27998   // to break register dependency.
27999   // TODO: use undef instead and let BreakFalseDeps deal with it?
28000   if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
28001     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
28002 
28003   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28004 
28005   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
28006   SDValue Res =
28007       DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
28008                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
28009   return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
28010 }
28011 
28012 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
28013                                SDValue Src, SDValue Mask, SDValue Base,
28014                                SDValue Index, SDValue ScaleOp, SDValue Chain,
28015                                const X86Subtarget &Subtarget) {
28016   SDLoc dl(Op);
28017   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
28018   // Scale must be constant.
28019   if (!C)
28020     return SDValue();
28021   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28022   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
28023                                         TLI.getPointerTy(DAG.getDataLayout()));
28024   unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
28025                               Src.getSimpleValueType().getVectorNumElements());
28026   MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
28027 
28028   // We support two versions of the scatter intrinsics. One with scalar mask and
28029   // one with vXi1 mask. Convert scalar to vXi1 if necessary.
28030   if (Mask.getValueType() != MaskVT)
28031     Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28032 
28033   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28034 
28035   SDVTList VTs = DAG.getVTList(MVT::Other);
28036   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
28037   SDValue Res =
28038       DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
28039                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
28040   return Res;
28041 }
28042 
28043 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
28044                                SDValue Mask, SDValue Base, SDValue Index,
28045                                SDValue ScaleOp, SDValue Chain,
28046                                const X86Subtarget &Subtarget) {
28047   SDLoc dl(Op);
28048   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
28049   // Scale must be constant.
28050   if (!C)
28051     return SDValue();
28052   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28053   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
28054                                         TLI.getPointerTy(DAG.getDataLayout()));
28055   SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
28056   SDValue Segment = DAG.getRegister(0, MVT::i32);
28057   MVT MaskVT =
28058     MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
28059   SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28060   SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
28061   SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
28062   return SDValue(Res, 0);
28063 }
28064 
28065 /// Handles the lowering of builtin intrinsics with chain that return their
28066 /// value into registers EDX:EAX.
28067 /// If operand SrcReg is a valid register identifier, then operand 2 of N is
28068 /// copied to SrcReg. The assumption is that SrcReg is an implicit input to
28069 /// TargetOpcode.
28070 /// Returns a Glue value which can be used to add extra copy-from-reg if the
28071 /// expanded intrinsics implicitly define extra registers (i.e. not just
28072 /// EDX:EAX).
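/// A rough usage sketch (see getReadTimeStampCounter below): with SrcReg == 0
/// no CopyToReg is emitted, the machine node is created with just the chain
/// (plus glue when present), and the EDX:EAX halves are merged into an i64
/// result that is appended to Results along with the chain.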
28073 static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
28074                                         SelectionDAG &DAG,
28075                                         unsigned TargetOpcode,
28076                                         unsigned SrcReg,
28077                                         const X86Subtarget &Subtarget,
28078                                         SmallVectorImpl<SDValue> &Results) {
28079   SDValue Chain = N->getOperand(0);
28080   SDValue Glue;
28081 
28082   if (SrcReg) {
28083     assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
28084     Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
28085     Glue = Chain.getValue(1);
28086   }
28087 
28088   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
28089   SDValue N1Ops[] = {Chain, Glue};
28090   SDNode *N1 = DAG.getMachineNode(
28091       TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
28092   Chain = SDValue(N1, 0);
28093 
28094   // Read the result, which the expanded instruction returns in EDX:EAX.
28095   SDValue LO, HI;
28096   if (Subtarget.is64Bit()) {
28097     LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
28098     HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
28099                             LO.getValue(2));
28100   } else {
28101     LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
28102     HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
28103                             LO.getValue(2));
28104   }
28105   Chain = HI.getValue(1);
28106   Glue = HI.getValue(2);
28107 
28108   if (Subtarget.is64Bit()) {
28109     // Merge the two 32-bit values into a 64-bit one.
28110     SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
28111                               DAG.getConstant(32, DL, MVT::i8));
28112     Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
28113     Results.push_back(Chain);
28114     return Glue;
28115   }
28116 
28117   // Use a buildpair to merge the two 32-bit values into a 64-bit one.
28118   SDValue Ops[] = { LO, HI };
28119   SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
28120   Results.push_back(Pair);
28121   Results.push_back(Chain);
28122   return Glue;
28123 }
28124 
28125 /// Handles the lowering of builtin intrinsics that read the time stamp counter
28126 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
28127 /// READCYCLECOUNTER nodes.
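/// For RDTSCP the value loaded into ECX is appended as an extra result; see
/// the explicit CopyFromReg of ECX below.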
28128 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
28129                                     SelectionDAG &DAG,
28130                                     const X86Subtarget &Subtarget,
28131                                     SmallVectorImpl<SDValue> &Results) {
28132   // The processor's time-stamp counter (a 64-bit MSR) is stored into the
28133   // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
28134   // and the EAX register is loaded with the low-order 32 bits.
28135   SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
28136                                              /* NoRegister */0, Subtarget,
28137                                              Results);
28138   if (Opcode != X86::RDTSCP)
28139     return;
28140 
28141   SDValue Chain = Results[1];
28142   // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
28143   // the ECX register. Add 'ecx' explicitly to the chain.
28144   SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
28145   Results[1] = ecx;
28146   Results.push_back(ecx.getValue(1));
28147 }
28148 
28149 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
28150                                      SelectionDAG &DAG) {
28151   SmallVector<SDValue, 3> Results;
28152   SDLoc DL(Op);
28153   getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
28154                           Results);
28155   return DAG.getMergeValues(Results, DL);
28156 }
28157 
28158 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
28159   MachineFunction &MF = DAG.getMachineFunction();
28160   SDValue Chain = Op.getOperand(0);
28161   SDValue RegNode = Op.getOperand(2);
28162   WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
28163   if (!EHInfo)
28164     report_fatal_error("EH registrations only live in functions using WinEH");
28165 
28166   // Cast the operand to an alloca, and remember the frame index.
28167   auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
28168   if (!FINode)
28169     report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
28170   EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
28171 
28172   // Return the chain operand without making any DAG nodes.
28173   return Chain;
28174 }
28175 
28176 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
28177   MachineFunction &MF = DAG.getMachineFunction();
28178   SDValue Chain = Op.getOperand(0);
28179   SDValue EHGuard = Op.getOperand(2);
28180   WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
28181   if (!EHInfo)
28182     report_fatal_error("EHGuard only live in functions using WinEH");
28183 
28184   // Cast the operand to an alloca, and remember the frame index.
28185   auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
28186   if (!FINode)
28187     report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
28188   EHInfo->EHGuardFrameIndex = FINode->getIndex();
28189 
28190   // Return the chain operand without making any DAG nodes.
28191   return Chain;
28192 }
28193 
28194 /// Emit Truncating Store with signed or unsigned saturation.
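/// (A thin helper: it picks X86ISD::VTRUNCSTORES for signed saturation or
/// X86ISD::VTRUNCSTOREUS otherwise; the undef operand below appears to be a
/// placeholder matching the mask slot of the masked variant.)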
28195 static SDValue
28196 EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
28197                 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
28198                 SelectionDAG &DAG) {
28199   SDVTList VTs = DAG.getVTList(MVT::Other);
28200   SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
28201   SDValue Ops[] = { Chain, Val, Ptr, Undef };
28202   unsigned Opc = SignedSat ? X86ISD::VTRUNCSTORES : X86ISD::VTRUNCSTOREUS;
28203   return DAG.getMemIntrinsicNode(Opc, Dl, VTs, Ops, MemVT, MMO);
28204 }
28205 
28206 /// Emit Masked Truncating Store with signed or unsigned saturation.
28207 static SDValue
28208 EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
28209                       SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
28210                       MachineMemOperand *MMO, SelectionDAG &DAG) {
28211   SDVTList VTs = DAG.getVTList(MVT::Other);
28212   SDValue Ops[] = { Chain, Val, Ptr, Mask };
28213   unsigned Opc = SignedSat ? X86ISD::VMTRUNCSTORES : X86ISD::VMTRUNCSTOREUS;
28214   return DAG.getMemIntrinsicNode(Opc, Dl, VTs, Ops, MemVT, MMO);
28215 }
28216 
28217 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
28218                                       SelectionDAG &DAG) {
28219   unsigned IntNo = Op.getConstantOperandVal(1);
28220   const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
28221   if (!IntrData) {
28222     switch (IntNo) {
28223 
28224     case Intrinsic::swift_async_context_addr: {
28225       SDLoc dl(Op);
28226       auto &MF = DAG.getMachineFunction();
28227       auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
28228       if (Subtarget.is64Bit()) {
28229         MF.getFrameInfo().setFrameAddressIsTaken(true);
28230         X86FI->setHasSwiftAsyncContext(true);
28231         SDValue Chain = Op->getOperand(0);
28232         SDValue CopyRBP = DAG.getCopyFromReg(Chain, dl, X86::RBP, MVT::i64);
28233         SDValue Result =
28234             SDValue(DAG.getMachineNode(X86::SUB64ri8, dl, MVT::i64, CopyRBP,
28235                                        DAG.getTargetConstant(8, dl, MVT::i32)),
28236                     0);
28237         // Return { result, chain }.
28238         return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
28239                            CopyRBP.getValue(1));
28240       } else {
28241         // 32-bit, so there is no special extended frame; create or reuse an
28242         // existing stack slot.
28243         if (!X86FI->getSwiftAsyncContextFrameIdx())
28244           X86FI->setSwiftAsyncContextFrameIdx(
28245               MF.getFrameInfo().CreateStackObject(4, Align(4), false));
28246         SDValue Result =
28247             DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(), MVT::i32);
28248         // Return { result, chain }.
28249         return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
28250                            Op->getOperand(0));
28251       }
28252     }
28253 
28254     case llvm::Intrinsic::x86_seh_ehregnode:
28255       return MarkEHRegistrationNode(Op, DAG);
28256     case llvm::Intrinsic::x86_seh_ehguard:
28257       return MarkEHGuard(Op, DAG);
28258     case llvm::Intrinsic::x86_rdpkru: {
28259       SDLoc dl(Op);
28260       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
28261       // Create a RDPKRU node and pass 0 to the ECX parameter.
28262       return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
28263                          DAG.getConstant(0, dl, MVT::i32));
28264     }
28265     case llvm::Intrinsic::x86_wrpkru: {
28266       SDLoc dl(Op);
28267       // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
28268       // to the EDX and ECX parameters.
28269       return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
28270                          Op.getOperand(0), Op.getOperand(2),
28271                          DAG.getConstant(0, dl, MVT::i32),
28272                          DAG.getConstant(0, dl, MVT::i32));
28273     }
28274     case llvm::Intrinsic::asan_check_memaccess: {
28275       // Mark this as adjustsStack because it will be lowered to a call.
28276       DAG.getMachineFunction().getFrameInfo().setAdjustsStack(true);
28277       // Don't do anything here, we will expand these intrinsics out later.
28278       return Op;
28279     }
28280     case llvm::Intrinsic::x86_flags_read_u32:
28281     case llvm::Intrinsic::x86_flags_read_u64:
28282     case llvm::Intrinsic::x86_flags_write_u32:
28283     case llvm::Intrinsic::x86_flags_write_u64: {
28284       // We need a frame pointer because this will get lowered to a PUSH/POP
28285       // sequence.
28286       MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
28287       MFI.setHasCopyImplyingStackAdjustment(true);
28288       // Don't do anything here, we will expand these intrinsics out later
28289       // during FinalizeISel in EmitInstrWithCustomInserter.
28290       return Op;
28291     }
28292     case Intrinsic::x86_lwpins32:
28293     case Intrinsic::x86_lwpins64:
28294     case Intrinsic::x86_umwait:
28295     case Intrinsic::x86_tpause: {
28296       SDLoc dl(Op);
28297       SDValue Chain = Op->getOperand(0);
28298       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
28299       unsigned Opcode;
28300 
28301       switch (IntNo) {
28302       default: llvm_unreachable("Impossible intrinsic");
28303       case Intrinsic::x86_umwait:
28304         Opcode = X86ISD::UMWAIT;
28305         break;
28306       case Intrinsic::x86_tpause:
28307         Opcode = X86ISD::TPAUSE;
28308         break;
28309       case Intrinsic::x86_lwpins32:
28310       case Intrinsic::x86_lwpins64:
28311         Opcode = X86ISD::LWPINS;
28312         break;
28313       }
28314 
28315       SDValue Operation =
28316           DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
28317                       Op->getOperand(3), Op->getOperand(4));
28318       SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
28319       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
28320                          Operation.getValue(1));
28321     }
28322     case Intrinsic::x86_enqcmd:
28323     case Intrinsic::x86_enqcmds: {
28324       SDLoc dl(Op);
28325       SDValue Chain = Op.getOperand(0);
28326       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
28327       unsigned Opcode;
28328       switch (IntNo) {
28329       default: llvm_unreachable("Impossible intrinsic!");
28330       case Intrinsic::x86_enqcmd:
28331         Opcode = X86ISD::ENQCMD;
28332         break;
28333       case Intrinsic::x86_enqcmds:
28334         Opcode = X86ISD::ENQCMDS;
28335         break;
28336       }
28337       SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
28338                                       Op.getOperand(3));
28339       SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
28340       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
28341                          Operation.getValue(1));
28342     }
28343     case Intrinsic::x86_aesenc128kl:
28344     case Intrinsic::x86_aesdec128kl:
28345     case Intrinsic::x86_aesenc256kl:
28346     case Intrinsic::x86_aesdec256kl: {
28347       SDLoc DL(Op);
28348       SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::i32, MVT::Other);
28349       SDValue Chain = Op.getOperand(0);
28350       unsigned Opcode;
28351 
28352       switch (IntNo) {
28353       default: llvm_unreachable("Impossible intrinsic");
28354       case Intrinsic::x86_aesenc128kl:
28355         Opcode = X86ISD::AESENC128KL;
28356         break;
28357       case Intrinsic::x86_aesdec128kl:
28358         Opcode = X86ISD::AESDEC128KL;
28359         break;
28360       case Intrinsic::x86_aesenc256kl:
28361         Opcode = X86ISD::AESENC256KL;
28362         break;
28363       case Intrinsic::x86_aesdec256kl:
28364         Opcode = X86ISD::AESDEC256KL;
28365         break;
28366       }
28367 
28368       MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28369       MachineMemOperand *MMO = MemIntr->getMemOperand();
28370       EVT MemVT = MemIntr->getMemoryVT();
28371       SDValue Operation = DAG.getMemIntrinsicNode(
28372           Opcode, DL, VTs, {Chain, Op.getOperand(2), Op.getOperand(3)}, MemVT,
28373           MMO);
28374       SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(1), DL, DAG);
28375 
28376       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
28377                          {ZF, Operation.getValue(0), Operation.getValue(2)});
28378     }
28379     case Intrinsic::x86_aesencwide128kl:
28380     case Intrinsic::x86_aesdecwide128kl:
28381     case Intrinsic::x86_aesencwide256kl:
28382     case Intrinsic::x86_aesdecwide256kl: {
28383       SDLoc DL(Op);
28384       SDVTList VTs = DAG.getVTList(
28385           {MVT::i32, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64,
28386            MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::Other});
28387       SDValue Chain = Op.getOperand(0);
28388       unsigned Opcode;
28389 
28390       switch (IntNo) {
28391       default: llvm_unreachable("Impossible intrinsic");
28392       case Intrinsic::x86_aesencwide128kl:
28393         Opcode = X86ISD::AESENCWIDE128KL;
28394         break;
28395       case Intrinsic::x86_aesdecwide128kl:
28396         Opcode = X86ISD::AESDECWIDE128KL;
28397         break;
28398       case Intrinsic::x86_aesencwide256kl:
28399         Opcode = X86ISD::AESENCWIDE256KL;
28400         break;
28401       case Intrinsic::x86_aesdecwide256kl:
28402         Opcode = X86ISD::AESDECWIDE256KL;
28403         break;
28404       }
28405 
28406       MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
28407       MachineMemOperand *MMO = MemIntr->getMemOperand();
28408       EVT MemVT = MemIntr->getMemoryVT();
28409       SDValue Operation = DAG.getMemIntrinsicNode(
28410           Opcode, DL, VTs,
28411           {Chain, Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
28412            Op.getOperand(5), Op.getOperand(6), Op.getOperand(7),
28413            Op.getOperand(8), Op.getOperand(9), Op.getOperand(10)},
28414           MemVT, MMO);
28415       SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(0), DL, DAG);
28416 
28417       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
28418                          {ZF, Operation.getValue(1), Operation.getValue(2),
28419                           Operation.getValue(3), Operation.getValue(4),
28420                           Operation.getValue(5), Operation.getValue(6),
28421                           Operation.getValue(7), Operation.getValue(8),
28422                           Operation.getValue(9)});
28423     }
28424     case Intrinsic::x86_testui: {
28425       SDLoc dl(Op);
28426       SDValue Chain = Op.getOperand(0);
28427       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
28428       SDValue Operation = DAG.getNode(X86ISD::TESTUI, dl, VTs, Chain);
28429       SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
28430       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
28431                          Operation.getValue(1));
28432     }
28433     case Intrinsic::x86_atomic_bts_rm:
28434     case Intrinsic::x86_atomic_btc_rm:
28435     case Intrinsic::x86_atomic_btr_rm: {
28436       SDLoc DL(Op);
28437       MVT VT = Op.getSimpleValueType();
28438       SDValue Chain = Op.getOperand(0);
28439       SDValue Op1 = Op.getOperand(2);
28440       SDValue Op2 = Op.getOperand(3);
28441       unsigned Opc = IntNo == Intrinsic::x86_atomic_bts_rm   ? X86ISD::LBTS_RM
28442                      : IntNo == Intrinsic::x86_atomic_btc_rm ? X86ISD::LBTC_RM
28443                                                              : X86ISD::LBTR_RM;
28444       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
28445       SDValue Res =
28446           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
28447                                   {Chain, Op1, Op2}, VT, MMO);
28448       Chain = Res.getValue(1);
28449       Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
28450       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
28451     }
28452     case Intrinsic::x86_atomic_bts:
28453     case Intrinsic::x86_atomic_btc:
28454     case Intrinsic::x86_atomic_btr: {
28455       SDLoc DL(Op);
28456       MVT VT = Op.getSimpleValueType();
28457       SDValue Chain = Op.getOperand(0);
28458       SDValue Op1 = Op.getOperand(2);
28459       SDValue Op2 = Op.getOperand(3);
28460       unsigned Opc = IntNo == Intrinsic::x86_atomic_bts   ? X86ISD::LBTS
28461                      : IntNo == Intrinsic::x86_atomic_btc ? X86ISD::LBTC
28462                                                           : X86ISD::LBTR;
28463       SDValue Size = DAG.getConstant(VT.getScalarSizeInBits(), DL, MVT::i32);
28464       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
28465       SDValue Res =
28466           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
28467                                   {Chain, Op1, Op2, Size}, VT, MMO);
28468       Chain = Res.getValue(1);
28469       Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
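      // Shift the 0/1 carry result back up to the tested bit position.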
28470       unsigned Imm = cast<ConstantSDNode>(Op2)->getZExtValue();
28471       if (Imm)
28472         Res = DAG.getNode(ISD::SHL, DL, VT, Res,
28473                           DAG.getShiftAmountConstant(Imm, VT, DL));
28474       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
28475     }
28476     case Intrinsic::x86_cmpccxadd32:
28477     case Intrinsic::x86_cmpccxadd64: {
28478       SDLoc DL(Op);
28479       SDValue Chain = Op.getOperand(0);
28480       SDValue Addr = Op.getOperand(2);
28481       SDValue Src1 = Op.getOperand(3);
28482       SDValue Src2 = Op.getOperand(4);
28483       SDValue CC = Op.getOperand(5);
28484       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
28485       SDValue Operation = DAG.getMemIntrinsicNode(
28486           X86ISD::CMPCCXADD, DL, Op->getVTList(), {Chain, Addr, Src1, Src2, CC},
28487           MVT::i32, MMO);
28488       return Operation;
28489     }
28490     case Intrinsic::x86_aadd32:
28491     case Intrinsic::x86_aadd64:
28492     case Intrinsic::x86_aand32:
28493     case Intrinsic::x86_aand64:
28494     case Intrinsic::x86_aor32:
28495     case Intrinsic::x86_aor64:
28496     case Intrinsic::x86_axor32:
28497     case Intrinsic::x86_axor64: {
28498       SDLoc DL(Op);
28499       SDValue Chain = Op.getOperand(0);
28500       SDValue Op1 = Op.getOperand(2);
28501       SDValue Op2 = Op.getOperand(3);
28502       MVT VT = Op2.getSimpleValueType();
28503       unsigned Opc = 0;
28504       switch (IntNo) {
28505       default:
28506         llvm_unreachable("Unknown Intrinsic");
28507       case Intrinsic::x86_aadd32:
28508       case Intrinsic::x86_aadd64:
28509         Opc = X86ISD::AADD;
28510         break;
28511       case Intrinsic::x86_aand32:
28512       case Intrinsic::x86_aand64:
28513         Opc = X86ISD::AAND;
28514         break;
28515       case Intrinsic::x86_aor32:
28516       case Intrinsic::x86_aor64:
28517         Opc = X86ISD::AOR;
28518         break;
28519       case Intrinsic::x86_axor32:
28520       case Intrinsic::x86_axor64:
28521         Opc = X86ISD::AXOR;
28522         break;
28523       }
28524       MachineMemOperand *MMO = cast<MemSDNode>(Op)->getMemOperand();
28525       return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(),
28526                                      {Chain, Op1, Op2}, VT, MMO);
28527     }
28528     case Intrinsic::x86_atomic_add_cc:
28529     case Intrinsic::x86_atomic_sub_cc:
28530     case Intrinsic::x86_atomic_or_cc:
28531     case Intrinsic::x86_atomic_and_cc:
28532     case Intrinsic::x86_atomic_xor_cc: {
28533       SDLoc DL(Op);
28534       SDValue Chain = Op.getOperand(0);
28535       SDValue Op1 = Op.getOperand(2);
28536       SDValue Op2 = Op.getOperand(3);
28537       X86::CondCode CC = (X86::CondCode)Op.getConstantOperandVal(4);
28538       MVT VT = Op2.getSimpleValueType();
28539       unsigned Opc = 0;
28540       switch (IntNo) {
28541       default:
28542         llvm_unreachable("Unknown Intrinsic");
28543       case Intrinsic::x86_atomic_add_cc:
28544         Opc = X86ISD::LADD;
28545         break;
28546       case Intrinsic::x86_atomic_sub_cc:
28547         Opc = X86ISD::LSUB;
28548         break;
28549       case Intrinsic::x86_atomic_or_cc:
28550         Opc = X86ISD::LOR;
28551         break;
28552       case Intrinsic::x86_atomic_and_cc:
28553         Opc = X86ISD::LAND;
28554         break;
28555       case Intrinsic::x86_atomic_xor_cc:
28556         Opc = X86ISD::LXOR;
28557         break;
28558       }
28559       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
28560       SDValue LockArith =
28561           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
28562                                   {Chain, Op1, Op2}, VT, MMO);
28563       Chain = LockArith.getValue(1);
28564       return DAG.getMergeValues({getSETCC(CC, LockArith, DL, DAG), Chain}, DL);
28565     }
28566     }
28567     return SDValue();
28568   }
28569 
28570   SDLoc dl(Op);
28571   switch(IntrData->Type) {
28572   default: llvm_unreachable("Unknown Intrinsic Type");
28573   case RDSEED:
28574   case RDRAND: {
28575     // Emit the node with the right value type.
28576     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
28577     SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
28578 
28579     // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
28580     // Otherwise return the value from Rand, which is always 0, cast to i32.
28581     SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
28582                      DAG.getConstant(1, dl, Op->getValueType(1)),
28583                      DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
28584                      SDValue(Result.getNode(), 1)};
28585     SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
28586 
28587     // Return { result, isValid, chain }.
28588     return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
28589                        SDValue(Result.getNode(), 2));
28590   }
28591   case GATHER_AVX2: {
28592     SDValue Chain = Op.getOperand(0);
28593     SDValue Src   = Op.getOperand(2);
28594     SDValue Base  = Op.getOperand(3);
28595     SDValue Index = Op.getOperand(4);
28596     SDValue Mask  = Op.getOperand(5);
28597     SDValue Scale = Op.getOperand(6);
28598     return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
28599                              Scale, Chain, Subtarget);
28600   }
28601   case GATHER: {
28602     // gather(v1, mask, index, base, scale);
28603     SDValue Chain = Op.getOperand(0);
28604     SDValue Src   = Op.getOperand(2);
28605     SDValue Base  = Op.getOperand(3);
28606     SDValue Index = Op.getOperand(4);
28607     SDValue Mask  = Op.getOperand(5);
28608     SDValue Scale = Op.getOperand(6);
28609     return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
28610                          Chain, Subtarget);
28611   }
28612   case SCATTER: {
28613     // scatter(base, mask, index, v1, scale);
28614     SDValue Chain = Op.getOperand(0);
28615     SDValue Base  = Op.getOperand(2);
28616     SDValue Mask  = Op.getOperand(3);
28617     SDValue Index = Op.getOperand(4);
28618     SDValue Src   = Op.getOperand(5);
28619     SDValue Scale = Op.getOperand(6);
28620     return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
28621                           Scale, Chain, Subtarget);
28622   }
28623   case PREFETCH: {
28624     const APInt &HintVal = Op.getConstantOperandAPInt(6);
28625     assert((HintVal == 2 || HintVal == 3) &&
28626            "Wrong prefetch hint in intrinsic: should be 2 or 3");
28627     unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
28628     SDValue Chain = Op.getOperand(0);
28629     SDValue Mask  = Op.getOperand(2);
28630     SDValue Index = Op.getOperand(3);
28631     SDValue Base  = Op.getOperand(4);
28632     SDValue Scale = Op.getOperand(5);
28633     return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
28634                            Subtarget);
28635   }
28636   // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
28637   case RDTSC: {
28638     SmallVector<SDValue, 2> Results;
28639     getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
28640                             Results);
28641     return DAG.getMergeValues(Results, dl);
28642   }
28643   // Read Performance Monitoring Counters.
28644   case RDPMC:
28645   // Read Processor Register.
28646   case RDPRU:
28647   // GetExtended Control Register.
28648   case XGETBV: {
28649     SmallVector<SDValue, 2> Results;
28650 
28651     // RDPMC uses ECX to select the index of the performance counter to read.
28652     // RDPRU uses ECX to select the processor register to read.
28653     // XGETBV uses ECX to select the index of the XCR register to return.
28654     // The result is stored into registers EDX:EAX.
28655     expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
28656                                 Subtarget, Results);
28657     return DAG.getMergeValues(Results, dl);
28658   }
28659   // XTEST intrinsics.
28660   case XTEST: {
28661     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
28662     SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
28663 
28664     SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
28665     SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
28666     return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
28667                        Ret, SDValue(InTrans.getNode(), 1));
28668   }
28669   case TRUNCATE_TO_MEM_VI8:
28670   case TRUNCATE_TO_MEM_VI16:
28671   case TRUNCATE_TO_MEM_VI32: {
28672     SDValue Mask = Op.getOperand(4);
28673     SDValue DataToTruncate = Op.getOperand(3);
28674     SDValue Addr = Op.getOperand(2);
28675     SDValue Chain = Op.getOperand(0);
28676 
28677     MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
28678     assert(MemIntr && "Expected MemIntrinsicSDNode!");
28679 
28680     EVT MemVT  = MemIntr->getMemoryVT();
28681 
28682     uint16_t TruncationOp = IntrData->Opc0;
28683     switch (TruncationOp) {
28684     case X86ISD::VTRUNC: {
28685       if (isAllOnesConstant(Mask)) // return just a truncate store
28686         return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
28687                                  MemIntr->getMemOperand());
28688 
28689       MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
28690       SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28691       SDValue Offset = DAG.getUNDEF(VMask.getValueType());
28692 
28693       return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
28694                                 MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
28695                                 true /* truncating */);
28696     }
28697     case X86ISD::VTRUNCUS:
28698     case X86ISD::VTRUNCS: {
28699       bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
28700       if (isAllOnesConstant(Mask))
28701         return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
28702                                MemIntr->getMemOperand(), DAG);
28703 
28704       MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
28705       SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
28706 
28707       return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
28708                                    VMask, MemVT, MemIntr->getMemOperand(), DAG);
28709     }
28710     default:
28711       llvm_unreachable("Unsupported truncstore intrinsic");
28712     }
28713   }
28714   }
28715 }
28716 
28717 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
28718                                            SelectionDAG &DAG) const {
28719   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
28720   MFI.setReturnAddressIsTaken(true);
28721 
28722   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
28723     return SDValue();
28724 
28725   unsigned Depth = Op.getConstantOperandVal(0);
28726   SDLoc dl(Op);
28727   EVT PtrVT = getPointerTy(DAG.getDataLayout());
28728 
28729   if (Depth > 0) {
28730     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
28731     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
28732     SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
28733     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
28734                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
28735                        MachinePointerInfo());
28736   }
28737 
28738   // Just load the return address.
28739   SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
28740   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
28741                      MachinePointerInfo());
28742 }
28743 
28744 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
28745                                                  SelectionDAG &DAG) const {
28746   DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
28747   return getReturnAddressFrameIndex(DAG);
28748 }
28749 
28750 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
28751   MachineFunction &MF = DAG.getMachineFunction();
28752   MachineFrameInfo &MFI = MF.getFrameInfo();
28753   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
28754   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
28755   EVT VT = Op.getValueType();
28756 
28757   MFI.setFrameAddressIsTaken(true);
28758 
28759   if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
28760     // Depth > 0 makes no sense on targets which use Windows unwind codes.  It
28761     // is not possible to crawl up the stack without looking at the unwind codes
28762     // simultaneously.
28763     int FrameAddrIndex = FuncInfo->getFAIndex();
28764     if (!FrameAddrIndex) {
28765       // Set up a frame object for the return address.
28766       unsigned SlotSize = RegInfo->getSlotSize();
28767       FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
28768           SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
28769       FuncInfo->setFAIndex(FrameAddrIndex);
28770     }
28771     return DAG.getFrameIndex(FrameAddrIndex, VT);
28772   }
28773 
28774   unsigned FrameReg =
28775       RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
28776   SDLoc dl(Op);  // FIXME probably not meaningful
28777   unsigned Depth = Op.getConstantOperandVal(0);
28778   assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
28779           (FrameReg == X86::EBP && VT == MVT::i32)) &&
28780          "Invalid Frame Register!");
28781   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
28782   while (Depth--)
28783     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
28784                             MachinePointerInfo());
28785   return FrameAddr;
28786 }
28787 
28788 // FIXME? Maybe this could be a TableGen attribute on some registers and
28789 // this table could be generated automatically from RegInfo.
28790 Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
28791                                               const MachineFunction &MF) const {
28792   const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
28793 
28794   Register Reg = StringSwitch<unsigned>(RegName)
28795                        .Case("esp", X86::ESP)
28796                        .Case("rsp", X86::RSP)
28797                        .Case("ebp", X86::EBP)
28798                        .Case("rbp", X86::RBP)
28799                        .Default(0);
28800 
28801   if (Reg == X86::EBP || Reg == X86::RBP) {
28802     if (!TFI.hasFP(MF))
28803       report_fatal_error("register " + StringRef(RegName) +
28804                          " is allocatable: function has no frame pointer");
28805 #ifndef NDEBUG
28806     else {
28807       const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
28808       Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
28809       assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
28810              "Invalid Frame Register!");
28811     }
28812 #endif
28813   }
28814 
28815   if (Reg)
28816     return Reg;
28817 
28818   report_fatal_error("Invalid register name global variable");
28819 }
28820 
28821 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
28822                                                      SelectionDAG &DAG) const {
28823   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
28824   return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
28825 }
28826 
28827 Register X86TargetLowering::getExceptionPointerRegister(
28828     const Constant *PersonalityFn) const {
28829   if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
28830     return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
28831 
28832   return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
28833 }
28834 
28835 Register X86TargetLowering::getExceptionSelectorRegister(
28836     const Constant *PersonalityFn) const {
28837   // Funclet personalities don't use selectors (the runtime does the selection).
28838   if (isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)))
28839     return X86::NoRegister;
28840   return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
28841 }
28842 
28843 bool X86TargetLowering::needsFixedCatchObjects() const {
28844   return Subtarget.isTargetWin64();
28845 }
28846 
28847 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
28848   SDValue Chain     = Op.getOperand(0);
28849   SDValue Offset    = Op.getOperand(1);
28850   SDValue Handler   = Op.getOperand(2);
28851   SDLoc dl      (Op);
28852 
28853   EVT PtrVT = getPointerTy(DAG.getDataLayout());
28854   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
28855   Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
28856   assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
28857           (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
28858          "Invalid Frame Register!");
28859   SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
28860   Register StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
28861 
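  // Store the handler at the return address slot (FrameReg + SlotSize),
  // adjusted by the requested offset, and pass that address to EH_RETURN.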
28862   SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
28863                                  DAG.getIntPtrConstant(RegInfo->getSlotSize(),
28864                                                        dl));
28865   StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
28866   Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
28867   Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
28868 
28869   return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
28870                      DAG.getRegister(StoreAddrReg, PtrVT));
28871 }
28872 
28873 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
28874                                                SelectionDAG &DAG) const {
28875   SDLoc DL(Op);
28876   // If the subtarget is not 64-bit, we may need the global base reg
28877   // after isel expand pseudo, i.e., after the CGBR pass has run.
28878   // Therefore, ask for the GlobalBaseReg now, so that the pass
28879   // inserts the code for us in case we need it.
28880   // Otherwise, we would end up referencing a virtual register
28881   // that is not defined!
28882   if (!Subtarget.is64Bit()) {
28883     const X86InstrInfo *TII = Subtarget.getInstrInfo();
28884     (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
28885   }
28886   return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
28887                      DAG.getVTList(MVT::i32, MVT::Other),
28888                      Op.getOperand(0), Op.getOperand(1));
28889 }
28890 
28891 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
28892                                                 SelectionDAG &DAG) const {
28893   SDLoc DL(Op);
28894   return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
28895                      Op.getOperand(0), Op.getOperand(1));
28896 }
28897 
28898 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
28899                                                        SelectionDAG &DAG) const {
28900   SDLoc DL(Op);
28901   return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
28902                      Op.getOperand(0));
28903 }
28904 
28905 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
28906   return Op.getOperand(0);
28907 }
28908 
28909 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
28910                                                 SelectionDAG &DAG) const {
28911   SDValue Root = Op.getOperand(0);
28912   SDValue Trmp = Op.getOperand(1); // trampoline
28913   SDValue FPtr = Op.getOperand(2); // nested function
28914   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
28915   SDLoc dl (Op);
28916 
28917   const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
28918   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
28919 
28920   if (Subtarget.is64Bit()) {
28921     SDValue OutChains[6];
28922 
28923     // Large code-model.
28924     const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
28925     const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
28926 
28927     const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
28928     const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
28929 
28930     const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
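    // Each two-byte sequence below is packed into an i16 constant with the REX
    // prefix in the low byte, so a little-endian store writes REX first and the
    // opcode byte second.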
28931 
28932     // Load the pointer to the nested function into R11.
28933     unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
28934     SDValue Addr = Trmp;
28935     OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
28936                                 Addr, MachinePointerInfo(TrmpAddr));
28937 
28938     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
28939                        DAG.getConstant(2, dl, MVT::i64));
28940     OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
28941                                 MachinePointerInfo(TrmpAddr, 2), Align(2));
28942 
28943     // Load the 'nest' parameter value into R10.
28944     // R10 is specified in X86CallingConv.td
28945     OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
28946     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
28947                        DAG.getConstant(10, dl, MVT::i64));
28948     OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
28949                                 Addr, MachinePointerInfo(TrmpAddr, 10));
28950 
28951     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
28952                        DAG.getConstant(12, dl, MVT::i64));
28953     OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
28954                                 MachinePointerInfo(TrmpAddr, 12), Align(2));
28955 
28956     // Jump to the nested function.
28957     OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
28958     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
28959                        DAG.getConstant(20, dl, MVT::i64));
28960     OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
28961                                 Addr, MachinePointerInfo(TrmpAddr, 20));
28962 
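    // ModRM byte: mod=0b11 (register-direct), reg=4 (the /4 opcode extension of
    // 0xFF, i.e. JMP r/m64), rm=R11 (low three bits; REX.B supplies the fourth).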
28963     unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
28964     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
28965                        DAG.getConstant(22, dl, MVT::i64));
28966     OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
28967                                 Addr, MachinePointerInfo(TrmpAddr, 22));
28968 
28969     return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
28970   } else {
28971     const Function *Func =
28972       cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
28973     CallingConv::ID CC = Func->getCallingConv();
28974     unsigned NestReg;
28975 
28976     switch (CC) {
28977     default:
28978       llvm_unreachable("Unsupported calling convention");
28979     case CallingConv::C:
28980     case CallingConv::X86_StdCall: {
28981       // Pass 'nest' parameter in ECX.
28982       // Must be kept in sync with X86CallingConv.td
28983       NestReg = X86::ECX;
28984 
28985       // Check that ECX wasn't needed by an 'inreg' parameter.
28986       FunctionType *FTy = Func->getFunctionType();
28987       const AttributeList &Attrs = Func->getAttributes();
28988 
28989       if (!Attrs.isEmpty() && !Func->isVarArg()) {
28990         unsigned InRegCount = 0;
28991         unsigned Idx = 0;
28992 
28993         for (FunctionType::param_iterator I = FTy->param_begin(),
28994              E = FTy->param_end(); I != E; ++I, ++Idx)
28995           if (Attrs.hasParamAttr(Idx, Attribute::InReg)) {
28996             const DataLayout &DL = DAG.getDataLayout();
28997             // FIXME: should only count parameters that are lowered to integers.
28998             InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
28999           }
29000 
29001         if (InRegCount > 2) {
29002           report_fatal_error("Nest register in use - reduce number of inreg"
29003                              " parameters!");
29004         }
29005       }
29006       break;
29007     }
29008     case CallingConv::X86_FastCall:
29009     case CallingConv::X86_ThisCall:
29010     case CallingConv::Fast:
29011     case CallingConv::Tail:
29012     case CallingConv::SwiftTail:
29013       // Pass 'nest' parameter in EAX.
29014       // Must be kept in sync with X86CallingConv.td
29015       NestReg = X86::EAX;
29016       break;
29017     }
29018 
29019     SDValue OutChains[4];
29020     SDValue Addr, Disp;
29021 
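    // The 32-bit trampoline is a 5-byte "mov $nest, reg" followed by a 5-byte
    // "jmp rel32"; the displacement is relative to the end of the trampoline,
    // i.e. Trmp + 10.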
29022     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29023                        DAG.getConstant(10, dl, MVT::i32));
29024     Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
29025 
29026     // This is storing the opcode for MOV32ri.
29027     const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
29028     const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
29029     OutChains[0] =
29030         DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
29031                      Trmp, MachinePointerInfo(TrmpAddr));
29032 
29033     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29034                        DAG.getConstant(1, dl, MVT::i32));
29035     OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
29036                                 MachinePointerInfo(TrmpAddr, 1), Align(1));
29037 
29038     const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
29039     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29040                        DAG.getConstant(5, dl, MVT::i32));
29041     OutChains[2] =
29042         DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8), Addr,
29043                      MachinePointerInfo(TrmpAddr, 5), Align(1));
29044 
29045     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
29046                        DAG.getConstant(6, dl, MVT::i32));
29047     OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
29048                                 MachinePointerInfo(TrmpAddr, 6), Align(1));
29049 
29050     return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
29051   }
29052 }
29053 
29054 SDValue X86TargetLowering::LowerGET_ROUNDING(SDValue Op,
29055                                              SelectionDAG &DAG) const {
29056   /*
29057    The rounding mode is in bits 11:10 of the x87 FP control word (FPCW),
29058    and has the following settings:
29059      00 Round to nearest
29060      01 Round to -inf
29061      10 Round to +inf
29062      11 Round to 0
29063 
29064   GET_ROUNDING, on the other hand, expects the following:
29065     -1 Undefined
29066      0 Round to 0
29067      1 Round to nearest
29068      2 Round to +inf
29069      3 Round to -inf
29070 
29071   To perform the conversion, we use a packed lookup table of the four 2-bit
29072   values that we can index by FPCW[11:10]
29073     0x2d --> (0b00,10,11,01) --> (0,2,3,1) >> FPCW[11:10]
29074 
29075     (0x2d >> ((FPCW & 0xc00) >> 9)) & 3
29076   */
29077 
29078   MachineFunction &MF = DAG.getMachineFunction();
29079   MVT VT = Op.getSimpleValueType();
29080   SDLoc DL(Op);
29081 
29082   // Save FP Control Word to stack slot
29083   int SSFI = MF.getFrameInfo().CreateStackObject(2, Align(2), false);
29084   SDValue StackSlot =
29085       DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
29086 
29087   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
29088 
29089   SDValue Chain = Op.getOperand(0);
29090   SDValue Ops[] = {Chain, StackSlot};
29091   Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
29092                                   DAG.getVTList(MVT::Other), Ops, MVT::i16, MPI,
29093                                   Align(2), MachineMemOperand::MOStore);
29094 
29095   // Load FP Control Word from stack slot
29096   SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, Align(2));
29097   Chain = CWD.getValue(1);
29098 
29099   // Mask and turn the control bits into a shift for the lookup table.
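  // Shifting right by 9 rather than 10 leaves the 2-bit RC field multiplied by
  // 2, which is the bit offset of its entry in the packed 2-bit lookup table.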
29100   SDValue Shift =
29101     DAG.getNode(ISD::SRL, DL, MVT::i16,
29102                 DAG.getNode(ISD::AND, DL, MVT::i16,
29103                             CWD, DAG.getConstant(0xc00, DL, MVT::i16)),
29104                 DAG.getConstant(9, DL, MVT::i8));
29105   Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Shift);
29106 
29107   SDValue LUT = DAG.getConstant(0x2d, DL, MVT::i32);
29108   SDValue RetVal =
29109     DAG.getNode(ISD::AND, DL, MVT::i32,
29110                 DAG.getNode(ISD::SRL, DL, MVT::i32, LUT, Shift),
29111                 DAG.getConstant(3, DL, MVT::i32));
29112 
29113   RetVal = DAG.getZExtOrTrunc(RetVal, DL, VT);
29114 
29115   return DAG.getMergeValues({RetVal, Chain}, DL);
29116 }
29117 
29118 SDValue X86TargetLowering::LowerSET_ROUNDING(SDValue Op,
29119                                              SelectionDAG &DAG) const {
29120   MachineFunction &MF = DAG.getMachineFunction();
29121   SDLoc DL(Op);
29122   SDValue Chain = Op.getNode()->getOperand(0);
29123 
29124   // The FP control word can be set only from data in memory, so we need to
29125   // allocate stack space to save/load the FP control word.
29126   int OldCWFrameIdx = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
29127   SDValue StackSlot =
29128       DAG.getFrameIndex(OldCWFrameIdx, getPointerTy(DAG.getDataLayout()));
29129   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, OldCWFrameIdx);
29130   MachineMemOperand *MMO =
29131       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 2, Align(2));
29132 
29133   // Store FP control word into memory.
29134   SDValue Ops[] = {Chain, StackSlot};
29135   Chain = DAG.getMemIntrinsicNode(
29136       X86ISD::FNSTCW16m, DL, DAG.getVTList(MVT::Other), Ops, MVT::i16, MMO);
29137 
29138   // Load FP Control Word from stack slot and clear RM field (bits 11:10).
29139   SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI);
29140   Chain = CWD.getValue(1);
29141   CWD = DAG.getNode(ISD::AND, DL, MVT::i16, CWD.getValue(0),
29142                     DAG.getConstant(0xf3ff, DL, MVT::i16));
29143 
29144   // Calculate new rounding mode.
29145   SDValue NewRM = Op.getNode()->getOperand(1);
29146   SDValue RMBits;
29147   if (auto *CVal = dyn_cast<ConstantSDNode>(NewRM)) {
29148     uint64_t RM = CVal->getZExtValue();
29149     int FieldVal;
29150     switch (static_cast<RoundingMode>(RM)) {
29151     case RoundingMode::NearestTiesToEven: FieldVal = X86::rmToNearest; break;
29152     case RoundingMode::TowardNegative:    FieldVal = X86::rmDownward; break;
29153     case RoundingMode::TowardPositive:    FieldVal = X86::rmUpward; break;
29154     case RoundingMode::TowardZero:        FieldVal = X86::rmTowardZero; break;
29155     default:
29156       llvm_unreachable("rounding mode is not supported by X86 hardware");
29157     }
29158     RMBits = DAG.getConstant(FieldVal, DL, MVT::i16);
29159   } else {
29160     // Need to convert argument into bits of control word:
29161     //    0 Round to 0       -> 11
29162     //    1 Round to nearest -> 00
29163     //    2 Round to +inf    -> 10
29164     //    3 Round to -inf    -> 01
29165     // The 2-bit value needs then to be shifted so that it occupies bits 11:10.
29166     // To make the conversion, put all these values into a value 0xc9 and shift
29167     // it left depending on the rounding mode:
29168     //    (0xc9 << 4) & 0xc00 = X86::rmTowardZero
29169     //    (0xc9 << 6) & 0xc00 = X86::rmToNearest
29170     //    ...
29171     // (0xc9 << (2 * NewRM + 4)) & 0xc00
29172     SDValue ShiftValue =
29173         DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
29174                     DAG.getNode(ISD::ADD, DL, MVT::i32,
29175                                 DAG.getNode(ISD::SHL, DL, MVT::i32, NewRM,
29176                                             DAG.getConstant(1, DL, MVT::i8)),
29177                                 DAG.getConstant(4, DL, MVT::i32)));
29178     SDValue Shifted =
29179         DAG.getNode(ISD::SHL, DL, MVT::i16, DAG.getConstant(0xc9, DL, MVT::i16),
29180                     ShiftValue);
29181     RMBits = DAG.getNode(ISD::AND, DL, MVT::i16, Shifted,
29182                          DAG.getConstant(0xc00, DL, MVT::i16));
29183   }
29184 
29185   // Update rounding mode bits and store the new FP Control Word into stack.
29186   CWD = DAG.getNode(ISD::OR, DL, MVT::i16, CWD, RMBits);
29187   Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(2));
29188 
29189   // Load FP control word from the slot.
29190   SDValue OpsLD[] = {Chain, StackSlot};
29191   MachineMemOperand *MMOL =
29192       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 2, Align(2));
29193   Chain = DAG.getMemIntrinsicNode(
29194       X86ISD::FLDCW16m, DL, DAG.getVTList(MVT::Other), OpsLD, MVT::i16, MMOL);
29195 
29196   // If target supports SSE, set MXCSR as well. Rounding mode is encoded in the
29197   // same way but in bits 14:13.
29198   if (Subtarget.hasSSE1()) {
29199     // Store MXCSR into memory.
29200     Chain = DAG.getNode(
29201         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
29202         DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
29203         StackSlot);
29204 
29205     // Load MXCSR from stack slot and clear RM field (bits 14:13).
29206     SDValue CWD = DAG.getLoad(MVT::i32, DL, Chain, StackSlot, MPI);
29207     Chain = CWD.getValue(1);
29208     CWD = DAG.getNode(ISD::AND, DL, MVT::i32, CWD.getValue(0),
29209                       DAG.getConstant(0xffff9fff, DL, MVT::i32));
29210 
29211     // Shift X87 RM bits from 11:10 to 14:13.
29212     RMBits = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, RMBits);
29213     RMBits = DAG.getNode(ISD::SHL, DL, MVT::i32, RMBits,
29214                          DAG.getConstant(3, DL, MVT::i8));
29215 
29216     // Update rounding mode bits and store the new FP Control Word into stack.
29217     CWD = DAG.getNode(ISD::OR, DL, MVT::i32, CWD, RMBits);
29218     Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(4));
29219 
29220     // Load MXCSR from the slot.
29221     Chain = DAG.getNode(
29222         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
29223         DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
29224         StackSlot);
29225   }
29226 
29227   return Chain;
29228 }
29229 
29230 /// Lower a vector CTLZ using native supported vector CTLZ instruction.
29231 //
29232 // i8/i16 vector implemented using dword LZCNT vector instruction
29233 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
29234 // split the vector, perform the operation on its Lo and Hi parts, and
29235 // concatenate the results.
29236 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
29237                                          const X86Subtarget &Subtarget) {
29238   assert(Op.getOpcode() == ISD::CTLZ);
29239   SDLoc dl(Op);
29240   MVT VT = Op.getSimpleValueType();
29241   MVT EltVT = VT.getVectorElementType();
29242   unsigned NumElems = VT.getVectorNumElements();
29243 
29244   assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
29245           "Unsupported element type");
29246 
29247   // Split the vector; its Lo and Hi parts will be handled in the next iteration.
29248   if (NumElems > 16 ||
29249       (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
29250     return splitVectorIntUnary(Op, DAG);
29251 
29252   MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
29253   assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
29254           "Unsupported value type for operation");
29255 
29256   // Use native supported vector instruction vplzcntd.
29257   Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
29258   SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
29259   SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
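  // Zero-extending to i32 added (32 - EltBits) extra leading zeros; subtract
  // them from the per-element counts.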
29260   SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
29261 
29262   return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
29263 }
29264 
29265 // Lower CTLZ using a PSHUFB lookup table implementation.
29266 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
29267                                        const X86Subtarget &Subtarget,
29268                                        SelectionDAG &DAG) {
29269   MVT VT = Op.getSimpleValueType();
29270   int NumElts = VT.getVectorNumElements();
29271   int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
29272   MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
29273 
29274   // Per-nibble leading zero PSHUFB lookup table.
29275   const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
29276                        /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
29277                        /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
29278                        /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
29279 
29280   SmallVector<SDValue, 64> LUTVec;
29281   for (int i = 0; i < NumBytes; ++i)
29282     LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
29283   SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
29284 
29285   // Begin by bitcasting the input to byte vector, then split those bytes
29286   // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
29287   // If the hi input nibble is zero then we add both results together, otherwise
29288   // we just take the hi result (by masking the lo result to zero before the
29289   // add).
29290   SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
29291   SDValue Zero = DAG.getConstant(0, DL, CurrVT);
29292 
29293   SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
29294   SDValue Lo = Op0;
29295   SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
29296   SDValue HiZ;
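  // On 512-bit types the compare produces a vXi1 mask, so sign-extend it back
  // to a vector of all-ones / all-zeros lanes before using it as a byte mask.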
29297   if (CurrVT.is512BitVector()) {
29298     MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
29299     HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
29300     HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
29301   } else {
29302     HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
29303   }
29304 
29305   Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
29306   Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
29307   Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
29308   SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
29309 
29310   // Merge result back from vXi8 back to VT, working on the lo/hi halves
29311   // of the current vector width in the same way we did for the nibbles.
29312   // If the upper half of the input element is zero then add the halves'
29313   // leading zero counts together, otherwise just use the upper half's.
29314   // Double the width of the result until we are at target width.
29315   while (CurrVT != VT) {
29316     int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
29317     int CurrNumElts = CurrVT.getVectorNumElements();
29318     MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
29319     MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
29320     SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
29321 
29322     // Check if the upper half of the input element is zero.
29323     if (CurrVT.is512BitVector()) {
29324       MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
29325       HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
29326                          DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
29327       HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
29328     } else {
29329       HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
29330                          DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
29331     }
29332     HiZ = DAG.getBitcast(NextVT, HiZ);
29333 
29334     // Move the upper/lower halves to the lower bits as we'll be extending to
29335     // NextVT. Mask the lower result to zero if HiZ is true and add the results
29336     // together.
29337     SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
29338     SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
29339     SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
29340     R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
29341     Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
29342     CurrVT = NextVT;
29343   }
29344 
29345   return Res;
29346 }
29347 
29348 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
29349                                const X86Subtarget &Subtarget,
29350                                SelectionDAG &DAG) {
29351   MVT VT = Op.getSimpleValueType();
29352 
29353   if (Subtarget.hasCDI() &&
29354       // vXi8 vectors need to be promoted to 512-bits for vXi32.
29355       (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
29356     return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
29357 
29358   // Decompose 256-bit ops into smaller 128-bit ops.
29359   if (VT.is256BitVector() && !Subtarget.hasInt256())
29360     return splitVectorIntUnary(Op, DAG);
29361 
29362   // Decompose 512-bit ops into smaller 256-bit ops.
29363   if (VT.is512BitVector() && !Subtarget.hasBWI())
29364     return splitVectorIntUnary(Op, DAG);
29365 
29366   assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
29367   return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
29368 }
29369 
29370 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
29371                          SelectionDAG &DAG) {
29372   MVT VT = Op.getSimpleValueType();
29373   MVT OpVT = VT;
29374   unsigned NumBits = VT.getSizeInBits();
29375   SDLoc dl(Op);
29376   unsigned Opc = Op.getOpcode();
29377 
29378   if (VT.isVector())
29379     return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
29380 
29381   Op = Op.getOperand(0);
29382   if (VT == MVT::i8) {
29383     // Zero extend to i32 since there is no i8 bsr.
29384     OpVT = MVT::i32;
29385     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
29386   }
29387 
29388   // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
29389   SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
29390   Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
29391 
29392   if (Opc == ISD::CTLZ) {
29393     // If src is zero (i.e. bsr sets ZF), select 2*NumBits-1 so that the
    // final xor with NumBits-1 below yields NumBits.
29394     SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
29395                      DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
29396                      Op.getValue(1)};
29397     Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
29398   }
29399 
29400   // Finally xor with NumBits-1: BSR gives the index of the highest set bit,
  // and for a power-of-two NumBits, (NumBits-1) - Index == Index ^ (NumBits-1).
29401   Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
29402                    DAG.getConstant(NumBits - 1, dl, OpVT));
29403 
29404   if (VT == MVT::i8)
29405     Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
29406   return Op;
29407 }
29408 
29409 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
29410                          SelectionDAG &DAG) {
29411   MVT VT = Op.getSimpleValueType();
29412   unsigned NumBits = VT.getScalarSizeInBits();
29413   SDValue N0 = Op.getOperand(0);
29414   SDLoc dl(Op);
29415 
29416   assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
29417          "Only scalar CTTZ requires custom lowering");
29418 
29419   // Issue a bsf (scan bits forward) which also sets EFLAGS.
29420   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
29421   Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
29422 
29423   // If src is known never zero we can skip the CMOV.
29424   if (DAG.isKnownNeverZero(N0))
29425     return Op;
29426 
29427   // If src is zero (i.e. bsf sets ZF), returns NumBits.
29428   SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
29429                    DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
29430                    Op.getValue(1)};
29431   return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
29432 }
29433 
29434 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
29435                            const X86Subtarget &Subtarget) {
29436   MVT VT = Op.getSimpleValueType();
29437   if (VT == MVT::i16 || VT == MVT::i32)
29438     return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
29439 
29440   if (VT == MVT::v32i16 || VT == MVT::v64i8)
29441     return splitVectorIntBinary(Op, DAG);
29442 
29443   assert(Op.getSimpleValueType().is256BitVector() &&
29444          Op.getSimpleValueType().isInteger() &&
29445          "Only handle AVX 256-bit vector integer operation");
29446   return splitVectorIntBinary(Op, DAG);
29447 }
29448 
29449 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
29450                                   const X86Subtarget &Subtarget) {
29451   MVT VT = Op.getSimpleValueType();
29452   SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
29453   unsigned Opcode = Op.getOpcode();
29454   SDLoc DL(Op);
29455 
29456   if (VT == MVT::v32i16 || VT == MVT::v64i8 ||
29457       (VT.is256BitVector() && !Subtarget.hasInt256())) {
29458     assert(Op.getSimpleValueType().isInteger() &&
29459            "Only handle AVX vector integer operation");
29460     return splitVectorIntBinary(Op, DAG);
29461   }
29462 
29463   // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
29464   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29465   EVT SetCCResultType =
29466       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
29467 
29468   unsigned BitWidth = VT.getScalarSizeInBits();
29469   if (Opcode == ISD::USUBSAT) {
29470     if (!TLI.isOperationLegal(ISD::UMAX, VT) || useVPTERNLOG(Subtarget, VT)) {
29471       // Handle a special-case with a bit-hack instead of cmp+select:
29472       // usubsat X, SMIN --> (X ^ SMIN) & (X s>> BW-1)
29473       // If the target can use VPTERNLOG, DAGToDAG will match this as
29474       // "vpsra + vpternlog" which is better than "vpmax + vpsub" with a
29475       // "broadcast" constant load.
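      // E.g. for vXi16 elements SMIN is 0x8000: (X s>> 15) is all-ones iff
      // the sign bit of X is set, i.e. iff X u>= SMIN, and in that case
      // X ^ SMIN == X - SMIN; otherwise the AND zeroes the lane, which
      // matches usubsat(X, SMIN).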
29476       ConstantSDNode *C = isConstOrConstSplat(Y, true);
29477       if (C && C->getAPIntValue().isSignMask()) {
29478         SDValue SignMask = DAG.getConstant(C->getAPIntValue(), DL, VT);
29479         SDValue ShiftAmt = DAG.getConstant(BitWidth - 1, DL, VT);
29480         SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, SignMask);
29481         SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShiftAmt);
29482         return DAG.getNode(ISD::AND, DL, VT, Xor, Sra);
29483       }
29484     }
29485     if (!TLI.isOperationLegal(ISD::UMAX, VT)) {
29486       // usubsat X, Y --> (X >u Y) ? X - Y : 0
29487       SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
29488       SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
29489       // TODO: Move this to DAGCombiner?
29490       if (SetCCResultType == VT &&
29491           DAG.ComputeNumSignBits(Cmp) == VT.getScalarSizeInBits())
29492         return DAG.getNode(ISD::AND, DL, VT, Cmp, Sub);
29493       return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
29494     }
29495   }
29496 
29497   if ((Opcode == ISD::SADDSAT || Opcode == ISD::SSUBSAT) &&
29498       (!VT.isVector() || VT == MVT::v2i64)) {
29499     APInt MinVal = APInt::getSignedMinValue(BitWidth);
29500     APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
29501     SDValue Zero = DAG.getConstant(0, DL, VT);
29502     SDValue Result =
29503         DAG.getNode(Opcode == ISD::SADDSAT ? ISD::SADDO : ISD::SSUBO, DL,
29504                     DAG.getVTList(VT, SetCCResultType), X, Y);
29505     SDValue SumDiff = Result.getValue(0);
29506     SDValue Overflow = Result.getValue(1);
29507     SDValue SatMin = DAG.getConstant(MinVal, DL, VT);
29508     SDValue SatMax = DAG.getConstant(MaxVal, DL, VT);
29509     SDValue SumNeg =
29510         DAG.getSetCC(DL, SetCCResultType, SumDiff, Zero, ISD::SETLT);
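    // On signed overflow the wrapped SumDiff has the opposite sign of the
    // exact result, so a negative SumDiff means we overflowed towards the
    // positive side and must saturate to SatMax (and vice versa).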
29511     Result = DAG.getSelect(DL, VT, SumNeg, SatMax, SatMin);
29512     return DAG.getSelect(DL, VT, Overflow, Result, SumDiff);
29513   }
29514 
29515   // Use default expansion.
29516   return SDValue();
29517 }
29518 
29519 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
29520                         SelectionDAG &DAG) {
29521   MVT VT = Op.getSimpleValueType();
29522   if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
29523     // Since X86 does not have CMOV for 8-bit integer, we don't convert
29524     // 8-bit integer abs to NEG and CMOV.
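    // abs(X) = X < 0 ? 0 - X : X. Compute Neg = 0 - X (which sets EFLAGS) and
    // use a CMOV on the sign flag to pick between X and Neg.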
29525     SDLoc DL(Op);
29526     SDValue N0 = Op.getOperand(0);
29527     SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
29528                               DAG.getConstant(0, DL, VT), N0);
29529     SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_NS, DL, MVT::i8),
29530                      SDValue(Neg.getNode(), 1)};
29531     return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
29532   }
29533 
29534   // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
29535   if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
29536     SDLoc DL(Op);
29537     SDValue Src = Op.getOperand(0);
29538     SDValue Sub =
29539         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
29540     return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
29541   }
29542 
29543   if (VT.is256BitVector() && !Subtarget.hasInt256()) {
29544     assert(VT.isInteger() &&
29545            "Only handle AVX 256-bit vector integer operation");
29546     return splitVectorIntUnary(Op, DAG);
29547   }
29548 
29549   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
29550     return splitVectorIntUnary(Op, DAG);
29551 
29552   // Default to expand.
29553   return SDValue();
29554 }
29555 
29556 static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget,
29557                         SelectionDAG &DAG) {
29558   MVT VT = Op.getSimpleValueType();
29559 
29560   // For AVX1 cases, split to use legal ops.
29561   if (VT.is256BitVector() && !Subtarget.hasInt256())
29562     return splitVectorIntBinary(Op, DAG);
29563 
29564   if (VT == MVT::v32i16 || VT == MVT::v64i8)
29565     return splitVectorIntBinary(Op, DAG);
29566 
29567   // Default to expand.
29568   return SDValue();
29569 }
29570 
29571 static SDValue LowerMINMAX(SDValue Op, const X86Subtarget &Subtarget,
29572                            SelectionDAG &DAG) {
29573   MVT VT = Op.getSimpleValueType();
29574 
29575   // For AVX1 cases, split to use legal ops.
29576   if (VT.is256BitVector() && !Subtarget.hasInt256())
29577     return splitVectorIntBinary(Op, DAG);
29578 
29579   if (VT == MVT::v32i16 || VT == MVT::v64i8)
29580     return splitVectorIntBinary(Op, DAG);
29581 
29582   // Default to expand.
29583   return SDValue();
29584 }
29585 
29586 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
29587                         SelectionDAG &DAG) {
29588   SDLoc dl(Op);
29589   MVT VT = Op.getSimpleValueType();
29590 
29591   // Decompose 256-bit ops into 128-bit ops.
29592   if (VT.is256BitVector() && !Subtarget.hasInt256())
29593     return splitVectorIntBinary(Op, DAG);
29594 
29595   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
29596     return splitVectorIntBinary(Op, DAG);
29597 
29598   SDValue A = Op.getOperand(0);
29599   SDValue B = Op.getOperand(1);
29600 
29601   // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
29602   // vector pairs, multiply and truncate.
29603   if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
29604     unsigned NumElts = VT.getVectorNumElements();
29605 
29606     if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
29607         (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
29608       MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
29609       return DAG.getNode(
29610           ISD::TRUNCATE, dl, VT,
29611           DAG.getNode(ISD::MUL, dl, ExVT,
29612                       DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
29613                       DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
29614     }
29615 
29616     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
29617 
29618     // Extract the lo/hi parts and any-extend them to i16.
29619     // We're going to mask off the low byte of each result element of the
29620     // pmullw, so it doesn't matter what's in the high byte of each 16-bit
29621     // element.
29622     SDValue Undef = DAG.getUNDEF(VT);
29623     SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
29624     SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
29625 
29626     SDValue BLo, BHi;
29627     if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
29628       // If the RHS is a constant, manually unpackl/unpackh.
29629       SmallVector<SDValue, 16> LoOps, HiOps;
29630       for (unsigned i = 0; i != NumElts; i += 16) {
29631         for (unsigned j = 0; j != 8; ++j) {
29632           LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
29633                                                MVT::i16));
29634           HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
29635                                                MVT::i16));
29636         }
29637       }
29638 
29639       BLo = DAG.getBuildVector(ExVT, dl, LoOps);
29640       BHi = DAG.getBuildVector(ExVT, dl, HiOps);
29641     } else {
29642       BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
29643       BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
29644     }
29645 
29646     // Multiply, mask the lower 8 bits of the lo/hi results and pack.
29647     SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
29648     SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
29649     return getPack(DAG, Subtarget, dl, VT, RLo, RHi);
29650   }
29651 
29652   // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
29653   if (VT == MVT::v4i32) {
29654     assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
29655            "Should not custom lower when pmulld is available!");
29656 
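    // PMULUDQ only multiplies the even (0 and 2) i32 lanes into 64-bit
    // products, so shuffle the odd lanes into even positions and multiply
    // again. The low 32 bits of each 64-bit product are the desired i32
    // results; the final {0, 4, 2, 6} shuffle interleaves them back into
    // lane order.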
29657     // Extract the odd parts.
29658     static const int UnpackMask[] = { 1, -1, 3, -1 };
29659     SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
29660     SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
29661 
29662     // Multiply the even parts.
29663     SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
29664                                 DAG.getBitcast(MVT::v2i64, A),
29665                                 DAG.getBitcast(MVT::v2i64, B));
29666     // Now multiply odd parts.
29667     SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
29668                                DAG.getBitcast(MVT::v2i64, Aodds),
29669                                DAG.getBitcast(MVT::v2i64, Bodds));
29670 
29671     Evens = DAG.getBitcast(VT, Evens);
29672     Odds = DAG.getBitcast(VT, Odds);
29673 
29674     // Merge the two vectors back together with a shuffle. This expands into 2
29675     // shuffles.
29676     static const int ShufMask[] = { 0, 4, 2, 6 };
29677     return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
29678   }
29679 
29680   assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
29681          "Only know how to lower V2I64/V4I64/V8I64 multiply");
29682   assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
29683 
29684   //  Ahi = psrlqi(a, 32);
29685   //  Bhi = psrlqi(b, 32);
29686   //
29687   //  AloBlo = pmuludq(a, b);
29688   //  AloBhi = pmuludq(a, Bhi);
29689   //  AhiBlo = pmuludq(Ahi, b);
29690   //
29691   //  Hi = psllqi(AloBhi + AhiBlo, 32);
29692   //  return AloBlo + Hi;
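  // This is the low 64 bits of the full product: with A = Ahi*2^32 + Alo and
  // B = Bhi*2^32 + Blo, A*B mod 2^64 == AloBlo + ((AloBhi + AhiBlo) << 32);
  // the Ahi*Bhi term is shifted out entirely.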
29693   KnownBits AKnown = DAG.computeKnownBits(A);
29694   KnownBits BKnown = DAG.computeKnownBits(B);
29695 
29696   APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
29697   bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
29698   bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
29699 
29700   APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
29701   bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
29702   bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
29703 
29704   SDValue Zero = DAG.getConstant(0, dl, VT);
29705 
29706   // Only multiply lo/hi halves that aren't known to be zero.
29707   SDValue AloBlo = Zero;
29708   if (!ALoIsZero && !BLoIsZero)
29709     AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
29710 
29711   SDValue AloBhi = Zero;
29712   if (!ALoIsZero && !BHiIsZero) {
29713     SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
29714     AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
29715   }
29716 
29717   SDValue AhiBlo = Zero;
29718   if (!AHiIsZero && !BLoIsZero) {
29719     SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
29720     AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
29721   }
29722 
29723   SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
29724   Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
29725 
29726   return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
29727 }
29728 
29729 static SDValue LowervXi8MulWithUNPCK(SDValue A, SDValue B, const SDLoc &dl,
29730                                      MVT VT, bool IsSigned,
29731                                      const X86Subtarget &Subtarget,
29732                                      SelectionDAG &DAG,
29733                                      SDValue *Low = nullptr) {
29734   unsigned NumElts = VT.getVectorNumElements();
29735 
29736   // For vXi8 we will unpack the low and high half of each 128 bit lane to widen
29737   // to a vXi16 type. Do the multiplies, shift the results and pack the half
29738   // lane results back together.
29739 
29740   // We'll take different approaches for signed and unsigned.
29741   // For unsigned we'll use punpcklbw/punpckhbw to zero extend the bytes to
29742   // words and use pmullw to calculate the full 16-bit product.
29743   // For signed we'll use punpcklbw/punpckhbw to extend the bytes to words and
29744   // shift them left into the upper byte of each word. This allows us to use
29745   // pmulhw to calculate the full 16-bit product. This trick means we don't
29746   // need to sign extend the bytes to use pmullw.
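  // E.g. for signed bytes a = -3 (0xFD) and b = 5, the unpacked words are
  // 0xFD00 (-768) and 0x0500 (1280); pmulhw gives (-768 * 1280) >> 16 = -15,
  // i.e. the full 16-bit signed product a*b.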
29747 
29748   MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
29749   SDValue Zero = DAG.getConstant(0, dl, VT);
29750 
29751   SDValue ALo, AHi;
29752   if (IsSigned) {
29753     ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, A));
29754     AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, A));
29755   } else {
29756     ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Zero));
29757     AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Zero));
29758   }
29759 
29760   SDValue BLo, BHi;
29761   if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
29762     // If the RHS is a constant, manually unpackl/unpackh and extend.
29763     SmallVector<SDValue, 16> LoOps, HiOps;
29764     for (unsigned i = 0; i != NumElts; i += 16) {
29765       for (unsigned j = 0; j != 8; ++j) {
29766         SDValue LoOp = B.getOperand(i + j);
29767         SDValue HiOp = B.getOperand(i + j + 8);
29768 
29769         if (IsSigned) {
29770           LoOp = DAG.getAnyExtOrTrunc(LoOp, dl, MVT::i16);
29771           HiOp = DAG.getAnyExtOrTrunc(HiOp, dl, MVT::i16);
29772           LoOp = DAG.getNode(ISD::SHL, dl, MVT::i16, LoOp,
29773                              DAG.getConstant(8, dl, MVT::i16));
29774           HiOp = DAG.getNode(ISD::SHL, dl, MVT::i16, HiOp,
29775                              DAG.getConstant(8, dl, MVT::i16));
29776         } else {
29777           LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
29778           HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
29779         }
29780 
29781         LoOps.push_back(LoOp);
29782         HiOps.push_back(HiOp);
29783       }
29784     }
29785 
29786     BLo = DAG.getBuildVector(ExVT, dl, LoOps);
29787     BHi = DAG.getBuildVector(ExVT, dl, HiOps);
29788   } else if (IsSigned) {
29789     BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, B));
29790     BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, B));
29791   } else {
29792     BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Zero));
29793     BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Zero));
29794   }
29795 
29796   // Multiply, lshr the upper 8 bits down to the lower 8 bits of the lo/hi
29797   // pack back to vXi8.
29798   unsigned MulOpc = IsSigned ? ISD::MULHS : ISD::MUL;
29799   SDValue RLo = DAG.getNode(MulOpc, dl, ExVT, ALo, BLo);
29800   SDValue RHi = DAG.getNode(MulOpc, dl, ExVT, AHi, BHi);
29801 
29802   if (Low)
29803     *Low = getPack(DAG, Subtarget, dl, VT, RLo, RHi);
29804 
29805   return getPack(DAG, Subtarget, dl, VT, RLo, RHi, /*PackHiHalf*/ true);
29806 }
29807 
29808 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
29809                          SelectionDAG &DAG) {
29810   SDLoc dl(Op);
29811   MVT VT = Op.getSimpleValueType();
29812   bool IsSigned = Op->getOpcode() == ISD::MULHS;
29813   unsigned NumElts = VT.getVectorNumElements();
29814   SDValue A = Op.getOperand(0);
29815   SDValue B = Op.getOperand(1);
29816 
29817   // Decompose 256-bit ops into 128-bit ops.
29818   if (VT.is256BitVector() && !Subtarget.hasInt256())
29819     return splitVectorIntBinary(Op, DAG);
29820 
29821   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
29822     return splitVectorIntBinary(Op, DAG);
29823 
29824   if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
29825     assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
29826            (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
29827            (VT == MVT::v16i32 && Subtarget.hasAVX512()));
29828 
29829     // PMULxD operations multiply each even value (starting at 0) of LHS with
29830     // the related value of RHS and produce a widened result.
29831     // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
29832     // => <2 x i64> <ae|cg>
29833     //
29834     // In other words, to have all the results, we need to perform two PMULxD:
29835     // 1. one with the even values.
29836     // 2. one with the odd values.
29837     // To achieve #2, we need to place the odd values at an even position.
29838     //
29839     // Place the odd value at an even position (basically, shift all values 1
29840     // step to the left):
29841     const int Mask[] = {1, -1,  3, -1,  5, -1,  7, -1,
29842                         9, -1, 11, -1, 13, -1, 15, -1};
29843     // <a|b|c|d> => <b|undef|d|undef>
29844     SDValue Odd0 =
29845         DAG.getVectorShuffle(VT, dl, A, A, ArrayRef(&Mask[0], NumElts));
29846     // <e|f|g|h> => <f|undef|h|undef>
29847     SDValue Odd1 =
29848         DAG.getVectorShuffle(VT, dl, B, B, ArrayRef(&Mask[0], NumElts));
29849 
29850     // Emit two multiplies, one for the lower 2 ints and one for the higher 2
29851     // ints.
29852     MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
29853     unsigned Opcode =
29854         (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
29855     // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
29856     // => <2 x i64> <ae|cg>
29857     SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
29858                                                   DAG.getBitcast(MulVT, A),
29859                                                   DAG.getBitcast(MulVT, B)));
29860     // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
29861     // => <2 x i64> <bf|dh>
29862     SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
29863                                                   DAG.getBitcast(MulVT, Odd0),
29864                                                   DAG.getBitcast(MulVT, Odd1)));
29865 
29866     // Shuffle it back into the right order.
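    // E.g. for v4i32 the mask is {1, 5, 3, 7}: the high i32 halves of the
    // even-lane products in Mul1 interleaved with the high halves of the
    // odd-lane products in Mul2.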
29867     SmallVector<int, 16> ShufMask(NumElts);
29868     for (int i = 0; i != (int)NumElts; ++i)
29869       ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
29870 
29871     SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
29872 
29873     // If we have a signed multiply but no PMULDQ fix up the result of an
29874     // unsigned multiply.
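    // mulhs(A, B) == mulhu(A, B) - (A < 0 ? B : 0) - (B < 0 ? A : 0), since
    // reinterpreting a negative operand as unsigned adds 2^BW times the other
    // operand to the full product.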
29875     if (IsSigned && !Subtarget.hasSSE41()) {
29876       SDValue Zero = DAG.getConstant(0, dl, VT);
29877       SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
29878                                DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
29879       SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
29880                                DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
29881 
29882       SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
29883       Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
29884     }
29885 
29886     return Res;
29887   }
29888 
29889   // Only i8 vectors should need custom lowering after this.
29890   assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
29891          (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
29892          "Unsupported vector type");
29893 
29894   // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
29895   // logical shift down the upper half and pack back to i8.
29896 
29897   // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
29898   // and then ashr/lshr the upper bits down to the lower bits before multiply.
29899 
29900   if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
29901       (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
29902     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
29903     unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
29904     SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
29905     SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
29906     SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
29907     Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
29908     return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
29909   }
29910 
29911   return LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG);
29912 }
29913 
29914 // Custom lowering for SMULO/UMULO.
29915 static SDValue LowerMULO(SDValue Op, const X86Subtarget &Subtarget,
29916                          SelectionDAG &DAG) {
29917   MVT VT = Op.getSimpleValueType();
29918 
29919   // Scalars defer to LowerXALUO.
29920   if (!VT.isVector())
29921     return LowerXALUO(Op, DAG);
29922 
29923   SDLoc dl(Op);
29924   bool IsSigned = Op->getOpcode() == ISD::SMULO;
29925   SDValue A = Op.getOperand(0);
29926   SDValue B = Op.getOperand(1);
29927   EVT OvfVT = Op->getValueType(1);
29928 
29929   if ((VT == MVT::v32i8 && !Subtarget.hasInt256()) ||
29930       (VT == MVT::v64i8 && !Subtarget.hasBWI())) {
29931     // Extract the LHS Lo/Hi vectors
29932     SDValue LHSLo, LHSHi;
29933     std::tie(LHSLo, LHSHi) = splitVector(A, DAG, dl);
29934 
29935     // Extract the RHS Lo/Hi vectors
29936     SDValue RHSLo, RHSHi;
29937     std::tie(RHSLo, RHSHi) = splitVector(B, DAG, dl);
29938 
29939     EVT LoOvfVT, HiOvfVT;
29940     std::tie(LoOvfVT, HiOvfVT) = DAG.GetSplitDestVTs(OvfVT);
29941     SDVTList LoVTs = DAG.getVTList(LHSLo.getValueType(), LoOvfVT);
29942     SDVTList HiVTs = DAG.getVTList(LHSHi.getValueType(), HiOvfVT);
29943 
29944     // Issue the split operations.
29945     SDValue Lo = DAG.getNode(Op.getOpcode(), dl, LoVTs, LHSLo, RHSLo);
29946     SDValue Hi = DAG.getNode(Op.getOpcode(), dl, HiVTs, LHSHi, RHSHi);
29947 
29948     // Join the separate data results and the overflow results.
29949     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29950     SDValue Ovf = DAG.getNode(ISD::CONCAT_VECTORS, dl, OvfVT, Lo.getValue(1),
29951                               Hi.getValue(1));
29952 
29953     return DAG.getMergeValues({Res, Ovf}, dl);
29954   }
29955 
29956   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
29957   EVT SetccVT =
29958       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
29959 
29960   if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
29961       (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
29962     unsigned NumElts = VT.getVectorNumElements();
29963     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
29964     unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
29965     SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
29966     SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
29967     SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
29968 
29969     SDValue Low = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
29970 
29971     SDValue Ovf;
29972     if (IsSigned) {
29973       SDValue High, LowSign;
29974       if (OvfVT.getVectorElementType() == MVT::i1 &&
29975           (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
29976         // Rather than truncating, try to do the compare on vXi16 or vXi32.
29977         // Shift the high down filling with sign bits.
29978         High = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Mul, 8, DAG);
29979         // Fill all 16 bits with the sign bit from the low.
29980         LowSign =
29981             getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExVT, Mul, 8, DAG);
29982         LowSign = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, LowSign,
29983                                              15, DAG);
29984         SetccVT = OvfVT;
29985         if (!Subtarget.hasBWI()) {
29986           // We can't do a vXi16 compare so sign extend to v16i32.
29987           High = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, High);
29988           LowSign = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, LowSign);
29989         }
29990       } else {
29991         // Otherwise do the compare at vXi8.
29992         High = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
29993         High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
29994         LowSign =
29995             DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
29996       }
29997 
29998       Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
29999     } else {
30000       SDValue High =
30001           getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
30002       if (OvfVT.getVectorElementType() == MVT::i1 &&
30003           (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
30004         // Rather than truncating, try to do the compare on vXi16 or vXi32.
30005         SetccVT = OvfVT;
30006         if (!Subtarget.hasBWI()) {
30007           // We can't do a vXi16 compare so sign extend to v16i32.
30008           High = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, High);
30009         }
30010       } else {
30011         // Otherwise do the compare at vXi8.
30012         High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
30013       }
30014 
30015       Ovf =
30016           DAG.getSetCC(dl, SetccVT, High,
30017                        DAG.getConstant(0, dl, High.getValueType()), ISD::SETNE);
30018     }
30019 
30020     Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);
30021 
30022     return DAG.getMergeValues({Low, Ovf}, dl);
30023   }
30024 
30025   SDValue Low;
30026   SDValue High =
30027       LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG, &Low);
30028 
30029   SDValue Ovf;
30030   if (IsSigned) {
30031     // SMULO overflows if the high bits don't match the sign of the low.
30032     SDValue LowSign =
30033         DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
30034     Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
30035   } else {
30036     // UMULO overflows if the high bits are non-zero.
30037     Ovf =
30038         DAG.getSetCC(dl, SetccVT, High, DAG.getConstant(0, dl, VT), ISD::SETNE);
30039   }
30040 
30041   Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);
30042 
30043   return DAG.getMergeValues({Low, Ovf}, dl);
30044 }
30045 
30046 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
30047   assert(Subtarget.isTargetWin64() && "Unexpected target");
30048   EVT VT = Op.getValueType();
30049   assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
30050          "Unexpected return type for lowering");
30051 
30052   if (isa<ConstantSDNode>(Op->getOperand(1))) {
30053     SmallVector<SDValue> Result;
30054     if (expandDIVREMByConstant(Op.getNode(), Result, MVT::i64, DAG))
30055       return DAG.getNode(ISD::BUILD_PAIR, SDLoc(Op), VT, Result[0], Result[1]);
30056   }
30057 
30058   RTLIB::Libcall LC;
30059   bool isSigned;
30060   switch (Op->getOpcode()) {
30061   default: llvm_unreachable("Unexpected request for libcall!");
30062   case ISD::SDIV:      isSigned = true;  LC = RTLIB::SDIV_I128;    break;
30063   case ISD::UDIV:      isSigned = false; LC = RTLIB::UDIV_I128;    break;
30064   case ISD::SREM:      isSigned = true;  LC = RTLIB::SREM_I128;    break;
30065   case ISD::UREM:      isSigned = false; LC = RTLIB::UREM_I128;    break;
30066   }
30067 
30068   SDLoc dl(Op);
30069   SDValue InChain = DAG.getEntryNode();
30070 
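  // Pass each i128 argument indirectly: spill it to a 16-byte aligned stack
  // slot and hand the libcall a pointer to that slot.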
30071   TargetLowering::ArgListTy Args;
30072   TargetLowering::ArgListEntry Entry;
30073   for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
30074     EVT ArgVT = Op->getOperand(i).getValueType();
30075     assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
30076            "Unexpected argument type for lowering");
30077     SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
30078     int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
30079     MachinePointerInfo MPI =
30080         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
30081     Entry.Node = StackPtr;
30082     InChain =
30083         DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MPI, Align(16));
30084     Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
30085     Entry.Ty = PointerType::get(ArgTy, 0);
30086     Entry.IsSExt = false;
30087     Entry.IsZExt = false;
30088     Args.push_back(Entry);
30089   }
30090 
30091   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
30092                                          getPointerTy(DAG.getDataLayout()));
30093 
30094   TargetLowering::CallLoweringInfo CLI(DAG);
30095   CLI.setDebugLoc(dl)
30096       .setChain(InChain)
30097       .setLibCallee(
30098           getLibcallCallingConv(LC),
30099           static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
30100           std::move(Args))
30101       .setInRegister()
30102       .setSExtResult(isSigned)
30103       .setZExtResult(!isSigned);
30104 
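  // The libcall's i128 result is modelled as a v2i64 return value and bitcast
  // back to the expected i128 VT below.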
30105   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
30106   return DAG.getBitcast(VT, CallInfo.first);
30107 }
30108 
30109 SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
30110                                                    SelectionDAG &DAG,
30111                                                    SDValue &Chain) const {
30112   assert(Subtarget.isTargetWin64() && "Unexpected target");
30113   EVT VT = Op.getValueType();
30114   bool IsStrict = Op->isStrictFPOpcode();
30115 
30116   SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
30117   EVT ArgVT = Arg.getValueType();
30118 
30119   assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
30120          "Unexpected return type for lowering");
30121 
30122   RTLIB::Libcall LC;
30123   if (Op->getOpcode() == ISD::FP_TO_SINT ||
30124       Op->getOpcode() == ISD::STRICT_FP_TO_SINT)
30125     LC = RTLIB::getFPTOSINT(ArgVT, VT);
30126   else
30127     LC = RTLIB::getFPTOUINT(ArgVT, VT);
30128   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");
30129 
30130   SDLoc dl(Op);
30131   MakeLibCallOptions CallOptions;
30132   Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
30133 
30134   SDValue Result;
30135   // Expect the i128 argument returned as a v2i64 in xmm0, cast back to the
30136   // expected VT (i128).
30137   std::tie(Result, Chain) =
30138       makeLibCall(DAG, LC, MVT::v2i64, Arg, CallOptions, dl, Chain);
30139   Result = DAG.getBitcast(VT, Result);
30140   return Result;
30141 }
30142 
30143 SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
30144                                                    SelectionDAG &DAG) const {
30145   assert(Subtarget.isTargetWin64() && "Unexpected target");
30146   EVT VT = Op.getValueType();
30147   bool IsStrict = Op->isStrictFPOpcode();
30148 
30149   SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
30150   EVT ArgVT = Arg.getValueType();
30151 
30152   assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
30153          "Unexpected argument type for lowering");
30154 
30155   RTLIB::Libcall LC;
30156   if (Op->getOpcode() == ISD::SINT_TO_FP ||
30157       Op->getOpcode() == ISD::STRICT_SINT_TO_FP)
30158     LC = RTLIB::getSINTTOFP(ArgVT, VT);
30159   else
30160     LC = RTLIB::getUINTTOFP(ArgVT, VT);
30161   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");
30162 
30163   SDLoc dl(Op);
30164   MakeLibCallOptions CallOptions;
30165   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
30166 
30167   // Pass the i128 argument as an indirect argument on the stack.
30168   SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
30169   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
30170   MachinePointerInfo MPI =
30171       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
30172   Chain = DAG.getStore(Chain, dl, Arg, StackPtr, MPI, Align(16));
30173 
30174   SDValue Result;
30175   std::tie(Result, Chain) =
30176       makeLibCall(DAG, LC, VT, StackPtr, CallOptions, dl, Chain);
30177   return IsStrict ? DAG.getMergeValues({Result, Chain}, dl) : Result;
30178 }
30179 
30180 // Return true if the required (according to Opcode) shift-imm form is natively
30181 // supported by the Subtarget
30182 static bool supportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
30183                                         unsigned Opcode) {
30184   if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
30185     return false;
30186 
30187   if (VT.getScalarSizeInBits() < 16)
30188     return false;
30189 
30190   if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
30191       (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
30192     return true;
30193 
30194   bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
30195                 (VT.is256BitVector() && Subtarget.hasInt256());
30196 
30197   bool AShift = LShift && (Subtarget.hasAVX512() ||
30198                            (VT != MVT::v2i64 && VT != MVT::v4i64));
30199   return (Opcode == ISD::SRA) ? AShift : LShift;
30200 }
30201 
30202 // The shift amount is a variable, but it is the same for all vector lanes.
30203 // These instructions are defined together with shift-immediate.
30204 static
30205 bool supportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
30206                                       unsigned Opcode) {
30207   return supportedVectorShiftWithImm(VT, Subtarget, Opcode);
30208 }
30209 
30210 // Return true if the required (according to Opcode) variable-shift form is
30211 // natively supported by the Subtarget
30212 static bool supportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
30213                                     unsigned Opcode) {
30214   if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
30215     return false;
30216 
30217   if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
30218     return false;
30219 
30220   // vXi16 supported only on AVX-512, BWI
30221   if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
30222     return false;
30223 
30224   if (Subtarget.hasAVX512() &&
30225       (Subtarget.useAVX512Regs() || !VT.is512BitVector()))
30226     return true;
30227 
30228   bool LShift = VT.is128BitVector() || VT.is256BitVector();
30229   bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
30230   return (Opcode == ISD::SRA) ? AShift : LShift;
30231 }
30232 
30233 static SDValue LowerShiftByScalarImmediate(SDValue Op, SelectionDAG &DAG,
30234                                            const X86Subtarget &Subtarget) {
30235   MVT VT = Op.getSimpleValueType();
30236   SDLoc dl(Op);
30237   SDValue R = Op.getOperand(0);
30238   SDValue Amt = Op.getOperand(1);
30239   unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
30240 
30241   auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
30242     assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
30243     MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
30244     SDValue Ex = DAG.getBitcast(ExVT, R);
30245 
30246     // ashr(R, 63) === cmp_slt(R, 0)
30247     if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
30248       assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
30249              "Unsupported PCMPGT op");
30250       return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
30251     }
30252 
30253     if (ShiftAmt >= 32) {
30254       // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
30255       SDValue Upper =
30256           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
30257       SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
30258                                                  ShiftAmt - 32, DAG);
30259       if (VT == MVT::v2i64)
30260         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
30261       if (VT == MVT::v4i64)
30262         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
30263                                   {9, 1, 11, 3, 13, 5, 15, 7});
30264     } else {
30265       // SRA upper i32, SRL whole i64 and select lower i32.
30266       SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
30267                                                  ShiftAmt, DAG);
30268       SDValue Lower =
30269           getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
30270       Lower = DAG.getBitcast(ExVT, Lower);
30271       if (VT == MVT::v2i64)
30272         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
30273       if (VT == MVT::v4i64)
30274         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
30275                                   {8, 1, 10, 3, 12, 5, 14, 7});
30276     }
30277     return DAG.getBitcast(VT, Ex);
30278   };
30279 
30280   // Optimize shl/srl/sra with constant shift amount.
30281   APInt APIntShiftAmt;
30282   if (!X86::isConstantSplat(Amt, APIntShiftAmt))
30283     return SDValue();
30284 
30285   // If the shift amount is out of range, return undef.
30286   if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
30287     return DAG.getUNDEF(VT);
30288 
30289   uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
30290 
30291   if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) {
30292     // Hardware support for vector shifts is sparse which makes us scalarize the
30293     // vector operations in many cases. Also, on sandybridge ADD is faster than
30294     // shl: (shl V, 1) -> (add (freeze V), (freeze V))
30295     if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
30296       // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
30297       // must be 0). (add undef, undef) however can be any value. To make this
30298       // safe, we must freeze R to ensure that register allocation uses the same
30299       // register for an undefined value. This ensures that the result will
30300       // still be even and preserves the original semantics.
30301       R = DAG.getFreeze(R);
30302       return DAG.getNode(ISD::ADD, dl, VT, R, R);
30303     }
30304 
30305     return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
30306   }
30307 
30308   // i64 SRA needs to be performed as partial shifts.
30309   if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
30310        (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
30311       Op.getOpcode() == ISD::SRA)
30312     return ArithmeticShiftRight64(ShiftAmt);
30313 
30314   if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
30315       (Subtarget.hasBWI() && VT == MVT::v64i8)) {
30316     unsigned NumElts = VT.getVectorNumElements();
30317     MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
30318 
30319     // Simple i8 add case
30320     if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
30321       // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
30322       // must be 0). (add undef, undef) however can be any value. To make this
30323       // safe, we must freeze R to ensure that register allocation uses the same
30324       // register for an undefined value. This ensures that the result will
30325       // still be even and preserves the original semantics.
30326       R = DAG.getFreeze(R);
30327       return DAG.getNode(ISD::ADD, dl, VT, R, R);
30328     }
30329 
30330     // ashr(R, 7)  === cmp_slt(R, 0)
30331     if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
30332       SDValue Zeros = DAG.getConstant(0, dl, VT);
30333       if (VT.is512BitVector()) {
30334         assert(VT == MVT::v64i8 && "Unexpected element type!");
30335         SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
30336         return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
30337       }
30338       return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
30339     }
30340 
30341     // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
30342     if (VT == MVT::v16i8 && Subtarget.hasXOP())
30343       return SDValue();
30344 
30345     if (Op.getOpcode() == ISD::SHL) {
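      // E.g. (v16i8 shl X, 3) becomes a v8i16 shl by 3 followed by an AND
      // with 0xF8 in every byte to clear the bits shifted in from the lower
      // neighbouring byte.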
30346       // Make a large shift.
30347       SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
30348                                                ShiftAmt, DAG);
30349       SHL = DAG.getBitcast(VT, SHL);
30350       // Zero out the rightmost bits.
30351       APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
30352       return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
30353     }
30354     if (Op.getOpcode() == ISD::SRL) {
30355       // Make a large shift.
30356       SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
30357                                                ShiftAmt, DAG);
30358       SRL = DAG.getBitcast(VT, SRL);
30359       // Zero out the leftmost bits.
30360       APInt Mask = APInt::getLowBitsSet(8, 8 - ShiftAmt);
30361       return DAG.getNode(ISD::AND, dl, VT, SRL, DAG.getConstant(Mask, dl, VT));
30362     }
30363     if (Op.getOpcode() == ISD::SRA) {
30364       // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
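      // Mask = 0x80 >> Amt marks where the sign bit lands after the logical
      // shift; the xor/sub pair sign-extends from that position. E.g. byte
      // 0xF0 (-16) ashr 4: lshr gives 0x0F, Mask = 0x08, 0x0F ^ 0x08 = 0x07,
      // 0x07 - 0x08 = 0xFF = -1.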
30365       SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
30366 
30367       SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
30368       Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
30369       Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
30370       return Res;
30371     }
30372     llvm_unreachable("Unknown shift opcode.");
30373   }
30374 
30375   return SDValue();
30376 }
30377 
30378 static SDValue LowerShiftByScalarVariable(SDValue Op, SelectionDAG &DAG,
30379                                           const X86Subtarget &Subtarget) {
30380   MVT VT = Op.getSimpleValueType();
30381   SDLoc dl(Op);
30382   SDValue R = Op.getOperand(0);
30383   SDValue Amt = Op.getOperand(1);
30384   unsigned Opcode = Op.getOpcode();
30385   unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
30386 
30387   int BaseShAmtIdx = -1;
30388   if (SDValue BaseShAmt = DAG.getSplatSourceVector(Amt, BaseShAmtIdx)) {
30389     if (supportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode))
30390       return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, BaseShAmtIdx,
30391                                  Subtarget, DAG);
30392 
30393     // vXi8 shifts - shift as v8i16 + mask result.
30394     if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
30395          (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
30396          VT == MVT::v64i8) &&
30397         !Subtarget.hasXOP()) {
30398       unsigned NumElts = VT.getVectorNumElements();
30399       MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
30400       if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
30401         unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
30402         unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
30403 
30404         // Create the mask using vXi16 shifts. For shift-rights we need to move
30405         // the upper byte down before splatting the vXi8 mask.
30406         SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
30407         BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
30408                                       BaseShAmt, BaseShAmtIdx, Subtarget, DAG);
30409         if (Opcode != ISD::SHL)
30410           BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
30411                                                8, DAG);
30412         BitMask = DAG.getBitcast(VT, BitMask);
30413         BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
30414                                        SmallVector<int, 64>(NumElts, 0));
30415 
30416         SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
30417                                           DAG.getBitcast(ExtVT, R), BaseShAmt,
30418                                           BaseShAmtIdx, Subtarget, DAG);
30419         Res = DAG.getBitcast(VT, Res);
30420         Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
30421 
30422         if (Opcode == ISD::SRA) {
30423           // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
30424           // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
30425           SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
30426           SignMask =
30427               getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask, BaseShAmt,
30428                                   BaseShAmtIdx, Subtarget, DAG);
30429           SignMask = DAG.getBitcast(VT, SignMask);
30430           Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
30431           Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
30432         }
30433         return Res;
30434       }
30435     }
30436   }
30437 
30438   return SDValue();
30439 }
30440 
30441 // Convert a shift/rotate left amount to a multiplication scale factor.
30442 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
30443                                        const X86Subtarget &Subtarget,
30444                                        SelectionDAG &DAG) {
30445   MVT VT = Amt.getSimpleValueType();
30446   if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
30447         (Subtarget.hasInt256() && VT == MVT::v16i16) ||
30448         (Subtarget.hasAVX512() && VT == MVT::v32i16) ||
30449         (!Subtarget.hasAVX512() && VT == MVT::v16i8) ||
30450         (Subtarget.hasInt256() && VT == MVT::v32i8) ||
30451         (Subtarget.hasBWI() && VT == MVT::v64i8)))
30452     return SDValue();
30453 
30454   MVT SVT = VT.getVectorElementType();
30455   unsigned SVTBits = SVT.getSizeInBits();
30456   unsigned NumElems = VT.getVectorNumElements();
30457 
30458   APInt UndefElts;
30459   SmallVector<APInt> EltBits;
30460   if (getTargetConstantBitsFromNode(Amt, SVTBits, UndefElts, EltBits)) {
30461     APInt One(SVTBits, 1);
30462     SmallVector<SDValue> Elts(NumElems, DAG.getUNDEF(SVT));
30463     for (unsigned I = 0; I != NumElems; ++I) {
30464       if (UndefElts[I] || EltBits[I].uge(SVTBits))
30465         continue;
30466       uint64_t ShAmt = EltBits[I].getZExtValue();
30467       Elts[I] = DAG.getConstant(One.shl(ShAmt), dl, SVT);
30468     }
30469     return DAG.getBuildVector(VT, dl, Elts);
30470   }
30471 
30472   // If the target doesn't support variable shifts, use either FP conversion
30473   // or integer multiplication to avoid shifting each element individually.
30474   if (VT == MVT::v4i32) {
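    // Build 2^Amt as a float by placing Amt in the exponent field of 1.0f
    // (0x3f800000) and converting back to integer. E.g. Amt = 5:
    // (5 << 23) + 0x3f800000 = 0x42000000 = 32.0f, and FP_TO_SINT gives
    // 32 = 1 << 5.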
30475     Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
30476     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
30477                       DAG.getConstant(0x3f800000U, dl, VT));
30478     Amt = DAG.getBitcast(MVT::v4f32, Amt);
30479     return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
30480   }
30481 
30482   // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
30483   if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
30484     SDValue Z = DAG.getConstant(0, dl, VT);
30485     SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
30486     SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
30487     Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
30488     Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
30489     if (Subtarget.hasSSE41())
30490       return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
30491     return getPack(DAG, Subtarget, dl, VT, Lo, Hi);
30492   }
30493 
30494   return SDValue();
30495 }
30496 
30497 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
30498                           SelectionDAG &DAG) {
30499   MVT VT = Op.getSimpleValueType();
30500   SDLoc dl(Op);
30501   SDValue R = Op.getOperand(0);
30502   SDValue Amt = Op.getOperand(1);
30503   unsigned EltSizeInBits = VT.getScalarSizeInBits();
30504   bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
30505 
30506   unsigned Opc = Op.getOpcode();
30507   unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
30508   unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
30509 
30510   assert(VT.isVector() && "Custom lowering only for vector shifts!");
30511   assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
30512 
30513   if (SDValue V = LowerShiftByScalarImmediate(Op, DAG, Subtarget))
30514     return V;
30515 
30516   if (SDValue V = LowerShiftByScalarVariable(Op, DAG, Subtarget))
30517     return V;
30518 
30519   if (supportedVectorVarShift(VT, Subtarget, Opc))
30520     return Op;
30521 
30522   // i64 vector arithmetic shift can be emulated with the transform:
30523   // M = lshr(SIGN_MASK, Amt)
30524   // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
30525   if (((VT == MVT::v2i64 && !Subtarget.hasXOP()) ||
30526        (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
30527       Opc == ISD::SRA) {
30528     SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
30529     SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
30530     R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
30531     R = DAG.getNode(ISD::XOR, dl, VT, R, M);
30532     R = DAG.getNode(ISD::SUB, dl, VT, R, M);
30533     return R;
30534   }
30535 
30536   // XOP has 128-bit variable logical/arithmetic shifts.
30537   // +ve/-ve Amt = shift left/right.
30538   if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
30539                              VT == MVT::v8i16 || VT == MVT::v16i8)) {
30540     if (Opc == ISD::SRL || Opc == ISD::SRA) {
30541       SDValue Zero = DAG.getConstant(0, dl, VT);
30542       Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
30543     }
30544     if (Opc == ISD::SHL || Opc == ISD::SRL)
30545       return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
30546     if (Opc == ISD::SRA)
30547       return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
30548   }
30549 
30550   // v2i64 vector logical shifts can efficiently avoid scalarization - do the
30551   // shifts per-lane and then shuffle the partial results back together.
30552   if (VT == MVT::v2i64 && Opc != ISD::SRA) {
30553     // Splat the shift amounts so the scalar shifts above will catch it.
30554     SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
30555     SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
30556     SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
30557     SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
30558     return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
30559   }
30560 
30561   // If possible, lower this shift as a sequence of two shifts by
30562   // constant plus a BLENDing shuffle instead of scalarizing it.
30563   // Example:
30564   //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
30565   //
30566   // Could be rewritten as:
30567   //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
30568   //
30569   // The advantage is that the two shifts from the example would be
30570   // lowered as X86ISD::VSRLI nodes in parallel before blending.
30571   if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
30572                       (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
30573     SDValue Amt1, Amt2;
30574     unsigned NumElts = VT.getVectorNumElements();
30575     SmallVector<int, 8> ShuffleMask;
30576     for (unsigned i = 0; i != NumElts; ++i) {
30577       SDValue A = Amt->getOperand(i);
30578       if (A.isUndef()) {
30579         ShuffleMask.push_back(SM_SentinelUndef);
30580         continue;
30581       }
30582       if (!Amt1 || Amt1 == A) {
30583         ShuffleMask.push_back(i);
30584         Amt1 = A;
30585         continue;
30586       }
30587       if (!Amt2 || Amt2 == A) {
30588         ShuffleMask.push_back(i + NumElts);
30589         Amt2 = A;
30590         continue;
30591       }
30592       break;
30593     }
30594 
30595     // Only perform this blend if we can perform it without loading a mask.
30596     if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
30597         (VT != MVT::v16i16 ||
30598          is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
30599         (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
30600          canWidenShuffleElements(ShuffleMask))) {
30601       auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
30602       auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
30603       if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
30604           Cst2->getAPIntValue().ult(EltSizeInBits)) {
30605         SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
30606                                                     Cst1->getZExtValue(), DAG);
30607         SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
30608                                                     Cst2->getZExtValue(), DAG);
30609         return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
30610       }
30611     }
30612   }
30613 
30614   // If possible, lower this packed shift into a vector multiply instead of
30615   // expanding it into a sequence of scalar shifts.
30616   // For v32i8 cases, it might be quicker to split/extend to vXi16 shifts.
30617   if (Opc == ISD::SHL && !(VT == MVT::v32i8 && (Subtarget.hasXOP() ||
30618                                                 Subtarget.canExtendTo512BW())))
30619     if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
30620       return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
30621 
30622   // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we can
30623   // replace it with ISD::MULHU, creating a scale factor from (NumEltBits - Amt).
30624   if (Opc == ISD::SRL && ConstantAmt &&
30625       (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
30626     SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
30627     SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
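    // For 0 < Amt < 16, srl(x, Amt) == mulhu(x, 1 << (16 - Amt)); the Amt == 0
    // lanes (whose scale 1 << 16 can't be encoded) are fixed up below by
    // selecting the original value.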
30628     if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
30629       SDValue Zero = DAG.getConstant(0, dl, VT);
30630       SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
30631       SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
30632       return DAG.getSelect(dl, VT, ZAmt, R, Res);
30633     }
30634   }
30635 
30636   // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we can
30637   // replace it with ISD::MULHS, creating a scale factor from (NumEltBits - Amt).
30638   // TODO: Special case handling for shift by 0/1, really we can afford either
30639   // of these cases in pre-SSE41/XOP/AVX512 but not both.
30640   if (Opc == ISD::SRA && ConstantAmt &&
30641       (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
30642       ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
30643         !Subtarget.hasAVX512()) ||
30644        DAG.isKnownNeverZero(Amt))) {
30645     SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
30646     SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
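    // For 2 <= Amt < 16, sra(x, Amt) == mulhs(x, 1 << (16 - Amt)). Amt == 0 and
    // Amt == 1 can't be encoded as a positive i16 scale, so those lanes are
    // patched below with the original value and a VSRAI-by-1 respectively.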
30647     if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
30648       SDValue Amt0 =
30649           DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
30650       SDValue Amt1 =
30651           DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
30652       SDValue Sra1 =
30653           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
30654       SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
30655       Res = DAG.getSelect(dl, VT, Amt0, R, Res);
30656       return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
30657     }
30658   }
30659 
30660   // v4i32 Non Uniform Shifts.
30661   // If the shift amount is constant we can shift each lane using the SSE2
30662   // immediate shifts, else we need to zero-extend each lane to the lower i64
30663   // and shift using the SSE2 variable shifts.
30664   // The separate results can then be blended together.
30665   if (VT == MVT::v4i32) {
30666     SDValue Amt0, Amt1, Amt2, Amt3;
30667     if (ConstantAmt) {
30668       Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
30669       Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
30670       Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
30671       Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
30672     } else {
30673       // The SSE2 shifts use the lower i64 as the same shift amount for
30674       // all lanes and the upper i64 is ignored. On AVX we're better off
30675       // just zero-extending, but for SSE just duplicating the top 16-bits is
30676       // cheaper and has the same effect for out of range values.
30677       if (Subtarget.hasAVX()) {
30678         SDValue Z = DAG.getConstant(0, dl, VT);
30679         Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
30680         Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
30681         Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
30682         Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
30683       } else {
30684         SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
30685         SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
30686                                              {4, 5, 6, 7, -1, -1, -1, -1});
30687         SDValue Msk02 = getV4X86ShuffleImm8ForMask({0, 1, 1, 1}, dl, DAG);
30688         SDValue Msk13 = getV4X86ShuffleImm8ForMask({2, 3, 3, 3}, dl, DAG);
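        // {0,1,1,1} moves i32 amount 0 (i16 halves 0,1) into the low 64 bits,
        // duplicating its top 16 bits above it; {2,3,3,3} does the same for
        // amount 1. Applying both masks to Amt23 yields amounts 2 and 3.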
30689         Amt0 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk02);
30690         Amt1 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk13);
30691         Amt2 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk02);
30692         Amt3 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk13);
30693       }
30694     }
30695 
30696     unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
30697     SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
30698     SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
30699     SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
30700     SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
30701 
30702     // Merge the shifted lane results optimally with/without PBLENDW.
30703     // TODO - ideally shuffle combining would handle this.
30704     if (Subtarget.hasSSE41()) {
30705       SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
30706       SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
30707       return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
30708     }
30709     SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
30710     SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
30711     return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
30712   }
30713 
30714   // It's worth extending once and using the vXi16/vXi32 shifts for smaller
30715   // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
30716   // make the existing SSE solution better.
30717   // NOTE: We honor the preferred vector width before promoting to 512-bits.
30718   if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
30719       (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
30720       (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
30721       (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
30722       (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
30723     assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
30724            "Unexpected vector type");
30725     MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
30726     MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
30727     unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
30728     R = DAG.getNode(ExtOpc, dl, ExtVT, R);
30729     Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
30730     return DAG.getNode(ISD::TRUNCATE, dl, VT,
30731                        DAG.getNode(Opc, dl, ExtVT, R, Amt));
30732   }
30733 
30734   // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
30735   // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
30736   if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
30737       (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
30738        (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
30739       !Subtarget.hasXOP()) {
30740     int NumElts = VT.getVectorNumElements();
30741     SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
30742 
30743     // Extend constant shift amount to vXi16 (it doesn't matter if the type
30744     // isn't legal).
30745     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
30746     Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
30747     Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
30748     Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
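    // Amt now holds the constant 1 << (8 - ShiftAmt) per element; multiplying
    // the sign-/zero-extended value by it and taking the high byte of each i16
    // product performs the original right shift.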
30749     assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
30750            "Constant build vector expected");
30751 
30752     if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
30753       R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
30754                           : DAG.getZExtOrTrunc(R, dl, ExVT);
30755       R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
30756       R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
30757       return DAG.getZExtOrTrunc(R, dl, VT);
30758     }
30759 
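    // Split the per-byte scale factors so the low/high 8 bytes of each 128-bit
    // lane line up with the unpackl/unpackh interleaving of R below.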
30760     SmallVector<SDValue, 16> LoAmt, HiAmt;
30761     for (int i = 0; i != NumElts; i += 16) {
30762       for (int j = 0; j != 8; ++j) {
30763         LoAmt.push_back(Amt.getOperand(i + j));
30764         HiAmt.push_back(Amt.getOperand(i + j + 8));
30765       }
30766     }
30767 
30768     MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
30769     SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
30770     SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
30771 
30772     SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
30773     SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
30774     LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
30775     HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
30776     LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
30777     HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
30778     LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
30779     HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
30780     return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
30781   }
30782 
30783   if (VT == MVT::v16i8 ||
30784       (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
30785       (VT == MVT::v64i8 && Subtarget.hasBWI())) {
30786     MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
30787 
30788     auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
30789       if (VT.is512BitVector()) {
30790         // On AVX512BW targets we make use of the fact that VSELECT lowers
30791         // to a masked blend which selects bytes based just on the sign bit
30792         // extracted to a mask.
30793         MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
30794         V0 = DAG.getBitcast(VT, V0);
30795         V1 = DAG.getBitcast(VT, V1);
30796         Sel = DAG.getBitcast(VT, Sel);
30797         Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
30798                            ISD::SETGT);
30799         return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
30800       } else if (Subtarget.hasSSE41()) {
30801         // On SSE41 targets we can use PBLENDVB which selects bytes based just
30802         // on the sign bit.
30803         V0 = DAG.getBitcast(VT, V0);
30804         V1 = DAG.getBitcast(VT, V1);
30805         Sel = DAG.getBitcast(VT, Sel);
30806         return DAG.getBitcast(SelVT,
30807                               DAG.getNode(X86ISD::BLENDV, dl, VT, Sel, V0, V1));
30808       }
30809       // On pre-SSE41 targets we test for the sign bit by comparing to
30810       // zero - a negative value will set all bits of the lanes to true
30811       // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
30812       SDValue Z = DAG.getConstant(0, dl, SelVT);
30813       SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
30814       return DAG.getSelect(dl, SelVT, C, V0, V1);
30815     };
30816 
30817     // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
30818     // We can safely do this using i16 shifts as we're only interested in
30819     // the 3 lower bits of each byte.
30820     Amt = DAG.getBitcast(ExtVT, Amt);
30821     Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
30822     Amt = DAG.getBitcast(VT, Amt);
30823 
30824     if (Opc == ISD::SHL || Opc == ISD::SRL) {
30825       // r = VSELECT(r, shift(r, 4), a);
30826       SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
30827       R = SignBitSelect(VT, Amt, M, R);
30828 
30829       // a += a
30830       Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
30831 
30832       // r = VSELECT(r, shift(r, 2), a);
30833       M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
30834       R = SignBitSelect(VT, Amt, M, R);
30835 
30836       // a += a
30837       Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
30838 
30839       // return VSELECT(r, shift(r, 1), a);
30840       M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
30841       R = SignBitSelect(VT, Amt, M, R);
30842       return R;
30843     }
30844 
30845     if (Opc == ISD::SRA) {
30846       // For SRA we need to unpack each byte to the higher byte of a i16 vector
30847       // so we can correctly sign extend. We don't care what happens to the
30848       // lower byte.
30849       SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
30850       SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
30851       SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
30852       SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
30853       ALo = DAG.getBitcast(ExtVT, ALo);
30854       AHi = DAG.getBitcast(ExtVT, AHi);
30855       RLo = DAG.getBitcast(ExtVT, RLo);
30856       RHi = DAG.getBitcast(ExtVT, RHi);
30857 
30858       // r = VSELECT(r, shift(r, 4), a);
30859       SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
30860       SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
30861       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
30862       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
30863 
30864       // a += a
30865       ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
30866       AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
30867 
30868       // r = VSELECT(r, shift(r, 2), a);
30869       MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
30870       MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
30871       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
30872       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
30873 
30874       // a += a
30875       ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
30876       AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
30877 
30878       // r = VSELECT(r, shift(r, 1), a);
30879       MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
30880       MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
30881       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
30882       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
30883 
30884       // Logical shift the result back to the lower byte, leaving a zero upper
30885       // byte meaning that we can safely pack with PACKUSWB.
30886       RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
30887       RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
30888       return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
30889     }
30890   }
30891 
30892   if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
30893     MVT ExtVT = MVT::v8i32;
30894     SDValue Z = DAG.getConstant(0, dl, VT);
30895     SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
30896     SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
30897     SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
30898     SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
30899     ALo = DAG.getBitcast(ExtVT, ALo);
30900     AHi = DAG.getBitcast(ExtVT, AHi);
30901     RLo = DAG.getBitcast(ExtVT, RLo);
30902     RHi = DAG.getBitcast(ExtVT, RHi);
30903     SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
30904     SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
30905     Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
30906     Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
30907     return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
30908   }
30909 
30910   if (VT == MVT::v8i16) {
30911     // If we have a constant shift amount, the non-SSE41 path is best as
30912     // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
30913     bool UseSSE41 = Subtarget.hasSSE41() &&
30914                     !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
30915 
30916     auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
30917       // On SSE41 targets we can use PBLENDVB which selects bytes based just on
30918       // the sign bit.
30919       if (UseSSE41) {
30920         MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
30921         V0 = DAG.getBitcast(ExtVT, V0);
30922         V1 = DAG.getBitcast(ExtVT, V1);
30923         Sel = DAG.getBitcast(ExtVT, Sel);
30924         return DAG.getBitcast(
30925             VT, DAG.getNode(X86ISD::BLENDV, dl, ExtVT, Sel, V0, V1));
30926       }
30927       // On pre-SSE41 targets we splat the sign bit - a negative value will
30928       // set all bits of the lanes to true and VSELECT uses that in
30929       // its OR(AND(V0,C),AND(V1,~C)) lowering.
30930       SDValue C =
30931           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
30932       return DAG.getSelect(dl, VT, C, V0, V1);
30933     };
30934 
30935     // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
30936     if (UseSSE41) {
30937       // On SSE41 targets we need to replicate the shift mask in both
30938       // bytes for PBLENDVB.
30939       Amt = DAG.getNode(
30940           ISD::OR, dl, VT,
30941           getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
30942           getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
30943     } else {
30944       Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
30945     }
30946 
30947     // r = VSELECT(r, shift(r, 8), a);
30948     SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
30949     R = SignBitSelect(Amt, M, R);
30950 
30951     // a += a
30952     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
30953 
30954     // r = VSELECT(r, shift(r, 4), a);
30955     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
30956     R = SignBitSelect(Amt, M, R);
30957 
30958     // a += a
30959     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
30960 
30961     // r = VSELECT(r, shift(r, 2), a);
30962     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
30963     R = SignBitSelect(Amt, M, R);
30964 
30965     // a += a
30966     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
30967 
30968     // return VSELECT(r, shift(r, 1), a);
30969     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
30970     R = SignBitSelect(Amt, M, R);
30971     return R;
30972   }
30973 
30974   // Decompose 256-bit shifts into 128-bit shifts.
30975   if (VT.is256BitVector())
30976     return splitVectorIntBinary(Op, DAG);
30977 
30978   if (VT == MVT::v32i16 || VT == MVT::v64i8)
30979     return splitVectorIntBinary(Op, DAG);
30980 
30981   return SDValue();
30982 }
30983 
30984 static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
30985                                 SelectionDAG &DAG) {
30986   MVT VT = Op.getSimpleValueType();
30987   assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
30988          "Unexpected funnel shift opcode!");
30989 
30990   SDLoc DL(Op);
30991   SDValue Op0 = Op.getOperand(0);
30992   SDValue Op1 = Op.getOperand(1);
30993   SDValue Amt = Op.getOperand(2);
30994   unsigned EltSizeInBits = VT.getScalarSizeInBits();
30995   bool IsFSHR = Op.getOpcode() == ISD::FSHR;
30996 
30997   if (VT.isVector()) {
30998     APInt APIntShiftAmt;
30999     bool IsCstSplat = X86::isConstantSplat(Amt, APIntShiftAmt);
31000 
31001     if (Subtarget.hasVBMI2() && EltSizeInBits > 8) {
31002       if (IsFSHR)
31003         std::swap(Op0, Op1);
31004 
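      // VBMI2 provides native funnel shifts: VSHLD/VSHRD by immediate for a
      // splat-constant amount, or the per-element VSHLDV/VSHRDV forms otherwise.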
31005       if (IsCstSplat) {
31006         uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
31007         SDValue Imm = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8);
31008         return getAVX512Node(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
31009                              {Op0, Op1, Imm}, DAG, Subtarget);
31010       }
31011       return getAVX512Node(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
31012                            {Op0, Op1, Amt}, DAG, Subtarget);
31013     }
31014     assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 ||
31015             VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16 ||
31016             VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) &&
31017            "Unexpected funnel shift type!");
31018 
31019     // fshl(x,y,z) -> (unpack(y,x) << (z & (bw-1))) >> bw.
31020     // fshr(x,y,z) -> unpack(y,x) >> (z & (bw-1)).
31021     if (IsCstSplat)
31022       return SDValue();
31023 
31024     SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
31025     SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
31026     bool IsCst = ISD::isBuildVectorOfConstantSDNodes(AmtMod.getNode());
31027 
31028     // Constant vXi16 funnel shifts can be efficiently handled by default.
31029     if (IsCst && EltSizeInBits == 16)
31030       return SDValue();
31031 
31032     unsigned ShiftOpc = IsFSHR ? ISD::SRL : ISD::SHL;
31033     unsigned NumElts = VT.getVectorNumElements();
31034     MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
31035     MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
31036 
31037     // Split 256-bit integers on XOP/pre-AVX2 targets.
31038     // Split 512-bit integers on non 512-bit BWI targets.
31039     if ((VT.is256BitVector() && ((Subtarget.hasXOP() && EltSizeInBits < 16) ||
31040                                  !Subtarget.hasAVX2())) ||
31041         (VT.is512BitVector() && !Subtarget.useBWIRegs() &&
31042          EltSizeInBits < 32)) {
31043       // Pre-mask the amount modulo using the wider vector.
31044       Op = DAG.getNode(Op.getOpcode(), DL, VT, Op0, Op1, AmtMod);
31045       return splitVectorOp(Op, DAG);
31046     }
31047 
31048     // Attempt to fold scalar shift as unpack(y,x) << zext(splat(z))
31049     if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, ShiftOpc)) {
31050       int ScalarAmtIdx = -1;
31051       if (SDValue ScalarAmt = DAG.getSplatSourceVector(AmtMod, ScalarAmtIdx)) {
31052         // Uniform vXi16 funnel shifts can be efficiently handled by default.
31053         if (EltSizeInBits == 16)
31054           return SDValue();
31055 
31056         SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
31057         SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
31058         Lo = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Lo, ScalarAmt,
31059                                  ScalarAmtIdx, Subtarget, DAG);
31060         Hi = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Hi, ScalarAmt,
31061                                  ScalarAmtIdx, Subtarget, DAG);
31062         return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
31063       }
31064     }
31065 
31066     MVT WideSVT = MVT::getIntegerVT(
31067         std::min<unsigned>(EltSizeInBits * 2, Subtarget.hasBWI() ? 16 : 32));
31068     MVT WideVT = MVT::getVectorVT(WideSVT, NumElts);
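    // Widest element type usable for the extend+shift fold below: double the
    // element size, capped at i16 with BWI and at i32 otherwise.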
31069 
31070     // If per-element shifts are legal, fallback to generic expansion.
31071     if (supportedVectorVarShift(VT, Subtarget, ShiftOpc) || Subtarget.hasXOP())
31072       return SDValue();
31073 
31074     // Attempt to fold as:
31075     // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
31076     // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
31077     if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
31078         supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
31079       Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Op0);
31080       Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Op1);
31081       AmtMod = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
31082       Op0 = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, Op0,
31083                                        EltSizeInBits, DAG);
31084       SDValue Res = DAG.getNode(ISD::OR, DL, WideVT, Op0, Op1);
31085       Res = DAG.getNode(ShiftOpc, DL, WideVT, Res, AmtMod);
31086       if (!IsFSHR)
31087         Res = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, Res,
31088                                          EltSizeInBits, DAG);
31089       return DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
31090     }
31091 
31092     // Attempt to fold per-element (ExtVT) shift as unpack(y,x) << zext(z)
31093     if (((IsCst || !Subtarget.hasAVX512()) && !IsFSHR && EltSizeInBits <= 16) ||
31094         supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
31095       SDValue Z = DAG.getConstant(0, DL, VT);
31096       SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
31097       SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
31098       SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
31099       SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
31100       SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
31101       SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
31102       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
31103     }
31104 
31105     // Fallback to generic expansion.
31106     return SDValue();
31107   }
31108   assert(
31109       (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
31110       "Unexpected funnel shift type!");
31111 
31112   // Expand slow SHLD/SHRD cases if we are not optimizing for size.
31113   bool OptForSize = DAG.shouldOptForSize();
31114   bool ExpandFunnel = !OptForSize && Subtarget.isSHLDSlow();
31115 
31116   // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
31117   // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
31118   if ((VT == MVT::i8 || (ExpandFunnel && VT == MVT::i16)) &&
31119       !isa<ConstantSDNode>(Amt)) {
31120     SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, Amt.getValueType());
31121     SDValue HiShift = DAG.getConstant(EltSizeInBits, DL, Amt.getValueType());
31122     Op0 = DAG.getAnyExtOrTrunc(Op0, DL, MVT::i32);
31123     Op1 = DAG.getZExtOrTrunc(Op1, DL, MVT::i32);
31124     Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt, Mask);
31125     SDValue Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Op0, HiShift);
31126     Res = DAG.getNode(ISD::OR, DL, MVT::i32, Res, Op1);
31127     if (IsFSHR) {
31128       Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, Amt);
31129     } else {
31130       Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Res, Amt);
31131       Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, HiShift);
31132     }
31133     return DAG.getZExtOrTrunc(Res, DL, VT);
31134   }
31135 
31136   if (VT == MVT::i8 || ExpandFunnel)
31137     return SDValue();
31138 
31139   // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
31140   if (VT == MVT::i16) {
31141     Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
31142                       DAG.getConstant(15, DL, Amt.getValueType()));
31143     unsigned FSHOp = (IsFSHR ? X86ISD::FSHR : X86ISD::FSHL);
31144     return DAG.getNode(FSHOp, DL, VT, Op0, Op1, Amt);
31145   }
31146 
31147   return Op;
31148 }
31149 
31150 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
31151                            SelectionDAG &DAG) {
31152   MVT VT = Op.getSimpleValueType();
31153   assert(VT.isVector() && "Custom lowering only for vector rotates!");
31154 
31155   SDLoc DL(Op);
31156   SDValue R = Op.getOperand(0);
31157   SDValue Amt = Op.getOperand(1);
31158   unsigned Opcode = Op.getOpcode();
31159   unsigned EltSizeInBits = VT.getScalarSizeInBits();
31160   int NumElts = VT.getVectorNumElements();
31161   bool IsROTL = Opcode == ISD::ROTL;
31162 
31163   // Check for constant splat rotation amount.
31164   APInt CstSplatValue;
31165   bool IsCstSplat = X86::isConstantSplat(Amt, CstSplatValue);
31166 
31167   // Check for splat rotate by zero.
31168   if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)
31169     return R;
31170 
31171   // AVX512 implicitly uses modulo rotation amounts.
31172   if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
31173     // Attempt to rotate by immediate.
31174     if (IsCstSplat) {
31175       unsigned RotOpc = IsROTL ? X86ISD::VROTLI : X86ISD::VROTRI;
31176       uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
31177       return DAG.getNode(RotOpc, DL, VT, R,
31178                          DAG.getTargetConstant(RotAmt, DL, MVT::i8));
31179     }
31180 
31181     // Else, fall-back on VPROLV/VPRORV.
31182     return Op;
31183   }
31184 
31185   // AVX512 VBMI2 vXi16 - lower to funnel shifts.
31186   if (Subtarget.hasVBMI2() && 16 == EltSizeInBits) {
31187     unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
31188     return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
31189   }
31190 
31191   SDValue Z = DAG.getConstant(0, DL, VT);
31192 
31193   if (!IsROTL) {
31194     // If the ISD::ROTR amount is constant, we're always better converting to
31195     // ISD::ROTL.
31196     if (SDValue NegAmt = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {Z, Amt}))
31197       return DAG.getNode(ISD::ROTL, DL, VT, R, NegAmt);
31198 
31199     // XOP targets always prefer ISD::ROTL.
31200     if (Subtarget.hasXOP())
31201       return DAG.getNode(ISD::ROTL, DL, VT, R,
31202                          DAG.getNode(ISD::SUB, DL, VT, Z, Amt));
31203   }
31204 
31205   // Split 256-bit integers on XOP/pre-AVX2 targets.
31206   if (VT.is256BitVector() && (Subtarget.hasXOP() || !Subtarget.hasAVX2()))
31207     return splitVectorIntBinary(Op, DAG);
31208 
31209   // XOP has 128-bit vector variable + immediate rotates.
31210   // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
31211   // XOP implicitly uses modulo rotation amounts.
31212   if (Subtarget.hasXOP()) {
31213     assert(IsROTL && "Only ROTL expected");
31214     assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
31215 
31216     // Attempt to rotate by immediate.
31217     if (IsCstSplat) {
31218       uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
31219       return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
31220                          DAG.getTargetConstant(RotAmt, DL, MVT::i8));
31221     }
31222 
31223     // Use general rotate by variable (per-element).
31224     return Op;
31225   }
31226 
31227   // Rotate by a uniform constant - expand back to shifts.
31228   if (IsCstSplat)
31229     return SDValue();
31230 
31231   // Split 512-bit integers on non 512-bit BWI targets.
31232   if (VT.is512BitVector() && !Subtarget.useBWIRegs())
31233     return splitVectorIntBinary(Op, DAG);
31234 
31235   assert(
31236       (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
31237        ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
31238         Subtarget.hasAVX2()) ||
31239        ((VT == MVT::v32i16 || VT == MVT::v64i8) && Subtarget.useBWIRegs())) &&
31240       "Only vXi32/vXi16/vXi8 vector rotates supported");
31241 
31242   MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
31243   MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
31244 
31245   SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
31246   SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
31247 
31248   // Attempt to fold as unpack(x,x) << zext(splat(y)):
31249   // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
31250   // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
31251   if (EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) {
31252     int BaseRotAmtIdx = -1;
31253     if (SDValue BaseRotAmt = DAG.getSplatSourceVector(AmtMod, BaseRotAmtIdx)) {
31254       if (EltSizeInBits == 16 && Subtarget.hasSSE41()) {
31255         unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
31256         return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
31257       }
31258       unsigned ShiftX86Opc = IsROTL ? X86ISD::VSHLI : X86ISD::VSRLI;
31259       SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
31260       SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
31261       Lo = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Lo, BaseRotAmt,
31262                                BaseRotAmtIdx, Subtarget, DAG);
31263       Hi = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Hi, BaseRotAmt,
31264                                BaseRotAmtIdx, Subtarget, DAG);
31265       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
31266     }
31267   }
31268 
31269   // v16i8/v32i8/v64i8: Split rotation into rot4/rot2/rot1 stages and select by
31270   // the amount bit.
31271   // TODO: We're doing nothing here that we couldn't do for funnel shifts.
31272   if (EltSizeInBits == 8) {
31273     bool IsConstAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
31274     MVT WideVT =
31275         MVT::getVectorVT(Subtarget.hasBWI() ? MVT::i16 : MVT::i32, NumElts);
31276     unsigned ShiftOpc = IsROTL ? ISD::SHL : ISD::SRL;
31277 
31278     // Attempt to fold as:
31279     // rotl(x,y) -> (((aext(x) << bw) | zext(x)) << (y & (bw-1))) >> bw.
31280     // rotr(x,y) -> (((aext(x) << bw) | zext(x)) >> (y & (bw-1))).
31281     if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
31282         supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
31283       // If we're rotating by constant, just use default promotion.
31284       if (IsConstAmt)
31285         return SDValue();
31286       // See if we can perform this by widening to vXi16 or vXi32.
31287       R = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, R);
31288       R = DAG.getNode(
31289           ISD::OR, DL, WideVT, R,
31290           getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, R, 8, DAG));
31291       Amt = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
31292       R = DAG.getNode(ShiftOpc, DL, WideVT, R, Amt);
31293       if (IsROTL)
31294         R = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, R, 8, DAG);
31295       return DAG.getNode(ISD::TRUNCATE, DL, VT, R);
31296     }
31297 
31298     // Attempt to fold as unpack(x,x) << zext(y):
31299     // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
31300     // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
31301     if (IsConstAmt || supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
31302       // See if we can perform this by unpacking to lo/hi vXi16.
31303       SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
31304       SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
31305       SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
31306       SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
31307       SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
31308       SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
31309       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
31310     }
31311     assert((VT == MVT::v16i8 || VT == MVT::v32i8) && "Unsupported vXi8 type");
31312 
31313     // We don't need ModuloAmt here as we just peek at individual bits.
31314     auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
31315       if (Subtarget.hasSSE41()) {
31316         // On SSE41 targets we can use PBLENDVB which selects bytes based just
31317         // on the sign bit.
31318         V0 = DAG.getBitcast(VT, V0);
31319         V1 = DAG.getBitcast(VT, V1);
31320         Sel = DAG.getBitcast(VT, Sel);
31321         return DAG.getBitcast(SelVT,
31322                               DAG.getNode(X86ISD::BLENDV, DL, VT, Sel, V0, V1));
31323       }
31324       // On pre-SSE41 targets we test for the sign bit by comparing to
31325       // zero - a negative value will set all bits of the lanes to true
31326       // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
31327       SDValue Z = DAG.getConstant(0, DL, SelVT);
31328       SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
31329       return DAG.getSelect(DL, SelVT, C, V0, V1);
31330     };
31331 
31332     // ISD::ROTR is currently only profitable on AVX512 targets with VPTERNLOG.
31333     if (!IsROTL && !useVPTERNLOG(Subtarget, VT)) {
31334       Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
31335       IsROTL = true;
31336     }
31337 
31338     unsigned ShiftLHS = IsROTL ? ISD::SHL : ISD::SRL;
31339     unsigned ShiftRHS = IsROTL ? ISD::SRL : ISD::SHL;
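    // rot(x, N) == (x << N) | (x >> (8 - N)), so each stage below ORs a pair of
    // complementary byte shifts (4/4, 2/6, 1/7) and selects per-byte on the
    // current amount bit.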
31340 
31341     // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
31342     // We can safely do this using i16 shifts as we're only interested in
31343     // the 3 lower bits of each byte.
31344     Amt = DAG.getBitcast(ExtVT, Amt);
31345     Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
31346     Amt = DAG.getBitcast(VT, Amt);
31347 
31348     // r = VSELECT(r, rot(r, 4), a);
31349     SDValue M;
31350     M = DAG.getNode(
31351         ISD::OR, DL, VT,
31352         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(4, DL, VT)),
31353         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(4, DL, VT)));
31354     R = SignBitSelect(VT, Amt, M, R);
31355 
31356     // a += a
31357     Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
31358 
31359     // r = VSELECT(r, rot(r, 2), a);
31360     M = DAG.getNode(
31361         ISD::OR, DL, VT,
31362         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(2, DL, VT)),
31363         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(6, DL, VT)));
31364     R = SignBitSelect(VT, Amt, M, R);
31365 
31366     // a += a
31367     Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
31368 
31369     // return VSELECT(r, rot(r, 1), a);
31370     M = DAG.getNode(
31371         ISD::OR, DL, VT,
31372         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(1, DL, VT)),
31373         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(7, DL, VT)));
31374     return SignBitSelect(VT, Amt, M, R);
31375   }
31376 
31377   bool IsSplatAmt = DAG.isSplatValue(Amt);
31378   bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
31379   bool LegalVarShifts = supportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
31380                         supportedVectorVarShift(VT, Subtarget, ISD::SRL);
31381 
31382   // Fallback for splats + all supported variable shifts.
31383   // Fallback for non-constant AVX2 vXi16 amounts as well.
31384   if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
31385     Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
31386     SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
31387     AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
31388     SDValue SHL = DAG.getNode(IsROTL ? ISD::SHL : ISD::SRL, DL, VT, R, Amt);
31389     SDValue SRL = DAG.getNode(IsROTL ? ISD::SRL : ISD::SHL, DL, VT, R, AmtR);
31390     return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
31391   }
31392 
31393   // Everything below assumes ISD::ROTL.
31394   if (!IsROTL) {
31395     Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
31396     IsROTL = true;
31397   }
31398 
31399   // ISD::ROT* uses modulo rotate amounts.
31400   Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
31401 
31402   assert(IsROTL && "Only ROTL supported");
31403 
31404   // As with shifts, attempt to convert the rotation amount to a multiplication
31405   // factor, fallback to general expansion.
31406   SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
31407   if (!Scale)
31408     return SDValue();
31409 
31410   // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
31411   if (EltSizeInBits == 16) {
31412     SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
31413     SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
31414     return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
31415   }
31416 
31417   // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
31418   // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
31419   // that can then be OR'd with the lower 32-bits.
31420   assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
31421   static const int OddMask[] = {1, -1, 3, -1};
31422   SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
31423   SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
31424 
31425   SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
31426                               DAG.getBitcast(MVT::v2i64, R),
31427                               DAG.getBitcast(MVT::v2i64, Scale));
31428   SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
31429                               DAG.getBitcast(MVT::v2i64, R13),
31430                               DAG.getBitcast(MVT::v2i64, Scale13));
31431   Res02 = DAG.getBitcast(VT, Res02);
31432   Res13 = DAG.getBitcast(VT, Res13);
31433 
31434   return DAG.getNode(ISD::OR, DL, VT,
31435                      DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
31436                      DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
31437 }
31438 
31439 /// Returns true if the operand type is exactly twice the native width, and
31440 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
31441 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
31442 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
31443 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
31444   unsigned OpWidth = MemType->getPrimitiveSizeInBits();
31445 
31446   if (OpWidth == 64)
31447     return Subtarget.canUseCMPXCHG8B() && !Subtarget.is64Bit();
31448   if (OpWidth == 128)
31449     return Subtarget.canUseCMPXCHG16B();
31450 
31451   return false;
31452 }
31453 
31454 TargetLoweringBase::AtomicExpansionKind
31455 X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
31456   Type *MemType = SI->getValueOperand()->getType();
31457 
31458   bool NoImplicitFloatOps =
31459       SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
31460   if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
31461       !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
31462       (Subtarget.hasSSE1() || Subtarget.hasX87()))
31463     return AtomicExpansionKind::None;
31464 
31465   return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
31466                                  : AtomicExpansionKind::None;
31467 }
31468 
31469 // Note: this turns large loads into lock cmpxchg8b/16b.
31470 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
31471 TargetLowering::AtomicExpansionKind
31472 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
31473   Type *MemType = LI->getType();
31474 
31475   // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
31476   // can use movq to do the load. If we have X87 we can load into an 80-bit
31477   // X87 register and store it to a stack temporary.
31478   bool NoImplicitFloatOps =
31479       LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
31480   if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
31481       !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
31482       (Subtarget.hasSSE1() || Subtarget.hasX87()))
31483     return AtomicExpansionKind::None;
31484 
31485   return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
31486                                  : AtomicExpansionKind::None;
31487 }
31488 
31489 enum BitTestKind : unsigned {
31490   UndefBit,
31491   ConstantBit,
31492   NotConstantBit,
31493   ShiftBit,
31494   NotShiftBit
31495 };
31496 
31497 static std::pair<Value *, BitTestKind> FindSingleBitChange(Value *V) {
31498   using namespace llvm::PatternMatch;
31499   BitTestKind BTK = UndefBit;
31500   auto *C = dyn_cast<ConstantInt>(V);
31501   if (C) {
31502     // Check if V is a power of 2 or NOT power of 2.
31503     if (isPowerOf2_64(C->getZExtValue()))
31504       BTK = ConstantBit;
31505     else if (isPowerOf2_64((~C->getValue()).getZExtValue()))
31506       BTK = NotConstantBit;
31507     return {V, BTK};
31508   }
31509 
31510   // Check if V is some power of 2 pattern known to be non-zero
31511   auto *I = dyn_cast<Instruction>(V);
31512   if (I) {
31513     bool Not = false;
31514     // Check if we have a NOT
31515     Value *PeekI;
31516     if (match(I, m_c_Xor(m_Value(PeekI), m_AllOnes())) ||
31517         match(I, m_Sub(m_AllOnes(), m_Value(PeekI)))) {
31518       Not = true;
31519       I = dyn_cast<Instruction>(PeekI);
31520 
31521       // If I is constant, it will fold and we can evaluate it later. If it's an
31522       // argument or something of that nature, we can't analyze it.
31523       if (I == nullptr)
31524         return {nullptr, UndefBit};
31525     }
31526     // We can only use 1 << X without more sophisticated analysis. C << X where
31527     // C is a power of 2 but not 1 can result in zero which cannot be translated
31528     // to bittest. Likewise any C >> X (either arith or logical) can be zero.
31529     if (I->getOpcode() == Instruction::Shl) {
31530       // Todo(1): The cmpxchg case is pretty costly so matching `BLSI(X)`, `X &
31531       // -X` and some other provable power of 2 patterns that we can use CTZ on
31532       // may be profitable.
31533       // Todo(2): It may be possible in some cases to prove that Shl(C, X) is
31534       // non-zero even where C != 1. Likewise LShr(C, X) and AShr(C, X) may also
31535       // be provably a non-zero power of 2.
31536       // Todo(3): ROTL and ROTR patterns on a power of 2 C should also be
31537       // transformable to bittest.
31538       auto *ShiftVal = dyn_cast<ConstantInt>(I->getOperand(0));
31539       if (!ShiftVal)
31540         return {nullptr, UndefBit};
31541       if (ShiftVal->equalsInt(1))
31542         BTK = Not ? NotShiftBit : ShiftBit;
31543 
31544       if (BTK == UndefBit)
31545         return {nullptr, UndefBit};
31546 
31547       Value *BitV = I->getOperand(1);
31548 
31549       Value *AndOp;
31550       const APInt *AndC;
31551       if (match(BitV, m_c_And(m_Value(AndOp), m_APInt(AndC)))) {
31552         // Read past a shift-mask instruction to find the count.
31553         if (*AndC == (I->getType()->getPrimitiveSizeInBits() - 1))
31554           BitV = AndOp;
31555       }
31556       return {BitV, BTK};
31557     }
31558   }
31559   return {nullptr, UndefBit};
31560 }
31561 
31562 TargetLowering::AtomicExpansionKind
31563 X86TargetLowering::shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const {
31564   // If the atomicrmw's result isn't actually used, we can just add a "lock"
31565   // prefix to a normal instruction for these operations.
31566   if (AI->use_empty())
31567     return AtomicExpansionKind::None;
31568 
31569   // If the atomicrmw's result is used by a single bit AND, we may use
31570   // bts/btr/btc instruction for these operations.
31571   // Note: InstCombinePass can cause a de-optimization here. It replaces the
31572   // SETCC(And(AtomicRMW(P, power_of_2), power_of_2)) with LShr and Xor
31573   // (depending on CC). This pattern can only use bts/btr/btc but we don't
31574   // detect it.
31575   Instruction *I = AI->user_back();
31576   auto BitChange = FindSingleBitChange(AI->getValOperand());
31577   if (BitChange.second == UndefBit || !AI->hasOneUse() ||
31578       I->getOpcode() != Instruction::And ||
31579       AI->getType()->getPrimitiveSizeInBits() == 8 ||
31580       AI->getParent() != I->getParent())
31581     return AtomicExpansionKind::CmpXChg;
31582 
31583   unsigned OtherIdx = I->getOperand(0) == AI ? 1 : 0;
31584 
31585   // This is a redundant AND, it should get cleaned up elsewhere.
31586   if (AI == I->getOperand(OtherIdx))
31587     return AtomicExpansionKind::CmpXChg;
31588 
31589   // The following instruction must be an AND with a single bit.
31590   if (BitChange.second == ConstantBit || BitChange.second == NotConstantBit) {
31591     auto *C1 = cast<ConstantInt>(AI->getValOperand());
31592     auto *C2 = dyn_cast<ConstantInt>(I->getOperand(OtherIdx));
31593     if (!C2 || !isPowerOf2_64(C2->getZExtValue())) {
31594       return AtomicExpansionKind::CmpXChg;
31595     }
31596     if (AI->getOperation() == AtomicRMWInst::And) {
31597       return ~C1->getValue() == C2->getValue()
31598                  ? AtomicExpansionKind::BitTestIntrinsic
31599                  : AtomicExpansionKind::CmpXChg;
31600     }
31601     return C1 == C2 ? AtomicExpansionKind::BitTestIntrinsic
31602                     : AtomicExpansionKind::CmpXChg;
31603   }
31604 
31605   assert(BitChange.second == ShiftBit || BitChange.second == NotShiftBit);
31606 
31607   auto BitTested = FindSingleBitChange(I->getOperand(OtherIdx));
31608   if (BitTested.second != ShiftBit && BitTested.second != NotShiftBit)
31609     return AtomicExpansionKind::CmpXChg;
31610 
31611   assert(BitChange.first != nullptr && BitTested.first != nullptr);
31612 
31613   // If shift amounts are not the same we can't use BitTestIntrinsic.
31614   if (BitChange.first != BitTested.first)
31615     return AtomicExpansionKind::CmpXChg;
31616 
31617   // An atomic AND must be masking off exactly one bit and testing the one bit
31618   // that is unset in the mask.
31619   if (AI->getOperation() == AtomicRMWInst::And)
31620     return (BitChange.second == NotShiftBit && BitTested.second == ShiftBit)
31621                ? AtomicExpansionKind::BitTestIntrinsic
31622                : AtomicExpansionKind::CmpXChg;
31623 
31624   // An atomic XOR/OR must be setting and testing the same bit.
31625   return (BitChange.second == ShiftBit && BitTested.second == ShiftBit)
31626              ? AtomicExpansionKind::BitTestIntrinsic
31627              : AtomicExpansionKind::CmpXChg;
31628 }
31629 
31630 void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
31631   IRBuilder<> Builder(AI);
31632   Intrinsic::ID IID_C = Intrinsic::not_intrinsic;
31633   Intrinsic::ID IID_I = Intrinsic::not_intrinsic;
31634   switch (AI->getOperation()) {
31635   default:
31636     llvm_unreachable("Unknown atomic operation");
31637   case AtomicRMWInst::Or:
31638     IID_C = Intrinsic::x86_atomic_bts;
31639     IID_I = Intrinsic::x86_atomic_bts_rm;
31640     break;
31641   case AtomicRMWInst::Xor:
31642     IID_C = Intrinsic::x86_atomic_btc;
31643     IID_I = Intrinsic::x86_atomic_btc_rm;
31644     break;
31645   case AtomicRMWInst::And:
31646     IID_C = Intrinsic::x86_atomic_btr;
31647     IID_I = Intrinsic::x86_atomic_btr_rm;
31648     break;
31649   }
31650   Instruction *I = AI->user_back();
31651   LLVMContext &Ctx = AI->getContext();
31652   Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
31653                                           Type::getInt8PtrTy(Ctx));
31654   Function *BitTest = nullptr;
31655   Value *Result = nullptr;
31656   auto BitTested = FindSingleBitChange(AI->getValOperand());
31657   assert(BitTested.first != nullptr);
31658 
31659   if (BitTested.second == ConstantBit || BitTested.second == NotConstantBit) {
31660     auto *C = cast<ConstantInt>(I->getOperand(I->getOperand(0) == AI ? 1 : 0));
31661 
31662     BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_C, AI->getType());
31663 
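    // The AND mask operand is a power of two, so the bit index to test is just
    // its number of trailing zeros.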
31664     unsigned Imm = countTrailingZeros(C->getZExtValue());
31665     Result = Builder.CreateCall(BitTest, {Addr, Builder.getInt8(Imm)});
31666   } else {
31667     BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_I, AI->getType());
31668 
31669     assert(BitTested.second == ShiftBit || BitTested.second == NotShiftBit);
31670 
31671     Value *SI = BitTested.first;
31672     assert(SI != nullptr);
31673 
31674     // BT{S|R|C} on a memory operand doesn't modulo the bit position, so we
31675     // need to mask it.
31676     unsigned ShiftBits = SI->getType()->getPrimitiveSizeInBits();
31677     Value *BitPos =
31678         Builder.CreateAnd(SI, Builder.getIntN(ShiftBits, ShiftBits - 1));
31679     // Todo(1): In many cases it may be provable that SI is less than
31680     // ShiftBits in which case this mask is unnecessary
31681     // Todo(2): In the fairly idiomatic case of P[X / sizeof_bits(X)] OP 1
31682     // << (X % sizeof_bits(X)) we can drop the shift mask and AGEN in
31683     // favor of just a raw BT{S|R|C}.
31684 
31685     Result = Builder.CreateCall(BitTest, {Addr, BitPos});
31686     Result = Builder.CreateZExtOrTrunc(Result, AI->getType());
31687 
31688     // If the result is only used for zero/non-zero status then we don't need
31689     // to shift the value back. Otherwise do so.
31690     for (auto It = I->user_begin(); It != I->user_end(); ++It) {
31691       if (auto *ICmp = dyn_cast<ICmpInst>(*It)) {
31692         if (ICmp->isEquality()) {
31693           auto *C0 = dyn_cast<ConstantInt>(ICmp->getOperand(0));
31694           auto *C1 = dyn_cast<ConstantInt>(ICmp->getOperand(1));
31695           if (C0 || C1) {
31696             assert(C0 == nullptr || C1 == nullptr);
31697             if ((C0 ? C0 : C1)->isZero())
31698               continue;
31699           }
31700         }
31701       }
31702       Result = Builder.CreateShl(Result, BitPos);
31703       break;
31704     }
31705   }
31706 
31707   I->replaceAllUsesWith(Result);
31708   I->eraseFromParent();
31709   AI->eraseFromParent();
31710 }
31711 
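// Return true if the atomicrmw's only use is a comparison whose result can be
// read directly from the EFLAGS produced by the corresponding lock-prefixed
// arithmetic instruction (see emitCmpArithAtomicRMWIntrinsic below).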
31712 static bool shouldExpandCmpArithRMWInIR(AtomicRMWInst *AI) {
31713   using namespace llvm::PatternMatch;
31714   if (!AI->hasOneUse())
31715     return false;
31716 
31717   Value *Op = AI->getOperand(1);
31718   ICmpInst::Predicate Pred;
31719   Instruction *I = AI->user_back();
31720   AtomicRMWInst::BinOp Opc = AI->getOperation();
31721   if (Opc == AtomicRMWInst::Add) {
31722     if (match(I, m_c_ICmp(Pred, m_Sub(m_ZeroInt(), m_Specific(Op)), m_Value())))
31723       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
31724     if (match(I, m_OneUse(m_c_Add(m_Specific(Op), m_Value())))) {
31725       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
31726         return Pred == CmpInst::ICMP_SLT;
31727       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
31728         return Pred == CmpInst::ICMP_SGT;
31729     }
31730     return false;
31731   }
31732   if (Opc == AtomicRMWInst::Sub) {
31733     if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
31734       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
31735     if (match(I, m_OneUse(m_Sub(m_Value(), m_Specific(Op))))) {
31736       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
31737         return Pred == CmpInst::ICMP_SLT;
31738       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
31739         return Pred == CmpInst::ICMP_SGT;
31740     }
31741     return false;
31742   }
31743   if ((Opc == AtomicRMWInst::Or &&
31744        match(I, m_OneUse(m_c_Or(m_Specific(Op), m_Value())))) ||
31745       (Opc == AtomicRMWInst::And &&
31746        match(I, m_OneUse(m_c_And(m_Specific(Op), m_Value()))))) {
31747     if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
31748       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE ||
31749              Pred == CmpInst::ICMP_SLT;
31750     if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
31751       return Pred == CmpInst::ICMP_SGT;
31752     return false;
31753   }
31754   if (Opc == AtomicRMWInst::Xor) {
31755     if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
31756       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
31757     if (match(I, m_OneUse(m_c_Xor(m_Specific(Op), m_Value())))) {
31758       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
31759         return Pred == CmpInst::ICMP_SLT;
31760       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
31761         return Pred == CmpInst::ICMP_SGT;
31762     }
31763     return false;
31764   }
31765 
31766   return false;
31767 }
31768 
31769 void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(
31770     AtomicRMWInst *AI) const {
31771   IRBuilder<> Builder(AI);
31772   Instruction *TempI = nullptr;
31773   LLVMContext &Ctx = AI->getContext();
31774   ICmpInst *ICI = dyn_cast<ICmpInst>(AI->user_back());
31775   if (!ICI) {
31776     TempI = AI->user_back();
31777     assert(TempI->hasOneUse() && "Must have one use");
31778     ICI = cast<ICmpInst>(TempI->user_back());
31779   }
31780   X86::CondCode CC = X86::COND_INVALID;
31781   ICmpInst::Predicate Pred = ICI->getPredicate();
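  // Map the IR predicate onto the EFLAGS condition set by the locked
  // arithmetic: EQ/NE use ZF, SLT (vs 0) tests SF set, SGT (vs -1) tests SF
  // clear.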
31782   switch (Pred) {
31783   default:
31784     llvm_unreachable("Not supported Pred");
31785   case CmpInst::ICMP_EQ:
31786     CC = X86::COND_E;
31787     break;
31788   case CmpInst::ICMP_NE:
31789     CC = X86::COND_NE;
31790     break;
31791   case CmpInst::ICMP_SLT:
31792     CC = X86::COND_S;
31793     break;
31794   case CmpInst::ICMP_SGT:
31795     CC = X86::COND_NS;
31796     break;
31797   }
31798   Intrinsic::ID IID = Intrinsic::not_intrinsic;
31799   switch (AI->getOperation()) {
31800   default:
31801     llvm_unreachable("Unknown atomic operation");
31802   case AtomicRMWInst::Add:
31803     IID = Intrinsic::x86_atomic_add_cc;
31804     break;
31805   case AtomicRMWInst::Sub:
31806     IID = Intrinsic::x86_atomic_sub_cc;
31807     break;
31808   case AtomicRMWInst::Or:
31809     IID = Intrinsic::x86_atomic_or_cc;
31810     break;
31811   case AtomicRMWInst::And:
31812     IID = Intrinsic::x86_atomic_and_cc;
31813     break;
31814   case AtomicRMWInst::Xor:
31815     IID = Intrinsic::x86_atomic_xor_cc;
31816     break;
31817   }
31818   Function *CmpArith =
31819       Intrinsic::getDeclaration(AI->getModule(), IID, AI->getType());
31820   Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
31821                                           Type::getInt8PtrTy(Ctx));
31822   Value *Call = Builder.CreateCall(
31823       CmpArith, {Addr, AI->getValOperand(), Builder.getInt32((unsigned)CC)});
31824   Value *Result = Builder.CreateTrunc(Call, Type::getInt1Ty(Ctx));
31825   ICI->replaceAllUsesWith(Result);
31826   ICI->eraseFromParent();
31827   if (TempI)
31828     TempI->eraseFromParent();
31829   AI->eraseFromParent();
31830 }
31831 
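// A rough summary of the decisions below (illustrative): oversized operands go
// through cmpxchg8b/cmpxchg16b or a libcall, xchg/add/sub are kept native (or
// use the flag-producing intrinsics above when only a comparison of the result
// is needed), the logic ops additionally consult shouldExpandLogicAtomicRMWInIR,
// and everything else falls back to a cmpxchg loop.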
31832 TargetLowering::AtomicExpansionKind
31833 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
31834   unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
31835   Type *MemType = AI->getType();
31836 
31837   // If the operand is too big, we must see if cmpxchg8/16b is available
31838   // and default to library calls otherwise.
31839   if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
31840     return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
31841                                    : AtomicExpansionKind::None;
31842   }
31843 
31844   AtomicRMWInst::BinOp Op = AI->getOperation();
31845   switch (Op) {
31846   case AtomicRMWInst::Xchg:
31847     return AtomicExpansionKind::None;
31848   case AtomicRMWInst::Add:
31849   case AtomicRMWInst::Sub:
31850     if (shouldExpandCmpArithRMWInIR(AI))
31851       return AtomicExpansionKind::CmpArithIntrinsic;
31852     // It's better to use xadd, xsub or xchg for these in other cases.
31853     return AtomicExpansionKind::None;
31854   case AtomicRMWInst::Or:
31855   case AtomicRMWInst::And:
31856   case AtomicRMWInst::Xor:
31857     if (shouldExpandCmpArithRMWInIR(AI))
31858       return AtomicExpansionKind::CmpArithIntrinsic;
31859     return shouldExpandLogicAtomicRMWInIR(AI);
31860   case AtomicRMWInst::Nand:
31861   case AtomicRMWInst::Max:
31862   case AtomicRMWInst::Min:
31863   case AtomicRMWInst::UMax:
31864   case AtomicRMWInst::UMin:
31865   case AtomicRMWInst::FAdd:
31866   case AtomicRMWInst::FSub:
31867   case AtomicRMWInst::FMax:
31868   case AtomicRMWInst::FMin:
31869   case AtomicRMWInst::UIncWrap:
31870   case AtomicRMWInst::UDecWrap:
31871   default:
31872     // These always require a non-trivial set of data operations on x86. We must
31873     // use a cmpxchg loop.
31874     return AtomicExpansionKind::CmpXChg;
31875   }
31876 }
31877 
31878 LoadInst *
31879 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
31880   unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
31881   Type *MemType = AI->getType();
31882   // Accesses larger than the native width are turned into cmpxchg/libcalls, so
31883   // there is no benefit in turning such RMWs into loads, and it is actually
31884   // harmful as it introduces a mfence.
31885   if (MemType->getPrimitiveSizeInBits() > NativeWidth)
31886     return nullptr;
31887 
31888   // If this is a canonical idempotent atomicrmw w/no uses, we have a better
31889   // lowering available in lowerAtomicArith.
31890   // TODO: push more cases through this path.
31891   if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
31892     if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
31893         AI->use_empty())
31894       return nullptr;
31895 
31896   IRBuilder<> Builder(AI);
31897   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
31898   auto SSID = AI->getSyncScopeID();
31899   // We must restrict the ordering to avoid generating loads with Release or
31900   // ReleaseAcquire orderings.
31901   auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
31902 
31903   // Before the load we need a fence. Here is an example lifted from
31904   // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
31905   // is required:
31906   // Thread 0:
31907   //   x.store(1, relaxed);
31908   //   r1 = y.fetch_add(0, release);
31909   // Thread 1:
31910   //   y.fetch_add(42, acquire);
31911   //   r2 = x.load(relaxed);
31912   // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
31913   // lowered to just a load without a fence. A mfence flushes the store buffer,
31914   // making the optimization clearly correct.
31915   // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
31916   // otherwise, we might be able to be more aggressive on relaxed idempotent
31917   // rmw. In practice, they do not look useful, so we don't try to be
31918   // especially clever.
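  // As a concrete sketch (assuming the fence is emitted), something like
  //   %r = atomicrmw or ptr %p, i32 0 acquire
  // with %r actually used ends up as an mfence followed by an atomic load of
  // %p with the ordering computed above, and %r is replaced by the loaded
  // value.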
31919   if (SSID == SyncScope::SingleThread)
31920     // FIXME: we could just insert an ISD::MEMBARRIER here, except we are at
31921     // the IR level, so we must wrap it in an intrinsic.
31922     return nullptr;
31923 
31924   if (!Subtarget.hasMFence())
31925     // FIXME: it might make sense to use a locked operation here but on a
31926     // different cache-line to prevent cache-line bouncing. In practice it
31927     // is probably a small win, and x86 processors without mfence are rare
31928     // enough that we do not bother.
31929     return nullptr;
31930 
31931   Function *MFence =
31932       llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
31933   Builder.CreateCall(MFence, {});
31934 
31935   // Finally we can emit the atomic load.
31936   LoadInst *Loaded = Builder.CreateAlignedLoad(
31937       AI->getType(), AI->getPointerOperand(), AI->getAlign());
31938   Loaded->setAtomic(Order, SSID);
31939   AI->replaceAllUsesWith(Loaded);
31940   AI->eraseFromParent();
31941   return Loaded;
31942 }
31943 
31944 bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
31945   if (!SI.isUnordered())
31946     return false;
31947   return ExperimentalUnorderedISEL;
31948 }
31949 bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
31950   if (!LI.isUnordered())
31951     return false;
31952   return ExperimentalUnorderedISEL;
31953 }
31954 
31955 
31956 /// Emit a locked operation on a stack location which does not change any
31957 /// memory location, but does involve a lock prefix.  Location is chosen to be
31958 /// a) very likely accessed only by a single thread to minimize cache traffic,
31959 /// and b) definitely dereferenceable.  Returns the new Chain result.
31960 static SDValue emitLockedStackOp(SelectionDAG &DAG,
31961                                  const X86Subtarget &Subtarget, SDValue Chain,
31962                                  const SDLoc &DL) {
31963   // Implementation notes:
31964   // 1) LOCK prefix creates a full read/write reordering barrier for memory
31965   // operations issued by the current processor.  As such, the location
31966   // referenced is not relevant for the ordering properties of the instruction.
31967   // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
31968   // 8.2.3.9  Loads and Stores Are Not Reordered with Locked Instructions
31969   // 2) Using an immediate operand appears to be the best encoding choice
31970   // here since it doesn't require an extra register.
31971   // 3) OR appears to be very slightly faster than ADD. (Though, the difference
31972   // is small enough it might just be measurement noise.)
31973   // 4) When choosing offsets, there are several contributing factors:
31974   //   a) If there's no redzone, we default to TOS.  (We could allocate a cache
31975   //      line aligned stack object to improve this case.)
31976   //   b) To minimize our chances of introducing a false dependence, we prefer
31977   //      to offset the stack usage from TOS slightly.
31978   //   c) To minimize concerns about cross thread stack usage - in particular,
31979   //      the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
31980   //      captures state in the TOS frame and accesses it from many threads -
31981   //      we want to use an offset such that the offset is in a distinct cache
31982   //      line from the TOS frame.
31983   //
31984   // For a general discussion of the tradeoffs and benchmark results, see:
31985   // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
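  //
  // For example (an illustrative encoding; the exact offset depends on the red
  // zone check below), on x86-64 this typically materializes as
  //   lock orl $0x0, -64(%rsp)
  // and on 32-bit targets as
  //   lock orl $0x0, (%esp)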
31986 
31987   auto &MF = DAG.getMachineFunction();
31988   auto &TFL = *Subtarget.getFrameLowering();
31989   const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
31990 
31991   if (Subtarget.is64Bit()) {
31992     SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
31993     SDValue Ops[] = {
31994       DAG.getRegister(X86::RSP, MVT::i64),                  // Base
31995       DAG.getTargetConstant(1, DL, MVT::i8),                // Scale
31996       DAG.getRegister(0, MVT::i64),                         // Index
31997       DAG.getTargetConstant(SPOffset, DL, MVT::i32),        // Disp
31998       DAG.getRegister(0, MVT::i16),                         // Segment.
31999       Zero,
32000       Chain};
32001     SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
32002                                      MVT::Other, Ops);
32003     return SDValue(Res, 1);
32004   }
32005 
32006   SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
32007   SDValue Ops[] = {
32008     DAG.getRegister(X86::ESP, MVT::i32),            // Base
32009     DAG.getTargetConstant(1, DL, MVT::i8),          // Scale
32010     DAG.getRegister(0, MVT::i32),                   // Index
32011     DAG.getTargetConstant(SPOffset, DL, MVT::i32),  // Disp
32012     DAG.getRegister(0, MVT::i16),                   // Segment.
32013     Zero,
32014     Chain
32015   };
32016   SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
32017                                    MVT::Other, Ops);
32018   return SDValue(Res, 1);
32019 }
32020 
32021 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
32022                                  SelectionDAG &DAG) {
32023   SDLoc dl(Op);
32024   AtomicOrdering FenceOrdering =
32025       static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
32026   SyncScope::ID FenceSSID =
32027       static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
32028 
32029   // The only fence that needs an instruction is a sequentially-consistent
32030   // cross-thread fence.
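  // For illustration: `fence seq_cst` at system scope becomes MFENCE when
  // available (or the locked stack OR below as a cheaper substitute), while
  // weaker fences and single-thread fences only need the compiler barrier
  // emitted at the end of this function.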
32031   if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
32032       FenceSSID == SyncScope::System) {
32033     if (Subtarget.hasMFence())
32034       return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
32035 
32036     SDValue Chain = Op.getOperand(0);
32037     return emitLockedStackOp(DAG, Subtarget, Chain, dl);
32038   }
32039 
32040   // MEMBARRIER is a compiler barrier; it codegens to a no-op.
32041   return DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
32042 }
32043 
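// A rough sketch of what this produces for the i32 case (illustrative): the
// expected value is copied into EAX, an LCMPXCHG node (selected to a
// `lock cmpxchg`) performs the exchange, the previous memory value is read
// back from EAX, and the success flag is materialized from ZF with SETE.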
32044 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
32045                              SelectionDAG &DAG) {
32046   MVT T = Op.getSimpleValueType();
32047   SDLoc DL(Op);
32048   unsigned Reg = 0;
32049   unsigned size = 0;
32050   switch(T.SimpleTy) {
32051   default: llvm_unreachable("Invalid value type!");
32052   case MVT::i8:  Reg = X86::AL;  size = 1; break;
32053   case MVT::i16: Reg = X86::AX;  size = 2; break;
32054   case MVT::i32: Reg = X86::EAX; size = 4; break;
32055   case MVT::i64:
32056     assert(Subtarget.is64Bit() && "Node not type legal!");
32057     Reg = X86::RAX; size = 8;
32058     break;
32059   }
32060   SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
32061                                   Op.getOperand(2), SDValue());
32062   SDValue Ops[] = { cpIn.getValue(0),
32063                     Op.getOperand(1),
32064                     Op.getOperand(3),
32065                     DAG.getTargetConstant(size, DL, MVT::i8),
32066                     cpIn.getValue(1) };
32067   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
32068   MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
32069   SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
32070                                            Ops, T, MMO);
32071 
32072   SDValue cpOut =
32073     DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
32074   SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
32075                                       MVT::i32, cpOut.getValue(2));
32076   SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
32077 
32078   return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
32079                      cpOut, Success, EFLAGS.getValue(1));
32080 }
32081 
32082 // Create MOVMSKB, taking into account whether we need to split for AVX1.
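// For example (a sketch): a v32i8 input without AVX2 is split into two v16i8
// halves, each lowered to MOVMSK, and the two 16-bit masks are recombined as
// Lo | (Hi << 16); v64i8 is handled the same way, with the two 32-bit halves
// combined into an i64 mask.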
32083 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
32084                            const X86Subtarget &Subtarget) {
32085   MVT InVT = V.getSimpleValueType();
32086 
32087   if (InVT == MVT::v64i8) {
32088     SDValue Lo, Hi;
32089     std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
32090     Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
32091     Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
32092     Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
32093     Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
32094     Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
32095                      DAG.getConstant(32, DL, MVT::i8));
32096     return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
32097   }
32098   if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
32099     SDValue Lo, Hi;
32100     std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
32101     Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
32102     Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
32103     Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
32104                      DAG.getConstant(16, DL, MVT::i8));
32105     return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
32106   }
32107 
32108   return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
32109 }
32110 
32111 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
32112                             SelectionDAG &DAG) {
32113   SDValue Src = Op.getOperand(0);
32114   MVT SrcVT = Src.getSimpleValueType();
32115   MVT DstVT = Op.getSimpleValueType();
32116 
32117   // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
32118   // half to v32i1 and concatenating the result.
32119   if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
32120     assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
32121     assert(Subtarget.hasBWI() && "Expected BWI target");
32122     SDLoc dl(Op);
32123     SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
32124                              DAG.getIntPtrConstant(0, dl));
32125     Lo = DAG.getBitcast(MVT::v32i1, Lo);
32126     SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
32127                              DAG.getIntPtrConstant(1, dl));
32128     Hi = DAG.getBitcast(MVT::v32i1, Hi);
32129     return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
32130   }
32131 
32132   // Use MOVMSK for vector to scalar conversion to prevent scalarization.
32133   if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
32134     assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
32135     MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
32136     SDLoc DL(Op);
32137     SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
32138     V = getPMOVMSKB(DL, V, DAG, Subtarget);
32139     return DAG.getZExtOrTrunc(V, DL, DstVT);
32140   }
32141 
32142   assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
32143           SrcVT == MVT::i64) && "Unexpected VT!");
32144 
32145   assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32146   if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
32147       !(DstVT == MVT::x86mmx && SrcVT.isVector()))
32148     // This conversion needs to be expanded.
32149     return SDValue();
32150 
32151   SDLoc dl(Op);
32152   if (SrcVT.isVector()) {
32153     // Widen the input vector in the case of MVT::v2i32.
32154     // Example: from MVT::v2i32 to MVT::v4i32.
32155     MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
32156                                  SrcVT.getVectorNumElements() * 2);
32157     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
32158                       DAG.getUNDEF(SrcVT));
32159   } else {
32160     assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
32161            "Unexpected source type in LowerBITCAST");
32162     Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
32163   }
32164 
32165   MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
32166   Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
32167 
32168   if (DstVT == MVT::x86mmx)
32169     return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
32170 
32171   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
32172                      DAG.getIntPtrConstant(0, dl));
32173 }
32174 
32175 /// Compute the horizontal sum of bytes in V for the elements of VT.
32176 ///
32177 /// Requires V to be a byte vector and VT to be an integer vector type with
32178 /// wider elements than V's type. The width of the elements of VT determines
32179 /// how many bytes of V are summed horizontally to produce each element of the
32180 /// result.
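///
/// Illustrative outline of the cases below: for i64 elements a single PSADBW
/// against zero already yields the per-64-bit byte sums; for i32 elements the
/// bytes are unpacked against zero into low/high halves, each half is summed
/// with PSADBW, and the partial sums are packed back together with PACKUS; for
/// i16 elements the sum is formed with a shift-add-shift sequence.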
32181 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
32182                                       const X86Subtarget &Subtarget,
32183                                       SelectionDAG &DAG) {
32184   SDLoc DL(V);
32185   MVT ByteVecVT = V.getSimpleValueType();
32186   MVT EltVT = VT.getVectorElementType();
32187   assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
32188          "Expected value to have byte element type.");
32189   assert(EltVT != MVT::i8 &&
32190          "Horizontal byte sum only makes sense for wider elements!");
32191   unsigned VecSize = VT.getSizeInBits();
32192   assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
32193 
32194   // The PSADBW instruction horizontally adds all bytes and leaves the result in
32195   // i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
32196   if (EltVT == MVT::i64) {
32197     SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
32198     MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
32199     V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
32200     return DAG.getBitcast(VT, V);
32201   }
32202 
32203   if (EltVT == MVT::i32) {
32204     // We unpack the low half and high half into i32s interleaved with zeros so
32205     // that we can use PSADBW to horizontally sum them. The most useful part of
32206     // this is that it lines up the results of two PSADBW instructions to be
32207     // two v2i64 vectors which, when concatenated, are the 4 population counts. We can
32208     // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
32209     SDValue Zeros = DAG.getConstant(0, DL, VT);
32210     SDValue V32 = DAG.getBitcast(VT, V);
32211     SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
32212     SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
32213 
32214     // Do the horizontal sums into two v2i64s.
32215     Zeros = DAG.getConstant(0, DL, ByteVecVT);
32216     MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
32217     Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
32218                       DAG.getBitcast(ByteVecVT, Low), Zeros);
32219     High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
32220                        DAG.getBitcast(ByteVecVT, High), Zeros);
32221 
32222     // Merge them together.
32223     MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
32224     V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
32225                     DAG.getBitcast(ShortVecVT, Low),
32226                     DAG.getBitcast(ShortVecVT, High));
32227 
32228     return DAG.getBitcast(VT, V);
32229   }
32230 
32231   // The only element type left is i16.
32232   assert(EltVT == MVT::i16 && "Unknown how to handle type");
32233 
32234   // To obtain pop count for each i16 element starting from the pop count for
32235   // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
32236   // right by 8. It is important to shift as i16s, as an i8 vector shift isn't
32237   // directly supported.
32238   SDValue ShifterV = DAG.getConstant(8, DL, VT);
32239   SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
32240   V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
32241                   DAG.getBitcast(ByteVecVT, V));
32242   return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
32243 }
32244 
32245 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
32246                                         const X86Subtarget &Subtarget,
32247                                         SelectionDAG &DAG) {
32248   MVT VT = Op.getSimpleValueType();
32249   MVT EltVT = VT.getVectorElementType();
32250   int NumElts = VT.getVectorNumElements();
32251   (void)EltVT;
32252   assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
32253 
32254   // Implement a lookup table in register by using an algorithm based on:
32255   // http://wm.ite.pl/articles/sse-popcount.html
32256   //
32257   // The general idea is that every byte nibble in the input vector is an index
32258   // into an in-register, pre-computed pop count table. We then split the input
32259   // vector into two new ones: (1) a vector with only the shifted-right higher
32260   // nibbles of each byte and (2) a vector with the lower nibbles (and the
32261   // higher ones masked out) of each byte. PSHUFB is used separately with both
32262   // to index the in-register table. Next, both are added and the result is an
32263   // i8 vector where each element contains the pop count of its input byte.
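  //
  // Worked example (illustrative): for the input byte 0xB3 = 0b10110011, the
  // high nibble 0xB looks up 3 and the low nibble 0x3 looks up 2 in the table
  // below, and 3 + 2 = 5 is indeed popcount(0xB3).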
32264   const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
32265                        /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
32266                        /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
32267                        /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
32268 
32269   SmallVector<SDValue, 64> LUTVec;
32270   for (int i = 0; i < NumElts; ++i)
32271     LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
32272   SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
32273   SDValue M0F = DAG.getConstant(0x0F, DL, VT);
32274 
32275   // High nibbles
32276   SDValue FourV = DAG.getConstant(4, DL, VT);
32277   SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
32278 
32279   // Low nibbles
32280   SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
32281 
32282   // The input vector is used as the shuffle mask that indexes elements into the
32283   // LUT. After counting low and high nibbles, add the vectors to obtain the
32284   // final pop count per i8 element.
32285   SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
32286   SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
32287   return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
32288 }
32289 
32290 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
32291 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
32292 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
32293                                 SelectionDAG &DAG) {
32294   MVT VT = Op.getSimpleValueType();
32295   assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
32296          "Unknown CTPOP type to handle");
32297   SDLoc DL(Op.getNode());
32298   SDValue Op0 = Op.getOperand(0);
32299 
32300   // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
32301   if (Subtarget.hasVPOPCNTDQ()) {
32302     unsigned NumElems = VT.getVectorNumElements();
32303     assert((VT.getVectorElementType() == MVT::i8 ||
32304             VT.getVectorElementType() == MVT::i16) && "Unexpected type");
32305     if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
32306       MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
32307       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
32308       Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
32309       return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
32310     }
32311   }
32312 
32313   // Decompose 256-bit ops into smaller 128-bit ops.
32314   if (VT.is256BitVector() && !Subtarget.hasInt256())
32315     return splitVectorIntUnary(Op, DAG);
32316 
32317   // Decompose 512-bit ops into smaller 256-bit ops.
32318   if (VT.is512BitVector() && !Subtarget.hasBWI())
32319     return splitVectorIntUnary(Op, DAG);
32320 
32321   // For element types greater than i8, do vXi8 pop counts and a bytesum.
32322   if (VT.getScalarType() != MVT::i8) {
32323     MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
32324     SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
32325     SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
32326     return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
32327   }
32328 
32329   // We can't use the fast LUT approach, so fall back on LegalizeDAG.
32330   if (!Subtarget.hasSSSE3())
32331     return SDValue();
32332 
32333   return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
32334 }
32335 
32336 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
32337                           SelectionDAG &DAG) {
32338   assert(Op.getSimpleValueType().isVector() &&
32339          "We only do custom lowering for vector population count.");
32340   return LowerVectorCTPOP(Op, Subtarget, DAG);
32341 }
32342 
32343 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
32344   MVT VT = Op.getSimpleValueType();
32345   SDValue In = Op.getOperand(0);
32346   SDLoc DL(Op);
32347 
32348   // For scalars, it's still beneficial to transfer to/from the SIMD unit to
32349   // perform the BITREVERSE.
32350   if (!VT.isVector()) {
32351     MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
32352     SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
32353     Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
32354     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
32355                        DAG.getIntPtrConstant(0, DL));
32356   }
32357 
32358   int NumElts = VT.getVectorNumElements();
32359   int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
32360 
32361   // Decompose 256-bit ops into smaller 128-bit ops.
32362   if (VT.is256BitVector())
32363     return splitVectorIntUnary(Op, DAG);
32364 
32365   assert(VT.is128BitVector() &&
32366          "Only 128-bit vector bitreverse lowering supported.");
32367 
32368   // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
32369   // perform the BSWAP in the shuffle.
32370   // It's best to shuffle using the second operand as this will implicitly allow
32371   // memory folding for multiple vectors.
32372   SmallVector<SDValue, 16> MaskElts;
32373   for (int i = 0; i != NumElts; ++i) {
32374     for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
32375       int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
32376       int PermuteByte = SourceByte | (2 << 5);
32377       MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
32378     }
32379   }
32380 
32381   SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
32382   SDValue Res = DAG.getBitcast(MVT::v16i8, In);
32383   Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
32384                     Res, Mask);
32385   return DAG.getBitcast(VT, Res);
32386 }
32387 
32388 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
32389                                SelectionDAG &DAG) {
32390   MVT VT = Op.getSimpleValueType();
32391 
32392   if (Subtarget.hasXOP() && !VT.is512BitVector())
32393     return LowerBITREVERSE_XOP(Op, DAG);
32394 
32395   assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
32396 
32397   SDValue In = Op.getOperand(0);
32398   SDLoc DL(Op);
32399 
32400   assert(VT.getScalarType() == MVT::i8 &&
32401          "Only byte vector BITREVERSE supported");
32402 
32403   // Split v64i8 without BWI so that we can still use the PSHUFB lowering.
32404   if (VT == MVT::v64i8 && !Subtarget.hasBWI())
32405     return splitVectorIntUnary(Op, DAG);
32406 
32407   // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
32408   if (VT == MVT::v32i8 && !Subtarget.hasInt256())
32409     return splitVectorIntUnary(Op, DAG);
32410 
32411   unsigned NumElts = VT.getVectorNumElements();
32412 
32413   // If we have GFNI, we can use GF2P8AFFINEQB to reverse the bits.
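  // (Illustrative note: the constant below encodes a flipped-identity 8x8 bit
  // matrix, so the GF(2) affine transform with a zero addend maps each byte to
  // its bit-reversed value.)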
32414   if (Subtarget.hasGFNI()) {
32415     MVT MatrixVT = MVT::getVectorVT(MVT::i64, NumElts / 8);
32416     SDValue Matrix = DAG.getConstant(0x8040201008040201ULL, DL, MatrixVT);
32417     Matrix = DAG.getBitcast(VT, Matrix);
32418     return DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, In, Matrix,
32419                        DAG.getTargetConstant(0, DL, MVT::i8));
32420   }
32421 
32422   // Perform BITREVERSE using PSHUFB lookups. Each byte is split into its two
32423   // nibbles, and a PSHUFB lookup finds the bit-reverse of each 0-15 value
32424   // (moved to the other nibble).
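  // Worked example (illustrative): for the byte 0x1E, LoLUT[0xE] = 0x70 and
  // HiLUT[0x1] = 0x08, so the OR below yields 0x78, which is 0x1E with its
  // bits reversed.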
32425   SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
32426   SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
32427   SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
32428 
32429   const int LoLUT[16] = {
32430       /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
32431       /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
32432       /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
32433       /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
32434   const int HiLUT[16] = {
32435       /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
32436       /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
32437       /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
32438       /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
32439 
32440   SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
32441   for (unsigned i = 0; i < NumElts; ++i) {
32442     LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
32443     HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
32444   }
32445 
32446   SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
32447   SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
32448   Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
32449   Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
32450   return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
32451 }
32452 
32453 static SDValue LowerPARITY(SDValue Op, const X86Subtarget &Subtarget,
32454                            SelectionDAG &DAG) {
32455   SDLoc DL(Op);
32456   SDValue X = Op.getOperand(0);
32457   MVT VT = Op.getSimpleValueType();
32458 
32459   // Special case. If the input fits in 8-bits we can use a single 8-bit TEST.
32460   if (VT == MVT::i8 ||
32461       DAG.MaskedValueIsZero(X, APInt::getBitsSetFrom(VT.getSizeInBits(), 8))) {
32462     X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
32463     SDValue Flags = DAG.getNode(X86ISD::CMP, DL, MVT::i32, X,
32464                                 DAG.getConstant(0, DL, MVT::i8));
32465     // Copy the inverse of the parity flag into a register with setcc.
32466     SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
32467     // Extend to the original type.
32468     return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
32469   }
32470 
32471   // If we have POPCNT, use the default expansion.
32472   if (Subtarget.hasPOPCNT())
32473     return SDValue();
32474 
32475   if (VT == MVT::i64) {
32476     // Xor the high and low 32 bits together using a 32-bit operation.
32477     SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
32478                              DAG.getNode(ISD::SRL, DL, MVT::i64, X,
32479                                          DAG.getConstant(32, DL, MVT::i8)));
32480     SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
32481     X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
32482   }
32483 
32484   if (VT != MVT::i16) {
32485     // Xor the high and low 16-bits together using a 32-bit operation.
32486     SDValue Hi16 = DAG.getNode(ISD::SRL, DL, MVT::i32, X,
32487                                DAG.getConstant(16, DL, MVT::i8));
32488     X = DAG.getNode(ISD::XOR, DL, MVT::i32, X, Hi16);
32489   } else {
32490     // If the input is 16-bits, we need to extend to use an i32 shift below.
32491     X = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, X);
32492   }
32493 
32494   // Finally xor the low 2 bytes together and use an 8-bit flag-setting xor.
32495   // This should allow an h-reg to be used to save a shift.
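  // For instance (a rough sketch; actual register choices vary), the tail of
  // the sequence for a 32-bit input can look like
  //   xorb %ah, %al    # parity of the remaining byte lands in PF
  //   setnp %al        # PF clear <=> odd number of set bits <=> parity of 1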
32496   SDValue Hi = DAG.getNode(
32497       ISD::TRUNCATE, DL, MVT::i8,
32498       DAG.getNode(ISD::SRL, DL, MVT::i32, X, DAG.getConstant(8, DL, MVT::i8)));
32499   SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
32500   SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
32501   SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
32502 
32503   // Copy the inverse of the parity flag into a register with setcc.
32504   SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
32505   // Extend to the original type.
32506   return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
32507 }
32508 
32509 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
32510                                         const X86Subtarget &Subtarget) {
32511   unsigned NewOpc = 0;
32512   switch (N->getOpcode()) {
32513   case ISD::ATOMIC_LOAD_ADD:
32514     NewOpc = X86ISD::LADD;
32515     break;
32516   case ISD::ATOMIC_LOAD_SUB:
32517     NewOpc = X86ISD::LSUB;
32518     break;
32519   case ISD::ATOMIC_LOAD_OR:
32520     NewOpc = X86ISD::LOR;
32521     break;
32522   case ISD::ATOMIC_LOAD_XOR:
32523     NewOpc = X86ISD::LXOR;
32524     break;
32525   case ISD::ATOMIC_LOAD_AND:
32526     NewOpc = X86ISD::LAND;
32527     break;
32528   default:
32529     llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
32530   }
32531 
32532   MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
32533 
32534   return DAG.getMemIntrinsicNode(
32535       NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
32536       {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
32537       /*MemVT=*/N->getSimpleValueType(0), MMO);
32538 }
32539 
32540 /// Lower atomic_load_ops into LOCK-prefixed operations.
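/// For example (a sketch): an `atomicrmw or ptr %p, i32 %v` whose result is
/// unused becomes an X86ISD::LOR node, i.e. a `lock or` directly on memory,
/// while an `atomicrmw add` whose result is used is kept as ATOMIC_LOAD_ADD so
/// it can be selected to XADD.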
32541 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
32542                                 const X86Subtarget &Subtarget) {
32543   AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
32544   SDValue Chain = N->getOperand(0);
32545   SDValue LHS = N->getOperand(1);
32546   SDValue RHS = N->getOperand(2);
32547   unsigned Opc = N->getOpcode();
32548   MVT VT = N->getSimpleValueType(0);
32549   SDLoc DL(N);
32550 
32551   // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
32552   // can only be lowered when the result is unused.  They should have already
32553   // been transformed into a cmpxchg loop in AtomicExpand.
32554   if (N->hasAnyUseOfValue(0)) {
32555     // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
32556     // select LXADD if LOCK_SUB can't be selected.
32557     if (Opc == ISD::ATOMIC_LOAD_SUB) {
32558       RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
32559       return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
32560                            RHS, AN->getMemOperand());
32561     }
32562     assert(Opc == ISD::ATOMIC_LOAD_ADD &&
32563            "Used AtomicRMW ops other than Add should have been expanded!");
32564     return N;
32565   }
32566 
32567   // Specialized lowering for the canonical form of an idempotent atomicrmw.
32568   // The core idea here is that since the memory location isn't actually
32569   // changing, all we need is a lowering for the *ordering* impacts of the
32570   // atomicrmw.  As such, we can choose a different operation and memory
32571   // location to minimize impact on other code.
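  // For example (a sketch): a result-less `atomicrmw or ptr %p, i32 0 seq_cst`
  // is lowered to the locked stack operation from emitLockedStackOp without
  // touching %p at all; with weaker orderings it degenerates to a MEMBARRIER.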
32572   if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
32573     // On X86, the only ordering which actually requires an instruction is
32574     // seq_cst which isn't SingleThread; everything else just needs to be
32575     // preserved during codegen and then dropped. Note that we expect (but
32576     // don't assume) that orderings other than seq_cst and acq_rel have been
32577     // canonicalized to a store or load.
32578     if (AN->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent &&
32579         AN->getSyncScopeID() == SyncScope::System) {
32580       // Prefer a locked operation against a stack location to minimize cache
32581       // traffic.  This assumes that stack locations are very likely to be
32582       // accessed only by the owning thread.
32583       SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
32584       assert(!N->hasAnyUseOfValue(0));
32585       // NOTE: The getUNDEF is needed to give something for the unused result 0.
32586       return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
32587                          DAG.getUNDEF(VT), NewChain);
32588     }
32589     // MEMBARRIER is a compiler barrier; it codegens to a no-op.
32590     SDValue NewChain = DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Chain);
32591     assert(!N->hasAnyUseOfValue(0));
32592     // NOTE: The getUNDEF is needed to give something for the unused result 0.
32593     return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
32594                        DAG.getUNDEF(VT), NewChain);
32595   }
32596 
32597   SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
32598   // RAUW the chain, but don't worry about the result, as it's unused.
32599   assert(!N->hasAnyUseOfValue(0));
32600   // NOTE: The getUNDEF is needed to give something for the unused result 0.
32601   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
32602                      DAG.getUNDEF(VT), LockOp.getValue(1));
32603 }
32604 
32605 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
32606                                  const X86Subtarget &Subtarget) {
32607   auto *Node = cast<AtomicSDNode>(Op.getNode());
32608   SDLoc dl(Node);
32609   EVT VT = Node->getMemoryVT();
32610 
32611   bool IsSeqCst =
32612       Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent;
32613   bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
32614 
32615   // If this store is not sequentially consistent and the type is legal
32616   // we can just keep it.
32617   if (!IsSeqCst && IsTypeLegal)
32618     return Op;
32619 
32620   if (VT == MVT::i64 && !IsTypeLegal) {
32621     // For illegal i64 atomic_stores, we can try to use MOVQ or MOVLPS if SSE
32622     // is enabled.
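    // For example (a sketch): on a 32-bit target with SSE2 the value is moved
    // into an XMM register and stored with a single 64-bit vector-extract
    // store (roughly a MOVQ/MOVLPS), followed by the locked stack operation
    // when the store is seq_cst; without SSE it is bounced through an x87
    // register via FILD/FIST instead.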
32623     bool NoImplicitFloatOps =
32624         DAG.getMachineFunction().getFunction().hasFnAttribute(
32625             Attribute::NoImplicitFloat);
32626     if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
32627       SDValue Chain;
32628       if (Subtarget.hasSSE1()) {
32629         SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
32630                                        Node->getOperand(2));
32631         MVT StVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
32632         SclToVec = DAG.getBitcast(StVT, SclToVec);
32633         SDVTList Tys = DAG.getVTList(MVT::Other);
32634         SDValue Ops[] = {Node->getChain(), SclToVec, Node->getBasePtr()};
32635         Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops,
32636                                         MVT::i64, Node->getMemOperand());
32637       } else if (Subtarget.hasX87()) {
32638         // First load this into an 80-bit X87 register using a stack temporary.
32639         // This will put the whole integer into the significand.
32640         SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
32641         int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
32642         MachinePointerInfo MPI =
32643             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
32644         Chain =
32645             DAG.getStore(Node->getChain(), dl, Node->getOperand(2), StackPtr,
32646                          MPI, MaybeAlign(), MachineMemOperand::MOStore);
32647         SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
32648         SDValue LdOps[] = {Chain, StackPtr};
32649         SDValue Value = DAG.getMemIntrinsicNode(
32650             X86ISD::FILD, dl, Tys, LdOps, MVT::i64, MPI,
32651             /*Align*/ std::nullopt, MachineMemOperand::MOLoad);
32652         Chain = Value.getValue(1);
32653 
32654         // Now use an FIST to do the atomic store.
32655         SDValue StoreOps[] = {Chain, Value, Node->getBasePtr()};
32656         Chain =
32657             DAG.getMemIntrinsicNode(X86ISD::FIST, dl, DAG.getVTList(MVT::Other),
32658                                     StoreOps, MVT::i64, Node->getMemOperand());
32659       }
32660 
32661       if (Chain) {
32662         // If this is a sequentially consistent store, also emit an appropriate
32663         // barrier.
32664         if (IsSeqCst)
32665           Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
32666 
32667         return Chain;
32668       }
32669     }
32670   }
32671 
32672   // Convert seq_cst store -> xchg
32673   // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
32674   // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
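  // For example (a sketch): `store atomic i32 %v, ptr %p seq_cst` becomes an
  // XCHG, whose implicit lock prefix also provides the required full barrier.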
32675   SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
32676                                Node->getMemoryVT(),
32677                                Node->getOperand(0),
32678                                Node->getOperand(1), Node->getOperand(2),
32679                                Node->getMemOperand());
32680   return Swap.getValue(1);
32681 }
32682 
32683 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
32684   SDNode *N = Op.getNode();
32685   MVT VT = N->getSimpleValueType(0);
32686   unsigned Opc = Op.getOpcode();
32687 
32688   // Let legalize expand this if it isn't a legal type yet.
32689   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
32690     return SDValue();
32691 
32692   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
32693   SDLoc DL(N);
32694 
32695   // Set the carry flag.
32696   SDValue Carry = Op.getOperand(2);
32697   EVT CarryVT = Carry.getValueType();
32698   Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
32699                       Carry, DAG.getAllOnesConstant(DL, CarryVT));
32700 
32701   bool IsAdd = Opc == ISD::ADDCARRY || Opc == ISD::SADDO_CARRY;
32702   SDValue Sum = DAG.getNode(IsAdd ? X86ISD::ADC : X86ISD::SBB, DL, VTs,
32703                             Op.getOperand(0), Op.getOperand(1),
32704                             Carry.getValue(1));
32705 
32706   bool IsSigned = Opc == ISD::SADDO_CARRY || Opc == ISD::SSUBO_CARRY;
32707   SDValue SetCC = getSETCC(IsSigned ? X86::COND_O : X86::COND_B,
32708                            Sum.getValue(1), DL, DAG);
32709   if (N->getValueType(1) == MVT::i1)
32710     SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
32711 
32712   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
32713 }
32714 
32715 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
32716                             SelectionDAG &DAG) {
32717   assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
32718 
32719   // For MacOSX, we want to call an alternative entry point: __sincos_stret,
32720   // which returns the values as { float, float } (in XMM0) or
32721   // { double, double } (which is returned in XMM0, XMM1).
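  // For example (a sketch of the f32 case handled at the end of this
  // function): the return is modeled as a <4 x float> in XMM0, from which
  // element 0 (sin) and element 1 (cos) are extracted below.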
32722   SDLoc dl(Op);
32723   SDValue Arg = Op.getOperand(0);
32724   EVT ArgVT = Arg.getValueType();
32725   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
32726 
32727   TargetLowering::ArgListTy Args;
32728   TargetLowering::ArgListEntry Entry;
32729 
32730   Entry.Node = Arg;
32731   Entry.Ty = ArgTy;
32732   Entry.IsSExt = false;
32733   Entry.IsZExt = false;
32734   Args.push_back(Entry);
32735 
32736   bool isF64 = ArgVT == MVT::f64;
32737   // Only optimize x86_64 for now. i386 is a bit messy. For f32,
32738   // the small struct {f32, f32} is returned in (eax, edx). For f64,
32739   // the results are returned via SRet in memory.
32740   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
32741   RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
32742   const char *LibcallName = TLI.getLibcallName(LC);
32743   SDValue Callee =
32744       DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
32745 
32746   Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
32747                       : (Type *)FixedVectorType::get(ArgTy, 4);
32748 
32749   TargetLowering::CallLoweringInfo CLI(DAG);
32750   CLI.setDebugLoc(dl)
32751       .setChain(DAG.getEntryNode())
32752       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
32753 
32754   std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
32755 
32756   if (isF64)
32757     // Returned in xmm0 and xmm1.
32758     return CallResult.first;
32759 
32760   // Returned in bits 0:31 and 32:63 of xmm0.
32761   SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
32762                                CallResult.first, DAG.getIntPtrConstant(0, dl));
32763   SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
32764                                CallResult.first, DAG.getIntPtrConstant(1, dl));
32765   SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
32766   return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
32767 }
32768 
32769 /// Widen a vector input to a vector of NVT.  The
32770 /// input vector must have the same element type as NVT.
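/// For example (illustrative): widening a v2i64 value to v8i64 inserts it at
/// element 0 of an undef vector (or a zero vector when FillWithZeroes is set);
/// constant build_vectors are instead widened by appending fill elements.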
32771 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
32772                             bool FillWithZeroes = false) {
32773   // Check if InOp already has the right width.
32774   MVT InVT = InOp.getSimpleValueType();
32775   if (InVT == NVT)
32776     return InOp;
32777 
32778   if (InOp.isUndef())
32779     return DAG.getUNDEF(NVT);
32780 
32781   assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
32782          "input and widen element type must match");
32783 
32784   unsigned InNumElts = InVT.getVectorNumElements();
32785   unsigned WidenNumElts = NVT.getVectorNumElements();
32786   assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
32787          "Unexpected request for vector widening");
32788 
32789   SDLoc dl(InOp);
32790   if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
32791       InOp.getNumOperands() == 2) {
32792     SDValue N1 = InOp.getOperand(1);
32793     if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
32794         N1.isUndef()) {
32795       InOp = InOp.getOperand(0);
32796       InVT = InOp.getSimpleValueType();
32797       InNumElts = InVT.getVectorNumElements();
32798     }
32799   }
32800   if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
32801       ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
32802     SmallVector<SDValue, 16> Ops;
32803     for (unsigned i = 0; i < InNumElts; ++i)
32804       Ops.push_back(InOp.getOperand(i));
32805 
32806     EVT EltVT = InOp.getOperand(0).getValueType();
32807 
32808     SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
32809       DAG.getUNDEF(EltVT);
32810     for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
32811       Ops.push_back(FillVal);
32812     return DAG.getBuildVector(NVT, dl, Ops);
32813   }
32814   SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
32815     DAG.getUNDEF(NVT);
32816   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
32817                      InOp, DAG.getIntPtrConstant(0, dl));
32818 }
32819 
32820 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
32821                              SelectionDAG &DAG) {
32822   assert(Subtarget.hasAVX512() &&
32823          "MGATHER/MSCATTER are supported on AVX-512 arch only");
32824 
32825   MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
32826   SDValue Src = N->getValue();
32827   MVT VT = Src.getSimpleValueType();
32828   assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
32829   SDLoc dl(Op);
32830 
32831   SDValue Scale = N->getScale();
32832   SDValue Index = N->getIndex();
32833   SDValue Mask = N->getMask();
32834   SDValue Chain = N->getChain();
32835   SDValue BasePtr = N->getBasePtr();
32836 
32837   if (VT == MVT::v2f32 || VT == MVT::v2i32) {
32838     assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
32839     // If the index is v2i64 and we have VLX we can use xmm for data and index.
32840     if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
32841       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
32842       EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
32843       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
32844       SDVTList VTs = DAG.getVTList(MVT::Other);
32845       SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
32846       return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
32847                                      N->getMemoryVT(), N->getMemOperand());
32848     }
32849     return SDValue();
32850   }
32851 
32852   MVT IndexVT = Index.getSimpleValueType();
32853 
32854   // If the index is v2i32, we're being called by type legalization and we
32855   // should just let the default handling take care of it.
32856   if (IndexVT == MVT::v2i32)
32857     return SDValue();
32858 
32859   // If we don't have VLX and neither the data nor the index is 512 bits, we
32860   // need to widen until one is.
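  // For example (a sketch): v4i32 data with a v4i64 index is widened by a
  // factor of 2 to v8i32 data with a v8i64 index, and the added mask lanes are
  // zeroed so the extra elements are never actually stored.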
32861   if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
32862       !Index.getSimpleValueType().is512BitVector()) {
32863     // Determine how much we need to widen by to get a 512-bit type.
32864     unsigned Factor = std::min(512/VT.getSizeInBits(),
32865                                512/IndexVT.getSizeInBits());
32866     unsigned NumElts = VT.getVectorNumElements() * Factor;
32867 
32868     VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
32869     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
32870     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
32871 
32872     Src = ExtendToType(Src, VT, DAG);
32873     Index = ExtendToType(Index, IndexVT, DAG);
32874     Mask = ExtendToType(Mask, MaskVT, DAG, true);
32875   }
32876 
32877   SDVTList VTs = DAG.getVTList(MVT::Other);
32878   SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
32879   return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
32880                                  N->getMemoryVT(), N->getMemOperand());
32881 }
32882 
32883 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
32884                           SelectionDAG &DAG) {
32885 
32886   MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
32887   MVT VT = Op.getSimpleValueType();
32888   MVT ScalarVT = VT.getScalarType();
32889   SDValue Mask = N->getMask();
32890   MVT MaskVT = Mask.getSimpleValueType();
32891   SDValue PassThru = N->getPassThru();
32892   SDLoc dl(Op);
32893 
32894   // Handle AVX masked loads which don't support passthru other than 0.
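  // For example (a sketch): an AVX vmaskmov-style load always yields zero in
  // the disabled lanes, so a non-zero, non-undef passthru is handled by
  // loading with a zero passthru and blending the passthru back in with a
  // VSELECT below.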
32895   if (MaskVT.getVectorElementType() != MVT::i1) {
32896     // We also allow undef in the isel pattern.
32897     if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
32898       return Op;
32899 
32900     SDValue NewLoad = DAG.getMaskedLoad(
32901         VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
32902         getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
32903         N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
32904         N->isExpandingLoad());
32905     // Emit a blend.
32906     SDValue Select = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
32907     return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
32908   }
32909 
32910   assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
32911          "Expanding masked load is supported on AVX-512 target only!");
32912 
32913   assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
32914          "Expanding masked load is supported for 32 and 64-bit types only!");
32915 
32916   assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
32917          "Cannot lower masked load op.");
32918 
32919   assert((ScalarVT.getSizeInBits() >= 32 ||
32920           (Subtarget.hasBWI() &&
32921               (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
32922          "Unsupported masked load op.");
32923 
32924   // This operation is legal for targets with VLX, but without
32925   // VLX the vector should be widened to 512 bit
32926   unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
32927   MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
32928   PassThru = ExtendToType(PassThru, WideDataVT, DAG);
32929 
32930   // Mask element has to be i1.
32931   assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
32932          "Unexpected mask type");
32933 
32934   MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
32935 
32936   Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
32937   SDValue NewLoad = DAG.getMaskedLoad(
32938       WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
32939       PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
32940       N->getExtensionType(), N->isExpandingLoad());
32941 
32942   SDValue Extract =
32943       DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, NewLoad.getValue(0),
32944                   DAG.getIntPtrConstant(0, dl));
32945   SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
32946   return DAG.getMergeValues(RetOps, dl);
32947 }
32948 
32949 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
32950                            SelectionDAG &DAG) {
32951   MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
32952   SDValue DataToStore = N->getValue();
32953   MVT VT = DataToStore.getSimpleValueType();
32954   MVT ScalarVT = VT.getScalarType();
32955   SDValue Mask = N->getMask();
32956   SDLoc dl(Op);
32957 
32958   assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
32959          "Compressing masked store is supported on AVX-512 target only!");
32960 
32961   assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
32962          "Compressing masked store is supported for 32 and 64-bit types only!");
32963 
32964   assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
32965          "Cannot lower masked store op.");
32966 
32967   assert((ScalarVT.getSizeInBits() >= 32 ||
32968           (Subtarget.hasBWI() &&
32969               (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
32970           "Unsupported masked store op.");
32971 
32972   // This operation is legal for targets with VLX, but without
32973   // VLX the vector should be widened to 512 bits.
32974   unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
32975   MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
32976 
32977   // Mask element has to be i1.
32978   assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
32979          "Unexpected mask type");
32980 
32981   MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
32982 
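        // Widen the data (the padded lanes are never written) and pad the mask
        // with zeroes so the extra lanes are not stored.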
32983   DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
32984   Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
32985   return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
32986                             N->getOffset(), Mask, N->getMemoryVT(),
32987                             N->getMemOperand(), N->getAddressingMode(),
32988                             N->isTruncatingStore(), N->isCompressingStore());
32989 }
32990 
32991 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
32992                             SelectionDAG &DAG) {
32993   assert(Subtarget.hasAVX2() &&
32994          "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
32995 
32996   MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
32997   SDLoc dl(Op);
32998   MVT VT = Op.getSimpleValueType();
32999   SDValue Index = N->getIndex();
33000   SDValue Mask = N->getMask();
33001   SDValue PassThru = N->getPassThru();
33002   MVT IndexVT = Index.getSimpleValueType();
33003 
33004   assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
33005 
33006   // If the index is v2i32, we're being called by type legalization.
33007   if (IndexVT == MVT::v2i32)
33008     return SDValue();
33009 
33010   // If we don't have VLX and neither the passthru nor the index is 512 bits,
33011   // we need to widen until one is.
33012   MVT OrigVT = VT;
33013   if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
33014       !IndexVT.is512BitVector()) {
33015     // Determine how much we need to widen by to get a 512-bit type.
33016     unsigned Factor = std::min(512/VT.getSizeInBits(),
33017                                512/IndexVT.getSizeInBits());
33018 
33019     unsigned NumElts = VT.getVectorNumElements() * Factor;
33020 
33021     VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
33022     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
33023     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
33024 
33025     PassThru = ExtendToType(PassThru, VT, DAG);
33026     Index = ExtendToType(Index, IndexVT, DAG);
33027     Mask = ExtendToType(Mask, MaskVT, DAG, true);
33028   }
33029 
33030   // Break dependency on the data register.
33031   if (PassThru.isUndef())
33032     PassThru = getZeroVector(VT, Subtarget, DAG, dl);
33033 
33034   SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
33035                     N->getScale() };
33036   SDValue NewGather = DAG.getMemIntrinsicNode(
33037       X86ISD::MGATHER, dl, DAG.getVTList(VT, MVT::Other), Ops, N->getMemoryVT(),
33038       N->getMemOperand());
33039   SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
33040                                 NewGather, DAG.getIntPtrConstant(0, dl));
33041   return DAG.getMergeValues({Extract, NewGather.getValue(1)}, dl);
33042 }
33043 
33044 static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
33045   SDLoc dl(Op);
33046   SDValue Src = Op.getOperand(0);
33047   MVT DstVT = Op.getSimpleValueType();
33048 
33049   AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
33050   unsigned SrcAS = N->getSrcAddressSpace();
33051 
33052   assert(SrcAS != N->getDestAddressSpace() &&
33053          "addrspacecast must be between different address spaces");
33054 
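        // On x86 an addrspacecast only changes the pointer width: a 32-bit
        // unsigned pointer (ptr32_uptr) zero-extends to 64 bits, any other
        // 32-bit pointer sign-extends, and a 64-bit pointer truncates to 32 bits.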
33055   if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
33056     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
33057   } else if (DstVT == MVT::i64) {
33058     Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
33059   } else if (DstVT == MVT::i32) {
33060     Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
33061   } else {
33062     report_fatal_error("Bad address space in addrspacecast");
33063   }
33064   return Op;
33065 }
33066 
33067 SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
33068                                               SelectionDAG &DAG) const {
33069   // TODO: Eventually, the lowering of these nodes should be informed by or
33070   // deferred to the GC strategy for the function in which they appear. For
33071   // now, however, they must be lowered to something. Since they are logically
33072   // no-ops in the case of a null GC strategy (or a GC strategy which does not
33073   // require special handling for these nodes), lower them as literal NOOPs for
33074   // the time being.
33075   SmallVector<SDValue, 2> Ops;
33076   Ops.push_back(Op.getOperand(0));
33077   if (Op->getGluedNode())
33078     Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
33079 
33080   SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
33081   return SDValue(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
33082 }
33083 
33084 // Custom split CVTPS2PH with wide types.
33085 static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {
33086   SDLoc dl(Op);
33087   EVT VT = Op.getValueType();
33088   SDValue Lo, Hi;
33089   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
33090   EVT LoVT, HiVT;
33091   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
33092   SDValue RC = Op.getOperand(1);
33093   Lo = DAG.getNode(X86ISD::CVTPS2PH, dl, LoVT, Lo, RC);
33094   Hi = DAG.getNode(X86ISD::CVTPS2PH, dl, HiVT, Hi, RC);
33095   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
33096 }
33097 
33098 static SDValue LowerPREFETCH(SDValue Op, const X86Subtarget &Subtarget,
33099                              SelectionDAG &DAG) {
33100   unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
33101 
33102   // We don't support non-data prefetch without PREFETCHI.
33103   // Just preserve the chain.
33104   if (!IsData && !Subtarget.hasPREFETCHI())
33105     return Op.getOperand(0);
33106 
33107   return Op;
33108 }
33109 
33110 static StringRef getInstrStrFromOpNo(const SmallVectorImpl<StringRef> &AsmStrs,
33111                                      unsigned OpNo) {
33112   const APInt Operand(32, OpNo);
33113   std::string OpNoStr = llvm::toString(Operand, 10, false);
33114   std::string Str(" $");
33115 
33116   std::string OpNoStr1(Str + OpNoStr);             // e.g. " $1" (OpNo=1)
33117   std::string OpNoStr2(Str + "{" + OpNoStr + ":"); // With modifier, e.g. ${1:P}
33118 
33119   auto I = StringRef::npos;
33120   for (auto &AsmStr : AsmStrs) {
33121     // Match the OpNo string exactly so we don't match a sub-string,
33122     // e.g. "$12" contains "$1".
33123     if (AsmStr.endswith(OpNoStr1))
33124       I = AsmStr.size() - OpNoStr1.size();
33125 
33126     // Get the index of operand in AsmStr.
33127     if (I == StringRef::npos)
33128       I = AsmStr.find(OpNoStr1 + ",");
33129     if (I == StringRef::npos)
33130       I = AsmStr.find(OpNoStr2);
33131 
33132     if (I == StringRef::npos)
33133       continue;
33134 
33135     assert(I > 0 && "Unexpected inline asm string!");
33136     // Remove the operand string and label (if it exists).
33137     // For example:
33138     // ".L__MSASMLABEL_.${:uid}__l:call dword ptr ${0:P}"
33139     // ==>
33140     // ".L__MSASMLABEL_.${:uid}__l:call dword ptr "
33141     // ==>
33142     // "call dword ptr "
33143     auto TmpStr = AsmStr.substr(0, I);
33144     I = TmpStr.rfind(':');
33145     if (I == StringRef::npos)
33146       return TmpStr;
33147 
33148     assert(I < TmpStr.size() && "Unexpected inline asm string!");
33149     auto Asm = TmpStr.drop_front(I + 1);
33150     return Asm;
33151   }
33152 
33153   return StringRef();
33154 }
33155 
33156 bool X86TargetLowering::isInlineAsmTargetBranch(
33157     const SmallVectorImpl<StringRef> &AsmStrs, unsigned OpNo) const {
33158   StringRef InstrStr = getInstrStrFromOpNo(AsmStrs, OpNo);
33159 
33160   if (InstrStr.contains("call"))
33161     return true;
33162 
33163   return false;
33164 }
33165 
33166 /// Provide custom lowering hooks for some operations.
33167 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
33168   switch (Op.getOpcode()) {
33169   default: llvm_unreachable("Should not custom lower this!");
33170   case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, Subtarget, DAG);
33171   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
33172     return LowerCMP_SWAP(Op, Subtarget, DAG);
33173   case ISD::CTPOP:              return LowerCTPOP(Op, Subtarget, DAG);
33174   case ISD::ATOMIC_LOAD_ADD:
33175   case ISD::ATOMIC_LOAD_SUB:
33176   case ISD::ATOMIC_LOAD_OR:
33177   case ISD::ATOMIC_LOAD_XOR:
33178   case ISD::ATOMIC_LOAD_AND:    return lowerAtomicArith(Op, DAG, Subtarget);
33179   case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG, Subtarget);
33180   case ISD::BITREVERSE:         return LowerBITREVERSE(Op, Subtarget, DAG);
33181   case ISD::PARITY:             return LowerPARITY(Op, Subtarget, DAG);
33182   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
33183   case ISD::CONCAT_VECTORS:     return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
33184   case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
33185   case ISD::VSELECT:            return LowerVSELECT(Op, DAG);
33186   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
33187   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
33188   case ISD::INSERT_SUBVECTOR:   return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
33189   case ISD::EXTRACT_SUBVECTOR:  return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
33190   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
33191   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
33192   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
33193   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
33194   case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
33195   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
33196   case ISD::SHL_PARTS:
33197   case ISD::SRA_PARTS:
33198   case ISD::SRL_PARTS:          return LowerShiftParts(Op, DAG);
33199   case ISD::FSHL:
33200   case ISD::FSHR:               return LowerFunnelShift(Op, Subtarget, DAG);
33201   case ISD::STRICT_SINT_TO_FP:
33202   case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
33203   case ISD::STRICT_UINT_TO_FP:
33204   case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG);
33205   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
33206   case ISD::ZERO_EXTEND:        return LowerZERO_EXTEND(Op, Subtarget, DAG);
33207   case ISD::SIGN_EXTEND:        return LowerSIGN_EXTEND(Op, Subtarget, DAG);
33208   case ISD::ANY_EXTEND:         return LowerANY_EXTEND(Op, Subtarget, DAG);
33209   case ISD::ZERO_EXTEND_VECTOR_INREG:
33210   case ISD::SIGN_EXTEND_VECTOR_INREG:
33211     return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
33212   case ISD::FP_TO_SINT:
33213   case ISD::STRICT_FP_TO_SINT:
33214   case ISD::FP_TO_UINT:
33215   case ISD::STRICT_FP_TO_UINT:  return LowerFP_TO_INT(Op, DAG);
33216   case ISD::FP_TO_SINT_SAT:
33217   case ISD::FP_TO_UINT_SAT:     return LowerFP_TO_INT_SAT(Op, DAG);
33218   case ISD::FP_EXTEND:
33219   case ISD::STRICT_FP_EXTEND:   return LowerFP_EXTEND(Op, DAG);
33220   case ISD::FP_ROUND:
33221   case ISD::STRICT_FP_ROUND:    return LowerFP_ROUND(Op, DAG);
33222   case ISD::FP16_TO_FP:
33223   case ISD::STRICT_FP16_TO_FP:  return LowerFP16_TO_FP(Op, DAG);
33224   case ISD::FP_TO_FP16:
33225   case ISD::STRICT_FP_TO_FP16:  return LowerFP_TO_FP16(Op, DAG);
33226   case ISD::FP_TO_BF16:         return LowerFP_TO_BF16(Op, DAG);
33227   case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
33228   case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
33229   case ISD::FADD:
33230   case ISD::FSUB:               return lowerFaddFsub(Op, DAG);
33231   case ISD::FROUND:             return LowerFROUND(Op, DAG);
33232   case ISD::FABS:
33233   case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
33234   case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
33235   case ISD::FGETSIGN:           return LowerFGETSIGN(Op, DAG);
33236   case ISD::LRINT:
33237   case ISD::LLRINT:             return LowerLRINT_LLRINT(Op, DAG);
33238   case ISD::SETCC:
33239   case ISD::STRICT_FSETCC:
33240   case ISD::STRICT_FSETCCS:     return LowerSETCC(Op, DAG);
33241   case ISD::SETCCCARRY:         return LowerSETCCCARRY(Op, DAG);
33242   case ISD::SELECT:             return LowerSELECT(Op, DAG);
33243   case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
33244   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
33245   case ISD::VASTART:            return LowerVASTART(Op, DAG);
33246   case ISD::VAARG:              return LowerVAARG(Op, DAG);
33247   case ISD::VACOPY:             return LowerVACOPY(Op, Subtarget, DAG);
33248   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
33249   case ISD::INTRINSIC_VOID:
33250   case ISD::INTRINSIC_W_CHAIN:  return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
33251   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
33252   case ISD::ADDROFRETURNADDR:   return LowerADDROFRETURNADDR(Op, DAG);
33253   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
33254   case ISD::FRAME_TO_ARGS_OFFSET:
33255                                 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
33256   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
33257   case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
33258   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
33259   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
33260   case ISD::EH_SJLJ_SETUP_DISPATCH:
33261     return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
33262   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
33263   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
33264   case ISD::GET_ROUNDING:       return LowerGET_ROUNDING(Op, DAG);
33265   case ISD::SET_ROUNDING:       return LowerSET_ROUNDING(Op, DAG);
33266   case ISD::CTLZ:
33267   case ISD::CTLZ_ZERO_UNDEF:    return LowerCTLZ(Op, Subtarget, DAG);
33268   case ISD::CTTZ:
33269   case ISD::CTTZ_ZERO_UNDEF:    return LowerCTTZ(Op, Subtarget, DAG);
33270   case ISD::MUL:                return LowerMUL(Op, Subtarget, DAG);
33271   case ISD::MULHS:
33272   case ISD::MULHU:              return LowerMULH(Op, Subtarget, DAG);
33273   case ISD::ROTL:
33274   case ISD::ROTR:               return LowerRotate(Op, Subtarget, DAG);
33275   case ISD::SRA:
33276   case ISD::SRL:
33277   case ISD::SHL:                return LowerShift(Op, Subtarget, DAG);
33278   case ISD::SADDO:
33279   case ISD::UADDO:
33280   case ISD::SSUBO:
33281   case ISD::USUBO:              return LowerXALUO(Op, DAG);
33282   case ISD::SMULO:
33283   case ISD::UMULO:              return LowerMULO(Op, Subtarget, DAG);
33284   case ISD::READCYCLECOUNTER:   return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
33285   case ISD::BITCAST:            return LowerBITCAST(Op, Subtarget, DAG);
33286   case ISD::SADDO_CARRY:
33287   case ISD::SSUBO_CARRY:
33288   case ISD::ADDCARRY:
33289   case ISD::SUBCARRY:           return LowerADDSUBCARRY(Op, DAG);
33290   case ISD::ADD:
33291   case ISD::SUB:                return lowerAddSub(Op, DAG, Subtarget);
33292   case ISD::UADDSAT:
33293   case ISD::SADDSAT:
33294   case ISD::USUBSAT:
33295   case ISD::SSUBSAT:            return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
33296   case ISD::SMAX:
33297   case ISD::SMIN:
33298   case ISD::UMAX:
33299   case ISD::UMIN:               return LowerMINMAX(Op, Subtarget, DAG);
33300   case ISD::ABS:                return LowerABS(Op, Subtarget, DAG);
33301   case ISD::AVGCEILU:           return LowerAVG(Op, Subtarget, DAG);
33302   case ISD::FSINCOS:            return LowerFSINCOS(Op, Subtarget, DAG);
33303   case ISD::MLOAD:              return LowerMLOAD(Op, Subtarget, DAG);
33304   case ISD::MSTORE:             return LowerMSTORE(Op, Subtarget, DAG);
33305   case ISD::MGATHER:            return LowerMGATHER(Op, Subtarget, DAG);
33306   case ISD::MSCATTER:           return LowerMSCATTER(Op, Subtarget, DAG);
33307   case ISD::GC_TRANSITION_START:
33308   case ISD::GC_TRANSITION_END:  return LowerGC_TRANSITION(Op, DAG);
33309   case ISD::ADDRSPACECAST:      return LowerADDRSPACECAST(Op, DAG);
33310   case X86ISD::CVTPS2PH:        return LowerCVTPS2PH(Op, DAG);
33311   case ISD::PREFETCH:           return LowerPREFETCH(Op, Subtarget, DAG);
33312   }
33313 }
33314 
33315 /// Replace a node with an illegal result type with a new node built out of
33316 /// custom code.
33317 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
33318                                            SmallVectorImpl<SDValue>&Results,
33319                                            SelectionDAG &DAG) const {
33320   SDLoc dl(N);
33321   switch (N->getOpcode()) {
33322   default:
33323 #ifndef NDEBUG
33324     dbgs() << "ReplaceNodeResults: ";
33325     N->dump(&DAG);
33326 #endif
33327     llvm_unreachable("Do not know how to custom type legalize this operation!");
33328   case X86ISD::CVTPH2PS: {
33329     EVT VT = N->getValueType(0);
33330     SDValue Lo, Hi;
33331     std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
33332     EVT LoVT, HiVT;
33333     std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
33334     Lo = DAG.getNode(X86ISD::CVTPH2PS, dl, LoVT, Lo);
33335     Hi = DAG.getNode(X86ISD::CVTPH2PS, dl, HiVT, Hi);
33336     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
33337     Results.push_back(Res);
33338     return;
33339   }
33340   case X86ISD::STRICT_CVTPH2PS: {
33341     EVT VT = N->getValueType(0);
33342     SDValue Lo, Hi;
33343     std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 1);
33344     EVT LoVT, HiVT;
33345     std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
33346     Lo = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {LoVT, MVT::Other},
33347                      {N->getOperand(0), Lo});
33348     Hi = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {HiVT, MVT::Other},
33349                      {N->getOperand(0), Hi});
33350     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
33351                                 Lo.getValue(1), Hi.getValue(1));
33352     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
33353     Results.push_back(Res);
33354     Results.push_back(Chain);
33355     return;
33356   }
33357   case X86ISD::CVTPS2PH:
33358     Results.push_back(LowerCVTPS2PH(SDValue(N, 0), DAG));
33359     return;
33360   case ISD::CTPOP: {
33361     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
33362     // Use a v2i64 if possible.
33363     bool NoImplicitFloatOps =
33364         DAG.getMachineFunction().getFunction().hasFnAttribute(
33365             Attribute::NoImplicitFloat);
33366     if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
33367       SDValue Wide =
33368           DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
33369       Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
33370       // Bit count should fit in 32-bits, extract it as that and then zero
33371       // extend to i64. Otherwise we end up extracting bits 63:32 separately.
33372       Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
33373       Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
33374                          DAG.getIntPtrConstant(0, dl));
33375       Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
33376       Results.push_back(Wide);
33377     }
33378     return;
33379   }
33380   case ISD::MUL: {
33381     EVT VT = N->getValueType(0);
33382     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33383            VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
33384     // Pre-promote these to vXi16 to avoid op legalization thinking all 16
33385     // elements are needed.
33386     MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
33387     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
33388     SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
33389     SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
33390     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
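          // Widen the truncated result back out to the legal v16i8 type by
          // padding with undef; only the low VT elements are meaningful.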
33391     unsigned NumConcats = 16 / VT.getVectorNumElements();
33392     SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
33393     ConcatOps[0] = Res;
33394     Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
33395     Results.push_back(Res);
33396     return;
33397   }
33398   case ISD::SMULO:
33399   case ISD::UMULO: {
33400     EVT VT = N->getValueType(0);
33401     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33402            VT == MVT::v2i32 && "Unexpected VT!");
33403     bool IsSigned = N->getOpcode() == ISD::SMULO;
33404     unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
33405     SDValue Op0 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(0));
33406     SDValue Op1 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(1));
33407     SDValue Res = DAG.getNode(ISD::MUL, dl, MVT::v2i64, Op0, Op1);
33408     // Extract the high 32 bits from each result using PSHUFD.
33409     // TODO: Could use SRL+TRUNCATE but that doesn't become a PSHUFD.
33410     SDValue Hi = DAG.getBitcast(MVT::v4i32, Res);
33411     Hi = DAG.getVectorShuffle(MVT::v4i32, dl, Hi, Hi, {1, 3, -1, -1});
33412     Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Hi,
33413                      DAG.getIntPtrConstant(0, dl));
33414 
33415     // Truncate the low bits of the result. This will become PSHUFD.
33416     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
33417 
33418     SDValue HiCmp;
33419     if (IsSigned) {
33420       // SMULO overflows if the high bits don't match the sign of the low.
33421       HiCmp = DAG.getNode(ISD::SRA, dl, VT, Res, DAG.getConstant(31, dl, VT));
33422     } else {
33423       // UMULO overflows if the high bits are non-zero.
33424       HiCmp = DAG.getConstant(0, dl, VT);
33425     }
33426     SDValue Ovf = DAG.getSetCC(dl, N->getValueType(1), Hi, HiCmp, ISD::SETNE);
33427 
33428     // Widen the result by padding with undef.
33429     Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
33430                       DAG.getUNDEF(VT));
33431     Results.push_back(Res);
33432     Results.push_back(Ovf);
33433     return;
33434   }
33435   case X86ISD::VPMADDWD: {
33436     // Legalize types for X86ISD::VPMADDWD by widening.
33437     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
33438 
33439     EVT VT = N->getValueType(0);
33440     EVT InVT = N->getOperand(0).getValueType();
33441     assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
33442            "Expected a VT that divides into 128 bits.");
33443     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33444            "Unexpected type action!");
33445     unsigned NumConcat = 128 / InVT.getSizeInBits();
33446 
33447     EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
33448                                     InVT.getVectorElementType(),
33449                                     NumConcat * InVT.getVectorNumElements());
33450     EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
33451                                   VT.getVectorElementType(),
33452                                   NumConcat * VT.getVectorNumElements());
33453 
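          // Pad both inputs out to 128 bits with undef; only the low elements
          // feed the original-width result.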
33454     SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
33455     Ops[0] = N->getOperand(0);
33456     SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
33457     Ops[0] = N->getOperand(1);
33458     SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
33459 
33460     SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
33461     Results.push_back(Res);
33462     return;
33463   }
33464   // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
33465   case X86ISD::FMINC:
33466   case X86ISD::FMIN:
33467   case X86ISD::FMAXC:
33468   case X86ISD::FMAX: {
33469     EVT VT = N->getValueType(0);
33470     assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
33471     SDValue UNDEF = DAG.getUNDEF(VT);
33472     SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
33473                               N->getOperand(0), UNDEF);
33474     SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
33475                               N->getOperand(1), UNDEF);
33476     Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
33477     return;
33478   }
33479   case ISD::SDIV:
33480   case ISD::UDIV:
33481   case ISD::SREM:
33482   case ISD::UREM: {
33483     EVT VT = N->getValueType(0);
33484     if (VT.isVector()) {
33485       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33486              "Unexpected type action!");
33487       // If this RHS is a constant splat vector we can widen this and let
33488       // division/remainder by constant optimize it.
33489       // TODO: Can we do something for non-splat?
33490       APInt SplatVal;
33491       if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
33492         unsigned NumConcats = 128 / VT.getSizeInBits();
33493         SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
33494         Ops0[0] = N->getOperand(0);
33495         EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
33496         SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
33497         SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
33498         SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
33499         Results.push_back(Res);
33500       }
33501       return;
33502     }
33503 
33504     SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
33505     Results.push_back(V);
33506     return;
33507   }
33508   case ISD::TRUNCATE: {
33509     MVT VT = N->getSimpleValueType(0);
33510     if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
33511       return;
33512 
33513     // The generic legalizer will try to widen the input type to the same
33514     // number of elements as the widened result type. But this isn't always
33515     // the best thing so do some custom legalization to avoid some cases.
33516     MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
33517     SDValue In = N->getOperand(0);
33518     EVT InVT = In.getValueType();
33519 
33520     unsigned InBits = InVT.getSizeInBits();
33521     if (128 % InBits == 0) {
33522       // 128-bit and smaller inputs should avoid the truncate altogether and
33523       // just use a build_vector that will become a shuffle.
33524       // TODO: Widen and use a shuffle directly?
33525       MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
33526       EVT EltVT = VT.getVectorElementType();
33527       unsigned WidenNumElts = WidenVT.getVectorNumElements();
33528       SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
33529       // Use the original element count so we don't do more scalar opts than
33530       // necessary.
33531       unsigned MinElts = VT.getVectorNumElements();
33532       for (unsigned i=0; i < MinElts; ++i) {
33533         SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
33534                                   DAG.getIntPtrConstant(i, dl));
33535         Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
33536       }
33537       Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
33538       return;
33539     }
33540     // With AVX512 there are some cases that can use a target specific
33541     // truncate node to go from 256/512 to less than 128 with zeros in the
33542     // upper elements of the 128 bit result.
33543     if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
33544       // We can use VTRUNC directly if for 256 bits with VLX or for any 512.
33545       if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
33546         Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
33547         return;
33548       }
33549       // There's one case we can widen to 512 bits and use VTRUNC.
33550       if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
33551         In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
33552                          DAG.getUNDEF(MVT::v4i64));
33553         Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
33554         return;
33555       }
33556     }
33557     if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
33558         getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
33559         isTypeLegal(MVT::v4i64)) {
33560       // The input needs to be split and the output needs to be widened. Use
33561       // two VTRUNCs, and shuffle their results together into the wider type.
33562       SDValue Lo, Hi;
33563       std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
33564 
33565       Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
33566       Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
33567       SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
33568                                          { 0,  1,  2,  3, 16, 17, 18, 19,
33569                                           -1, -1, -1, -1, -1, -1, -1, -1 });
33570       Results.push_back(Res);
33571       return;
33572     }
33573 
33574     return;
33575   }
33576   case ISD::ANY_EXTEND:
33577     // Right now, only MVT::v8i8 has Custom action for an illegal type.
33578     // It's intended to custom handle the input type.
33579     assert(N->getValueType(0) == MVT::v8i8 &&
33580            "Do not know how to legalize this Node");
33581     return;
33582   case ISD::SIGN_EXTEND:
33583   case ISD::ZERO_EXTEND: {
33584     EVT VT = N->getValueType(0);
33585     SDValue In = N->getOperand(0);
33586     EVT InVT = In.getValueType();
33587     if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
33588         (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
33589       assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
33590              "Unexpected type action!");
33591       assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
33592       // Custom split this so we can extend i8/i16->i32 invec. This is better
33593       // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
33594       // sra, then an extend from i32 to i64 using pcmpgt. By custom splitting
33595       // we allow the sra from the extend to i32 to be shared by the split.
33596       In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
33597 
33598       // Fill a vector with sign bits for each element.
33599       SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
33600       SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
33601 
33602       // Create an unpackl and unpackh to interleave the sign bits then bitcast
33603       // to v2i64.
33604       SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
33605                                         {0, 4, 1, 5});
33606       Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
33607       SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
33608                                         {2, 6, 3, 7});
33609       Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
33610 
33611       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
33612       Results.push_back(Res);
33613       return;
33614     }
33615 
33616     if (VT == MVT::v16i32 || VT == MVT::v8i64) {
33617       if (!InVT.is128BitVector()) {
33618         // Not a 128 bit vector, but maybe type legalization will promote
33619         // it to 128 bits.
33620         if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
33621           return;
33622         InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
33623         if (!InVT.is128BitVector())
33624           return;
33625 
33626         // Promote the input to 128 bits. Type legalization will turn this into
33627         // zext_inreg/sext_inreg.
33628         In = DAG.getNode(N->getOpcode(), dl, InVT, In);
33629       }
33630 
33631       // Perform custom splitting instead of the two stage extend we would get
33632       // by default.
33633       EVT LoVT, HiVT;
33634       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
33635       assert(isTypeLegal(LoVT) && "Split VT not legal?");
33636 
33637       SDValue Lo = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, LoVT, In, DAG);
33638 
33639       // We need to shift the input over by half the number of elements.
33640       unsigned NumElts = InVT.getVectorNumElements();
33641       unsigned HalfNumElts = NumElts / 2;
33642       SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
33643       for (unsigned i = 0; i != HalfNumElts; ++i)
33644         ShufMask[i] = i + HalfNumElts;
33645 
33646       SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
33647       Hi = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, HiVT, Hi, DAG);
33648 
33649       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
33650       Results.push_back(Res);
33651     }
33652     return;
33653   }
33654   case ISD::FP_TO_SINT:
33655   case ISD::STRICT_FP_TO_SINT:
33656   case ISD::FP_TO_UINT:
33657   case ISD::STRICT_FP_TO_UINT: {
33658     bool IsStrict = N->isStrictFPOpcode();
33659     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
33660                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
33661     EVT VT = N->getValueType(0);
33662     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
33663     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
33664     EVT SrcVT = Src.getValueType();
33665 
33666     SDValue Res;
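          // For soft f16 sources, extend to f32 first and then perform the
          // integer conversion.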
33667     if (isSoftFP16(SrcVT)) {
33668       EVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
33669       if (IsStrict) {
33670         Res =
33671             DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
33672                         {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
33673                                             {NVT, MVT::Other}, {Chain, Src})});
33674         Chain = Res.getValue(1);
33675       } else {
33676         Res = DAG.getNode(N->getOpcode(), dl, VT,
33677                           DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
33678       }
33679       Results.push_back(Res);
33680       if (IsStrict)
33681         Results.push_back(Chain);
33682 
33683       return;
33684     }
33685 
33686     if (VT.isVector() && Subtarget.hasFP16() &&
33687         SrcVT.getVectorElementType() == MVT::f16) {
33688       EVT EleVT = VT.getVectorElementType();
33689       EVT ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
33690 
33691       if (SrcVT != MVT::v8f16) {
33692         SDValue Tmp =
33693             IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
33694         SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
33695         Ops[0] = Src;
33696         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
33697       }
33698 
33699       if (IsStrict) {
33700         unsigned Opc =
33701             IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
33702         Res =
33703             DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {N->getOperand(0), Src});
33704         Chain = Res.getValue(1);
33705       } else {
33706         unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
33707         Res = DAG.getNode(Opc, dl, ResVT, Src);
33708       }
33709 
33710       // TODO: Need to add exception check code for strict FP.
33711       if (EleVT.getSizeInBits() < 16) {
33712         MVT TmpVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8);
33713         Res = DAG.getNode(ISD::TRUNCATE, dl, TmpVT, Res);
33714 
33715         // Now widen to 128 bits.
33716         unsigned NumConcats = 128 / TmpVT.getSizeInBits();
33717         MVT ConcatVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8 * NumConcats);
33718         SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(TmpVT));
33719         ConcatOps[0] = Res;
33720         Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
33721       }
33722 
33723       Results.push_back(Res);
33724       if (IsStrict)
33725         Results.push_back(Chain);
33726 
33727       return;
33728     }
33729 
33730     if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
33731       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33732              "Unexpected type action!");
33733 
33734       // Try to create a 128 bit vector, but don't exceed a 32 bit element.
33735       unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
33736       MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
33737                                        VT.getVectorNumElements());
33738       SDValue Res;
33739       SDValue Chain;
33740       if (IsStrict) {
33741         Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
33742                           {N->getOperand(0), Src});
33743         Chain = Res.getValue(1);
33744       } else
33745         Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
33746 
33747       // Preserve what we know about the size of the original result. If the
33748       // result is v2i32, we have to manually widen the assert.
33749       if (PromoteVT == MVT::v2i32)
33750         Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
33751                           DAG.getUNDEF(MVT::v2i32));
33752 
33753       Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext, dl,
33754                         Res.getValueType(), Res,
33755                         DAG.getValueType(VT.getVectorElementType()));
33756 
33757       if (PromoteVT == MVT::v2i32)
33758         Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
33759                           DAG.getIntPtrConstant(0, dl));
33760 
33761       // Truncate back to the original width.
33762       Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
33763 
33764       // Now widen to 128 bits.
33765       unsigned NumConcats = 128 / VT.getSizeInBits();
33766       MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
33767                                       VT.getVectorNumElements() * NumConcats);
33768       SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
33769       ConcatOps[0] = Res;
33770       Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
33771       Results.push_back(Res);
33772       if (IsStrict)
33773         Results.push_back(Chain);
33774       return;
33775     }
33776 
33777 
33778     if (VT == MVT::v2i32) {
33779       assert((!IsStrict || IsSigned || Subtarget.hasAVX512()) &&
33780              "Strict unsigned conversion requires AVX512");
33781       assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
33782       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33783              "Unexpected type action!");
33784       if (Src.getValueType() == MVT::v2f64) {
33785         if (!IsSigned && !Subtarget.hasAVX512()) {
33786           SDValue Res =
33787               expandFP_TO_UINT_SSE(MVT::v4i32, Src, dl, DAG, Subtarget);
33788           Results.push_back(Res);
33789           return;
33790         }
33791 
33792         unsigned Opc;
33793         if (IsStrict)
33794           Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
33795         else
33796           Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
33797 
33798         // If we have VLX we can emit a target-specific FP_TO_UINT node.
33799         if (!IsSigned && !Subtarget.hasVLX()) {
33800           // Otherwise we can defer to the generic legalizer which will widen
33801           // the input as well. This will be further widened during op
33802           // legalization to v8i32<-v8f64.
33803           // For strict nodes we'll need to widen ourselves.
33804           // FIXME: Fix the type legalizer to safely widen strict nodes?
33805           if (!IsStrict)
33806             return;
33807           Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
33808                             DAG.getConstantFP(0.0, dl, MVT::v2f64));
33809           Opc = N->getOpcode();
33810         }
33811         SDValue Res;
33812         SDValue Chain;
33813         if (IsStrict) {
33814           Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
33815                             {N->getOperand(0), Src});
33816           Chain = Res.getValue(1);
33817         } else {
33818           Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
33819         }
33820         Results.push_back(Res);
33821         if (IsStrict)
33822           Results.push_back(Chain);
33823         return;
33824       }
33825 
33826       // Custom widen strict v2f32->v2i32 by padding with zeros.
33827       // FIXME: Should generic type legalizer do this?
33828       if (Src.getValueType() == MVT::v2f32 && IsStrict) {
33829         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
33830                           DAG.getConstantFP(0.0, dl, MVT::v2f32));
33831         SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
33832                                   {N->getOperand(0), Src});
33833         Results.push_back(Res);
33834         Results.push_back(Res.getValue(1));
33835         return;
33836       }
33837 
33838       // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
33839       // so early out here.
33840       return;
33841     }
33842 
33843     assert(!VT.isVector() && "Vectors should have been handled above!");
33844 
33845     if ((Subtarget.hasDQI() && VT == MVT::i64 &&
33846          (SrcVT == MVT::f32 || SrcVT == MVT::f64)) ||
33847         (Subtarget.hasFP16() && SrcVT == MVT::f16)) {
33848       assert(!Subtarget.is64Bit() && "i64 should be legal");
33849       unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
33850       // If we use a 128-bit result we might need to use a target specific node.
33851       unsigned SrcElts =
33852           std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
33853       MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
33854       MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
33855       unsigned Opc = N->getOpcode();
33856       if (NumElts != SrcElts) {
33857         if (IsStrict)
33858           Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
33859         else
33860           Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
33861       }
33862 
33863       SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
33864       SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
33865                                 DAG.getConstantFP(0.0, dl, VecInVT), Src,
33866                                 ZeroIdx);
33867       SDValue Chain;
33868       if (IsStrict) {
33869         SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
33870         Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
33871         Chain = Res.getValue(1);
33872       } else
33873         Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
33874       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
33875       Results.push_back(Res);
33876       if (IsStrict)
33877         Results.push_back(Chain);
33878       return;
33879     }
33880 
33881     if (VT == MVT::i128 && Subtarget.isTargetWin64()) {
33882       SDValue Chain;
33883       SDValue V = LowerWin64_FP_TO_INT128(SDValue(N, 0), DAG, Chain);
33884       Results.push_back(V);
33885       if (IsStrict)
33886         Results.push_back(Chain);
33887       return;
33888     }
33889 
33890     if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
33891       Results.push_back(V);
33892       if (IsStrict)
33893         Results.push_back(Chain);
33894     }
33895     return;
33896   }
33897   case ISD::LRINT:
33898   case ISD::LLRINT: {
33899     if (SDValue V = LRINT_LLRINTHelper(N, DAG))
33900       Results.push_back(V);
33901     return;
33902   }
33903 
33904   case ISD::SINT_TO_FP:
33905   case ISD::STRICT_SINT_TO_FP:
33906   case ISD::UINT_TO_FP:
33907   case ISD::STRICT_UINT_TO_FP: {
33908     bool IsStrict = N->isStrictFPOpcode();
33909     bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
33910                     N->getOpcode() == ISD::STRICT_SINT_TO_FP;
33911     EVT VT = N->getValueType(0);
33912     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
33913     if (VT.getVectorElementType() == MVT::f16 && Subtarget.hasFP16() &&
33914         Subtarget.hasVLX()) {
33915       if (Src.getValueType().getVectorElementType() == MVT::i16)
33916         return;
33917 
33918       if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2i32)
33919         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
33920                           IsStrict ? DAG.getConstant(0, dl, MVT::v2i32)
33921                                    : DAG.getUNDEF(MVT::v2i32));
33922       if (IsStrict) {
33923         unsigned Opc =
33924             IsSigned ? X86ISD::STRICT_CVTSI2P : X86ISD::STRICT_CVTUI2P;
33925         SDValue Res = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
33926                                   {N->getOperand(0), Src});
33927         Results.push_back(Res);
33928         Results.push_back(Res.getValue(1));
33929       } else {
33930         unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
33931         Results.push_back(DAG.getNode(Opc, dl, MVT::v8f16, Src));
33932       }
33933       return;
33934     }
33935     if (VT != MVT::v2f32)
33936       return;
33937     EVT SrcVT = Src.getValueType();
33938     if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
33939       if (IsStrict) {
33940         unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
33941                                 : X86ISD::STRICT_CVTUI2P;
33942         SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
33943                                   {N->getOperand(0), Src});
33944         Results.push_back(Res);
33945         Results.push_back(Res.getValue(1));
33946       } else {
33947         unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
33948         Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
33949       }
33950       return;
33951     }
33952     if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
33953         Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
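            // Without AVX-512, lower unsigned v2i64->v2f32 by halving lanes
            // whose sign bit is set ((x >> 1) | (x & 1)), converting as signed,
            // and doubling the result, then selecting per lane between the fast
            // and adjusted paths.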
33954       SDValue Zero = DAG.getConstant(0, dl, SrcVT);
33955       SDValue One  = DAG.getConstant(1, dl, SrcVT);
33956       SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
33957                                  DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
33958                                  DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
33959       SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
33960       SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
33961       SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
33962       for (int i = 0; i != 2; ++i) {
33963         SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
33964                                   SignSrc, DAG.getIntPtrConstant(i, dl));
33965         if (IsStrict)
33966           SignCvts[i] =
33967               DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
33968                           {N->getOperand(0), Elt});
33969         else
33970           SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Elt);
33971       }
33972       SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
33973       SDValue Slow, Chain;
33974       if (IsStrict) {
33975         Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
33976                             SignCvts[0].getValue(1), SignCvts[1].getValue(1));
33977         Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
33978                            {Chain, SignCvt, SignCvt});
33979         Chain = Slow.getValue(1);
33980       } else {
33981         Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
33982       }
33983       IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
33984       IsNeg =
33985           DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
33986       SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
33987       Results.push_back(Cvt);
33988       if (IsStrict)
33989         Results.push_back(Chain);
33990       return;
33991     }
33992 
33993     if (SrcVT != MVT::v2i32)
33994       return;
33995 
33996     if (IsSigned || Subtarget.hasAVX512()) {
33997       if (!IsStrict)
33998         return;
33999 
34000       // Custom widen strict v2i32->v2f32 to avoid scalarization.
34001       // FIXME: Should generic type legalizer do this?
34002       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
34003                         DAG.getConstant(0, dl, MVT::v2i32));
34004       SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
34005                                 {N->getOperand(0), Src});
34006       Results.push_back(Res);
34007       Results.push_back(Res.getValue(1));
34008       return;
34009     }
34010 
34011     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
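          // Lower unsigned v2i32->v2f32 with the 2^52 bias trick: OR the
          // zero-extended input into the mantissa of 2^52, subtract the bias in
          // f64, then round the v2f64 result down to v2f32.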
34012     SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
34013     SDValue VBias =
34014         DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
34015     SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
34016                              DAG.getBitcast(MVT::v2i64, VBias));
34017     Or = DAG.getBitcast(MVT::v2f64, Or);
34018     if (IsStrict) {
34019       SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
34020                                 {N->getOperand(0), Or, VBias});
34021       SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
34022                                 {MVT::v4f32, MVT::Other},
34023                                 {Sub.getValue(1), Sub});
34024       Results.push_back(Res);
34025       Results.push_back(Res.getValue(1));
34026     } else {
34027       // TODO: Are there any fast-math-flags to propagate here?
34028       SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
34029       Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
34030     }
34031     return;
34032   }
34033   case ISD::STRICT_FP_ROUND:
34034   case ISD::FP_ROUND: {
34035     bool IsStrict = N->isStrictFPOpcode();
34036     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
34037     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
34038     SDValue Rnd = N->getOperand(IsStrict ? 2 : 1);
34039     EVT SrcVT = Src.getValueType();
34040     EVT VT = N->getValueType(0);
34041     SDValue V;
34042     if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2f32) {
34043       SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f32)
34044                              : DAG.getUNDEF(MVT::v2f32);
34045       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src, Ext);
34046     }
34047     if (!Subtarget.hasFP16() && VT.getVectorElementType() == MVT::f16) {
34048       assert(Subtarget.hasF16C() && "Cannot widen f16 without F16C");
34049       if (SrcVT.getVectorElementType() != MVT::f32)
34050         return;
34051 
34052       if (IsStrict)
34053         V = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
34054                         {Chain, Src, Rnd});
34055       else
34056         V = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Src, Rnd);
34057 
34058       Results.push_back(DAG.getBitcast(MVT::v8f16, V));
34059       if (IsStrict)
34060         Results.push_back(V.getValue(1));
34061       return;
34062     }
34063     if (!isTypeLegal(Src.getValueType()))
34064       return;
34065     EVT NewVT = VT.getVectorElementType() == MVT::f16 ? MVT::v8f16 : MVT::v4f32;
34066     if (IsStrict)
34067       V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {NewVT, MVT::Other},
34068                       {Chain, Src});
34069     else
34070       V = DAG.getNode(X86ISD::VFPROUND, dl, NewVT, Src);
34071     Results.push_back(V);
34072     if (IsStrict)
34073       Results.push_back(V.getValue(1));
34074     return;
34075   }
34076   case ISD::FP_EXTEND:
34077   case ISD::STRICT_FP_EXTEND: {
34078     // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
34079     // No other ValueType for FP_EXTEND should reach this point.
34080     assert(N->getValueType(0) == MVT::v2f32 &&
34081            "Do not know how to legalize this Node");
34082     if (!Subtarget.hasFP16() || !Subtarget.hasVLX())
34083       return;
34084     bool IsStrict = N->isStrictFPOpcode();
34085     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
34086     SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f16)
34087                            : DAG.getUNDEF(MVT::v2f16);
34088     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f16, Src, Ext);
34089     if (IsStrict)
34090       V = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::v4f32, MVT::Other},
34091                       {N->getOperand(0), V});
34092     else
34093       V = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, V);
34094     Results.push_back(V);
34095     if (IsStrict)
34096       Results.push_back(V.getValue(1));
34097     return;
34098   }
34099   case ISD::INTRINSIC_W_CHAIN: {
34100     unsigned IntNo = N->getConstantOperandVal(1);
34101     switch (IntNo) {
34102     default : llvm_unreachable("Do not know how to custom type "
34103                                "legalize this intrinsic operation!");
34104     case Intrinsic::x86_rdtsc:
34105       return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
34106                                      Results);
34107     case Intrinsic::x86_rdtscp:
34108       return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
34109                                      Results);
34110     case Intrinsic::x86_rdpmc:
34111       expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
34112                                   Results);
34113       return;
34114     case Intrinsic::x86_rdpru:
34115       expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPRU, X86::ECX, Subtarget,
34116                                   Results);
34117       return;
34118     case Intrinsic::x86_xgetbv:
34119       expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
34120                                   Results);
34121       return;
34122     }
34123   }
34124   case ISD::READCYCLECOUNTER: {
34125     return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
34126   }
34127   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
34128     EVT T = N->getValueType(0);
34129     assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
34130     bool Regs64bit = T == MVT::i128;
34131     assert((!Regs64bit || Subtarget.canUseCMPXCHG16B()) &&
34132            "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
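          // CMPXCHG8B/CMPXCHG16B compare EDX:EAX (RDX:RAX) with the memory
          // operand and, if equal, store ECX:EBX (RCX:RBX) to it; the previous
          // value is returned in EDX:EAX and ZF reports success. Split the
          // i64/i128 operands into halves and feed them through those
          // registers.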
34133     MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
34134     SDValue cpInL, cpInH;
34135     cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
34136                         DAG.getConstant(0, dl, HalfT));
34137     cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
34138                         DAG.getConstant(1, dl, HalfT));
34139     cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
34140                              Regs64bit ? X86::RAX : X86::EAX,
34141                              cpInL, SDValue());
34142     cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
34143                              Regs64bit ? X86::RDX : X86::EDX,
34144                              cpInH, cpInL.getValue(1));
34145     SDValue swapInL, swapInH;
34146     swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
34147                           DAG.getConstant(0, dl, HalfT));
34148     swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
34149                           DAG.getConstant(1, dl, HalfT));
34150     swapInH =
34151         DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
34152                          swapInH, cpInH.getValue(1));
34153 
34154     // In 64-bit mode we might need the base pointer in RBX, but we can't know
34155     // until later. So we keep the RBX input in a vreg and use a custom
34156     // inserter.
34157     // Since RBX will be a reserved register, the register allocator will not
34158     // make sure its value is properly saved and restored around this
34159     // live-range.
34160     SDValue Result;
34161     SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
34162     MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
34163     if (Regs64bit) {
34164       SDValue Ops[] = {swapInH.getValue(0), N->getOperand(1), swapInL,
34165                        swapInH.getValue(1)};
34166       Result =
34167           DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG16_DAG, dl, Tys, Ops, T, MMO);
34168     } else {
34169       swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl, X86::EBX, swapInL,
34170                                  swapInH.getValue(1));
34171       SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
34172                        swapInL.getValue(1)};
34173       Result =
34174           DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, T, MMO);
34175     }
34176 
34177     SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
34178                                         Regs64bit ? X86::RAX : X86::EAX,
34179                                         HalfT, Result.getValue(1));
34180     SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
34181                                         Regs64bit ? X86::RDX : X86::EDX,
34182                                         HalfT, cpOutL.getValue(2));
34183     SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
34184 
34185     SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
34186                                         MVT::i32, cpOutH.getValue(2));
34187     SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
34188     Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
34189 
34190     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
34191     Results.push_back(Success);
34192     Results.push_back(EFLAGS.getValue(1));
34193     return;
34194   }
34195   case ISD::ATOMIC_LOAD: {
34196     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
34197     bool NoImplicitFloatOps =
34198         DAG.getMachineFunction().getFunction().hasFnAttribute(
34199             Attribute::NoImplicitFloat);
34200     if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
34201       auto *Node = cast<AtomicSDNode>(N);
34202       if (Subtarget.hasSSE1()) {
34203         // Use a VZEXT_LOAD which will be selected as MOVQ or XORPS+MOVLPS.
34204         // Then extract the lower 64-bits.
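              // v2i64 is only legal with SSE2; with just SSE1 we load as v4f32.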
34205         MVT LdVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
34206         SDVTList Tys = DAG.getVTList(LdVT, MVT::Other);
34207         SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
34208         SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
34209                                              MVT::i64, Node->getMemOperand());
34210         if (Subtarget.hasSSE2()) {
34211           SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
34212                                     DAG.getIntPtrConstant(0, dl));
34213           Results.push_back(Res);
34214           Results.push_back(Ld.getValue(1));
34215           return;
34216         }
34217         // We use an alternative sequence for SSE1 that extracts as v2f32 and
34218         // then casts to i64. This avoids a 128-bit stack temporary being
34219         // created by type legalization if we were to cast v4f32->v2i64.
34220         SDValue Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Ld,
34221                                   DAG.getIntPtrConstant(0, dl));
34222         Res = DAG.getBitcast(MVT::i64, Res);
34223         Results.push_back(Res);
34224         Results.push_back(Ld.getValue(1));
34225         return;
34226       }
34227       if (Subtarget.hasX87()) {
34228         // First load this into an 80-bit X87 register. This will put the whole
34229         // integer into the significand.
34230         SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
34231         SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
34232         SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD,
34233                                                  dl, Tys, Ops, MVT::i64,
34234                                                  Node->getMemOperand());
34235         SDValue Chain = Result.getValue(1);
34236 
34237         // Now store the X87 register to a stack temporary and convert to i64.
34238         // This store is not atomic and doesn't need to be.
34239         // FIXME: We don't need a stack temporary if the result of the load
34240         // is already being stored. We could just directly store there.
34241         SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
34242         int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
34243         MachinePointerInfo MPI =
34244             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
34245         SDValue StoreOps[] = { Chain, Result, StackPtr };
34246         Chain = DAG.getMemIntrinsicNode(
34247             X86ISD::FIST, dl, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
34248             MPI, std::nullopt /*Align*/, MachineMemOperand::MOStore);
34249 
34250         // Finally load the value back from the stack temporary and return it.
34251         // This load is not atomic and doesn't need to be.
34252         // This load will be further type legalized.
34253         Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
34254         Results.push_back(Result);
34255         Results.push_back(Result.getValue(1));
34256         return;
34257       }
34258     }
34259     // TODO: Use MOVLPS when SSE1 is available?
34260     // Delegate to generic TypeLegalization. Situations we can really handle
34261     // should have already been dealt with by AtomicExpandPass.cpp.
34262     break;
34263   }
34264   case ISD::ATOMIC_SWAP:
34265   case ISD::ATOMIC_LOAD_ADD:
34266   case ISD::ATOMIC_LOAD_SUB:
34267   case ISD::ATOMIC_LOAD_AND:
34268   case ISD::ATOMIC_LOAD_OR:
34269   case ISD::ATOMIC_LOAD_XOR:
34270   case ISD::ATOMIC_LOAD_NAND:
34271   case ISD::ATOMIC_LOAD_MIN:
34272   case ISD::ATOMIC_LOAD_MAX:
34273   case ISD::ATOMIC_LOAD_UMIN:
34274   case ISD::ATOMIC_LOAD_UMAX:
34275     // Delegate to generic TypeLegalization. Situations we can really handle
34276     // should have already been dealt with by AtomicExpandPass.cpp.
34277     break;
34278 
34279   case ISD::BITCAST: {
34280     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
34281     EVT DstVT = N->getValueType(0);
34282     EVT SrcVT = N->getOperand(0).getValueType();
34283 
34284     // If this is a bitcast from a v64i1 k-register to an i64 on a 32-bit
34285     // target we can split using the k-register rather than memory.
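          // Split the v64i1 operand into two v32i1 halves, bitcast each half
          // to an i32 and reassemble the i64 with BUILD_PAIR.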
34286     if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
34287       assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
34288       SDValue Lo, Hi;
34289       std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
34290       Lo = DAG.getBitcast(MVT::i32, Lo);
34291       Hi = DAG.getBitcast(MVT::i32, Hi);
34292       SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
34293       Results.push_back(Res);
34294       return;
34295     }
34296 
34297     if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
34298       // FIXME: Use v4f32 for SSE1?
34299       assert(Subtarget.hasSSE2() && "Requires SSE2");
34300       assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
34301              "Unexpected type action!");
34302       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
34303       SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64,
34304                                 N->getOperand(0));
34305       Res = DAG.getBitcast(WideVT, Res);
34306       Results.push_back(Res);
34307       return;
34308     }
34309 
34310     return;
34311   }
34312   case ISD::MGATHER: {
34313     EVT VT = N->getValueType(0);
34314     if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
34315         (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
34316       auto *Gather = cast<MaskedGatherSDNode>(N);
34317       SDValue Index = Gather->getIndex();
34318       if (Index.getValueType() != MVT::v2i64)
34319         return;
34320       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
34321              "Unexpected type action!");
34322       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
34323       SDValue Mask = Gather->getMask();
34324       assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
34325       SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
34326                                      Gather->getPassThru(),
34327                                      DAG.getUNDEF(VT));
34328       if (!Subtarget.hasVLX()) {
34329         // We need to widen the mask, but the instruction will only use 2
34330         // of its elements. So we can use undef.
34331         Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
34332                            DAG.getUNDEF(MVT::v2i1));
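              // Without VLX this uses the AVX2 form of the gather, which takes
              // its mask in a vector register and tests only the sign bit of
              // each element, hence the sign extension to v4i32.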
34333         Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
34334       }
34335       SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
34336                         Gather->getBasePtr(), Index, Gather->getScale() };
34337       SDValue Res = DAG.getMemIntrinsicNode(
34338           X86ISD::MGATHER, dl, DAG.getVTList(WideVT, MVT::Other), Ops,
34339           Gather->getMemoryVT(), Gather->getMemOperand());
34340       Results.push_back(Res);
34341       Results.push_back(Res.getValue(1));
34342       return;
34343     }
34344     return;
34345   }
34346   case ISD::LOAD: {
34347     // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
34348     // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
34349     // cast since type legalization will try to use an i64 load.
34350     MVT VT = N->getSimpleValueType(0);
34351     assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
34352     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
34353            "Unexpected type action!");
34354     if (!ISD::isNON_EXTLoad(N))
34355       return;
34356     auto *Ld = cast<LoadSDNode>(N);
34357     if (Subtarget.hasSSE2()) {
34358       MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
34359       SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
34360                                 Ld->getPointerInfo(), Ld->getOriginalAlign(),
34361                                 Ld->getMemOperand()->getFlags());
34362       SDValue Chain = Res.getValue(1);
34363       MVT VecVT = MVT::getVectorVT(LdVT, 2);
34364       Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
34365       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
34366       Res = DAG.getBitcast(WideVT, Res);
34367       Results.push_back(Res);
34368       Results.push_back(Chain);
34369       return;
34370     }
34371     assert(Subtarget.hasSSE1() && "Expected SSE");
34372     SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
34373     SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
34374     SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
34375                                           MVT::i64, Ld->getMemOperand());
34376     Results.push_back(Res);
34377     Results.push_back(Res.getValue(1));
34378     return;
34379   }
34380   case ISD::ADDRSPACECAST: {
34381     SDValue V = LowerADDRSPACECAST(SDValue(N,0), DAG);
34382     Results.push_back(V);
34383     return;
34384   }
34385   case ISD::BITREVERSE: {
34386     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
34387     assert(Subtarget.hasXOP() && "Expected XOP");
34388     // We can use VPPERM by copying to a vector register and back. We'll need
34389     // to move the scalar in two i32 pieces.
34390     Results.push_back(LowerBITREVERSE(SDValue(N, 0), Subtarget, DAG));
34391     return;
34392   }
34393   case ISD::EXTRACT_VECTOR_ELT: {
34394     // f16 = extract vXf16 %vec, i64 %idx
34395     assert(N->getSimpleValueType(0) == MVT::f16 &&
34396            "Unexpected Value type of EXTRACT_VECTOR_ELT!");
34397     assert(Subtarget.hasFP16() && "Expected FP16");
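          // Reinterpret the vector as integers, extract the lane as an i16 and
          // bitcast the scalar result back to f16.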
34398     SDValue VecOp = N->getOperand(0);
34399     EVT ExtVT = VecOp.getValueType().changeVectorElementTypeToInteger();
34400     SDValue Split = DAG.getBitcast(ExtVT, N->getOperand(0));
34401     Split = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Split,
34402                         N->getOperand(1));
34403     Split = DAG.getBitcast(MVT::f16, Split);
34404     Results.push_back(Split);
34405     return;
34406   }
34407   }
34408 }
34409 
34410 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
34411   switch ((X86ISD::NodeType)Opcode) {
34412   case X86ISD::FIRST_NUMBER:       break;
34413 #define NODE_NAME_CASE(NODE) case X86ISD::NODE: return "X86ISD::" #NODE;
34414   NODE_NAME_CASE(BSF)
34415   NODE_NAME_CASE(BSR)
34416   NODE_NAME_CASE(FSHL)
34417   NODE_NAME_CASE(FSHR)
34418   NODE_NAME_CASE(FAND)
34419   NODE_NAME_CASE(FANDN)
34420   NODE_NAME_CASE(FOR)
34421   NODE_NAME_CASE(FXOR)
34422   NODE_NAME_CASE(FILD)
34423   NODE_NAME_CASE(FIST)
34424   NODE_NAME_CASE(FP_TO_INT_IN_MEM)
34425   NODE_NAME_CASE(FLD)
34426   NODE_NAME_CASE(FST)
34427   NODE_NAME_CASE(CALL)
34428   NODE_NAME_CASE(CALL_RVMARKER)
34429   NODE_NAME_CASE(BT)
34430   NODE_NAME_CASE(CMP)
34431   NODE_NAME_CASE(FCMP)
34432   NODE_NAME_CASE(STRICT_FCMP)
34433   NODE_NAME_CASE(STRICT_FCMPS)
34434   NODE_NAME_CASE(COMI)
34435   NODE_NAME_CASE(UCOMI)
34436   NODE_NAME_CASE(CMPM)
34437   NODE_NAME_CASE(CMPMM)
34438   NODE_NAME_CASE(STRICT_CMPM)
34439   NODE_NAME_CASE(CMPMM_SAE)
34440   NODE_NAME_CASE(SETCC)
34441   NODE_NAME_CASE(SETCC_CARRY)
34442   NODE_NAME_CASE(FSETCC)
34443   NODE_NAME_CASE(FSETCCM)
34444   NODE_NAME_CASE(FSETCCM_SAE)
34445   NODE_NAME_CASE(CMOV)
34446   NODE_NAME_CASE(BRCOND)
34447   NODE_NAME_CASE(RET_FLAG)
34448   NODE_NAME_CASE(IRET)
34449   NODE_NAME_CASE(REP_STOS)
34450   NODE_NAME_CASE(REP_MOVS)
34451   NODE_NAME_CASE(GlobalBaseReg)
34452   NODE_NAME_CASE(Wrapper)
34453   NODE_NAME_CASE(WrapperRIP)
34454   NODE_NAME_CASE(MOVQ2DQ)
34455   NODE_NAME_CASE(MOVDQ2Q)
34456   NODE_NAME_CASE(MMX_MOVD2W)
34457   NODE_NAME_CASE(MMX_MOVW2D)
34458   NODE_NAME_CASE(PEXTRB)
34459   NODE_NAME_CASE(PEXTRW)
34460   NODE_NAME_CASE(INSERTPS)
34461   NODE_NAME_CASE(PINSRB)
34462   NODE_NAME_CASE(PINSRW)
34463   NODE_NAME_CASE(PSHUFB)
34464   NODE_NAME_CASE(ANDNP)
34465   NODE_NAME_CASE(BLENDI)
34466   NODE_NAME_CASE(BLENDV)
34467   NODE_NAME_CASE(HADD)
34468   NODE_NAME_CASE(HSUB)
34469   NODE_NAME_CASE(FHADD)
34470   NODE_NAME_CASE(FHSUB)
34471   NODE_NAME_CASE(CONFLICT)
34472   NODE_NAME_CASE(FMAX)
34473   NODE_NAME_CASE(FMAXS)
34474   NODE_NAME_CASE(FMAX_SAE)
34475   NODE_NAME_CASE(FMAXS_SAE)
34476   NODE_NAME_CASE(FMIN)
34477   NODE_NAME_CASE(FMINS)
34478   NODE_NAME_CASE(FMIN_SAE)
34479   NODE_NAME_CASE(FMINS_SAE)
34480   NODE_NAME_CASE(FMAXC)
34481   NODE_NAME_CASE(FMINC)
34482   NODE_NAME_CASE(FRSQRT)
34483   NODE_NAME_CASE(FRCP)
34484   NODE_NAME_CASE(EXTRQI)
34485   NODE_NAME_CASE(INSERTQI)
34486   NODE_NAME_CASE(TLSADDR)
34487   NODE_NAME_CASE(TLSBASEADDR)
34488   NODE_NAME_CASE(TLSCALL)
34489   NODE_NAME_CASE(EH_SJLJ_SETJMP)
34490   NODE_NAME_CASE(EH_SJLJ_LONGJMP)
34491   NODE_NAME_CASE(EH_SJLJ_SETUP_DISPATCH)
34492   NODE_NAME_CASE(EH_RETURN)
34493   NODE_NAME_CASE(TC_RETURN)
34494   NODE_NAME_CASE(FNSTCW16m)
34495   NODE_NAME_CASE(FLDCW16m)
34496   NODE_NAME_CASE(LCMPXCHG_DAG)
34497   NODE_NAME_CASE(LCMPXCHG8_DAG)
34498   NODE_NAME_CASE(LCMPXCHG16_DAG)
34499   NODE_NAME_CASE(LCMPXCHG16_SAVE_RBX_DAG)
34500   NODE_NAME_CASE(LADD)
34501   NODE_NAME_CASE(LSUB)
34502   NODE_NAME_CASE(LOR)
34503   NODE_NAME_CASE(LXOR)
34504   NODE_NAME_CASE(LAND)
34505   NODE_NAME_CASE(LBTS)
34506   NODE_NAME_CASE(LBTC)
34507   NODE_NAME_CASE(LBTR)
34508   NODE_NAME_CASE(LBTS_RM)
34509   NODE_NAME_CASE(LBTC_RM)
34510   NODE_NAME_CASE(LBTR_RM)
34511   NODE_NAME_CASE(AADD)
34512   NODE_NAME_CASE(AOR)
34513   NODE_NAME_CASE(AXOR)
34514   NODE_NAME_CASE(AAND)
34515   NODE_NAME_CASE(VZEXT_MOVL)
34516   NODE_NAME_CASE(VZEXT_LOAD)
34517   NODE_NAME_CASE(VEXTRACT_STORE)
34518   NODE_NAME_CASE(VTRUNC)
34519   NODE_NAME_CASE(VTRUNCS)
34520   NODE_NAME_CASE(VTRUNCUS)
34521   NODE_NAME_CASE(VMTRUNC)
34522   NODE_NAME_CASE(VMTRUNCS)
34523   NODE_NAME_CASE(VMTRUNCUS)
34524   NODE_NAME_CASE(VTRUNCSTORES)
34525   NODE_NAME_CASE(VTRUNCSTOREUS)
34526   NODE_NAME_CASE(VMTRUNCSTORES)
34527   NODE_NAME_CASE(VMTRUNCSTOREUS)
34528   NODE_NAME_CASE(VFPEXT)
34529   NODE_NAME_CASE(STRICT_VFPEXT)
34530   NODE_NAME_CASE(VFPEXT_SAE)
34531   NODE_NAME_CASE(VFPEXTS)
34532   NODE_NAME_CASE(VFPEXTS_SAE)
34533   NODE_NAME_CASE(VFPROUND)
34534   NODE_NAME_CASE(STRICT_VFPROUND)
34535   NODE_NAME_CASE(VMFPROUND)
34536   NODE_NAME_CASE(VFPROUND_RND)
34537   NODE_NAME_CASE(VFPROUNDS)
34538   NODE_NAME_CASE(VFPROUNDS_RND)
34539   NODE_NAME_CASE(VSHLDQ)
34540   NODE_NAME_CASE(VSRLDQ)
34541   NODE_NAME_CASE(VSHL)
34542   NODE_NAME_CASE(VSRL)
34543   NODE_NAME_CASE(VSRA)
34544   NODE_NAME_CASE(VSHLI)
34545   NODE_NAME_CASE(VSRLI)
34546   NODE_NAME_CASE(VSRAI)
34547   NODE_NAME_CASE(VSHLV)
34548   NODE_NAME_CASE(VSRLV)
34549   NODE_NAME_CASE(VSRAV)
34550   NODE_NAME_CASE(VROTLI)
34551   NODE_NAME_CASE(VROTRI)
34552   NODE_NAME_CASE(VPPERM)
34553   NODE_NAME_CASE(CMPP)
34554   NODE_NAME_CASE(STRICT_CMPP)
34555   NODE_NAME_CASE(PCMPEQ)
34556   NODE_NAME_CASE(PCMPGT)
34557   NODE_NAME_CASE(PHMINPOS)
34558   NODE_NAME_CASE(ADD)
34559   NODE_NAME_CASE(SUB)
34560   NODE_NAME_CASE(ADC)
34561   NODE_NAME_CASE(SBB)
34562   NODE_NAME_CASE(SMUL)
34563   NODE_NAME_CASE(UMUL)
34564   NODE_NAME_CASE(OR)
34565   NODE_NAME_CASE(XOR)
34566   NODE_NAME_CASE(AND)
34567   NODE_NAME_CASE(BEXTR)
34568   NODE_NAME_CASE(BEXTRI)
34569   NODE_NAME_CASE(BZHI)
34570   NODE_NAME_CASE(PDEP)
34571   NODE_NAME_CASE(PEXT)
34572   NODE_NAME_CASE(MUL_IMM)
34573   NODE_NAME_CASE(MOVMSK)
34574   NODE_NAME_CASE(PTEST)
34575   NODE_NAME_CASE(TESTP)
34576   NODE_NAME_CASE(KORTEST)
34577   NODE_NAME_CASE(KTEST)
34578   NODE_NAME_CASE(KADD)
34579   NODE_NAME_CASE(KSHIFTL)
34580   NODE_NAME_CASE(KSHIFTR)
34581   NODE_NAME_CASE(PACKSS)
34582   NODE_NAME_CASE(PACKUS)
34583   NODE_NAME_CASE(PALIGNR)
34584   NODE_NAME_CASE(VALIGN)
34585   NODE_NAME_CASE(VSHLD)
34586   NODE_NAME_CASE(VSHRD)
34587   NODE_NAME_CASE(VSHLDV)
34588   NODE_NAME_CASE(VSHRDV)
34589   NODE_NAME_CASE(PSHUFD)
34590   NODE_NAME_CASE(PSHUFHW)
34591   NODE_NAME_CASE(PSHUFLW)
34592   NODE_NAME_CASE(SHUFP)
34593   NODE_NAME_CASE(SHUF128)
34594   NODE_NAME_CASE(MOVLHPS)
34595   NODE_NAME_CASE(MOVHLPS)
34596   NODE_NAME_CASE(MOVDDUP)
34597   NODE_NAME_CASE(MOVSHDUP)
34598   NODE_NAME_CASE(MOVSLDUP)
34599   NODE_NAME_CASE(MOVSD)
34600   NODE_NAME_CASE(MOVSS)
34601   NODE_NAME_CASE(MOVSH)
34602   NODE_NAME_CASE(UNPCKL)
34603   NODE_NAME_CASE(UNPCKH)
34604   NODE_NAME_CASE(VBROADCAST)
34605   NODE_NAME_CASE(VBROADCAST_LOAD)
34606   NODE_NAME_CASE(VBROADCASTM)
34607   NODE_NAME_CASE(SUBV_BROADCAST_LOAD)
34608   NODE_NAME_CASE(VPERMILPV)
34609   NODE_NAME_CASE(VPERMILPI)
34610   NODE_NAME_CASE(VPERM2X128)
34611   NODE_NAME_CASE(VPERMV)
34612   NODE_NAME_CASE(VPERMV3)
34613   NODE_NAME_CASE(VPERMI)
34614   NODE_NAME_CASE(VPTERNLOG)
34615   NODE_NAME_CASE(VFIXUPIMM)
34616   NODE_NAME_CASE(VFIXUPIMM_SAE)
34617   NODE_NAME_CASE(VFIXUPIMMS)
34618   NODE_NAME_CASE(VFIXUPIMMS_SAE)
34619   NODE_NAME_CASE(VRANGE)
34620   NODE_NAME_CASE(VRANGE_SAE)
34621   NODE_NAME_CASE(VRANGES)
34622   NODE_NAME_CASE(VRANGES_SAE)
34623   NODE_NAME_CASE(PMULUDQ)
34624   NODE_NAME_CASE(PMULDQ)
34625   NODE_NAME_CASE(PSADBW)
34626   NODE_NAME_CASE(DBPSADBW)
34627   NODE_NAME_CASE(VASTART_SAVE_XMM_REGS)
34628   NODE_NAME_CASE(VAARG_64)
34629   NODE_NAME_CASE(VAARG_X32)
34630   NODE_NAME_CASE(DYN_ALLOCA)
34631   NODE_NAME_CASE(MFENCE)
34632   NODE_NAME_CASE(SEG_ALLOCA)
34633   NODE_NAME_CASE(PROBED_ALLOCA)
34634   NODE_NAME_CASE(RDRAND)
34635   NODE_NAME_CASE(RDSEED)
34636   NODE_NAME_CASE(RDPKRU)
34637   NODE_NAME_CASE(WRPKRU)
34638   NODE_NAME_CASE(VPMADDUBSW)
34639   NODE_NAME_CASE(VPMADDWD)
34640   NODE_NAME_CASE(VPSHA)
34641   NODE_NAME_CASE(VPSHL)
34642   NODE_NAME_CASE(VPCOM)
34643   NODE_NAME_CASE(VPCOMU)
34644   NODE_NAME_CASE(VPERMIL2)
34645   NODE_NAME_CASE(FMSUB)
34646   NODE_NAME_CASE(STRICT_FMSUB)
34647   NODE_NAME_CASE(FNMADD)
34648   NODE_NAME_CASE(STRICT_FNMADD)
34649   NODE_NAME_CASE(FNMSUB)
34650   NODE_NAME_CASE(STRICT_FNMSUB)
34651   NODE_NAME_CASE(FMADDSUB)
34652   NODE_NAME_CASE(FMSUBADD)
34653   NODE_NAME_CASE(FMADD_RND)
34654   NODE_NAME_CASE(FNMADD_RND)
34655   NODE_NAME_CASE(FMSUB_RND)
34656   NODE_NAME_CASE(FNMSUB_RND)
34657   NODE_NAME_CASE(FMADDSUB_RND)
34658   NODE_NAME_CASE(FMSUBADD_RND)
34659   NODE_NAME_CASE(VFMADDC)
34660   NODE_NAME_CASE(VFMADDC_RND)
34661   NODE_NAME_CASE(VFCMADDC)
34662   NODE_NAME_CASE(VFCMADDC_RND)
34663   NODE_NAME_CASE(VFMULC)
34664   NODE_NAME_CASE(VFMULC_RND)
34665   NODE_NAME_CASE(VFCMULC)
34666   NODE_NAME_CASE(VFCMULC_RND)
34667   NODE_NAME_CASE(VFMULCSH)
34668   NODE_NAME_CASE(VFMULCSH_RND)
34669   NODE_NAME_CASE(VFCMULCSH)
34670   NODE_NAME_CASE(VFCMULCSH_RND)
34671   NODE_NAME_CASE(VFMADDCSH)
34672   NODE_NAME_CASE(VFMADDCSH_RND)
34673   NODE_NAME_CASE(VFCMADDCSH)
34674   NODE_NAME_CASE(VFCMADDCSH_RND)
34675   NODE_NAME_CASE(VPMADD52H)
34676   NODE_NAME_CASE(VPMADD52L)
34677   NODE_NAME_CASE(VRNDSCALE)
34678   NODE_NAME_CASE(STRICT_VRNDSCALE)
34679   NODE_NAME_CASE(VRNDSCALE_SAE)
34680   NODE_NAME_CASE(VRNDSCALES)
34681   NODE_NAME_CASE(VRNDSCALES_SAE)
34682   NODE_NAME_CASE(VREDUCE)
34683   NODE_NAME_CASE(VREDUCE_SAE)
34684   NODE_NAME_CASE(VREDUCES)
34685   NODE_NAME_CASE(VREDUCES_SAE)
34686   NODE_NAME_CASE(VGETMANT)
34687   NODE_NAME_CASE(VGETMANT_SAE)
34688   NODE_NAME_CASE(VGETMANTS)
34689   NODE_NAME_CASE(VGETMANTS_SAE)
34690   NODE_NAME_CASE(PCMPESTR)
34691   NODE_NAME_CASE(PCMPISTR)
34692   NODE_NAME_CASE(XTEST)
34693   NODE_NAME_CASE(COMPRESS)
34694   NODE_NAME_CASE(EXPAND)
34695   NODE_NAME_CASE(SELECTS)
34696   NODE_NAME_CASE(ADDSUB)
34697   NODE_NAME_CASE(RCP14)
34698   NODE_NAME_CASE(RCP14S)
34699   NODE_NAME_CASE(RCP28)
34700   NODE_NAME_CASE(RCP28_SAE)
34701   NODE_NAME_CASE(RCP28S)
34702   NODE_NAME_CASE(RCP28S_SAE)
34703   NODE_NAME_CASE(EXP2)
34704   NODE_NAME_CASE(EXP2_SAE)
34705   NODE_NAME_CASE(RSQRT14)
34706   NODE_NAME_CASE(RSQRT14S)
34707   NODE_NAME_CASE(RSQRT28)
34708   NODE_NAME_CASE(RSQRT28_SAE)
34709   NODE_NAME_CASE(RSQRT28S)
34710   NODE_NAME_CASE(RSQRT28S_SAE)
34711   NODE_NAME_CASE(FADD_RND)
34712   NODE_NAME_CASE(FADDS)
34713   NODE_NAME_CASE(FADDS_RND)
34714   NODE_NAME_CASE(FSUB_RND)
34715   NODE_NAME_CASE(FSUBS)
34716   NODE_NAME_CASE(FSUBS_RND)
34717   NODE_NAME_CASE(FMUL_RND)
34718   NODE_NAME_CASE(FMULS)
34719   NODE_NAME_CASE(FMULS_RND)
34720   NODE_NAME_CASE(FDIV_RND)
34721   NODE_NAME_CASE(FDIVS)
34722   NODE_NAME_CASE(FDIVS_RND)
34723   NODE_NAME_CASE(FSQRT_RND)
34724   NODE_NAME_CASE(FSQRTS)
34725   NODE_NAME_CASE(FSQRTS_RND)
34726   NODE_NAME_CASE(FGETEXP)
34727   NODE_NAME_CASE(FGETEXP_SAE)
34728   NODE_NAME_CASE(FGETEXPS)
34729   NODE_NAME_CASE(FGETEXPS_SAE)
34730   NODE_NAME_CASE(SCALEF)
34731   NODE_NAME_CASE(SCALEF_RND)
34732   NODE_NAME_CASE(SCALEFS)
34733   NODE_NAME_CASE(SCALEFS_RND)
34734   NODE_NAME_CASE(MULHRS)
34735   NODE_NAME_CASE(SINT_TO_FP_RND)
34736   NODE_NAME_CASE(UINT_TO_FP_RND)
34737   NODE_NAME_CASE(CVTTP2SI)
34738   NODE_NAME_CASE(CVTTP2UI)
34739   NODE_NAME_CASE(STRICT_CVTTP2SI)
34740   NODE_NAME_CASE(STRICT_CVTTP2UI)
34741   NODE_NAME_CASE(MCVTTP2SI)
34742   NODE_NAME_CASE(MCVTTP2UI)
34743   NODE_NAME_CASE(CVTTP2SI_SAE)
34744   NODE_NAME_CASE(CVTTP2UI_SAE)
34745   NODE_NAME_CASE(CVTTS2SI)
34746   NODE_NAME_CASE(CVTTS2UI)
34747   NODE_NAME_CASE(CVTTS2SI_SAE)
34748   NODE_NAME_CASE(CVTTS2UI_SAE)
34749   NODE_NAME_CASE(CVTSI2P)
34750   NODE_NAME_CASE(CVTUI2P)
34751   NODE_NAME_CASE(STRICT_CVTSI2P)
34752   NODE_NAME_CASE(STRICT_CVTUI2P)
34753   NODE_NAME_CASE(MCVTSI2P)
34754   NODE_NAME_CASE(MCVTUI2P)
34755   NODE_NAME_CASE(VFPCLASS)
34756   NODE_NAME_CASE(VFPCLASSS)
34757   NODE_NAME_CASE(MULTISHIFT)
34758   NODE_NAME_CASE(SCALAR_SINT_TO_FP)
34759   NODE_NAME_CASE(SCALAR_SINT_TO_FP_RND)
34760   NODE_NAME_CASE(SCALAR_UINT_TO_FP)
34761   NODE_NAME_CASE(SCALAR_UINT_TO_FP_RND)
34762   NODE_NAME_CASE(CVTPS2PH)
34763   NODE_NAME_CASE(STRICT_CVTPS2PH)
34764   NODE_NAME_CASE(CVTPS2PH_SAE)
34765   NODE_NAME_CASE(MCVTPS2PH)
34766   NODE_NAME_CASE(MCVTPS2PH_SAE)
34767   NODE_NAME_CASE(CVTPH2PS)
34768   NODE_NAME_CASE(STRICT_CVTPH2PS)
34769   NODE_NAME_CASE(CVTPH2PS_SAE)
34770   NODE_NAME_CASE(CVTP2SI)
34771   NODE_NAME_CASE(CVTP2UI)
34772   NODE_NAME_CASE(MCVTP2SI)
34773   NODE_NAME_CASE(MCVTP2UI)
34774   NODE_NAME_CASE(CVTP2SI_RND)
34775   NODE_NAME_CASE(CVTP2UI_RND)
34776   NODE_NAME_CASE(CVTS2SI)
34777   NODE_NAME_CASE(CVTS2UI)
34778   NODE_NAME_CASE(CVTS2SI_RND)
34779   NODE_NAME_CASE(CVTS2UI_RND)
34780   NODE_NAME_CASE(CVTNE2PS2BF16)
34781   NODE_NAME_CASE(CVTNEPS2BF16)
34782   NODE_NAME_CASE(MCVTNEPS2BF16)
34783   NODE_NAME_CASE(DPBF16PS)
34784   NODE_NAME_CASE(LWPINS)
34785   NODE_NAME_CASE(MGATHER)
34786   NODE_NAME_CASE(MSCATTER)
34787   NODE_NAME_CASE(VPDPBUSD)
34788   NODE_NAME_CASE(VPDPBUSDS)
34789   NODE_NAME_CASE(VPDPWSSD)
34790   NODE_NAME_CASE(VPDPWSSDS)
34791   NODE_NAME_CASE(VPSHUFBITQMB)
34792   NODE_NAME_CASE(GF2P8MULB)
34793   NODE_NAME_CASE(GF2P8AFFINEQB)
34794   NODE_NAME_CASE(GF2P8AFFINEINVQB)
34795   NODE_NAME_CASE(NT_CALL)
34796   NODE_NAME_CASE(NT_BRIND)
34797   NODE_NAME_CASE(UMWAIT)
34798   NODE_NAME_CASE(TPAUSE)
34799   NODE_NAME_CASE(ENQCMD)
34800   NODE_NAME_CASE(ENQCMDS)
34801   NODE_NAME_CASE(VP2INTERSECT)
34802   NODE_NAME_CASE(VPDPBSUD)
34803   NODE_NAME_CASE(VPDPBSUDS)
34804   NODE_NAME_CASE(VPDPBUUD)
34805   NODE_NAME_CASE(VPDPBUUDS)
34806   NODE_NAME_CASE(VPDPBSSD)
34807   NODE_NAME_CASE(VPDPBSSDS)
34808   NODE_NAME_CASE(AESENC128KL)
34809   NODE_NAME_CASE(AESDEC128KL)
34810   NODE_NAME_CASE(AESENC256KL)
34811   NODE_NAME_CASE(AESDEC256KL)
34812   NODE_NAME_CASE(AESENCWIDE128KL)
34813   NODE_NAME_CASE(AESDECWIDE128KL)
34814   NODE_NAME_CASE(AESENCWIDE256KL)
34815   NODE_NAME_CASE(AESDECWIDE256KL)
34816   NODE_NAME_CASE(CMPCCXADD)
34817   NODE_NAME_CASE(TESTUI)
34818   NODE_NAME_CASE(FP80_ADD)
34819   NODE_NAME_CASE(STRICT_FP80_ADD)
34820   }
34821   return nullptr;
34822 #undef NODE_NAME_CASE
34823 }
34824 
34825 /// Return true if the addressing mode represented by AM is legal for this
34826 /// target, for a load/store of the specified type.
34827 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
34828                                               const AddrMode &AM, Type *Ty,
34829                                               unsigned AS,
34830                                               Instruction *I) const {
34831   // X86 supports extremely general addressing modes.
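        // The addressing mode being checked has the form
        // BaseGV + BaseReg + Scale*IndexReg + BaseOffs, all of which x86 can
        // fold into a single memory operand subject to the checks below.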
34832   CodeModel::Model M = getTargetMachine().getCodeModel();
34833 
34834   // X86 allows a sign-extended 32-bit immediate field as a displacement.
34835   if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
34836     return false;
34837 
34838   if (AM.BaseGV) {
34839     unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
34840 
34841     // If a reference to this global requires an extra load, we can't fold it.
34842     if (isGlobalStubReference(GVFlags))
34843       return false;
34844 
34845     // If BaseGV requires a register for the PIC base, we cannot also have a
34846     // BaseReg specified.
34847     if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
34848       return false;
34849 
34850     // If lower 4G is not available, then we must use rip-relative addressing.
34851     if ((M != CodeModel::Small || isPositionIndependent()) &&
34852         Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
34853       return false;
34854   }
34855 
34856   switch (AM.Scale) {
34857   case 0:
34858   case 1:
34859   case 2:
34860   case 4:
34861   case 8:
34862     // These scales always work.
34863     break;
34864   case 3:
34865   case 5:
34866   case 9:
34867     // These scales are formed with basereg+scalereg.  Only accept if there is
34868     // no basereg yet.
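          // E.g. a scale of 9 is matched as Reg + 8*Reg, which occupies both
          // the base and index slots of the address.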
34869     if (AM.HasBaseReg)
34870       return false;
34871     break;
34872   default:  // Other stuff never works.
34873     return false;
34874   }
34875 
34876   return true;
34877 }
34878 
34879 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
34880   unsigned Bits = Ty->getScalarSizeInBits();
34881 
34882   // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
34883   // Splitting for v32i8/v16i16 on XOP+AVX2 targets is still preferred.
34884   if (Subtarget.hasXOP() &&
34885       (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
34886     return false;
34887 
34888   // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
34889   // shifts just as cheap as scalar ones.
34890   if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
34891     return false;
34892 
34893   // AVX512BW has shifts such as vpsllvw.
34894   if (Subtarget.hasBWI() && Bits == 16)
34895     return false;
34896 
34897   // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
34898   // fully general vector.
34899   return true;
34900 }
34901 
34902 bool X86TargetLowering::isBinOp(unsigned Opcode) const {
34903   switch (Opcode) {
34904   // These are non-commutative binops.
34905   // TODO: Add more X86ISD opcodes once we have test coverage.
34906   case X86ISD::ANDNP:
34907   case X86ISD::PCMPGT:
34908   case X86ISD::FMAX:
34909   case X86ISD::FMIN:
34910   case X86ISD::FANDN:
34911   case X86ISD::VPSHA:
34912   case X86ISD::VPSHL:
34913   case X86ISD::VSHLV:
34914   case X86ISD::VSRLV:
34915   case X86ISD::VSRAV:
34916     return true;
34917   }
34918 
34919   return TargetLoweringBase::isBinOp(Opcode);
34920 }
34921 
34922 bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
34923   switch (Opcode) {
34924   // TODO: Add more X86ISD opcodes once we have test coverage.
34925   case X86ISD::PCMPEQ:
34926   case X86ISD::PMULDQ:
34927   case X86ISD::PMULUDQ:
34928   case X86ISD::FMAXC:
34929   case X86ISD::FMINC:
34930   case X86ISD::FAND:
34931   case X86ISD::FOR:
34932   case X86ISD::FXOR:
34933     return true;
34934   }
34935 
34936   return TargetLoweringBase::isCommutativeBinOp(Opcode);
34937 }
34938 
34939 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
34940   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
34941     return false;
34942   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
34943   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
34944   return NumBits1 > NumBits2;
34945 }
34946 
34947 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
34948   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
34949     return false;
34950 
34951   if (!isTypeLegal(EVT::getEVT(Ty1)))
34952     return false;
34953 
34954   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
34955 
34956   // Assuming the caller doesn't have a zeroext or signext return parameter,
34957   // truncation all the way down to i1 is valid.
34958   return true;
34959 }
34960 
34961 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
34962   return isInt<32>(Imm);
34963 }
34964 
34965 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
34966   // Can also use sub to handle negated immediates.
34967   return isInt<32>(Imm);
34968 }
34969 
34970 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
34971   return isInt<32>(Imm);
34972 }
34973 
34974 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
34975   if (!VT1.isScalarInteger() || !VT2.isScalarInteger())
34976     return false;
34977   unsigned NumBits1 = VT1.getSizeInBits();
34978   unsigned NumBits2 = VT2.getSizeInBits();
34979   return NumBits1 > NumBits2;
34980 }
34981 
34982 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
34983   // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
34984   return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
34985 }
34986 
34987 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
34988   // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
34989   return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
34990 }
34991 
34992 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
34993   EVT VT1 = Val.getValueType();
34994   if (isZExtFree(VT1, VT2))
34995     return true;
34996 
34997   if (Val.getOpcode() != ISD::LOAD)
34998     return false;
34999 
35000   if (!VT1.isSimple() || !VT1.isInteger() ||
35001       !VT2.isSimple() || !VT2.isInteger())
35002     return false;
35003 
35004   switch (VT1.getSimpleVT().SimpleTy) {
35005   default: break;
35006   case MVT::i8:
35007   case MVT::i16:
35008   case MVT::i32:
35009     // X86 has 8, 16, and 32-bit zero-extending loads.
35010     return true;
35011   }
35012 
35013   return false;
35014 }
35015 
35016 bool X86TargetLowering::shouldSinkOperands(Instruction *I,
35017                                            SmallVectorImpl<Use *> &Ops) const {
35018   using namespace llvm::PatternMatch;
35019 
35020   FixedVectorType *VTy = dyn_cast<FixedVectorType>(I->getType());
35021   if (!VTy)
35022     return false;
35023 
35024   if (I->getOpcode() == Instruction::Mul &&
35025       VTy->getElementType()->isIntegerTy(64)) {
35026     for (auto &Op : I->operands()) {
35027       // Make sure we are not already sinking this operand
35028       if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
35029         continue;
35030 
35031       // Look for PMULDQ pattern where the input is a sext_inreg from vXi32 or
35032       // the PMULUDQ pattern where the input is a zext_inreg from vXi32.
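            // That is, (ashr (shl X, 32), 32) is a sign_extend_inreg from i32
            // (PMULDQ, SSE4.1) and (and X, 0xffffffff) is a zero_extend_inreg
            // (PMULUDQ, SSE2); both multiplies read only the low 32 bits of
            // each 64-bit element.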
35033       if (Subtarget.hasSSE41() &&
35034           match(Op.get(), m_AShr(m_Shl(m_Value(), m_SpecificInt(32)),
35035                                  m_SpecificInt(32)))) {
35036         Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0));
35037         Ops.push_back(&Op);
35038       } else if (Subtarget.hasSSE2() &&
35039                  match(Op.get(),
35040                        m_And(m_Value(), m_SpecificInt(UINT64_C(0xffffffff))))) {
35041         Ops.push_back(&Op);
35042       }
35043     }
35044 
35045     return !Ops.empty();
35046   }
35047 
35048   // A uniform shift amount in a vector shift or funnel shift may be much
35049   // cheaper than a generic variable vector shift, so make that pattern visible
35050   // to SDAG by sinking the shuffle instruction next to the shift.
35051   int ShiftAmountOpNum = -1;
35052   if (I->isShift())
35053     ShiftAmountOpNum = 1;
35054   else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
35055     if (II->getIntrinsicID() == Intrinsic::fshl ||
35056         II->getIntrinsicID() == Intrinsic::fshr)
35057       ShiftAmountOpNum = 2;
35058   }
35059 
35060   if (ShiftAmountOpNum == -1)
35061     return false;
35062 
35063   auto *Shuf = dyn_cast<ShuffleVectorInst>(I->getOperand(ShiftAmountOpNum));
35064   if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
35065       isVectorShiftByScalarCheap(I->getType())) {
35066     Ops.push_back(&I->getOperandUse(ShiftAmountOpNum));
35067     return true;
35068   }
35069 
35070   return false;
35071 }
35072 
35073 bool X86TargetLowering::shouldConvertPhiType(Type *From, Type *To) const {
35074   if (!Subtarget.is64Bit())
35075     return false;
35076   return TargetLowering::shouldConvertPhiType(From, To);
35077 }
35078 
35079 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
35080   if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
35081     return false;
35082 
35083   EVT SrcVT = ExtVal.getOperand(0).getValueType();
35084 
35085   // There is no extending load for vXi1.
35086   if (SrcVT.getScalarType() == MVT::i1)
35087     return false;
35088 
35089   return true;
35090 }
35091 
35092 bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
35093                                                    EVT VT) const {
35094   if (!Subtarget.hasAnyFMA())
35095     return false;
35096 
35097   VT = VT.getScalarType();
35098 
35099   if (!VT.isSimple())
35100     return false;
35101 
35102   switch (VT.getSimpleVT().SimpleTy) {
35103   case MVT::f16:
35104     return Subtarget.hasFP16();
35105   case MVT::f32:
35106   case MVT::f64:
35107     return true;
35108   default:
35109     break;
35110   }
35111 
35112   return false;
35113 }
35114 
35115 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
35116   // i16 instructions are longer (0x66 prefix) and potentially slower.
35117   return !(VT1 == MVT::i32 && VT2 == MVT::i16);
35118 }
35119 
35120 bool X86TargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode,
35121                                                              EVT VT) const {
35122   // TODO: This is too general. There are cases where pre-AVX512 codegen would
35123   //       benefit. The transform may also be profitable for scalar code.
35124   if (!Subtarget.hasAVX512())
35125     return false;
35126   if (!Subtarget.hasVLX() && !VT.is512BitVector())
35127     return false;
35128   if (!VT.isVector() || VT.getScalarType() == MVT::i1)
35129     return false;
35130 
35131   return true;
35132 }
35133 
35134 /// Targets can use this to indicate that they only support *some*
35135 /// VECTOR_SHUFFLE operations, those with specific masks.
35136 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
35137 /// are assumed to be legal.
35138 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const {
35139   if (!VT.isSimple())
35140     return false;
35141 
35142   // Not for i1 vectors
35143   if (VT.getSimpleVT().getScalarType() == MVT::i1)
35144     return false;
35145 
35146   // Very little shuffling can be done for 64-bit vectors right now.
35147   if (VT.getSimpleVT().getSizeInBits() == 64)
35148     return false;
35149 
35150   // We only care that the types being shuffled are legal. The lowering can
35151   // handle any possible shuffle mask that results.
35152   return isTypeLegal(VT.getSimpleVT());
35153 }
35154 
35155 bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
35156                                                EVT VT) const {
35157   // Don't convert an 'and' into a shuffle that we don't directly support.
35158   // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
35159   if (!Subtarget.hasAVX2())
35160     if (VT == MVT::v32i8 || VT == MVT::v16i16)
35161       return false;
35162 
35163   // Just delegate to the generic legality, clear masks aren't special.
35164   return isShuffleMaskLegal(Mask, VT);
35165 }
35166 
35167 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
35168   // If the subtarget is using thunks, we must not generate jump tables.
35169   if (Subtarget.useIndirectThunkBranches())
35170     return false;
35171 
35172   // Otherwise, fallback on the generic logic.
35173   return TargetLowering::areJTsAllowed(Fn);
35174 }
35175 
35176 MVT X86TargetLowering::getPreferredSwitchConditionType(LLVMContext &Context,
35177                                                        EVT ConditionVT) const {
35178   // Avoid 8- and 16-bit types because they increase the chance of
35179   // unnecessary zero-extensions.
35180   if (ConditionVT.getSizeInBits() < 32)
35181     return MVT::i32;
35182   return TargetLoweringBase::getPreferredSwitchConditionType(Context,
35183                                                              ConditionVT);
35184 }
35185 
35186 //===----------------------------------------------------------------------===//
35187 //                           X86 Scheduler Hooks
35188 //===----------------------------------------------------------------------===//
35189 
35190 // Returns true if EFLAGS is consumed after this iterator in the rest of the
35191 // basic block or any successors of the basic block.
35192 static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
35193                               MachineBasicBlock *BB) {
35194   // Scan forward through BB for a use/def of EFLAGS.
35195   for (const MachineInstr &mi : llvm::make_range(std::next(Itr), BB->end())) {
35196     if (mi.readsRegister(X86::EFLAGS))
35197       return true;
35198     // If we found a def, we can stop searching.
35199     if (mi.definesRegister(X86::EFLAGS))
35200       return false;
35201   }
35202 
35203   // If we hit the end of the block, check whether EFLAGS is live into a
35204   // successor.
35205   for (MachineBasicBlock *Succ : BB->successors())
35206     if (Succ->isLiveIn(X86::EFLAGS))
35207       return true;
35208 
35209   return false;
35210 }
35211 
35212 /// Utility function to emit xbegin specifying the start of an RTM region.
35213 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
35214                                      const TargetInstrInfo *TII) {
35215   const DebugLoc &DL = MI.getDebugLoc();
35216 
35217   const BasicBlock *BB = MBB->getBasicBlock();
35218   MachineFunction::iterator I = ++MBB->getIterator();
35219 
35220   // For the v = xbegin(), we generate
35221   //
35222   // thisMBB:
35223   //  xbegin sinkMBB
35224   //
35225   // mainMBB:
35226   //  s0 = -1
35227   //
35228   // fallBB:
35229   //  eax = # XABORT_DEF
35230   //  s1 = eax
35231   //
35232   // sinkMBB:
35233   //  v = phi(s0/mainBB, s1/fallBB)
35234 
35235   MachineBasicBlock *thisMBB = MBB;
35236   MachineFunction *MF = MBB->getParent();
35237   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
35238   MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
35239   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
35240   MF->insert(I, mainMBB);
35241   MF->insert(I, fallMBB);
35242   MF->insert(I, sinkMBB);
35243 
35244   if (isEFLAGSLiveAfter(MI, MBB)) {
35245     mainMBB->addLiveIn(X86::EFLAGS);
35246     fallMBB->addLiveIn(X86::EFLAGS);
35247     sinkMBB->addLiveIn(X86::EFLAGS);
35248   }
35249 
35250   // Transfer the remainder of BB and its successor edges to sinkMBB.
35251   sinkMBB->splice(sinkMBB->begin(), MBB,
35252                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
35253   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
35254 
35255   MachineRegisterInfo &MRI = MF->getRegInfo();
35256   Register DstReg = MI.getOperand(0).getReg();
35257   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
35258   Register mainDstReg = MRI.createVirtualRegister(RC);
35259   Register fallDstReg = MRI.createVirtualRegister(RC);
35260 
35261   // thisMBB:
35262   //  xbegin fallMBB
35263   //  # fallthrough to mainMBB
35264   //  # abort transfers control to fallMBB
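        // XBEGIN falls through when the transaction starts successfully; on an
        // abort the processor resumes at fallMBB with the abort status in EAX.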
35265   BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
35266   thisMBB->addSuccessor(mainMBB);
35267   thisMBB->addSuccessor(fallMBB);
35268 
35269   // mainMBB:
35270   //  mainDstReg := -1
35271   BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
35272   BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
35273   mainMBB->addSuccessor(sinkMBB);
35274 
35275   // fallMBB:
35276   //  ; pseudo instruction to model hardware's definition from XABORT
35277   //  EAX := XABORT_DEF
35278   //  fallDstReg := EAX
35279   BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
35280   BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
35281       .addReg(X86::EAX);
35282   fallMBB->addSuccessor(sinkMBB);
35283 
35284   // sinkMBB:
35285   //  DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
35286   BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
35287       .addReg(mainDstReg).addMBB(mainMBB)
35288       .addReg(fallDstReg).addMBB(fallMBB);
35289 
35290   MI.eraseFromParent();
35291   return sinkMBB;
35292 }
35293 
35294 MachineBasicBlock *
35295 X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI,
35296                                                MachineBasicBlock *MBB) const {
35297   // Emit va_arg instruction on X86-64.
35298 
35299   // Operands to this pseudo-instruction:
35300   // 0  ) Output        : destination address (reg)
35301   // 1-5) Input         : va_list address (addr, i64mem)
35302   // 6  ) ArgSize       : Size (in bytes) of vararg type
35303   // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
35304   // 8  ) Align         : Alignment of type
35305   // 9  ) EFLAGS (implicit-def)
35306 
35307   assert(MI.getNumOperands() == 10 && "VAARG should have 10 operands!");
35308   static_assert(X86::AddrNumOperands == 5, "VAARG assumes 5 address operands");
35309 
35310   Register DestReg = MI.getOperand(0).getReg();
35311   MachineOperand &Base = MI.getOperand(1);
35312   MachineOperand &Scale = MI.getOperand(2);
35313   MachineOperand &Index = MI.getOperand(3);
35314   MachineOperand &Disp = MI.getOperand(4);
35315   MachineOperand &Segment = MI.getOperand(5);
35316   unsigned ArgSize = MI.getOperand(6).getImm();
35317   unsigned ArgMode = MI.getOperand(7).getImm();
35318   Align Alignment = Align(MI.getOperand(8).getImm());
35319 
35320   MachineFunction *MF = MBB->getParent();
35321 
35322   // Memory Reference
35323   assert(MI.hasOneMemOperand() && "Expected VAARG to have one memoperand");
35324 
35325   MachineMemOperand *OldMMO = MI.memoperands().front();
35326 
35327   // Clone the MMO into two separate MMOs for loading and storing
35328   MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
35329       OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
35330   MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
35331       OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
35332 
35333   // Machine Information
35334   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35335   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
35336   const TargetRegisterClass *AddrRegClass =
35337       getRegClassFor(getPointerTy(MBB->getParent()->getDataLayout()));
35338   const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
35339   const DebugLoc &DL = MI.getDebugLoc();
35340 
35341   // struct va_list {
35342   //   i32   gp_offset
35343   //   i32   fp_offset
35344   //   i64   overflow_area (address)
35345   //   i64   reg_save_area (address)
35346   // }
35347   // sizeof(va_list) = 24
35348   // alignment(va_list) = 8
35349 
35350   unsigned TotalNumIntRegs = 6;
35351   unsigned TotalNumXMMRegs = 8;
35352   bool UseGPOffset = (ArgMode == 1);
35353   bool UseFPOffset = (ArgMode == 2);
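        // Per the SysV x86-64 ABI, the reg_save_area holds the 6 integer
        // argument registers (8 bytes each) followed by 8 XMM registers
        // (16 bytes each), so fp_offset values start beyond the 48-byte
        // integer area.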
35354   unsigned MaxOffset = TotalNumIntRegs * 8 +
35355                        (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
35356 
35357   // Align ArgSize to a multiple of 8.
35358   unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
35359   bool NeedsAlign = (Alignment > 8);
35360 
35361   MachineBasicBlock *thisMBB = MBB;
35362   MachineBasicBlock *overflowMBB;
35363   MachineBasicBlock *offsetMBB;
35364   MachineBasicBlock *endMBB;
35365 
35366   unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
35367   unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
35368   unsigned OffsetReg = 0;
35369 
35370   if (!UseGPOffset && !UseFPOffset) {
35371     // If we only pull from the overflow region, we don't create a branch.
35372     // We don't need to alter control flow.
35373     OffsetDestReg = 0; // unused
35374     OverflowDestReg = DestReg;
35375 
35376     offsetMBB = nullptr;
35377     overflowMBB = thisMBB;
35378     endMBB = thisMBB;
35379   } else {
35380     // First emit code to check if gp_offset (or fp_offset) is below the bound.
35381     // If so, pull the argument from reg_save_area. (branch to offsetMBB)
35382     // If not, pull from overflow_area. (branch to overflowMBB)
35383     //
35384     //       thisMBB
35385     //         |     .
35386     //         |        .
35387     //     offsetMBB   overflowMBB
35388     //         |        .
35389     //         |     .
35390     //        endMBB
35391 
35392     // Registers for the PHI in endMBB
35393     OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
35394     OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
35395 
35396     const BasicBlock *LLVM_BB = MBB->getBasicBlock();
35397     overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35398     offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35399     endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
35400 
35401     MachineFunction::iterator MBBIter = ++MBB->getIterator();
35402 
35403     // Insert the new basic blocks
35404     MF->insert(MBBIter, offsetMBB);
35405     MF->insert(MBBIter, overflowMBB);
35406     MF->insert(MBBIter, endMBB);
35407 
35408     // Transfer the remainder of MBB and its successor edges to endMBB.
35409     endMBB->splice(endMBB->begin(), thisMBB,
35410                    std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
35411     endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
35412 
35413     // Make offsetMBB and overflowMBB successors of thisMBB
35414     thisMBB->addSuccessor(offsetMBB);
35415     thisMBB->addSuccessor(overflowMBB);
35416 
35417     // endMBB is a successor of both offsetMBB and overflowMBB
35418     offsetMBB->addSuccessor(endMBB);
35419     overflowMBB->addSuccessor(endMBB);
35420 
35421     // Load the offset value into a register
35422     OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
35423     BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
35424         .add(Base)
35425         .add(Scale)
35426         .add(Index)
35427         .addDisp(Disp, UseFPOffset ? 4 : 0)
35428         .add(Segment)
35429         .setMemRefs(LoadOnlyMMO);
35430 
35431     // Check if there is enough room left to pull this argument.
35432     BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
35433       .addReg(OffsetReg)
35434       .addImm(MaxOffset + 8 - ArgSizeA8);
35435 
35436     // Branch to "overflowMBB" if offset >= max
35437     // Fall through to "offsetMBB" otherwise
35438     BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
35439       .addMBB(overflowMBB).addImm(X86::COND_AE);
35440   }
35441 
35442   // In offsetMBB, emit code to use the reg_save_area.
35443   if (offsetMBB) {
35444     assert(OffsetReg != 0);
35445 
35446     // Read the reg_save_area address.
35447     Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
35448     BuildMI(
35449         offsetMBB, DL,
35450         TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
35451         RegSaveReg)
35452         .add(Base)
35453         .add(Scale)
35454         .add(Index)
35455         .addDisp(Disp, Subtarget.isTarget64BitLP64() ? 16 : 12)
35456         .add(Segment)
35457         .setMemRefs(LoadOnlyMMO);
35458 
35459     if (Subtarget.isTarget64BitLP64()) {
35460       // Zero-extend the offset
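            // SUBREG_TO_REG with a zero immediate records that the upper 32
            // bits are already zero, matching x86-64's implicit zero-extension
            // of 32-bit sub-register writes.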
35461       Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
35462       BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
35463           .addImm(0)
35464           .addReg(OffsetReg)
35465           .addImm(X86::sub_32bit);
35466 
35467       // Add the offset to the reg_save_area to get the final address.
35468       BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
35469           .addReg(OffsetReg64)
35470           .addReg(RegSaveReg);
35471     } else {
35472       // Add the offset to the reg_save_area to get the final address.
35473       BuildMI(offsetMBB, DL, TII->get(X86::ADD32rr), OffsetDestReg)
35474           .addReg(OffsetReg)
35475           .addReg(RegSaveReg);
35476     }
35477 
35478     // Compute the offset for the next argument
35479     Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
35480     BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
35481       .addReg(OffsetReg)
35482       .addImm(UseFPOffset ? 16 : 8);
35483 
35484     // Store it back into the va_list.
35485     BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
35486         .add(Base)
35487         .add(Scale)
35488         .add(Index)
35489         .addDisp(Disp, UseFPOffset ? 4 : 0)
35490         .add(Segment)
35491         .addReg(NextOffsetReg)
35492         .setMemRefs(StoreOnlyMMO);
35493 
35494     // Jump to endMBB
35495     BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
35496       .addMBB(endMBB);
35497   }
35498 
35499   //
35500   // Emit code to use overflow area
35501   //
35502 
35503   // Load the overflow_area address into a register.
35504   Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
35505   BuildMI(overflowMBB, DL,
35506           TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
35507           OverflowAddrReg)
35508       .add(Base)
35509       .add(Scale)
35510       .add(Index)
35511       .addDisp(Disp, 8)
35512       .add(Segment)
35513       .setMemRefs(LoadOnlyMMO);
35514 
35515   // If we need to align it, do so. Otherwise, just copy the address
35516   // to OverflowDestReg.
35517   if (NeedsAlign) {
35518     // Align the overflow address
35519     Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
35520 
35521     // aligned_addr = (addr + (align-1)) & ~(align-1)
35522     BuildMI(
35523         overflowMBB, DL,
35524         TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
35525         TmpReg)
35526         .addReg(OverflowAddrReg)
35527         .addImm(Alignment.value() - 1);
35528 
35529     BuildMI(
35530         overflowMBB, DL,
35531         TII->get(Subtarget.isTarget64BitLP64() ? X86::AND64ri32 : X86::AND32ri),
35532         OverflowDestReg)
35533         .addReg(TmpReg)
35534         .addImm(~(uint64_t)(Alignment.value() - 1));
35535   } else {
35536     BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
35537       .addReg(OverflowAddrReg);
35538   }
35539 
35540   // Compute the next overflow address after this argument.
35541   // (the overflow address should be kept 8-byte aligned)
35542   Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
35543   BuildMI(
35544       overflowMBB, DL,
35545       TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
35546       NextAddrReg)
35547       .addReg(OverflowDestReg)
35548       .addImm(ArgSizeA8);
35549 
35550   // Store the new overflow address.
35551   BuildMI(overflowMBB, DL,
35552           TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64mr : X86::MOV32mr))
35553       .add(Base)
35554       .add(Scale)
35555       .add(Index)
35556       .addDisp(Disp, 8)
35557       .add(Segment)
35558       .addReg(NextAddrReg)
35559       .setMemRefs(StoreOnlyMMO);
35560 
35561   // If we branched, emit the PHI to the front of endMBB.
35562   if (offsetMBB) {
35563     BuildMI(*endMBB, endMBB->begin(), DL,
35564             TII->get(X86::PHI), DestReg)
35565       .addReg(OffsetDestReg).addMBB(offsetMBB)
35566       .addReg(OverflowDestReg).addMBB(overflowMBB);
35567   }
35568 
35569   // Erase the pseudo instruction
35570   MI.eraseFromParent();
35571 
35572   return endMBB;
35573 }
35574 
35575 // The EFLAGS operand of SelectItr might be missing a kill marker
35576 // because there were multiple uses of EFLAGS, and ISel didn't know
35577 // which to mark. Figure out whether SelectItr should have had a
35578 // kill marker, and set it if it should. Returns the correct kill
35579 // marker value.
35580 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
35581                                      MachineBasicBlock* BB,
35582                                      const TargetRegisterInfo* TRI) {
35583   if (isEFLAGSLiveAfter(SelectItr, BB))
35584     return false;
35585 
35586   // We found a def, or hit the end of the basic block and EFLAGS wasn't live
35587   // out. SelectMI should have a kill flag on EFLAGS.
35588   SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
35589   return true;
35590 }
35591 
35592 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
35593 // together with other CMOV pseudo-opcodes into a single basic block with a
35594 // conditional jump around it.
35595 static bool isCMOVPseudo(MachineInstr &MI) {
35596   switch (MI.getOpcode()) {
35597   case X86::CMOV_FR16:
35598   case X86::CMOV_FR16X:
35599   case X86::CMOV_FR32:
35600   case X86::CMOV_FR32X:
35601   case X86::CMOV_FR64:
35602   case X86::CMOV_FR64X:
35603   case X86::CMOV_GR8:
35604   case X86::CMOV_GR16:
35605   case X86::CMOV_GR32:
35606   case X86::CMOV_RFP32:
35607   case X86::CMOV_RFP64:
35608   case X86::CMOV_RFP80:
35609   case X86::CMOV_VR64:
35610   case X86::CMOV_VR128:
35611   case X86::CMOV_VR128X:
35612   case X86::CMOV_VR256:
35613   case X86::CMOV_VR256X:
35614   case X86::CMOV_VR512:
35615   case X86::CMOV_VK1:
35616   case X86::CMOV_VK2:
35617   case X86::CMOV_VK4:
35618   case X86::CMOV_VK8:
35619   case X86::CMOV_VK16:
35620   case X86::CMOV_VK32:
35621   case X86::CMOV_VK64:
35622     return true;
35623 
35624   default:
35625     return false;
35626   }
35627 }
35628 
35629 // Helper function which inserts PHI functions into SinkMBB:
35630 //   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
35631 // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
35632 // in the [MIItBegin, MIItEnd) range. It returns the MachineInstrBuilder for the
35633 // last PHI function inserted.
35634 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
35635     MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
35636     MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
35637     MachineBasicBlock *SinkMBB) {
35638   MachineFunction *MF = TrueMBB->getParent();
35639   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
35640   const DebugLoc &DL = MIItBegin->getDebugLoc();
35641 
35642   X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
35643   X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
35644 
35645   MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
35646 
35647   // As we are creating the PHIs, we have to be careful if there is more than
35648   // one.  Later CMOVs may reference the results of earlier CMOVs, but later
35649   // PHIs have to reference the individual true/false inputs from earlier PHIs.
35650   // That also means that PHI construction must work forward from earlier to
35651   // later, and that the code must maintain a mapping from each earlier PHI's
35652   // destination register to the false/true registers that went into that PHI.
35653   DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
35654   MachineInstrBuilder MIB;
35655 
35656   for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
35657     Register DestReg = MIIt->getOperand(0).getReg();
35658     Register Op1Reg = MIIt->getOperand(1).getReg();
35659     Register Op2Reg = MIIt->getOperand(2).getReg();
35660 
35661     // If the CMOV we are lowering uses the opposite condition from the
35662     // jump we generated, then we have to swap the operands for the
35663     // PHI that is going to be generated.
35664     if (MIIt->getOperand(3).getImm() == OppCC)
35665       std::swap(Op1Reg, Op2Reg);
35666 
35667     if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
35668       Op1Reg = RegRewriteTable[Op1Reg].first;
35669 
35670     if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
35671       Op2Reg = RegRewriteTable[Op2Reg].second;
35672 
35673     MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
35674               .addReg(Op1Reg)
35675               .addMBB(FalseMBB)
35676               .addReg(Op2Reg)
35677               .addMBB(TrueMBB);
35678 
35679     // Add this PHI to the rewrite table.
35680     RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
35681   }
35682 
35683   return MIB;
35684 }
35685 
35686 // Lower cascaded selects of the form (SecondCMOV (FirstCMOV F, T, cc1), T, cc2).
35687 MachineBasicBlock *
35688 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
35689                                              MachineInstr &SecondCascadedCMOV,
35690                                              MachineBasicBlock *ThisMBB) const {
35691   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35692   const DebugLoc &DL = FirstCMOV.getDebugLoc();
35693 
35694   // We lower cascaded CMOVs such as
35695   //
35696   //   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
35697   //
35698   // to two successive branches.
35699   //
35700   // Without this, we would add a PHI between the two jumps, which ends up
35701   // creating a few copies all around. For instance, for
35702   //
35703   //    (sitofp (zext (fcmp une)))
35704   //
35705   // we would generate:
35706   //
35707   //         ucomiss %xmm1, %xmm0
35708   //         movss  <1.0f>, %xmm0
35709   //         movaps  %xmm0, %xmm1
35710   //         jne     .LBB5_2
35711   //         xorps   %xmm1, %xmm1
35712   // .LBB5_2:
35713   //         jp      .LBB5_4
35714   //         movaps  %xmm1, %xmm0
35715   // .LBB5_4:
35716   //         retq
35717   //
35718   // because this custom-inserter would have generated:
35719   //
35720   //   A
35721   //   | \
35722   //   |  B
35723   //   | /
35724   //   C
35725   //   | \
35726   //   |  D
35727   //   | /
35728   //   E
35729   //
35730   // A: X = ...; Y = ...
35731   // B: empty
35732   // C: Z = PHI [X, A], [Y, B]
35733   // D: empty
35734   // E: PHI [X, C], [Z, D]
35735   //
35736   // If we lower both CMOVs in a single step, we can instead generate:
35737   //
35738   //   A
35739   //   | \
35740   //   |  C
35741   //   | /|
35742   //   |/ |
35743   //   |  |
35744   //   |  D
35745   //   | /
35746   //   E
35747   //
35748   // A: X = ...; Y = ...
35749   // D: empty
35750   // E: PHI [X, A], [X, C], [Y, D]
35751   //
35752   // Which, in our sitofp/fcmp example, gives us something like:
35753   //
35754   //         ucomiss %xmm1, %xmm0
35755   //         movss  <1.0f>, %xmm0
35756   //         jne     .LBB5_4
35757   //         jp      .LBB5_4
35758   //         xorps   %xmm0, %xmm0
35759   // .LBB5_4:
35760   //         retq
35761   //
35762 
35763   // We lower cascaded CMOVs into two successive branches to the same block.
35764   // EFLAGS is used by both branches, so mark it as live into FirstInsertedMBB.
35765   const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
35766   MachineFunction *F = ThisMBB->getParent();
35767   MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
35768   MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
35769   MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
35770 
35771   MachineFunction::iterator It = ++ThisMBB->getIterator();
35772   F->insert(It, FirstInsertedMBB);
35773   F->insert(It, SecondInsertedMBB);
35774   F->insert(It, SinkMBB);
35775 
35776   // For a cascaded CMOV, we lower it to two successive branches to
35777   // the same block (SinkMBB).  EFLAGS is used by both, so mark it as live in
35778   // the FirstInsertedMBB.
35779   FirstInsertedMBB->addLiveIn(X86::EFLAGS);
35780 
35781   // If the EFLAGS register isn't dead in the terminator, then claim that it's
35782   // live into the sink and copy blocks.
35783   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
35784   if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
35785       !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
35786     SecondInsertedMBB->addLiveIn(X86::EFLAGS);
35787     SinkMBB->addLiveIn(X86::EFLAGS);
35788   }
35789 
35790   // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
35791   SinkMBB->splice(SinkMBB->begin(), ThisMBB,
35792                   std::next(MachineBasicBlock::iterator(FirstCMOV)),
35793                   ThisMBB->end());
35794   SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
35795 
35796   // Fallthrough block for ThisMBB.
35797   ThisMBB->addSuccessor(FirstInsertedMBB);
35798   // The true block target of the first branch is always SinkMBB.
35799   ThisMBB->addSuccessor(SinkMBB);
35800   // Fallthrough block for FirstInsertedMBB.
35801   FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
35802   // The true block for the branch of FirstInsertedMBB.
35803   FirstInsertedMBB->addSuccessor(SinkMBB);
35804   // This is fallthrough.
35805   SecondInsertedMBB->addSuccessor(SinkMBB);
35806 
35807   // Create the conditional branch instructions.
35808   X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
35809   BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
35810 
35811   X86::CondCode SecondCC =
35812       X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
35813   BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(SecondCC);
35814 
35815   //  SinkMBB:
35816   //   %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
35817   Register DestReg = SecondCascadedCMOV.getOperand(0).getReg();
35818   Register Op1Reg = FirstCMOV.getOperand(1).getReg();
35819   Register Op2Reg = FirstCMOV.getOperand(2).getReg();
35820   MachineInstrBuilder MIB =
35821       BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
35822           .addReg(Op1Reg)
35823           .addMBB(SecondInsertedMBB)
35824           .addReg(Op2Reg)
35825           .addMBB(ThisMBB);
35826 
35827   // SecondInsertedMBB provides the same incoming value as FirstInsertedMBB
35828   // (the True operand of the SELECT_CC/CMOV nodes).
35829   MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
35830 
35831   // Now remove the CMOVs.
35832   FirstCMOV.eraseFromParent();
35833   SecondCascadedCMOV.eraseFromParent();
35834 
35835   return SinkMBB;
35836 }
35837 
35838 MachineBasicBlock *
35839 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
35840                                      MachineBasicBlock *ThisMBB) const {
35841   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35842   const DebugLoc &DL = MI.getDebugLoc();
35843 
35844   // To "insert" a SELECT_CC instruction, we actually have to insert the
35845   // diamond control-flow pattern.  The incoming instruction knows the
35846   // destination vreg to set, the condition code register to branch on, the
35847   // true/false values to select between and a branch opcode to use.
35848 
35849   //  ThisMBB:
35850   //  ...
35851   //   TrueVal = ...
35852   //   cmpTY ccX, r1, r2
35853   //   bCC copy1MBB
35854   //   fallthrough --> FalseMBB
35855 
35856   // This code lowers all pseudo-CMOV instructions. Generally it lowers these
35857   // as described above, by inserting a BB, and then making a PHI at the join
35858   // point to select the true and false operands of the CMOV in the PHI.
35859   //
35860   // The code also handles two different cases of multiple CMOV opcodes
35861   // in a row.
35862   //
35863   // Case 1:
35864   // In this case, there are multiple CMOVs in a row, all of which are based
35865   // on the same condition setting (or the exact opposite condition setting).
35866   // We can then lower all the CMOVs using a single inserted BB, and make a
35867   // number of PHIs at the join point to model the CMOVs. The only trickiness
35868   // here is that in a case like:
35869   //
35870   // t2 = CMOV cond1 t1, f1
35871   // t3 = CMOV cond1 t2, f2
35872   //
35873   // when rewriting this into PHIs, we have to perform some renaming on the
35874   // temps since you cannot have a PHI operand refer to a PHI result earlier
35875   // in the same block.  The "simple" but wrong lowering would be:
35876   //
35877   // t2 = PHI t1(BB1), f1(BB2)
35878   // t3 = PHI t2(BB1), f2(BB2)
35879   //
35880   // but clearly t2 is not defined in BB1, so that is incorrect. The proper
35881   // renaming is to note that on the path through BB1, t2 is really just a
35882   // copy of t1, and do that renaming, properly generating:
35883   //
35884   // t2 = PHI t1(BB1), f1(BB2)
35885   // t3 = PHI t1(BB1), f2(BB2)
35886   //
35887   // Case 2:
35888   // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
35889   // function - EmitLoweredCascadedSelect.
35890 
35891   X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
35892   X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
35893   MachineInstr *LastCMOV = &MI;
35894   MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
35895 
35896   // Check for case 1, where there are multiple CMOVs with the same condition
35897   // first.  Of the two cases of multiple CMOV lowerings, case 1 reduces the
35898   // number of jumps the most.
35899 
35900   if (isCMOVPseudo(MI)) {
35901     // See if we have a string of CMOVs with the same condition. Skip over
35902     // intervening debug insts.
35903     while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
35904            (NextMIIt->getOperand(3).getImm() == CC ||
35905             NextMIIt->getOperand(3).getImm() == OppCC)) {
35906       LastCMOV = &*NextMIIt;
35907       NextMIIt = next_nodbg(NextMIIt, ThisMBB->end());
35908     }
35909   }
35910 
35911   // This checks for case 2, but we only do so if we didn't already find
35912   // case 1 (as indicated by LastCMOV still being &MI).
35913   if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
35914       NextMIIt->getOpcode() == MI.getOpcode() &&
35915       NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
35916       NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
35917       NextMIIt->getOperand(1).isKill()) {
35918     return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
35919   }
35920 
35921   const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
35922   MachineFunction *F = ThisMBB->getParent();
35923   MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
35924   MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
35925 
35926   MachineFunction::iterator It = ++ThisMBB->getIterator();
35927   F->insert(It, FalseMBB);
35928   F->insert(It, SinkMBB);
35929 
35930   // If the EFLAGS register isn't dead in the terminator, then claim that it's
35931   // live into the sink and copy blocks.
35932   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
35933   if (!LastCMOV->killsRegister(X86::EFLAGS) &&
35934       !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
35935     FalseMBB->addLiveIn(X86::EFLAGS);
35936     SinkMBB->addLiveIn(X86::EFLAGS);
35937   }
35938 
35939   // Transfer any debug instructions inside the CMOV sequence to the sunk block.
35940   auto DbgRange = llvm::make_range(MachineBasicBlock::iterator(MI),
35941                                    MachineBasicBlock::iterator(LastCMOV));
35942   for (MachineInstr &MI : llvm::make_early_inc_range(DbgRange))
35943     if (MI.isDebugInstr())
35944       SinkMBB->push_back(MI.removeFromParent());
35945 
35946   // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
35947   SinkMBB->splice(SinkMBB->end(), ThisMBB,
35948                   std::next(MachineBasicBlock::iterator(LastCMOV)),
35949                   ThisMBB->end());
35950   SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
35951 
35952   // Fallthrough block for ThisMBB.
35953   ThisMBB->addSuccessor(FalseMBB);
35954   // The true block target of the first (or only) branch is always SinkMBB.
35955   ThisMBB->addSuccessor(SinkMBB);
35956   // Fallthrough block for FalseMBB.
35957   FalseMBB->addSuccessor(SinkMBB);
35958 
35959   // Create the conditional branch instruction.
35960   BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
35961 
35962   //  SinkMBB:
35963   //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
35964   //  ...
35965   MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
35966   MachineBasicBlock::iterator MIItEnd =
35967       std::next(MachineBasicBlock::iterator(LastCMOV));
35968   createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
35969 
35970   // Now remove the CMOV(s).
35971   ThisMBB->erase(MIItBegin, MIItEnd);
35972 
35973   return SinkMBB;
35974 }
35975 
35976 static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
35977   if (IsLP64) {
35978     if (isInt<8>(Imm))
35979       return X86::SUB64ri8;
35980     return X86::SUB64ri32;
35981   } else {
35982     if (isInt<8>(Imm))
35983       return X86::SUB32ri8;
35984     return X86::SUB32ri;
35985   }
35986 }
35987 
35988 MachineBasicBlock *
35989 X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
35990                                            MachineBasicBlock *MBB) const {
35991   MachineFunction *MF = MBB->getParent();
35992   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35993   const X86FrameLowering &TFI = *Subtarget.getFrameLowering();
35994   const DebugLoc &DL = MI.getDebugLoc();
35995   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
35996 
35997   const unsigned ProbeSize = getStackProbeSize(*MF);
35998 
35999   MachineRegisterInfo &MRI = MF->getRegInfo();
36000   MachineBasicBlock *testMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36001   MachineBasicBlock *tailMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36002   MachineBasicBlock *blockMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36003 
36004   MachineFunction::iterator MBBIter = ++MBB->getIterator();
36005   MF->insert(MBBIter, testMBB);
36006   MF->insert(MBBIter, blockMBB);
36007   MF->insert(MBBIter, tailMBB);
36008 
36009   Register sizeVReg = MI.getOperand(1).getReg();
36010 
36011   Register physSPReg = TFI.Uses64BitFramePtr ? X86::RSP : X86::ESP;
36012 
36013   Register TmpStackPtr = MRI.createVirtualRegister(
36014       TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
36015   Register FinalStackPtr = MRI.createVirtualRegister(
36016       TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
36017 
36018   BuildMI(*MBB, {MI}, DL, TII->get(TargetOpcode::COPY), TmpStackPtr)
36019       .addReg(physSPReg);
36020   {
36021     const unsigned Opc = TFI.Uses64BitFramePtr ? X86::SUB64rr : X86::SUB32rr;
36022     BuildMI(*MBB, {MI}, DL, TII->get(Opc), FinalStackPtr)
36023         .addReg(TmpStackPtr)
36024         .addReg(sizeVReg);
36025   }
36026 
36027   // Loop test: exit once the stack pointer has been decremented down to FinalStackPtr.
36028 
36029   BuildMI(testMBB, DL,
36030           TII->get(TFI.Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
36031       .addReg(FinalStackPtr)
36032       .addReg(physSPReg);
36033 
36034   BuildMI(testMBB, DL, TII->get(X86::JCC_1))
36035       .addMBB(tailMBB)
36036       .addImm(X86::COND_GE);
36037   testMBB->addSuccessor(blockMBB);
36038   testMBB->addSuccessor(tailMBB);
36039 
36040   // Touch the block, then extend it. This is the opposite order from static
36041   // probing, where we allocate and then touch; doing it this way avoids having
36042   // to probe the tail of the static alloca. Possible scenarios are:
36043   //
36044   //       + ---- <- ------------ <- ------------- <- ------------ +
36045   //       |                                                       |
36046   // [free probe] -> [page alloc] -> [alloc probe] -> [tail alloc] + -> [dyn probe] -> [page alloc] -> [dyn probe] -> [tail alloc] +
36047   //                                                               |                                                               |
36048   //                                                               + <- ----------- <- ------------ <- ----------- <- ------------ +
36049   //
36050   // The property we want to enforce is to never have more than [page alloc] between two probes.
36051 
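        // Probe the page at the current stack pointer by XOR'ing an immediate 0
        // into the word it points at: this reads and writes the location without
        // changing its value or clobbering a register, which is enough to fault
        // in a guard page.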
36052   const unsigned XORMIOpc =
36053       TFI.Uses64BitFramePtr ? X86::XOR64mi8 : X86::XOR32mi8;
36054   addRegOffset(BuildMI(blockMBB, DL, TII->get(XORMIOpc)), physSPReg, false, 0)
36055       .addImm(0);
36056 
36057   BuildMI(blockMBB, DL,
36058           TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr, ProbeSize)), physSPReg)
36059       .addReg(physSPReg)
36060       .addImm(ProbeSize);
36061 
36062 
36063   BuildMI(blockMBB, DL, TII->get(X86::JMP_1)).addMBB(testMBB);
36064   blockMBB->addSuccessor(testMBB);
36065 
36066   // Replace the original instruction's result with the expected stack pointer.
36067   BuildMI(tailMBB, DL, TII->get(TargetOpcode::COPY), MI.getOperand(0).getReg())
36068       .addReg(FinalStackPtr);
36069 
36070   tailMBB->splice(tailMBB->end(), MBB,
36071                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
36072   tailMBB->transferSuccessorsAndUpdatePHIs(MBB);
36073   MBB->addSuccessor(testMBB);
36074 
36075   // Delete the original pseudo instruction.
36076   MI.eraseFromParent();
36077 
36078   // And we're done.
36079   return tailMBB;
36080 }
36081 
36082 MachineBasicBlock *
36083 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
36084                                         MachineBasicBlock *BB) const {
36085   MachineFunction *MF = BB->getParent();
36086   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36087   const DebugLoc &DL = MI.getDebugLoc();
36088   const BasicBlock *LLVM_BB = BB->getBasicBlock();
36089 
36090   assert(MF->shouldSplitStack());
36091 
36092   const bool Is64Bit = Subtarget.is64Bit();
36093   const bool IsLP64 = Subtarget.isTarget64BitLP64();
36094 
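        // Thread-local slot holding the current stacklet's stack limit. The
        // offsets below (0x70 in %fs for LP64, 0x40 for x32, 0x30 in %gs for
        // 32-bit) are assumed to follow the split-stack convention shared with
        // libgcc's __morestack runtime.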
36095   const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
36096   const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
36097 
36098   // BB:
36099   //  ... [Till the alloca]
36100   //  If the stacklet is not large enough, jump to mallocMBB
36101   //
36102   // bumpMBB:
36103   //  Allocate by subtracting from RSP
36104   //  Jump to continueMBB
36105   //
36106   // mallocMBB:
36107   //  Allocate by call to runtime
36108   //
36109   // continueMBB:
36110   //  ...
36111   //  [rest of original BB]
36112   //
36113 
36114   MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36115   MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36116   MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
36117 
36118   MachineRegisterInfo &MRI = MF->getRegInfo();
36119   const TargetRegisterClass *AddrRegClass =
36120       getRegClassFor(getPointerTy(MF->getDataLayout()));
36121 
36122   Register mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
36123            bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
36124            tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
36125            SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
36126            sizeVReg = MI.getOperand(1).getReg(),
36127            physSPReg =
36128                IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
36129 
36130   MachineFunction::iterator MBBIter = ++BB->getIterator();
36131 
36132   MF->insert(MBBIter, bumpMBB);
36133   MF->insert(MBBIter, mallocMBB);
36134   MF->insert(MBBIter, continueMBB);
36135 
36136   continueMBB->splice(continueMBB->begin(), BB,
36137                       std::next(MachineBasicBlock::iterator(MI)), BB->end());
36138   continueMBB->transferSuccessorsAndUpdatePHIs(BB);
36139 
36140   // Add code to the main basic block to check if the stack limit has been hit,
36141   // and if so, jump to mallocMBB otherwise to bumpMBB.
36142   BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
36143   BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
36144     .addReg(tmpSPVReg).addReg(sizeVReg);
36145   BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
36146     .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
36147     .addReg(SPLimitVReg);
36148   BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
36149 
36150   // bumpMBB simply decreases the stack pointer, since we know the current
36151   // stacklet has enough space.
36152   BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
36153     .addReg(SPLimitVReg);
36154   BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
36155     .addReg(SPLimitVReg);
36156   BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
36157 
36158   // Call into a libgcc routine to allocate more space from the heap.
36159   const uint32_t *RegMask =
36160       Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
36161   if (IsLP64) {
36162     BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
36163       .addReg(sizeVReg);
36164     BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
36165       .addExternalSymbol("__morestack_allocate_stack_space")
36166       .addRegMask(RegMask)
36167       .addReg(X86::RDI, RegState::Implicit)
36168       .addReg(X86::RAX, RegState::ImplicitDefine);
36169   } else if (Is64Bit) {
36170     BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
36171       .addReg(sizeVReg);
36172     BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
36173       .addExternalSymbol("__morestack_allocate_stack_space")
36174       .addRegMask(RegMask)
36175       .addReg(X86::EDI, RegState::Implicit)
36176       .addReg(X86::EAX, RegState::ImplicitDefine);
36177   } else {
36178     BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
36179       .addImm(12);
36180     BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
36181     BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
36182       .addExternalSymbol("__morestack_allocate_stack_space")
36183       .addRegMask(RegMask)
36184       .addReg(X86::EAX, RegState::ImplicitDefine);
36185   }
36186 
36187   if (!Is64Bit)
36188     BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
36189       .addImm(16);
36190 
36191   BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
36192     .addReg(IsLP64 ? X86::RAX : X86::EAX);
36193   BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
36194 
36195   // Set up the CFG correctly.
36196   BB->addSuccessor(bumpMBB);
36197   BB->addSuccessor(mallocMBB);
36198   mallocMBB->addSuccessor(continueMBB);
36199   bumpMBB->addSuccessor(continueMBB);
36200 
36201   // Take care of the PHI nodes.
36202   BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
36203           MI.getOperand(0).getReg())
36204       .addReg(mallocPtrVReg)
36205       .addMBB(mallocMBB)
36206       .addReg(bumpSPPtrVReg)
36207       .addMBB(bumpMBB);
36208 
36209   // Delete the original pseudo instruction.
36210   MI.eraseFromParent();
36211 
36212   // And we're done.
36213   return continueMBB;
36214 }
36215 
36216 MachineBasicBlock *
36217 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
36218                                        MachineBasicBlock *BB) const {
36219   MachineFunction *MF = BB->getParent();
36220   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
36221   MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
36222   const DebugLoc &DL = MI.getDebugLoc();
36223 
36224   assert(!isAsynchronousEHPersonality(
36225              classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
36226          "SEH does not use catchret!");
36227 
36228   // Only 32-bit EH needs to worry about manually restoring stack pointers.
36229   if (!Subtarget.is32Bit())
36230     return BB;
36231 
36232   // C++ EH creates a new target block to hold the restore code, and wires up
36233   // the new block to the return destination with a normal JMP_4.
36234   MachineBasicBlock *RestoreMBB =
36235       MF->CreateMachineBasicBlock(BB->getBasicBlock());
36236   assert(BB->succ_size() == 1);
36237   MF->insert(std::next(BB->getIterator()), RestoreMBB);
36238   RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
36239   BB->addSuccessor(RestoreMBB);
36240   MI.getOperand(0).setMBB(RestoreMBB);
36241 
36242   // Marking this as an EH pad but not a funclet entry block causes PEI to
36243   // restore stack pointers in the block.
36244   RestoreMBB->setIsEHPad(true);
36245 
36246   auto RestoreMBBI = RestoreMBB->begin();
36247   BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
36248   return BB;
36249 }
36250 
36251 MachineBasicBlock *
36252 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
36253                                       MachineBasicBlock *BB) const {
36254   // Here we replace TLSADDR with the sequence:
36255   // adjust_stackdown -> TLSADDR -> adjust_stackup.
36256   // We need this because TLSADDR is lowered into a call
36257   // inside MC; without the two markers, shrink-wrapping
36258   // may push the prologue/epilogue past them.
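        // As a rough illustration (64-bit ELF, general-dynamic model), MC expands
        // TLSADDR into something like:
        //   leaq  sym@TLSGD(%rip), %rdi
        //   callq __tls_get_addr@PLT
        // so the CALLSEQ markers below must bracket it like any other call.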
36259   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
36260   const DebugLoc &DL = MI.getDebugLoc();
36261   MachineFunction &MF = *BB->getParent();
36262 
36263   // Emit CALLSEQ_START right before the instruction.
36264   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
36265   MachineInstrBuilder CallseqStart =
36266     BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
36267   BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
36268 
36269   // Emit CALLSEQ_END right after the instruction.
36270   // We don't call eraseFromParent() because we want to keep the
36271   // original instruction around.
36272   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
36273   MachineInstrBuilder CallseqEnd =
36274     BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
36275   BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
36276 
36277   return BB;
36278 }
36279 
36280 MachineBasicBlock *
36281 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
36282                                       MachineBasicBlock *BB) const {
36283   // This is pretty easy.  We're taking the value that we received from
36284   // our load from the relocation, sticking it in either RDI (x86-64)
36285   // or EAX and doing an indirect call.  The return value will then
36286   // be in the normal return register.
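        // As a sketch of what this becomes on x86-64 Darwin (the exact sequence is
        // up to MC):
        //   movq  _var@TLVP(%rip), %rdi
        //   callq *(%rdi)          ## result returned in %rax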
36287   MachineFunction *F = BB->getParent();
36288   const X86InstrInfo *TII = Subtarget.getInstrInfo();
36289   const DebugLoc &DL = MI.getDebugLoc();
36290 
36291   assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
36292   assert(MI.getOperand(3).isGlobal() && "This should be a global");
36293 
36294   // Get a register mask for the lowered call.
36295   // FIXME: The 32-bit calls have non-standard calling conventions. Use a
36296   // proper register mask.
36297   const uint32_t *RegMask =
36298       Subtarget.is64Bit() ?
36299       Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
36300       Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
36301   if (Subtarget.is64Bit()) {
36302     MachineInstrBuilder MIB =
36303         BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
36304             .addReg(X86::RIP)
36305             .addImm(0)
36306             .addReg(0)
36307             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
36308                               MI.getOperand(3).getTargetFlags())
36309             .addReg(0);
36310     MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
36311     addDirectMem(MIB, X86::RDI);
36312     MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
36313   } else if (!isPositionIndependent()) {
36314     MachineInstrBuilder MIB =
36315         BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
36316             .addReg(0)
36317             .addImm(0)
36318             .addReg(0)
36319             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
36320                               MI.getOperand(3).getTargetFlags())
36321             .addReg(0);
36322     MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
36323     addDirectMem(MIB, X86::EAX);
36324     MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
36325   } else {
36326     MachineInstrBuilder MIB =
36327         BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
36328             .addReg(TII->getGlobalBaseReg(F))
36329             .addImm(0)
36330             .addReg(0)
36331             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
36332                               MI.getOperand(3).getTargetFlags())
36333             .addReg(0);
36334     MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
36335     addDirectMem(MIB, X86::EAX);
36336     MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
36337   }
36338 
36339   MI.eraseFromParent(); // The pseudo instruction is gone now.
36340   return BB;
36341 }
36342 
36343 static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
36344   switch (RPOpc) {
36345   case X86::INDIRECT_THUNK_CALL32:
36346     return X86::CALLpcrel32;
36347   case X86::INDIRECT_THUNK_CALL64:
36348     return X86::CALL64pcrel32;
36349   case X86::INDIRECT_THUNK_TCRETURN32:
36350     return X86::TCRETURNdi;
36351   case X86::INDIRECT_THUNK_TCRETURN64:
36352     return X86::TCRETURNdi64;
36353   }
36354   llvm_unreachable("not indirect thunk opcode");
36355 }
36356 
36357 static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
36358                                           unsigned Reg) {
36359   if (Subtarget.useRetpolineExternalThunk()) {
36360     // When using an external thunk for retpolines, we pick names that match the
36361     // names GCC happens to use as well. This helps simplify the implementation
36362     // of the thunks for kernels where they have no easy ability to create
36363     // aliases and are doing non-trivial configuration of the thunk's body. For
36364     // example, the Linux kernel will do boot-time hot patching of the thunk
36365     // bodies and cannot easily export aliases of these to loaded modules.
36366     //
36367     // Note that at any point in the future, we may need to change the semantics
36368     // of how we implement retpolines and at that time will likely change the
36369     // name of the called thunk. Essentially, there is no hard guarantee that
36370     // LLVM will generate calls to specific thunks; we merely make a best-effort
36371     // attempt to help out kernels and other systems where duplicating the
36372     // thunks is costly.
36373     switch (Reg) {
36374     case X86::EAX:
36375       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36376       return "__x86_indirect_thunk_eax";
36377     case X86::ECX:
36378       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36379       return "__x86_indirect_thunk_ecx";
36380     case X86::EDX:
36381       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36382       return "__x86_indirect_thunk_edx";
36383     case X86::EDI:
36384       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36385       return "__x86_indirect_thunk_edi";
36386     case X86::R11:
36387       assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
36388       return "__x86_indirect_thunk_r11";
36389     }
36390     llvm_unreachable("unexpected reg for external indirect thunk");
36391   }
36392 
36393   if (Subtarget.useRetpolineIndirectCalls() ||
36394       Subtarget.useRetpolineIndirectBranches()) {
36395     // When targeting an internal COMDAT thunk use an LLVM-specific name.
36396     switch (Reg) {
36397     case X86::EAX:
36398       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36399       return "__llvm_retpoline_eax";
36400     case X86::ECX:
36401       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36402       return "__llvm_retpoline_ecx";
36403     case X86::EDX:
36404       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36405       return "__llvm_retpoline_edx";
36406     case X86::EDI:
36407       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
36408       return "__llvm_retpoline_edi";
36409     case X86::R11:
36410       assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
36411       return "__llvm_retpoline_r11";
36412     }
36413     llvm_unreachable("unexpected reg for retpoline");
36414   }
36415 
36416   if (Subtarget.useLVIControlFlowIntegrity()) {
36417     assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
36418     return "__llvm_lvi_thunk_r11";
36419   }
36420   llvm_unreachable("getIndirectThunkSymbol() invoked without thunk feature");
36421 }
36422 
36423 MachineBasicBlock *
36424 X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
36425                                             MachineBasicBlock *BB) const {
36426   // Copy the virtual register into the R11 physical register and
36427   // call the retpoline thunk.
36428   const DebugLoc &DL = MI.getDebugLoc();
36429   const X86InstrInfo *TII = Subtarget.getInstrInfo();
36430   Register CalleeVReg = MI.getOperand(0).getReg();
36431   unsigned Opc = getOpcodeForIndirectThunk(MI.getOpcode());
36432 
36433   // Find an available scratch register to hold the callee. On 64-bit, we can
36434   // just use R11, but we scan for uses anyway to ensure we don't generate
36435   // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
36436   // already a register use operand to the call to hold the callee. If none
36437   // are available, use EDI instead. EDI is chosen because EBX is the PIC base
36438   // register and ESI is the base pointer to realigned stack frames with VLAs.
36439   SmallVector<unsigned, 3> AvailableRegs;
36440   if (Subtarget.is64Bit())
36441     AvailableRegs.push_back(X86::R11);
36442   else
36443     AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
36444 
36445   // Zero out any registers that are already used.
36446   for (const auto &MO : MI.operands()) {
36447     if (MO.isReg() && MO.isUse())
36448       for (unsigned &Reg : AvailableRegs)
36449         if (Reg == MO.getReg())
36450           Reg = 0;
36451   }
36452 
36453   // Choose the first remaining non-zero available register.
36454   unsigned AvailableReg = 0;
36455   for (unsigned MaybeReg : AvailableRegs) {
36456     if (MaybeReg) {
36457       AvailableReg = MaybeReg;
36458       break;
36459     }
36460   }
36461   if (!AvailableReg)
36462     report_fatal_error("calling convention incompatible with retpoline, no "
36463                        "available registers");
36464 
36465   const char *Symbol = getIndirectThunkSymbol(Subtarget, AvailableReg);
36466 
36467   BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
36468       .addReg(CalleeVReg);
36469   MI.getOperand(0).ChangeToES(Symbol);
36470   MI.setDesc(TII->get(Opc));
36471   MachineInstrBuilder(*BB->getParent(), &MI)
36472       .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
36473   return BB;
36474 }
36475 
36476 /// SetJmp implies future control flow change upon calling the corresponding
36477 /// LongJmp.
36478 /// Instead of using the 'return' instruction, the long jump fixes the stack and
36479 /// performs an indirect branch. To do so it uses the registers that were stored
36480 /// in the jump buffer (when calling SetJmp).
36481 /// If the shadow stack is enabled, we need to fix it up as well, because the
36482 /// long jump skips some return addresses.
36483 /// This function saves the SSP so that emitLongJmpShadowStackFix can repair
36484 /// the shadow stack later.
36485 /// \sa emitLongJmpShadowStackFix
36486 /// \param [in] MI The temporary Machine Instruction for the builtin.
36487 /// \param [in] MBB The Machine Basic Block that will be modified.
36488 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
36489                                                  MachineBasicBlock *MBB) const {
36490   const DebugLoc &DL = MI.getDebugLoc();
36491   MachineFunction *MF = MBB->getParent();
36492   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36493   MachineRegisterInfo &MRI = MF->getRegInfo();
36494   MachineInstrBuilder MIB;
36495 
36496   // Memory Reference.
36497   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
36498                                            MI.memoperands_end());
36499 
36500   // Initialize a register with zero.
36501   MVT PVT = getPointerTy(MF->getDataLayout());
36502   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
36503   Register ZReg = MRI.createVirtualRegister(PtrRC);
36504   unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
36505   BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
36506       .addDef(ZReg)
36507       .addReg(ZReg, RegState::Undef)
36508       .addReg(ZReg, RegState::Undef);
36509 
36510   // Read the current SSP register value into the zeroed register.
36511   Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
36512   unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
36513   BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
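        // On processors without shadow-stack support, RDSSP is a NOP, so the value
        // written into the jump buffer below is simply the zero initialized above.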
36514 
36515   // Write the SSP register value to offset 3 in input memory buffer.
36516   unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
36517   MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
36518   const int64_t SSPOffset = 3 * PVT.getStoreSize();
36519   const unsigned MemOpndSlot = 1;
36520   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36521     if (i == X86::AddrDisp)
36522       MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
36523     else
36524       MIB.add(MI.getOperand(MemOpndSlot + i));
36525   }
36526   MIB.addReg(SSPCopyReg);
36527   MIB.setMemRefs(MMOs);
36528 }
36529 
36530 MachineBasicBlock *
36531 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
36532                                     MachineBasicBlock *MBB) const {
36533   const DebugLoc &DL = MI.getDebugLoc();
36534   MachineFunction *MF = MBB->getParent();
36535   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36536   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
36537   MachineRegisterInfo &MRI = MF->getRegInfo();
36538 
36539   const BasicBlock *BB = MBB->getBasicBlock();
36540   MachineFunction::iterator I = ++MBB->getIterator();
36541 
36542   // Memory Reference
36543   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
36544                                            MI.memoperands_end());
36545 
36546   unsigned DstReg;
36547   unsigned MemOpndSlot = 0;
36548 
36549   unsigned CurOp = 0;
36550 
36551   DstReg = MI.getOperand(CurOp++).getReg();
36552   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
36553   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
36554   (void)TRI;
36555   Register mainDstReg = MRI.createVirtualRegister(RC);
36556   Register restoreDstReg = MRI.createVirtualRegister(RC);
36557 
36558   MemOpndSlot = CurOp;
36559 
36560   MVT PVT = getPointerTy(MF->getDataLayout());
36561   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
36562          "Invalid Pointer Size!");
36563 
36564   // For v = setjmp(buf), we generate
36565   //
36566   // thisMBB:
36567   //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
36568   //  SjLjSetup restoreMBB
36569   //
36570   // mainMBB:
36571   //  v_main = 0
36572   //
36573   // sinkMBB:
36574   //  v = phi(main, restore)
36575   //
36576   // restoreMBB:
36577   //  if base pointer being used, load it from frame
36578   //  v_restore = 1
36579 
36580   MachineBasicBlock *thisMBB = MBB;
36581   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
36582   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
36583   MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
36584   MF->insert(I, mainMBB);
36585   MF->insert(I, sinkMBB);
36586   MF->push_back(restoreMBB);
36587   restoreMBB->setMachineBlockAddressTaken();
36588 
36589   MachineInstrBuilder MIB;
36590 
36591   // Transfer the remainder of BB and its successor edges to sinkMBB.
36592   sinkMBB->splice(sinkMBB->begin(), MBB,
36593                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
36594   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
36595 
36596   // thisMBB:
36597   unsigned PtrStoreOpc = 0;
36598   unsigned LabelReg = 0;
36599   const int64_t LabelOffset = 1 * PVT.getStoreSize();
36600   bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
36601                      !isPositionIndependent();
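        // With the small code model and no PIC, the address of restoreMBB fits in
        // a 32-bit immediate and can be stored directly; otherwise it has to be
        // materialized into a register with a LEA first.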
36602 
36603   // Prepare IP either in reg or imm.
36604   if (!UseImmLabel) {
36605     PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
36606     const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
36607     LabelReg = MRI.createVirtualRegister(PtrRC);
36608     if (Subtarget.is64Bit()) {
36609       MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
36610               .addReg(X86::RIP)
36611               .addImm(0)
36612               .addReg(0)
36613               .addMBB(restoreMBB)
36614               .addReg(0);
36615     } else {
36616       const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
36617       MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
36618               .addReg(XII->getGlobalBaseReg(MF))
36619               .addImm(0)
36620               .addReg(0)
36621               .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
36622               .addReg(0);
36623     }
36624   } else
36625     PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
36626   // Store IP
36627   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
36628   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36629     if (i == X86::AddrDisp)
36630       MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
36631     else
36632       MIB.add(MI.getOperand(MemOpndSlot + i));
36633   }
36634   if (!UseImmLabel)
36635     MIB.addReg(LabelReg);
36636   else
36637     MIB.addMBB(restoreMBB);
36638   MIB.setMemRefs(MMOs);
36639 
36640   if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
36641     emitSetJmpShadowStackFix(MI, thisMBB);
36642   }
36643 
36644   // Setup
36645   MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
36646           .addMBB(restoreMBB);
36647 
36648   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
36649   MIB.addRegMask(RegInfo->getNoPreservedMask());
36650   thisMBB->addSuccessor(mainMBB);
36651   thisMBB->addSuccessor(restoreMBB);
36652 
36653   // mainMBB:
36654   //  v_main = 0
36655   BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
36656   mainMBB->addSuccessor(sinkMBB);
36657 
36658   // sinkMBB:
36659   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
36660           TII->get(X86::PHI), DstReg)
36661     .addReg(mainDstReg).addMBB(mainMBB)
36662     .addReg(restoreDstReg).addMBB(restoreMBB);
36663 
36664   // restoreMBB:
36665   if (RegInfo->hasBasePointer(*MF)) {
36666     const bool Uses64BitFramePtr =
36667         Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
36668     X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
36669     X86FI->setRestoreBasePointer(MF);
36670     Register FramePtr = RegInfo->getFrameRegister(*MF);
36671     Register BasePtr = RegInfo->getBaseRegister();
36672     unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
36673     addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
36674                  FramePtr, true, X86FI->getRestoreBasePointerOffset())
36675       .setMIFlag(MachineInstr::FrameSetup);
36676   }
36677   BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
36678   BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
36679   restoreMBB->addSuccessor(sinkMBB);
36680 
36681   MI.eraseFromParent();
36682   return sinkMBB;
36683 }
36684 
36685 /// Fix the shadow stack using the previously saved SSP pointer.
36686 /// \sa emitSetJmpShadowStackFix
36687 /// \param [in] MI The temporary Machine Instruction for the builtin.
36688 /// \param [in] MBB The Machine Basic Block that will be modified.
36689 /// \return The sink MBB that will perform the future indirect branch.
36690 MachineBasicBlock *
36691 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
36692                                              MachineBasicBlock *MBB) const {
36693   const DebugLoc &DL = MI.getDebugLoc();
36694   MachineFunction *MF = MBB->getParent();
36695   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36696   MachineRegisterInfo &MRI = MF->getRegInfo();
36697 
36698   // Memory Reference
36699   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
36700                                            MI.memoperands_end());
36701 
36702   MVT PVT = getPointerTy(MF->getDataLayout());
36703   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
36704 
36705   // checkSspMBB:
36706   //         xor vreg1, vreg1
36707   //         rdssp vreg1
36708   //         test vreg1, vreg1
36709   //         je sinkMBB   # Jump if Shadow Stack is not supported
36710   // fallMBB:
36711   //         mov buf+24/12(%rip), vreg2
36712   //         sub vreg1, vreg2
36713   //         jbe sinkMBB  # No need to fix the Shadow Stack
36714   // fixShadowMBB:
36715   //         shr 3/2, vreg2
36716   //         incssp vreg2  # fix the SSP according to the lower 8 bits
36717   //         shr 8, vreg2
36718   //         je sinkMBB
36719   // fixShadowLoopPrepareMBB:
36720   //         shl vreg2
36721   //         mov 128, vreg3
36722   // fixShadowLoopMBB:
36723   //         incssp vreg3
36724   //         dec vreg2
36725   //         jne fixShadowLoopMBB # Iterate until you finish fixing
36726   //                              # the Shadow Stack
36727   // sinkMBB:
36728 
36729   MachineFunction::iterator I = ++MBB->getIterator();
36730   const BasicBlock *BB = MBB->getBasicBlock();
36731 
36732   MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
36733   MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
36734   MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
36735   MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
36736   MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
36737   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
36738   MF->insert(I, checkSspMBB);
36739   MF->insert(I, fallMBB);
36740   MF->insert(I, fixShadowMBB);
36741   MF->insert(I, fixShadowLoopPrepareMBB);
36742   MF->insert(I, fixShadowLoopMBB);
36743   MF->insert(I, sinkMBB);
36744 
36745   // Transfer the remainder of BB and its successor edges to sinkMBB.
36746   sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
36747                   MBB->end());
36748   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
36749 
36750   MBB->addSuccessor(checkSspMBB);
36751 
36752   // Initialize a register with zero.
36753   Register ZReg = MRI.createVirtualRegister(&X86::GR32RegClass);
36754   BuildMI(checkSspMBB, DL, TII->get(X86::MOV32r0), ZReg);
36755 
36756   if (PVT == MVT::i64) {
36757     Register TmpZReg = MRI.createVirtualRegister(PtrRC);
36758     BuildMI(checkSspMBB, DL, TII->get(X86::SUBREG_TO_REG), TmpZReg)
36759       .addImm(0)
36760       .addReg(ZReg)
36761       .addImm(X86::sub_32bit);
36762     ZReg = TmpZReg;
36763   }
36764 
36765   // Read the current SSP register value into the zeroed register.
36766   Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
36767   unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
36768   BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
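        // If the CPU has no shadow-stack support, RDSSP is a NOP and SSPCopyReg
        // keeps the zero written above, so the test below branches straight to the
        // sink and no fix-up is attempted.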
36769 
36770   // Check whether the result of the SSP register is zero and jump directly
36771   // to the sink.
36772   unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
36773   BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
36774       .addReg(SSPCopyReg)
36775       .addReg(SSPCopyReg);
36776   BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
36777   checkSspMBB->addSuccessor(sinkMBB);
36778   checkSspMBB->addSuccessor(fallMBB);
36779 
36780   // Reload the previously saved SSP register value.
36781   Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
36782   unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
36783   const int64_t SPPOffset = 3 * PVT.getStoreSize();
36784   MachineInstrBuilder MIB =
36785       BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
36786   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36787     const MachineOperand &MO = MI.getOperand(i);
36788     if (i == X86::AddrDisp)
36789       MIB.addDisp(MO, SPPOffset);
36790     else if (MO.isReg()) // Don't add the whole operand, we don't want to
36791                          // preserve kill flags.
36792       MIB.addReg(MO.getReg());
36793     else
36794       MIB.add(MO);
36795   }
36796   MIB.setMemRefs(MMOs);
36797 
36798   // Subtract the current SSP from the previous SSP.
36799   Register SspSubReg = MRI.createVirtualRegister(PtrRC);
36800   unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
36801   BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
36802       .addReg(PrevSSPReg)
36803       .addReg(SSPCopyReg);
36804 
36805   // Jump to sink in case PrevSSPReg <= SSPCopyReg.
36806   BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
36807   fallMBB->addSuccessor(sinkMBB);
36808   fallMBB->addSuccessor(fixShadowMBB);
36809 
36810   // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
36811   unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
36812   unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
36813   Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
36814   BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
36815       .addReg(SspSubReg)
36816       .addImm(Offset);
36817 
36818   // Increase the SSP, using only the lower 8 bits of the delta.
36819   unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
36820   BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
36821 
36822   // Reset the lower 8 bits.
36823   Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
36824   BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
36825       .addReg(SspFirstShrReg)
36826       .addImm(8);
36827 
36828   // Jump if the result of the shift is zero.
36829   BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
36830   fixShadowMBB->addSuccessor(sinkMBB);
36831   fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
36832 
36833   // Do a single shift left.
36834   unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
36835   Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
36836   BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
36837       .addReg(SspSecondShrReg);
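  // After the SHR by 8 above, the register counts the remaining 256-slot
  // chunks. Doubling it here and bumping the SSP by 128 slots per loop
  // iteration below advances the SSP by exactly 256 slots per chunk.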
36838 
36839   // Save the value 128 to a register (will be used next with incssp).
36840   Register Value128InReg = MRI.createVirtualRegister(PtrRC);
36841   unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
36842   BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
36843       .addImm(128);
36844   fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
36845 
36846   // Since incssp only looks at the lower 8 bits, we might need to do several
36847   // iterations of incssp until we finish fixing the shadow stack.
36848   Register DecReg = MRI.createVirtualRegister(PtrRC);
36849   Register CounterReg = MRI.createVirtualRegister(PtrRC);
36850   BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
36851       .addReg(SspAfterShlReg)
36852       .addMBB(fixShadowLoopPrepareMBB)
36853       .addReg(DecReg)
36854       .addMBB(fixShadowLoopMBB);
36855 
36856   // Every iteration we increase the SSP by 128.
36857   BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
36858 
36859   // Every iteration we decrement the counter by 1.
36860   unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
36861   BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
36862 
36863   // Jump if the counter is not zero yet.
36864   BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
36865   fixShadowLoopMBB->addSuccessor(sinkMBB);
36866   fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
36867 
36868   return sinkMBB;
36869 }
36870 
36871 MachineBasicBlock *
36872 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
36873                                      MachineBasicBlock *MBB) const {
36874   const DebugLoc &DL = MI.getDebugLoc();
36875   MachineFunction *MF = MBB->getParent();
36876   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
36877   MachineRegisterInfo &MRI = MF->getRegInfo();
36878 
36879   // Memory Reference
36880   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
36881                                            MI.memoperands_end());
36882 
36883   MVT PVT = getPointerTy(MF->getDataLayout());
36884   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
36885          "Invalid Pointer Size!");
36886 
36887   const TargetRegisterClass *RC =
36888     (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
36889   Register Tmp = MRI.createVirtualRegister(RC);
36890   // Since FP is only updated here but NOT referenced, it's treated as GPR.
36891   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
36892   Register FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
36893   Register SP = RegInfo->getStackRegister();
36894 
36895   MachineInstrBuilder MIB;
36896 
36897   const int64_t LabelOffset = 1 * PVT.getStoreSize();
36898   const int64_t SPOffset = 2 * PVT.getStoreSize();
36899 
36900   unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
36901   unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
36902 
36903   MachineBasicBlock *thisMBB = MBB;
36904 
36905   // When CET shadow stacks are enabled, we need to fix the Shadow Stack.
36906   if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
36907     thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
36908   }
36909 
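  // The code below reloads the frame pointer, the destination label and the
  // stack pointer from the setjmp buffer and then jumps to the label.
  // Roughly, for the 64-bit case:
  //         movq  0(buf), %rbp   # reload FP
  //         movq  8(buf), tmp    # reload IP (LabelOffset)
  //         movq 16(buf), %rsp   # reload SP (SPOffset)
  //         jmpq *tmp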
36910   // Reload FP
36911   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
36912   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36913     const MachineOperand &MO = MI.getOperand(i);
36914     if (MO.isReg()) // Don't add the whole operand, we don't want to
36915                     // preserve kill flags.
36916       MIB.addReg(MO.getReg());
36917     else
36918       MIB.add(MO);
36919   }
36920   MIB.setMemRefs(MMOs);
36921 
36922   // Reload IP
36923   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
36924   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36925     const MachineOperand &MO = MI.getOperand(i);
36926     if (i == X86::AddrDisp)
36927       MIB.addDisp(MO, LabelOffset);
36928     else if (MO.isReg()) // Don't add the whole operand, we don't want to
36929                          // preserve kill flags.
36930       MIB.addReg(MO.getReg());
36931     else
36932       MIB.add(MO);
36933   }
36934   MIB.setMemRefs(MMOs);
36935 
36936   // Reload SP
36937   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
36938   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
36939     if (i == X86::AddrDisp)
36940       MIB.addDisp(MI.getOperand(i), SPOffset);
36941     else
36942       MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
36943                                  // the last instruction of the expansion.
36944   }
36945   MIB.setMemRefs(MMOs);
36946 
36947   // Jump
36948   BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
36949 
36950   MI.eraseFromParent();
36951   return thisMBB;
36952 }
36953 
36954 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
36955                                                MachineBasicBlock *MBB,
36956                                                MachineBasicBlock *DispatchBB,
36957                                                int FI) const {
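  // Store the address of the dispatch block into the SjLj function context
  // (frame index FI, at offset 56 in 64-bit mode or 36 in 32-bit mode), either
  // as an immediate block address when the small, non-PIC code model permits
  // it, or via a virtual register materialized with LEA.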
36958   const DebugLoc &DL = MI.getDebugLoc();
36959   MachineFunction *MF = MBB->getParent();
36960   MachineRegisterInfo *MRI = &MF->getRegInfo();
36961   const X86InstrInfo *TII = Subtarget.getInstrInfo();
36962 
36963   MVT PVT = getPointerTy(MF->getDataLayout());
36964   assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
36965 
36966   unsigned Op = 0;
36967   unsigned VR = 0;
36968 
36969   bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
36970                      !isPositionIndependent();
36971 
36972   if (UseImmLabel) {
36973     Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
36974   } else {
36975     const TargetRegisterClass *TRC =
36976         (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
36977     VR = MRI->createVirtualRegister(TRC);
36978     Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
36979 
36980     if (Subtarget.is64Bit())
36981       BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
36982           .addReg(X86::RIP)
36983           .addImm(1)
36984           .addReg(0)
36985           .addMBB(DispatchBB)
36986           .addReg(0);
36987     else
36988       BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
36989           .addReg(0) /* TII->getGlobalBaseReg(MF) */
36990           .addImm(1)
36991           .addReg(0)
36992           .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
36993           .addReg(0);
36994   }
36995 
36996   MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
36997   addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
36998   if (UseImmLabel)
36999     MIB.addMBB(DispatchBB);
37000   else
37001     MIB.addReg(VR);
37002 }
37003 
37004 MachineBasicBlock *
37005 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
37006                                          MachineBasicBlock *BB) const {
37007   const DebugLoc &DL = MI.getDebugLoc();
37008   MachineFunction *MF = BB->getParent();
37009   MachineRegisterInfo *MRI = &MF->getRegInfo();
37010   const X86InstrInfo *TII = Subtarget.getInstrInfo();
37011   int FI = MF->getFrameInfo().getFunctionContextIndex();
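  // The dispatch expansion creates three new blocks. Roughly:
  //   DispatchBB:  load the call-site index from the function context, compare
  //                it against the number of landing pads and jump to TrapBB if
  //                it is out of range
  //   TrapBB:      ud2
  //   DispContBB:  indirect branch through a jump table whose entries are the
  //                landing pads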
37012 
37013   // Get a mapping of the call site numbers to all of the landing pads they're
37014   // associated with.
37015   DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
37016   unsigned MaxCSNum = 0;
37017   for (auto &MBB : *MF) {
37018     if (!MBB.isEHPad())
37019       continue;
37020 
37021     MCSymbol *Sym = nullptr;
37022     for (const auto &MI : MBB) {
37023       if (MI.isDebugInstr())
37024         continue;
37025 
37026       assert(MI.isEHLabel() && "expected EH_LABEL");
37027       Sym = MI.getOperand(0).getMCSymbol();
37028       break;
37029     }
37030 
37031     if (!MF->hasCallSiteLandingPad(Sym))
37032       continue;
37033 
37034     for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
37035       CallSiteNumToLPad[CSI].push_back(&MBB);
37036       MaxCSNum = std::max(MaxCSNum, CSI);
37037     }
37038   }
37039 
37040   // Get an ordered list of the machine basic blocks for the jump table.
37041   std::vector<MachineBasicBlock *> LPadList;
37042   SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
37043   LPadList.reserve(CallSiteNumToLPad.size());
37044 
37045   for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
37046     for (auto &LP : CallSiteNumToLPad[CSI]) {
37047       LPadList.push_back(LP);
37048       InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
37049     }
37050   }
37051 
37052   assert(!LPadList.empty() &&
37053          "No landing pad destinations for the dispatch jump table!");
37054 
37055   // Create the MBBs for the dispatch code.
37056 
37057   // Shove the dispatch's address into the return slot in the function context.
37058   MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
37059   DispatchBB->setIsEHPad(true);
37060 
37061   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
37062   BuildMI(TrapBB, DL, TII->get(X86::TRAP));
37063   DispatchBB->addSuccessor(TrapBB);
37064 
37065   MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
37066   DispatchBB->addSuccessor(DispContBB);
37067 
37068   // Insert MBBs.
37069   MF->push_back(DispatchBB);
37070   MF->push_back(DispContBB);
37071   MF->push_back(TrapBB);
37072 
37073   // Insert code into the entry block that creates and registers the function
37074   // context.
37075   SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
37076 
37077   // Create the jump table and associated information
37078   unsigned JTE = getJumpTableEncoding();
37079   MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
37080   unsigned MJTI = JTI->createJumpTableIndex(LPadList);
37081 
37082   const X86RegisterInfo &RI = TII->getRegisterInfo();
37083   // Add a register mask with no preserved registers.  This results in all
37084   // registers being marked as clobbered.
37085   if (RI.hasBasePointer(*MF)) {
37086     const bool FPIs64Bit =
37087         Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
37088     X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
37089     MFI->setRestoreBasePointer(MF);
37090 
37091     Register FP = RI.getFrameRegister(*MF);
37092     Register BP = RI.getBaseRegister();
37093     unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
37094     addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
37095                  MFI->getRestoreBasePointerOffset())
37096         .addRegMask(RI.getNoPreservedMask());
37097   } else {
37098     BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
37099         .addRegMask(RI.getNoPreservedMask());
37100   }
37101 
37102   // IReg is used as an index in a memory operand and therefore can't be SP
37103   Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
37104   addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
37105                     Subtarget.is64Bit() ? 8 : 4);
37106   BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
37107       .addReg(IReg)
37108       .addImm(LPadList.size());
37109   BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
37110 
37111   if (Subtarget.is64Bit()) {
37112     Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
37113     Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
37114 
37115     // leaq .LJTI0_0(%rip), BReg
37116     BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
37117         .addReg(X86::RIP)
37118         .addImm(1)
37119         .addReg(0)
37120         .addJumpTableIndex(MJTI)
37121         .addReg(0);
37122     // movzx IReg64, IReg
37123     BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
37124         .addImm(0)
37125         .addReg(IReg)
37126         .addImm(X86::sub_32bit);
37127 
37128     switch (JTE) {
37129     case MachineJumpTableInfo::EK_BlockAddress:
37130       // jmpq *(BReg,IReg64,8)
37131       BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
37132           .addReg(BReg)
37133           .addImm(8)
37134           .addReg(IReg64)
37135           .addImm(0)
37136           .addReg(0);
37137       break;
37138     case MachineJumpTableInfo::EK_LabelDifference32: {
37139       Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
37140       Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
37141       Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
37142 
37143       // movl (BReg,IReg64,4), OReg
37144       BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
37145           .addReg(BReg)
37146           .addImm(4)
37147           .addReg(IReg64)
37148           .addImm(0)
37149           .addReg(0);
37150       // movsx OReg64, OReg
37151       BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
37152       // addq BReg, OReg64, TReg
37153       BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
37154           .addReg(OReg64)
37155           .addReg(BReg);
37156       // jmpq *TReg
37157       BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
37158       break;
37159     }
37160     default:
37161       llvm_unreachable("Unexpected jump table encoding");
37162     }
37163   } else {
37164     // jmpl *.LJTI0_0(,IReg,4)
37165     BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
37166         .addReg(0)
37167         .addImm(4)
37168         .addReg(IReg)
37169         .addJumpTableIndex(MJTI)
37170         .addReg(0);
37171   }
37172 
37173   // Add the jump table entries as successors to the MBB.
37174   SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
37175   for (auto &LP : LPadList)
37176     if (SeenMBBs.insert(LP).second)
37177       DispContBB->addSuccessor(LP);
37178 
37179   // N.B. the order the invoke BBs are processed in doesn't matter here.
37180   SmallVector<MachineBasicBlock *, 64> MBBLPads;
37181   const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
37182   for (MachineBasicBlock *MBB : InvokeBBs) {
37183     // Remove the landing pad successor from the invoke block and replace it
37184     // with the new dispatch block.
37185     // Keep a copy of Successors since it's modified inside the loop.
37186     SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
37187                                                    MBB->succ_rend());
37188     // FIXME: Avoid quadratic complexity.
37189     for (auto *MBBS : Successors) {
37190       if (MBBS->isEHPad()) {
37191         MBB->removeSuccessor(MBBS);
37192         MBBLPads.push_back(MBBS);
37193       }
37194     }
37195 
37196     MBB->addSuccessor(DispatchBB);
37197 
37198     // Find the invoke call and mark all of the callee-saved registers as
37199     // 'implicit defined' so that they're spilled.  This prevents code from
37200     // moving instructions to before the EH block, where they will never be
37201     // executed.
37202     for (auto &II : reverse(*MBB)) {
37203       if (!II.isCall())
37204         continue;
37205 
37206       DenseMap<unsigned, bool> DefRegs;
37207       for (auto &MOp : II.operands())
37208         if (MOp.isReg())
37209           DefRegs[MOp.getReg()] = true;
37210 
37211       MachineInstrBuilder MIB(*MF, &II);
37212       for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
37213         unsigned Reg = SavedRegs[RegIdx];
37214         if (!DefRegs[Reg])
37215           MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
37216       }
37217 
37218       break;
37219     }
37220   }
37221 
37222   // Mark all former landing pads as non-landing pads.  The dispatch is the only
37223   // landing pad now.
37224   for (auto &LP : MBBLPads)
37225     LP->setIsEHPad(false);
37226 
37227   // The instruction is gone now.
37228   MI.eraseFromParent();
37229   return BB;
37230 }
37231 
37232 MachineBasicBlock *
37233 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
37234                                                MachineBasicBlock *BB) const {
37235   MachineFunction *MF = BB->getParent();
37236   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
37237   const DebugLoc &DL = MI.getDebugLoc();
37238 
37239   auto TMMImmToTMMReg = [](unsigned Imm) {
37240     assert (Imm < 8 && "Illegal tmm index");
37241     return X86::TMM0 + Imm;
37242   };
37243   switch (MI.getOpcode()) {
37244   default: llvm_unreachable("Unexpected instr type to insert");
37245   case X86::TLS_addr32:
37246   case X86::TLS_addr64:
37247   case X86::TLS_addrX32:
37248   case X86::TLS_base_addr32:
37249   case X86::TLS_base_addr64:
37250   case X86::TLS_base_addrX32:
37251     return EmitLoweredTLSAddr(MI, BB);
37252   case X86::INDIRECT_THUNK_CALL32:
37253   case X86::INDIRECT_THUNK_CALL64:
37254   case X86::INDIRECT_THUNK_TCRETURN32:
37255   case X86::INDIRECT_THUNK_TCRETURN64:
37256     return EmitLoweredIndirectThunk(MI, BB);
37257   case X86::CATCHRET:
37258     return EmitLoweredCatchRet(MI, BB);
37259   case X86::SEG_ALLOCA_32:
37260   case X86::SEG_ALLOCA_64:
37261     return EmitLoweredSegAlloca(MI, BB);
37262   case X86::PROBED_ALLOCA_32:
37263   case X86::PROBED_ALLOCA_64:
37264     return EmitLoweredProbedAlloca(MI, BB);
37265   case X86::TLSCall_32:
37266   case X86::TLSCall_64:
37267     return EmitLoweredTLSCall(MI, BB);
37268   case X86::CMOV_FR16:
37269   case X86::CMOV_FR16X:
37270   case X86::CMOV_FR32:
37271   case X86::CMOV_FR32X:
37272   case X86::CMOV_FR64:
37273   case X86::CMOV_FR64X:
37274   case X86::CMOV_GR8:
37275   case X86::CMOV_GR16:
37276   case X86::CMOV_GR32:
37277   case X86::CMOV_RFP32:
37278   case X86::CMOV_RFP64:
37279   case X86::CMOV_RFP80:
37280   case X86::CMOV_VR64:
37281   case X86::CMOV_VR128:
37282   case X86::CMOV_VR128X:
37283   case X86::CMOV_VR256:
37284   case X86::CMOV_VR256X:
37285   case X86::CMOV_VR512:
37286   case X86::CMOV_VK1:
37287   case X86::CMOV_VK2:
37288   case X86::CMOV_VK4:
37289   case X86::CMOV_VK8:
37290   case X86::CMOV_VK16:
37291   case X86::CMOV_VK32:
37292   case X86::CMOV_VK64:
37293     return EmitLoweredSelect(MI, BB);
37294 
37295   case X86::RDFLAGS32:
37296   case X86::RDFLAGS64: {
37297     unsigned PushF =
37298         MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
37299     unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
37300     MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
37301     // Permit reads of the EFLAGS and DF registers without them being defined.
37302     // This intrinsic exists to read external processor state in flags, such as
37303     // the trap flag, interrupt flag, and direction flag, none of which are
37304     // modeled by the backend.
37305     assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
37306            "Unexpected register in operand!");
37307     Push->getOperand(2).setIsUndef();
37308     assert(Push->getOperand(3).getReg() == X86::DF &&
37309            "Unexpected register in operand!");
37310     Push->getOperand(3).setIsUndef();
37311     BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
37312 
37313     MI.eraseFromParent(); // The pseudo is gone now.
37314     return BB;
37315   }
37316 
37317   case X86::WRFLAGS32:
37318   case X86::WRFLAGS64: {
37319     unsigned Push =
37320         MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
37321     unsigned PopF =
37322         MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
37323     BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
37324     BuildMI(*BB, MI, DL, TII->get(PopF));
37325 
37326     MI.eraseFromParent(); // The pseudo is gone now.
37327     return BB;
37328   }
37329 
37330   case X86::FP80_ADDr:
37331   case X86::FP80_ADDm32: {
37332     // Change the floating point control register to use double extended
37333     // precision when performing the addition.
37334     int OrigCWFrameIdx =
37335         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
37336     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::FNSTCW16m)),
37337                       OrigCWFrameIdx);
37338 
37339     // Load the old value of the control word...
37340     Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
37341     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
37342                       OrigCWFrameIdx);
37343 
37344     // OR 0b11 into bits 8 and 9. 0b11 is the encoding for double extended
37345     // precision.
37346     Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
37347     BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
37348         .addReg(OldCW, RegState::Kill)
37349         .addImm(0x300);
37350 
37351     // Extract to 16 bits.
37352     Register NewCW16 =
37353         MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
37354     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
37355         .addReg(NewCW, RegState::Kill, X86::sub_16bit);
37356 
37357     // Prepare memory for FLDCW.
37358     int NewCWFrameIdx =
37359         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
37360     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
37361                       NewCWFrameIdx)
37362         .addReg(NewCW16, RegState::Kill);
37363 
37364     // Reload the modified control word now...
37365     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::FLDCW16m)),
37366                       NewCWFrameIdx);
37367 
37368     // Do the addition.
37369     if (MI.getOpcode() == X86::FP80_ADDr) {
37370       BuildMI(*BB, MI, DL, TII->get(X86::ADD_Fp80))
37371           .add(MI.getOperand(0))
37372           .add(MI.getOperand(1))
37373           .add(MI.getOperand(2));
37374     } else {
37375       BuildMI(*BB, MI, DL, TII->get(X86::ADD_Fp80m32))
37376           .add(MI.getOperand(0))
37377           .add(MI.getOperand(1))
37378           .add(MI.getOperand(2))
37379           .add(MI.getOperand(3))
37380           .add(MI.getOperand(4))
37381           .add(MI.getOperand(5))
37382           .add(MI.getOperand(6));
37383     }
37384 
37385     // Reload the original control word now.
37386     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::FLDCW16m)),
37387                       OrigCWFrameIdx);
37388 
37389     MI.eraseFromParent(); // The pseudo instruction is gone now.
37390     return BB;
37391   }
37392 
37393   case X86::FP32_TO_INT16_IN_MEM:
37394   case X86::FP32_TO_INT32_IN_MEM:
37395   case X86::FP32_TO_INT64_IN_MEM:
37396   case X86::FP64_TO_INT16_IN_MEM:
37397   case X86::FP64_TO_INT32_IN_MEM:
37398   case X86::FP64_TO_INT64_IN_MEM:
37399   case X86::FP80_TO_INT16_IN_MEM:
37400   case X86::FP80_TO_INT32_IN_MEM:
37401   case X86::FP80_TO_INT64_IN_MEM: {
37402     // Change the floating point control register to use "round towards zero"
37403     // mode when truncating to an integer value.
37404     int OrigCWFrameIdx =
37405         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
37406     addFrameReference(BuildMI(*BB, MI, DL,
37407                               TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
37408 
37409     // Load the old value of the control word...
37410     Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
37411     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
37412                       OrigCWFrameIdx);
37413 
37414     // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
37415     Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
37416     BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
37417       .addReg(OldCW, RegState::Kill).addImm(0xC00);
37418 
37419     // Extract to 16 bits.
37420     Register NewCW16 =
37421         MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
37422     BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
37423       .addReg(NewCW, RegState::Kill, X86::sub_16bit);
37424 
37425     // Prepare memory for FLDCW.
37426     int NewCWFrameIdx =
37427         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
37428     addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
37429                       NewCWFrameIdx)
37430       .addReg(NewCW16, RegState::Kill);
37431 
37432     // Reload the modified control word now...
37433     addFrameReference(BuildMI(*BB, MI, DL,
37434                               TII->get(X86::FLDCW16m)), NewCWFrameIdx);
37435 
37436     // Get the X86 opcode to use.
37437     unsigned Opc;
37438     switch (MI.getOpcode()) {
37439     default: llvm_unreachable("illegal opcode!");
37440     case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
37441     case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
37442     case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
37443     case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
37444     case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
37445     case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
37446     case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
37447     case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
37448     case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
37449     }
37450 
37451     X86AddressMode AM = getAddressFromInstr(&MI, 0);
37452     addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
37453         .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
37454 
37455     // Reload the original control word now.
37456     addFrameReference(BuildMI(*BB, MI, DL,
37457                               TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
37458 
37459     MI.eraseFromParent(); // The pseudo instruction is gone now.
37460     return BB;
37461   }
37462 
37463   // xbegin
37464   case X86::XBEGIN:
37465     return emitXBegin(MI, BB, Subtarget.getInstrInfo());
37466 
37467   case X86::VAARG_64:
37468   case X86::VAARG_X32:
37469     return EmitVAARGWithCustomInserter(MI, BB);
37470 
37471   case X86::EH_SjLj_SetJmp32:
37472   case X86::EH_SjLj_SetJmp64:
37473     return emitEHSjLjSetJmp(MI, BB);
37474 
37475   case X86::EH_SjLj_LongJmp32:
37476   case X86::EH_SjLj_LongJmp64:
37477     return emitEHSjLjLongJmp(MI, BB);
37478 
37479   case X86::Int_eh_sjlj_setup_dispatch:
37480     return EmitSjLjDispatchBlock(MI, BB);
37481 
37482   case TargetOpcode::STATEPOINT:
37483     // As an implementation detail, STATEPOINT shares the STACKMAP format at
37484     // this point in the process.  We diverge later.
37485     return emitPatchPoint(MI, BB);
37486 
37487   case TargetOpcode::STACKMAP:
37488   case TargetOpcode::PATCHPOINT:
37489     return emitPatchPoint(MI, BB);
37490 
37491   case TargetOpcode::PATCHABLE_EVENT_CALL:
37492   case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
37493     return BB;
37494 
37495   case X86::LCMPXCHG8B: {
37496     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
37497     // In addition to the four E[ABCD] registers implied by the encoding,
37498     // CMPXCHG8B requires a memory operand. If the current target is i686 and
37499     // the current function needs a base pointer - which is ESI on i686 - the
37500     // register allocator would not be able to allocate registers for an
37501     // address of the form X(%reg, %reg, Y): there would never be enough
37502     // unreserved registers during regalloc (without the need for a base
37503     // pointer the only option would be X(%edi, %esi, Y)).
37504     // We give the register allocator a hand by precomputing the address in a
37505     // new vreg using LEA.
37506 
37507     // If it is not i686 or there is no base pointer - nothing to do here.
37508     if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
37509       return BB;
37510 
37511     // Even though this code does not necessarily need the base pointer to
37512     // be ESI, we check for that. The reason: if this assert fails, something
37513     // has changed in the compiler's base pointer handling, and it most
37514     // probably has to be addressed somehow here as well.
37515     assert(TRI->getBaseRegister() == X86::ESI &&
37516            "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
37517            "base pointer in mind");
37518 
37519     MachineRegisterInfo &MRI = MF->getRegInfo();
37520     MVT SPTy = getPointerTy(MF->getDataLayout());
37521     const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
37522     Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
37523 
37524     X86AddressMode AM = getAddressFromInstr(&MI, 0);
37525     // Regalloc does not need any help when the memory operand of CMPXCHG8B
37526     // does not use an index register.
37527     if (AM.IndexReg == X86::NoRegister)
37528       return BB;
37529 
37530     // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
37531     // four operand definitions that are E[ABCD] registers. We skip them and
37532     // then insert the LEA.
37533     MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
37534     while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
37535                                    RMBBI->definesRegister(X86::EBX) ||
37536                                    RMBBI->definesRegister(X86::ECX) ||
37537                                    RMBBI->definesRegister(X86::EDX))) {
37538       ++RMBBI;
37539     }
37540     MachineBasicBlock::iterator MBBI(RMBBI);
37541     addFullAddress(
37542         BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
37543 
37544     setDirectAddressInInstr(&MI, 0, computedAddrVReg);
37545 
37546     return BB;
37547   }
37548   case X86::LCMPXCHG16B_NO_RBX: {
37549     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
37550     Register BasePtr = TRI->getBaseRegister();
37551     if (TRI->hasBasePointer(*MF) &&
37552         (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
37553       if (!BB->isLiveIn(BasePtr))
37554         BB->addLiveIn(BasePtr);
37555       // Save RBX into a virtual register.
37556       Register SaveRBX =
37557           MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
37558       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), SaveRBX)
37559           .addReg(X86::RBX);
37560       Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
37561       MachineInstrBuilder MIB =
37562           BuildMI(*BB, MI, DL, TII->get(X86::LCMPXCHG16B_SAVE_RBX), Dst);
37563       for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
37564         MIB.add(MI.getOperand(Idx));
37565       MIB.add(MI.getOperand(X86::AddrNumOperands));
37566       MIB.addReg(SaveRBX);
37567     } else {
37568       // Simple case, just copy the virtual register to RBX.
37569       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::RBX)
37570           .add(MI.getOperand(X86::AddrNumOperands));
37571       MachineInstrBuilder MIB =
37572           BuildMI(*BB, MI, DL, TII->get(X86::LCMPXCHG16B));
37573       for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
37574         MIB.add(MI.getOperand(Idx));
37575     }
37576     MI.eraseFromParent();
37577     return BB;
37578   }
37579   case X86::MWAITX: {
37580     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
37581     Register BasePtr = TRI->getBaseRegister();
37582     bool IsRBX = (BasePtr == X86::RBX || BasePtr == X86::EBX);
37583     // If there is no need to save the base pointer, we generate MWAITXrrr;
37584     // otherwise we generate the pseudo MWAITX_SAVE_RBX.
37585     if (!IsRBX || !TRI->hasBasePointer(*MF)) {
37586       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::ECX)
37587           .addReg(MI.getOperand(0).getReg());
37588       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::EAX)
37589           .addReg(MI.getOperand(1).getReg());
37590       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::EBX)
37591           .addReg(MI.getOperand(2).getReg());
37592       BuildMI(*BB, MI, DL, TII->get(X86::MWAITXrrr));
37593       MI.eraseFromParent();
37594     } else {
37595       if (!BB->isLiveIn(BasePtr)) {
37596         BB->addLiveIn(BasePtr);
37597       }
37598       // Parameters can be copied into ECX and EAX but not EBX yet.
37599       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::ECX)
37600           .addReg(MI.getOperand(0).getReg());
37601       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), X86::EAX)
37602           .addReg(MI.getOperand(1).getReg());
37603       assert(Subtarget.is64Bit() && "Expected 64-bit mode!");
37604       // Save RBX into a virtual register.
37605       Register SaveRBX =
37606           MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
37607       BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), SaveRBX)
37608           .addReg(X86::RBX);
37609       // Generate mwaitx pseudo.
37610       Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
37611       BuildMI(*BB, MI, DL, TII->get(X86::MWAITX_SAVE_RBX))
37612           .addDef(Dst) // Destination tied in with SaveRBX.
37613           .addReg(MI.getOperand(2).getReg()) // input value of EBX.
37614           .addUse(SaveRBX);                  // Save of base pointer.
37615       MI.eraseFromParent();
37616     }
37617     return BB;
37618   }
37619   case TargetOpcode::PREALLOCATED_SETUP: {
37620     assert(Subtarget.is32Bit() && "preallocated only used in 32-bit");
37621     auto MFI = MF->getInfo<X86MachineFunctionInfo>();
37622     MFI->setHasPreallocatedCall(true);
37623     int64_t PreallocatedId = MI.getOperand(0).getImm();
37624     size_t StackAdjustment = MFI->getPreallocatedStackSize(PreallocatedId);
37625     assert(StackAdjustment != 0 && "0 stack adjustment");
37626     LLVM_DEBUG(dbgs() << "PREALLOCATED_SETUP stack adjustment "
37627                       << StackAdjustment << "\n");
37628     BuildMI(*BB, MI, DL, TII->get(X86::SUB32ri), X86::ESP)
37629         .addReg(X86::ESP)
37630         .addImm(StackAdjustment);
37631     MI.eraseFromParent();
37632     return BB;
37633   }
37634   case TargetOpcode::PREALLOCATED_ARG: {
37635     assert(Subtarget.is32Bit() && "preallocated calls only used in 32-bit");
37636     int64_t PreallocatedId = MI.getOperand(1).getImm();
37637     int64_t ArgIdx = MI.getOperand(2).getImm();
37638     auto MFI = MF->getInfo<X86MachineFunctionInfo>();
37639     size_t ArgOffset = MFI->getPreallocatedArgOffsets(PreallocatedId)[ArgIdx];
37640     LLVM_DEBUG(dbgs() << "PREALLOCATED_ARG arg index " << ArgIdx
37641                       << ", arg offset " << ArgOffset << "\n");
37642     // stack pointer + offset
37643     addRegOffset(
37644         BuildMI(*BB, MI, DL, TII->get(X86::LEA32r), MI.getOperand(0).getReg()),
37645         X86::ESP, false, ArgOffset);
37646     MI.eraseFromParent();
37647     return BB;
37648   }
37649   case X86::PTDPBSSD:
37650   case X86::PTDPBSUD:
37651   case X86::PTDPBUSD:
37652   case X86::PTDPBUUD:
37653   case X86::PTDPBF16PS:
37654   case X86::PTDPFP16PS: {
37655     unsigned Opc;
37656     switch (MI.getOpcode()) {
37657     default: llvm_unreachable("illegal opcode!");
37658     case X86::PTDPBSSD: Opc = X86::TDPBSSD; break;
37659     case X86::PTDPBSUD: Opc = X86::TDPBSUD; break;
37660     case X86::PTDPBUSD: Opc = X86::TDPBUSD; break;
37661     case X86::PTDPBUUD: Opc = X86::TDPBUUD; break;
37662     case X86::PTDPBF16PS: Opc = X86::TDPBF16PS; break;
37663     case X86::PTDPFP16PS: Opc = X86::TDPFP16PS; break;
37664     }
37665 
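    // The first tile operand is an accumulator (the TDP* instructions add the
    // product into it), so the same TMM register is added below both as a def
    // and as a use.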
37666     MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc));
37667     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
37668     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
37669     MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
37670     MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
37671 
37672     MI.eraseFromParent(); // The pseudo is gone now.
37673     return BB;
37674   }
37675   case X86::PTILEZERO: {
37676     unsigned Imm = MI.getOperand(0).getImm();
37677     BuildMI(*BB, MI, DL, TII->get(X86::TILEZERO), TMMImmToTMMReg(Imm));
37678     MI.eraseFromParent(); // The pseudo is gone now.
37679     return BB;
37680   }
37681   case X86::PTILELOADD:
37682   case X86::PTILELOADDT1:
37683   case X86::PTILESTORED: {
37684     unsigned Opc;
37685     switch (MI.getOpcode()) {
37686     default: llvm_unreachable("illegal opcode!");
37687     case X86::PTILELOADD:   Opc = X86::TILELOADD;   break;
37688     case X86::PTILELOADDT1: Opc = X86::TILELOADDT1; break;
37689     case X86::PTILESTORED:  Opc = X86::TILESTORED;  break;
37690     }
37691 
37692     MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc));
37693     unsigned CurOp = 0;
37694     if (Opc != X86::TILESTORED)
37695       MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
37696                  RegState::Define);
37697 
37698     MIB.add(MI.getOperand(CurOp++)); // base
37699     MIB.add(MI.getOperand(CurOp++)); // scale
37700     MIB.add(MI.getOperand(CurOp++)); // index -- stride
37701     MIB.add(MI.getOperand(CurOp++)); // displacement
37702     MIB.add(MI.getOperand(CurOp++)); // segment
37703 
37704     if (Opc == X86::TILESTORED)
37705       MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
37706                  RegState::Undef);
37707 
37708     MI.eraseFromParent(); // The pseudo is gone now.
37709     return BB;
37710   }
37711   }
37712 }
37713 
37714 //===----------------------------------------------------------------------===//
37715 //                           X86 Optimization Hooks
37716 //===----------------------------------------------------------------------===//
37717 
37718 bool
37719 X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
37720                                                 const APInt &DemandedBits,
37721                                                 const APInt &DemandedElts,
37722                                                 TargetLoweringOpt &TLO) const {
37723   EVT VT = Op.getValueType();
37724   unsigned Opcode = Op.getOpcode();
37725   unsigned EltSize = VT.getScalarSizeInBits();
37726 
37727   if (VT.isVector()) {
37728     // If the constant is all sign bits within the active bits, then we should
37729     // extend it to the entire constant to allow it to act as a boolean
37730     // constant vector.
37731     auto NeedsSignExtension = [&](SDValue V, unsigned ActiveBits) {
37732       if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
37733         return false;
37734       for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
37735         if (!DemandedElts[i] || V.getOperand(i).isUndef())
37736           continue;
37737         const APInt &Val = V.getConstantOperandAPInt(i);
37738         if (Val.getBitWidth() > Val.getNumSignBits() &&
37739             Val.trunc(ActiveBits).getNumSignBits() == ActiveBits)
37740           return true;
37741       }
37742       return false;
37743     };
37744     // For vectors - if we have a constant, then try to sign extend.
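    // e.g. a v4i32 OR with constant elements 0x0000FFFF where only the low 16
    // bits are demanded: sign-extending from 16 bits gives 0xFFFFFFFF, which
    // can act as an all-ones boolean vector constant.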
37745     // TODO: Handle AND/ANDN cases.
37746     unsigned ActiveBits = DemandedBits.getActiveBits();
37747     if (EltSize > ActiveBits && EltSize > 1 && isTypeLegal(VT) &&
37748         (Opcode == ISD::OR || Opcode == ISD::XOR) &&
37749         NeedsSignExtension(Op.getOperand(1), ActiveBits)) {
37750       EVT ExtSVT = EVT::getIntegerVT(*TLO.DAG.getContext(), ActiveBits);
37751       EVT ExtVT = EVT::getVectorVT(*TLO.DAG.getContext(), ExtSVT,
37752                                     VT.getVectorNumElements());
37753       SDValue NewC =
37754           TLO.DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(Op), VT,
37755                           Op.getOperand(1), TLO.DAG.getValueType(ExtVT));
37756       SDValue NewOp =
37757           TLO.DAG.getNode(Opcode, SDLoc(Op), VT, Op.getOperand(0), NewC);
37758       return TLO.CombineTo(Op, NewOp);
37759     }
37760     return false;
37761   }
37762 
37763   // Only optimize Ands to prevent shrinking a constant that could be
37764   // matched by movzx.
37765   if (Opcode != ISD::AND)
37766     return false;
37767 
37768   // Make sure the RHS really is a constant.
37769   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
37770   if (!C)
37771     return false;
37772 
37773   const APInt &Mask = C->getAPIntValue();
37774 
37775   // Clear all non-demanded bits initially.
37776   APInt ShrunkMask = Mask & DemandedBits;
37777 
37778   // Find the width of the shrunk mask.
37779   unsigned Width = ShrunkMask.getActiveBits();
37780 
37781   // If the mask is all 0s there's nothing to do here.
37782   if (Width == 0)
37783     return false;
37784 
37785   // Find the next power of 2 width, rounding up to a byte.
37786   Width = PowerOf2Ceil(std::max(Width, 8U));
37787   // Truncate the width to size to handle illegal types.
37788   Width = std::min(Width, EltSize);
37789 
37790   // Calculate a possible zero extend mask for this constant.
37791   APInt ZeroExtendMask = APInt::getLowBitsSet(EltSize, Width);
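  // e.g. with a 32-bit AND mask of 0x1FF and only the low 8 bits demanded,
  // ShrunkMask is 0xFF, Width rounds up to 8 and ZeroExtendMask becomes 0xFF,
  // which lets the AND below be matched as a movzx-style zero extend.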
37792 
37793   // If we aren't changing the mask, just return true to keep it and prevent
37794   // the caller from optimizing.
37795   if (ZeroExtendMask == Mask)
37796     return true;
37797 
37798   // Make sure the new mask can be represented by a combination of mask bits
37799   // and non-demanded bits.
37800   if (!ZeroExtendMask.isSubsetOf(Mask | ~DemandedBits))
37801     return false;
37802 
37803   // Replace the constant with the zero extend mask.
37804   SDLoc DL(Op);
37805   SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
37806   SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
37807   return TLO.CombineTo(Op, NewOp);
37808 }
37809 
37810 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
37811                                                       KnownBits &Known,
37812                                                       const APInt &DemandedElts,
37813                                                       const SelectionDAG &DAG,
37814                                                       unsigned Depth) const {
37815   unsigned BitWidth = Known.getBitWidth();
37816   unsigned NumElts = DemandedElts.getBitWidth();
37817   unsigned Opc = Op.getOpcode();
37818   EVT VT = Op.getValueType();
37819   assert((Opc >= ISD::BUILTIN_OP_END ||
37820           Opc == ISD::INTRINSIC_WO_CHAIN ||
37821           Opc == ISD::INTRINSIC_W_CHAIN ||
37822           Opc == ISD::INTRINSIC_VOID) &&
37823          "Should use MaskedValueIsZero if you don't know whether Op"
37824          " is a target node!");
37825 
37826   Known.resetAll();
37827   switch (Opc) {
37828   default: break;
37829   case X86ISD::SETCC:
37830     Known.Zero.setBitsFrom(1);
37831     break;
37832   case X86ISD::MOVMSK: {
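    // MOVMSK produces one mask bit per source element; e.g. a v4f32 source
    // yields a 4-bit mask, so the remaining high bits of the i32 result are
    // known zero.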
37833     unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
37834     Known.Zero.setBitsFrom(NumLoBits);
37835     break;
37836   }
37837   case X86ISD::PEXTRB:
37838   case X86ISD::PEXTRW: {
37839     SDValue Src = Op.getOperand(0);
37840     EVT SrcVT = Src.getValueType();
37841     APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
37842                                             Op.getConstantOperandVal(1));
37843     Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
37844     Known = Known.anyextOrTrunc(BitWidth);
37845     Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
37846     break;
37847   }
37848   case X86ISD::VSRAI:
37849   case X86ISD::VSHLI:
37850   case X86ISD::VSRLI: {
37851     unsigned ShAmt = Op.getConstantOperandVal(1);
37852     if (ShAmt >= VT.getScalarSizeInBits()) {
37853       // Out of range logical bit shifts are guaranteed to be zero.
37854       // Out of range arithmetic bit shifts splat the sign bit.
37855       if (Opc != X86ISD::VSRAI) {
37856         Known.setAllZero();
37857         break;
37858       }
37859 
37860       ShAmt = VT.getScalarSizeInBits() - 1;
37861     }
37862 
37863     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37864     if (Opc == X86ISD::VSHLI) {
37865       Known.Zero <<= ShAmt;
37866       Known.One <<= ShAmt;
37867       // Low bits are known zero.
37868       Known.Zero.setLowBits(ShAmt);
37869     } else if (Opc == X86ISD::VSRLI) {
37870       Known.Zero.lshrInPlace(ShAmt);
37871       Known.One.lshrInPlace(ShAmt);
37872       // High bits are known zero.
37873       Known.Zero.setHighBits(ShAmt);
37874     } else {
37875       Known.Zero.ashrInPlace(ShAmt);
37876       Known.One.ashrInPlace(ShAmt);
37877     }
37878     break;
37879   }
37880   case X86ISD::PACKUS: {
37881     // PACKUS is just a truncation if the upper half is zero.
37882     APInt DemandedLHS, DemandedRHS;
37883     getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
37884 
37885     Known.One = APInt::getAllOnes(BitWidth * 2);
37886     Known.Zero = APInt::getAllOnes(BitWidth * 2);
37887 
37888     KnownBits Known2;
37889     if (!!DemandedLHS) {
37890       Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
37891       Known = KnownBits::commonBits(Known, Known2);
37892     }
37893     if (!!DemandedRHS) {
37894       Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
37895       Known = KnownBits::commonBits(Known, Known2);
37896     }
37897 
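    // Unless the upper half of every demanded source element is known to be
    // zero, PACKUS may saturate rather than truncate, so give up and report
    // nothing known.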
37898     if (Known.countMinLeadingZeros() < BitWidth)
37899       Known.resetAll();
37900     Known = Known.trunc(BitWidth);
37901     break;
37902   }
37903   case X86ISD::VBROADCAST: {
37904     SDValue Src = Op.getOperand(0);
37905     if (!Src.getSimpleValueType().isVector()) {
37906       Known = DAG.computeKnownBits(Src, Depth + 1);
37907       return;
37908     }
37909     break;
37910   }
37911   case X86ISD::AND: {
37912     if (Op.getResNo() == 0) {
37913       KnownBits Known2;
37914       Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37915       Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37916       Known &= Known2;
37917     }
37918     break;
37919   }
37920   case X86ISD::ANDNP: {
37921     KnownBits Known2;
37922     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37923     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37924 
37925     // ANDNP = (~X & Y);
37926     Known.One &= Known2.Zero;
37927     Known.Zero |= Known2.One;
37928     break;
37929   }
37930   case X86ISD::FOR: {
37931     KnownBits Known2;
37932     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37933     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37934 
37935     Known |= Known2;
37936     break;
37937   }
37938   case X86ISD::PSADBW: {
37939     assert(VT.getScalarType() == MVT::i64 &&
37940            Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
37941            "Unexpected PSADBW types");
37942 
37943     // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
37944     Known.Zero.setBitsFrom(16);
37945     break;
37946   }
37947   case X86ISD::PMULUDQ: {
37948     KnownBits Known2;
37949     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37950     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37951 
37952     Known = Known.trunc(BitWidth / 2).zext(BitWidth);
37953     Known2 = Known2.trunc(BitWidth / 2).zext(BitWidth);
37954     Known = KnownBits::mul(Known, Known2);
37955     break;
37956   }
37957   case X86ISD::CMOV: {
37958     Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
37959     // If we don't know any bits, early out.
37960     if (Known.isUnknown())
37961       break;
37962     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
37963 
37964     // Only known if known in both the LHS and RHS.
37965     Known = KnownBits::commonBits(Known, Known2);
37966     break;
37967   }
37968   case X86ISD::BEXTR:
37969   case X86ISD::BEXTRI: {
37970     SDValue Op0 = Op.getOperand(0);
37971     SDValue Op1 = Op.getOperand(1);
37972 
37973     if (auto* Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
37974       unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
37975       unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
37976 
37977       // If the length is 0, the result is 0.
37978       if (Length == 0) {
37979         Known.setAllZero();
37980         break;
37981       }
37982 
37983       if ((Shift + Length) <= BitWidth) {
37984         Known = DAG.computeKnownBits(Op0, Depth + 1);
37985         Known = Known.extractBits(Length, Shift);
37986         Known = Known.zextOrTrunc(BitWidth);
37987       }
37988     }
37989     break;
37990   }
37991   case X86ISD::PDEP: {
37992     KnownBits Known2;
37993     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
37994     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
37995     // Zeros are retained from the mask operand. But not ones.
37996     Known.One.clearAllBits();
37997     // The result will have at least as many trailing zeros as the non-mask
37998     // operand since bits can only map to the same or higher bit position.
37999     Known.Zero.setLowBits(Known2.countMinTrailingZeros());
38000     break;
38001   }
38002   case X86ISD::PEXT: {
38003     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
38004     // The result has at least as many leading zeros as there are zeros in the mask.
38005     unsigned Count = Known.Zero.countPopulation();
38006     Known.Zero = APInt::getHighBitsSet(BitWidth, Count);
38007     Known.One.clearAllBits();
38008     break;
38009   }
38010   case X86ISD::VTRUNC:
38011   case X86ISD::VTRUNCS:
38012   case X86ISD::VTRUNCUS:
38013   case X86ISD::CVTSI2P:
38014   case X86ISD::CVTUI2P:
38015   case X86ISD::CVTP2SI:
38016   case X86ISD::CVTP2UI:
38017   case X86ISD::MCVTP2SI:
38018   case X86ISD::MCVTP2UI:
38019   case X86ISD::CVTTP2SI:
38020   case X86ISD::CVTTP2UI:
38021   case X86ISD::MCVTTP2SI:
38022   case X86ISD::MCVTTP2UI:
38023   case X86ISD::MCVTSI2P:
38024   case X86ISD::MCVTUI2P:
38025   case X86ISD::VFPROUND:
38026   case X86ISD::VMFPROUND:
38027   case X86ISD::CVTPS2PH:
38028   case X86ISD::MCVTPS2PH: {
38029     // Truncations/Conversions - upper elements are known zero.
38030     EVT SrcVT = Op.getOperand(0).getValueType();
38031     if (SrcVT.isVector()) {
38032       unsigned NumSrcElts = SrcVT.getVectorNumElements();
38033       if (NumElts > NumSrcElts &&
38034           DemandedElts.countTrailingZeros() >= NumSrcElts)
38035         Known.setAllZero();
38036     }
38037     break;
38038   }
38039   case X86ISD::STRICT_CVTTP2SI:
38040   case X86ISD::STRICT_CVTTP2UI:
38041   case X86ISD::STRICT_CVTSI2P:
38042   case X86ISD::STRICT_CVTUI2P:
38043   case X86ISD::STRICT_VFPROUND:
38044   case X86ISD::STRICT_CVTPS2PH: {
38045     // Strict Conversions - upper elements are known zero.
38046     EVT SrcVT = Op.getOperand(1).getValueType();
38047     if (SrcVT.isVector()) {
38048       unsigned NumSrcElts = SrcVT.getVectorNumElements();
38049       if (NumElts > NumSrcElts &&
38050           DemandedElts.countTrailingZeros() >= NumSrcElts)
38051         Known.setAllZero();
38052     }
38053     break;
38054   }
38055   case X86ISD::MOVQ2DQ: {
38056     // Move from MMX to XMM. Upper half of XMM should be 0.
38057     if (DemandedElts.countTrailingZeros() >= (NumElts / 2))
38058       Known.setAllZero();
38059     break;
38060   }
38061   case X86ISD::VBROADCAST_LOAD: {
38062     APInt UndefElts;
38063     SmallVector<APInt, 16> EltBits;
38064     if (getTargetConstantBitsFromNode(Op, BitWidth, UndefElts, EltBits,
38065                                       /*AllowWholeUndefs*/ false,
38066                                       /*AllowPartialUndefs*/ false)) {
38067       Known.Zero.setAllBits();
38068       Known.One.setAllBits();
38069       for (unsigned I = 0; I != NumElts; ++I) {
38070         if (!DemandedElts[I])
38071           continue;
38072         if (UndefElts[I]) {
38073           Known.resetAll();
38074           break;
38075         }
38076         KnownBits Known2 = KnownBits::makeConstant(EltBits[I]);
38077         Known = KnownBits::commonBits(Known, Known2);
38078       }
38079       return;
38080     }
38081     break;
38082   }
38083   }
38084 
38085   // Handle target shuffles.
38086   // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
38087   if (isTargetShuffle(Opc)) {
38088     SmallVector<int, 64> Mask;
38089     SmallVector<SDValue, 2> Ops;
38090     if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
38091       unsigned NumOps = Ops.size();
38092       unsigned NumElts = VT.getVectorNumElements();
38093       if (Mask.size() == NumElts) {
38094         SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
38095         Known.Zero.setAllBits(); Known.One.setAllBits();
38096         for (unsigned i = 0; i != NumElts; ++i) {
38097           if (!DemandedElts[i])
38098             continue;
38099           int M = Mask[i];
38100           if (M == SM_SentinelUndef) {
38101             // For UNDEF elements, we don't know anything about the common state
38102             // of the shuffle result.
38103             Known.resetAll();
38104             break;
38105           }
38106           if (M == SM_SentinelZero) {
38107             Known.One.clearAllBits();
38108             continue;
38109           }
38110           assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
38111                  "Shuffle index out of range");
38112 
38113           unsigned OpIdx = (unsigned)M / NumElts;
38114           unsigned EltIdx = (unsigned)M % NumElts;
38115           if (Ops[OpIdx].getValueType() != VT) {
38116             // TODO - handle target shuffle ops with different value types.
38117             Known.resetAll();
38118             break;
38119           }
38120           DemandedOps[OpIdx].setBit(EltIdx);
38121         }
38122         // Known bits are the values that are shared by every demanded element.
38123         for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
38124           if (!DemandedOps[i])
38125             continue;
38126           KnownBits Known2 =
38127               DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
38128           Known = KnownBits::commonBits(Known, Known2);
38129         }
38130       }
38131     }
38132   }
38133 }
38134 
38135 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
38136     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
38137     unsigned Depth) const {
38138   EVT VT = Op.getValueType();
38139   unsigned VTBits = VT.getScalarSizeInBits();
38140   unsigned Opcode = Op.getOpcode();
38141   switch (Opcode) {
38142   case X86ISD::SETCC_CARRY:
38143     // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
38144     return VTBits;
38145 
38146   case X86ISD::VTRUNC: {
38147     SDValue Src = Op.getOperand(0);
38148     MVT SrcVT = Src.getSimpleValueType();
38149     unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
38150     assert(VTBits < NumSrcBits && "Illegal truncation input type");
38151     APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
38152     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
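          // Only the sign bits that extend into the low VTBits of the source
          // survive the truncation.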
38153     if (Tmp > (NumSrcBits - VTBits))
38154       return Tmp - (NumSrcBits - VTBits);
38155     return 1;
38156   }
38157 
38158   case X86ISD::PACKSS: {
38159     // PACKSS is just a truncation if the sign bits extend to the packed size.
38160     APInt DemandedLHS, DemandedRHS;
38161     getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
38162                         DemandedRHS);
38163 
38164     unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
38165     unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
38166     if (!!DemandedLHS)
38167       Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
38168     if (!!DemandedRHS)
38169       Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
38170     unsigned Tmp = std::min(Tmp0, Tmp1);
38171     if (Tmp > (SrcBits - VTBits))
38172       return Tmp - (SrcBits - VTBits);
38173     return 1;
38174   }
38175 
38176   case X86ISD::VBROADCAST: {
38177     SDValue Src = Op.getOperand(0);
38178     if (!Src.getSimpleValueType().isVector())
38179       return DAG.ComputeNumSignBits(Src, Depth + 1);
38180     break;
38181   }
38182 
38183   case X86ISD::VSHLI: {
38184     SDValue Src = Op.getOperand(0);
38185     const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
38186     if (ShiftVal.uge(VTBits))
38187       return VTBits; // Shifted all bits out --> zero.
38188     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
38189     if (ShiftVal.uge(Tmp))
38190       return 1; // Shifted all sign bits out --> unknown.
38191     return Tmp - ShiftVal.getZExtValue();
38192   }
38193 
38194   case X86ISD::VSRAI: {
38195     SDValue Src = Op.getOperand(0);
38196     APInt ShiftVal = Op.getConstantOperandAPInt(1);
38197     if (ShiftVal.uge(VTBits - 1))
38198       return VTBits; // Sign splat.
38199     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
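          // An arithmetic right shift replicates the sign bit ShiftVal more
          // times, capped at the element width.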
38200     ShiftVal += Tmp;
38201     return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
38202   }
38203 
38204   case X86ISD::FSETCC:
38205     // cmpss/cmpsd return zero/all-bits result values in the bottom element.
38206     if (VT == MVT::f32 || VT == MVT::f64 ||
38207         ((VT == MVT::v4f32 || VT == MVT::v2f64) && DemandedElts == 1))
38208       return VTBits;
38209     break;
38210 
38211   case X86ISD::PCMPGT:
38212   case X86ISD::PCMPEQ:
38213   case X86ISD::CMPP:
38214   case X86ISD::VPCOM:
38215   case X86ISD::VPCOMU:
38216     // Vector compares return zero/all-bits result values.
38217     return VTBits;
38218 
38219   case X86ISD::ANDNP: {
38220     unsigned Tmp0 =
38221         DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
38222     if (Tmp0 == 1) return 1; // Early out.
38223     unsigned Tmp1 =
38224         DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
38225     return std::min(Tmp0, Tmp1);
38226   }
38227 
38228   case X86ISD::CMOV: {
38229     unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
38230     if (Tmp0 == 1) return 1;  // Early out.
38231     unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
38232     return std::min(Tmp0, Tmp1);
38233   }
38234   }
38235 
38236   // Handle target shuffles.
38237   // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
38238   if (isTargetShuffle(Opcode)) {
38239     SmallVector<int, 64> Mask;
38240     SmallVector<SDValue, 2> Ops;
38241     if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
38242       unsigned NumOps = Ops.size();
38243       unsigned NumElts = VT.getVectorNumElements();
38244       if (Mask.size() == NumElts) {
38245         SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
38246         for (unsigned i = 0; i != NumElts; ++i) {
38247           if (!DemandedElts[i])
38248             continue;
38249           int M = Mask[i];
38250           if (M == SM_SentinelUndef) {
38251             // For UNDEF elements, we don't know anything about the common state
38252             // of the shuffle result.
38253             return 1;
38254           } else if (M == SM_SentinelZero) {
38255             // Zero = all sign bits.
38256             continue;
38257           }
38258           assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
38259                  "Shuffle index out of range");
38260 
38261           unsigned OpIdx = (unsigned)M / NumElts;
38262           unsigned EltIdx = (unsigned)M % NumElts;
38263           if (Ops[OpIdx].getValueType() != VT) {
38264             // TODO - handle target shuffle ops with different value types.
38265             return 1;
38266           }
38267           DemandedOps[OpIdx].setBit(EltIdx);
38268         }
38269         unsigned Tmp0 = VTBits;
38270         for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
38271           if (!DemandedOps[i])
38272             continue;
38273           unsigned Tmp1 =
38274               DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
38275           Tmp0 = std::min(Tmp0, Tmp1);
38276         }
38277         return Tmp0;
38278       }
38279     }
38280   }
38281 
38282   // Fallback case.
38283   return 1;
38284 }
38285 
38286 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
38287   if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
38288     return N->getOperand(0);
38289   return N;
38290 }
38291 
38292 // Helper to look for a normal load that can be narrowed into a vzload with the
38293 // specified VT and memory VT. Returns SDValue() on failure.
38294 static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT,
38295                                   SelectionDAG &DAG) {
38296   // Can't if the load is volatile or atomic.
38297   if (!LN->isSimple())
38298     return SDValue();
38299 
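        // Build a VZEXT_LOAD that reuses the original load's chain, pointer,
        // alignment and memory operand flags.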
38300   SDVTList Tys = DAG.getVTList(VT, MVT::Other);
38301   SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
38302   return DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, SDLoc(LN), Tys, Ops, MemVT,
38303                                  LN->getPointerInfo(), LN->getOriginalAlign(),
38304                                  LN->getMemOperand()->getFlags());
38305 }
38306 
38307 // Attempt to match a combined shuffle mask against supported unary shuffle
38308 // instructions.
38309 // TODO: Investigate sharing more of this with shuffle lowering.
38310 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
38311                               bool AllowFloatDomain, bool AllowIntDomain,
38312                               SDValue V1, const SelectionDAG &DAG,
38313                               const X86Subtarget &Subtarget, unsigned &Shuffle,
38314                               MVT &SrcVT, MVT &DstVT) {
38315   unsigned NumMaskElts = Mask.size();
38316   unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
38317 
38318   // Match against a VZEXT_MOVL vXi32 and vXi16 zero-extending instruction.
38319   if (Mask[0] == 0 &&
38320       (MaskEltSize == 32 || (MaskEltSize == 16 && Subtarget.hasFP16()))) {
38321     if ((isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) ||
38322         (V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
38323          isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1))) {
38324       Shuffle = X86ISD::VZEXT_MOVL;
38325       if (MaskEltSize == 16)
38326         SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
38327       else
38328         SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
38329       return true;
38330     }
38331   }
38332 
38333   // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
38334   // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
38335   if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
38336                          (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
38337     unsigned MaxScale = 64 / MaskEltSize;
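          // Try each power-of-two extension scale with destination elements of
          // at most 64 bits.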
38338     for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
38339       bool MatchAny = true;
38340       bool MatchZero = true;
38341       unsigned NumDstElts = NumMaskElts / Scale;
38342       for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
38343         if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
38344           MatchAny = MatchZero = false;
38345           break;
38346         }
38347         MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
38348         MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
38349       }
38350       if (MatchAny || MatchZero) {
38351         assert(MatchZero && "Failed to match zext but matched aext?");
38352         unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
38353         MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType() :
38354                                             MVT::getIntegerVT(MaskEltSize);
38355         SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
38356 
38357         Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
38358         if (SrcVT.getVectorNumElements() != NumDstElts)
38359           Shuffle = DAG.getOpcode_EXTEND_VECTOR_INREG(Shuffle);
38360 
38361         DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
38362         DstVT = MVT::getVectorVT(DstVT, NumDstElts);
38363         return true;
38364       }
38365     }
38366   }
38367 
38368   // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
38369   if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2()) ||
38370        (MaskEltSize == 16 && Subtarget.hasFP16())) &&
38371       isUndefOrEqual(Mask[0], 0) &&
38372       isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
38373     Shuffle = X86ISD::VZEXT_MOVL;
38374     if (MaskEltSize == 16)
38375       SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
38376     else
38377       SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
38378     return true;
38379   }
38380 
38381   // Check if we have SSE3, which will let us use MOVDDUP etc. These
38382   // instructions are no slower than UNPCKLPD but have the option to
38383   // fold the input operand even from an unaligned memory load.
38384   if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
38385     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG, V1)) {
38386       Shuffle = X86ISD::MOVDDUP;
38387       SrcVT = DstVT = MVT::v2f64;
38388       return true;
38389     }
38390     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
38391       Shuffle = X86ISD::MOVSLDUP;
38392       SrcVT = DstVT = MVT::v4f32;
38393       return true;
38394     }
38395     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3}, DAG, V1)) {
38396       Shuffle = X86ISD::MOVSHDUP;
38397       SrcVT = DstVT = MVT::v4f32;
38398       return true;
38399     }
38400   }
38401 
38402   if (MaskVT.is256BitVector() && AllowFloatDomain) {
38403     assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
38404     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
38405       Shuffle = X86ISD::MOVDDUP;
38406       SrcVT = DstVT = MVT::v4f64;
38407       return true;
38408     }
38409     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
38410                                   V1)) {
38411       Shuffle = X86ISD::MOVSLDUP;
38412       SrcVT = DstVT = MVT::v8f32;
38413       return true;
38414     }
38415     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3, 5, 5, 7, 7}, DAG,
38416                                   V1)) {
38417       Shuffle = X86ISD::MOVSHDUP;
38418       SrcVT = DstVT = MVT::v8f32;
38419       return true;
38420     }
38421   }
38422 
38423   if (MaskVT.is512BitVector() && AllowFloatDomain) {
38424     assert(Subtarget.hasAVX512() &&
38425            "AVX512 required for 512-bit vector shuffles");
38426     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
38427                                   V1)) {
38428       Shuffle = X86ISD::MOVDDUP;
38429       SrcVT = DstVT = MVT::v8f64;
38430       return true;
38431     }
38432     if (isTargetShuffleEquivalent(
38433             MaskVT, Mask,
38434             {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}, DAG, V1)) {
38435       Shuffle = X86ISD::MOVSLDUP;
38436       SrcVT = DstVT = MVT::v16f32;
38437       return true;
38438     }
38439     if (isTargetShuffleEquivalent(
38440             MaskVT, Mask,
38441             {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}, DAG, V1)) {
38442       Shuffle = X86ISD::MOVSHDUP;
38443       SrcVT = DstVT = MVT::v16f32;
38444       return true;
38445     }
38446   }
38447 
38448   return false;
38449 }
38450 
38451 // Attempt to match a combined shuffle mask against supported unary immediate
38452 // permute instructions.
38453 // TODO: Investigate sharing more of this with shuffle lowering.
38454 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
38455                                      const APInt &Zeroable,
38456                                      bool AllowFloatDomain, bool AllowIntDomain,
38457                                      const SelectionDAG &DAG,
38458                                      const X86Subtarget &Subtarget,
38459                                      unsigned &Shuffle, MVT &ShuffleVT,
38460                                      unsigned &PermuteImm) {
38461   unsigned NumMaskElts = Mask.size();
38462   unsigned InputSizeInBits = MaskVT.getSizeInBits();
38463   unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
38464   MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
38465   bool ContainsZeros = isAnyZero(Mask);
38466 
38467   // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
38468   if (!ContainsZeros && MaskScalarSizeInBits == 64) {
38469     // Check for lane crossing permutes.
38470     if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
38471       // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
38472       if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
38473         Shuffle = X86ISD::VPERMI;
38474         ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
38475         PermuteImm = getV4X86ShuffleImm(Mask);
38476         return true;
38477       }
38478       if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
38479         SmallVector<int, 4> RepeatedMask;
38480         if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
38481           Shuffle = X86ISD::VPERMI;
38482           ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
38483           PermuteImm = getV4X86ShuffleImm(RepeatedMask);
38484           return true;
38485         }
38486       }
38487     } else if (AllowFloatDomain && Subtarget.hasAVX()) {
38488       // VPERMILPD can permute with a non-repeating shuffle.
38489       Shuffle = X86ISD::VPERMILPI;
38490       ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
38491       PermuteImm = 0;
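            // Bit i of the VPERMILPD immediate picks the low (0) or high (1)
            // element of the 128-bit lane containing result element i.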
38492       for (int i = 0, e = Mask.size(); i != e; ++i) {
38493         int M = Mask[i];
38494         if (M == SM_SentinelUndef)
38495           continue;
38496         assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
38497         PermuteImm |= (M & 1) << i;
38498       }
38499       return true;
38500     }
38501   }
38502 
38503   // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
38504   // AVX introduced the VPERMILPD/VPERMILPS float permutes; before then we
38505   // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
38506   if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
38507       !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
38508     SmallVector<int, 4> RepeatedMask;
38509     if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
38510       // Narrow the repeated mask to create 32-bit element permutes.
38511       SmallVector<int, 4> WordMask = RepeatedMask;
38512       if (MaskScalarSizeInBits == 64)
38513         narrowShuffleMaskElts(2, RepeatedMask, WordMask);
38514 
38515       Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
38516       ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
38517       ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
38518       PermuteImm = getV4X86ShuffleImm(WordMask);
38519       return true;
38520     }
38521   }
38522 
38523   // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
38524   if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16 &&
38525       ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
38526        (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
38527        (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
38528     SmallVector<int, 4> RepeatedMask;
38529     if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
38530       ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
38531       ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
38532 
38533       // PSHUFLW: permute lower 4 elements only.
38534       if (isUndefOrInRange(LoMask, 0, 4) &&
38535           isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
38536         Shuffle = X86ISD::PSHUFLW;
38537         ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
38538         PermuteImm = getV4X86ShuffleImm(LoMask);
38539         return true;
38540       }
38541 
38542       // PSHUFHW: permute upper 4 elements only.
38543       if (isUndefOrInRange(HiMask, 4, 8) &&
38544           isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
38545         // Offset the HiMask so that we can create the shuffle immediate.
38546         int OffsetHiMask[4];
38547         for (int i = 0; i != 4; ++i)
38548           OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
38549 
38550         Shuffle = X86ISD::PSHUFHW;
38551         ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
38552         PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
38553         return true;
38554       }
38555     }
38556   }
38557 
38558   // Attempt to match against byte/bit shifts.
38559   if (AllowIntDomain &&
38560       ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
38561        (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
38562        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
38563     int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
38564                                        Mask, 0, Zeroable, Subtarget);
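          // 512-bit byte/word shifts require AVX512BW; dword/qword shifts only
          // need AVX512F.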
38565     if (0 < ShiftAmt && (!ShuffleVT.is512BitVector() || Subtarget.hasBWI() ||
38566                          32 <= ShuffleVT.getScalarSizeInBits())) {
38567       PermuteImm = (unsigned)ShiftAmt;
38568       return true;
38569     }
38570   }
38571 
38572   // Attempt to match against bit rotates.
38573   if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits < 64 &&
38574       ((MaskVT.is128BitVector() && Subtarget.hasXOP()) ||
38575        Subtarget.hasAVX512())) {
38576     int RotateAmt = matchShuffleAsBitRotate(ShuffleVT, MaskScalarSizeInBits,
38577                                             Subtarget, Mask);
38578     if (0 < RotateAmt) {
38579       Shuffle = X86ISD::VROTLI;
38580       PermuteImm = (unsigned)RotateAmt;
38581       return true;
38582     }
38583   }
38584 
38585   return false;
38586 }
38587 
38588 // Attempt to match a combined unary shuffle mask against supported binary
38589 // shuffle instructions.
38590 // TODO: Investigate sharing more of this with shuffle lowering.
38591 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
38592                                bool AllowFloatDomain, bool AllowIntDomain,
38593                                SDValue &V1, SDValue &V2, const SDLoc &DL,
38594                                SelectionDAG &DAG, const X86Subtarget &Subtarget,
38595                                unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
38596                                bool IsUnary) {
38597   unsigned NumMaskElts = Mask.size();
38598   unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
38599   unsigned SizeInBits = MaskVT.getSizeInBits();
38600 
38601   if (MaskVT.is128BitVector()) {
38602     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG) &&
38603         AllowFloatDomain) {
38604       V2 = V1;
38605       V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
38606       Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
38607       SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
38608       return true;
38609     }
38610     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1}, DAG) &&
38611         AllowFloatDomain) {
38612       V2 = V1;
38613       Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
38614       SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
38615       return true;
38616     }
38617     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 3}, DAG) &&
38618         Subtarget.hasSSE2() && (AllowFloatDomain || !Subtarget.hasSSE41())) {
38619       std::swap(V1, V2);
38620       Shuffle = X86ISD::MOVSD;
38621       SrcVT = DstVT = MVT::v2f64;
38622       return true;
38623     }
38624     if (isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG) &&
38625         (AllowFloatDomain || !Subtarget.hasSSE41())) {
38626       Shuffle = X86ISD::MOVSS;
38627       SrcVT = DstVT = MVT::v4f32;
38628       return true;
38629     }
38630     if (isTargetShuffleEquivalent(MaskVT, Mask, {8, 1, 2, 3, 4, 5, 6, 7},
38631                                   DAG) &&
38632         Subtarget.hasFP16()) {
38633       Shuffle = X86ISD::MOVSH;
38634       SrcVT = DstVT = MVT::v8f16;
38635       return true;
38636     }
38637   }
38638 
38639   // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
38640   if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
38641       ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
38642       ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
38643     if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
38644                              Subtarget)) {
38645       DstVT = MaskVT;
38646       return true;
38647     }
38648   }
38649 
38650   // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
38651   if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
38652       (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
38653       (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
38654       (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
38655       (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
38656     if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
38657                               Subtarget)) {
38658       SrcVT = DstVT = MaskVT;
38659       if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
38660         SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
38661       return true;
38662     }
38663   }
38664 
38665   // Attempt to match against an OR if we're performing a blend shuffle and the
38666   // non-blended source element is zero in each case.
38667   // TODO: Handle cases where V1/V2 sizes don't match SizeInBits.
38668   if (SizeInBits == V1.getValueSizeInBits() &&
38669       SizeInBits == V2.getValueSizeInBits() &&
38670       (EltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
38671       (EltSizeInBits % V2.getScalarValueSizeInBits()) == 0) {
38672     bool IsBlend = true;
38673     unsigned NumV1Elts = V1.getValueType().getVectorNumElements();
38674     unsigned NumV2Elts = V2.getValueType().getVectorNumElements();
38675     unsigned Scale1 = NumV1Elts / NumMaskElts;
38676     unsigned Scale2 = NumV2Elts / NumMaskElts;
38677     APInt DemandedZeroV1 = APInt::getZero(NumV1Elts);
38678     APInt DemandedZeroV2 = APInt::getZero(NumV2Elts);
38679     for (unsigned i = 0; i != NumMaskElts; ++i) {
38680       int M = Mask[i];
38681       if (M == SM_SentinelUndef)
38682         continue;
38683       if (M == SM_SentinelZero) {
38684         DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
38685         DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
38686         continue;
38687       }
38688       if (M == (int)i) {
38689         DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
38690         continue;
38691       }
38692       if (M == (int)(i + NumMaskElts)) {
38693         DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
38694         continue;
38695       }
38696       IsBlend = false;
38697       break;
38698     }
38699     if (IsBlend) {
38700       if (DAG.MaskedVectorIsZero(V1, DemandedZeroV1) &&
38701           DAG.MaskedVectorIsZero(V2, DemandedZeroV2)) {
38702         Shuffle = ISD::OR;
38703         SrcVT = DstVT = MaskVT.changeTypeToInteger();
38704         return true;
38705       }
38706       if (NumV1Elts == NumV2Elts && NumV1Elts == NumMaskElts) {
38707         // FIXME: handle mismatched sizes?
38708         // TODO: investigate if `ISD::OR` handling in
38709         // `TargetLowering::SimplifyDemandedVectorElts` can be improved instead.
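              // Summarize each element as known-all-zeros or known-all-ones by
              // querying the known bits one element at a time.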
38710         auto computeKnownBitsElementWise = [&DAG](SDValue V) {
38711           unsigned NumElts = V.getValueType().getVectorNumElements();
38712           KnownBits Known(NumElts);
38713           for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
38714             APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
38715             KnownBits PeepholeKnown = DAG.computeKnownBits(V, Mask);
38716             if (PeepholeKnown.isZero())
38717               Known.Zero.setBit(EltIdx);
38718             if (PeepholeKnown.isAllOnes())
38719               Known.One.setBit(EltIdx);
38720           }
38721           return Known;
38722         };
38723 
38724         KnownBits V1Known = computeKnownBitsElementWise(V1);
38725         KnownBits V2Known = computeKnownBitsElementWise(V2);
38726 
38727         for (unsigned i = 0; i != NumMaskElts && IsBlend; ++i) {
38728           int M = Mask[i];
38729           if (M == SM_SentinelUndef)
38730             continue;
38731           if (M == SM_SentinelZero) {
38732             IsBlend &= V1Known.Zero[i] && V2Known.Zero[i];
38733             continue;
38734           }
38735           if (M == (int)i) {
38736             IsBlend &= V2Known.Zero[i] || V1Known.One[i];
38737             continue;
38738           }
38739           if (M == (int)(i + NumMaskElts)) {
38740             IsBlend &= V1Known.Zero[i] || V2Known.One[i];
38741             continue;
38742           }
38743           llvm_unreachable("will not get here.");
38744         }
38745         if (IsBlend) {
38746           Shuffle = ISD::OR;
38747           SrcVT = DstVT = MaskVT.changeTypeToInteger();
38748           return true;
38749         }
38750       }
38751     }
38752   }
38753 
38754   return false;
38755 }
38756 
38757 static bool matchBinaryPermuteShuffle(
38758     MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
38759     bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
38760     const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
38761     unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
38762   unsigned NumMaskElts = Mask.size();
38763   unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
38764 
38765   // Attempt to match against VALIGND/VALIGNQ rotate.
38766   if (AllowIntDomain && (EltSizeInBits == 64 || EltSizeInBits == 32) &&
38767       ((MaskVT.is128BitVector() && Subtarget.hasVLX()) ||
38768        (MaskVT.is256BitVector() && Subtarget.hasVLX()) ||
38769        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
38770     if (!isAnyZero(Mask)) {
38771       int Rotation = matchShuffleAsElementRotate(V1, V2, Mask);
38772       if (0 < Rotation) {
38773         Shuffle = X86ISD::VALIGN;
38774         if (EltSizeInBits == 64)
38775           ShuffleVT = MVT::getVectorVT(MVT::i64, MaskVT.getSizeInBits() / 64);
38776         else
38777           ShuffleVT = MVT::getVectorVT(MVT::i32, MaskVT.getSizeInBits() / 32);
38778         PermuteImm = Rotation;
38779         return true;
38780       }
38781     }
38782   }
38783 
38784   // Attempt to match against PALIGNR byte rotate.
38785   if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
38786                          (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
38787                          (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
38788     int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
38789     if (0 < ByteRotation) {
38790       Shuffle = X86ISD::PALIGNR;
38791       ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
38792       PermuteImm = ByteRotation;
38793       return true;
38794     }
38795   }
38796 
38797   // Attempt to combine to X86ISD::BLENDI.
38798   if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
38799                             (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
38800       (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
38801     uint64_t BlendMask = 0;
38802     bool ForceV1Zero = false, ForceV2Zero = false;
38803     SmallVector<int, 8> TargetMask(Mask);
38804     if (matchShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
38805                             ForceV2Zero, BlendMask)) {
38806       if (MaskVT == MVT::v16i16) {
38807         // We can only use v16i16 PBLENDW if the lanes are repeated.
38808         SmallVector<int, 8> RepeatedMask;
38809         if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
38810                                         RepeatedMask)) {
38811           assert(RepeatedMask.size() == 8 &&
38812                  "Repeated mask size doesn't match!");
38813           PermuteImm = 0;
38814           for (int i = 0; i < 8; ++i)
38815             if (RepeatedMask[i] >= 8)
38816               PermuteImm |= 1 << i;
38817           V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
38818           V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
38819           Shuffle = X86ISD::BLENDI;
38820           ShuffleVT = MaskVT;
38821           return true;
38822         }
38823       } else {
38824         V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
38825         V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
38826         PermuteImm = (unsigned)BlendMask;
38827         Shuffle = X86ISD::BLENDI;
38828         ShuffleVT = MaskVT;
38829         return true;
38830       }
38831     }
38832   }
38833 
38834   // Attempt to combine to INSERTPS, but only if it has elements that need to
38835   // be set to zero.
38836   if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
38837       MaskVT.is128BitVector() && isAnyZero(Mask) &&
38838       matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
38839     Shuffle = X86ISD::INSERTPS;
38840     ShuffleVT = MVT::v4f32;
38841     return true;
38842   }
38843 
38844   // Attempt to combine to SHUFPD.
38845   if (AllowFloatDomain && EltSizeInBits == 64 &&
38846       ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
38847        (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
38848        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
38849     bool ForceV1Zero = false, ForceV2Zero = false;
38850     if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
38851                                PermuteImm, Mask, Zeroable)) {
38852       V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
38853       V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
38854       Shuffle = X86ISD::SHUFP;
38855       ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
38856       return true;
38857     }
38858   }
38859 
38860   // Attempt to combine to SHUFPS.
38861   if (AllowFloatDomain && EltSizeInBits == 32 &&
38862       ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
38863        (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
38864        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
38865     SmallVector<int, 4> RepeatedMask;
38866     if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
38867       // Match each half of the repeated mask to determine if it's just
38868       // referencing one of the vectors, is zeroable or entirely undef.
38869       auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
38870         int M0 = RepeatedMask[Offset];
38871         int M1 = RepeatedMask[Offset + 1];
38872 
38873         if (isUndefInRange(RepeatedMask, Offset, 2)) {
38874           return DAG.getUNDEF(MaskVT);
38875         } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
38876           S0 = (SM_SentinelUndef == M0 ? -1 : 0);
38877           S1 = (SM_SentinelUndef == M1 ? -1 : 1);
38878           return getZeroVector(MaskVT, Subtarget, DAG, DL);
38879         } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
38880           S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
38881           S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
38882           return V1;
38883         } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
38884           S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
38885           S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
38886           return V2;
38887         }
38888 
38889         return SDValue();
38890       };
38891 
38892       int ShufMask[4] = {-1, -1, -1, -1};
38893       SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
38894       SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
38895 
38896       if (Lo && Hi) {
38897         V1 = Lo;
38898         V2 = Hi;
38899         Shuffle = X86ISD::SHUFP;
38900         ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
38901         PermuteImm = getV4X86ShuffleImm(ShufMask);
38902         return true;
38903       }
38904     }
38905   }
38906 
38907   // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
38908   if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
38909       MaskVT.is128BitVector() &&
38910       matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
38911     Shuffle = X86ISD::INSERTPS;
38912     ShuffleVT = MVT::v4f32;
38913     return true;
38914   }
38915 
38916   return false;
38917 }
38918 
38919 static SDValue combineX86ShuffleChainWithExtract(
38920     ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
38921     bool HasVariableMask, bool AllowVariableCrossLaneMask,
38922     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
38923     const X86Subtarget &Subtarget);
38924 
38925 /// Combine an arbitrary chain of shuffles into a single instruction if
38926 /// possible.
38927 ///
38928 /// This is the leaf of the recursive combine below. When we have found some
38929 /// chain of single-use x86 shuffle instructions and accumulated the combined
38930 /// shuffle mask represented by them, this will try to pattern match that mask
38931 /// into either a single instruction if there is a special purpose instruction
38932 /// for this operation, or into a PSHUFB instruction which is a fully general
38933 /// instruction but should only be used to replace chains over a certain depth.
38934 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
38935                                       ArrayRef<int> BaseMask, int Depth,
38936                                       bool HasVariableMask,
38937                                       bool AllowVariableCrossLaneMask,
38938                                       bool AllowVariablePerLaneMask,
38939                                       SelectionDAG &DAG,
38940                                       const X86Subtarget &Subtarget) {
38941   assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
38942   assert((Inputs.size() == 1 || Inputs.size() == 2) &&
38943          "Unexpected number of shuffle inputs!");
38944 
38945   SDLoc DL(Root);
38946   MVT RootVT = Root.getSimpleValueType();
38947   unsigned RootSizeInBits = RootVT.getSizeInBits();
38948   unsigned NumRootElts = RootVT.getVectorNumElements();
38949 
38950   // Canonicalize shuffle input op to the requested type.
38951   auto CanonicalizeShuffleInput = [&](MVT VT, SDValue Op) {
38952     if (VT.getSizeInBits() > Op.getValueSizeInBits())
38953       Op = widenSubVector(Op, false, Subtarget, DAG, DL, VT.getSizeInBits());
38954     else if (VT.getSizeInBits() < Op.getValueSizeInBits())
38955       Op = extractSubVector(Op, 0, DAG, DL, VT.getSizeInBits());
38956     return DAG.getBitcast(VT, Op);
38957   };
38958 
38959   // Find the inputs that enter the chain. Note that multiple uses are OK
38960   // here; we're not going to remove the operands we find.
38961   bool UnaryShuffle = (Inputs.size() == 1);
38962   SDValue V1 = peekThroughBitcasts(Inputs[0]);
38963   SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
38964                              : peekThroughBitcasts(Inputs[1]));
38965 
38966   MVT VT1 = V1.getSimpleValueType();
38967   MVT VT2 = V2.getSimpleValueType();
38968   assert((RootSizeInBits % VT1.getSizeInBits()) == 0 &&
38969          (RootSizeInBits % VT2.getSizeInBits()) == 0 && "Vector size mismatch");
38970 
38971   SDValue Res;
38972 
38973   unsigned NumBaseMaskElts = BaseMask.size();
38974   if (NumBaseMaskElts == 1) {
38975     assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
38976     return CanonicalizeShuffleInput(RootVT, V1);
38977   }
38978 
38979   bool OptForSize = DAG.shouldOptForSize();
38980   unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
38981   bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
38982                      (RootVT.isFloatingPoint() && Depth >= 1) ||
38983                      (RootVT.is256BitVector() && !Subtarget.hasAVX2());
38984 
38985   // Don't combine if we are an AVX512/EVEX target and the mask element size
38986   // is different from the root element size - this would prevent writemasks
38987   // from being reused.
38988   bool IsMaskedShuffle = false;
38989   if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) {
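          // A single VSELECT user with an i1 vector condition means this
          // shuffle feeds an AVX512 write-masked operation.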
38990     if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT &&
38991         Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
38992       IsMaskedShuffle = true;
38993     }
38994   }
38995 
38996   // If we are shuffling a splat (and not introducing zeros) then we can just
38997   // use it directly. This works for smaller elements as well, as they already
38998   // repeat across each mask element.
38999   if (UnaryShuffle && !isAnyZero(BaseMask) &&
39000       V1.getValueSizeInBits() >= RootSizeInBits &&
39001       (BaseMaskEltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
39002       DAG.isSplatValue(V1, /*AllowUndefs*/ false)) {
39003     return CanonicalizeShuffleInput(RootVT, V1);
39004   }
39005 
39006   SmallVector<int, 64> Mask(BaseMask);
39007 
39008   // See if the shuffle is a hidden identity shuffle - repeated args in HOPs
39009   // etc. can be simplified.
39010   if (VT1 == VT2 && VT1.getSizeInBits() == RootSizeInBits && VT1.isVector()) {
39011     SmallVector<int> ScaledMask, IdentityMask;
39012     unsigned NumElts = VT1.getVectorNumElements();
39013     if (Mask.size() <= NumElts &&
39014         scaleShuffleElements(Mask, NumElts, ScaledMask)) {
39015       for (unsigned i = 0; i != NumElts; ++i)
39016         IdentityMask.push_back(i);
39017       if (isTargetShuffleEquivalent(RootVT, ScaledMask, IdentityMask, DAG, V1,
39018                                     V2))
39019         return CanonicalizeShuffleInput(RootVT, V1);
39020     }
39021   }
39022 
39023   // Handle 128/256-bit lane shuffles of 512-bit vectors.
39024   if (RootVT.is512BitVector() &&
39025       (NumBaseMaskElts == 2 || NumBaseMaskElts == 4)) {
39026     // If the upper subvectors are zeroable, then an extract+insert is better
39027     // than using X86ISD::SHUF128. The insertion is free, even if it has
39028     // to zero the upper subvectors.
39029     if (isUndefOrZeroInRange(Mask, 1, NumBaseMaskElts - 1)) {
39030       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
39031         return SDValue(); // Nothing to do!
39032       assert(isInRange(Mask[0], 0, NumBaseMaskElts) &&
39033              "Unexpected lane shuffle");
39034       Res = CanonicalizeShuffleInput(RootVT, V1);
39035       unsigned SubIdx = Mask[0] * (NumRootElts / NumBaseMaskElts);
39036       bool UseZero = isAnyZero(Mask);
39037       Res = extractSubVector(Res, SubIdx, DAG, DL, BaseMaskEltSizeInBits);
39038       return widenSubVector(Res, UseZero, Subtarget, DAG, DL, RootSizeInBits);
39039     }
39040 
39041     // Narrow shuffle mask to v4x128.
39042     SmallVector<int, 4> ScaledMask;
39043     assert((BaseMaskEltSizeInBits % 128) == 0 && "Illegal mask size");
39044     narrowShuffleMaskElts(BaseMaskEltSizeInBits / 128, Mask, ScaledMask);
39045 
39046     // Try to lower to vshuf64x2/vshuf32x4.
39047     auto MatchSHUF128 = [&](MVT ShuffleVT, const SDLoc &DL,
39048                             ArrayRef<int> ScaledMask, SDValue V1, SDValue V2,
39049                             SelectionDAG &DAG) {
39050       unsigned PermMask = 0;
39051       // Ensure elements came from the same Op.
39052       SDValue Ops[2] = {DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT)};
39053       for (int i = 0; i < 4; ++i) {
39054         assert(ScaledMask[i] >= -1 && "Illegal shuffle sentinel value");
39055         if (ScaledMask[i] < 0)
39056           continue;
39057 
39058         SDValue Op = ScaledMask[i] >= 4 ? V2 : V1;
39059         unsigned OpIndex = i / 2;
39060         if (Ops[OpIndex].isUndef())
39061           Ops[OpIndex] = Op;
39062         else if (Ops[OpIndex] != Op)
39063           return SDValue();
39064 
39065         // Convert the 128-bit shuffle mask selection values into 128-bit
39066         // selection bits defined by a vshuf64x2 instruction's immediate control
39067         // byte.
39068         PermMask |= (ScaledMask[i] % 4) << (i * 2);
39069       }
39070 
39071       return DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
39072                          CanonicalizeShuffleInput(ShuffleVT, Ops[0]),
39073                          CanonicalizeShuffleInput(ShuffleVT, Ops[1]),
39074                          DAG.getTargetConstant(PermMask, DL, MVT::i8));
39075     };
39076 
39077     // FIXME: Is there a better way to do this? is256BitLaneRepeatedShuffleMask
39078     // doesn't work because our mask is for 128 bits and we don't have an MVT
39079     // to match that.
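          // PreferPERMQ: both 256-bit halves select the same pair of 128-bit
          // lanes, so a lane-repeated unary permute (VPERMQ/VPERMPD style) is a
          // better fit than SHUF128.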
39080     bool PreferPERMQ = UnaryShuffle && isUndefOrInRange(ScaledMask[0], 0, 2) &&
39081                        isUndefOrInRange(ScaledMask[1], 0, 2) &&
39082                        isUndefOrInRange(ScaledMask[2], 2, 4) &&
39083                        isUndefOrInRange(ScaledMask[3], 2, 4) &&
39084                        (ScaledMask[0] < 0 || ScaledMask[2] < 0 ||
39085                         ScaledMask[0] == (ScaledMask[2] % 2)) &&
39086                        (ScaledMask[1] < 0 || ScaledMask[3] < 0 ||
39087                         ScaledMask[1] == (ScaledMask[3] % 2));
39088 
39089     if (!isAnyZero(ScaledMask) && !PreferPERMQ) {
39090       if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
39091         return SDValue(); // Nothing to do!
39092       MVT ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
39093       if (SDValue V = MatchSHUF128(ShuffleVT, DL, ScaledMask, V1, V2, DAG))
39094         return DAG.getBitcast(RootVT, V);
39095     }
39096   }
39097 
39098   // Handle 128-bit lane shuffles of 256-bit vectors.
39099   if (RootVT.is256BitVector() && NumBaseMaskElts == 2) {
39100     // If the upper half is zeroable, then an extract+insert is better
39101     // than using X86ISD::VPERM2X128. The insertion is free, even if it has to
39102     // zero the upper half.
39103     if (isUndefOrZero(Mask[1])) {
39104       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
39105         return SDValue(); // Nothing to do!
39106       assert(isInRange(Mask[0], 0, 2) && "Unexpected lane shuffle");
39107       Res = CanonicalizeShuffleInput(RootVT, V1);
39108       Res = extract128BitVector(Res, Mask[0] * (NumRootElts / 2), DAG, DL);
39109       return widenSubVector(Res, Mask[1] == SM_SentinelZero, Subtarget, DAG, DL,
39110                             256);
39111     }
39112 
39113     // If we're inserting the low subvector, an insert-subvector 'concat'
39114     // pattern is quicker than VPERM2X128.
39115     // TODO: Add AVX2 support instead of VPERMQ/VPERMPD.
39116     if (BaseMask[0] == 0 && (BaseMask[1] == 0 || BaseMask[1] == 2) &&
39117         !Subtarget.hasAVX2()) {
39118       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
39119         return SDValue(); // Nothing to do!
39120       SDValue Lo = CanonicalizeShuffleInput(RootVT, V1);
39121       SDValue Hi = CanonicalizeShuffleInput(RootVT, BaseMask[1] == 0 ? V1 : V2);
39122       Hi = extractSubVector(Hi, 0, DAG, DL, 128);
39123       return insertSubVector(Lo, Hi, NumRootElts / 2, DAG, DL, 128);
39124     }
39125 
39126     if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
39127       return SDValue(); // Nothing to do!
39128 
39129     // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
39130     // we need to use the zeroing feature.
39131     // Prefer blends for sequential shuffles unless we are optimizing for size.
39132     if (UnaryShuffle &&
39133         !(Subtarget.hasAVX2() && isUndefOrInRange(Mask, 0, 2)) &&
39134         (OptForSize || !isSequentialOrUndefOrZeroInRange(Mask, 0, 2, 0))) {
39135       unsigned PermMask = 0;
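            // VPERM2X128 immediate: the low bits of each nibble select the
            // source 128-bit half for that destination half; bit 3 (0x8) zeroes
            // it instead.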
39136       PermMask |= ((Mask[0] < 0 ? 0x8 : (Mask[0] & 1)) << 0);
39137       PermMask |= ((Mask[1] < 0 ? 0x8 : (Mask[1] & 1)) << 4);
39138       return DAG.getNode(
39139           X86ISD::VPERM2X128, DL, RootVT, CanonicalizeShuffleInput(RootVT, V1),
39140           DAG.getUNDEF(RootVT), DAG.getTargetConstant(PermMask, DL, MVT::i8));
39141     }
39142 
39143     if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
39144       return SDValue(); // Nothing to do!
39145 
39146     // TODO - handle AVX512VL cases with X86ISD::SHUF128.
39147     if (!UnaryShuffle && !IsMaskedShuffle) {
39148       assert(llvm::all_of(Mask, [](int M) { return 0 <= M && M < 4; }) &&
39149              "Unexpected shuffle sentinel value");
39150       // Prefer blends to X86ISD::VPERM2X128.
39151       if (!((Mask[0] == 0 && Mask[1] == 3) || (Mask[0] == 2 && Mask[1] == 1))) {
39152         unsigned PermMask = 0;
39153         PermMask |= ((Mask[0] & 3) << 0);
39154         PermMask |= ((Mask[1] & 3) << 4);
39155         SDValue LHS = isInRange(Mask[0], 0, 2) ? V1 : V2;
39156         SDValue RHS = isInRange(Mask[1], 0, 2) ? V1 : V2;
39157         return DAG.getNode(X86ISD::VPERM2X128, DL, RootVT,
39158                           CanonicalizeShuffleInput(RootVT, LHS),
39159                           CanonicalizeShuffleInput(RootVT, RHS),
39160                           DAG.getTargetConstant(PermMask, DL, MVT::i8));
39161       }
39162     }
39163   }
39164 
39165   // For masks that have been widened to 128-bit elements or more,
39166   // narrow back down to 64-bit elements.
39167   if (BaseMaskEltSizeInBits > 64) {
39168     assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
39169     int MaskScale = BaseMaskEltSizeInBits / 64;
39170     SmallVector<int, 64> ScaledMask;
39171     narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
39172     Mask = std::move(ScaledMask);
39173   }
39174 
39175   // For masked shuffles, we're trying to match the root width for better
39176   // writemask folding; attempt to scale the mask.
39177   // TODO - variable shuffles might need this to be widened again.
39178   if (IsMaskedShuffle && NumRootElts > Mask.size()) {
39179     assert((NumRootElts % Mask.size()) == 0 && "Illegal mask size");
39180     int MaskScale = NumRootElts / Mask.size();
39181     SmallVector<int, 64> ScaledMask;
39182     narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
39183     Mask = std::move(ScaledMask);
39184   }
39185 
39186   unsigned NumMaskElts = Mask.size();
39187   unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
39188 
39189   // Determine the effective mask value type.
39190   FloatDomain &= (32 <= MaskEltSizeInBits);
39191   MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
39192                            : MVT::getIntegerVT(MaskEltSizeInBits);
39193   MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
39194 
39195   // Only allow legal mask types.
39196   if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
39197     return SDValue();
39198 
39199   // Attempt to match the mask against known shuffle patterns.
39200   MVT ShuffleSrcVT, ShuffleVT;
39201   unsigned Shuffle, PermuteImm;
39202 
39203   // Which shuffle domains are permitted?
39204   // Permit domain crossing at higher combine depths.
39205   // TODO: Should we indicate which domain is preferred if both are allowed?
39206   bool AllowFloatDomain = FloatDomain || (Depth >= 3);
39207   bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
39208                         (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
39209 
39210   // Determine zeroable mask elements.
39211   APInt KnownUndef, KnownZero;
39212   resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
39213   APInt Zeroable = KnownUndef | KnownZero;
39214 
39215   if (UnaryShuffle) {
39216     // Attempt to match against broadcast-from-vector.
39217     // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
39218     if ((Subtarget.hasAVX2() ||
39219          (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits)) &&
39220         (!IsMaskedShuffle || NumRootElts == NumMaskElts)) {
39221       if (isUndefOrEqual(Mask, 0)) {
39222         if (V1.getValueType() == MaskVT &&
39223             V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
39224             X86::mayFoldLoad(V1.getOperand(0), Subtarget)) {
39225           if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
39226             return SDValue(); // Nothing to do!
39227           Res = V1.getOperand(0);
39228           Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
39229           return DAG.getBitcast(RootVT, Res);
39230         }
39231         if (Subtarget.hasAVX2()) {
39232           if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
39233             return SDValue(); // Nothing to do!
39234           Res = CanonicalizeShuffleInput(MaskVT, V1);
39235           Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
39236           return DAG.getBitcast(RootVT, Res);
39237         }
39238       }
39239     }
39240 
39241     if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, V1,
39242                           DAG, Subtarget, Shuffle, ShuffleSrcVT, ShuffleVT) &&
39243         (!IsMaskedShuffle ||
39244          (NumRootElts == ShuffleVT.getVectorNumElements()))) {
39245       if (Depth == 0 && Root.getOpcode() == Shuffle)
39246         return SDValue(); // Nothing to do!
39247       Res = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
39248       Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
39249       return DAG.getBitcast(RootVT, Res);
39250     }
39251 
39252     if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
39253                                  AllowIntDomain, DAG, Subtarget, Shuffle, ShuffleVT,
39254                                  PermuteImm) &&
39255         (!IsMaskedShuffle ||
39256          (NumRootElts == ShuffleVT.getVectorNumElements()))) {
39257       if (Depth == 0 && Root.getOpcode() == Shuffle)
39258         return SDValue(); // Nothing to do!
39259       Res = CanonicalizeShuffleInput(ShuffleVT, V1);
39260       Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
39261                         DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
39262       return DAG.getBitcast(RootVT, Res);
39263     }
39264   }
39265 
39266   // Attempt to combine to INSERTPS, but only if the inserted element has come
39267   // from a scalar.
39268   // TODO: Handle other insertions here as well?
39269   if (!UnaryShuffle && AllowFloatDomain && RootSizeInBits == 128 &&
39270       Subtarget.hasSSE41() &&
39271       !isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG)) {
39272     if (MaskEltSizeInBits == 32) {
39273       SDValue SrcV1 = V1, SrcV2 = V2;
39274       if (matchShuffleAsInsertPS(SrcV1, SrcV2, PermuteImm, Zeroable, Mask,
39275                                  DAG) &&
39276           SrcV2.getOpcode() == ISD::SCALAR_TO_VECTOR) {
39277         if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
39278           return SDValue(); // Nothing to do!
39279         Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
39280                           CanonicalizeShuffleInput(MVT::v4f32, SrcV1),
39281                           CanonicalizeShuffleInput(MVT::v4f32, SrcV2),
39282                           DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
39283         return DAG.getBitcast(RootVT, Res);
39284       }
39285     }
39286     if (MaskEltSizeInBits == 64 &&
39287         isTargetShuffleEquivalent(MaskVT, Mask, {0, 2}, DAG) &&
39288         V2.getOpcode() == ISD::SCALAR_TO_VECTOR &&
39289         V2.getScalarValueSizeInBits() <= 32) {
39290       if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
39291         return SDValue(); // Nothing to do!
39292       PermuteImm = (/*DstIdx*/ 2 << 4) | (/*SrcIdx*/ 0 << 0);
39293       Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
39294                         CanonicalizeShuffleInput(MVT::v4f32, V1),
39295                         CanonicalizeShuffleInput(MVT::v4f32, V2),
39296                         DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
39297       return DAG.getBitcast(RootVT, Res);
39298     }
39299   }
39300 
39301   SDValue NewV1 = V1; // Save operands in case early exit happens.
39302   SDValue NewV2 = V2;
39303   if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
39304                          NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
39305                          ShuffleVT, UnaryShuffle) &&
39306       (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
39307     if (Depth == 0 && Root.getOpcode() == Shuffle)
39308       return SDValue(); // Nothing to do!
39309     NewV1 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV1);
39310     NewV2 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV2);
39311     Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
39312     return DAG.getBitcast(RootVT, Res);
39313   }
39314 
39315   NewV1 = V1; // Save operands in case early exit happens.
39316   NewV2 = V2;
39317   if (matchBinaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
39318                                 AllowIntDomain, NewV1, NewV2, DL, DAG,
39319                                 Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
39320       (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
39321     if (Depth == 0 && Root.getOpcode() == Shuffle)
39322       return SDValue(); // Nothing to do!
39323     NewV1 = CanonicalizeShuffleInput(ShuffleVT, NewV1);
39324     NewV2 = CanonicalizeShuffleInput(ShuffleVT, NewV2);
39325     Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
39326                       DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
39327     return DAG.getBitcast(RootVT, Res);
39328   }
39329 
39330   // Typically from here on, we need an integer version of MaskVT.
39331   MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
39332   IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
39333 
39334   // Annoyingly, SSE4A instructions don't map into the above match helpers.
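        // (EXTRQI extracts BitLen bits starting at bit BitIdx from the low 64
        // bits of its source; INSERTQI inserts BitLen bits from the low 64
        // bits of the second source at bit BitIdx of the first.)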
39335   if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
39336     uint64_t BitLen, BitIdx;
39337     if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
39338                             Zeroable)) {
39339       if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
39340         return SDValue(); // Nothing to do!
39341       V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
39342       Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
39343                         DAG.getTargetConstant(BitLen, DL, MVT::i8),
39344                         DAG.getTargetConstant(BitIdx, DL, MVT::i8));
39345       return DAG.getBitcast(RootVT, Res);
39346     }
39347 
39348     if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
39349       if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
39350         return SDValue(); // Nothing to do!
39351       V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
39352       V2 = CanonicalizeShuffleInput(IntMaskVT, V2);
39353       Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
39354                         DAG.getTargetConstant(BitLen, DL, MVT::i8),
39355                         DAG.getTargetConstant(BitIdx, DL, MVT::i8));
39356       return DAG.getBitcast(RootVT, Res);
39357     }
39358   }
39359 
39360   // Match shuffle against TRUNCATE patterns.
39361   if (AllowIntDomain && MaskEltSizeInBits < 64 && Subtarget.hasAVX512()) {
39362     // Match against a VTRUNC instruction, accounting for src/dst sizes.
39363     if (matchShuffleAsVTRUNC(ShuffleSrcVT, ShuffleVT, IntMaskVT, Mask, Zeroable,
39364                              Subtarget)) {
39365       bool IsTRUNCATE = ShuffleVT.getVectorNumElements() ==
39366                         ShuffleSrcVT.getVectorNumElements();
39367       unsigned Opc =
39368           IsTRUNCATE ? (unsigned)ISD::TRUNCATE : (unsigned)X86ISD::VTRUNC;
39369       if (Depth == 0 && Root.getOpcode() == Opc)
39370         return SDValue(); // Nothing to do!
39371       V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
39372       Res = DAG.getNode(Opc, DL, ShuffleVT, V1);
39373       if (ShuffleVT.getSizeInBits() < RootSizeInBits)
39374         Res = widenSubVector(Res, true, Subtarget, DAG, DL, RootSizeInBits);
39375       return DAG.getBitcast(RootVT, Res);
39376     }
39377 
39378     // Do we need a more general binary truncation pattern?
39379     if (RootSizeInBits < 512 &&
39380         ((RootVT.is256BitVector() && Subtarget.useAVX512Regs()) ||
39381          (RootVT.is128BitVector() && Subtarget.hasVLX())) &&
39382         (MaskEltSizeInBits > 8 || Subtarget.hasBWI()) &&
39383         isSequentialOrUndefInRange(Mask, 0, NumMaskElts, 0, 2)) {
39384       // Bail if this was already a truncation or PACK node.
39385       // We sometimes fail to match PACK if we demand known undef elements.
39386       if (Depth == 0 && (Root.getOpcode() == ISD::TRUNCATE ||
39387                          Root.getOpcode() == X86ISD::PACKSS ||
39388                          Root.getOpcode() == X86ISD::PACKUS))
39389         return SDValue(); // Nothing to do!
39390       ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
39391       ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts / 2);
39392       V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
39393       V2 = CanonicalizeShuffleInput(ShuffleSrcVT, V2);
39394       ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
39395       ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts);
39396       Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShuffleSrcVT, V1, V2);
39397       Res = DAG.getNode(ISD::TRUNCATE, DL, IntMaskVT, Res);
39398       return DAG.getBitcast(RootVT, Res);
39399     }
39400   }
39401 
39402   // Don't try to re-form single instruction chains under any circumstances now
39403   // that we've done encoding canonicalization for them.
39404   if (Depth < 1)
39405     return SDValue();
39406 
39407   // Depth threshold above which we can efficiently use variable mask shuffles.
39408   int VariableCrossLaneShuffleDepth =
39409       Subtarget.hasFastVariableCrossLaneShuffle() ? 1 : 2;
39410   int VariablePerLaneShuffleDepth =
39411       Subtarget.hasFastVariablePerLaneShuffle() ? 1 : 2;
39412   AllowVariableCrossLaneMask &=
39413       (Depth >= VariableCrossLaneShuffleDepth) || HasVariableMask;
39414   AllowVariablePerLaneMask &=
39415       (Depth >= VariablePerLaneShuffleDepth) || HasVariableMask;
39416   // VPERMI2W/VPERMI2B are 3 uops on Skylake and Icelake so we require a
39417   // higher depth before combining them.
39418   bool AllowBWIVPERMV3 =
39419       (Depth >= (VariableCrossLaneShuffleDepth + 2) || HasVariableMask);
39420 
39421   bool MaskContainsZeros = isAnyZero(Mask);
39422 
39423   if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
39424     // If we have a single input lane-crossing shuffle then lower to VPERMV.
39425     if (UnaryShuffle && AllowVariableCrossLaneMask && !MaskContainsZeros) {
39426       if (Subtarget.hasAVX2() &&
39427           (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) {
39428         SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
39429         Res = CanonicalizeShuffleInput(MaskVT, V1);
39430         Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
39431         return DAG.getBitcast(RootVT, Res);
39432       }
39433       // AVX512 variants (non-VLX will pad to 512-bit shuffles).
39434       if ((Subtarget.hasAVX512() &&
39435            (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
39436             MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
39437           (Subtarget.hasBWI() &&
39438            (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
39439           (Subtarget.hasVBMI() &&
39440            (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8))) {
39441         V1 = CanonicalizeShuffleInput(MaskVT, V1);
39442         V2 = DAG.getUNDEF(MaskVT);
39443         Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
39444         return DAG.getBitcast(RootVT, Res);
39445       }
39446     }
39447 
39448     // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
39449     // vector as the second source (non-VLX will pad to 512-bit shuffles).
39450     if (UnaryShuffle && AllowVariableCrossLaneMask &&
39451         ((Subtarget.hasAVX512() &&
39452           (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
39453            MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
39454            MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32 ||
39455            MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
39456          (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
39457           (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
39458          (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
39459           (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
39460       // Adjust shuffle mask - replace SM_SentinelZero with second source index.
39461       for (unsigned i = 0; i != NumMaskElts; ++i)
39462         if (Mask[i] == SM_SentinelZero)
39463           Mask[i] = NumMaskElts + i;
39464       V1 = CanonicalizeShuffleInput(MaskVT, V1);
39465       V2 = getZeroVector(MaskVT, Subtarget, DAG, DL);
39466       Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
39467       return DAG.getBitcast(RootVT, Res);
39468     }
39469 
39470     // If that failed and either input is extracted then try to combine as a
39471     // shuffle with the larger type.
39472     if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
39473             Inputs, Root, BaseMask, Depth, HasVariableMask,
39474             AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG,
39475             Subtarget))
39476       return WideShuffle;
39477 
39478     // If we have a dual input lane-crossing shuffle then lower to VPERMV3,
39479     // (non-VLX will pad to 512-bit shuffles).
39480     if (AllowVariableCrossLaneMask && !MaskContainsZeros &&
39481         ((Subtarget.hasAVX512() &&
39482           (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
39483            MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
39484            MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32 ||
39485            MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
39486          (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
39487           (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
39488          (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
39489           (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
39490       V1 = CanonicalizeShuffleInput(MaskVT, V1);
39491       V2 = CanonicalizeShuffleInput(MaskVT, V2);
39492       Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
39493       return DAG.getBitcast(RootVT, Res);
39494     }
39495     return SDValue();
39496   }
39497 
39498   // See if we can combine a single input shuffle with zeros to a bit-mask,
39499   // which is much simpler than any shuffle.
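        // Illustrative example: a v4i32 mask of <0, Z, 2, Z> (Z = zeroable)
        // becomes AND(V1, <-1, 0, -1, 0>).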
39500   if (UnaryShuffle && MaskContainsZeros && AllowVariablePerLaneMask &&
39501       isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
39502       DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
39503     APInt Zero = APInt::getZero(MaskEltSizeInBits);
39504     APInt AllOnes = APInt::getAllOnes(MaskEltSizeInBits);
39505     APInt UndefElts(NumMaskElts, 0);
39506     SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
39507     for (unsigned i = 0; i != NumMaskElts; ++i) {
39508       int M = Mask[i];
39509       if (M == SM_SentinelUndef) {
39510         UndefElts.setBit(i);
39511         continue;
39512       }
39513       if (M == SM_SentinelZero)
39514         continue;
39515       EltBits[i] = AllOnes;
39516     }
39517     SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
39518     Res = CanonicalizeShuffleInput(MaskVT, V1);
39519     unsigned AndOpcode =
39520         MaskVT.isFloatingPoint() ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
39521     Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
39522     return DAG.getBitcast(RootVT, Res);
39523   }
39524 
39525   // If we have a single input shuffle with different shuffle patterns in the
39526   // 128-bit lanes then lower to VPERMILPS with a variable mask.
39527   // TODO: Combine other mask types at higher depths.
39528   if (UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
39529       ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
39530        (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
39531     SmallVector<SDValue, 16> VPermIdx;
39532     for (int M : Mask) {
39533       SDValue Idx =
39534           M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
39535       VPermIdx.push_back(Idx);
39536     }
39537     SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
39538     Res = CanonicalizeShuffleInput(MaskVT, V1);
39539     Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
39540     return DAG.getBitcast(RootVT, Res);
39541   }
39542 
39543   // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
39544   // to VPERMIL2PD/VPERMIL2PS.
39545   if (AllowVariablePerLaneMask && Subtarget.hasXOP() &&
39546       (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
39547        MaskVT == MVT::v8f32)) {
39548     // VPERMIL2 Operation.
39549     // Bits[3] - Match Bit.
39550     // Bits[2:1] - (Per Lane) PD Shuffle Mask.
39551     // Bits[2:0] - (Per Lane) PS Shuffle Mask.
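          // Illustrative example: for v8f32, mask element M = 9 (element 1 of
          // the second source) becomes selector index 5: bit[2] selects the
          // second source and bits[1:0] select element 1 within the lane.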
39552     unsigned NumLanes = MaskVT.getSizeInBits() / 128;
39553     unsigned NumEltsPerLane = NumMaskElts / NumLanes;
39554     SmallVector<int, 8> VPerm2Idx;
39555     unsigned M2ZImm = 0;
39556     for (int M : Mask) {
39557       if (M == SM_SentinelUndef) {
39558         VPerm2Idx.push_back(-1);
39559         continue;
39560       }
39561       if (M == SM_SentinelZero) {
39562         M2ZImm = 2;
39563         VPerm2Idx.push_back(8);
39564         continue;
39565       }
39566       int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
39567       Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
39568       VPerm2Idx.push_back(Index);
39569     }
39570     V1 = CanonicalizeShuffleInput(MaskVT, V1);
39571     V2 = CanonicalizeShuffleInput(MaskVT, V2);
39572     SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
39573     Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
39574                       DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
39575     return DAG.getBitcast(RootVT, Res);
39576   }
39577 
39578   // If we have 3 or more shuffle instructions or a chain involving a variable
39579   // mask, we can replace them with a single PSHUFB instruction profitably.
39580   // Intel's manuals suggest only using PSHUFB if doing so replaces 5
39581   // instructions, but in practice PSHUFB tends to be *very* fast so we're
39582   // more aggressive.
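        // (Each PSHUFB mask byte selects a byte within its own 128-bit lane via
        // bits[3:0]; a byte with the MSB set - the 0x80 below - zeroes the
        // destination byte instead.)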
39583   if (UnaryShuffle && AllowVariablePerLaneMask &&
39584       ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
39585        (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
39586        (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
39587     SmallVector<SDValue, 16> PSHUFBMask;
39588     int NumBytes = RootVT.getSizeInBits() / 8;
39589     int Ratio = NumBytes / NumMaskElts;
39590     for (int i = 0; i < NumBytes; ++i) {
39591       int M = Mask[i / Ratio];
39592       if (M == SM_SentinelUndef) {
39593         PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
39594         continue;
39595       }
39596       if (M == SM_SentinelZero) {
39597         PSHUFBMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
39598         continue;
39599       }
39600       M = Ratio * M + i % Ratio;
39601       assert((M / 16) == (i / 16) && "Lane crossing detected");
39602       PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
39603     }
39604     MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
39605     Res = CanonicalizeShuffleInput(ByteVT, V1);
39606     SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
39607     Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
39608     return DAG.getBitcast(RootVT, Res);
39609   }
39610 
39611   // With XOP, if we have a 128-bit binary input shuffle we can always combine
39612   // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
39613   // slower than PSHUFB on targets that support both.
39614   if (AllowVariablePerLaneMask && RootVT.is128BitVector() &&
39615       Subtarget.hasXOP()) {
39616     // VPPERM Mask Operation
39617     // Bits[4:0] - Byte Index (0 - 31)
39618     // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
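          // (Byte indices 0-15 select from the first source, 16-31 from the
          // second; 0x80 encodes permute op 4, i.e. zero the byte, mirroring
          // the PSHUFB convention.)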
39619     SmallVector<SDValue, 16> VPPERMMask;
39620     int NumBytes = 16;
39621     int Ratio = NumBytes / NumMaskElts;
39622     for (int i = 0; i < NumBytes; ++i) {
39623       int M = Mask[i / Ratio];
39624       if (M == SM_SentinelUndef) {
39625         VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
39626         continue;
39627       }
39628       if (M == SM_SentinelZero) {
39629         VPPERMMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
39630         continue;
39631       }
39632       M = Ratio * M + i % Ratio;
39633       VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
39634     }
39635     MVT ByteVT = MVT::v16i8;
39636     V1 = CanonicalizeShuffleInput(ByteVT, V1);
39637     V2 = CanonicalizeShuffleInput(ByteVT, V2);
39638     SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
39639     Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
39640     return DAG.getBitcast(RootVT, Res);
39641   }
39642 
39643   // If that failed and either input is extracted then try to combine as a
39644   // shuffle with the larger type.
39645   if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
39646           Inputs, Root, BaseMask, Depth, HasVariableMask,
39647           AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG, Subtarget))
39648     return WideShuffle;
39649 
39650   // If we have a dual input shuffle then lower to VPERMV3,
39651   // (non-VLX will pad to 512-bit shuffles)
39652   if (!UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
39653       ((Subtarget.hasAVX512() &&
39654         (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v8f64 ||
39655          MaskVT == MVT::v2i64 || MaskVT == MVT::v4i64 || MaskVT == MVT::v8i64 ||
39656          MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 || MaskVT == MVT::v8f32 ||
39657          MaskVT == MVT::v8i32 || MaskVT == MVT::v16f32 ||
39658          MaskVT == MVT::v16i32)) ||
39659        (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
39660         (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16 ||
39661          MaskVT == MVT::v32i16)) ||
39662        (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
39663         (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8 ||
39664          MaskVT == MVT::v64i8)))) {
39665     V1 = CanonicalizeShuffleInput(MaskVT, V1);
39666     V2 = CanonicalizeShuffleInput(MaskVT, V2);
39667     Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
39668     return DAG.getBitcast(RootVT, Res);
39669   }
39670 
39671   // Failed to find any combines.
39672   return SDValue();
39673 }
39674 
39675 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
39676 // instruction if possible.
39677 //
39678 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
39679 // type size to attempt to combine:
39680 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
39681 // -->
39682 // extract_subvector(shuffle(x,y,m2),0)
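      // Worked example: with a v4i32 root mask m1 = <0,4,1,5> and both inputs
      // extracted from the upper halves of v8i32 vectors x and y, the widened
      // v8i32 mask is m2 = <4,12,5,13,u,u,u,u> and the shuffle result is then
      // re-extracted at index 0.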
39683 static SDValue combineX86ShuffleChainWithExtract(
39684     ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
39685     bool HasVariableMask, bool AllowVariableCrossLaneMask,
39686     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
39687     const X86Subtarget &Subtarget) {
39688   unsigned NumMaskElts = BaseMask.size();
39689   unsigned NumInputs = Inputs.size();
39690   if (NumInputs == 0)
39691     return SDValue();
39692 
39693   EVT RootVT = Root.getValueType();
39694   unsigned RootSizeInBits = RootVT.getSizeInBits();
39695   assert((RootSizeInBits % NumMaskElts) == 0 && "Unexpected root shuffle mask");
39696 
39697   // Bail if we have any smaller inputs.
39698   if (llvm::any_of(Inputs, [RootSizeInBits](SDValue Input) {
39699         return Input.getValueSizeInBits() < RootSizeInBits;
39700       }))
39701     return SDValue();
39702 
39703   SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
39704   SmallVector<unsigned, 4> Offsets(NumInputs, 0);
39705 
39706   // Peek through subvectors.
39707   // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
39708   unsigned WideSizeInBits = RootSizeInBits;
39709   for (unsigned i = 0; i != NumInputs; ++i) {
39710     SDValue &Src = WideInputs[i];
39711     unsigned &Offset = Offsets[i];
39712     Src = peekThroughBitcasts(Src);
39713     EVT BaseVT = Src.getValueType();
39714     while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
39715       Offset += Src.getConstantOperandVal(1);
39716       Src = Src.getOperand(0);
39717     }
39718     WideSizeInBits = std::max(WideSizeInBits,
39719                               (unsigned)Src.getValueSizeInBits());
39720     assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
39721            "Unexpected subvector extraction");
39722     Offset /= BaseVT.getVectorNumElements();
39723     Offset *= NumMaskElts;
39724   }
39725 
39726   // Bail if we're always extracting from the lowest subvectors;
39727   // combineX86ShuffleChain should match this for the current width.
39728   if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
39729     return SDValue();
39730 
39731   unsigned Scale = WideSizeInBits / RootSizeInBits;
39732   assert((WideSizeInBits % RootSizeInBits) == 0 &&
39733          "Unexpected subvector extraction");
39734 
39735   // If the src vector types aren't the same, see if we can extend
39736   // them to match each other.
39737   // TODO: Support different scalar types?
39738   EVT WideSVT = WideInputs[0].getValueType().getScalarType();
39739   if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
39740         return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
39741                Op.getValueType().getScalarType() != WideSVT;
39742       }))
39743     return SDValue();
39744 
39745   // Create new mask for larger type.
39746   for (unsigned i = 1; i != NumInputs; ++i)
39747     Offsets[i] += i * Scale * NumMaskElts;
39748 
39749   SmallVector<int, 64> WideMask(BaseMask);
39750   for (int &M : WideMask) {
39751     if (M < 0)
39752       continue;
39753     M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
39754   }
39755   WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
39756 
39757   // Remove unused/repeated shuffle source ops.
39758   resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
39759   assert(!WideInputs.empty() && "Shuffle with no inputs detected");
39760 
39761   if (WideInputs.size() > 2)
39762     return SDValue();
39763 
39764   // Increase depth for every upper subvector we've peeked through.
39765   Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });
39766 
39767   // Attempt to combine wider chain.
39768   // TODO: Can we use a better Root?
39769   SDValue WideRoot = WideInputs.front().getValueSizeInBits() >
39770                              WideInputs.back().getValueSizeInBits()
39771                          ? WideInputs.front()
39772                          : WideInputs.back();
39773   if (SDValue WideShuffle =
39774           combineX86ShuffleChain(WideInputs, WideRoot, WideMask, Depth,
39775                                  HasVariableMask, AllowVariableCrossLaneMask,
39776                                  AllowVariablePerLaneMask, DAG, Subtarget)) {
39777     WideShuffle =
39778         extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
39779     return DAG.getBitcast(RootVT, WideShuffle);
39780   }
39781   return SDValue();
39782 }
39783 
39784 // Canonicalize the combined shuffle mask chain with horizontal ops.
39785 // NOTE: This may update the Ops and Mask.
39786 static SDValue canonicalizeShuffleMaskWithHorizOp(
39787     MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
39788     unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
39789     const X86Subtarget &Subtarget) {
39790   if (Mask.empty() || Ops.empty())
39791     return SDValue();
39792 
39793   SmallVector<SDValue> BC;
39794   for (SDValue Op : Ops)
39795     BC.push_back(peekThroughBitcasts(Op));
39796 
39797   // All ops must be the same horizop + type.
39798   SDValue BC0 = BC[0];
39799   EVT VT0 = BC0.getValueType();
39800   unsigned Opcode0 = BC0.getOpcode();
39801   if (VT0.getSizeInBits() != RootSizeInBits || llvm::any_of(BC, [&](SDValue V) {
39802         return V.getOpcode() != Opcode0 || V.getValueType() != VT0;
39803       }))
39804     return SDValue();
39805 
39806   bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
39807                   Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB);
39808   bool isPack = (Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS);
39809   if (!isHoriz && !isPack)
39810     return SDValue();
39811 
39812   // Do all ops have a single use?
39813   bool OneUseOps = llvm::all_of(Ops, [](SDValue Op) {
39814     return Op.hasOneUse() &&
39815            peekThroughBitcasts(Op) == peekThroughOneUseBitcasts(Op);
39816   });
39817 
39818   int NumElts = VT0.getVectorNumElements();
39819   int NumLanes = VT0.getSizeInBits() / 128;
39820   int NumEltsPerLane = NumElts / NumLanes;
39821   int NumHalfEltsPerLane = NumEltsPerLane / 2;
39822   MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
39823   unsigned EltSizeInBits = RootSizeInBits / Mask.size();
39824 
39825   if (NumEltsPerLane >= 4 &&
39826       (isPack || shouldUseHorizontalOp(Ops.size() == 1, DAG, Subtarget))) {
39827     SmallVector<int> LaneMask, ScaledMask;
39828     if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, LaneMask) &&
39829         scaleShuffleElements(LaneMask, 4, ScaledMask)) {
39830       // See if we can remove the shuffle by resorting the HOP chain so that
39831       // the HOP args are pre-shuffled.
39832       // TODO: Generalize to any sized/depth chain.
39833       // TODO: Add support for PACKSS/PACKUS.
39834       if (isHoriz) {
39835         // Attempt to find a HOP(HOP(X,Y),HOP(Z,W)) source operand.
39836         auto GetHOpSrc = [&](int M) {
39837           if (M == SM_SentinelUndef)
39838             return DAG.getUNDEF(VT0);
39839           if (M == SM_SentinelZero)
39840             return getZeroVector(VT0.getSimpleVT(), Subtarget, DAG, DL);
39841           SDValue Src0 = BC[M / 4];
39842           SDValue Src1 = Src0.getOperand((M % 4) >= 2);
39843           if (Src1.getOpcode() == Opcode0 && Src0->isOnlyUserOf(Src1.getNode()))
39844             return Src1.getOperand(M % 2);
39845           return SDValue();
39846         };
39847         SDValue M0 = GetHOpSrc(ScaledMask[0]);
39848         SDValue M1 = GetHOpSrc(ScaledMask[1]);
39849         SDValue M2 = GetHOpSrc(ScaledMask[2]);
39850         SDValue M3 = GetHOpSrc(ScaledMask[3]);
39851         if (M0 && M1 && M2 && M3) {
39852           SDValue LHS = DAG.getNode(Opcode0, DL, SrcVT, M0, M1);
39853           SDValue RHS = DAG.getNode(Opcode0, DL, SrcVT, M2, M3);
39854           return DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
39855         }
39856       }
39857       // shuffle(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
39858       if (Ops.size() >= 2) {
39859         SDValue LHS, RHS;
39860         auto GetHOpSrc = [&](int M, int &OutM) {
39861           // TODO: Support SM_SentinelZero
39862           if (M < 0)
39863             return M == SM_SentinelUndef;
39864           SDValue Src = BC[M / 4].getOperand((M % 4) >= 2);
39865           if (!LHS || LHS == Src) {
39866             LHS = Src;
39867             OutM = (M % 2);
39868             return true;
39869           }
39870           if (!RHS || RHS == Src) {
39871             RHS = Src;
39872             OutM = (M % 2) + 2;
39873             return true;
39874           }
39875           return false;
39876         };
39877         int PostMask[4] = {-1, -1, -1, -1};
39878         if (GetHOpSrc(ScaledMask[0], PostMask[0]) &&
39879             GetHOpSrc(ScaledMask[1], PostMask[1]) &&
39880             GetHOpSrc(ScaledMask[2], PostMask[2]) &&
39881             GetHOpSrc(ScaledMask[3], PostMask[3])) {
39882           LHS = DAG.getBitcast(SrcVT, LHS);
39883           RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
39884           SDValue Res = DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
39885           // Use SHUFPS for the permute so this will work on SSE3 targets;
39886           // shuffle combining and domain handling will simplify this later on.
39887           MVT ShuffleVT = MVT::getVectorVT(MVT::f32, RootSizeInBits / 32);
39888           Res = DAG.getBitcast(ShuffleVT, Res);
39889           return DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
39890                              getV4X86ShuffleImm8ForMask(PostMask, DL, DAG));
39891         }
39892       }
39893     }
39894   }
39895 
39896   if (2 < Ops.size())
39897     return SDValue();
39898 
39899   SDValue BC1 = BC[BC.size() - 1];
39900   if (Mask.size() == VT0.getVectorNumElements()) {
39901     // Canonicalize binary shuffles of horizontal ops that use the
39902     // same sources to a unary shuffle.
39903     // TODO: Try to perform this fold even if the shuffle remains.
39904     if (Ops.size() == 2) {
39905       auto ContainsOps = [](SDValue HOp, SDValue Op) {
39906         return Op == HOp.getOperand(0) || Op == HOp.getOperand(1);
39907       };
39908       // Commute if all BC0's ops are contained in BC1.
39909       if (ContainsOps(BC1, BC0.getOperand(0)) &&
39910           ContainsOps(BC1, BC0.getOperand(1))) {
39911         ShuffleVectorSDNode::commuteMask(Mask);
39912         std::swap(Ops[0], Ops[1]);
39913         std::swap(BC0, BC1);
39914       }
39915 
39916       // If BC1 can be represented by BC0, then convert to unary shuffle.
39917       if (ContainsOps(BC0, BC1.getOperand(0)) &&
39918           ContainsOps(BC0, BC1.getOperand(1))) {
39919         for (int &M : Mask) {
39920           if (M < NumElts) // BC0 element or UNDEF/Zero sentinel.
39921             continue;
39922           int SubLane = ((M % NumEltsPerLane) >= NumHalfEltsPerLane) ? 1 : 0;
39923           M -= NumElts + (SubLane * NumHalfEltsPerLane);
39924           if (BC1.getOperand(SubLane) != BC0.getOperand(0))
39925             M += NumHalfEltsPerLane;
39926         }
39927       }
39928     }
39929 
39930     // Canonicalize unary horizontal ops to only refer to lower halves.
39931     for (int i = 0; i != NumElts; ++i) {
39932       int &M = Mask[i];
39933       if (isUndefOrZero(M))
39934         continue;
39935       if (M < NumElts && BC0.getOperand(0) == BC0.getOperand(1) &&
39936           (M % NumEltsPerLane) >= NumHalfEltsPerLane)
39937         M -= NumHalfEltsPerLane;
39938       if (NumElts <= M && BC1.getOperand(0) == BC1.getOperand(1) &&
39939           (M % NumEltsPerLane) >= NumHalfEltsPerLane)
39940         M -= NumHalfEltsPerLane;
39941     }
39942   }
39943 
39944   // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
39945   // single instruction. Attempt to match a v2X64 repeating shuffle pattern that
39946   // represents the LHS/RHS inputs for the lower/upper halves.
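        // Illustrative example: shuffle(HADD(a,b),HADD(c,d)) with a v2X64 mask
        // of <0,2> (the lower halves of both ops) simplifies to HADD(a,c).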
39947   SmallVector<int, 16> TargetMask128, WideMask128;
39948   if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, TargetMask128) &&
39949       scaleShuffleElements(TargetMask128, 2, WideMask128)) {
39950     assert(isUndefOrZeroOrInRange(WideMask128, 0, 4) && "Illegal shuffle");
39951     bool SingleOp = (Ops.size() == 1);
39952     if (isPack || OneUseOps ||
39953         shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
39954       SDValue Lo = isInRange(WideMask128[0], 0, 2) ? BC0 : BC1;
39955       SDValue Hi = isInRange(WideMask128[1], 0, 2) ? BC0 : BC1;
39956       Lo = Lo.getOperand(WideMask128[0] & 1);
39957       Hi = Hi.getOperand(WideMask128[1] & 1);
39958       if (SingleOp) {
39959         SDValue Undef = DAG.getUNDEF(SrcVT);
39960         SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
39961         Lo = (WideMask128[0] == SM_SentinelZero ? Zero : Lo);
39962         Hi = (WideMask128[1] == SM_SentinelZero ? Zero : Hi);
39963         Lo = (WideMask128[0] == SM_SentinelUndef ? Undef : Lo);
39964         Hi = (WideMask128[1] == SM_SentinelUndef ? Undef : Hi);
39965       }
39966       return DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
39967     }
39968   }
39969 
39970   return SDValue();
39971 }
39972 
39973 // Attempt to constant fold all of the constant source ops.
39974 // Returns true if the entire shuffle is folded to a constant.
39975 // TODO: Extend this to merge multiple constant Ops and update the mask.
39976 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
39977                                            ArrayRef<int> Mask, SDValue Root,
39978                                            bool HasVariableMask,
39979                                            SelectionDAG &DAG,
39980                                            const X86Subtarget &Subtarget) {
39981   MVT VT = Root.getSimpleValueType();
39982 
39983   unsigned SizeInBits = VT.getSizeInBits();
39984   unsigned NumMaskElts = Mask.size();
39985   unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
39986   unsigned NumOps = Ops.size();
39987 
39988   // Extract constant bits from each source op.
39989   bool OneUseConstantOp = false;
39990   SmallVector<APInt, 16> UndefEltsOps(NumOps);
39991   SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
39992   for (unsigned i = 0; i != NumOps; ++i) {
39993     SDValue SrcOp = Ops[i];
39994     OneUseConstantOp |= SrcOp.hasOneUse();
39995     if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
39996                                        RawBitsOps[i]))
39997       return SDValue();
39998   }
39999 
40000   // If we're optimizing for size, only fold if at least one of the constants
40001   // has a single use or the combined shuffle has included a variable mask
40002   // shuffle; this avoids constant pool bloat.
40003   bool IsOptimizingSize = DAG.shouldOptForSize();
40004   if (IsOptimizingSize && !OneUseConstantOp && !HasVariableMask)
40005     return SDValue();
40006 
40007   // Shuffle the constant bits according to the mask.
40008   SDLoc DL(Root);
40009   APInt UndefElts(NumMaskElts, 0);
40010   APInt ZeroElts(NumMaskElts, 0);
40011   APInt ConstantElts(NumMaskElts, 0);
40012   SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
40013                                         APInt::getZero(MaskSizeInBits));
40014   for (unsigned i = 0; i != NumMaskElts; ++i) {
40015     int M = Mask[i];
40016     if (M == SM_SentinelUndef) {
40017       UndefElts.setBit(i);
40018       continue;
40019     } else if (M == SM_SentinelZero) {
40020       ZeroElts.setBit(i);
40021       continue;
40022     }
40023     assert(0 <= M && M < (int)(NumMaskElts * NumOps));
40024 
40025     unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
40026     unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
40027 
40028     auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
40029     if (SrcUndefElts[SrcMaskIdx]) {
40030       UndefElts.setBit(i);
40031       continue;
40032     }
40033 
40034     auto &SrcEltBits = RawBitsOps[SrcOpIdx];
40035     APInt &Bits = SrcEltBits[SrcMaskIdx];
40036     if (!Bits) {
40037       ZeroElts.setBit(i);
40038       continue;
40039     }
40040 
40041     ConstantElts.setBit(i);
40042     ConstantBitData[i] = Bits;
40043   }
40044   assert((UndefElts | ZeroElts | ConstantElts).isAllOnes());
40045 
40046   // Attempt to create a zero vector.
40047   if ((UndefElts | ZeroElts).isAllOnes())
40048     return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
40049 
40050   // Create the constant data.
40051   MVT MaskSVT;
40052   if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
40053     MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
40054   else
40055     MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
40056 
40057   MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
40058   if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
40059     return SDValue();
40060 
40061   SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
40062   return DAG.getBitcast(VT, CstOp);
40063 }
40064 
40065 namespace llvm {
40066   namespace X86 {
40067     enum {
40068       MaxShuffleCombineDepth = 8
40069     };
40070   }
40071 } // namespace llvm
40072 
40073 /// Fully generic combining of x86 shuffle instructions.
40074 ///
40075 /// This should be the last combine run over the x86 shuffle instructions. Once
40076 /// they have been fully optimized, this will recursively consider all chains
40077 /// of single-use shuffle instructions, build a generic model of the cumulative
40078 /// shuffle operation, and check for simpler instructions which implement this
40079 /// operation. We use this primarily for two purposes:
40080 ///
40081 /// 1) Collapse generic shuffles to specialized single instructions when
40082 ///    equivalent. In most cases, this is just an encoding size win, but
40083 ///    sometimes we will collapse multiple generic shuffles into a single
40084 ///    special-purpose shuffle.
40085 /// 2) Look for sequences of shuffle instructions with 3 or more total
40086 ///    instructions, and replace them with the slightly more expensive SSSE3
40087 ///    PSHUFB instruction if available. We do this as the last combining step
40088 ///    to ensure we avoid using PSHUFB if we can implement the shuffle with
40089 ///    a suitable short sequence of other instructions. The PSHUFB will either
40090 ///    use a register or have to read from memory and so is slightly (but only
40091 ///    slightly) more expensive than the other shuffle instructions.
40092 ///
40093 /// Because this is inherently a quadratic operation (for each shuffle in
40094 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
40095 /// This should never be an issue in practice as the shuffle lowering doesn't
40096 /// produce sequences of more than 8 instructions.
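      /// (This limit is exposed as X86::MaxShuffleCombineDepth above.)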
40097 ///
40098 /// FIXME: We will currently miss some cases where the redundant shuffling
40099 /// would simplify under the threshold for PSHUFB formation because of
40100 /// combine-ordering. To fix this, we should do the redundant instruction
40101 /// combining in this recursive walk.
40102 static SDValue combineX86ShufflesRecursively(
40103     ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
40104     ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
40105     unsigned MaxDepth, bool HasVariableMask, bool AllowVariableCrossLaneMask,
40106     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
40107     const X86Subtarget &Subtarget) {
40108   assert(RootMask.size() > 0 &&
40109          (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
40110          "Illegal shuffle root mask");
40111   MVT RootVT = Root.getSimpleValueType();
40112   assert(RootVT.isVector() && "Shuffles operate on vector types!");
40113   unsigned RootSizeInBits = RootVT.getSizeInBits();
40114 
40115   // Bound the depth of our recursive combine because this is ultimately
40116   // quadratic in nature.
40117   if (Depth >= MaxDepth)
40118     return SDValue();
40119 
40120   // Directly rip through bitcasts to find the underlying operand.
40121   SDValue Op = SrcOps[SrcOpIndex];
40122   Op = peekThroughOneUseBitcasts(Op);
40123 
40124   EVT VT = Op.getValueType();
40125   if (!VT.isVector() || !VT.isSimple())
40126     return SDValue(); // Bail if we hit a non-simple non-vector.
40127 
40128   // FIXME: Just bail on f16 for now.
40129   if (VT.getVectorElementType() == MVT::f16)
40130     return SDValue();
40131 
40132   assert((RootSizeInBits % VT.getSizeInBits()) == 0 &&
40133          "Can only combine shuffles up to size of the root op.");
40134 
40135   // Create a demanded elts mask from the referenced elements of Op.
40136   APInt OpDemandedElts = APInt::getZero(RootMask.size());
40137   for (int M : RootMask) {
40138     int BaseIdx = RootMask.size() * SrcOpIndex;
40139     if (isInRange(M, BaseIdx, BaseIdx + RootMask.size()))
40140       OpDemandedElts.setBit(M - BaseIdx);
40141   }
40142   if (RootSizeInBits != VT.getSizeInBits()) {
40143     // Op is smaller than Root - extract the demanded elts for the subvector.
40144     unsigned Scale = RootSizeInBits / VT.getSizeInBits();
40145     unsigned NumOpMaskElts = RootMask.size() / Scale;
40146     assert((RootMask.size() % Scale) == 0 && "Root mask size mismatch");
40147     assert(OpDemandedElts
40148                .extractBits(RootMask.size() - NumOpMaskElts, NumOpMaskElts)
40149                .isZero() &&
40150            "Out of range elements referenced in root mask");
40151     OpDemandedElts = OpDemandedElts.extractBits(NumOpMaskElts, 0);
40152   }
40153   OpDemandedElts =
40154       APIntOps::ScaleBitMask(OpDemandedElts, VT.getVectorNumElements());
40155 
40156   // Extract target shuffle mask and resolve sentinels and inputs.
40157   SmallVector<int, 64> OpMask;
40158   SmallVector<SDValue, 2> OpInputs;
40159   APInt OpUndef, OpZero;
40160   bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
40161   if (getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
40162                              OpZero, DAG, Depth, false)) {
40163     // Shuffle inputs must not be larger than the shuffle result.
40164     // TODO: Relax this for single input faux shuffles (e.g. trunc).
40165     if (llvm::any_of(OpInputs, [VT](SDValue OpInput) {
40166           return OpInput.getValueSizeInBits() > VT.getSizeInBits();
40167         }))
40168       return SDValue();
40169   } else if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
40170              (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
40171              !isNullConstant(Op.getOperand(1))) {
40172     SDValue SrcVec = Op.getOperand(0);
40173     int ExtractIdx = Op.getConstantOperandVal(1);
40174     unsigned NumElts = VT.getVectorNumElements();
40175     OpInputs.assign({SrcVec});
40176     OpMask.assign(NumElts, SM_SentinelUndef);
40177     std::iota(OpMask.begin(), OpMask.end(), ExtractIdx);
40178     OpZero = OpUndef = APInt::getZero(NumElts);
40179   } else {
40180     return SDValue();
40181   }
40182 
40183   // If the shuffle result was smaller than the root, we need to adjust the
40184   // mask indices and pad the mask with undefs.
40185   if (RootSizeInBits > VT.getSizeInBits()) {
40186     unsigned NumSubVecs = RootSizeInBits / VT.getSizeInBits();
40187     unsigned OpMaskSize = OpMask.size();
40188     if (OpInputs.size() > 1) {
40189       unsigned PaddedMaskSize = NumSubVecs * OpMaskSize;
40190       for (int &M : OpMask) {
40191         if (M < 0)
40192           continue;
40193         int EltIdx = M % OpMaskSize;
40194         int OpIdx = M / OpMaskSize;
40195         M = (PaddedMaskSize * OpIdx) + EltIdx;
40196       }
40197     }
40198     OpZero = OpZero.zext(NumSubVecs * OpMaskSize);
40199     OpUndef = OpUndef.zext(NumSubVecs * OpMaskSize);
40200     OpMask.append((NumSubVecs - 1) * OpMaskSize, SM_SentinelUndef);
40201   }
40202 
40203   SmallVector<int, 64> Mask;
40204   SmallVector<SDValue, 16> Ops;
40205 
40206   // We don't need to merge masks if the root is empty.
40207   bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
40208   if (EmptyRoot) {
40209     // Only resolve zeros if it will remove an input; otherwise we might end
40210     // up in an infinite loop.
40211     bool ResolveKnownZeros = true;
40212     if (!OpZero.isZero()) {
40213       APInt UsedInputs = APInt::getZero(OpInputs.size());
40214       for (int i = 0, e = OpMask.size(); i != e; ++i) {
40215         int M = OpMask[i];
40216         if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
40217           continue;
40218         UsedInputs.setBit(M / OpMask.size());
40219         if (UsedInputs.isAllOnes()) {
40220           ResolveKnownZeros = false;
40221           break;
40222         }
40223       }
40224     }
40225     resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
40226                                       ResolveKnownZeros);
40227 
40228     Mask = OpMask;
40229     Ops.append(OpInputs.begin(), OpInputs.end());
40230   } else {
40231     resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
40232 
40233     // Add the inputs to the Ops list, avoiding duplicates.
40234     Ops.append(SrcOps.begin(), SrcOps.end());
40235 
40236     auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
40237       // Attempt to find an existing match.
40238       SDValue InputBC = peekThroughBitcasts(Input);
40239       for (int i = 0, e = Ops.size(); i < e; ++i)
40240         if (InputBC == peekThroughBitcasts(Ops[i]))
40241           return i;
40242       // Match failed - should we replace an existing Op?
40243       if (InsertionPoint >= 0) {
40244         Ops[InsertionPoint] = Input;
40245         return InsertionPoint;
40246       }
40247       // Add to the end of the Ops list.
40248       Ops.push_back(Input);
40249       return Ops.size() - 1;
40250     };
40251 
40252     SmallVector<int, 2> OpInputIdx;
40253     for (SDValue OpInput : OpInputs)
40254       OpInputIdx.push_back(
40255           AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
40256 
40257     assert(((RootMask.size() > OpMask.size() &&
40258              RootMask.size() % OpMask.size() == 0) ||
40259             (OpMask.size() > RootMask.size() &&
40260              OpMask.size() % RootMask.size() == 0) ||
40261             OpMask.size() == RootMask.size()) &&
40262            "The smaller number of elements must divide the larger.");
40263 
40264     // This function can be performance-critical, so we rely on the power-of-2
40265     // knowledge that we have about the mask sizes to replace div/rem ops with
40266     // bit-masks and shifts.
40267     assert(isPowerOf2_32(RootMask.size()) &&
40268            "Non-power-of-2 shuffle mask sizes");
40269     assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
40270     unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
40271     unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());
40272 
40273     unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
40274     unsigned RootRatio =
40275         std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
40276     unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
40277     assert((RootRatio == 1 || OpRatio == 1) &&
40278            "Must not have a ratio for both incoming and op masks!");
40279 
40280     assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
40281     assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
40282     assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
40283     unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
40284     unsigned OpRatioLog2 = countTrailingZeros(OpRatio);
40285 
40286     Mask.resize(MaskWidth, SM_SentinelUndef);
40287 
40288     // Merge this shuffle operation's mask into our accumulated mask. Note that
40289     // this shuffle's mask will be the first applied to the input, followed by
40290     // the root mask to get us all the way to the root value arrangement. The
40291     // reason for this order is that we are recursing up the operation chain.
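          // Illustrative example (single input, RootRatio == OpRatio == 1): the
          // merged mask is Mask[i] = OpMask[RootMask[i]], so a RootMask of
          // <0,2,1,3> applied over an OpMask of <1,0,3,2> yields <1,3,0,2>.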
40292     for (unsigned i = 0; i < MaskWidth; ++i) {
40293       unsigned RootIdx = i >> RootRatioLog2;
40294       if (RootMask[RootIdx] < 0) {
40295         // This is a zero or undef lane, we're done.
40296         Mask[i] = RootMask[RootIdx];
40297         continue;
40298       }
40299 
40300       unsigned RootMaskedIdx =
40301           RootRatio == 1
40302               ? RootMask[RootIdx]
40303               : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
40304 
40305       // Just insert the scaled root mask value if it references an input other
40306       // than the SrcOp we're currently inserting.
40307       if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
40308           (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
40309         Mask[i] = RootMaskedIdx;
40310         continue;
40311       }
40312 
40313       RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
40314       unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
40315       if (OpMask[OpIdx] < 0) {
40316         // The incoming lanes are zero or undef; it doesn't matter which ones we
40317         // are using.
40318         Mask[i] = OpMask[OpIdx];
40319         continue;
40320       }
40321 
40322       // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
40323       unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
40324                                           : (OpMask[OpIdx] << OpRatioLog2) +
40325                                                 (RootMaskedIdx & (OpRatio - 1));
40326 
40327       OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
40328       int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
40329       assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
40330       OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
40331 
40332       Mask[i] = OpMaskedIdx;
40333     }
40334   }
40335 
40336   // Remove unused/repeated shuffle source ops.
40337   resolveTargetShuffleInputsAndMask(Ops, Mask);
40338 
40339   // Handle the all undef/zero/ones cases early.
40340   if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
40341     return DAG.getUNDEF(RootVT);
40342   if (all_of(Mask, [](int Idx) { return Idx < 0; }))
40343     return getZeroVector(RootVT, Subtarget, DAG, SDLoc(Root));
40344   if (Ops.size() == 1 && ISD::isBuildVectorAllOnes(Ops[0].getNode()) &&
40345       !llvm::is_contained(Mask, SM_SentinelZero))
40346     return getOnesVector(RootVT, DAG, SDLoc(Root));
40347 
40348   assert(!Ops.empty() && "Shuffle with no inputs detected");
40349   HasVariableMask |= IsOpVariableMask;
40350 
40351   // Update the list of shuffle nodes that have been combined so far.
40352   SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
40353                                                 SrcNodes.end());
40354   CombinedNodes.push_back(Op.getNode());
40355 
40356   // See if we can recurse into each shuffle source op (if it's a target
40357   // shuffle). The source op should generally only be combined if it either has
40358   // a single use (i.e. current Op) or all its users have already been combined;
40359   // if not, we can still combine but should prevent generation of variable
40360   // shuffles to avoid constant pool bloat.
40361   // Don't recurse if we already have more source ops than we can combine in
40362   // the remaining recursion depth.
40363   if (Ops.size() < (MaxDepth - Depth)) {
40364     for (int i = 0, e = Ops.size(); i < e; ++i) {
40365       // For empty roots, we need to resolve zeroable elements before combining
40366       // them with other shuffles.
40367       SmallVector<int, 64> ResolvedMask = Mask;
40368       if (EmptyRoot)
40369         resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
40370       bool AllowCrossLaneVar = false;
40371       bool AllowPerLaneVar = false;
40372       if (Ops[i].getNode()->hasOneUse() ||
40373           SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode())) {
40374         AllowCrossLaneVar = AllowVariableCrossLaneMask;
40375         AllowPerLaneVar = AllowVariablePerLaneMask;
40376       }
40377       if (SDValue Res = combineX86ShufflesRecursively(
40378               Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1, MaxDepth,
40379               HasVariableMask, AllowCrossLaneVar, AllowPerLaneVar, DAG,
40380               Subtarget))
40381         return Res;
40382     }
40383   }
40384 
40385   // Attempt to constant fold all of the constant source ops.
40386   if (SDValue Cst = combineX86ShufflesConstants(
40387           Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
40388     return Cst;
40389 
40390   // If constant folding failed and we only have constants, then we have
40391   // multiple uses by a single non-variable shuffle; just bail.
40392   if (Depth == 0 && llvm::all_of(Ops, [&](SDValue Op) {
40393         APInt UndefElts;
40394         SmallVector<APInt> RawBits;
40395         unsigned EltSizeInBits = RootSizeInBits / Mask.size();
40396         return getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
40397                                              RawBits);
40398       })) {
40399     return SDValue();
40400   }
40401 
40402   // Canonicalize the combined shuffle mask chain with horizontal ops.
40403   // NOTE: This will update the Ops and Mask.
40404   if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
40405           Ops, Mask, RootSizeInBits, SDLoc(Root), DAG, Subtarget))
40406     return DAG.getBitcast(RootVT, HOp);
40407 
40408   // Try to refine our inputs given our knowledge of target shuffle mask.
40409   for (auto I : enumerate(Ops)) {
40410     int OpIdx = I.index();
40411     SDValue &Op = I.value();
40412 
40413     // What range of shuffle mask element values results in picking from Op?
40414     int Lo = OpIdx * Mask.size();
40415     int Hi = Lo + Mask.size();
40416 
40417     // Which elements of Op do we demand, given the mask's granularity?
40418     APInt OpDemandedElts(Mask.size(), 0);
40419     for (int MaskElt : Mask) {
40420       if (isInRange(MaskElt, Lo, Hi)) { // Picks from Op?
40421         int OpEltIdx = MaskElt - Lo;
40422         OpDemandedElts.setBit(OpEltIdx);
40423       }
40424     }
40425 
40426     // Is the shuffle result smaller than the root?
40427     if (Op.getValueSizeInBits() < RootSizeInBits) {
40428       // We padded the mask with undefs. But we now need to undo that.
40429       unsigned NumExpectedVectorElts = Mask.size();
40430       unsigned EltSizeInBits = RootSizeInBits / NumExpectedVectorElts;
40431       unsigned NumOpVectorElts = Op.getValueSizeInBits() / EltSizeInBits;
40432       assert(!OpDemandedElts.extractBits(
40433                  NumExpectedVectorElts - NumOpVectorElts, NumOpVectorElts) &&
40434              "Demanding the virtual undef widening padding?");
40435       OpDemandedElts = OpDemandedElts.trunc(NumOpVectorElts); // NUW
40436     }
40437 
40438     // The Op itself may be of different VT, so we need to scale the mask.
40439     unsigned NumOpElts = Op.getValueType().getVectorNumElements();
40440     APInt OpScaledDemandedElts = APIntOps::ScaleBitMask(OpDemandedElts, NumOpElts);
40441 
40442     // Can this operand be simplified any further, given its demanded elements?
40443     if (SDValue NewOp =
40444             DAG.getTargetLoweringInfo().SimplifyMultipleUseDemandedVectorElts(
40445                 Op, OpScaledDemandedElts, DAG))
40446       Op = NewOp;
40447   }
40448   // FIXME: should we rerun resolveTargetShuffleInputsAndMask() now?
40449 
40450   // Widen any subvector shuffle inputs we've collected.
40451   // TODO: Remove this to avoid generating temporary nodes, we should only
40452   // widen once combineX86ShuffleChain has found a match.
40453   if (any_of(Ops, [RootSizeInBits](SDValue Op) {
40454         return Op.getValueSizeInBits() < RootSizeInBits;
40455       })) {
40456     for (SDValue &Op : Ops)
40457       if (Op.getValueSizeInBits() < RootSizeInBits)
40458         Op = widenSubVector(Op, false, Subtarget, DAG, SDLoc(Op),
40459                             RootSizeInBits);
40460     // Reresolve - we might have repeated subvector sources.
40461     resolveTargetShuffleInputsAndMask(Ops, Mask);
40462   }
40463 
40464   // We can only combine unary and binary shuffle mask cases.
40465   if (Ops.size() <= 2) {
40466     // Minor canonicalization of the accumulated shuffle mask to make it easier
40467     // to match below. All this does is detect masks with sequential pairs of
40468     // elements, and shrink them to the half-width mask. It does this in a loop
40469     // so it will reduce the size of the mask to the minimal width mask which
40470     // performs an equivalent shuffle.
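    // For example, the 8-element mask <0,1,6,7,2,3,4,5> widens to the 4-element
    // mask <0,3,1,2> on the first pass and stops there, as its pairs are no
    // longer sequential.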
40471     while (Mask.size() > 1) {
40472       SmallVector<int, 64> WidenedMask;
40473       if (!canWidenShuffleElements(Mask, WidenedMask))
40474         break;
40475       Mask = std::move(WidenedMask);
40476     }
40477 
40478     // Canonicalization of binary shuffle masks to improve pattern matching by
40479     // commuting the inputs.
40480     if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
40481       ShuffleVectorSDNode::commuteMask(Mask);
40482       std::swap(Ops[0], Ops[1]);
40483     }
40484 
40485     // Try to combine into a single shuffle instruction.
40486     if (SDValue Shuffle = combineX86ShuffleChain(
40487             Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
40488             AllowVariablePerLaneMask, DAG, Subtarget))
40489       return Shuffle;
40490 
40491     // If all the operands come from the same larger vector, fallthrough and try
40492     // to use combineX86ShuffleChainWithExtract.
40493     SDValue LHS = peekThroughBitcasts(Ops.front());
40494     SDValue RHS = peekThroughBitcasts(Ops.back());
40495     if (Ops.size() != 2 || !Subtarget.hasAVX2() || RootSizeInBits != 128 ||
40496         (RootSizeInBits / Mask.size()) != 64 ||
40497         LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
40498         RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
40499         LHS.getOperand(0) != RHS.getOperand(0))
40500       return SDValue();
40501   }
40502 
40503   // If that failed and any input is extracted then try to combine as a
40504   // shuffle with the larger type.
40505   return combineX86ShuffleChainWithExtract(
40506       Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
40507       AllowVariablePerLaneMask, DAG, Subtarget);
40508 }
40509 
40510 /// Helper entry wrapper to combineX86ShufflesRecursively.
40511 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
40512                                              const X86Subtarget &Subtarget) {
40513   return combineX86ShufflesRecursively(
40514       {Op}, 0, Op, {0}, {}, /*Depth*/ 0, X86::MaxShuffleCombineDepth,
40515       /*HasVarMask*/ false,
40516       /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, DAG,
40517       Subtarget);
40518 }
40519 
40520 /// Get the PSHUF-style mask from PSHUF node.
40521 ///
40522 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
40523 /// PSHUF-style masks that can be reused with such instructions.
40524 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
40525   MVT VT = N.getSimpleValueType();
40526   SmallVector<int, 4> Mask;
40527   SmallVector<SDValue, 2> Ops;
40528   bool HaveMask =
40529       getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask);
40530   (void)HaveMask;
40531   assert(HaveMask);
40532 
40533   // If we have more than 128 bits, only the low 128 bits of the shuffle mask
40534   // matter. Check that the upper masks are repeats and remove them.
40535   if (VT.getSizeInBits() > 128) {
40536     int LaneElts = 128 / VT.getScalarSizeInBits();
40537 #ifndef NDEBUG
40538     for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
40539       for (int j = 0; j < LaneElts; ++j)
40540         assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
40541                "Mask doesn't repeat in high 128-bit lanes!");
40542 #endif
40543     Mask.resize(LaneElts);
40544   }
40545 
40546   switch (N.getOpcode()) {
40547   case X86ISD::PSHUFD:
40548     return Mask;
40549   case X86ISD::PSHUFLW:
40550     Mask.resize(4);
40551     return Mask;
40552   case X86ISD::PSHUFHW:
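    // Only the high-word half carries the shuffle; e.g. an 8-element PSHUFHW
    // mask <0,1,2,3,5,4,7,6> reduces to the v4 mask <1,0,3,2>.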
40553     Mask.erase(Mask.begin(), Mask.begin() + 4);
40554     for (int &M : Mask)
40555       M -= 4;
40556     return Mask;
40557   default:
40558     llvm_unreachable("No valid shuffle instruction found!");
40559   }
40560 }
40561 
40562 /// Search for a combinable shuffle across a chain ending in pshufd.
40563 ///
40564 /// We walk up the chain and look for a combinable shuffle, skipping over
40565 /// shuffles that we could hoist this shuffle's transformation past without
40566 /// altering anything.
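/// If one is found, its mask is composed with the incoming mask and the skipped
/// shuffles are rebuilt on top of the merged node; e.g. two back-to-back
/// PSHUFDs collapse into a single PSHUFD whose mask is the composition of the
/// two.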
40567 static SDValue
40568 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
40569                              SelectionDAG &DAG) {
40570   assert(N.getOpcode() == X86ISD::PSHUFD &&
40571          "Called with something other than an x86 128-bit half shuffle!");
40572   SDLoc DL(N);
40573 
40574   // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
40575   // of the shuffles in the chain so that we can form a fresh chain to replace
40576   // this one.
40577   SmallVector<SDValue, 8> Chain;
40578   SDValue V = N.getOperand(0);
40579   for (; V.hasOneUse(); V = V.getOperand(0)) {
40580     switch (V.getOpcode()) {
40581     default:
40582       return SDValue(); // Nothing combined!
40583 
40584     case ISD::BITCAST:
40585       // Skip bitcasts as we always know the type for the target specific
40586       // instructions.
40587       continue;
40588 
40589     case X86ISD::PSHUFD:
40590       // Found another dword shuffle.
40591       break;
40592 
40593     case X86ISD::PSHUFLW:
40594       // Check that the low words (being shuffled) are the identity in the
40595       // dword shuffle, and the high words are self-contained.
40596       if (Mask[0] != 0 || Mask[1] != 1 ||
40597           !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
40598         return SDValue();
40599 
40600       Chain.push_back(V);
40601       continue;
40602 
40603     case X86ISD::PSHUFHW:
40604       // Check that the high words (being shuffled) are the identity in the
40605       // dword shuffle, and the low words are self-contained.
40606       if (Mask[2] != 2 || Mask[3] != 3 ||
40607           !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
40608         return SDValue();
40609 
40610       Chain.push_back(V);
40611       continue;
40612 
40613     case X86ISD::UNPCKL:
40614     case X86ISD::UNPCKH:
40615       // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
40616       // shuffle into a preceding word shuffle.
40617       if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
40618           V.getSimpleValueType().getVectorElementType() != MVT::i16)
40619         return SDValue();
40620 
40621       // Search for a half-shuffle which we can combine with.
40622       unsigned CombineOp =
40623           V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
40624       if (V.getOperand(0) != V.getOperand(1) ||
40625           !V->isOnlyUserOf(V.getOperand(0).getNode()))
40626         return SDValue();
40627       Chain.push_back(V);
40628       V = V.getOperand(0);
40629       do {
40630         switch (V.getOpcode()) {
40631         default:
40632           return SDValue(); // Nothing to combine.
40633 
40634         case X86ISD::PSHUFLW:
40635         case X86ISD::PSHUFHW:
40636           if (V.getOpcode() == CombineOp)
40637             break;
40638 
40639           Chain.push_back(V);
40640 
40641           [[fallthrough]];
40642         case ISD::BITCAST:
40643           V = V.getOperand(0);
40644           continue;
40645         }
40646         break;
40647       } while (V.hasOneUse());
40648       break;
40649     }
40650     // Break out of the loop if we break out of the switch.
40651     break;
40652   }
40653 
40654   if (!V.hasOneUse())
40655     // We fell out of the loop without finding a viable combining instruction.
40656     return SDValue();
40657 
40658   // Merge this node's mask and our incoming mask.
40659   SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
40660   for (int &M : Mask)
40661     M = VMask[M];
40662   V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
40663                   getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
40664 
40665   // Rebuild the chain around this new shuffle.
40666   while (!Chain.empty()) {
40667     SDValue W = Chain.pop_back_val();
40668 
40669     if (V.getValueType() != W.getOperand(0).getValueType())
40670       V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
40671 
40672     switch (W.getOpcode()) {
40673     default:
40674       llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
40675 
40676     case X86ISD::UNPCKL:
40677     case X86ISD::UNPCKH:
40678       V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
40679       break;
40680 
40681     case X86ISD::PSHUFD:
40682     case X86ISD::PSHUFLW:
40683     case X86ISD::PSHUFHW:
40684       V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
40685       break;
40686     }
40687   }
40688   if (V.getValueType() != N.getValueType())
40689     V = DAG.getBitcast(N.getValueType(), V);
40690 
40691   // Return the new chain to replace N.
40692   return V;
40693 }
40694 
40695 // Attempt to commute shufps LHS loads:
40696 // permilps(shufps(load(),x)) --> permilps(shufps(x,load()))
40697 static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL,
40698                                       SelectionDAG &DAG) {
40699   // TODO: Add vXf64 support.
40700   if (VT != MVT::v4f32 && VT != MVT::v8f32 && VT != MVT::v16f32)
40701     return SDValue();
40702 
40703   // SHUFP(LHS, RHS) -> SHUFP(RHS, LHS) iff LHS is foldable + RHS is not.
40704   auto commuteSHUFP = [&VT, &DL, &DAG](SDValue Parent, SDValue V) {
40705     if (V.getOpcode() != X86ISD::SHUFP || !Parent->isOnlyUserOf(V.getNode()))
40706       return SDValue();
40707     SDValue N0 = V.getOperand(0);
40708     SDValue N1 = V.getOperand(1);
40709     unsigned Imm = V.getConstantOperandVal(2);
40710     const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
40711     if (!X86::mayFoldLoad(peekThroughOneUseBitcasts(N0), Subtarget) ||
40712         X86::mayFoldLoad(peekThroughOneUseBitcasts(N1), Subtarget))
40713       return SDValue();
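    // Swapping the SHUFP operands also requires swapping the imm nibbles, e.g.
    // 0xB1 becomes 0x1B. The commuted shuffle produces the same elements with
    // its 64-bit halves exchanged, which the callers below compensate for by
    // flipping the affected 2-bit selectors in their own immediates (the XORs
    // with 0xAA, 0x0A and 0xA0).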
40714     Imm = ((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4);
40715     return DAG.getNode(X86ISD::SHUFP, DL, VT, N1, N0,
40716                        DAG.getTargetConstant(Imm, DL, MVT::i8));
40717   };
40718 
40719   switch (N.getOpcode()) {
40720   case X86ISD::VPERMILPI:
40721     if (SDValue NewSHUFP = commuteSHUFP(N, N.getOperand(0))) {
40722       unsigned Imm = N.getConstantOperandVal(1);
40723       return DAG.getNode(X86ISD::VPERMILPI, DL, VT, NewSHUFP,
40724                          DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
40725     }
40726     break;
40727   case X86ISD::SHUFP: {
40728     SDValue N0 = N.getOperand(0);
40729     SDValue N1 = N.getOperand(1);
40730     unsigned Imm = N.getConstantOperandVal(2);
40731     if (N0 == N1) {
40732       if (SDValue NewSHUFP = commuteSHUFP(N, N0))
40733         return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, NewSHUFP,
40734                            DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
40735     } else if (SDValue NewSHUFP = commuteSHUFP(N, N0)) {
40736       return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, N1,
40737                          DAG.getTargetConstant(Imm ^ 0x0A, DL, MVT::i8));
40738     } else if (SDValue NewSHUFP = commuteSHUFP(N, N1)) {
40739       return DAG.getNode(X86ISD::SHUFP, DL, VT, N0, NewSHUFP,
40740                          DAG.getTargetConstant(Imm ^ 0xA0, DL, MVT::i8));
40741     }
40742     break;
40743   }
40744   }
40745 
40746   return SDValue();
40747 }
40748 
40749 // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
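// This is only attempted when shuffling the operands is considered cheap -
// e.g. constants, splats, foldable loads or one-use target shuffles that are
// likely to combine further (see IsMergeableWithShuffle below).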
40750 static SDValue canonicalizeShuffleWithBinOps(SDValue N, SelectionDAG &DAG,
40751                                              const SDLoc &DL) {
40752   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40753   EVT ShuffleVT = N.getValueType();
40754 
40755   auto IsMergeableWithShuffle = [&DAG](SDValue Op, bool FoldLoad = false) {
40756     // AllZeros/AllOnes constants are freely shuffled and will peek through
40757     // bitcasts. Other constant build vectors do not peek through bitcasts. Only
40758     // merge with target shuffles if it has one use so shuffle combining is
40759     // likely to kick in. Shuffles of splats are expected to be removed.
40760     return ISD::isBuildVectorAllOnes(Op.getNode()) ||
40761            ISD::isBuildVectorAllZeros(Op.getNode()) ||
40762            ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
40763            ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()) ||
40764            (isTargetShuffle(Op.getOpcode()) && Op->hasOneUse()) ||
40765            (FoldLoad && isShuffleFoldableLoad(Op)) ||
40766            DAG.isSplatValue(Op, /*AllowUndefs*/ false);
40767   };
40768   auto IsSafeToMoveShuffle = [ShuffleVT](SDValue Op, unsigned BinOp) {
40769     // Ensure we only shuffle whole vector src elements, unless it's a logical
40770     // binop where we can more aggressively move shuffles from dst to src.
40771     return BinOp == ISD::AND || BinOp == ISD::OR || BinOp == ISD::XOR ||
40772            BinOp == X86ISD::ANDNP ||
40773            (Op.getScalarValueSizeInBits() <= ShuffleVT.getScalarSizeInBits());
40774   };
40775 
40776   unsigned Opc = N.getOpcode();
40777   switch (Opc) {
40778   // Unary and Unary+Permute Shuffles.
40779   case X86ISD::PSHUFB: {
40780     // Don't merge PSHUFB if it contains zero'd elements.
40781     SmallVector<int> Mask;
40782     SmallVector<SDValue> Ops;
40783     if (!getTargetShuffleMask(N.getNode(), ShuffleVT.getSimpleVT(), false, Ops,
40784                               Mask))
40785       break;
40786     [[fallthrough]];
40787   }
40788   case X86ISD::VBROADCAST:
40789   case X86ISD::MOVDDUP:
40790   case X86ISD::PSHUFD:
40791   case X86ISD::PSHUFHW:
40792   case X86ISD::PSHUFLW:
40793   case X86ISD::VPERMI:
40794   case X86ISD::VPERMILPI: {
40795     if (N.getOperand(0).getValueType() == ShuffleVT &&
40796         N->isOnlyUserOf(N.getOperand(0).getNode())) {
40797       SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
40798       unsigned SrcOpcode = N0.getOpcode();
40799       if (TLI.isBinOp(SrcOpcode) && IsSafeToMoveShuffle(N0, SrcOpcode)) {
40800         SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
40801         SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
40802         if (IsMergeableWithShuffle(Op00, Opc != X86ISD::PSHUFB) ||
40803             IsMergeableWithShuffle(Op01, Opc != X86ISD::PSHUFB)) {
40804           SDValue LHS, RHS;
40805           Op00 = DAG.getBitcast(ShuffleVT, Op00);
40806           Op01 = DAG.getBitcast(ShuffleVT, Op01);
40807           if (N.getNumOperands() == 2) {
40808             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, N.getOperand(1));
40809             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, N.getOperand(1));
40810           } else {
40811             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00);
40812             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01);
40813           }
40814           EVT OpVT = N0.getValueType();
40815           return DAG.getBitcast(ShuffleVT,
40816                                 DAG.getNode(SrcOpcode, DL, OpVT,
40817                                             DAG.getBitcast(OpVT, LHS),
40818                                             DAG.getBitcast(OpVT, RHS)));
40819         }
40820       }
40821     }
40822     break;
40823   }
40824   // Binary and Binary+Permute Shuffles.
40825   case X86ISD::INSERTPS: {
40826     // Don't merge INSERTPS if it contains zero'd elements.
40827     unsigned InsertPSMask = N.getConstantOperandVal(2);
40828     unsigned ZeroMask = InsertPSMask & 0xF;
40829     if (ZeroMask != 0)
40830       break;
40831     [[fallthrough]];
40832   }
40833   case X86ISD::MOVSD:
40834   case X86ISD::MOVSS:
40835   case X86ISD::BLENDI:
40836   case X86ISD::SHUFP:
40837   case X86ISD::UNPCKH:
40838   case X86ISD::UNPCKL: {
40839     if (N->isOnlyUserOf(N.getOperand(0).getNode()) &&
40840         N->isOnlyUserOf(N.getOperand(1).getNode())) {
40841       SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
40842       SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
40843       unsigned SrcOpcode = N0.getOpcode();
40844       if (TLI.isBinOp(SrcOpcode) && N1.getOpcode() == SrcOpcode &&
40845           IsSafeToMoveShuffle(N0, SrcOpcode) &&
40846           IsSafeToMoveShuffle(N1, SrcOpcode)) {
40847         SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
40848         SDValue Op10 = peekThroughOneUseBitcasts(N1.getOperand(0));
40849         SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
40850         SDValue Op11 = peekThroughOneUseBitcasts(N1.getOperand(1));
40851         // Ensure the total number of shuffles doesn't increase by folding this
40852         // shuffle through to the source ops.
40853         if (((IsMergeableWithShuffle(Op00) && IsMergeableWithShuffle(Op10)) ||
40854              (IsMergeableWithShuffle(Op01) && IsMergeableWithShuffle(Op11))) ||
40855             ((IsMergeableWithShuffle(Op00) || IsMergeableWithShuffle(Op10)) &&
40856              (IsMergeableWithShuffle(Op01) || IsMergeableWithShuffle(Op11)))) {
40857           SDValue LHS, RHS;
40858           Op00 = DAG.getBitcast(ShuffleVT, Op00);
40859           Op10 = DAG.getBitcast(ShuffleVT, Op10);
40860           Op01 = DAG.getBitcast(ShuffleVT, Op01);
40861           Op11 = DAG.getBitcast(ShuffleVT, Op11);
40862           if (N.getNumOperands() == 3) {
40863             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10, N.getOperand(2));
40864             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11, N.getOperand(2));
40865           } else {
40866             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10);
40867             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11);
40868           }
40869           EVT OpVT = N0.getValueType();
40870           return DAG.getBitcast(ShuffleVT,
40871                                 DAG.getNode(SrcOpcode, DL, OpVT,
40872                                             DAG.getBitcast(OpVT, LHS),
40873                                             DAG.getBitcast(OpVT, RHS)));
40874         }
40875       }
40876     }
40877     break;
40878   }
40879   }
40880   return SDValue();
40881 }
40882 
40883 /// Attempt to fold vpermf128(op(),op()) -> op(vpermf128(),vpermf128()).
40884 static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
40885                                                       SelectionDAG &DAG,
40886                                                       const SDLoc &DL) {
40887   assert(V.getOpcode() == X86ISD::VPERM2X128 && "Unknown lane shuffle");
40888 
40889   MVT VT = V.getSimpleValueType();
40890   SDValue Src0 = peekThroughBitcasts(V.getOperand(0));
40891   SDValue Src1 = peekThroughBitcasts(V.getOperand(1));
40892   unsigned SrcOpc0 = Src0.getOpcode();
40893   unsigned SrcOpc1 = Src1.getOpcode();
40894   EVT SrcVT0 = Src0.getValueType();
40895   EVT SrcVT1 = Src1.getValueType();
40896 
40897   if (!Src1.isUndef() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
40898     return SDValue();
40899 
40900   switch (SrcOpc0) {
40901   case X86ISD::MOVDDUP: {
40902     SDValue LHS = Src0.getOperand(0);
40903     SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
40904     SDValue Res =
40905         DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS, V.getOperand(2));
40906     Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res);
40907     return DAG.getBitcast(VT, Res);
40908   }
40909   case X86ISD::VPERMILPI:
40910     // TODO: Handle v4f64 permutes with different low/high lane masks.
40911     if (SrcVT0 == MVT::v4f64) {
40912       uint64_t Mask = Src0.getConstantOperandVal(1);
40913       if ((Mask & 0x3) != ((Mask >> 2) & 0x3))
40914         break;
40915     }
40916     [[fallthrough]];
40917   case X86ISD::VSHLI:
40918   case X86ISD::VSRLI:
40919   case X86ISD::VSRAI:
40920   case X86ISD::PSHUFD:
40921     if (Src1.isUndef() || Src0.getOperand(1) == Src1.getOperand(1)) {
40922       SDValue LHS = Src0.getOperand(0);
40923       SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
40924       SDValue Res = DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS,
40925                                 V.getOperand(2));
40926       Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res, Src0.getOperand(1));
40927       return DAG.getBitcast(VT, Res);
40928     }
40929     break;
40930   }
40931 
40932   return SDValue();
40933 }
40934 
40935 /// Try to combine x86 target specific shuffles.
40936 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
40937                                     TargetLowering::DAGCombinerInfo &DCI,
40938                                     const X86Subtarget &Subtarget) {
40939   SDLoc DL(N);
40940   MVT VT = N.getSimpleValueType();
40941   SmallVector<int, 4> Mask;
40942   unsigned Opcode = N.getOpcode();
40943 
40944   if (SDValue R = combineCommutableSHUFP(N, VT, DL, DAG))
40945     return R;
40946 
40947   // Handle specific target shuffles.
40948   switch (Opcode) {
40949   case X86ISD::MOVDDUP: {
40950     SDValue Src = N.getOperand(0);
40951     // Turn a 128-bit MOVDDUP of a full vector load into movddup+vzload.
40952     if (VT == MVT::v2f64 && Src.hasOneUse() &&
40953         ISD::isNormalLoad(Src.getNode())) {
40954       LoadSDNode *LN = cast<LoadSDNode>(Src);
40955       if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::f64, MVT::v2f64, DAG)) {
40956         SDValue Movddup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, VZLoad);
40957         DCI.CombineTo(N.getNode(), Movddup);
40958         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
40959         DCI.recursivelyDeleteUnusedNodes(LN);
40960         return N; // Return N so it doesn't get rechecked!
40961       }
40962     }
40963 
40964     return SDValue();
40965   }
40966   case X86ISD::VBROADCAST: {
40967     SDValue Src = N.getOperand(0);
40968     SDValue BC = peekThroughBitcasts(Src);
40969     EVT SrcVT = Src.getValueType();
40970     EVT BCVT = BC.getValueType();
40971 
40972     // If broadcasting from another shuffle, attempt to simplify it.
40973     // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
40974     if (isTargetShuffle(BC.getOpcode()) &&
40975         VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
40976       unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
40977       SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
40978                                         SM_SentinelUndef);
40979       for (unsigned i = 0; i != Scale; ++i)
40980         DemandedMask[i] = i;
40981       if (SDValue Res = combineX86ShufflesRecursively(
40982               {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
40983               X86::MaxShuffleCombineDepth,
40984               /*HasVarMask*/ false, /*AllowCrossLaneVarMask*/ true,
40985               /*AllowPerLaneVarMask*/ true, DAG, Subtarget))
40986         return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
40987                            DAG.getBitcast(SrcVT, Res));
40988     }
40989 
40990     // broadcast(bitcast(src)) -> bitcast(broadcast(src))
40991     // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
40992     if (Src.getOpcode() == ISD::BITCAST &&
40993         SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits() &&
40994         DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
40995         FixedVectorType::isValidElementType(
40996             BCVT.getScalarType().getTypeForEVT(*DAG.getContext()))) {
40997       EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
40998                                    VT.getVectorNumElements());
40999       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
41000     }
41001 
41002     // vbroadcast(bitcast(vbroadcast(src))) -> bitcast(vbroadcast(src))
41003     // If we're re-broadcasting a smaller type, then broadcast with that type and
41004     // bitcast.
41005     // TODO: Do this for any splat?
41006     if (Src.getOpcode() == ISD::BITCAST &&
41007         (BC.getOpcode() == X86ISD::VBROADCAST ||
41008          BC.getOpcode() == X86ISD::VBROADCAST_LOAD) &&
41009         (VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits()) == 0 &&
41010         (VT.getSizeInBits() % BCVT.getSizeInBits()) == 0) {
41011       MVT NewVT =
41012           MVT::getVectorVT(BCVT.getSimpleVT().getScalarType(),
41013                            VT.getSizeInBits() / BCVT.getScalarSizeInBits());
41014       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
41015     }
41016 
41017     // Reduce broadcast source vector to lowest 128-bits.
41018     if (SrcVT.getSizeInBits() > 128)
41019       return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
41020                          extract128BitVector(Src, 0, DAG, DL));
41021 
41022     // broadcast(scalar_to_vector(x)) -> broadcast(x).
41023     if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
41024       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
41025 
41026     // broadcast(extract_vector_elt(x, 0)) -> broadcast(x).
41027     if (Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
41028         isNullConstant(Src.getOperand(1)) &&
41029         DAG.getTargetLoweringInfo().isTypeLegal(
41030             Src.getOperand(0).getValueType()))
41031       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
41032 
41033     // Share broadcast with the longest vector and extract low subvector (free).
41034     // Ensure the same SDValue from the SDNode use is being used.
41035     for (SDNode *User : Src->uses())
41036       if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
41037           Src == User->getOperand(0) &&
41038           User->getValueSizeInBits(0).getFixedValue() >
41039               VT.getFixedSizeInBits()) {
41040         return extractSubVector(SDValue(User, 0), 0, DAG, DL,
41041                                 VT.getSizeInBits());
41042       }
41043 
41044     // vbroadcast(scalarload X) -> vbroadcast_load X
41045     // For float loads, extract other uses of the scalar from the broadcast.
41046     if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
41047         ISD::isNormalLoad(Src.getNode())) {
41048       LoadSDNode *LN = cast<LoadSDNode>(Src);
41049       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41050       SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41051       SDValue BcastLd =
41052           DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
41053                                   LN->getMemoryVT(), LN->getMemOperand());
41054       // If the load value is used only by N, replace it via CombineTo N.
41055       bool NoReplaceExtract = Src.hasOneUse();
41056       DCI.CombineTo(N.getNode(), BcastLd);
41057       if (NoReplaceExtract) {
41058         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41059         DCI.recursivelyDeleteUnusedNodes(LN);
41060       } else {
41061         SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
41062                                   DAG.getIntPtrConstant(0, DL));
41063         DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
41064       }
41065       return N; // Return N so it doesn't get rechecked!
41066     }
41067 
41068     // Due to isTypeDesirableForOp, we won't always shrink a load truncated to
41069     // i16. So shrink it ourselves if we can make a broadcast_load.
41070     if (SrcVT == MVT::i16 && Src.getOpcode() == ISD::TRUNCATE &&
41071         Src.hasOneUse() && Src.getOperand(0).hasOneUse()) {
41072       assert(Subtarget.hasAVX2() && "Expected AVX2");
41073       SDValue TruncIn = Src.getOperand(0);
41074 
41075       // If this is a truncate of a non-extending load, we can just narrow it to
41076       // use a broadcast_load.
41077       if (ISD::isNormalLoad(TruncIn.getNode())) {
41078         LoadSDNode *LN = cast<LoadSDNode>(TruncIn);
41079         // Unless it's volatile or atomic.
41080         if (LN->isSimple()) {
41081           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41082           SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41083           SDValue BcastLd = DAG.getMemIntrinsicNode(
41084               X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
41085               LN->getPointerInfo(), LN->getOriginalAlign(),
41086               LN->getMemOperand()->getFlags());
41087           DCI.CombineTo(N.getNode(), BcastLd);
41088           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41089           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
41090           return N; // Return N so it doesn't get rechecked!
41091         }
41092       }
41093 
41094       // If this is a truncate of an i16 extload, we can directly replace it.
41095       if (ISD::isUNINDEXEDLoad(Src.getOperand(0).getNode()) &&
41096           ISD::isEXTLoad(Src.getOperand(0).getNode())) {
41097         LoadSDNode *LN = cast<LoadSDNode>(Src.getOperand(0));
41098         if (LN->getMemoryVT().getSizeInBits() == 16) {
41099           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41100           SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41101           SDValue BcastLd =
41102               DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
41103                                       LN->getMemoryVT(), LN->getMemOperand());
41104           DCI.CombineTo(N.getNode(), BcastLd);
41105           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41106           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
41107           return N; // Return N so it doesn't get rechecked!
41108         }
41109       }
41110 
41111       // If this is a truncate of a load that has been shifted right, we can
41112       // offset the pointer and use a narrower load.
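      // For example, broadcasting (i16 (trunc (srl (i64 load X), 32))) can
      // instead broadcast_load the two bytes at X+4 (little-endian layout).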
41113       if (TruncIn.getOpcode() == ISD::SRL &&
41114           TruncIn.getOperand(0).hasOneUse() &&
41115           isa<ConstantSDNode>(TruncIn.getOperand(1)) &&
41116           ISD::isNormalLoad(TruncIn.getOperand(0).getNode())) {
41117         LoadSDNode *LN = cast<LoadSDNode>(TruncIn.getOperand(0));
41118         unsigned ShiftAmt = TruncIn.getConstantOperandVal(1);
41119         // Make sure the shift amount and the load size are divisible by 16.
41120         // Don't do this if the load is volatile or atomic.
41121         if (ShiftAmt % 16 == 0 && TruncIn.getValueSizeInBits() % 16 == 0 &&
41122             LN->isSimple()) {
41123           unsigned Offset = ShiftAmt / 8;
41124           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41125           SDValue Ptr = DAG.getMemBasePlusOffset(LN->getBasePtr(),
41126                                                  TypeSize::Fixed(Offset), DL);
41127           SDValue Ops[] = { LN->getChain(), Ptr };
41128           SDValue BcastLd = DAG.getMemIntrinsicNode(
41129               X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
41130               LN->getPointerInfo().getWithOffset(Offset),
41131               LN->getOriginalAlign(),
41132               LN->getMemOperand()->getFlags());
41133           DCI.CombineTo(N.getNode(), BcastLd);
41134           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41135           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
41136           return N; // Return N so it doesn't get rechecked!
41137         }
41138       }
41139     }
41140 
41141     // vbroadcast(vzload X) -> vbroadcast_load X
41142     if (Src.getOpcode() == X86ISD::VZEXT_LOAD && Src.hasOneUse()) {
41143       MemSDNode *LN = cast<MemIntrinsicSDNode>(Src);
41144       if (LN->getMemoryVT().getSizeInBits() == VT.getScalarSizeInBits()) {
41145         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41146         SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
41147         SDValue BcastLd =
41148             DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
41149                                     LN->getMemoryVT(), LN->getMemOperand());
41150         DCI.CombineTo(N.getNode(), BcastLd);
41151         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41152         DCI.recursivelyDeleteUnusedNodes(LN);
41153         return N; // Return N so it doesn't get rechecked!
41154       }
41155     }
41156 
41157     // vbroadcast(vector load X) -> vbroadcast_load
41158     if ((SrcVT == MVT::v2f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v2i64 ||
41159          SrcVT == MVT::v4i32) &&
41160         Src.hasOneUse() && ISD::isNormalLoad(Src.getNode())) {
41161       LoadSDNode *LN = cast<LoadSDNode>(Src);
41162       // Unless the load is volatile or atomic.
41163       if (LN->isSimple()) {
41164         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41165         SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
41166         SDValue BcastLd = DAG.getMemIntrinsicNode(
41167             X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SrcVT.getScalarType(),
41168             LN->getPointerInfo(), LN->getOriginalAlign(),
41169             LN->getMemOperand()->getFlags());
41170         DCI.CombineTo(N.getNode(), BcastLd);
41171         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
41172         DCI.recursivelyDeleteUnusedNodes(LN);
41173         return N; // Return N so it doesn't get rechecked!
41174       }
41175     }
41176 
41177     return SDValue();
41178   }
41179   case X86ISD::VZEXT_MOVL: {
41180     SDValue N0 = N.getOperand(0);
41181 
41182     // If this is a vzmovl of a full vector load, replace it with a vzload, unless
41183     // the load is volatile.
41184     if (N0.hasOneUse() && ISD::isNormalLoad(N0.getNode())) {
41185       auto *LN = cast<LoadSDNode>(N0);
41186       if (SDValue VZLoad =
41187               narrowLoadToVZLoad(LN, VT.getVectorElementType(), VT, DAG)) {
41188         DCI.CombineTo(N.getNode(), VZLoad);
41189         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
41190         DCI.recursivelyDeleteUnusedNodes(LN);
41191         return N;
41192       }
41193     }
41194 
41195     // If this is a VZEXT_MOVL of a VBROADCAST_LOAD, we don't need the broadcast
41196     // and can just use a VZEXT_LOAD.
41197     // FIXME: Is there some way to do this with SimplifyDemandedVectorElts?
41198     if (N0.hasOneUse() && N0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
41199       auto *LN = cast<MemSDNode>(N0);
41200       if (VT.getScalarSizeInBits() == LN->getMemoryVT().getSizeInBits()) {
41201         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
41202         SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
41203         SDValue VZLoad =
41204             DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
41205                                     LN->getMemoryVT(), LN->getMemOperand());
41206         DCI.CombineTo(N.getNode(), VZLoad);
41207         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
41208         DCI.recursivelyDeleteUnusedNodes(LN);
41209         return N;
41210       }
41211     }
41212 
41213     // Turn (v2i64 (vzext_movl (scalar_to_vector (i64 X)))) into
41214     // (v2i64 (bitcast (v4i32 (vzext_movl (scalar_to_vector (i32 (trunc X)))))))
41215     // if the upper bits of the i64 are zero.
41216     if (N0.hasOneUse() && N0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
41217         N0.getOperand(0).hasOneUse() &&
41218         N0.getOperand(0).getValueType() == MVT::i64) {
41219       SDValue In = N0.getOperand(0);
41220       APInt Mask = APInt::getHighBitsSet(64, 32);
41221       if (DAG.MaskedValueIsZero(In, Mask)) {
41222         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, In);
41223         MVT VecVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
41224         SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Trunc);
41225         SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, SclVec);
41226         return DAG.getBitcast(VT, Movl);
41227       }
41228     }
41229 
41230     // Load a scalar integer constant directly to XMM instead of transferring an
41231     // immediate value from GPR.
41232     // vzext_movl (scalar_to_vector C) --> load [C,0...]
41233     if (N0.getOpcode() == ISD::SCALAR_TO_VECTOR) {
41234       if (auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
41235         // Create a vector constant - scalar constant followed by zeros.
41236         EVT ScalarVT = N0.getOperand(0).getValueType();
41237         Type *ScalarTy = ScalarVT.getTypeForEVT(*DAG.getContext());
41238         unsigned NumElts = VT.getVectorNumElements();
41239         Constant *Zero = ConstantInt::getNullValue(ScalarTy);
41240         SmallVector<Constant *, 32> ConstantVec(NumElts, Zero);
41241         ConstantVec[0] = const_cast<ConstantInt *>(C->getConstantIntValue());
41242 
41243         // Load the vector constant from constant pool.
41244         MVT PVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
41245         SDValue CP = DAG.getConstantPool(ConstantVector::get(ConstantVec), PVT);
41246         MachinePointerInfo MPI =
41247             MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
41248         Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
41249         return DAG.getLoad(VT, DL, DAG.getEntryNode(), CP, MPI, Alignment,
41250                            MachineMemOperand::MOLoad);
41251       }
41252     }
41253 
41254     // Pull subvector inserts into undef through VZEXT_MOVL by making it an
41255     // insert into a zero vector. This helps get VZEXT_MOVL closer to
41256     // scalar_to_vectors where 256/512 are canonicalized to an insert and a
41257     // 128-bit scalar_to_vector. This reduces the number of isel patterns.
41258     if (!DCI.isBeforeLegalizeOps() && N0.hasOneUse()) {
41259       SDValue V = peekThroughOneUseBitcasts(N0);
41260 
41261       if (V.getOpcode() == ISD::INSERT_SUBVECTOR && V.getOperand(0).isUndef() &&
41262           isNullConstant(V.getOperand(2))) {
41263         SDValue In = V.getOperand(1);
41264         MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
41265                                      In.getValueSizeInBits() /
41266                                          VT.getScalarSizeInBits());
41267         In = DAG.getBitcast(SubVT, In);
41268         SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, SubVT, In);
41269         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
41270                            getZeroVector(VT, Subtarget, DAG, DL), Movl,
41271                            V.getOperand(2));
41272       }
41273     }
41274 
41275     return SDValue();
41276   }
41277   case X86ISD::BLENDI: {
41278     SDValue N0 = N.getOperand(0);
41279     SDValue N1 = N.getOperand(1);
41280 
41281     // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
41282     // TODO: Handle MVT::v16i16 repeated blend mask.
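    // For example, a v4f64 blend mask 0b0101 becomes the v8f32 mask 0b00110011.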
41283     if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
41284         N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
41285       MVT SrcVT = N0.getOperand(0).getSimpleValueType();
41286       if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
41287           SrcVT.getScalarSizeInBits() >= 32) {
41288         unsigned BlendMask = N.getConstantOperandVal(2);
41289         unsigned Size = VT.getVectorNumElements();
41290         unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
41291         BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
41292         return DAG.getBitcast(
41293             VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
41294                             N1.getOperand(0),
41295                             DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
41296       }
41297     }
41298     return SDValue();
41299   }
41300   case X86ISD::SHUFP: {
41301     // Fold shufps(shuffle(x),shuffle(y)) -> shufps(x,y).
41302     // This is a more relaxed shuffle combiner that can ignore oneuse limits.
41303     // TODO: Support types other than v4f32.
41304     if (VT == MVT::v4f32) {
41305       bool Updated = false;
41306       SmallVector<int> Mask;
41307       SmallVector<SDValue> Ops;
41308       if (getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask) &&
41309           Ops.size() == 2) {
41310         for (int i = 0; i != 2; ++i) {
41311           SmallVector<SDValue> SubOps;
41312           SmallVector<int> SubMask, SubScaledMask;
41313           SDValue Sub = peekThroughBitcasts(Ops[i]);
41314           // TODO: Scaling might be easier if we specify the demanded elts.
41315           if (getTargetShuffleInputs(Sub, SubOps, SubMask, DAG, 0, false) &&
41316               scaleShuffleElements(SubMask, 4, SubScaledMask) &&
41317               SubOps.size() == 1 && isUndefOrInRange(SubScaledMask, 0, 4)) {
41318             int Ofs = i * 2;
41319             Mask[Ofs + 0] = SubScaledMask[Mask[Ofs + 0] % 4] + (i * 4);
41320             Mask[Ofs + 1] = SubScaledMask[Mask[Ofs + 1] % 4] + (i * 4);
41321             Ops[i] = DAG.getBitcast(VT, SubOps[0]);
41322             Updated = true;
41323           }
41324         }
41325       }
41326       if (Updated) {
41327         for (int &M : Mask)
41328           M %= 4;
41329         Ops.push_back(getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
41330         return DAG.getNode(X86ISD::SHUFP, DL, VT, Ops);
41331       }
41332     }
41333     return SDValue();
41334   }
41335   case X86ISD::VPERMI: {
41336     // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
41337     // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
41338     SDValue N0 = N.getOperand(0);
41339     SDValue N1 = N.getOperand(1);
41340     unsigned EltSizeInBits = VT.getScalarSizeInBits();
41341     if (N0.getOpcode() == ISD::BITCAST &&
41342         N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
41343       SDValue Src = N0.getOperand(0);
41344       EVT SrcVT = Src.getValueType();
41345       SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
41346       return DAG.getBitcast(VT, Res);
41347     }
41348     return SDValue();
41349   }
41350   case X86ISD::VPERM2X128: {
41351     // Fold vperm2x128(bitcast(x),bitcast(y),c) -> bitcast(vperm2x128(x,y,c)).
41352     SDValue LHS = N->getOperand(0);
41353     SDValue RHS = N->getOperand(1);
41354     if (LHS.getOpcode() == ISD::BITCAST &&
41355         (RHS.getOpcode() == ISD::BITCAST || RHS.isUndef())) {
41356       EVT SrcVT = LHS.getOperand(0).getValueType();
41357       if (RHS.isUndef() || SrcVT == RHS.getOperand(0).getValueType()) {
41358         return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT,
41359                                               DAG.getBitcast(SrcVT, LHS),
41360                                               DAG.getBitcast(SrcVT, RHS),
41361                                               N->getOperand(2)));
41362       }
41363     }
41364 
41365     // Fold vperm2x128(op(),op()) -> op(vperm2x128(),vperm2x128()).
41366     if (SDValue Res = canonicalizeLaneShuffleWithRepeatedOps(N, DAG, DL))
41367       return Res;
41368 
41369     // Fold vperm2x128 subvector shuffle with an inner concat pattern.
41370     // vperm2x128(concat(X,Y),concat(Z,W)) --> concat X,Y etc.
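    // Each imm8 nibble selects one 128-bit half: 0/1 are the halves of the
    // first source and 2/3 the halves of the second; FindSubVector128 gives up
    // on anything else (e.g. a zeroed lane).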
41371     auto FindSubVector128 = [&](unsigned Idx) {
41372       if (Idx > 3)
41373         return SDValue();
41374       SDValue Src = peekThroughBitcasts(N.getOperand(Idx < 2 ? 0 : 1));
41375       SmallVector<SDValue> SubOps;
41376       if (collectConcatOps(Src.getNode(), SubOps, DAG) && SubOps.size() == 2)
41377         return SubOps[Idx & 1];
41378       unsigned NumElts = Src.getValueType().getVectorNumElements();
41379       if ((Idx & 1) == 1 && Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
41380           Src.getOperand(1).getValueSizeInBits() == 128 &&
41381           Src.getConstantOperandAPInt(2) == (NumElts / 2)) {
41382         return Src.getOperand(1);
41383       }
41384       return SDValue();
41385     };
41386     unsigned Imm = N.getConstantOperandVal(2);
41387     if (SDValue SubLo = FindSubVector128(Imm & 0x0F)) {
41388       if (SDValue SubHi = FindSubVector128((Imm & 0xF0) >> 4)) {
41389         MVT SubVT = VT.getHalfNumVectorElementsVT();
41390         SubLo = DAG.getBitcast(SubVT, SubLo);
41391         SubHi = DAG.getBitcast(SubVT, SubHi);
41392         return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, SubLo, SubHi);
41393       }
41394     }
41395     return SDValue();
41396   }
41397   case X86ISD::PSHUFD:
41398   case X86ISD::PSHUFLW:
41399   case X86ISD::PSHUFHW:
41400     Mask = getPSHUFShuffleMask(N);
41401     assert(Mask.size() == 4);
41402     break;
41403   case X86ISD::MOVSD:
41404   case X86ISD::MOVSH:
41405   case X86ISD::MOVSS: {
41406     SDValue N0 = N.getOperand(0);
41407     SDValue N1 = N.getOperand(1);
41408 
41409     // Canonicalize scalar FPOps:
41410     // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
41411     // If commutable, allow OP(N1[0], N0[0]).
41412     unsigned Opcode1 = N1.getOpcode();
41413     if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
41414         Opcode1 == ISD::FDIV) {
41415       SDValue N10 = N1.getOperand(0);
41416       SDValue N11 = N1.getOperand(1);
41417       if (N10 == N0 ||
41418           (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
41419         if (N10 != N0)
41420           std::swap(N10, N11);
41421         MVT SVT = VT.getVectorElementType();
41422         SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
41423         N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
41424         N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
41425         SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
41426         SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
41427         return DAG.getNode(Opcode, DL, VT, N0, SclVec);
41428       }
41429     }
41430 
41431     return SDValue();
41432   }
41433   case X86ISD::INSERTPS: {
41434     assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
41435     SDValue Op0 = N.getOperand(0);
41436     SDValue Op1 = N.getOperand(1);
41437     unsigned InsertPSMask = N.getConstantOperandVal(2);
41438     unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
41439     unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
41440     unsigned ZeroMask = InsertPSMask & 0xF;
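    // The imm8 layout is: bits [7:6] select the element of Op1 to insert
    // (SrcIdx), bits [5:4] select the destination element in Op0 (DstIdx), and
    // bits [3:0] zero out individual result elements (ZeroMask).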
41441 
41442     // If we zero out all elements from Op0 then we don't need to reference it.
41443     if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
41444       return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
41445                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
41446 
41447     // If we zero out the element from Op1 then we don't need to reference it.
41448     if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
41449       return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
41450                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
41451 
41452     // Attempt to merge insertps Op1 with an inner target shuffle node.
41453     SmallVector<int, 8> TargetMask1;
41454     SmallVector<SDValue, 2> Ops1;
41455     APInt KnownUndef1, KnownZero1;
41456     if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
41457                                      KnownZero1)) {
41458       if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
41459         // Zero/UNDEF insertion - zero out element and remove dependency.
41460         InsertPSMask |= (1u << DstIdx);
41461         return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
41462                            DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
41463       }
41464       // Update insertps mask srcidx and reference the source input directly.
41465       int M = TargetMask1[SrcIdx];
41466       assert(0 <= M && M < 8 && "Shuffle index out of range");
41467       InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
41468       Op1 = Ops1[M < 4 ? 0 : 1];
41469       return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
41470                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
41471     }
41472 
41473     // Attempt to merge insertps Op0 with an inner target shuffle node.
41474     SmallVector<int, 8> TargetMask0;
41475     SmallVector<SDValue, 2> Ops0;
41476     APInt KnownUndef0, KnownZero0;
41477     if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
41478                                      KnownZero0)) {
41479       bool Updated = false;
41480       bool UseInput00 = false;
41481       bool UseInput01 = false;
41482       for (int i = 0; i != 4; ++i) {
41483         if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
41484           // No change if element is already zero or the inserted element.
41485           continue;
41486         }
41487 
41488         if (KnownUndef0[i] || KnownZero0[i]) {
41489           // If the target mask is undef/zero then we must zero the element.
41490           InsertPSMask |= (1u << i);
41491           Updated = true;
41492           continue;
41493         }
41494 
41495         // The input vector element must be inline.
41496         int M = TargetMask0[i];
41497         if (M != i && M != (i + 4))
41498           return SDValue();
41499 
41500         // Determine which inputs of the target shuffle we're using.
41501         UseInput00 |= (0 <= M && M < 4);
41502         UseInput01 |= (4 <= M);
41503       }
41504 
41505       // If we're not using both inputs of the target shuffle then use the
41506       // referenced input directly.
41507       if (UseInput00 && !UseInput01) {
41508         Updated = true;
41509         Op0 = Ops0[0];
41510       } else if (!UseInput00 && UseInput01) {
41511         Updated = true;
41512         Op0 = Ops0[1];
41513       }
41514 
41515       if (Updated)
41516         return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
41517                            DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
41518     }
41519 
41520     // If we're inserting an element from a vbroadcast load, fold the
41521     // load into the X86insertps instruction. We need to convert the scalar
41522     // load to a vector and clear the source lane of the INSERTPS control.
41523     if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
41524       auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
41525       if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
41526         SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
41527                                    MemIntr->getBasePtr(),
41528                                    MemIntr->getMemOperand());
41529         SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
41530                            DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
41531                                        Load),
41532                            DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
41533         DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
41534         return Insert;
41535       }
41536     }
41537 
41538     return SDValue();
41539   }
41540   default:
41541     return SDValue();
41542   }
41543 
41544   // Nuke no-op shuffles that show up after combining.
41545   if (isNoopShuffleMask(Mask))
41546     return N.getOperand(0);
41547 
41548   // Look for simplifications involving one or two shuffle instructions.
41549   SDValue V = N.getOperand(0);
41550   switch (N.getOpcode()) {
41551   default:
41552     break;
41553   case X86ISD::PSHUFLW:
41554   case X86ISD::PSHUFHW:
41555     assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
41556 
41557     // See if this reduces to a PSHUFD which is no more expensive and can
41558     // combine with more operations. Note that it has to at least flip the
41559     // dwords as otherwise it would have been removed as a no-op.
41560     if (ArrayRef(Mask).equals({2, 3, 0, 1})) {
41561       int DMask[] = {0, 1, 2, 3};
41562       int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
41563       DMask[DOffset + 0] = DOffset + 1;
41564       DMask[DOffset + 1] = DOffset + 0;
41565       MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
41566       V = DAG.getBitcast(DVT, V);
41567       V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
41568                       getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
41569       return DAG.getBitcast(VT, V);
41570     }
41571 
41572     // Look for shuffle patterns which can be implemented as a single unpack.
41573     // FIXME: This doesn't handle the location of the PSHUFD generically, and
41574     // only works when we have a PSHUFD followed by two half-shuffles.
41575     if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
41576         (V.getOpcode() == X86ISD::PSHUFLW ||
41577          V.getOpcode() == X86ISD::PSHUFHW) &&
41578         V.getOpcode() != N.getOpcode() &&
41579         V.hasOneUse() && V.getOperand(0).hasOneUse()) {
41580       SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
41581       if (D.getOpcode() == X86ISD::PSHUFD) {
41582         SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
41583         SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
41584         int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
41585         int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
41586         int WordMask[8];
41587         for (int i = 0; i < 4; ++i) {
41588           WordMask[i + NOffset] = Mask[i] + NOffset;
41589           WordMask[i + VOffset] = VMask[i] + VOffset;
41590         }
41591         // Map the word mask through the DWord mask.
41592         int MappedMask[8];
41593         for (int i = 0; i < 8; ++i)
41594           MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
41595         if (ArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
41596             ArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
41597           // We can replace all three shuffles with an unpack.
41598           V = DAG.getBitcast(VT, D.getOperand(0));
41599           return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
41600                                                 : X86ISD::UNPCKH,
41601                              DL, VT, V, V);
41602         }
41603       }
41604     }
41605 
41606     break;
41607 
41608   case X86ISD::PSHUFD:
41609     if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
41610       return NewN;
41611 
41612     break;
41613   }
41614 
41615   return SDValue();
41616 }
41617 
41618 /// Checks whether the shuffle mask takes successive elements alternately from
41619 /// two vectors, with each element taken from its original index.
41620 /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
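/// For instance, for <0, 5, 2, 7> with 4 elements, the even lanes all come
/// from operand 0 and the odd lanes all come from operand 1, each at its
/// original index, so the mask is accepted and \p Op0Even is set to true.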
41621 static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
41622 
41623   int ParitySrc[2] = {-1, -1};
41624   unsigned Size = Mask.size();
41625   for (unsigned i = 0; i != Size; ++i) {
41626     int M = Mask[i];
41627     if (M < 0)
41628       continue;
41629 
41630     // Make sure we are using the matching element from the input.
41631     if ((M % Size) != i)
41632       return false;
41633 
41634     // Make sure we use the same input for all elements of the same parity.
41635     int Src = M / Size;
41636     if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
41637       return false;
41638     ParitySrc[i % 2] = Src;
41639   }
41640 
41641   // Make sure each input is used.
41642   if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
41643     return false;
41644 
41645   Op0Even = ParitySrc[0] == 0;
41646   return true;
41647 }
41648 
41649 /// Returns true iff the shuffle node \p N can be replaced with an ADDSUB
41650 /// (or SUBADD) operation. If true is returned then the operands of the
41651 /// ADDSUB (or SUBADD) operation are written to the parameters \p Opnd0 and
41652 /// \p Opnd1.
41653 ///
41654 /// We combine shuffles to ADDSUB (or SUBADD) directly on the abstract vector
41655 /// shuffle nodes so the pattern is easier to match generically. We also insert
41656 /// dummy vector shuffle nodes for the operands which explicitly discard the
41657 /// unused lanes, so that the rest of the combiner can see that they are unused.
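/// An illustrative sketch (values chosen for this example):
///   shuffle (fsub a, b), (fadd a, b), <0, 5, 2, 7>
/// takes the even lanes from the FSUB and the odd lanes from the FADD, which
/// is exactly X86ISD::ADDSUB(a, b); the mirrored mask <4, 1, 6, 3> would be
/// recognized as the SUBADD form instead.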
41658 static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
41659                              SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
41660                              bool &IsSubAdd) {
41661 
41662   EVT VT = N->getValueType(0);
41663   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41664   if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
41665       !VT.getSimpleVT().isFloatingPoint())
41666     return false;
41667 
41668   // We only handle target-independent shuffles.
41669   // FIXME: It would be easy and harmless to use the target shuffle mask
41670   // extraction tool to support more.
41671   if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
41672     return false;
41673 
41674   SDValue V1 = N->getOperand(0);
41675   SDValue V2 = N->getOperand(1);
41676 
41677   // Make sure we have an FADD and an FSUB.
41678   if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
41679       (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
41680       V1.getOpcode() == V2.getOpcode())
41681     return false;
41682 
41683   // If there are other uses of these operations we can't fold them.
41684   if (!V1->hasOneUse() || !V2->hasOneUse())
41685     return false;
41686 
41687   // Ensure that both operations have the same operands. Note that we can
41688   // commute the FADD operands.
41689   SDValue LHS, RHS;
41690   if (V1.getOpcode() == ISD::FSUB) {
41691     LHS = V1->getOperand(0); RHS = V1->getOperand(1);
41692     if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
41693         (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
41694       return false;
41695   } else {
41696     assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
41697     LHS = V2->getOperand(0); RHS = V2->getOperand(1);
41698     if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
41699         (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
41700       return false;
41701   }
41702 
41703   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
41704   bool Op0Even;
41705   if (!isAddSubOrSubAddMask(Mask, Op0Even))
41706     return false;
41707 
41708   // It's a subadd if the vector in the even parity is an FADD.
41709   IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
41710                      : V2->getOpcode() == ISD::FADD;
41711 
41712   Opnd0 = LHS;
41713   Opnd1 = RHS;
41714   return true;
41715 }
41716 
41717 /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
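/// An illustrative sketch (values chosen for this example):
///   shuffle (fma a, b, c), (X86ISD::FMSUB a, b, c), <0, 5, 2, 7>
/// takes the even lanes from the FMA and the odd lanes from the FMSUB, which
/// matches X86ISD::FMSUBADD(a, b, c).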
41718 static SDValue combineShuffleToFMAddSub(SDNode *N,
41719                                         const X86Subtarget &Subtarget,
41720                                         SelectionDAG &DAG) {
41721   // We only handle target-independent shuffles.
41722   // FIXME: It would be easy and harmless to use the target shuffle mask
41723   // extraction tool to support more.
41724   if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
41725     return SDValue();
41726 
41727   MVT VT = N->getSimpleValueType(0);
41728   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41729   if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
41730     return SDValue();
41731 
41732   // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
41733   SDValue Op0 = N->getOperand(0);
41734   SDValue Op1 = N->getOperand(1);
41735   SDValue FMAdd = Op0, FMSub = Op1;
41736   if (FMSub.getOpcode() != X86ISD::FMSUB)
41737     std::swap(FMAdd, FMSub);
41738 
41739   if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
41740       FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
41741       FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
41742       FMAdd.getOperand(2) != FMSub.getOperand(2))
41743     return SDValue();
41744 
41745   // Check for correct shuffle mask.
41746   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
41747   bool Op0Even;
41748   if (!isAddSubOrSubAddMask(Mask, Op0Even))
41749     return SDValue();
41750 
41751   // FMAddSub takes zeroth operand from FMSub node.
41752   SDLoc DL(N);
41753   bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
41754   unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
41755   return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
41756                      FMAdd.getOperand(2));
41757 }
41758 
41759 /// Try to combine a shuffle into a target-specific add-sub or
41760 /// mul-add-sub node.
41761 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
41762                                                 const X86Subtarget &Subtarget,
41763                                                 SelectionDAG &DAG) {
41764   if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
41765     return V;
41766 
41767   SDValue Opnd0, Opnd1;
41768   bool IsSubAdd;
41769   if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
41770     return SDValue();
41771 
41772   MVT VT = N->getSimpleValueType(0);
41773   SDLoc DL(N);
41774 
41775   // Try to generate X86ISD::FMADDSUB node here.
41776   SDValue Opnd2;
41777   if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
41778     unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
41779     return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
41780   }
41781 
41782   if (IsSubAdd)
41783     return SDValue();
41784 
41785   // Do not generate X86ISD::ADDSUB node for 512-bit types even though
41786   // the ADDSUB idiom has been successfully recognized. There are no known
41787   // X86 targets with 512-bit ADDSUB instructions!
41788   if (VT.is512BitVector())
41789     return SDValue();
41790 
41791   // Do not generate X86ISD::ADDSUB node for FP16's vector types even though
41792   // the ADDSUB idiom has been successfully recognized. There are no known
41793   // X86 targets with FP16 ADDSUB instructions!
41794   if (VT.getVectorElementType() == MVT::f16)
41795     return SDValue();
41796 
41797   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
41798 }
41799 
41800 // We are looking for a shuffle where both sources are concatenated with undef
41801 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
41802 // if we can express this as a single-source shuffle, that's preferable.
41803 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
41804                                            const X86Subtarget &Subtarget) {
41805   if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
41806     return SDValue();
41807 
41808   EVT VT = N->getValueType(0);
41809 
41810   // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
41811   if (!VT.is128BitVector() && !VT.is256BitVector())
41812     return SDValue();
41813 
41814   if (VT.getVectorElementType() != MVT::i32 &&
41815       VT.getVectorElementType() != MVT::i64 &&
41816       VT.getVectorElementType() != MVT::f32 &&
41817       VT.getVectorElementType() != MVT::f64)
41818     return SDValue();
41819 
41820   SDValue N0 = N->getOperand(0);
41821   SDValue N1 = N->getOperand(1);
41822 
41823   // Check that both sources are concats with undef.
41824   if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
41825       N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
41826       N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
41827       !N1.getOperand(1).isUndef())
41828     return SDValue();
41829 
41830   // Construct the new shuffle mask. Elements from the first source retain their
41831   // index, but elements from the second source no longer need to skip an undef.
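  // E.g. (illustrative v4i32 case): mask <0, 1, 4, 5> over
  // (concat t1, undef), (concat t2, undef) becomes mask <0, 1, 2, 3> over
  // (concat t1, t2).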
41832   SmallVector<int, 8> Mask;
41833   int NumElts = VT.getVectorNumElements();
41834 
41835   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
41836   for (int Elt : SVOp->getMask())
41837     Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
41838 
41839   SDLoc DL(N);
41840   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
41841                                N1.getOperand(0));
41842   return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
41843 }
41844 
41845 /// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
41846 /// low half of each source vector and does not set any high half elements in
41847 /// the destination vector, narrow the shuffle to half its original size.
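/// For instance (an illustrative case): a v8f32 shuffle with mask
/// <0, 8, 1, 9, u, u, u, u> only reads the low xmm half of each source, so it
/// can be rebuilt as a v4f32 shuffle of those halves (mask <0, 4, 1, 5>)
/// concatenated with undef.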
41848 static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
41849   if (!Shuf->getValueType(0).isSimple())
41850     return SDValue();
41851   MVT VT = Shuf->getSimpleValueType(0);
41852   if (!VT.is256BitVector() && !VT.is512BitVector())
41853     return SDValue();
41854 
41855   // See if we can ignore all of the high elements of the shuffle.
41856   ArrayRef<int> Mask = Shuf->getMask();
41857   if (!isUndefUpperHalf(Mask))
41858     return SDValue();
41859 
41860   // Check if the shuffle mask accesses only the low half of each input vector
41861   // (half-index output is 0 or 2).
41862   int HalfIdx1, HalfIdx2;
41863   SmallVector<int, 8> HalfMask(Mask.size() / 2);
41864   if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
41865       (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
41866     return SDValue();
41867 
41868   // Create a half-width shuffle to replace the unnecessarily wide shuffle.
41869   // The trick is knowing that all of the insert/extract are actually free
41870   // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
41871   // of narrow inputs into a narrow output, and that is always cheaper than
41872   // the wide shuffle that we started with.
41873   return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
41874                                Shuf->getOperand(1), HalfMask, HalfIdx1,
41875                                HalfIdx2, false, DAG, /*UseConcat*/true);
41876 }
41877 
41878 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
41879                               TargetLowering::DAGCombinerInfo &DCI,
41880                               const X86Subtarget &Subtarget) {
41881   if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
41882     if (SDValue V = narrowShuffle(Shuf, DAG))
41883       return V;
41884 
41885   // If we have legalized the vector types, look for blends of FADD and FSUB
41886   // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
41887   SDLoc dl(N);
41888   EVT VT = N->getValueType(0);
41889   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
41890   if (TLI.isTypeLegal(VT))
41891     if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
41892       return AddSub;
41893 
41894   // Attempt to combine into a vector load/broadcast.
41895   if (SDValue LD = combineToConsecutiveLoads(
41896           VT, SDValue(N, 0), dl, DAG, Subtarget, /*IsAfterLegalize*/ true))
41897     return LD;
41898 
41899   // For AVX2, we sometimes want to combine
41900   // (vector_shuffle <mask> (concat_vectors t1, undef)
41901   //                        (concat_vectors t2, undef))
41902   // Into:
41903   // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
41904   // Since the latter can be efficiently lowered with VPERMD/VPERMQ
41905   if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
41906     return ShufConcat;
41907 
41908   if (isTargetShuffle(N->getOpcode())) {
41909     SDValue Op(N, 0);
41910     if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
41911       return Shuffle;
41912 
41913     // Try recursively combining arbitrary sequences of x86 shuffle
41914     // instructions into higher-order shuffles. We do this after combining
41915     // specific PSHUF instruction sequences into their minimal form so that we
41916     // can evaluate how many specialized shuffle instructions are involved in
41917     // a particular chain.
41918     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
41919       return Res;
41920 
41921     // Simplify source operands based on shuffle mask.
41922     // TODO - merge this into combineX86ShufflesRecursively.
41923     APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
41924     if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, DCI))
41925       return SDValue(N, 0);
41926 
41927     // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
41928     // Perform this after other shuffle combines to allow inner shuffles to be
41929     // combined away first.
41930     if (SDValue BinOp = canonicalizeShuffleWithBinOps(Op, DAG, dl))
41931       return BinOp;
41932   }
41933 
41934   return SDValue();
41935 }
41936 
41937 // Simplify variable target shuffle masks based on the demanded elements.
41938 // TODO: Handle DemandedBits in mask indices as well?
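// For example (an illustrative case, not tied to a specific caller): if only
// the low 8 lanes of a PSHUFB result are demanded and its mask is a
// constant-pool v16i8 vector, the upper 8 mask bytes can be rewritten to
// undef, which may allow the constant to simplify further.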
41939 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetShuffle(
41940     SDValue Op, const APInt &DemandedElts, unsigned MaskIndex,
41941     TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {
41942   // If we're demanding all elements, don't bother trying to simplify the mask.
41943   unsigned NumElts = DemandedElts.getBitWidth();
41944   if (DemandedElts.isAllOnes())
41945     return false;
41946 
41947   SDValue Mask = Op.getOperand(MaskIndex);
41948   if (!Mask.hasOneUse())
41949     return false;
41950 
41951   // Attempt to generically simplify the variable shuffle mask.
41952   APInt MaskUndef, MaskZero;
41953   if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
41954                                  Depth + 1))
41955     return true;
41956 
41957   // Attempt to extract+simplify a (constant pool load) shuffle mask.
41958   // TODO: Support other types from getTargetShuffleMaskIndices?
41959   SDValue BC = peekThroughOneUseBitcasts(Mask);
41960   EVT BCVT = BC.getValueType();
41961   auto *Load = dyn_cast<LoadSDNode>(BC);
41962   if (!Load)
41963     return false;
41964 
41965   const Constant *C = getTargetConstantFromNode(Load);
41966   if (!C)
41967     return false;
41968 
41969   Type *CTy = C->getType();
41970   if (!CTy->isVectorTy() ||
41971       CTy->getPrimitiveSizeInBits() != Mask.getValueSizeInBits())
41972     return false;
41973 
41974   // Handle scaling for i64 elements on 32-bit targets.
41975   unsigned NumCstElts = cast<FixedVectorType>(CTy)->getNumElements();
41976   if (NumCstElts != NumElts && NumCstElts != (NumElts * 2))
41977     return false;
41978   unsigned Scale = NumCstElts / NumElts;
41979 
41980   // Simplify mask if we have an undemanded element that is not undef.
41981   bool Simplified = false;
41982   SmallVector<Constant *, 32> ConstVecOps;
41983   for (unsigned i = 0; i != NumCstElts; ++i) {
41984     Constant *Elt = C->getAggregateElement(i);
41985     if (!DemandedElts[i / Scale] && !isa<UndefValue>(Elt)) {
41986       ConstVecOps.push_back(UndefValue::get(Elt->getType()));
41987       Simplified = true;
41988       continue;
41989     }
41990     ConstVecOps.push_back(Elt);
41991   }
41992   if (!Simplified)
41993     return false;
41994 
41995   // Generate new constant pool entry + legalize immediately for the load.
41996   SDLoc DL(Op);
41997   SDValue CV = TLO.DAG.getConstantPool(ConstantVector::get(ConstVecOps), BCVT);
41998   SDValue LegalCV = LowerConstantPool(CV, TLO.DAG);
41999   SDValue NewMask = TLO.DAG.getLoad(
42000       BCVT, DL, TLO.DAG.getEntryNode(), LegalCV,
42001       MachinePointerInfo::getConstantPool(TLO.DAG.getMachineFunction()),
42002       Load->getAlign());
42003   return TLO.CombineTo(Mask, TLO.DAG.getBitcast(Mask.getValueType(), NewMask));
42004 }
42005 
42006 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
42007     SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
42008     TargetLoweringOpt &TLO, unsigned Depth) const {
42009   int NumElts = DemandedElts.getBitWidth();
42010   unsigned Opc = Op.getOpcode();
42011   EVT VT = Op.getValueType();
42012 
42013   // Handle special case opcodes.
42014   switch (Opc) {
42015   case X86ISD::PMULDQ:
42016   case X86ISD::PMULUDQ: {
42017     APInt LHSUndef, LHSZero;
42018     APInt RHSUndef, RHSZero;
42019     SDValue LHS = Op.getOperand(0);
42020     SDValue RHS = Op.getOperand(1);
42021     if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
42022                                    Depth + 1))
42023       return true;
42024     if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
42025                                    Depth + 1))
42026       return true;
42027     // Multiply by zero.
42028     KnownZero = LHSZero | RHSZero;
42029     break;
42030   }
42031   case X86ISD::VPMADDWD: {
42032     APInt LHSUndef, LHSZero;
42033     APInt RHSUndef, RHSZero;
42034     SDValue LHS = Op.getOperand(0);
42035     SDValue RHS = Op.getOperand(1);
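    // Each i32 result element is formed from source i16 elements 2*i and
    // 2*i+1, so scale the demanded-element mask up by 2. E.g. (illustrative)
    // demanding only result element 1 of a v4i32 VPMADDWD demands source
    // elements 2 and 3 of each v8i16 operand.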
42036     APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, 2 * NumElts);
42037 
42038     if (SimplifyDemandedVectorElts(LHS, DemandedSrcElts, LHSUndef, LHSZero, TLO,
42039                                    Depth + 1))
42040       return true;
42041     if (SimplifyDemandedVectorElts(RHS, DemandedSrcElts, RHSUndef, RHSZero, TLO,
42042                                    Depth + 1))
42043       return true;
42044 
42045     // TODO: Multiply by zero.
42046 
42047     // If RHS/LHS elements are known zero then we don't need the LHS/RHS equivalent.
42048     APInt DemandedLHSElts = DemandedSrcElts & ~RHSZero;
42049     if (SimplifyDemandedVectorElts(LHS, DemandedLHSElts, LHSUndef, LHSZero, TLO,
42050                                    Depth + 1))
42051       return true;
42052     APInt DemandedRHSElts = DemandedSrcElts & ~LHSZero;
42053     if (SimplifyDemandedVectorElts(RHS, DemandedRHSElts, RHSUndef, RHSZero, TLO,
42054                                    Depth + 1))
42055       return true;
42056     break;
42057   }
42058   case X86ISD::PSADBW: {
42059     SDValue LHS = Op.getOperand(0);
42060     SDValue RHS = Op.getOperand(1);
42061     assert(VT.getScalarType() == MVT::i64 &&
42062            LHS.getValueType() == RHS.getValueType() &&
42063            LHS.getValueType().getScalarType() == MVT::i8 &&
42064            "Unexpected PSADBW types");
42065 
42066     // Aggressively peek through ops to get at the demanded elts.
42067     if (!DemandedElts.isAllOnes()) {
42068       unsigned NumSrcElts = LHS.getValueType().getVectorNumElements();
42069       APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
42070       SDValue NewLHS = SimplifyMultipleUseDemandedVectorElts(
42071           LHS, DemandedSrcElts, TLO.DAG, Depth + 1);
42072       SDValue NewRHS = SimplifyMultipleUseDemandedVectorElts(
42073           RHS, DemandedSrcElts, TLO.DAG, Depth + 1);
42074       if (NewLHS || NewRHS) {
42075         NewLHS = NewLHS ? NewLHS : LHS;
42076         NewRHS = NewRHS ? NewRHS : RHS;
42077         return TLO.CombineTo(
42078             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
42079       }
42080     }
42081     break;
42082   }
42083   case X86ISD::VSHL:
42084   case X86ISD::VSRL:
42085   case X86ISD::VSRA: {
42086     // We only need the bottom 64-bits of the (128-bit) shift amount.
42087     SDValue Amt = Op.getOperand(1);
42088     MVT AmtVT = Amt.getSimpleValueType();
42089     assert(AmtVT.is128BitVector() && "Unexpected value type");
42090 
42091     // If the shift amount is reused only as the amount of other SSE shifts,
42092     // then we know that only its bottom 64-bits are ever used.
42093     bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
42094       unsigned UseOpc = Use->getOpcode();
42095       return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
42096               UseOpc == X86ISD::VSRA) &&
42097              Use->getOperand(0) != Amt;
42098     });
42099 
42100     APInt AmtUndef, AmtZero;
42101     unsigned NumAmtElts = AmtVT.getVectorNumElements();
42102     APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
42103     if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
42104                                    Depth + 1, AssumeSingleUse))
42105       return true;
42106     [[fallthrough]];
42107   }
42108   case X86ISD::VSHLI:
42109   case X86ISD::VSRLI:
42110   case X86ISD::VSRAI: {
42111     SDValue Src = Op.getOperand(0);
42112     APInt SrcUndef;
42113     if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
42114                                    Depth + 1))
42115       return true;
42116 
42117     // Fold shift(0,x) -> 0
42118     if (DemandedElts.isSubsetOf(KnownZero))
42119       return TLO.CombineTo(
42120           Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
42121 
42122     // Aggressively peek through ops to get at the demanded elts.
42123     if (!DemandedElts.isAllOnes())
42124       if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
42125               Src, DemandedElts, TLO.DAG, Depth + 1))
42126         return TLO.CombineTo(
42127             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc, Op.getOperand(1)));
42128     break;
42129   }
42130   case X86ISD::VPSHA:
42131   case X86ISD::VPSHL:
42132   case X86ISD::VSHLV:
42133   case X86ISD::VSRLV:
42134   case X86ISD::VSRAV: {
42135     APInt LHSUndef, LHSZero;
42136     APInt RHSUndef, RHSZero;
42137     SDValue LHS = Op.getOperand(0);
42138     SDValue RHS = Op.getOperand(1);
42139     if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
42140                                    Depth + 1))
42141       return true;
42142 
42143     // Fold shift(0,x) -> 0
42144     if (DemandedElts.isSubsetOf(LHSZero))
42145       return TLO.CombineTo(
42146           Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
42147 
42148     if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
42149                                    Depth + 1))
42150       return true;
42151 
42152     KnownZero = LHSZero;
42153     break;
42154   }
42155   case X86ISD::KSHIFTL: {
42156     SDValue Src = Op.getOperand(0);
42157     auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
42158     assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
42159     unsigned ShiftAmt = Amt->getZExtValue();
42160 
42161     if (ShiftAmt == 0)
42162       return TLO.CombineTo(Op, Src);
42163 
42164     // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
42165     // single shift.  We can do this if the bottom bits (which are shifted
42166     // out) are never demanded.
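    // E.g. (illustrative): KSHIFTL(KSHIFTR(X, 2), 3) with the low 3 mask
    // elements undemanded simplifies to KSHIFTL(X, 1).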
42167     if (Src.getOpcode() == X86ISD::KSHIFTR) {
42168       if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
42169         unsigned C1 = Src.getConstantOperandVal(1);
42170         unsigned NewOpc = X86ISD::KSHIFTL;
42171         int Diff = ShiftAmt - C1;
42172         if (Diff < 0) {
42173           Diff = -Diff;
42174           NewOpc = X86ISD::KSHIFTR;
42175         }
42176 
42177         SDLoc dl(Op);
42178         SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
42179         return TLO.CombineTo(
42180             Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
42181       }
42182     }
42183 
42184     APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
42185     if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
42186                                    Depth + 1))
42187       return true;
42188 
42189     KnownUndef <<= ShiftAmt;
42190     KnownZero <<= ShiftAmt;
42191     KnownZero.setLowBits(ShiftAmt);
42192     break;
42193   }
42194   case X86ISD::KSHIFTR: {
42195     SDValue Src = Op.getOperand(0);
42196     auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
42197     assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
42198     unsigned ShiftAmt = Amt->getZExtValue();
42199 
42200     if (ShiftAmt == 0)
42201       return TLO.CombineTo(Op, Src);
42202 
42203     // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
42204     // single shift.  We can do this if the top bits (which are shifted
42205     // out) are never demanded.
42206     if (Src.getOpcode() == X86ISD::KSHIFTL) {
42207       if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
42208         unsigned C1 = Src.getConstantOperandVal(1);
42209         unsigned NewOpc = X86ISD::KSHIFTR;
42210         int Diff = ShiftAmt - C1;
42211         if (Diff < 0) {
42212           Diff = -Diff;
42213           NewOpc = X86ISD::KSHIFTL;
42214         }
42215 
42216         SDLoc dl(Op);
42217         SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
42218         return TLO.CombineTo(
42219             Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
42220       }
42221     }
42222 
42223     APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
42224     if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
42225                                    Depth + 1))
42226       return true;
42227 
42228     KnownUndef.lshrInPlace(ShiftAmt);
42229     KnownZero.lshrInPlace(ShiftAmt);
42230     KnownZero.setHighBits(ShiftAmt);
42231     break;
42232   }
42233   case X86ISD::ANDNP: {
42234     // ANDNP = (~LHS & RHS);
42235     SDValue LHS = Op.getOperand(0);
42236     SDValue RHS = Op.getOperand(1);
42237 
42238     auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
42239       APInt UndefElts;
42240       SmallVector<APInt> EltBits;
42241       int NumElts = VT.getVectorNumElements();
42242       int EltSizeInBits = VT.getScalarSizeInBits();
42243       APInt OpBits = APInt::getAllOnes(EltSizeInBits);
42244       APInt OpElts = DemandedElts;
42245       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
42246                                         EltBits)) {
42247         OpBits.clearAllBits();
42248         OpElts.clearAllBits();
42249         for (int I = 0; I != NumElts; ++I) {
42250           if (!DemandedElts[I])
42251             continue;
42252           if (UndefElts[I]) {
42253             // We can't assume an undef src element gives an undef dst - the
42254             // other src might be zero.
42255             OpBits.setAllBits();
42256             OpElts.setBit(I);
42257           } else if ((Invert && !EltBits[I].isAllOnes()) ||
42258                      (!Invert && !EltBits[I].isZero())) {
42259             OpBits |= Invert ? ~EltBits[I] : EltBits[I];
42260             OpElts.setBit(I);
42261           }
42262         }
42263       }
42264       return std::make_pair(OpBits, OpElts);
42265     };
42266     APInt BitsLHS, EltsLHS;
42267     APInt BitsRHS, EltsRHS;
42268     std::tie(BitsLHS, EltsLHS) = GetDemandedMasks(RHS);
42269     std::tie(BitsRHS, EltsRHS) = GetDemandedMasks(LHS, true);
42270 
42271     APInt LHSUndef, LHSZero;
42272     APInt RHSUndef, RHSZero;
42273     if (SimplifyDemandedVectorElts(LHS, EltsLHS, LHSUndef, LHSZero, TLO,
42274                                    Depth + 1))
42275       return true;
42276     if (SimplifyDemandedVectorElts(RHS, EltsRHS, RHSUndef, RHSZero, TLO,
42277                                    Depth + 1))
42278       return true;
42279 
42280     if (!DemandedElts.isAllOnes()) {
42281       SDValue NewLHS = SimplifyMultipleUseDemandedBits(LHS, BitsLHS, EltsLHS,
42282                                                        TLO.DAG, Depth + 1);
42283       SDValue NewRHS = SimplifyMultipleUseDemandedBits(RHS, BitsRHS, EltsRHS,
42284                                                        TLO.DAG, Depth + 1);
42285       if (NewLHS || NewRHS) {
42286         NewLHS = NewLHS ? NewLHS : LHS;
42287         NewRHS = NewRHS ? NewRHS : RHS;
42288         return TLO.CombineTo(
42289             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
42290       }
42291     }
42292     break;
42293   }
42294   case X86ISD::CVTSI2P:
42295   case X86ISD::CVTUI2P: {
42296     SDValue Src = Op.getOperand(0);
42297     MVT SrcVT = Src.getSimpleValueType();
42298     APInt SrcUndef, SrcZero;
42299     APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
42300     if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
42301                                    Depth + 1))
42302       return true;
42303     break;
42304   }
42305   case X86ISD::PACKSS:
42306   case X86ISD::PACKUS: {
42307     SDValue N0 = Op.getOperand(0);
42308     SDValue N1 = Op.getOperand(1);
42309 
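    // Split the demanded elements between the two inputs. E.g. (illustrative,
    // 128-bit case): for a v16i8 PACKSS of two v8i16 sources, result bytes
    // 0-7 come from N0 and bytes 8-15 from N1, so demanding byte 3 only
    // demands word 3 of N0.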
42310     APInt DemandedLHS, DemandedRHS;
42311     getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
42312 
42313     APInt LHSUndef, LHSZero;
42314     if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
42315                                    Depth + 1))
42316       return true;
42317     APInt RHSUndef, RHSZero;
42318     if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
42319                                    Depth + 1))
42320       return true;
42321 
42322     // TODO - pass on known zero/undef.
42323 
42324     // Aggressively peek through ops to get at the demanded elts.
42325     // TODO - we should do this for all target/faux shuffles ops.
42326     if (!DemandedElts.isAllOnes()) {
42327       SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
42328                                                             TLO.DAG, Depth + 1);
42329       SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
42330                                                             TLO.DAG, Depth + 1);
42331       if (NewN0 || NewN1) {
42332         NewN0 = NewN0 ? NewN0 : N0;
42333         NewN1 = NewN1 ? NewN1 : N1;
42334         return TLO.CombineTo(Op,
42335                              TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
42336       }
42337     }
42338     break;
42339   }
42340   case X86ISD::HADD:
42341   case X86ISD::HSUB:
42342   case X86ISD::FHADD:
42343   case X86ISD::FHSUB: {
42344     SDValue N0 = Op.getOperand(0);
42345     SDValue N1 = Op.getOperand(1);
42346 
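    // Split the demanded elements between the two inputs. E.g. (illustrative,
    // 128-bit case): for a v4i32 HADD, result elements 0-1 are sums of
    // adjacent pairs of N0 and elements 2-3 are sums of adjacent pairs of N1,
    // so demanding element 0 only demands elements 0 and 1 of N0.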
42347     APInt DemandedLHS, DemandedRHS;
42348     getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
42349 
42350     APInt LHSUndef, LHSZero;
42351     if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
42352                                    Depth + 1))
42353       return true;
42354     APInt RHSUndef, RHSZero;
42355     if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
42356                                    Depth + 1))
42357       return true;
42358 
42359     // TODO - pass on known zero/undef.
42360 
42361     // Aggressively peek through ops to get at the demanded elts.
42362     // TODO: Handle repeated operands.
42363     if (N0 != N1 && !DemandedElts.isAllOnes()) {
42364       SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
42365                                                             TLO.DAG, Depth + 1);
42366       SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
42367                                                             TLO.DAG, Depth + 1);
42368       if (NewN0 || NewN1) {
42369         NewN0 = NewN0 ? NewN0 : N0;
42370         NewN1 = NewN1 ? NewN1 : N1;
42371         return TLO.CombineTo(Op,
42372                              TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
42373       }
42374     }
42375     break;
42376   }
42377   case X86ISD::VTRUNC:
42378   case X86ISD::VTRUNCS:
42379   case X86ISD::VTRUNCUS: {
42380     SDValue Src = Op.getOperand(0);
42381     MVT SrcVT = Src.getSimpleValueType();
42382     APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
42383     APInt SrcUndef, SrcZero;
42384     if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
42385                                    Depth + 1))
42386       return true;
42387     KnownZero = SrcZero.zextOrTrunc(NumElts);
42388     KnownUndef = SrcUndef.zextOrTrunc(NumElts);
42389     break;
42390   }
42391   case X86ISD::BLENDV: {
42392     APInt SelUndef, SelZero;
42393     if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
42394                                    SelZero, TLO, Depth + 1))
42395       return true;
42396 
42397     // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
42398     APInt LHSUndef, LHSZero;
42399     if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
42400                                    LHSZero, TLO, Depth + 1))
42401       return true;
42402 
42403     APInt RHSUndef, RHSZero;
42404     if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
42405                                    RHSZero, TLO, Depth + 1))
42406       return true;
42407 
42408     KnownZero = LHSZero & RHSZero;
42409     KnownUndef = LHSUndef & RHSUndef;
42410     break;
42411   }
42412   case X86ISD::VZEXT_MOVL: {
42413     // If upper demanded elements are already zero then we have nothing to do.
42414     SDValue Src = Op.getOperand(0);
42415     APInt DemandedUpperElts = DemandedElts;
42416     DemandedUpperElts.clearLowBits(1);
42417     if (TLO.DAG.MaskedVectorIsZero(Src, DemandedUpperElts, Depth + 1))
42418       return TLO.CombineTo(Op, Src);
42419     break;
42420   }
42421   case X86ISD::VBROADCAST: {
42422     SDValue Src = Op.getOperand(0);
42423     MVT SrcVT = Src.getSimpleValueType();
42424     if (!SrcVT.isVector())
42425       break;
42426     // Don't bother broadcasting if we just need the 0'th element.
42427     if (DemandedElts == 1) {
42428       if (Src.getValueType() != VT)
42429         Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
42430                              SDLoc(Op));
42431       return TLO.CombineTo(Op, Src);
42432     }
42433     APInt SrcUndef, SrcZero;
42434     APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
42435     if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
42436                                    Depth + 1))
42437       return true;
42438     // Aggressively peek through src to get at the demanded elt.
42439     // TODO - we should do this for all target/faux shuffles ops.
42440     if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
42441             Src, SrcElts, TLO.DAG, Depth + 1))
42442       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
42443     break;
42444   }
42445   case X86ISD::VPERMV:
42446     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 0, TLO,
42447                                                    Depth))
42448       return true;
42449     break;
42450   case X86ISD::PSHUFB:
42451   case X86ISD::VPERMV3:
42452   case X86ISD::VPERMILPV:
42453     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 1, TLO,
42454                                                    Depth))
42455       return true;
42456     break;
42457   case X86ISD::VPPERM:
42458   case X86ISD::VPERMIL2:
42459     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 2, TLO,
42460                                                    Depth))
42461       return true;
42462     break;
42463   }
42464 
42465   // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
42466   // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
42467   // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
42468   if ((VT.is256BitVector() || VT.is512BitVector()) &&
42469       DemandedElts.lshr(NumElts / 2) == 0) {
42470     unsigned SizeInBits = VT.getSizeInBits();
42471     unsigned ExtSizeInBits = SizeInBits / 2;
42472 
42473     // See if 512-bit ops only use the bottom 128-bits.
42474     if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
42475       ExtSizeInBits = SizeInBits / 4;
42476 
42477     switch (Opc) {
42478       // Scalar broadcast.
42479     case X86ISD::VBROADCAST: {
42480       SDLoc DL(Op);
42481       SDValue Src = Op.getOperand(0);
42482       if (Src.getValueSizeInBits() > ExtSizeInBits)
42483         Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
42484       EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
42485                                     ExtSizeInBits / VT.getScalarSizeInBits());
42486       SDValue Bcst = TLO.DAG.getNode(X86ISD::VBROADCAST, DL, BcstVT, Src);
42487       return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
42488                                                TLO.DAG, DL, ExtSizeInBits));
42489     }
42490     case X86ISD::VBROADCAST_LOAD: {
42491       SDLoc DL(Op);
42492       auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
42493       EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
42494                                     ExtSizeInBits / VT.getScalarSizeInBits());
42495       SDVTList Tys = TLO.DAG.getVTList(BcstVT, MVT::Other);
42496       SDValue Ops[] = {MemIntr->getOperand(0), MemIntr->getOperand(1)};
42497       SDValue Bcst = TLO.DAG.getMemIntrinsicNode(
42498           X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MemIntr->getMemoryVT(),
42499           MemIntr->getMemOperand());
42500       TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
42501                                            Bcst.getValue(1));
42502       return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
42503                                                TLO.DAG, DL, ExtSizeInBits));
42504     }
42505       // Subvector broadcast.
42506     case X86ISD::SUBV_BROADCAST_LOAD: {
42507       auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
42508       EVT MemVT = MemIntr->getMemoryVT();
42509       if (ExtSizeInBits == MemVT.getStoreSizeInBits()) {
42510         SDLoc DL(Op);
42511         SDValue Ld =
42512             TLO.DAG.getLoad(MemVT, DL, MemIntr->getChain(),
42513                             MemIntr->getBasePtr(), MemIntr->getMemOperand());
42514         TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
42515                                              Ld.getValue(1));
42516         return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Ld, 0,
42517                                                  TLO.DAG, DL, ExtSizeInBits));
42518       } else if ((ExtSizeInBits % MemVT.getStoreSizeInBits()) == 0) {
42519         SDLoc DL(Op);
42520         EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
42521                                       ExtSizeInBits / VT.getScalarSizeInBits());
42522         if (SDValue BcstLd =
42523                 getBROADCAST_LOAD(Opc, DL, BcstVT, MemVT, MemIntr, 0, TLO.DAG))
42524           return TLO.CombineTo(Op,
42525                                insertSubVector(TLO.DAG.getUNDEF(VT), BcstLd, 0,
42526                                                TLO.DAG, DL, ExtSizeInBits));
42527       }
42528       break;
42529     }
42530       // Byte shifts by immediate.
42531     case X86ISD::VSHLDQ:
42532     case X86ISD::VSRLDQ:
42533       // Shift by uniform.
42534     case X86ISD::VSHL:
42535     case X86ISD::VSRL:
42536     case X86ISD::VSRA:
42537       // Shift by immediate.
42538     case X86ISD::VSHLI:
42539     case X86ISD::VSRLI:
42540     case X86ISD::VSRAI: {
42541       SDLoc DL(Op);
42542       SDValue Ext0 =
42543           extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
42544       SDValue ExtOp =
42545           TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
42546       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
42547       SDValue Insert =
42548           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
42549       return TLO.CombineTo(Op, Insert);
42550     }
42551     case X86ISD::VPERMI: {
42552       // Simplify PERMPD/PERMQ to extract_subvector.
42553       // TODO: This should be done in shuffle combining.
42554       if (VT == MVT::v4f64 || VT == MVT::v4i64) {
42555         SmallVector<int, 4> Mask;
42556         DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
42557         if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
42558           SDLoc DL(Op);
42559           SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
42560           SDValue UndefVec = TLO.DAG.getUNDEF(VT);
42561           SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
42562           return TLO.CombineTo(Op, Insert);
42563         }
42564       }
42565       break;
42566     }
42567     case X86ISD::VPERM2X128: {
42568       // Simplify VPERM2F128/VPERM2I128 to extract_subvector.
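      // Only the low lane of the result is demanded here, so the low nibble
      // of the immediate fully describes it: bit 3 means "zero this lane",
      // bit 1 selects the source operand and bit 0 selects its upper or lower
      // half. E.g. (illustrative) an immediate with low nibble 0x1 extracts
      // the upper half of operand 0.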
42569       SDLoc DL(Op);
42570       unsigned LoMask = Op.getConstantOperandVal(2) & 0xF;
42571       if (LoMask & 0x8)
42572         return TLO.CombineTo(
42573             Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, DL));
42574       unsigned EltIdx = (LoMask & 0x1) * (NumElts / 2);
42575       unsigned SrcIdx = (LoMask & 0x2) >> 1;
42576       SDValue ExtOp =
42577           extractSubVector(Op.getOperand(SrcIdx), EltIdx, TLO.DAG, DL, 128);
42578       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
42579       SDValue Insert =
42580           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
42581       return TLO.CombineTo(Op, Insert);
42582     }
42583       // Zero upper elements.
42584     case X86ISD::VZEXT_MOVL:
42585       // Target unary shuffles by immediate:
42586     case X86ISD::PSHUFD:
42587     case X86ISD::PSHUFLW:
42588     case X86ISD::PSHUFHW:
42589     case X86ISD::VPERMILPI:
42590       // (Non-Lane Crossing) Target Shuffles.
42591     case X86ISD::VPERMILPV:
42592     case X86ISD::VPERMIL2:
42593     case X86ISD::PSHUFB:
42594     case X86ISD::UNPCKL:
42595     case X86ISD::UNPCKH:
42596     case X86ISD::BLENDI:
42597       // Integer ops.
42598     case X86ISD::PACKSS:
42599     case X86ISD::PACKUS:
42600       // Horizontal Ops.
42601     case X86ISD::HADD:
42602     case X86ISD::HSUB:
42603     case X86ISD::FHADD:
42604     case X86ISD::FHSUB: {
42605       SDLoc DL(Op);
42606       SmallVector<SDValue, 4> Ops;
42607       for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
42608         SDValue SrcOp = Op.getOperand(i);
42609         EVT SrcVT = SrcOp.getValueType();
42610         assert((!SrcVT.isVector() || SrcVT.getSizeInBits() == SizeInBits) &&
42611                "Unsupported vector size");
42612         Ops.push_back(SrcVT.isVector() ? extractSubVector(SrcOp, 0, TLO.DAG, DL,
42613                                                           ExtSizeInBits)
42614                                        : SrcOp);
42615       }
42616       MVT ExtVT = VT.getSimpleVT();
42617       ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
42618                                ExtSizeInBits / ExtVT.getScalarSizeInBits());
42619       SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ops);
42620       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
42621       SDValue Insert =
42622           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
42623       return TLO.CombineTo(Op, Insert);
42624     }
42625     }
42626   }
42627 
42628   // For splats, unless we *only* demand the 0'th element, stop attempts at
42629   // simplification here; we aren't going to improve things, and this is
42630   // better than any potential shuffle.
42631   if (!DemandedElts.isOne() && TLO.DAG.isSplatValue(Op, /*AllowUndefs*/false))
42632     return false;
42633 
42634   // Get target/faux shuffle mask.
42635   APInt OpUndef, OpZero;
42636   SmallVector<int, 64> OpMask;
42637   SmallVector<SDValue, 2> OpInputs;
42638   if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
42639                               OpZero, TLO.DAG, Depth, false))
42640     return false;
42641 
42642   // Shuffle inputs must be the same size as the result.
42643   if (OpMask.size() != (unsigned)NumElts ||
42644       llvm::any_of(OpInputs, [VT](SDValue V) {
42645         return VT.getSizeInBits() != V.getValueSizeInBits() ||
42646                !V.getValueType().isVector();
42647       }))
42648     return false;
42649 
42650   KnownZero = OpZero;
42651   KnownUndef = OpUndef;
42652 
42653   // Check if shuffle mask can be simplified to undef/zero/identity.
42654   int NumSrcs = OpInputs.size();
42655   for (int i = 0; i != NumElts; ++i)
42656     if (!DemandedElts[i])
42657       OpMask[i] = SM_SentinelUndef;
42658 
42659   if (isUndefInRange(OpMask, 0, NumElts)) {
42660     KnownUndef.setAllBits();
42661     return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
42662   }
42663   if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
42664     KnownZero.setAllBits();
42665     return TLO.CombineTo(
42666         Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
42667   }
42668   for (int Src = 0; Src != NumSrcs; ++Src)
42669     if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
42670       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
42671 
42672   // Attempt to simplify inputs.
42673   for (int Src = 0; Src != NumSrcs; ++Src) {
42674     // TODO: Support inputs of different types.
42675     if (OpInputs[Src].getValueType() != VT)
42676       continue;
42677 
42678     int Lo = Src * NumElts;
42679     APInt SrcElts = APInt::getZero(NumElts);
42680     for (int i = 0; i != NumElts; ++i)
42681       if (DemandedElts[i]) {
42682         int M = OpMask[i] - Lo;
42683         if (0 <= M && M < NumElts)
42684           SrcElts.setBit(M);
42685       }
42686 
42687     // TODO - Propagate input undef/zero elts.
42688     APInt SrcUndef, SrcZero;
42689     if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
42690                                    TLO, Depth + 1))
42691       return true;
42692   }
42693 
42694   // If we don't demand all elements, then attempt to combine to a simpler
42695   // shuffle.
42696   // We need to convert the depth to something combineX86ShufflesRecursively
42697   // can handle - so pretend its Depth == 0 again, and reduce the max depth
42698   // to match. This prevents combineX86ShuffleChain from returning a
42699   // combined shuffle that's the same as the original root, causing an
42700   // infinite loop.
42701   if (!DemandedElts.isAllOnes()) {
42702     assert(Depth < X86::MaxShuffleCombineDepth && "Depth out of range");
42703 
42704     SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
42705     for (int i = 0; i != NumElts; ++i)
42706       if (DemandedElts[i])
42707         DemandedMask[i] = i;
42708 
42709     SDValue NewShuffle = combineX86ShufflesRecursively(
42710         {Op}, 0, Op, DemandedMask, {}, 0, X86::MaxShuffleCombineDepth - Depth,
42711         /*HasVarMask*/ false,
42712         /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, TLO.DAG,
42713         Subtarget);
42714     if (NewShuffle)
42715       return TLO.CombineTo(Op, NewShuffle);
42716   }
42717 
42718   return false;
42719 }
42720 
42721 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
42722     SDValue Op, const APInt &OriginalDemandedBits,
42723     const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
42724     unsigned Depth) const {
42725   EVT VT = Op.getValueType();
42726   unsigned BitWidth = OriginalDemandedBits.getBitWidth();
42727   unsigned Opc = Op.getOpcode();
42728   switch(Opc) {
42729   case X86ISD::VTRUNC: {
42730     KnownBits KnownOp;
42731     SDValue Src = Op.getOperand(0);
42732     MVT SrcVT = Src.getSimpleValueType();
42733 
42734     // Simplify the input, using demanded bit information.
42735     APInt TruncMask = OriginalDemandedBits.zext(SrcVT.getScalarSizeInBits());
42736     APInt DemandedElts = OriginalDemandedElts.trunc(SrcVT.getVectorNumElements());
42737     if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, KnownOp, TLO, Depth + 1))
42738       return true;
42739     break;
42740   }
42741   case X86ISD::PMULDQ:
42742   case X86ISD::PMULUDQ: {
42743     // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
42744     KnownBits KnownLHS, KnownRHS;
42745     SDValue LHS = Op.getOperand(0);
42746     SDValue RHS = Op.getOperand(1);
42747 
42748     // Don't mask bits on 32-bit AVX512 targets which might lose a broadcast.
42749     // FIXME: Can we bound this better?
42750     APInt DemandedMask = APInt::getLowBitsSet(64, 32);
42751     APInt DemandedMaskLHS = APInt::getAllOnes(64);
42752     APInt DemandedMaskRHS = APInt::getAllOnes(64);
42753 
42754     bool Is32BitAVX512 = !Subtarget.is64Bit() && Subtarget.hasAVX512();
42755     if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(LHS))
42756       DemandedMaskLHS = DemandedMask;
42757     if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(RHS))
42758       DemandedMaskRHS = DemandedMask;
42759 
42760     if (SimplifyDemandedBits(LHS, DemandedMaskLHS, OriginalDemandedElts,
42761                              KnownLHS, TLO, Depth + 1))
42762       return true;
42763     if (SimplifyDemandedBits(RHS, DemandedMaskRHS, OriginalDemandedElts,
42764                              KnownRHS, TLO, Depth + 1))
42765       return true;
42766 
42767     // PMULUDQ(X,1) -> AND(X,(1<<32)-1) 'getZeroExtendInReg'.
42768     KnownRHS = KnownRHS.trunc(32);
42769     if (Opc == X86ISD::PMULUDQ && KnownRHS.isConstant() &&
42770         KnownRHS.getConstant().isOne()) {
42771       SDLoc DL(Op);
42772       SDValue Mask = TLO.DAG.getConstant(DemandedMask, DL, VT);
42773       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, DL, VT, LHS, Mask));
42774     }
42775 
42776     // Aggressively peek through ops to get at the demanded low bits.
42777     SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
42778         LHS, DemandedMaskLHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
42779     SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
42780         RHS, DemandedMaskRHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
42781     if (DemandedLHS || DemandedRHS) {
42782       DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
42783       DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
42784       return TLO.CombineTo(
42785           Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
42786     }
42787     break;
42788   }
42789   case X86ISD::VSHLI: {
42790     SDValue Op0 = Op.getOperand(0);
42791 
42792     unsigned ShAmt = Op.getConstantOperandVal(1);
42793     if (ShAmt >= BitWidth)
42794       break;
42795 
42796     APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
42797 
42798     // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
42799     // single shift.  We can do this if the bottom bits (which are shifted
42800     // out) are never demanded.
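    // E.g. (illustrative): VSHLI(VSRLI(X, 4), 6) with the low 6 bits
    // undemanded simplifies to VSHLI(X, 2).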
42801     if (Op0.getOpcode() == X86ISD::VSRLI &&
42802         OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
42803       unsigned Shift2Amt = Op0.getConstantOperandVal(1);
42804       if (Shift2Amt < BitWidth) {
42805         int Diff = ShAmt - Shift2Amt;
42806         if (Diff == 0)
42807           return TLO.CombineTo(Op, Op0.getOperand(0));
42808 
42809         unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
42810         SDValue NewShift = TLO.DAG.getNode(
42811             NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
42812             TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
42813         return TLO.CombineTo(Op, NewShift);
42814       }
42815     }
42816 
42817     // If we are only demanding sign bits then we can use the shift source directly.
42818     unsigned NumSignBits =
42819         TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1);
42820     unsigned UpperDemandedBits =
42821         BitWidth - OriginalDemandedBits.countTrailingZeros();
42822     if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
42823       return TLO.CombineTo(Op, Op0);
42824 
42825     if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
42826                              TLO, Depth + 1))
42827       return true;
42828 
42829     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
42830     Known.Zero <<= ShAmt;
42831     Known.One <<= ShAmt;
42832 
42833     // Low bits known zero.
42834     Known.Zero.setLowBits(ShAmt);
42835     return false;
42836   }
42837   case X86ISD::VSRLI: {
42838     unsigned ShAmt = Op.getConstantOperandVal(1);
42839     if (ShAmt >= BitWidth)
42840       break;
42841 
42842     APInt DemandedMask = OriginalDemandedBits << ShAmt;
42843 
42844     if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
42845                              OriginalDemandedElts, Known, TLO, Depth + 1))
42846       return true;
42847 
42848     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
42849     Known.Zero.lshrInPlace(ShAmt);
42850     Known.One.lshrInPlace(ShAmt);
42851 
42852     // High bits known zero.
42853     Known.Zero.setHighBits(ShAmt);
42854     return false;
42855   }
42856   case X86ISD::VSRAI: {
42857     SDValue Op0 = Op.getOperand(0);
42858     SDValue Op1 = Op.getOperand(1);
42859 
42860     unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
42861     if (ShAmt >= BitWidth)
42862       break;
42863 
42864     APInt DemandedMask = OriginalDemandedBits << ShAmt;
42865 
42866     // If we just want the sign bit then we don't need to shift it.
42867     if (OriginalDemandedBits.isSignMask())
42868       return TLO.CombineTo(Op, Op0);
42869 
42870     // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
42871     if (Op0.getOpcode() == X86ISD::VSHLI &&
42872         Op.getOperand(1) == Op0.getOperand(1)) {
42873       SDValue Op00 = Op0.getOperand(0);
42874       unsigned NumSignBits =
42875           TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
42876       if (ShAmt < NumSignBits)
42877         return TLO.CombineTo(Op, Op00);
42878     }
42879 
42880     // If any of the demanded bits are produced by the sign extension, we also
42881     // demand the input sign bit.
42882     if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
42883       DemandedMask.setSignBit();
42884 
42885     if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
42886                              TLO, Depth + 1))
42887       return true;
42888 
42889     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
42890     Known.Zero.lshrInPlace(ShAmt);
42891     Known.One.lshrInPlace(ShAmt);
42892 
42893     // If the input sign bit is known to be zero, or if none of the top bits
42894     // are demanded, turn this into an unsigned shift right.
42895     if (Known.Zero[BitWidth - ShAmt - 1] ||
42896         OriginalDemandedBits.countLeadingZeros() >= ShAmt)
42897       return TLO.CombineTo(
42898           Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
42899 
42900     // High bits are known one.
42901     if (Known.One[BitWidth - ShAmt - 1])
42902       Known.One.setHighBits(ShAmt);
42903     return false;
42904   }
42905   case X86ISD::BLENDV: {
42906     SDValue Sel = Op.getOperand(0);
42907     SDValue LHS = Op.getOperand(1);
42908     SDValue RHS = Op.getOperand(2);
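    // BLENDV only tests the sign bit of each Sel element to pick between LHS
    // and RHS, so only the sign bits of Sel are demanded.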
42909 
42910     APInt SignMask = APInt::getSignMask(BitWidth);
42911     SDValue NewSel = SimplifyMultipleUseDemandedBits(
42912         Sel, SignMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
42913     SDValue NewLHS = SimplifyMultipleUseDemandedBits(
42914         LHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
42915     SDValue NewRHS = SimplifyMultipleUseDemandedBits(
42916         RHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
42917 
42918     if (NewSel || NewLHS || NewRHS) {
42919       NewSel = NewSel ? NewSel : Sel;
42920       NewLHS = NewLHS ? NewLHS : LHS;
42921       NewRHS = NewRHS ? NewRHS : RHS;
42922       return TLO.CombineTo(Op, TLO.DAG.getNode(X86ISD::BLENDV, SDLoc(Op), VT,
42923                                                NewSel, NewLHS, NewRHS));
42924     }
42925     break;
42926   }
42927   case X86ISD::PEXTRB:
42928   case X86ISD::PEXTRW: {
42929     SDValue Vec = Op.getOperand(0);
42930     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
42931     MVT VecVT = Vec.getSimpleValueType();
42932     unsigned NumVecElts = VecVT.getVectorNumElements();
42933 
42934     if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
42935       unsigned Idx = CIdx->getZExtValue();
42936       unsigned VecBitWidth = VecVT.getScalarSizeInBits();
42937 
42938       // If we demand no bits from the vector then we must have demanded
42939       // bits from the implicit zext - simplify to zero.
42940       APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
42941       if (DemandedVecBits == 0)
42942         return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
42943 
42944       APInt KnownUndef, KnownZero;
42945       APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
42946       if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
42947                                      KnownZero, TLO, Depth + 1))
42948         return true;
42949 
42950       KnownBits KnownVec;
42951       if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
42952                                KnownVec, TLO, Depth + 1))
42953         return true;
42954 
42955       if (SDValue V = SimplifyMultipleUseDemandedBits(
42956               Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
42957         return TLO.CombineTo(
42958             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
42959 
42960       Known = KnownVec.zext(BitWidth);
42961       return false;
42962     }
42963     break;
42964   }
42965   case X86ISD::PINSRB:
42966   case X86ISD::PINSRW: {
42967     SDValue Vec = Op.getOperand(0);
42968     SDValue Scl = Op.getOperand(1);
42969     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
42970     MVT VecVT = Vec.getSimpleValueType();
42971 
42972     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
42973       unsigned Idx = CIdx->getZExtValue();
42974       if (!OriginalDemandedElts[Idx])
42975         return TLO.CombineTo(Op, Vec);
42976 
42977       KnownBits KnownVec;
42978       APInt DemandedVecElts(OriginalDemandedElts);
42979       DemandedVecElts.clearBit(Idx);
42980       if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
42981                                KnownVec, TLO, Depth + 1))
42982         return true;
42983 
42984       KnownBits KnownScl;
42985       unsigned NumSclBits = Scl.getScalarValueSizeInBits();
42986       APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
42987       if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
42988         return true;
42989 
42990       KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
42991       Known = KnownBits::commonBits(KnownVec, KnownScl);
42992       return false;
42993     }
42994     break;
42995   }
42996   case X86ISD::PACKSS:
42997     // PACKSS saturates to MIN/MAX integer values. So if we just want the
42998     // sign bit then we can just ask for the source operands' sign bits.
42999     // TODO - add known bits handling.
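    // e.g. for PACKSSWB, signed saturation maps each i16 to an i8 with the
    // same sign, so each result lane's sign bit equals its source's sign bit.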
43000     if (OriginalDemandedBits.isSignMask()) {
43001       APInt DemandedLHS, DemandedRHS;
43002       getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
43003 
43004       KnownBits KnownLHS, KnownRHS;
43005       APInt SignMask = APInt::getSignMask(BitWidth * 2);
43006       if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
43007                                KnownLHS, TLO, Depth + 1))
43008         return true;
43009       if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
43010                                KnownRHS, TLO, Depth + 1))
43011         return true;
43012 
43013       // Attempt to avoid multi-use ops if we don't need anything from them.
43014       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
43015           Op.getOperand(0), SignMask, DemandedLHS, TLO.DAG, Depth + 1);
43016       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
43017           Op.getOperand(1), SignMask, DemandedRHS, TLO.DAG, Depth + 1);
43018       if (DemandedOp0 || DemandedOp1) {
43019         SDValue Op0 = DemandedOp0 ? DemandedOp0 : Op.getOperand(0);
43020         SDValue Op1 = DemandedOp1 ? DemandedOp1 : Op.getOperand(1);
43021         return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, Op0, Op1));
43022       }
43023     }
43024     // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
43025     break;
43026   case X86ISD::VBROADCAST: {
43027     SDValue Src = Op.getOperand(0);
43028     MVT SrcVT = Src.getSimpleValueType();
43029     APInt DemandedElts = APInt::getOneBitSet(
43030         SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1, 0);
43031     if (SimplifyDemandedBits(Src, OriginalDemandedBits, DemandedElts, Known,
43032                              TLO, Depth + 1))
43033       return true;
43034     // If we don't need the upper bits, attempt to narrow the broadcast source.
43035     // Don't attempt this on AVX512 as it might affect broadcast folding.
43036     // TODO: Should we attempt this for i32/i16 splats? They tend to be slower.
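    // e.g. if only the low 32 bits of each i64 element are demanded, a v2i64
    // broadcast of an i64 can be rebuilt as a v4i32 broadcast of the truncated
    // i32 source and bitcast back (a sketch of the narrowing performed below).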
43037     if ((BitWidth == 64) && SrcVT.isScalarInteger() && !Subtarget.hasAVX512() &&
43038         OriginalDemandedBits.countLeadingZeros() >= (BitWidth / 2) &&
43039         Src->hasOneUse()) {
43040       MVT NewSrcVT = MVT::getIntegerVT(BitWidth / 2);
43041       SDValue NewSrc =
43042           TLO.DAG.getNode(ISD::TRUNCATE, SDLoc(Src), NewSrcVT, Src);
43043       MVT NewVT = MVT::getVectorVT(NewSrcVT, VT.getVectorNumElements() * 2);
43044       SDValue NewBcst =
43045           TLO.DAG.getNode(X86ISD::VBROADCAST, SDLoc(Op), NewVT, NewSrc);
43046       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, NewBcst));
43047     }
43048     break;
43049   }
43050   case X86ISD::PCMPGT:
43051     // icmp sgt(0, R) == ashr(R, BitWidth-1).
43052     // iff we only need the sign bit then we can use R directly.
43053     if (OriginalDemandedBits.isSignMask() &&
43054         ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
43055       return TLO.CombineTo(Op, Op.getOperand(1));
43056     break;
43057   case X86ISD::MOVMSK: {
43058     SDValue Src = Op.getOperand(0);
43059     MVT SrcVT = Src.getSimpleValueType();
43060     unsigned SrcBits = SrcVT.getScalarSizeInBits();
43061     unsigned NumElts = SrcVT.getVectorNumElements();
43062 
43063     // If we don't need the sign bits at all just return zero.
43064     if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
43065       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
43066 
43067     // See if we only demand bits from the lower 128-bit vector.
43068     if (SrcVT.is256BitVector() &&
43069         OriginalDemandedBits.getActiveBits() <= (NumElts / 2)) {
43070       SDValue NewSrc = extract128BitVector(Src, 0, TLO.DAG, SDLoc(Src));
43071       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
43072     }
43073 
43074     // Only demand the vector elements of the sign bits we need.
43075     APInt KnownUndef, KnownZero;
43076     APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
43077     if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
43078                                    TLO, Depth + 1))
43079       return true;
43080 
43081     Known.Zero = KnownZero.zext(BitWidth);
43082     Known.Zero.setHighBits(BitWidth - NumElts);
43083 
43084     // MOVMSK only uses the MSB from each vector element.
43085     KnownBits KnownSrc;
43086     APInt DemandedSrcBits = APInt::getSignMask(SrcBits);
43087     if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, KnownSrc, TLO,
43088                              Depth + 1))
43089       return true;
43090 
43091     if (KnownSrc.One[SrcBits - 1])
43092       Known.One.setLowBits(NumElts);
43093     else if (KnownSrc.Zero[SrcBits - 1])
43094       Known.Zero.setLowBits(NumElts);
43095 
43096     // Attempt to avoid a multi-use op if we don't need anything from it.
43097     if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
43098             Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
43099       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
43100     return false;
43101   }
43102   case X86ISD::BEXTR:
43103   case X86ISD::BEXTRI: {
43104     SDValue Op0 = Op.getOperand(0);
43105     SDValue Op1 = Op.getOperand(1);
43106 
43107     // Only bottom 16-bits of the control bits are required.
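    // The control value encodes the extract start position in bits [7:0] and
    // the extract length in bits [15:8]; higher control bits are ignored.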
43108     if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
43109       // NOTE: SimplifyDemandedBits won't do this for constants.
43110       uint64_t Val1 = Cst1->getZExtValue();
43111       uint64_t MaskedVal1 = Val1 & 0xFFFF;
43112       if (Opc == X86ISD::BEXTR && MaskedVal1 != Val1) {
43113         SDLoc DL(Op);
43114         return TLO.CombineTo(
43115             Op, TLO.DAG.getNode(X86ISD::BEXTR, DL, VT, Op0,
43116                                 TLO.DAG.getConstant(MaskedVal1, DL, VT)));
43117       }
43118 
43119       unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
43120       unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
43121 
43122       // If the length is 0, the result is 0.
43123       if (Length == 0) {
43124         Known.setAllZero();
43125         return false;
43126       }
43127 
43128       if ((Shift + Length) <= BitWidth) {
43129         APInt DemandedMask = APInt::getBitsSet(BitWidth, Shift, Shift + Length);
43130         if (SimplifyDemandedBits(Op0, DemandedMask, Known, TLO, Depth + 1))
43131           return true;
43132 
43133         Known = Known.extractBits(Length, Shift);
43134         Known = Known.zextOrTrunc(BitWidth);
43135         return false;
43136       }
43137     } else {
43138       assert(Opc == X86ISD::BEXTR && "Unexpected opcode!");
43139       KnownBits Known1;
43140       APInt DemandedMask(APInt::getLowBitsSet(BitWidth, 16));
43141       if (SimplifyDemandedBits(Op1, DemandedMask, Known1, TLO, Depth + 1))
43142         return true;
43143 
43144       // If the length is 0, replace with 0.
43145       KnownBits LengthBits = Known1.extractBits(8, 8);
43146       if (LengthBits.isZero())
43147         return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
43148     }
43149 
43150     break;
43151   }
43152   case X86ISD::PDEP: {
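    // PDEP deposits the low bits of Op0 into the bit positions set in the mask
    // Op1; all other result bits are zero.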
43153     SDValue Op0 = Op.getOperand(0);
43154     SDValue Op1 = Op.getOperand(1);
43155 
43156     unsigned DemandedBitsLZ = OriginalDemandedBits.countLeadingZeros();
43157     APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
43158 
43159     // If the demanded bits have leading zeroes, we don't demand those bits
43160     // from the mask.
43161     if (SimplifyDemandedBits(Op1, LoMask, Known, TLO, Depth + 1))
43162       return true;
43163 
43164     // The number of possible 1s in the mask determines the number of LSBs of
43165     // operand 0 used. Undemanded bits from the mask don't matter so filter
43166     // them before counting.
43167     KnownBits Known2;
43168     uint64_t Count = (~Known.Zero & LoMask).countPopulation();
43169     APInt DemandedMask(APInt::getLowBitsSet(BitWidth, Count));
43170     if (SimplifyDemandedBits(Op0, DemandedMask, Known2, TLO, Depth + 1))
43171       return true;
43172 
43173     // Zeroes are retained from the mask, but not ones.
43174     Known.One.clearAllBits();
43175     // The result will have at least as many trailing zeros as the non-mask
43176     // operand since bits can only map to the same or higher bit position.
43177     Known.Zero.setLowBits(Known2.countMinTrailingZeros());
43178     return false;
43179   }
43180   }
43181 
43182   return TargetLowering::SimplifyDemandedBitsForTargetNode(
43183       Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
43184 }
43185 
43186 SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
43187     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
43188     SelectionDAG &DAG, unsigned Depth) const {
43189   int NumElts = DemandedElts.getBitWidth();
43190   unsigned Opc = Op.getOpcode();
43191   EVT VT = Op.getValueType();
43192 
43193   switch (Opc) {
43194   case X86ISD::PINSRB:
43195   case X86ISD::PINSRW: {
43196     // If we don't demand the inserted element, return the base vector.
43197     SDValue Vec = Op.getOperand(0);
43198     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
43199     MVT VecVT = Vec.getSimpleValueType();
43200     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
43201         !DemandedElts[CIdx->getZExtValue()])
43202       return Vec;
43203     break;
43204   }
43205   case X86ISD::VSHLI: {
43206     // If we are only demanding sign bits then we can use the shift source
43207     // directly.
43208     SDValue Op0 = Op.getOperand(0);
43209     unsigned ShAmt = Op.getConstantOperandVal(1);
43210     unsigned BitWidth = DemandedBits.getBitWidth();
43211     unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
43212     unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
43213     if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
43214       return Op0;
43215     break;
43216   }
43217   case X86ISD::VSRAI:
43218     // iff we only need the sign bit then we can use the source directly.
43219     // TODO: generalize where we only demand extended signbits.
43220     if (DemandedBits.isSignMask())
43221       return Op.getOperand(0);
43222     break;
43223   case X86ISD::PCMPGT:
43224     // icmp sgt(0, R) == ashr(R, BitWidth-1).
43225     // iff we only need the sign bit then we can use R directly.
43226     if (DemandedBits.isSignMask() &&
43227         ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
43228       return Op.getOperand(1);
43229     break;
43230   case X86ISD::ANDNP: {
43231     // ANDNP = (~LHS & RHS);
43232     SDValue LHS = Op.getOperand(0);
43233     SDValue RHS = Op.getOperand(1);
43234 
43235     KnownBits LHSKnown = DAG.computeKnownBits(LHS, DemandedElts, Depth + 1);
43236     KnownBits RHSKnown = DAG.computeKnownBits(RHS, DemandedElts, Depth + 1);
43237 
43238     // If all of the demanded bits are known 0 on LHS and known 0 on RHS, then
43239     // the (inverted) LHS bits cannot contribute to the result of the 'andn' in
43240     // this context, so return RHS.
43241     if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero))
43242       return RHS;
43243     break;
43244   }
43245   }
43246 
43247   APInt ShuffleUndef, ShuffleZero;
43248   SmallVector<int, 16> ShuffleMask;
43249   SmallVector<SDValue, 2> ShuffleOps;
43250   if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
43251                              ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
43252     // If all the demanded elts are from one operand and are inline,
43253     // then we can use the operand directly.
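    // ('Inline' means each demanded lane reads the same lane index of exactly
    // one shuffle operand.)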
43254     int NumOps = ShuffleOps.size();
43255     if (ShuffleMask.size() == (unsigned)NumElts &&
43256         llvm::all_of(ShuffleOps, [VT](SDValue V) {
43257           return VT.getSizeInBits() == V.getValueSizeInBits();
43258         })) {
43259 
43260       if (DemandedElts.isSubsetOf(ShuffleUndef))
43261         return DAG.getUNDEF(VT);
43262       if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
43263         return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
43264 
43265       // Bitmask that indicates which ops have only been accessed 'inline'.
43266       APInt IdentityOp = APInt::getAllOnes(NumOps);
43267       for (int i = 0; i != NumElts; ++i) {
43268         int M = ShuffleMask[i];
43269         if (!DemandedElts[i] || ShuffleUndef[i])
43270           continue;
43271         int OpIdx = M / NumElts;
43272         int EltIdx = M % NumElts;
43273         if (M < 0 || EltIdx != i) {
43274           IdentityOp.clearAllBits();
43275           break;
43276         }
43277         IdentityOp &= APInt::getOneBitSet(NumOps, OpIdx);
43278         if (IdentityOp == 0)
43279           break;
43280       }
43281       assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
43282              "Multiple identity shuffles detected");
43283 
43284       if (IdentityOp != 0)
43285         return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
43286     }
43287   }
43288 
43289   return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
43290       Op, DemandedBits, DemandedElts, DAG, Depth);
43291 }
43292 
43293 bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
43294     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
43295     bool PoisonOnly, unsigned Depth) const {
43296   unsigned EltsBits = Op.getScalarValueSizeInBits();
43297   unsigned NumElts = DemandedElts.getBitWidth();
43298 
43299   // TODO: Add more target shuffles.
43300   switch (Op.getOpcode()) {
43301   case X86ISD::PSHUFD:
43302   case X86ISD::VPERMILPI: {
43303     SmallVector<int, 8> Mask;
43304     DecodePSHUFMask(NumElts, EltsBits, Op.getConstantOperandVal(1), Mask);
43305 
43306     APInt DemandedSrcElts = APInt::getZero(NumElts);
43307     for (unsigned I = 0; I != NumElts; ++I)
43308       if (DemandedElts[I])
43309         DemandedSrcElts.setBit(Mask[I]);
43310 
43311     return DAG.isGuaranteedNotToBeUndefOrPoison(
43312         Op.getOperand(0), DemandedSrcElts, PoisonOnly, Depth + 1);
43313   }
43314   }
43315   return TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
43316       Op, DemandedElts, DAG, PoisonOnly, Depth);
43317 }
43318 
43319 bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(
43320     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
43321     bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {
43322 
43323   // TODO: Add more target shuffles.
43324   switch (Op.getOpcode()) {
43325   case X86ISD::PSHUFD:
43326   case X86ISD::VPERMILPI:
43327     return false;
43328   }
43329   return TargetLowering::canCreateUndefOrPoisonForTargetNode(
43330       Op, DemandedElts, DAG, PoisonOnly, ConsiderFlags, Depth);
43331 }
43332 
43333 bool X86TargetLowering::isSplatValueForTargetNode(SDValue Op,
43334                                                   const APInt &DemandedElts,
43335                                                   APInt &UndefElts,
43336                                                   const SelectionDAG &DAG,
43337                                                   unsigned Depth) const {
43338   unsigned NumElts = DemandedElts.getBitWidth();
43339   unsigned Opc = Op.getOpcode();
43340 
43341   switch (Opc) {
43342   case X86ISD::VBROADCAST:
43343   case X86ISD::VBROADCAST_LOAD:
43344     UndefElts = APInt::getNullValue(NumElts);
43345     return true;
43346   }
43347 
43348   return TargetLowering::isSplatValueForTargetNode(Op, DemandedElts, UndefElts,
43349                                                    DAG, Depth);
43350 }
43351 
43352 // Helper to peek through bitops/trunc/setcc to determine size of source vector.
43353 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
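// e.g. (v8i1 setcc (v8i32 X), (v8i32 Y)) reports a 256-bit source, so
// checkBitcastSrcVectorSize(Src, 256, ...) would return true for it.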
43354 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size,
43355                                       bool AllowTruncate) {
43356   switch (Src.getOpcode()) {
43357   case ISD::TRUNCATE:
43358     if (!AllowTruncate)
43359       return false;
43360     [[fallthrough]];
43361   case ISD::SETCC:
43362     return Src.getOperand(0).getValueSizeInBits() == Size;
43363   case ISD::AND:
43364   case ISD::XOR:
43365   case ISD::OR:
43366     return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate) &&
43367            checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate);
43368   case ISD::VSELECT:
43369     return Src.getOperand(0).getScalarValueSizeInBits() == 1 &&
43370            checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate) &&
43371            checkBitcastSrcVectorSize(Src.getOperand(2), Size, AllowTruncate);
43372   case ISD::BUILD_VECTOR:
43373     return ISD::isBuildVectorAllZeros(Src.getNode());
43374 
43375   }
43376   return false;
43377 }
43378 
43379 // Helper to flip between AND/OR/XOR opcodes and their X86ISD FP equivalents.
43380 static unsigned getAltBitOpcode(unsigned Opcode) {
43381   switch(Opcode) {
43382   case ISD::AND: return X86ISD::FAND;
43383   case ISD::OR: return X86ISD::FOR;
43384   case ISD::XOR: return X86ISD::FXOR;
43385   case X86ISD::ANDNP: return X86ISD::FANDN;
43386   }
43387   llvm_unreachable("Unknown bitwise opcode");
43388 }
43389 
43390 // Helper to adjust v4i32 MOVMSK expansion to work with SSE1-only targets.
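// Without SSE2 there is no integer MOVMSK on xmm, but MOVMSKPS reads the same
// per-element sign bits from v4f32, so try to re-express the v4i1 source in
// the f32 domain.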
43391 static SDValue adjustBitcastSrcVectorSSE1(SelectionDAG &DAG, SDValue Src,
43392                                           const SDLoc &DL) {
43393   EVT SrcVT = Src.getValueType();
43394   if (SrcVT != MVT::v4i1)
43395     return SDValue();
43396 
43397   switch (Src.getOpcode()) {
43398   case ISD::SETCC:
43399     if (Src.getOperand(0).getValueType() == MVT::v4i32 &&
43400         ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) &&
43401         cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT) {
43402       SDValue Op0 = Src.getOperand(0);
43403       if (ISD::isNormalLoad(Op0.getNode()))
43404         return DAG.getBitcast(MVT::v4f32, Op0);
43405       if (Op0.getOpcode() == ISD::BITCAST &&
43406           Op0.getOperand(0).getValueType() == MVT::v4f32)
43407         return Op0.getOperand(0);
43408     }
43409     break;
43410   case ISD::AND:
43411   case ISD::XOR:
43412   case ISD::OR: {
43413     SDValue Op0 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(0), DL);
43414     SDValue Op1 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(1), DL);
43415     if (Op0 && Op1)
43416       return DAG.getNode(getAltBitOpcode(Src.getOpcode()), DL, MVT::v4f32, Op0,
43417                          Op1);
43418     break;
43419   }
43420   }
43421   return SDValue();
43422 }
43423 
43424 // Helper to push sign extension of vXi1 SETCC result through bitops.
43425 static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
43426                                           SDValue Src, const SDLoc &DL) {
43427   switch (Src.getOpcode()) {
43428   case ISD::SETCC:
43429   case ISD::TRUNCATE:
43430   case ISD::BUILD_VECTOR:
43431     return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
43432   case ISD::AND:
43433   case ISD::XOR:
43434   case ISD::OR:
43435     return DAG.getNode(
43436         Src.getOpcode(), DL, SExtVT,
43437         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
43438         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
43439   case ISD::VSELECT:
43440     return DAG.getSelect(
43441         DL, SExtVT, Src.getOperand(0),
43442         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL),
43443         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(2), DL));
43444   }
43445   llvm_unreachable("Unexpected node type for vXi1 sign extension");
43446 }
43447 
43448 // Try to match patterns such as
43449 // (i16 bitcast (v16i1 x))
43450 // ->
43451 // (i16 movmsk (16i8 sext (v16i1 x)))
43452 // before the illegal vector is scalarized on subtargets that don't have legal
43453 // vxi1 types.
43454 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
43455                                   const SDLoc &DL,
43456                                   const X86Subtarget &Subtarget) {
43457   EVT SrcVT = Src.getValueType();
43458   if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
43459     return SDValue();
43460 
43461   // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
43462   // legalization destroys the v4i32 type.
43463   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) {
43464     if (SDValue V = adjustBitcastSrcVectorSSE1(DAG, Src, DL)) {
43465       V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32,
43466                       DAG.getBitcast(MVT::v4f32, V));
43467       return DAG.getZExtOrTrunc(V, DL, VT);
43468     }
43469   }
43470 
43471   // If the input is a truncate from v16i8 or v32i8 go ahead and use a
43472   // movmskb even with avx512. This will be better than truncating to vXi1 and
43473   // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
43474   // vpcmpeqb/vpcmpgtb.
43475   bool PreferMovMsk = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
43476                       (Src.getOperand(0).getValueType() == MVT::v16i8 ||
43477                        Src.getOperand(0).getValueType() == MVT::v32i8 ||
43478                        Src.getOperand(0).getValueType() == MVT::v64i8);
43479 
43480   // Prefer movmsk for AVX512 for (bitcast (setlt X, 0)) which can be handled
43481   // directly with vpmovmskb/vmovmskps/vmovmskpd.
43482   if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() &&
43483       cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT &&
43484       ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
43485     EVT CmpVT = Src.getOperand(0).getValueType();
43486     EVT EltVT = CmpVT.getVectorElementType();
43487     if (CmpVT.getSizeInBits() <= 256 &&
43488         (EltVT == MVT::i8 || EltVT == MVT::i32 || EltVT == MVT::i64))
43489       PreferMovMsk = true;
43490   }
43491 
43492   // With AVX512 vxi1 types are legal and we prefer using k-regs.
43493   // MOVMSK is supported in SSE2 or later.
43494   if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !PreferMovMsk))
43495     return SDValue();
43496 
43497   // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v2f64 and
43498   // v4f64. So all legal 128-bit and 256-bit vectors are covered except for
43499   // v8i16 and v16i16.
43500   // For these two cases, we can shuffle the upper element bytes to a
43501   // consecutive sequence at the start of the vector and treat the results as
43502   // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
43503   // for v16i16 this is not the case, because the shuffle is expensive, so we
43504   // avoid sign-extending to this type entirely.
43505   // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
43506   // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
43507   MVT SExtVT;
43508   bool PropagateSExt = false;
43509   switch (SrcVT.getSimpleVT().SimpleTy) {
43510   default:
43511     return SDValue();
43512   case MVT::v2i1:
43513     SExtVT = MVT::v2i64;
43514     break;
43515   case MVT::v4i1:
43516     SExtVT = MVT::v4i32;
43517     // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
43518     // sign-extend to a 256-bit operation to avoid truncation.
43519     if (Subtarget.hasAVX() &&
43520         checkBitcastSrcVectorSize(Src, 256, Subtarget.hasAVX2())) {
43521       SExtVT = MVT::v4i64;
43522       PropagateSExt = true;
43523     }
43524     break;
43525   case MVT::v8i1:
43526     SExtVT = MVT::v8i16;
43527     // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
43528     // sign-extend to a 256-bit operation to match the compare.
43529     // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
43530     // 256-bit because the shuffle is cheaper than sign extending the result of
43531     // the compare.
43532     if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256, true) ||
43533                                checkBitcastSrcVectorSize(Src, 512, true))) {
43534       SExtVT = MVT::v8i32;
43535       PropagateSExt = true;
43536     }
43537     break;
43538   case MVT::v16i1:
43539     SExtVT = MVT::v16i8;
43540     // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
43541     // it is not profitable to sign-extend to 256-bit because this will
43542     // require an extra cross-lane shuffle which is more expensive than
43543     // truncating the result of the compare to 128-bits.
43544     break;
43545   case MVT::v32i1:
43546     SExtVT = MVT::v32i8;
43547     break;
43548   case MVT::v64i1:
43549     // If we have AVX512F but not AVX512BW, the input must have been truncated
43550     // from v64i8 (checked earlier); split the input and make two pmovmskbs.
43551     if (Subtarget.hasAVX512()) {
43552       if (Subtarget.hasBWI())
43553         return SDValue();
43554       SExtVT = MVT::v64i8;
43555       break;
43556     }
43557     // Split if this is a <64 x i8> comparison result.
43558     if (checkBitcastSrcVectorSize(Src, 512, false)) {
43559       SExtVT = MVT::v64i8;
43560       break;
43561     }
43562     return SDValue();
43563   };
43564 
43565   SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
43566                             : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
43567 
43568   if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
43569     V = getPMOVMSKB(DL, V, DAG, Subtarget);
43570   } else {
43571     if (SExtVT == MVT::v8i16)
43572       V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
43573                       DAG.getUNDEF(MVT::v8i16));
43574     V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
43575   }
43576 
43577   EVT IntVT =
43578       EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
43579   V = DAG.getZExtOrTrunc(V, DL, IntVT);
43580   return DAG.getBitcast(VT, V);
43581 }
43582 
43583 // Convert a vXi1 constant build vector to the same width scalar integer.
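// e.g. the v4i1 constant <1,0,1,1> becomes the i4 constant 0b1101 (element 0
// maps to bit 0).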
43584 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
43585   EVT SrcVT = Op.getValueType();
43586   assert(SrcVT.getVectorElementType() == MVT::i1 &&
43587          "Expected a vXi1 vector");
43588   assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
43589          "Expected a constant build vector");
43590 
43591   APInt Imm(SrcVT.getVectorNumElements(), 0);
43592   for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
43593     SDValue In = Op.getOperand(Idx);
43594     if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
43595       Imm.setBit(Idx);
43596   }
43597   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
43598   return DAG.getConstant(Imm, SDLoc(Op), IntVT);
43599 }
43600 
43601 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
43602                                            TargetLowering::DAGCombinerInfo &DCI,
43603                                            const X86Subtarget &Subtarget) {
43604   assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
43605 
43606   if (!DCI.isBeforeLegalizeOps())
43607     return SDValue();
43608 
43609   // Only do this if we have k-registers.
43610   if (!Subtarget.hasAVX512())
43611     return SDValue();
43612 
43613   EVT DstVT = N->getValueType(0);
43614   SDValue Op = N->getOperand(0);
43615   EVT SrcVT = Op.getValueType();
43616 
43617   if (!Op.hasOneUse())
43618     return SDValue();
43619 
43620   // Look for logic ops.
43621   if (Op.getOpcode() != ISD::AND &&
43622       Op.getOpcode() != ISD::OR &&
43623       Op.getOpcode() != ISD::XOR)
43624     return SDValue();
43625 
43626   // Make sure we have a bitcast between mask registers and a scalar type.
43627   if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
43628         DstVT.isScalarInteger()) &&
43629       !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
43630         SrcVT.isScalarInteger()))
43631     return SDValue();
43632 
43633   SDValue LHS = Op.getOperand(0);
43634   SDValue RHS = Op.getOperand(1);
43635 
43636   if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
43637       LHS.getOperand(0).getValueType() == DstVT)
43638     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
43639                        DAG.getBitcast(DstVT, RHS));
43640 
43641   if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
43642       RHS.getOperand(0).getValueType() == DstVT)
43643     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
43644                        DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
43645 
43646   // If the RHS is a vXi1 build vector, this is a good reason to flip too.
43647   // Most of these have to move a constant from the scalar domain anyway.
43648   if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
43649     RHS = combinevXi1ConstantToInteger(RHS, DAG);
43650     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
43651                        DAG.getBitcast(DstVT, LHS), RHS);
43652   }
43653 
43654   return SDValue();
43655 }
43656 
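// Try to fold a bitcast surrounding mask (vXi1) logic so that the AND/OR/XOR
// is performed directly in the destination type, avoiding GPR<->k-register
// crossings (see the generic bitcast combine further below for the caller).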
43657 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
43658                                     const X86Subtarget &Subtarget) {
43659   SDLoc DL(BV);
43660   unsigned NumElts = BV->getNumOperands();
43661   SDValue Splat = BV->getSplatValue();
43662 
43663   // Build MMX element from integer GPR or SSE float values.
43664   auto CreateMMXElement = [&](SDValue V) {
43665     if (V.isUndef())
43666       return DAG.getUNDEF(MVT::x86mmx);
43667     if (V.getValueType().isFloatingPoint()) {
43668       if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
43669         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
43670         V = DAG.getBitcast(MVT::v2i64, V);
43671         return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
43672       }
43673       V = DAG.getBitcast(MVT::i32, V);
43674     } else {
43675       V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
43676     }
43677     return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
43678   };
43679 
43680   // Convert build vector ops to MMX data in the bottom elements.
43681   SmallVector<SDValue, 8> Ops;
43682 
43683   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43684 
43685   // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
43686   if (Splat) {
43687     if (Splat.isUndef())
43688       return DAG.getUNDEF(MVT::x86mmx);
43689 
43690     Splat = CreateMMXElement(Splat);
43691 
43692     if (Subtarget.hasSSE1()) {
43693       // Unpack v8i8 to splat i8 elements to lowest 16-bits.
43694       if (NumElts == 8)
43695         Splat = DAG.getNode(
43696             ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
43697             DAG.getTargetConstant(Intrinsic::x86_mmx_punpcklbw, DL,
43698                                   TLI.getPointerTy(DAG.getDataLayout())),
43699             Splat, Splat);
43700 
43701       // Use PSHUFW to repeat 16-bit elements.
43702       unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
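      // Mask 0x00 splats the low 16-bit element; 0x44 (0b01000100) selects
      // elements {0,1,0,1}, i.e. it splats the low 32-bit element.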
43703       return DAG.getNode(
43704           ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
43705           DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL,
43706                                 TLI.getPointerTy(DAG.getDataLayout())),
43707           Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
43708     }
43709     Ops.append(NumElts, Splat);
43710   } else {
43711     for (unsigned i = 0; i != NumElts; ++i)
43712       Ops.push_back(CreateMMXElement(BV->getOperand(i)));
43713   }
43714 
43715   // Use tree of PUNPCKLs to build up general MMX vector.
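  // e.g. with 8 elements: punpcklbw merges byte pairs, punpcklwd merges the
  // resulting word pairs, and punpckldq merges the final two dwords.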
43716   while (Ops.size() > 1) {
43717     unsigned NumOps = Ops.size();
43718     unsigned IntrinOp =
43719         (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
43720                      : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
43721                                     : Intrinsic::x86_mmx_punpcklbw));
43722     SDValue Intrin = DAG.getTargetConstant(
43723         IntrinOp, DL, TLI.getPointerTy(DAG.getDataLayout()));
43724     for (unsigned i = 0; i != NumOps; i += 2)
43725       Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
43726                                Ops[i], Ops[i + 1]);
43727     Ops.resize(NumOps / 2);
43728   }
43729 
43730   return Ops[0];
43731 }
43732 
43733 // Recursive function that attempts to determine whether a bool vector node
43734 // was originally a vector/float/double that got truncated/extended/bitcast
43735 // to/from a scalar integer. If so, replace the scalar ops with bool vector
43736 // equivalents back down the chain.
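// e.g. an i16 built as (or (bitcast (v16i1 X)), (shl (zext ...), C)) can be
// rebuilt as v16i1 OR/KSHIFTL nodes, removing the scalar round trip
// (illustrative pattern; see the cases handled below).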
43737 static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
43738                                           SelectionDAG &DAG,
43739                                           const X86Subtarget &Subtarget) {
43740   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43741   unsigned Opc = V.getOpcode();
43742   switch (Opc) {
43743   case ISD::BITCAST: {
43744     // Bitcast from a vector/float/double, we can cheaply bitcast to VT.
43745     SDValue Src = V.getOperand(0);
43746     EVT SrcVT = Src.getValueType();
43747     if (SrcVT.isVector() || SrcVT.isFloatingPoint())
43748       return DAG.getBitcast(VT, Src);
43749     break;
43750   }
43751   case ISD::TRUNCATE: {
43752     // If we find a suitable source, a truncated scalar becomes a subvector.
43753     SDValue Src = V.getOperand(0);
43754     EVT NewSrcVT =
43755         EVT::getVectorVT(*DAG.getContext(), MVT::i1, Src.getValueSizeInBits());
43756     if (TLI.isTypeLegal(NewSrcVT))
43757       if (SDValue N0 =
43758               combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
43759         return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N0,
43760                            DAG.getIntPtrConstant(0, DL));
43761     break;
43762   }
43763   case ISD::ANY_EXTEND:
43764   case ISD::ZERO_EXTEND: {
43765     // If we find a suitable source, an extended scalar becomes a subvector.
43766     SDValue Src = V.getOperand(0);
43767     EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
43768                                     Src.getScalarValueSizeInBits());
43769     if (TLI.isTypeLegal(NewSrcVT))
43770       if (SDValue N0 =
43771               combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
43772         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
43773                            Opc == ISD::ANY_EXTEND ? DAG.getUNDEF(VT)
43774                                                   : DAG.getConstant(0, DL, VT),
43775                            N0, DAG.getIntPtrConstant(0, DL));
43776     break;
43777   }
43778   case ISD::OR: {
43779     // If we find suitable sources, we can just move an OR to the vector domain.
43780     SDValue Src0 = V.getOperand(0);
43781     SDValue Src1 = V.getOperand(1);
43782     if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
43783       if (SDValue N1 = combineBitcastToBoolVector(VT, Src1, DL, DAG, Subtarget))
43784         return DAG.getNode(Opc, DL, VT, N0, N1);
43785     break;
43786   }
43787   case ISD::SHL: {
43788     // If we find a suitable source, a SHL becomes a KSHIFTL.
43789     SDValue Src0 = V.getOperand(0);
43790     if ((VT == MVT::v8i1 && !Subtarget.hasDQI()) ||
43791         ((VT == MVT::v32i1 || VT == MVT::v64i1) && !Subtarget.hasBWI()))
43792       break;
43793 
43794     if (auto *Amt = dyn_cast<ConstantSDNode>(V.getOperand(1)))
43795       if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
43796         return DAG.getNode(
43797             X86ISD::KSHIFTL, DL, VT, N0,
43798             DAG.getTargetConstant(Amt->getZExtValue(), DL, MVT::i8));
43799     break;
43800   }
43801   }
43802   return SDValue();
43803 }
43804 
43805 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
43806                               TargetLowering::DAGCombinerInfo &DCI,
43807                               const X86Subtarget &Subtarget) {
43808   SDValue N0 = N->getOperand(0);
43809   EVT VT = N->getValueType(0);
43810   EVT SrcVT = N0.getValueType();
43811   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43812 
43813   // Try to match patterns such as
43814   // (i16 bitcast (v16i1 x))
43815   // ->
43816   // (i16 movmsk (16i8 sext (v16i1 x)))
43817   // before the setcc result is scalarized on subtargets that don't have legal
43818   // vxi1 types.
43819   if (DCI.isBeforeLegalize()) {
43820     SDLoc dl(N);
43821     if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
43822       return V;
43823 
43824     // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
43825     // type, widen both sides to avoid a trip through memory.
43826     if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
43827         Subtarget.hasAVX512()) {
43828       N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
43829       N0 = DAG.getBitcast(MVT::v8i1, N0);
43830       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
43831                          DAG.getIntPtrConstant(0, dl));
43832     }
43833 
43834     // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
43835     // type, widen both sides to avoid a trip through memory.
43836     if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
43837         Subtarget.hasAVX512()) {
43838       // Use zeros for the widening if we already have some zeroes. This can
43839       // allow SimplifyDemandedBits to remove scalar ANDs that may be
43840       // downstream of this.
43841       // FIXME: It might make sense to detect a concat_vectors with a mix of
43842       // zeroes and undef and turn it into insert_subvector for i1 vectors as
43843       // a separate combine. What we can't do is canonicalize the operands of
43844       // such a concat or we'll get into a loop with SimplifyDemandedBits.
43845       if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
43846         SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
43847         if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
43848           SrcVT = LastOp.getValueType();
43849           unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
43850           SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
43851           Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
43852           N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
43853           N0 = DAG.getBitcast(MVT::i8, N0);
43854           return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
43855         }
43856       }
43857 
43858       unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
43859       SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
43860       Ops[0] = N0;
43861       N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
43862       N0 = DAG.getBitcast(MVT::i8, N0);
43863       return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
43864     }
43865   } else {
43866     // If we're bitcasting from iX to vXi1, see if the integer originally
43867     // began as a vXi1 and whether we can remove the bitcast entirely.
43868     if (VT.isVector() && VT.getScalarType() == MVT::i1 &&
43869         SrcVT.isScalarInteger() && TLI.isTypeLegal(VT)) {
43870       if (SDValue V =
43871               combineBitcastToBoolVector(VT, N0, SDLoc(N), DAG, Subtarget))
43872         return V;
43873     }
43874   }
43875 
43876   // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
43877   // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
43878   // due to insert_subvector legalization on KNL. By promoting the copy to i16
43879   // we can help with known bits propagation from the vXi1 domain to the
43880   // scalar domain.
43881   if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
43882       !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
43883       N0.getOperand(0).getValueType() == MVT::v16i1 &&
43884       isNullConstant(N0.getOperand(1)))
43885     return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
43886                        DAG.getBitcast(MVT::i16, N0.getOperand(0)));
43887 
43888   // Canonicalize (bitcast (vbroadcast_load)) so that the output of the bitcast
43889   // and the vbroadcast_load are both integer or both fp. In some cases this
43890   // will remove the bitcast entirely.
43891   if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
43892        VT.isFloatingPoint() != SrcVT.isFloatingPoint() && VT.isVector()) {
43893     auto *BCast = cast<MemIntrinsicSDNode>(N0);
43894     unsigned SrcVTSize = SrcVT.getScalarSizeInBits();
43895     unsigned MemSize = BCast->getMemoryVT().getScalarSizeInBits();
43896     // Don't swap i8/i16 since we don't have fp types of that size.
43897     if (MemSize >= 32) {
43898       MVT MemVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(MemSize)
43899                                        : MVT::getIntegerVT(MemSize);
43900       MVT LoadVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(SrcVTSize)
43901                                         : MVT::getIntegerVT(SrcVTSize);
43902       LoadVT = MVT::getVectorVT(LoadVT, SrcVT.getVectorNumElements());
43903 
43904       SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
43905       SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
43906       SDValue ResNode =
43907           DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
43908                                   MemVT, BCast->getMemOperand());
43909       DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
43910       return DAG.getBitcast(VT, ResNode);
43911     }
43912   }
43913 
43914   // Since MMX types are special and don't usually play with other vector types,
43915   // it's better to handle them early to be sure we emit efficient code by
43916   // avoiding store-load conversions.
43917   if (VT == MVT::x86mmx) {
43918     // Detect MMX constant vectors.
43919     APInt UndefElts;
43920     SmallVector<APInt, 1> EltBits;
43921     if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
43922       SDLoc DL(N0);
43923       // Handle zero-extension of i32 with MOVD.
43924       if (EltBits[0].countLeadingZeros() >= 32)
43925         return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
43926                            DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
43927       // Else, bitcast to a double.
43928       // TODO - investigate supporting sext 32-bit immediates on x86_64.
43929       APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
43930       return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
43931     }
43932 
43933     // Detect bitcasts to x86mmx low word.
43934     if (N0.getOpcode() == ISD::BUILD_VECTOR &&
43935         (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
43936         N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
43937       bool LowUndef = true, AllUndefOrZero = true;
43938       for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
43939         SDValue Op = N0.getOperand(i);
43940         LowUndef &= Op.isUndef() || (i >= e/2);
43941         AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
43942       }
43943       if (AllUndefOrZero) {
43944         SDValue N00 = N0.getOperand(0);
43945         SDLoc dl(N00);
43946         N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
43947                        : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
43948         return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
43949       }
43950     }
43951 
43952     // Detect bitcasts of 64-bit build vectors and convert to a
43953     // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
43954     // lowest element.
43955     if (N0.getOpcode() == ISD::BUILD_VECTOR &&
43956         (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
43957          SrcVT == MVT::v8i8))
43958       return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
43959 
43960     // Detect bitcasts between element or subvector extraction to x86mmx.
43961     if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
43962          N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
43963         isNullConstant(N0.getOperand(1))) {
43964       SDValue N00 = N0.getOperand(0);
43965       if (N00.getValueType().is128BitVector())
43966         return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
43967                            DAG.getBitcast(MVT::v2i64, N00));
43968     }
43969 
43970     // Detect bitcasts from FP_TO_SINT to x86mmx.
43971     if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
43972       SDLoc DL(N0);
43973       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
43974                                 DAG.getUNDEF(MVT::v2i32));
43975       return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
43976                          DAG.getBitcast(MVT::v2i64, Res));
43977     }
43978   }
43979 
43980   // Try to remove a bitcast of a constant vXi1 vector. We have to legalize
43981   // most of these to scalar anyway.
43982   if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
43983       SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
43984       ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
43985     return combinevXi1ConstantToInteger(N0, DAG);
43986   }
43987 
43988   if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
43989       VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
43990       isa<ConstantSDNode>(N0)) {
43991     auto *C = cast<ConstantSDNode>(N0);
43992     if (C->isAllOnes())
43993       return DAG.getConstant(1, SDLoc(N0), VT);
43994     if (C->isZero())
43995       return DAG.getConstant(0, SDLoc(N0), VT);
43996   }
43997 
43998   // Look for MOVMSK that is maybe truncated and then bitcasted to vXi1.
43999   // Turn it into a sign bit compare that produces a k-register. This avoids
44000   // a trip through a GPR.
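  // e.g. (v4i1 (bitcast (i4 (trunc (i32 (movmsk (v4f32 V))))))) can instead be
  // lowered as (v4i1 (setlt (v4i32 (bitcast V)), 0)), staying in k-registers.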
44001   if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
44002       VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
44003       isPowerOf2_32(VT.getVectorNumElements())) {
44004     unsigned NumElts = VT.getVectorNumElements();
44005     SDValue Src = N0;
44006 
44007     // Peek through truncate.
44008     if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
44009       Src = N0.getOperand(0);
44010 
44011     if (Src.getOpcode() == X86ISD::MOVMSK && Src.hasOneUse()) {
44012       SDValue MovmskIn = Src.getOperand(0);
44013       MVT MovmskVT = MovmskIn.getSimpleValueType();
44014       unsigned MovMskElts = MovmskVT.getVectorNumElements();
44015 
44016       // We allow extra bits of the movmsk to be used since they are known zero.
44017       // We can't convert a VPMOVMSKB without avx512bw.
44018       if (MovMskElts <= NumElts &&
44019           (Subtarget.hasBWI() || MovmskVT.getVectorElementType() != MVT::i8)) {
44020         EVT IntVT = EVT(MovmskVT).changeVectorElementTypeToInteger();
44021         MovmskIn = DAG.getBitcast(IntVT, MovmskIn);
44022         SDLoc dl(N);
44023         MVT CmpVT = MVT::getVectorVT(MVT::i1, MovMskElts);
44024         SDValue Cmp = DAG.getSetCC(dl, CmpVT, MovmskIn,
44025                                    DAG.getConstant(0, dl, IntVT), ISD::SETLT);
44026         if (EVT(CmpVT) == VT)
44027           return Cmp;
44028 
44029         // Pad with zeroes up to original VT to replace the zeroes that were
44030         // being used from the MOVMSK.
44031         unsigned NumConcats = NumElts / MovMskElts;
44032         SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, CmpVT));
44033         Ops[0] = Cmp;
44034         return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Ops);
44035       }
44036     }
44037   }
44038 
44039   // Try to remove bitcasts from input and output of mask arithmetic to
44040   // remove GPR<->K-register crossings.
44041   if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
44042     return V;
44043 
44044   // Convert a bitcasted integer logic operation that has one bitcasted
44045   // floating-point operand into a floating-point logic operation. This may
44046   // create a load of a constant, but that is cheaper than materializing the
44047   // constant in an integer register and transferring it to an SSE register or
44048   // transferring the SSE operand to integer register and back.
44049   unsigned FPOpcode;
44050   switch (N0.getOpcode()) {
44051     case ISD::AND: FPOpcode = X86ISD::FAND; break;
44052     case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
44053     case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
44054     default: return SDValue();
44055   }
44056 
44057   // Check if we have a bitcast from another integer type as well.
44058   if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
44059         (Subtarget.hasSSE2() && VT == MVT::f64) ||
44060         (Subtarget.hasFP16() && VT == MVT::f16) ||
44061         (Subtarget.hasSSE2() && VT.isInteger() && VT.isVector() &&
44062          TLI.isTypeLegal(VT))))
44063     return SDValue();
44064 
44065   SDValue LogicOp0 = N0.getOperand(0);
44066   SDValue LogicOp1 = N0.getOperand(1);
44067   SDLoc DL0(N0);
44068 
44069   // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
44070   if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
44071       LogicOp0.hasOneUse() && LogicOp0.getOperand(0).hasOneUse() &&
44072       LogicOp0.getOperand(0).getValueType() == VT &&
44073       !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
44074     SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
44075     unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
44076     return DAG.getNode(Opcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
44077   }
44078   // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
44079   if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
44080       LogicOp1.hasOneUse() && LogicOp1.getOperand(0).hasOneUse() &&
44081       LogicOp1.getOperand(0).getValueType() == VT &&
44082       !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
44083     SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
44084     unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
44085     return DAG.getNode(Opcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
44086   }
44087 
44088   return SDValue();
44089 }
44090 
44091 // (mul (zext a), (sext b))
44092 static bool detectExtMul(SelectionDAG &DAG, const SDValue &Mul, SDValue &Op0,
44093                          SDValue &Op1) {
44094   Op0 = Mul.getOperand(0);
44095   Op1 = Mul.getOperand(1);
44096 
44097   // Operand 1 should be the sign-extended operand.
44098   if (Op0.getOpcode() == ISD::SIGN_EXTEND)
44099     std::swap(Op0, Op1);
44100 
44101   auto IsFreeTruncation = [](SDValue &Op) -> bool {
44102     if ((Op.getOpcode() == ISD::ZERO_EXTEND ||
44103          Op.getOpcode() == ISD::SIGN_EXTEND) &&
44104         Op.getOperand(0).getScalarValueSizeInBits() <= 8)
44105       return true;
44106 
44107     auto *BV = dyn_cast<BuildVectorSDNode>(Op);
44108     return (BV && BV->isConstant());
44109   };
44110 
44111   // (dpbusd (zext a), (sext b)). Since the first operand should be an unsigned
44112   // value, we need to check that Op0 is a zero-extended value. Op1 should be a
44113   // signed value, so we just check its number of significant bits.
44114   if ((IsFreeTruncation(Op0) &&
44115        DAG.computeKnownBits(Op0).countMaxActiveBits() <= 8) &&
44116       (IsFreeTruncation(Op1) && DAG.ComputeMaxSignificantBits(Op1) <= 8))
44117     return true;
44118 
44119   return false;
44120 }
44121 
44122 // Given an ABS node, detect the following pattern:
44123 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
44124 // This is useful as it is the input into a SAD pattern.
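// For example (illustrative):
//   (abs (sub (zext <16 x i8> A to <16 x i32>), (zext <16 x i8> B to <16 x i32>)))
// computes the element-wise |A[i] - B[i]| values that PSADBW later sums in
// groups of eight bytes.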
44125 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
44126   SDValue AbsOp1 = Abs->getOperand(0);
44127   if (AbsOp1.getOpcode() != ISD::SUB)
44128     return false;
44129 
44130   Op0 = AbsOp1.getOperand(0);
44131   Op1 = AbsOp1.getOperand(1);
44132 
44133   // Check if the operands of the sub are zero-extended from vectors of i8.
44134   if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
44135       Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
44136       Op1.getOpcode() != ISD::ZERO_EXTEND ||
44137       Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
44138     return false;
44139 
44140   return true;
44141 }
44142 
44143 static SDValue createVPDPBUSD(SelectionDAG &DAG, SDValue LHS, SDValue RHS,
44144                               unsigned &LogBias, const SDLoc &DL,
44145                               const X86Subtarget &Subtarget) {
44146   // Extend or truncate to MVT::i8 first.
44147   MVT Vi8VT =
44148       MVT::getVectorVT(MVT::i8, LHS.getValueType().getVectorElementCount());
44149   LHS = DAG.getZExtOrTrunc(LHS, DL, Vi8VT);
44150   RHS = DAG.getSExtOrTrunc(RHS, DL, Vi8VT);
44151 
44152   // VPDPBUSD(<16 x i32> C, <16 x i8> A, <16 x i8> B). For each dst element:
44153   // C[0] = C[0] + A[0]*B[0] + A[1]*B[1] + A[2]*B[2] + A[3]*B[3].
44154   // The src A, B element type is i8, but the dst C element type is i32.
44155   // The reduction stage count is computed from the vXi8 source type, so we
44156   // need a log-bias of 2 to skip the 2 stages that VPDPBUSD already performs.
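  // For example (illustrative): a v16i32 add reduction needs Log2(16) = 4
  // shuffle+add stages, but VPDPBUSD already sums each group of 4 byte
  // products into one i32 lane, which covers the bottom 2 stages.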
44157   LogBias = 2;
44158 
44159   unsigned RegSize = std::max(128u, (unsigned)Vi8VT.getSizeInBits());
44160   if (Subtarget.hasVNNI() && !Subtarget.hasVLX())
44161     RegSize = std::max(512u, RegSize);
44162 
44163   // "Zero-extend" the i8 vectors. This is not a per-element zext; rather, we
44164   // fill in the missing vector elements with 0.
44165   unsigned NumConcat = RegSize / Vi8VT.getSizeInBits();
44166   SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, Vi8VT));
44167   Ops[0] = LHS;
44168   MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
44169   SDValue DpOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
44170   Ops[0] = RHS;
44171   SDValue DpOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
44172 
44173   // Actually build the DotProduct, split as 256/512 bits for
44174   // AVXVNNI/AVX512VNNI.
44175   auto DpBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44176                        ArrayRef<SDValue> Ops) {
44177     MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
44178     return DAG.getNode(X86ISD::VPDPBUSD, DL, VT, Ops);
44179   };
44180   MVT DpVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
44181   SDValue Zero = DAG.getConstant(0, DL, DpVT);
44182 
44183   return SplitOpsAndApply(DAG, Subtarget, DL, DpVT, {Zero, DpOp0, DpOp1},
44184                           DpBuilder, false);
44185 }
44186 
44187 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
44188 // to these zexts.
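// For example (illustrative): PSADBW(<16 x i8> A, <16 x i8> B) yields a
// <2 x i64> where each 64-bit lane holds the sum of |A[i] - B[i]| over its
// group of eight bytes.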
44189 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
44190                             const SDValue &Zext1, const SDLoc &DL,
44191                             const X86Subtarget &Subtarget) {
44192   // Find the appropriate width for the PSADBW.
44193   EVT InVT = Zext0.getOperand(0).getValueType();
44194   unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
44195 
44196   // "Zero-extend" the i8 vectors. This is not a per-element zext; rather, we
44197   // fill in the missing vector elements with 0.
44198   unsigned NumConcat = RegSize / InVT.getSizeInBits();
44199   SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
44200   Ops[0] = Zext0.getOperand(0);
44201   MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
44202   SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
44203   Ops[0] = Zext1.getOperand(0);
44204   SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
44205 
44206   // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
44207   auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44208                           ArrayRef<SDValue> Ops) {
44209     MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
44210     return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
44211   };
44212   MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
44213   return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
44214                           PSADBWBuilder);
44215 }
44216 
44217 // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
44218 // PHMINPOSUW.
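// For example (illustrative), a umin reduction of v8i16 X becomes
//   (i16 extractelt (PHMINPOSUW X), 0),
// while smin/smax/umax are handled by XOR'ing with a bias mask before and
// after the PHMINPOSUW.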
44219 static SDValue combineMinMaxReduction(SDNode *Extract, SelectionDAG &DAG,
44220                                       const X86Subtarget &Subtarget) {
44221   // Bail without SSE41.
44222   if (!Subtarget.hasSSE41())
44223     return SDValue();
44224 
44225   EVT ExtractVT = Extract->getValueType(0);
44226   if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
44227     return SDValue();
44228 
44229   // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
44230   ISD::NodeType BinOp;
44231   SDValue Src = DAG.matchBinOpReduction(
44232       Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
44233   if (!Src)
44234     return SDValue();
44235 
44236   EVT SrcVT = Src.getValueType();
44237   EVT SrcSVT = SrcVT.getScalarType();
44238   if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
44239     return SDValue();
44240 
44241   SDLoc DL(Extract);
44242   SDValue MinPos = Src;
44243 
44244   // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
44245   while (SrcVT.getSizeInBits() > 128) {
44246     SDValue Lo, Hi;
44247     std::tie(Lo, Hi) = splitVector(MinPos, DAG, DL);
44248     SrcVT = Lo.getValueType();
44249     MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
44250   }
44251   assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
44252           (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
44253          "Unexpected value type");
44254 
44255   // PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask
44256   // to flip the values accordingly.
44257   SDValue Mask;
44258   unsigned MaskEltsBits = ExtractVT.getSizeInBits();
44259   if (BinOp == ISD::SMAX)
44260     Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
44261   else if (BinOp == ISD::SMIN)
44262     Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
44263   else if (BinOp == ISD::UMAX)
44264     Mask = DAG.getAllOnesConstant(DL, SrcVT);
44265 
44266   if (Mask)
44267     MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
44268 
44269   // For v16i8 cases we need to perform UMIN on pairs of byte elements,
44270   // shuffling each upper element down and inserting zeros. This means that the
44271   // v16i8 UMIN will leave the upper byte of each pair as zero, performing the
44272   // zero-extension ready for the PHMINPOS.
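  // For example (illustrative), for bytes <b0,b1,...,b15> the shuffle forms
  // <b1,0,b3,0,...,b15,0>, so the UMIN gives <min(b0,b1),0,min(b2,b3),0,...>,
  // i.e. a v8i16 of zero-extended byte minima ready for PHMINPOSUW.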
44273   if (ExtractVT == MVT::i8) {
44274     SDValue Upper = DAG.getVectorShuffle(
44275         SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
44276         {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
44277     MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
44278   }
44279 
44280   // Perform the PHMINPOS on a v8i16 vector.
44281   MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
44282   MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
44283   MinPos = DAG.getBitcast(SrcVT, MinPos);
44284 
44285   if (Mask)
44286     MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
44287 
44288   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
44289                      DAG.getIntPtrConstant(0, DL));
44290 }
44291 
44292 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
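// For example (illustrative), reducing a v16i8 compare mask M becomes:
//   any_of --> (PMOVMSKB M) != 0
//   all_of --> (PMOVMSKB M) == 0xFFFF
//   parity --> PARITY(PMOVMSKB M)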
44293 static SDValue combinePredicateReduction(SDNode *Extract, SelectionDAG &DAG,
44294                                          const X86Subtarget &Subtarget) {
44295   // Bail without SSE2.
44296   if (!Subtarget.hasSSE2())
44297     return SDValue();
44298 
44299   EVT ExtractVT = Extract->getValueType(0);
44300   unsigned BitWidth = ExtractVT.getSizeInBits();
44301   if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
44302       ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
44303     return SDValue();
44304 
44305   // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
44306   ISD::NodeType BinOp;
44307   SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
44308   if (!Match && ExtractVT == MVT::i1)
44309     Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
44310   if (!Match)
44311     return SDValue();
44312 
44313   // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
44314   // which we can't support here for now.
44315   if (Match.getScalarValueSizeInBits() != BitWidth)
44316     return SDValue();
44317 
44318   SDValue Movmsk;
44319   SDLoc DL(Extract);
44320   EVT MatchVT = Match.getValueType();
44321   unsigned NumElts = MatchVT.getVectorNumElements();
44322   unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
44323   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44324 
44325   if (ExtractVT == MVT::i1) {
44326     // Special case for (pre-legalization) vXi1 reductions.
44327     if (NumElts > 64 || !isPowerOf2_32(NumElts))
44328       return SDValue();
44329     if (TLI.isTypeLegal(MatchVT)) {
44330       // If this is a legal AVX512 predicate type then we can just bitcast.
44331       EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
44332       Movmsk = DAG.getBitcast(MovmskVT, Match);
44333     } else {
44334       // For all_of(setcc(x,y,eq)) - use PMOVMSKB(PCMPEQB()).
44335       if (BinOp == ISD::AND && Match.getOpcode() == ISD::SETCC &&
44336           cast<CondCodeSDNode>(Match.getOperand(2))->get() ==
44337               ISD::CondCode::SETEQ) {
44338         EVT VecSVT = Match.getOperand(0).getValueType().getScalarType();
44339         if (VecSVT != MVT::i8 && (VecSVT.getSizeInBits() % 8) == 0) {
44340           NumElts *= VecSVT.getSizeInBits() / 8;
44341           EVT CmpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i8, NumElts);
44342           MatchVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
44343           Match = DAG.getSetCC(
44344               DL, MatchVT, DAG.getBitcast(CmpVT, Match.getOperand(0)),
44345               DAG.getBitcast(CmpVT, Match.getOperand(1)), ISD::CondCode::SETEQ);
44346         }
44347       }
44348 
44349       // Use combineBitcastvxi1 to create the MOVMSK.
44350       while (NumElts > MaxElts) {
44351         SDValue Lo, Hi;
44352         std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
44353         Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
44354         NumElts /= 2;
44355       }
44356       EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
44357       Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
44358     }
44359     if (!Movmsk)
44360       return SDValue();
44361     Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
44362   } else {
44363     // FIXME: Better handling of k-registers or 512-bit vectors?
44364     unsigned MatchSizeInBits = Match.getValueSizeInBits();
44365     if (!(MatchSizeInBits == 128 ||
44366           (MatchSizeInBits == 256 && Subtarget.hasAVX())))
44367       return SDValue();
44368 
44369     // Make sure this isn't a vector of 1 element. The perf win from using
44370     // MOVMSK diminishes with fewer elements in the reduction, but it is
44371     // generally better to get the comparison over to the GPRs as soon as
44372     // possible to reduce the number of vector ops.
44373     if (Match.getValueType().getVectorNumElements() < 2)
44374       return SDValue();
44375 
44376     // Check that we are extracting a reduction of all sign bits.
44377     if (DAG.ComputeNumSignBits(Match) != BitWidth)
44378       return SDValue();
44379 
44380     if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
44381       SDValue Lo, Hi;
44382       std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
44383       Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
44384       MatchSizeInBits = Match.getValueSizeInBits();
44385     }
44386 
44387     // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
44388     MVT MaskSrcVT;
44389     if (64 == BitWidth || 32 == BitWidth)
44390       MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
44391                                    MatchSizeInBits / BitWidth);
44392     else
44393       MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
44394 
44395     SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
44396     Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
44397     NumElts = MaskSrcVT.getVectorNumElements();
44398   }
44399   assert((NumElts <= 32 || NumElts == 64) &&
44400          "Not expecting more than 64 elements");
44401 
44402   MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
44403   if (BinOp == ISD::XOR) {
44404     // parity -> (PARITY(MOVMSK X))
44405     SDValue Result = DAG.getNode(ISD::PARITY, DL, CmpVT, Movmsk);
44406     return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
44407   }
44408 
44409   SDValue CmpC;
44410   ISD::CondCode CondCode;
44411   if (BinOp == ISD::OR) {
44412     // any_of -> MOVMSK != 0
44413     CmpC = DAG.getConstant(0, DL, CmpVT);
44414     CondCode = ISD::CondCode::SETNE;
44415   } else {
44416     // all_of -> MOVMSK == ((1 << NumElts) - 1)
44417     CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
44418                            DL, CmpVT);
44419     CondCode = ISD::CondCode::SETEQ;
44420   }
44421 
44422   // The setcc produces an i8 of 0/1, so extend that to the result width and
44423   // negate to get the final 0/-1 mask value.
44424   EVT SetccVT =
44425       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
44426   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
44427   SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
44428   SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
44429   return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
44430 }
44431 
44432 static SDValue combineVPDPBUSDPattern(SDNode *Extract, SelectionDAG &DAG,
44433                                       const X86Subtarget &Subtarget) {
44434   if (!Subtarget.hasVNNI() && !Subtarget.hasAVXVNNI())
44435     return SDValue();
44436 
44437   EVT ExtractVT = Extract->getValueType(0);
44438   // Verify the type we're extracting is i32, as the output element type of
44439   // vpdpbusd is i32.
44440   if (ExtractVT != MVT::i32)
44441     return SDValue();
44442 
44443   EVT VT = Extract->getOperand(0).getValueType();
44444   if (!isPowerOf2_32(VT.getVectorNumElements()))
44445     return SDValue();
44446 
44447   // Match shuffle + add pyramid.
44448   ISD::NodeType BinOp;
44449   SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
44450 
44451   // We can't combine to vpdpbusd for zext, because each of the 4 multiplies
44452   // done by vpdpbusd computes a signed 16-bit product that will be sign
44453   // extended before being added into the accumulator.
44454   // TODO:
44455   // We also need to verify that the multiply has at least 2x the number of bits
44456   // of the input. We shouldn't match
44457   // (sign_extend (mul (vXi9 (zext (vXi8 X))), (vXi9 (zext (vXi8 Y))))).
44458   // if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND))
44459   //   Root = Root.getOperand(0);
44460 
44461   // If there was a match, we want Root to be a mul.
44462   if (!Root || Root.getOpcode() != ISD::MUL)
44463     return SDValue();
44464 
44465   // Check whether we have an extend and mul pattern
44466   SDValue LHS, RHS;
44467   if (!detectExtMul(DAG, Root, LHS, RHS))
44468     return SDValue();
44469 
44470   // Create the dot product instruction.
44471   SDLoc DL(Extract);
44472   unsigned StageBias;
44473   SDValue DP = createVPDPBUSD(DAG, LHS, RHS, StageBias, DL, Subtarget);
44474 
44475   // If the original vector was wider than 4 elements, sum over the results
44476   // in the DP vector.
44477   unsigned Stages = Log2_32(VT.getVectorNumElements());
44478   EVT DpVT = DP.getValueType();
44479 
44480   if (Stages > StageBias) {
44481     unsigned DpElems = DpVT.getVectorNumElements();
44482 
44483     for (unsigned i = Stages - StageBias; i > 0; --i) {
44484       SmallVector<int, 16> Mask(DpElems, -1);
44485       for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
44486         Mask[j] = MaskEnd + j;
44487 
44488       SDValue Shuffle =
44489           DAG.getVectorShuffle(DpVT, DL, DP, DAG.getUNDEF(DpVT), Mask);
44490       DP = DAG.getNode(ISD::ADD, DL, DpVT, DP, Shuffle);
44491     }
44492   }
44493 
44494   // Return the lowest ExtractSizeInBits bits.
44495   EVT ResVT =
44496       EVT::getVectorVT(*DAG.getContext(), ExtractVT,
44497                        DpVT.getSizeInBits() / ExtractVT.getSizeInBits());
44498   DP = DAG.getBitcast(ResVT, DP);
44499   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, DP,
44500                      Extract->getOperand(1));
44501 }
44502 
44503 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
44504                                       const X86Subtarget &Subtarget) {
44505   // PSADBW is only supported on SSE2 and up.
44506   if (!Subtarget.hasSSE2())
44507     return SDValue();
44508 
44509   EVT ExtractVT = Extract->getValueType(0);
44510   // Verify the type we're extracting is either i32 or i64.
44511   // FIXME: Could support other types, but this is what we have coverage for.
44512   if (ExtractVT != MVT::i32 && ExtractVT != MVT::i64)
44513     return SDValue();
44514 
44515   EVT VT = Extract->getOperand(0).getValueType();
44516   if (!isPowerOf2_32(VT.getVectorNumElements()))
44517     return SDValue();
44518 
44519   // Match shuffle + add pyramid.
44520   ISD::NodeType BinOp;
44521   SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
44522 
44523   // The operand is expected to be zero extended from i8
44524   // (verified in detectZextAbsDiff).
44525   // In order to convert to i64 and above, an additional any/zero/sign
44526   // extend is expected.
44527   // The zero extend from 32 bits has no mathematical effect on the result.
44528   // The sign extend is also effectively a zero extend
44529   // (it extends the sign bit, which is zero).
44530   // So it is correct to skip the sign/zero extend instruction.
44531   if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
44532                Root.getOpcode() == ISD::ZERO_EXTEND ||
44533                Root.getOpcode() == ISD::ANY_EXTEND))
44534     Root = Root.getOperand(0);
44535 
44536   // If there was a match, we want Root to be an ABS node that is the root of
44537   // an abs-diff pattern.
44538   if (!Root || Root.getOpcode() != ISD::ABS)
44539     return SDValue();
44540 
44541   // Check whether we have an abs-diff pattern feeding into the select.
44542   SDValue Zext0, Zext1;
44543   if (!detectZextAbsDiff(Root, Zext0, Zext1))
44544     return SDValue();
44545 
44546   // Create the SAD instruction.
44547   SDLoc DL(Extract);
44548   SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
44549 
44550   // If the original vector was wider than 8 elements, sum over the results
44551   // in the SAD vector.
44552   unsigned Stages = Log2_32(VT.getVectorNumElements());
44553   EVT SadVT = SAD.getValueType();
44554   if (Stages > 3) {
44555     unsigned SadElems = SadVT.getVectorNumElements();
44556 
44557     for(unsigned i = Stages - 3; i > 0; --i) {
44558       SmallVector<int, 16> Mask(SadElems, -1);
44559       for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
44560         Mask[j] = MaskEnd + j;
44561 
44562       SDValue Shuffle =
44563           DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
44564       SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
44565     }
44566   }
44567 
44568   unsigned ExtractSizeInBits = ExtractVT.getSizeInBits();
44569   // Return the lowest ExtractSizeInBits bits.
44570   EVT ResVT = EVT::getVectorVT(*DAG.getContext(), ExtractVT,
44571                                SadVT.getSizeInBits() / ExtractSizeInBits);
44572   SAD = DAG.getBitcast(ResVT, SAD);
44573   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, SAD,
44574                      Extract->getOperand(1));
44575 }
44576 
44577 // Attempt to peek through a target shuffle and extract the scalar from the
44578 // source.
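// For example (illustrative):
//   (i32 extractelt (v4i32 shuffle X, undef, <2,3,0,1>), 0)
//     --> (i32 extractelt X, 2)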
44579 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
44580                                          TargetLowering::DAGCombinerInfo &DCI,
44581                                          const X86Subtarget &Subtarget) {
44582   if (DCI.isBeforeLegalizeOps())
44583     return SDValue();
44584 
44585   SDLoc dl(N);
44586   SDValue Src = N->getOperand(0);
44587   SDValue Idx = N->getOperand(1);
44588 
44589   EVT VT = N->getValueType(0);
44590   EVT SrcVT = Src.getValueType();
44591   EVT SrcSVT = SrcVT.getVectorElementType();
44592   unsigned SrcEltBits = SrcSVT.getSizeInBits();
44593   unsigned NumSrcElts = SrcVT.getVectorNumElements();
44594 
44595   // Don't attempt this for boolean mask vectors or unknown extraction indices.
44596   if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
44597     return SDValue();
44598 
44599   const APInt &IdxC = N->getConstantOperandAPInt(1);
44600   if (IdxC.uge(NumSrcElts))
44601     return SDValue();
44602 
44603   SDValue SrcBC = peekThroughBitcasts(Src);
44604 
44605   // Handle extract(bitcast(broadcast(scalar_value))).
44606   if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
44607     SDValue SrcOp = SrcBC.getOperand(0);
44608     EVT SrcOpVT = SrcOp.getValueType();
44609     if (SrcOpVT.isScalarInteger() && VT.isInteger() &&
44610         (SrcOpVT.getSizeInBits() % SrcEltBits) == 0) {
44611       unsigned Scale = SrcOpVT.getSizeInBits() / SrcEltBits;
44612       unsigned Offset = IdxC.urem(Scale) * SrcEltBits;
44613       // TODO support non-zero offsets.
44614       if (Offset == 0) {
44615         SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, SrcVT.getScalarType());
44616         SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, VT);
44617         return SrcOp;
44618       }
44619     }
44620   }
44621 
44622   // If we're extracting a single element from a broadcast load and there are
44623   // no other users, just create a single load.
44624   if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
44625     auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
44626     unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
44627     if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
44628         VT.getSizeInBits() == SrcBCWidth && SrcEltBits == SrcBCWidth) {
44629       SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
44630                                  MemIntr->getBasePtr(),
44631                                  MemIntr->getPointerInfo(),
44632                                  MemIntr->getOriginalAlign(),
44633                                  MemIntr->getMemOperand()->getFlags());
44634       DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
44635       return Load;
44636     }
44637   }
44638 
44639   // Handle extract(bitcast(scalar_to_vector(scalar_value))) for integers.
44640   // TODO: Move to DAGCombine?
44641   if (SrcBC.getOpcode() == ISD::SCALAR_TO_VECTOR && VT.isInteger() &&
44642       SrcBC.getValueType().isInteger() &&
44643       (SrcBC.getScalarValueSizeInBits() % SrcEltBits) == 0 &&
44644       SrcBC.getScalarValueSizeInBits() ==
44645           SrcBC.getOperand(0).getValueSizeInBits()) {
44646     unsigned Scale = SrcBC.getScalarValueSizeInBits() / SrcEltBits;
44647     if (IdxC.ult(Scale)) {
44648       unsigned Offset = IdxC.getZExtValue() * SrcVT.getScalarSizeInBits();
44649       SDValue Scl = SrcBC.getOperand(0);
44650       EVT SclVT = Scl.getValueType();
44651       if (Offset) {
44652         Scl = DAG.getNode(ISD::SRL, dl, SclVT, Scl,
44653                           DAG.getShiftAmountConstant(Offset, SclVT, dl));
44654       }
44655       Scl = DAG.getZExtOrTrunc(Scl, dl, SrcVT.getScalarType());
44656       Scl = DAG.getZExtOrTrunc(Scl, dl, VT);
44657       return Scl;
44658     }
44659   }
44660 
44661   // Handle extract(truncate(x)) for 0'th index.
44662   // TODO: Treat this as a faux shuffle?
44663   // TODO: When can we use this for general indices?
44664   if (ISD::TRUNCATE == Src.getOpcode() && IdxC == 0 &&
44665       (SrcVT.getSizeInBits() % 128) == 0) {
44666     Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
44667     MVT ExtractVT = MVT::getVectorVT(SrcSVT.getSimpleVT(), 128 / SrcEltBits);
44668     return DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(ExtractVT, Src),
44669                        Idx);
44670   }
44671 
44672   // We can only legally extract other elements from 128-bit vectors and in
44673   // certain circumstances, depending on SSE-level.
44674   // TODO: Investigate float/double extraction if it will be just stored.
44675   auto GetLegalExtract = [&Subtarget, &DAG, &dl](SDValue Vec, EVT VecVT,
44676                                                  unsigned Idx) {
44677     EVT VecSVT = VecVT.getScalarType();
44678     if ((VecVT.is256BitVector() || VecVT.is512BitVector()) &&
44679         (VecSVT == MVT::i8 || VecSVT == MVT::i16 || VecSVT == MVT::i32 ||
44680          VecSVT == MVT::i64)) {
44681       unsigned EltSizeInBits = VecSVT.getSizeInBits();
44682       unsigned NumEltsPerLane = 128 / EltSizeInBits;
44683       unsigned LaneOffset = (Idx & ~(NumEltsPerLane - 1)) * EltSizeInBits;
44684       unsigned LaneIdx = LaneOffset / Vec.getScalarValueSizeInBits();
44685       VecVT = EVT::getVectorVT(*DAG.getContext(), VecSVT, NumEltsPerLane);
44686       Vec = extract128BitVector(Vec, LaneIdx, DAG, dl);
44687       Idx &= (NumEltsPerLane - 1);
44688     }
44689     if ((VecVT == MVT::v4i32 || VecVT == MVT::v2i64) &&
44690         ((Idx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
44691       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VecVT.getScalarType(),
44692                          DAG.getBitcast(VecVT, Vec),
44693                          DAG.getIntPtrConstant(Idx, dl));
44694     }
44695     if ((VecVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
44696         (VecVT == MVT::v16i8 && Subtarget.hasSSE41())) {
44697       unsigned OpCode = (VecVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
44698       return DAG.getNode(OpCode, dl, MVT::i32, DAG.getBitcast(VecVT, Vec),
44699                          DAG.getTargetConstant(Idx, dl, MVT::i8));
44700     }
44701     return SDValue();
44702   };
44703 
44704   // Resolve the target shuffle inputs and mask.
44705   SmallVector<int, 16> Mask;
44706   SmallVector<SDValue, 2> Ops;
44707   if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
44708     return SDValue();
44709 
44710   // Shuffle inputs must be the same size as the result.
44711   if (llvm::any_of(Ops, [SrcVT](SDValue Op) {
44712         return SrcVT.getSizeInBits() != Op.getValueSizeInBits();
44713       }))
44714     return SDValue();
44715 
44716   // Attempt to narrow/widen the shuffle mask to the correct size.
44717   if (Mask.size() != NumSrcElts) {
44718     if ((NumSrcElts % Mask.size()) == 0) {
44719       SmallVector<int, 16> ScaledMask;
44720       int Scale = NumSrcElts / Mask.size();
44721       narrowShuffleMaskElts(Scale, Mask, ScaledMask);
44722       Mask = std::move(ScaledMask);
44723     } else if ((Mask.size() % NumSrcElts) == 0) {
44724       // Simplify Mask based on demanded element.
44725       int ExtractIdx = (int)IdxC.getZExtValue();
44726       int Scale = Mask.size() / NumSrcElts;
44727       int Lo = Scale * ExtractIdx;
44728       int Hi = Scale * (ExtractIdx + 1);
44729       for (int i = 0, e = (int)Mask.size(); i != e; ++i)
44730         if (i < Lo || Hi <= i)
44731           Mask[i] = SM_SentinelUndef;
44732 
44733       SmallVector<int, 16> WidenedMask;
44734       while (Mask.size() > NumSrcElts &&
44735              canWidenShuffleElements(Mask, WidenedMask))
44736         Mask = std::move(WidenedMask);
44737     }
44738   }
44739 
44740   // If narrowing/widening failed, see if we can extract+zero-extend.
44741   int ExtractIdx;
44742   EVT ExtractVT;
44743   if (Mask.size() == NumSrcElts) {
44744     ExtractIdx = Mask[IdxC.getZExtValue()];
44745     ExtractVT = SrcVT;
44746   } else {
44747     unsigned Scale = Mask.size() / NumSrcElts;
44748     if ((Mask.size() % NumSrcElts) != 0 || SrcVT.isFloatingPoint())
44749       return SDValue();
44750     unsigned ScaledIdx = Scale * IdxC.getZExtValue();
44751     if (!isUndefOrZeroInRange(Mask, ScaledIdx + 1, Scale - 1))
44752       return SDValue();
44753     ExtractIdx = Mask[ScaledIdx];
44754     EVT ExtractSVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltBits / Scale);
44755     ExtractVT = EVT::getVectorVT(*DAG.getContext(), ExtractSVT, Mask.size());
44756     assert(SrcVT.getSizeInBits() == ExtractVT.getSizeInBits() &&
44757            "Failed to widen vector type");
44758   }
44759 
44760   // If the shuffle source element is undef/zero then we can just accept it.
44761   if (ExtractIdx == SM_SentinelUndef)
44762     return DAG.getUNDEF(VT);
44763 
44764   if (ExtractIdx == SM_SentinelZero)
44765     return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
44766                                 : DAG.getConstant(0, dl, VT);
44767 
44768   SDValue SrcOp = Ops[ExtractIdx / Mask.size()];
44769   ExtractIdx = ExtractIdx % Mask.size();
44770   if (SDValue V = GetLegalExtract(SrcOp, ExtractVT, ExtractIdx))
44771     return DAG.getZExtOrTrunc(V, dl, VT);
44772 
44773   return SDValue();
44774 }
44775 
44776 /// Extracting a scalar FP value from vector element 0 is free, so extract each
44777 /// operand first, then perform the math as a scalar op.
44778 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG,
44779                                  const X86Subtarget &Subtarget) {
44780   assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
44781   SDValue Vec = ExtElt->getOperand(0);
44782   SDValue Index = ExtElt->getOperand(1);
44783   EVT VT = ExtElt->getValueType(0);
44784   EVT VecVT = Vec.getValueType();
44785 
44786   // TODO: If this is a unary/expensive/expand op, allow extraction from a
44787   // non-zero element because the shuffle+scalar op will be cheaper?
44788   if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
44789     return SDValue();
44790 
44791   // Vector FP compares don't fit the pattern of FP math ops (propagate, not
44792   // extract, the condition code), so deal with those as a special-case.
44793   if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
44794     EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
44795     if (OpVT != MVT::f32 && OpVT != MVT::f64)
44796       return SDValue();
44797 
44798     // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
44799     SDLoc DL(ExtElt);
44800     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
44801                                Vec.getOperand(0), Index);
44802     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
44803                                Vec.getOperand(1), Index);
44804     return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
44805   }
44806 
44807   if (!(VT == MVT::f16 && Subtarget.hasFP16()) && VT != MVT::f32 &&
44808       VT != MVT::f64)
44809     return SDValue();
44810 
44811   // Vector FP selects don't fit the pattern of FP math ops (because the
44812   // condition has a different type and we have to change the opcode), so deal
44813   // with those here.
44814   // FIXME: This is restricted to pre type legalization by ensuring the setcc
44815   // has i1 elements. If we loosen this we need to convert vector bool to a
44816   // scalar bool.
44817   if (Vec.getOpcode() == ISD::VSELECT &&
44818       Vec.getOperand(0).getOpcode() == ISD::SETCC &&
44819       Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
44820       Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
44821     // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
44822     SDLoc DL(ExtElt);
44823     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
44824                                Vec.getOperand(0).getValueType().getScalarType(),
44825                                Vec.getOperand(0), Index);
44826     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
44827                                Vec.getOperand(1), Index);
44828     SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
44829                                Vec.getOperand(2), Index);
44830     return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
44831   }
44832 
44833   // TODO: This switch could include FNEG and the x86-specific FP logic ops
44834   // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
44835   // missed load folding and fma+fneg combining.
44836   switch (Vec.getOpcode()) {
44837   case ISD::FMA: // Begin 3 operands
44838   case ISD::FMAD:
44839   case ISD::FADD: // Begin 2 operands
44840   case ISD::FSUB:
44841   case ISD::FMUL:
44842   case ISD::FDIV:
44843   case ISD::FREM:
44844   case ISD::FCOPYSIGN:
44845   case ISD::FMINNUM:
44846   case ISD::FMAXNUM:
44847   case ISD::FMINNUM_IEEE:
44848   case ISD::FMAXNUM_IEEE:
44849   case ISD::FMAXIMUM:
44850   case ISD::FMINIMUM:
44851   case X86ISD::FMAX:
44852   case X86ISD::FMIN:
44853   case ISD::FABS: // Begin 1 operand
44854   case ISD::FSQRT:
44855   case ISD::FRINT:
44856   case ISD::FCEIL:
44857   case ISD::FTRUNC:
44858   case ISD::FNEARBYINT:
44859   case ISD::FROUND:
44860   case ISD::FFLOOR:
44861   case X86ISD::FRCP:
44862   case X86ISD::FRSQRT: {
44863     // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
44864     SDLoc DL(ExtElt);
44865     SmallVector<SDValue, 4> ExtOps;
44866     for (SDValue Op : Vec->ops())
44867       ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
44868     return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
44869   }
44870   default:
44871     return SDValue();
44872   }
44873   llvm_unreachable("All opcodes should return within switch");
44874 }
44875 
44876 /// Try to convert a vector reduction sequence composed of binops and shuffles
44877 /// into horizontal ops.
44878 static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG,
44879                                      const X86Subtarget &Subtarget) {
44880   assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
44881 
44882   // We need at least SSE2 to do anything here.
44883   if (!Subtarget.hasSSE2())
44884     return SDValue();
44885 
44886   ISD::NodeType Opc;
44887   SDValue Rdx = DAG.matchBinOpReduction(ExtElt, Opc,
44888                                         {ISD::ADD, ISD::MUL, ISD::FADD}, true);
44889   if (!Rdx)
44890     return SDValue();
44891 
44892   SDValue Index = ExtElt->getOperand(1);
44893   assert(isNullConstant(Index) &&
44894          "Reduction doesn't end in an extract from index 0");
44895 
44896   EVT VT = ExtElt->getValueType(0);
44897   EVT VecVT = Rdx.getValueType();
44898   if (VecVT.getScalarType() != VT)
44899     return SDValue();
44900 
44901   SDLoc DL(ExtElt);
44902   unsigned NumElts = VecVT.getVectorNumElements();
44903   unsigned EltSizeInBits = VecVT.getScalarSizeInBits();
44904 
44905   // Extend v4i8/v8i8 vector to v16i8, with undef upper 64-bits.
44906   auto WidenToV16I8 = [&](SDValue V, bool ZeroExtend) {
44907     if (V.getValueType() == MVT::v4i8) {
44908       if (ZeroExtend && Subtarget.hasSSE41()) {
44909         V = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
44910                         DAG.getConstant(0, DL, MVT::v4i32),
44911                         DAG.getBitcast(MVT::i32, V),
44912                         DAG.getIntPtrConstant(0, DL));
44913         return DAG.getBitcast(MVT::v16i8, V);
44914       }
44915       V = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, V,
44916                       ZeroExtend ? DAG.getConstant(0, DL, MVT::v4i8)
44917                                  : DAG.getUNDEF(MVT::v4i8));
44918     }
44919     return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V,
44920                        DAG.getUNDEF(MVT::v8i8));
44921   };
44922 
44923   // vXi8 mul reduction - promote to vXi16 mul reduction.
44924   if (Opc == ISD::MUL) {
44925     if (VT != MVT::i8 || NumElts < 4 || !isPowerOf2_32(NumElts))
44926       return SDValue();
44927     if (VecVT.getSizeInBits() >= 128) {
44928       EVT WideVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts / 2);
44929       SDValue Lo = getUnpackl(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
44930       SDValue Hi = getUnpackh(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
44931       Lo = DAG.getBitcast(WideVT, Lo);
44932       Hi = DAG.getBitcast(WideVT, Hi);
44933       Rdx = DAG.getNode(Opc, DL, WideVT, Lo, Hi);
44934       while (Rdx.getValueSizeInBits() > 128) {
44935         std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
44936         Rdx = DAG.getNode(Opc, DL, Lo.getValueType(), Lo, Hi);
44937       }
44938     } else {
44939       Rdx = WidenToV16I8(Rdx, false);
44940       Rdx = getUnpackl(DAG, DL, MVT::v16i8, Rdx, DAG.getUNDEF(MVT::v16i8));
44941       Rdx = DAG.getBitcast(MVT::v8i16, Rdx);
44942     }
44943     if (NumElts >= 8)
44944       Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44945                         DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44946                                              {4, 5, 6, 7, -1, -1, -1, -1}));
44947     Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44948                       DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44949                                            {2, 3, -1, -1, -1, -1, -1, -1}));
44950     Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44951                       DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44952                                            {1, -1, -1, -1, -1, -1, -1, -1}));
44953     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44954     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44955   }
44956 
44957   // vXi8 add reduction - sub-128-bit vector.
44958   if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
44959     Rdx = WidenToV16I8(Rdx, true);
44960     Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
44961                       DAG.getConstant(0, DL, MVT::v16i8));
44962     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44963     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44964   }
44965 
44966   // Must be a >=128-bit vector with pow2 elements.
44967   if ((VecVT.getSizeInBits() % 128) != 0 || !isPowerOf2_32(NumElts))
44968     return SDValue();
44969 
44970   // vXi8 add reduction - sum lo/hi halves then use PSADBW.
44971   if (VT == MVT::i8) {
44972     while (Rdx.getValueSizeInBits() > 128) {
44973       SDValue Lo, Hi;
44974       std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
44975       VecVT = Lo.getValueType();
44976       Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
44977     }
44978     assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
44979 
44980     SDValue Hi = DAG.getVectorShuffle(
44981         MVT::v16i8, DL, Rdx, Rdx,
44982         {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
44983     Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
44984     Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
44985                       getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
44986     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44987     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44988   }
44989 
44990   // See if we can use vXi8 PSADBW add reduction for larger zext types.
44991   // If the source vector values are 0-255, then we can use PSADBW to
44992   // sum+zext v8i8 subvectors to vXi64, then perform the reduction.
44993   // TODO: See if it's worth avoiding vXi16/i32 truncations?
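  // For example (a rough sketch): a v16i32 add reduction whose elements are
  // known to be <= 255 is truncated to v16i8, PSADBW'd against zero to form
  // v2i64 partial sums, and the two i64 halves are then added and extracted.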
44994   if (Opc == ISD::ADD && NumElts >= 4 && EltSizeInBits >= 16 &&
44995       DAG.computeKnownBits(Rdx).getMaxValue().ule(255) &&
44996       (EltSizeInBits == 16 || Rdx.getOpcode() == ISD::ZERO_EXTEND ||
44997        Subtarget.hasAVX512())) {
44998     EVT ByteVT = VecVT.changeVectorElementType(MVT::i8);
44999     Rdx = DAG.getNode(ISD::TRUNCATE, DL, ByteVT, Rdx);
45000     if (ByteVT.getSizeInBits() < 128)
45001       Rdx = WidenToV16I8(Rdx, true);
45002 
45003     // Build the PSADBW, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
45004     auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
45005                             ArrayRef<SDValue> Ops) {
45006       MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
45007       SDValue Zero = DAG.getConstant(0, DL, Ops[0].getValueType());
45008       return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops[0], Zero);
45009     };
45010     MVT SadVT = MVT::getVectorVT(MVT::i64, Rdx.getValueSizeInBits() / 64);
45011     Rdx = SplitOpsAndApply(DAG, Subtarget, DL, SadVT, {Rdx}, PSADBWBuilder);
45012 
45013     // TODO: We could truncate to vXi16/vXi32 before performing the reduction.
45014     while (Rdx.getValueSizeInBits() > 128) {
45015       SDValue Lo, Hi;
45016       std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
45017       VecVT = Lo.getValueType();
45018       Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
45019     }
45020     assert(Rdx.getValueType() == MVT::v2i64 && "v2i64 reduction expected");
45021 
45022     if (NumElts > 8) {
45023       SDValue RdxHi = DAG.getVectorShuffle(MVT::v2i64, DL, Rdx, Rdx, {1, -1});
45024       Rdx = DAG.getNode(ISD::ADD, DL, MVT::v2i64, Rdx, RdxHi);
45025     }
45026 
45027     VecVT = MVT::getVectorVT(VT.getSimpleVT(), 128 / VT.getSizeInBits());
45028     Rdx = DAG.getBitcast(VecVT, Rdx);
45029     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
45030   }
45031 
45032   // Only use (F)HADD opcodes if they aren't microcoded or we're minimizing codesize.
45033   if (!shouldUseHorizontalOp(true, DAG, Subtarget))
45034     return SDValue();
45035 
45036   unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
45037 
45038   // 256-bit horizontal instructions operate on 128-bit chunks rather than
45039   // across the whole vector, so we need an extract + hop preliminary stage.
45040   // This is the only step where the operands of the hop are not the same value.
45041   // TODO: We could extend this to handle 512-bit or even longer vectors.
45042   if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
45043       ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
45044     unsigned NumElts = VecVT.getVectorNumElements();
45045     SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
45046     SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
45047     Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
45048     VecVT = Rdx.getValueType();
45049   }
45050   if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
45051       !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
45052     return SDValue();
45053 
45054   // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
45055   unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
45056   for (unsigned i = 0; i != ReductionSteps; ++i)
45057     Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
45058 
45059   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
45060 }
45061 
45062 /// Detect vector gather/scatter index generation and convert it from being a
45063 /// bunch of shuffles and extracts into a somewhat faster sequence.
45064 /// For i686, the best sequence is apparently storing the value and loading
45065 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
45066 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
45067                                        TargetLowering::DAGCombinerInfo &DCI,
45068                                        const X86Subtarget &Subtarget) {
45069   if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
45070     return NewOp;
45071 
45072   SDValue InputVector = N->getOperand(0);
45073   SDValue EltIdx = N->getOperand(1);
45074   auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
45075 
45076   EVT SrcVT = InputVector.getValueType();
45077   EVT VT = N->getValueType(0);
45078   SDLoc dl(InputVector);
45079   bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
45080   unsigned NumSrcElts = SrcVT.getVectorNumElements();
45081   unsigned NumEltBits = VT.getScalarSizeInBits();
45082   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45083 
45084   if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
45085     return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
45086 
45087   // Integer Constant Folding.
45088   if (CIdx && VT.isInteger()) {
45089     APInt UndefVecElts;
45090     SmallVector<APInt, 16> EltBits;
45091     unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
45092     if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
45093                                       EltBits, true, false)) {
45094       uint64_t Idx = CIdx->getZExtValue();
45095       if (UndefVecElts[Idx])
45096         return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
45097       return DAG.getConstant(EltBits[Idx].zext(NumEltBits), dl, VT);
45098     }
45099 
45100     // Convert extract_element(bitcast(<X x i1>)) -> bitcast(extract_subvector()).
45101     // Improves lowering of bool masks in Rust, which splits them into a byte array.
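    // For example (illustrative):
    //   (i8 extractelt (v2i8 bitcast (v16i1 X)), 1)
    //     --> (i8 bitcast (v8i1 extract_subvector X, 8))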
45102     if (InputVector.getOpcode() == ISD::BITCAST && (NumEltBits % 8) == 0) {
45103       SDValue Src = peekThroughBitcasts(InputVector);
45104       if (Src.getValueType().getScalarType() == MVT::i1 &&
45105           TLI.isTypeLegal(Src.getValueType())) {
45106         MVT SubVT = MVT::getVectorVT(MVT::i1, NumEltBits);
45107         SDValue Sub = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Src,
45108             DAG.getIntPtrConstant(CIdx->getZExtValue() * NumEltBits, dl));
45109         return DAG.getBitcast(VT, Sub);
45110       }
45111     }
45112   }
45113 
45114   if (IsPextr) {
45115     if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumEltBits),
45116                                  DCI))
45117       return SDValue(N, 0);
45118 
45119     // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
45120     if ((InputVector.getOpcode() == X86ISD::PINSRB ||
45121          InputVector.getOpcode() == X86ISD::PINSRW) &&
45122         InputVector.getOperand(2) == EltIdx) {
45123       assert(SrcVT == InputVector.getOperand(0).getValueType() &&
45124              "Vector type mismatch");
45125       SDValue Scl = InputVector.getOperand(1);
45126       Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
45127       return DAG.getZExtOrTrunc(Scl, dl, VT);
45128     }
45129 
45130     // TODO - Remove this once we can handle the implicit zero-extension of
45131     // X86ISD::PEXTRW/X86ISD::PEXTRB in combinePredicateReduction and
45132     // combineBasicSADPattern.
45133     return SDValue();
45134   }
45135 
45136   // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
45137   if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
45138       VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
45139     SDValue MMXSrc = InputVector.getOperand(0);
45140 
45141     // The bitcast source is a direct mmx result.
45142     if (MMXSrc.getValueType() == MVT::x86mmx)
45143       return DAG.getBitcast(VT, InputVector);
45144   }
45145 
45146   // Detect mmx to i32 conversion through a v2i32 elt extract.
45147   if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
45148       VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
45149     SDValue MMXSrc = InputVector.getOperand(0);
45150 
45151     // The bitcast source is a direct mmx result.
45152     if (MMXSrc.getValueType() == MVT::x86mmx)
45153       return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
45154   }
45155 
45156   // Check whether this extract is the root of a sum of absolute differences
45157   // pattern. This has to be done here because we really want it to happen
45158   // pre-legalization.
45159   if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
45160     return SAD;
45161 
45162   if (SDValue VPDPBUSD = combineVPDPBUSDPattern(N, DAG, Subtarget))
45163     return VPDPBUSD;
45164 
45165   // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
45166   if (SDValue Cmp = combinePredicateReduction(N, DAG, Subtarget))
45167     return Cmp;
45168 
45169   // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
45170   if (SDValue MinMax = combineMinMaxReduction(N, DAG, Subtarget))
45171     return MinMax;
45172 
45173   // Attempt to optimize ADD/FADD/MUL reductions with HADD, promotion etc..
45174   if (SDValue V = combineArithReduction(N, DAG, Subtarget))
45175     return V;
45176 
45177   if (SDValue V = scalarizeExtEltFP(N, DAG, Subtarget))
45178     return V;
45179 
45180   // Attempt to extract a i1 element by using MOVMSK to extract the signbits
45181   // and then testing the relevant element.
45182   //
45183   // Note that we only combine extracts on the *same* result number, i.e.
45184   //   t0 = merge_values a0, a1, a2, a3
45185   //   i1 = extract_vector_elt t0, Constant:i64<2>
45186   //   i1 = extract_vector_elt t0, Constant:i64<3>
45187   // but not
45188   //   i1 = extract_vector_elt t0:1, Constant:i64<2>
45189   // since the latter would need its own MOVMSK.
45190   if (SrcVT.getScalarType() == MVT::i1) {
45191     bool IsVar = !CIdx;
45192     SmallVector<SDNode *, 16> BoolExtracts;
45193     unsigned ResNo = InputVector.getResNo();
45194     auto IsBoolExtract = [&BoolExtracts, &ResNo, &IsVar](SDNode *Use) {
45195       if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
45196           Use->getOperand(0).getResNo() == ResNo &&
45197           Use->getValueType(0) == MVT::i1) {
45198         BoolExtracts.push_back(Use);
45199         IsVar |= !isa<ConstantSDNode>(Use->getOperand(1));
45200         return true;
45201       }
45202       return false;
45203     };
45204     // TODO: Can we drop the oneuse check for constant extracts?
45205     if (all_of(InputVector->uses(), IsBoolExtract) &&
45206         (IsVar || BoolExtracts.size() > 1)) {
45207       EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
45208       if (SDValue BC =
45209               combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
45210         for (SDNode *Use : BoolExtracts) {
45211           // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
45212           // Mask = 1 << MaskIdx
45213           SDValue MaskIdx = DAG.getZExtOrTrunc(Use->getOperand(1), dl, MVT::i8);
45214           SDValue MaskBit = DAG.getConstant(1, dl, BCVT);
45215           SDValue Mask = DAG.getNode(ISD::SHL, dl, BCVT, MaskBit, MaskIdx);
45216           SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
45217           Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
45218           DCI.CombineTo(Use, Res);
45219         }
45220         return SDValue(N, 0);
45221       }
45222     }
45223   }
45224 
45225   // If this extract is from a loaded vector value and will be used as an
45226   // integer, that requires a potentially expensive XMM -> GPR transfer.
45227   // Additionally, if we can convert to a scalar integer load, that will likely
45228   // be folded into a subsequent integer op.
45229   // Note: Unlike the related fold for this in DAGCombiner, this is not limited
45230   //       to a single-use of the loaded vector. For the reasons above, we
45231   //       expect this to be profitable even if it creates an extra load.
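  // For example (illustrative):
  //   (i32 extractelt (v4i32 load %p), 2) --> (i32 load %p+8)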
45232   bool LikelyUsedAsVector = any_of(N->uses(), [](SDNode *Use) {
45233     return Use->getOpcode() == ISD::STORE ||
45234            Use->getOpcode() == ISD::INSERT_VECTOR_ELT ||
45235            Use->getOpcode() == ISD::SCALAR_TO_VECTOR;
45236   });
45237   auto *LoadVec = dyn_cast<LoadSDNode>(InputVector);
45238   if (LoadVec && CIdx && ISD::isNormalLoad(LoadVec) && VT.isInteger() &&
45239       SrcVT.getVectorElementType() == VT && DCI.isAfterLegalizeDAG() &&
45240       !LikelyUsedAsVector && LoadVec->isSimple()) {
45241     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45242     SDValue NewPtr =
45243         TLI.getVectorElementPointer(DAG, LoadVec->getBasePtr(), SrcVT, EltIdx);
45244     unsigned PtrOff = VT.getSizeInBits() * CIdx->getZExtValue() / 8;
45245     MachinePointerInfo MPI = LoadVec->getPointerInfo().getWithOffset(PtrOff);
45246     Align Alignment = commonAlignment(LoadVec->getAlign(), PtrOff);
45247     SDValue Load =
45248         DAG.getLoad(VT, dl, LoadVec->getChain(), NewPtr, MPI, Alignment,
45249                     LoadVec->getMemOperand()->getFlags(), LoadVec->getAAInfo());
45250     DAG.makeEquivalentMemoryOrdering(LoadVec, Load);
45251     return Load;
45252   }
45253 
45254   return SDValue();
45255 }
45256 
45257 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
45258 // This is more or less the reverse of combineBitcastvxi1.
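// For example, (v8i16 sext (v8i1 bitcast (i8 X))) becomes, roughly:
//   bcst = broadcast X into all 8 x i16 lanes
//   bits = and bcst, <1,2,4,8,16,32,64,128>
//   res  = sext (setcc eq bits, <1,2,4,8,16,32,64,128>)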
45259 static SDValue combineToExtendBoolVectorInReg(
45260     unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N0, SelectionDAG &DAG,
45261     TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {
45262   if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
45263       Opcode != ISD::ANY_EXTEND)
45264     return SDValue();
45265   if (!DCI.isBeforeLegalizeOps())
45266     return SDValue();
45267   if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
45268     return SDValue();
45269 
45270   EVT SVT = VT.getScalarType();
45271   EVT InSVT = N0.getValueType().getScalarType();
45272   unsigned EltSizeInBits = SVT.getSizeInBits();
45273 
45274   // We must be extending a bool vector (bit-cast from a scalar integer) to a
45275   // legal integer vector type.
45276   if (!VT.isVector())
45277     return SDValue();
45278   if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
45279     return SDValue();
45280   if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
45281     return SDValue();
45282 
45283   SDValue N00 = N0.getOperand(0);
45284   EVT SclVT = N00.getValueType();
45285   if (!SclVT.isScalarInteger())
45286     return SDValue();
45287 
45288   SDValue Vec;
45289   SmallVector<int> ShuffleMask;
45290   unsigned NumElts = VT.getVectorNumElements();
45291   assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
45292 
45293   // Broadcast the scalar integer to the vector elements.
45294   if (NumElts > EltSizeInBits) {
45295     // If the scalar integer is greater than the vector element size, then we
45296     // must split it down into sub-sections for broadcasting. For example:
45297     //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
45298     //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
45299     assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
45300     unsigned Scale = NumElts / EltSizeInBits;
45301     EVT BroadcastVT = EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
45302     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
45303     Vec = DAG.getBitcast(VT, Vec);
45304 
45305     for (unsigned i = 0; i != Scale; ++i)
45306       ShuffleMask.append(EltSizeInBits, i);
45307     Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
45308   } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
45309              (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
45310     // If we have register broadcast instructions, use the scalar size as the
45311     // element type for the shuffle. Then cast to the wider element type. The
45312     // widened bits won't be used, and this might allow the use of a broadcast
45313     // load.
45314     assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
45315     unsigned Scale = EltSizeInBits / NumElts;
45316     EVT BroadcastVT =
45317         EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
45318     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
45319     ShuffleMask.append(NumElts * Scale, 0);
45320     Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
45321     Vec = DAG.getBitcast(VT, Vec);
45322   } else {
45323     // For smaller scalar integers, we can simply any-extend it to the vector
45324     // element size (we don't care about the upper bits) and broadcast it to all
45325     // elements.
45326     SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
45327     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
45328     ShuffleMask.append(NumElts, 0);
45329     Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
45330   }
45331 
45332   // Now, mask the relevant bit in each element.
45333   SmallVector<SDValue, 32> Bits;
45334   for (unsigned i = 0; i != NumElts; ++i) {
45335     int BitIdx = (i % EltSizeInBits);
45336     APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
45337     Bits.push_back(DAG.getConstant(Bit, DL, SVT));
45338   }
45339   SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
45340   Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
45341 
45342   // Compare against the bitmask and extend the result.
45343   EVT CCVT = VT.changeVectorElementType(MVT::i1);
45344   Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
45345   Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
45346 
45347   // For SEXT this is now done; otherwise, shift the result down for
45348   // zero-extension.
45349   if (Opcode == ISD::SIGN_EXTEND)
45350     return Vec;
45351   return DAG.getNode(ISD::SRL, DL, VT, Vec,
45352                      DAG.getConstant(EltSizeInBits - 1, DL, VT));
45353 }
45354 
45355 /// If a vector select has an operand that is -1 or 0, try to simplify the
45356 /// select to a bitwise logic operation.
45357 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
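/// e.g. (vselect Cond, (build_vector -1,..,-1), X) --> (or Cond, X) once Cond
/// is known to be a sign-splat mask of the same element width as X.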
45358 static SDValue
45359 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
45360                                  TargetLowering::DAGCombinerInfo &DCI,
45361                                  const X86Subtarget &Subtarget) {
45362   SDValue Cond = N->getOperand(0);
45363   SDValue LHS = N->getOperand(1);
45364   SDValue RHS = N->getOperand(2);
45365   EVT VT = LHS.getValueType();
45366   EVT CondVT = Cond.getValueType();
45367   SDLoc DL(N);
45368   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45369 
45370   if (N->getOpcode() != ISD::VSELECT)
45371     return SDValue();
45372 
45373   assert(CondVT.isVector() && "Vector select expects a vector selector!");
45374 
45375   // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
45376   // TODO: Can we assert that both operands are not zeros (because that should
45377   //       get simplified at node creation time)?
45378   bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
45379   bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
45380 
45381   // If both inputs are 0/undef, create a complete zero vector.
45382   // FIXME: As noted above this should be handled by DAGCombiner/getNode.
45383   if (TValIsAllZeros && FValIsAllZeros) {
45384     if (VT.isFloatingPoint())
45385       return DAG.getConstantFP(0.0, DL, VT);
45386     return DAG.getConstant(0, DL, VT);
45387   }
45388 
45389   // To use the condition operand as a bitwise mask, it must have elements that
45390   // are the same size as the select elements. I.e., the condition operand must
45391   // have already been promoted from the IR select condition type <N x i1>.
45392   // Don't check if the types themselves are equal because that excludes
45393   // vector floating-point selects.
45394   if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
45395     return SDValue();
45396 
45397   // Try to invert the condition if true value is not all 1s and false value is
45398   // not all 0s. Only do this if the condition has one use.
45399   bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
45400   if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
45401       // Check if the selector will be produced by CMPP*/PCMP*.
45402       Cond.getOpcode() == ISD::SETCC &&
45403       // Check if SETCC has already been promoted.
45404       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
45405           CondVT) {
45406     bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
45407 
45408     if (TValIsAllZeros || FValIsAllOnes) {
45409       SDValue CC = Cond.getOperand(2);
45410       ISD::CondCode NewCC = ISD::getSetCCInverse(
45411           cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
45412       Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
45413                           NewCC);
45414       std::swap(LHS, RHS);
45415       TValIsAllOnes = FValIsAllOnes;
45416       FValIsAllZeros = TValIsAllZeros;
45417     }
45418   }
45419 
45420   // Cond value must be 'sign splat' to be converted to a logical op.
45421   if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
45422     return SDValue();
45423 
45424   // vselect Cond, 111..., 000... -> Cond
45425   if (TValIsAllOnes && FValIsAllZeros)
45426     return DAG.getBitcast(VT, Cond);
45427 
45428   if (!TLI.isTypeLegal(CondVT))
45429     return SDValue();
45430 
45431   // vselect Cond, 111..., X -> or Cond, X
45432   if (TValIsAllOnes) {
45433     SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
45434     SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
45435     return DAG.getBitcast(VT, Or);
45436   }
45437 
45438   // vselect Cond, X, 000... -> and Cond, X
45439   if (FValIsAllZeros) {
45440     SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
45441     SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
45442     return DAG.getBitcast(VT, And);
45443   }
45444 
45445   // vselect Cond, 000..., X -> andn Cond, X
45446   if (TValIsAllZeros) {
45447     SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
45448     SDValue AndN;
45449     // The canonical form differs for i1 vectors - X86ISD::ANDNP is not used.
45450     if (CondVT.getScalarType() == MVT::i1)
45451       AndN = DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT),
45452                          CastRHS);
45453     else
45454       AndN = DAG.getNode(X86ISD::ANDNP, DL, CondVT, Cond, CastRHS);
45455     return DAG.getBitcast(VT, AndN);
45456   }
45457 
45458   return SDValue();
45459 }
45460 
45461 /// If both arms of a vector select are concatenated vectors, split the select,
45462 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
45463 ///   vselect Cond, (concat T0, T1), (concat F0, F1) -->
45464 ///   concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
45465 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
45466                                   const X86Subtarget &Subtarget) {
45467   unsigned Opcode = N->getOpcode();
45468   if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
45469     return SDValue();
45470 
45471   // TODO: Split 512-bit vectors too?
45472   EVT VT = N->getValueType(0);
45473   if (!VT.is256BitVector())
45474     return SDValue();
45475 
45476   // TODO: Split as long as any 2 of the 3 operands are concatenated?
45477   SDValue Cond = N->getOperand(0);
45478   SDValue TVal = N->getOperand(1);
45479   SDValue FVal = N->getOperand(2);
45480   SmallVector<SDValue, 4> CatOpsT, CatOpsF;
45481   if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
45482       !collectConcatOps(TVal.getNode(), CatOpsT, DAG) ||
45483       !collectConcatOps(FVal.getNode(), CatOpsF, DAG))
45484     return SDValue();
45485 
45486   auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
45487                             ArrayRef<SDValue> Ops) {
45488     return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
45489   };
45490   return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
45491                           makeBlend, /*CheckBWI*/ false);
45492 }
45493 
45494 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
45495   SDValue Cond = N->getOperand(0);
45496   SDValue LHS = N->getOperand(1);
45497   SDValue RHS = N->getOperand(2);
45498   SDLoc DL(N);
45499 
45500   auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
45501   auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
45502   if (!TrueC || !FalseC)
45503     return SDValue();
45504 
45505   // Don't do this for crazy integer types.
45506   EVT VT = N->getValueType(0);
45507   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
45508     return SDValue();
45509 
45510   // We're going to use the condition bit in math or logic ops. We could allow
45511   // this with a wider condition value (post-legalization it becomes an i8),
45512   // but if nothing is creating selects that late, it doesn't matter.
45513   if (Cond.getValueType() != MVT::i1)
45514     return SDValue();
45515 
45516   // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
45517   // 3, 5, or 9 with i32/i64, so those get transformed too.
45518   // TODO: For constants that overflow or do not differ by power-of-2 or small
45519   // multiplier, convert to 'and' + 'add'.
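  // e.g. select Cond, 7, 2 --> (zext(Cond) * 5) + 2, where the multiply by 5
  // is a single LEA (x + 4*x).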
45520   const APInt &TrueVal = TrueC->getAPIntValue();
45521   const APInt &FalseVal = FalseC->getAPIntValue();
45522 
45523   // We have a more efficient lowering for "(X == 0) ? Y : -1" using SBB.
45524   if ((TrueVal.isAllOnes() || FalseVal.isAllOnes()) &&
45525       Cond.getOpcode() == ISD::SETCC && isNullConstant(Cond.getOperand(1))) {
45526     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45527     if (CC == ISD::SETEQ || CC == ISD::SETNE)
45528       return SDValue();
45529   }
45530 
45531   bool OV;
45532   APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
45533   if (OV)
45534     return SDValue();
45535 
45536   APInt AbsDiff = Diff.abs();
45537   if (AbsDiff.isPowerOf2() ||
45538       ((VT == MVT::i32 || VT == MVT::i64) &&
45539        (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
45540 
45541     // We need a positive multiplier constant for shift/LEA codegen. The 'not'
45542     // of the condition can usually be folded into a compare predicate, but even
45543     // without that, the sequence should be cheaper than a CMOV alternative.
45544     if (TrueVal.slt(FalseVal)) {
45545       Cond = DAG.getNOT(DL, Cond, MVT::i1);
45546       std::swap(TrueC, FalseC);
45547     }
45548 
45549     // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
45550     SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
45551 
45552     // Multiply condition by the difference if non-one.
45553     if (!AbsDiff.isOne())
45554       R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
45555 
45556     // Add the base if non-zero.
45557     if (!FalseC->isZero())
45558       R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
45559 
45560     return R;
45561   }
45562 
45563   return SDValue();
45564 }
45565 
45566 /// If this is a *dynamic* select (non-constant condition) and we can match
45567 /// this node with one of the variable blend instructions, restructure the
45568 /// condition so that blends can use the high (sign) bit of each element.
45569 /// This function will also call SimplifyDemandedBits on already created
45570 /// BLENDV to perform additional simplifications.
45571 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
45572                                       TargetLowering::DAGCombinerInfo &DCI,
45573                                       const X86Subtarget &Subtarget) {
45574   SDValue Cond = N->getOperand(0);
45575   if ((N->getOpcode() != ISD::VSELECT &&
45576        N->getOpcode() != X86ISD::BLENDV) ||
45577       ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
45578     return SDValue();
45579 
45580   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45581   unsigned BitWidth = Cond.getScalarValueSizeInBits();
45582   EVT VT = N->getValueType(0);
45583 
45584   // We can only handle the cases where VSELECT is directly legal on the
45585   // subtarget. We custom lower VSELECT nodes with constant conditions and
45586   // this makes it hard to see whether a dynamic VSELECT will correctly
45587   // lower, so we both check the operation's status and explicitly handle the
45588   // cases where a *dynamic* blend will fail even though a constant-condition
45589   // blend could be custom lowered.
45590   // FIXME: We should find a better way to handle this class of problems.
45591   // Potentially, we should combine constant-condition vselect nodes
45592   // pre-legalization into shuffles and not mark as many types as custom
45593   // lowered.
45594   if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
45595     return SDValue();
45596   // FIXME: We don't support i16-element blends currently. We could and
45597   // should support them by making *all* the bits in the condition be set
45598   // rather than just the high bit and using an i8-element blend.
45599   if (VT.getVectorElementType() == MVT::i16)
45600     return SDValue();
45601   // Dynamic blending was only available from SSE4.1 onward.
45602   if (VT.is128BitVector() && !Subtarget.hasSSE41())
45603     return SDValue();
45604   // Byte blends are only available in AVX2.
45605   if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
45606     return SDValue();
45607   // There are no 512-bit blend instructions that use sign bits.
45608   if (VT.is512BitVector())
45609     return SDValue();
45610 
45611   // Don't optimize before the condition has been transformed to a legal type
45612   // and don't ever optimize vector selects that map to AVX512 mask-registers.
45613   if (BitWidth < 8 || BitWidth > 64)
45614     return SDValue();
45615 
45616   auto OnlyUsedAsSelectCond = [](SDValue Cond) {
45617     for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
45618          UI != UE; ++UI)
45619       if ((UI->getOpcode() != ISD::VSELECT &&
45620            UI->getOpcode() != X86ISD::BLENDV) ||
45621           UI.getOperandNo() != 0)
45622         return false;
45623 
45624     return true;
45625   };
45626 
45627   APInt DemandedBits(APInt::getSignMask(BitWidth));
45628 
45629   if (OnlyUsedAsSelectCond(Cond)) {
45630     KnownBits Known;
45631     TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
45632                                           !DCI.isBeforeLegalizeOps());
45633     if (!TLI.SimplifyDemandedBits(Cond, DemandedBits, Known, TLO, 0, true))
45634       return SDValue();
45635 
45636     // If we changed the computation somewhere in the DAG, this change will
45637     // affect all users of Cond. Update all the nodes so that we do not use
45638     // the generic VSELECT anymore. Otherwise, we may perform wrong
45639     // optimizations as we messed with the actual expectation for the vector
45640     // boolean values.
45641     for (SDNode *U : Cond->uses()) {
45642       if (U->getOpcode() == X86ISD::BLENDV)
45643         continue;
45644 
45645       SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
45646                                Cond, U->getOperand(1), U->getOperand(2));
45647       DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
45648       DCI.AddToWorklist(U);
45649     }
45650     DCI.CommitTargetLoweringOpt(TLO);
45651     return SDValue(N, 0);
45652   }
45653 
45654   // Otherwise we can still at least try to simplify multiple use bits.
45655   if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedBits, DAG))
45656       return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0), V,
45657                          N->getOperand(1), N->getOperand(2));
45658 
45659   return SDValue();
45660 }
45661 
45662 // Try to match:
45663 //   (or (and (M, (sub 0, X)), (pandn M, X)))
45664 // which is a special case of:
45665 //   (select M, (sub 0, X), X)
45666 // Per:
45667 // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
45668 // We know that, if fNegate is 0 or 1:
45669 //   (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
45670 //
45671 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
45672 //   ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
45673 //   ( M      ? -X : X) == ((X ^   M     ) + (M & 1))
45674 // This lets us transform our vselect to:
45675 //   (add (xor X, M), (and M, 1))
45676 // And further to:
45677 //   (sub (xor X, M), M)
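// For example, with X = 5: if M is all-ones, (5 ^ -1) - (-1) = -6 + 1 = -5,
// and if M is 0, (5 ^ 0) - 0 = 5, matching (M ? -X : X).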
45678 static SDValue combineLogicBlendIntoConditionalNegate(
45679     EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
45680     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
45681   EVT MaskVT = Mask.getValueType();
45682   assert(MaskVT.isInteger() &&
45683          DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
45684          "Mask must be zero/all-bits");
45685 
45686   if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
45687     return SDValue();
45688   if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
45689     return SDValue();
45690 
45691   auto IsNegV = [](SDNode *N, SDValue V) {
45692     return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
45693            ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
45694   };
45695 
45696   SDValue V;
45697   if (IsNegV(Y.getNode(), X))
45698     V = X;
45699   else if (IsNegV(X.getNode(), Y))
45700     V = Y;
45701   else
45702     return SDValue();
45703 
45704   SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
45705   SDValue SubOp2 = Mask;
45706 
45707   // If the negate was on the false side of the select, then
45708   // the operands of the SUB need to be swapped. PR 27251.
45709   // This is because the pattern being matched above is
45710   // (vselect M, (sub (0, X), X)  -> (sub (xor X, M), M)
45711   // but if the pattern matched was
45712   // (vselect M, X, (sub (0, X))), that is really negation of the pattern
45713   // above, -(vselect M, (sub 0, X), X), and therefore the replacement
45714   // pattern also needs to be a negation of the replacement pattern above.
45715   // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
45716   // sub accomplishes the negation of the replacement pattern.
45717   if (V == Y)
45718     std::swap(SubOp1, SubOp2);
45719 
45720   SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
45721   return DAG.getBitcast(VT, Res);
45722 }
45723 
45724 /// Do target-specific dag combines on SELECT and VSELECT nodes.
45725 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
45726                              TargetLowering::DAGCombinerInfo &DCI,
45727                              const X86Subtarget &Subtarget) {
45728   SDLoc DL(N);
45729   SDValue Cond = N->getOperand(0);
45730   SDValue LHS = N->getOperand(1);
45731   SDValue RHS = N->getOperand(2);
45732 
45733   // Try simplification again because we use this function to optimize
45734   // BLENDV nodes that are not handled by the generic combiner.
45735   if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
45736     return V;
45737 
45738   EVT VT = LHS.getValueType();
45739   EVT CondVT = Cond.getValueType();
45740   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45741   bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());
45742 
45743   // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
45744   // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
45745   // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
45746   if (CondVT.isVector() && CondVT.isInteger() &&
45747       CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
45748       (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
45749       DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
45750     if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
45751                                                            DL, DAG, Subtarget))
45752       return V;
45753 
45754   // Convert vselects with constant condition into shuffles.
45755   if (CondConstantVector && DCI.isBeforeLegalizeOps() &&
45756       (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::BLENDV)) {
45757     SmallVector<int, 64> Mask;
45758     if (createShuffleMaskFromVSELECT(Mask, Cond,
45759                                      N->getOpcode() == X86ISD::BLENDV))
45760       return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
45761   }
45762 
45763   // fold vselect(cond, pshufb(x), pshufb(y)) -> or (pshufb(x), pshufb(y))
45764   // by forcing the unselected elements to zero.
45765   // TODO: Can we handle more shuffles with this?
45766   if (N->getOpcode() == ISD::VSELECT && CondVT.isVector() &&
45767       LHS.getOpcode() == X86ISD::PSHUFB && RHS.getOpcode() == X86ISD::PSHUFB &&
45768       LHS.hasOneUse() && RHS.hasOneUse()) {
45769     MVT SimpleVT = VT.getSimpleVT();
45770     SmallVector<SDValue, 1> LHSOps, RHSOps;
45771     SmallVector<int, 64> LHSMask, RHSMask, CondMask;
45772     if (createShuffleMaskFromVSELECT(CondMask, Cond) &&
45773         getTargetShuffleMask(LHS.getNode(), SimpleVT, true, LHSOps, LHSMask) &&
45774         getTargetShuffleMask(RHS.getNode(), SimpleVT, true, RHSOps, RHSMask)) {
45775       int NumElts = VT.getVectorNumElements();
45776       for (int i = 0; i != NumElts; ++i) {
45777         // getConstVector sets negative shuffle mask values as undef, so ensure
45778         // we hardcode SM_SentinelZero values to zero (0x80).
45779         if (CondMask[i] < NumElts) {
45780           LHSMask[i] = isUndefOrZero(LHSMask[i]) ? 0x80 : LHSMask[i];
45781           RHSMask[i] = 0x80;
45782         } else {
45783           LHSMask[i] = 0x80;
45784           RHSMask[i] = isUndefOrZero(RHSMask[i]) ? 0x80 : RHSMask[i];
45785         }
45786       }
45787       LHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, LHS.getOperand(0),
45788                         getConstVector(LHSMask, SimpleVT, DAG, DL, true));
45789       RHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, RHS.getOperand(0),
45790                         getConstVector(RHSMask, SimpleVT, DAG, DL, true));
45791       return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
45792     }
45793   }
45794 
45795   // If we have SSE[12] support, try to form min/max nodes. SSE min/max
45796   // instructions match the semantics of the common C idiom x<y?x:y but not
45797   // x<=y?x:y, because of how they handle negative zero (which can be
45798   // ignored in unsafe-math mode).
45799   // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
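  // (MINPS/MAXPS return the second source operand whenever the comparison is
  // false or unordered, e.g. for NaNs and for +0.0 vs -0.0, hence the careful
  // operand swaps below.)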
45800   if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
45801       VT != MVT::f80 && VT != MVT::f128 && !isSoftFP16(VT, Subtarget) &&
45802       (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
45803       (Subtarget.hasSSE2() ||
45804        (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
45805     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45806 
45807     unsigned Opcode = 0;
45808     // Check for x CC y ? x : y.
45809     if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
45810         DAG.isEqualTo(RHS, Cond.getOperand(1))) {
45811       switch (CC) {
45812       default: break;
45813       case ISD::SETULT:
45814         // Converting this to a min would handle NaNs incorrectly, and swapping
45815         // the operands would cause it to handle comparisons between positive
45816         // and negative zero incorrectly.
45817         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
45818           if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45819               !(DAG.isKnownNeverZeroFloat(LHS) ||
45820                 DAG.isKnownNeverZeroFloat(RHS)))
45821             break;
45822           std::swap(LHS, RHS);
45823         }
45824         Opcode = X86ISD::FMIN;
45825         break;
45826       case ISD::SETOLE:
45827         // Converting this to a min would handle comparisons between positive
45828         // and negative zero incorrectly.
45829         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45830             !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
45831           break;
45832         Opcode = X86ISD::FMIN;
45833         break;
45834       case ISD::SETULE:
45835         // Converting this to a min would handle both negative zeros and NaNs
45836         // incorrectly, but we can swap the operands to fix both.
45837         std::swap(LHS, RHS);
45838         [[fallthrough]];
45839       case ISD::SETOLT:
45840       case ISD::SETLT:
45841       case ISD::SETLE:
45842         Opcode = X86ISD::FMIN;
45843         break;
45844 
45845       case ISD::SETOGE:
45846         // Converting this to a max would handle comparisons between positive
45847         // and negative zero incorrectly.
45848         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45849             !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
45850           break;
45851         Opcode = X86ISD::FMAX;
45852         break;
45853       case ISD::SETUGT:
45854         // Converting this to a max would handle NaNs incorrectly, and swapping
45855         // the operands would cause it to handle comparisons between positive
45856         // and negative zero incorrectly.
45857         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
45858           if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45859               !(DAG.isKnownNeverZeroFloat(LHS) ||
45860                 DAG.isKnownNeverZeroFloat(RHS)))
45861             break;
45862           std::swap(LHS, RHS);
45863         }
45864         Opcode = X86ISD::FMAX;
45865         break;
45866       case ISD::SETUGE:
45867         // Converting this to a max would handle both negative zeros and NaNs
45868         // incorrectly, but we can swap the operands to fix both.
45869         std::swap(LHS, RHS);
45870         [[fallthrough]];
45871       case ISD::SETOGT:
45872       case ISD::SETGT:
45873       case ISD::SETGE:
45874         Opcode = X86ISD::FMAX;
45875         break;
45876       }
45877     // Check for x CC y ? y : x -- a min/max with reversed arms.
45878     } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
45879                DAG.isEqualTo(RHS, Cond.getOperand(0))) {
45880       switch (CC) {
45881       default: break;
45882       case ISD::SETOGE:
45883         // Converting this to a min would handle comparisons between positive
45884         // and negative zero incorrectly, and swapping the operands would
45885         // cause it to handle NaNs incorrectly.
45886         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45887             !(DAG.isKnownNeverZeroFloat(LHS) ||
45888               DAG.isKnownNeverZeroFloat(RHS))) {
45889           if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45890             break;
45891           std::swap(LHS, RHS);
45892         }
45893         Opcode = X86ISD::FMIN;
45894         break;
45895       case ISD::SETUGT:
45896         // Converting this to a min would handle NaNs incorrectly.
45897         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45898           break;
45899         Opcode = X86ISD::FMIN;
45900         break;
45901       case ISD::SETUGE:
45902         // Converting this to a min would handle both negative zeros and NaNs
45903         // incorrectly, but we can swap the operands to fix both.
45904         std::swap(LHS, RHS);
45905         [[fallthrough]];
45906       case ISD::SETOGT:
45907       case ISD::SETGT:
45908       case ISD::SETGE:
45909         Opcode = X86ISD::FMIN;
45910         break;
45911 
45912       case ISD::SETULT:
45913         // Converting this to a max would handle NaNs incorrectly.
45914         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45915           break;
45916         Opcode = X86ISD::FMAX;
45917         break;
45918       case ISD::SETOLE:
45919         // Converting this to a max would handle comparisons between positive
45920         // and negative zero incorrectly, and swapping the operands would
45921         // cause it to handle NaNs incorrectly.
45922         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45923             !DAG.isKnownNeverZeroFloat(LHS) &&
45924             !DAG.isKnownNeverZeroFloat(RHS)) {
45925           if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45926             break;
45927           std::swap(LHS, RHS);
45928         }
45929         Opcode = X86ISD::FMAX;
45930         break;
45931       case ISD::SETULE:
45932         // Converting this to a max would handle both negative zeros and NaNs
45933         // incorrectly, but we can swap the operands to fix both.
45934         std::swap(LHS, RHS);
45935         [[fallthrough]];
45936       case ISD::SETOLT:
45937       case ISD::SETLT:
45938       case ISD::SETLE:
45939         Opcode = X86ISD::FMAX;
45940         break;
45941       }
45942     }
45943 
45944     if (Opcode)
45945       return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
45946   }
45947 
45948   // Some mask scalar intrinsics rely on checking if only one bit is set
45949   // and implement it in C code like this:
45950   // A[0] = (U & 1) ? A[0] : W[0];
45951   // This creates some redundant instructions that break pattern matching.
45952   // fold (select (setcc (and (X, 1), 0, seteq), Y, Z)) -> select(and(X, 1),Z,Y)
45953   if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
45954       Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
45955     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45956     SDValue AndNode = Cond.getOperand(0);
45957     if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
45958         isNullConstant(Cond.getOperand(1)) &&
45959         isOneConstant(AndNode.getOperand(1))) {
45960       // LHS and RHS swapped due to
45961       // setcc outputting 1 when AND resulted in 0 and vice versa.
45962       AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
45963       return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
45964     }
45965   }
45966 
45967   // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
45968   // lowering on KNL. In this case we convert it to
45969   // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
45970   // The same situation applies to all vectors of i8 and i16 without BWI.
45971   // Make sure we extend these even before type legalization gets a chance to
45972   // split wide vectors.
45973   // Since SKX these selects have a proper lowering.
45974   if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
45975       CondVT.getVectorElementType() == MVT::i1 &&
45976       (VT.getVectorElementType() == MVT::i8 ||
45977        VT.getVectorElementType() == MVT::i16)) {
45978     Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
45979     return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
45980   }
45981 
45982   // AVX512 - Extend select with zero to merge with target shuffle.
45983   // select(mask, extract_subvector(shuffle(x)), zero) -->
45984   // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
45985   // TODO - support non target shuffles as well.
45986   if (Subtarget.hasAVX512() && CondVT.isVector() &&
45987       CondVT.getVectorElementType() == MVT::i1) {
45988     auto SelectableOp = [&TLI](SDValue Op) {
45989       return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
45990              isTargetShuffle(Op.getOperand(0).getOpcode()) &&
45991              isNullConstant(Op.getOperand(1)) &&
45992              TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
45993              Op.hasOneUse() && Op.getOperand(0).hasOneUse();
45994     };
45995 
45996     bool SelectableLHS = SelectableOp(LHS);
45997     bool SelectableRHS = SelectableOp(RHS);
45998     bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
45999     bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
46000 
46001     if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
46002       EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
46003                                 : RHS.getOperand(0).getValueType();
46004       EVT SrcCondVT = SrcVT.changeVectorElementType(MVT::i1);
46005       LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
46006                             VT.getSizeInBits());
46007       RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
46008                             VT.getSizeInBits());
46009       Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
46010                          DAG.getUNDEF(SrcCondVT), Cond,
46011                          DAG.getIntPtrConstant(0, DL));
46012       SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
46013       return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
46014     }
46015   }
46016 
46017   if (SDValue V = combineSelectOfTwoConstants(N, DAG))
46018     return V;
46019 
46020   if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
46021       Cond.hasOneUse()) {
46022     EVT CondVT = Cond.getValueType();
46023     SDValue Cond0 = Cond.getOperand(0);
46024     SDValue Cond1 = Cond.getOperand(1);
46025     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
46026 
46027     // Canonicalize min/max:
46028     // (x > 0) ? x : 0 -> (x >= 0) ? x : 0
46029     // (x < -1) ? x : -1 -> (x <= -1) ? x : -1
46030     // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
46031     // the need for an extra compare against zero. e.g.
46032     // (a - b) > 0 ? (a - b) : 0  ->  (a - b) >= 0 ? (a - b) : 0
46033     // subl   %esi, %edi
46034     // testl  %edi, %edi
46035     // movl   $0, %eax
46036     // cmovgl %edi, %eax
46037     // =>
46038     // xorl   %eax, %eax
46039     // subl   %esi, %edi
46040     // cmovsl %eax, %edi
46041     //
46042     // We can also canonicalize
46043     //  (x s> 1) ? x : 1 -> (x s>= 1) ? x : 1 -> (x s> 0) ? x : 1
46044     //  (x u> 1) ? x : 1 -> (x u>= 1) ? x : 1 -> (x != 0) ? x : 1
46045     // This allows the use of a test instruction for the compare.
46046     if (LHS == Cond0 && RHS == Cond1) {
46047       if ((CC == ISD::SETGT && (isNullConstant(RHS) || isOneConstant(RHS))) ||
46048           (CC == ISD::SETLT && isAllOnesConstant(RHS))) {
46049         ISD::CondCode NewCC = CC == ISD::SETGT ? ISD::SETGE : ISD::SETLE;
46050         Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
46051         return DAG.getSelect(DL, VT, Cond, LHS, RHS);
46052       }
46053       if (CC == ISD::SETUGT && isOneConstant(RHS)) {
46054         ISD::CondCode NewCC = ISD::SETUGE;
46055         Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
46056         return DAG.getSelect(DL, VT, Cond, LHS, RHS);
46057       }
46058     }
46059 
46060     // Similar to DAGCombine's select(or(CC0,CC1),X,Y) fold but for legal types.
46061     // fold eq + gt/lt nested selects into ge/le selects
46062     // select (cmpeq Cond0, Cond1), LHS, (select (cmpugt Cond0, Cond1), LHS, Y)
46063     // --> (select (cmpuge Cond0, Cond1), LHS, Y)
46064     // select (cmpslt Cond0, Cond1), LHS, (select (cmpeq Cond0, Cond1), LHS, Y)
46065     // --> (select (cmpsle Cond0, Cond1), LHS, Y)
46066     // .. etc ..
46067     if (RHS.getOpcode() == ISD::SELECT && RHS.getOperand(1) == LHS &&
46068         RHS.getOperand(0).getOpcode() == ISD::SETCC) {
46069       SDValue InnerSetCC = RHS.getOperand(0);
46070       ISD::CondCode InnerCC =
46071           cast<CondCodeSDNode>(InnerSetCC.getOperand(2))->get();
46072       if ((CC == ISD::SETEQ || InnerCC == ISD::SETEQ) &&
46073           Cond0 == InnerSetCC.getOperand(0) &&
46074           Cond1 == InnerSetCC.getOperand(1)) {
46075         ISD::CondCode NewCC;
46076         switch (CC == ISD::SETEQ ? InnerCC : CC) {
46077         case ISD::SETGT:  NewCC = ISD::SETGE; break;
46078         case ISD::SETLT:  NewCC = ISD::SETLE; break;
46079         case ISD::SETUGT: NewCC = ISD::SETUGE; break;
46080         case ISD::SETULT: NewCC = ISD::SETULE; break;
46081         default: NewCC = ISD::SETCC_INVALID; break;
46082         }
46083         if (NewCC != ISD::SETCC_INVALID) {
46084           Cond = DAG.getSetCC(DL, CondVT, Cond0, Cond1, NewCC);
46085           return DAG.getSelect(DL, VT, Cond, LHS, RHS.getOperand(2));
46086         }
46087       }
46088     }
46089   }
46090 
46091   // Check if the first operand is all zeros and Cond type is vXi1.
46092   // If this is an AVX512 target we can improve the use of zero masking by
46093   // swapping the operands and inverting the condition.
46094   if (N->getOpcode() == ISD::VSELECT && Cond.hasOneUse() &&
46095       Subtarget.hasAVX512() && CondVT.getVectorElementType() == MVT::i1 &&
46096       ISD::isBuildVectorAllZeros(LHS.getNode()) &&
46097       !ISD::isBuildVectorAllZeros(RHS.getNode())) {
46098     // Invert the cond to not(cond) : xor(op,allones)=not(op)
46099     SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
46100     // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
46101     return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
46102   }
46103 
46104   // Attempt to convert a (vXi1 bitcast(iX Cond)) selection mask before it might
46105   // get split by legalization.
46106   if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::BITCAST &&
46107       CondVT.getVectorElementType() == MVT::i1 && Cond.hasOneUse() &&
46108       TLI.isTypeLegal(VT.getScalarType())) {
46109     EVT ExtCondVT = VT.changeVectorElementTypeToInteger();
46110     if (SDValue ExtCond = combineToExtendBoolVectorInReg(
46111             ISD::SIGN_EXTEND, DL, ExtCondVT, Cond, DAG, DCI, Subtarget)) {
46112       ExtCond = DAG.getNode(ISD::TRUNCATE, DL, CondVT, ExtCond);
46113       return DAG.getSelect(DL, VT, ExtCond, LHS, RHS);
46114     }
46115   }
46116 
46117   // Early exit check
46118   if (!TLI.isTypeLegal(VT) || isSoftFP16(VT, Subtarget))
46119     return SDValue();
46120 
46121   if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
46122     return V;
46123 
46124   if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
46125     return V;
46126 
46127   if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
46128     return V;
46129 
46130   // select(~Cond, X, Y) -> select(Cond, Y, X)
46131   if (CondVT.getScalarType() != MVT::i1) {
46132     if (SDValue CondNot = IsNOT(Cond, DAG))
46133       return DAG.getNode(N->getOpcode(), DL, VT,
46134                          DAG.getBitcast(CondVT, CondNot), RHS, LHS);
46135 
46136     if (Cond.getOpcode() == X86ISD::PCMPGT && Cond.hasOneUse()) {
46137       // pcmpgt(X, -1) -> pcmpgt(0, X) to help select/blendv just use the
46138       // signbit.
46139       if (ISD::isBuildVectorAllOnes(Cond.getOperand(1).getNode())) {
46140         Cond = DAG.getNode(X86ISD::PCMPGT, DL, CondVT,
46141                            DAG.getConstant(0, DL, CondVT), Cond.getOperand(0));
46142         return DAG.getNode(N->getOpcode(), DL, VT, Cond, RHS, LHS);
46143       }
46144 
46145       // smin(LHS, RHS) : select(pcmpgt(RHS, LHS), LHS, RHS)
46146       //               -> select(pcmpgt(LHS, RHS), RHS, LHS)
46147       // iff the commuted pcmpgt() already exists.
46148       // TODO: Could DAGCombiner::combine cse search for SETCC nodes, like it
46149       // does for commutative binops?
46150       if (Cond.getOperand(0) == RHS && Cond.getOperand(1) == LHS) {
46151         if (SDNode *FlipCond =
46152                 DAG.getNodeIfExists(X86ISD::PCMPGT, DAG.getVTList(CondVT),
46153                                     {Cond.getOperand(1), Cond.getOperand(0)})) {
46154           return DAG.getNode(N->getOpcode(), DL, VT, SDValue(FlipCond, 0), RHS,
46155                              LHS);
46156         }
46157       }
46158     }
46159   }
46160 
46161   // Try to optimize vXi1 selects if both operands are either all constants or
46162   // bitcasts from scalar integer type. In that case we can convert the operands
46163   // to integer and use an integer select which will be converted to a CMOV.
46164   // We need to take a little bit of care to avoid creating an i64 type after
46165   // type legalization.
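  // e.g. (v8i1 select Cond, (bitcast i8 X), (bitcast i8 Y))
  //        --> (v8i1 bitcast (i8 select Cond, X, Y))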
46166   if (N->getOpcode() == ISD::SELECT && VT.isVector() &&
46167       VT.getVectorElementType() == MVT::i1 &&
46168       (DCI.isBeforeLegalize() || (VT != MVT::v64i1 || Subtarget.is64Bit()))) {
46169     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
46170     bool LHSIsConst = ISD::isBuildVectorOfConstantSDNodes(LHS.getNode());
46171     bool RHSIsConst = ISD::isBuildVectorOfConstantSDNodes(RHS.getNode());
46172 
46173     if ((LHSIsConst ||
46174          (LHS.getOpcode() == ISD::BITCAST &&
46175           LHS.getOperand(0).getValueType() == IntVT)) &&
46176         (RHSIsConst ||
46177          (RHS.getOpcode() == ISD::BITCAST &&
46178           RHS.getOperand(0).getValueType() == IntVT))) {
46179       if (LHSIsConst)
46180         LHS = combinevXi1ConstantToInteger(LHS, DAG);
46181       else
46182         LHS = LHS.getOperand(0);
46183 
46184       if (RHSIsConst)
46185         RHS = combinevXi1ConstantToInteger(RHS, DAG);
46186       else
46187         RHS = RHS.getOperand(0);
46188 
46189       SDValue Select = DAG.getSelect(DL, IntVT, Cond, LHS, RHS);
46190       return DAG.getBitcast(VT, Select);
46191     }
46192   }
46193 
46194   // If this is "((X & C) == 0) ? Y : Z" and C is a constant mask vector of
46195   // single bits, then invert the predicate and swap the select operands.
46196   // This can lower using a vector shift bit-hack rather than mask and compare.
46197   if (DCI.isBeforeLegalize() && !Subtarget.hasAVX512() &&
46198       N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
46199       Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1 &&
46200       Cond.getOperand(0).getOpcode() == ISD::AND &&
46201       isNullOrNullSplat(Cond.getOperand(1)) &&
46202       cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
46203       Cond.getOperand(0).getValueType() == VT) {
46204     // The 'and' mask must be composed of power-of-2 constants.
46205     SDValue And = Cond.getOperand(0);
46206     auto *C = isConstOrConstSplat(And.getOperand(1));
46207     if (C && C->getAPIntValue().isPowerOf2()) {
46208       // vselect (X & C == 0), LHS, RHS --> vselect (X & C != 0), RHS, LHS
46209       SDValue NotCond =
46210           DAG.getSetCC(DL, CondVT, And, Cond.getOperand(1), ISD::SETNE);
46211       return DAG.getSelect(DL, VT, NotCond, RHS, LHS);
46212     }
46213 
46214     // If we have a non-splat but still powers-of-2 mask, AVX1 can use pmulld
46215     // and AVX2 can use vpsllv{dq}. 8-bit lacks a proper shift or multiply.
46216     // 16-bit lacks a proper blendv.
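    // e.g. with a v4i32 mask <1,2,4,8>:
    //   vselect ((X & <1,2,4,8>) == 0), LHS, RHS
    //     --> vselect ((X << <31,30,29,28>) < 0), RHS, LHS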
46217     unsigned EltBitWidth = VT.getScalarSizeInBits();
46218     bool CanShiftBlend =
46219         TLI.isTypeLegal(VT) && ((Subtarget.hasAVX() && EltBitWidth == 32) ||
46220                                 (Subtarget.hasAVX2() && EltBitWidth == 64) ||
46221                                 (Subtarget.hasXOP()));
46222     if (CanShiftBlend &&
46223         ISD::matchUnaryPredicate(And.getOperand(1), [](ConstantSDNode *C) {
46224           return C->getAPIntValue().isPowerOf2();
46225         })) {
46226       // Create a left-shift constant to get the mask bits over to the sign-bit.
46227       SDValue Mask = And.getOperand(1);
46228       SmallVector<int, 32> ShlVals;
46229       for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
46230         auto *MaskVal = cast<ConstantSDNode>(Mask.getOperand(i));
46231         ShlVals.push_back(EltBitWidth - 1 -
46232                           MaskVal->getAPIntValue().exactLogBase2());
46233       }
46234       // vsel ((X & C) == 0), LHS, RHS --> vsel ((shl X, C') < 0), RHS, LHS
46235       SDValue ShlAmt = getConstVector(ShlVals, VT.getSimpleVT(), DAG, DL);
46236       SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And.getOperand(0), ShlAmt);
46237       SDValue NewCond =
46238           DAG.getSetCC(DL, CondVT, Shl, Cond.getOperand(1), ISD::SETLT);
46239       return DAG.getSelect(DL, VT, NewCond, RHS, LHS);
46240     }
46241   }
46242 
46243   return SDValue();
46244 }
46245 
46246 /// Combine:
46247 ///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
46248 /// to:
46249 ///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
46250 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
46251 /// Note that this is only legal for some op/cc combinations.
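/// e.g. for (atomic_load_add x, 1) compared signed-less-than zero, a
/// "lock xadd + test + js" sequence can roughly become a single flag-producing
/// "lock add + jle".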
46252 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
46253                                        SelectionDAG &DAG,
46254                                        const X86Subtarget &Subtarget) {
46255   // This combine only operates on CMP-like nodes.
46256   if (!(Cmp.getOpcode() == X86ISD::CMP ||
46257         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
46258     return SDValue();
46259 
46260   // Can't replace the cmp if it has more uses than the one we're looking at.
46261   // FIXME: We would like to be able to handle this, but would need to make sure
46262   // all uses were updated.
46263   if (!Cmp.hasOneUse())
46264     return SDValue();
46265 
46266   // This only applies to variations of the common case:
46267   //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
46268   //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
46269   //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
46270   //   (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
46271   // Using the proper condcodes (see below), overflow is checked for.
46272 
46273   // FIXME: We can generalize both constraints:
46274   // - XOR/OR/AND (if they were made to survive AtomicExpand)
46275   // - LHS != 1
46276   // if the result is compared.
46277 
46278   SDValue CmpLHS = Cmp.getOperand(0);
46279   SDValue CmpRHS = Cmp.getOperand(1);
46280   EVT CmpVT = CmpLHS.getValueType();
46281 
46282   if (!CmpLHS.hasOneUse())
46283     return SDValue();
46284 
46285   unsigned Opc = CmpLHS.getOpcode();
46286   if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
46287     return SDValue();
46288 
46289   SDValue OpRHS = CmpLHS.getOperand(2);
46290   auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
46291   if (!OpRHSC)
46292     return SDValue();
46293 
46294   APInt Addend = OpRHSC->getAPIntValue();
46295   if (Opc == ISD::ATOMIC_LOAD_SUB)
46296     Addend = -Addend;
46297 
46298   auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
46299   if (!CmpRHSC)
46300     return SDValue();
46301 
46302   APInt Comparison = CmpRHSC->getAPIntValue();
46303   APInt NegAddend = -Addend;
46304 
46305   // See if we can adjust the CC to make the comparison match the negated
46306   // addend.
46307   if (Comparison != NegAddend) {
46308     APInt IncComparison = Comparison + 1;
46309     if (IncComparison == NegAddend) {
46310       if (CC == X86::COND_A && !Comparison.isMaxValue()) {
46311         Comparison = IncComparison;
46312         CC = X86::COND_AE;
46313       } else if (CC == X86::COND_LE && !Comparison.isMaxSignedValue()) {
46314         Comparison = IncComparison;
46315         CC = X86::COND_L;
46316       }
46317     }
46318     APInt DecComparison = Comparison - 1;
46319     if (DecComparison == NegAddend) {
46320       if (CC == X86::COND_AE && !Comparison.isMinValue()) {
46321         Comparison = DecComparison;
46322         CC = X86::COND_A;
46323       } else if (CC == X86::COND_L && !Comparison.isMinSignedValue()) {
46324         Comparison = DecComparison;
46325         CC = X86::COND_LE;
46326       }
46327     }
46328   }
46329 
46330   // If the addend is the negation of the comparison value, then we can do
46331   // a full comparison by emitting the atomic arithmetic as a locked sub.
46332   if (Comparison == NegAddend) {
46333     // The CC is fine, but we need to rewrite the LHS of the comparison as an
46334     // atomic sub.
46335     auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
46336     auto AtomicSub = DAG.getAtomic(
46337         ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpVT,
46338         /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
46339         /*RHS*/ DAG.getConstant(NegAddend, SDLoc(CmpRHS), CmpVT),
46340         AN->getMemOperand());
46341     auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
46342     DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
46343     DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
46344     return LockOp;
46345   }
46346 
46347   // We can handle comparisons with zero in a number of cases by manipulating
46348   // the CC used.
46349   if (!Comparison.isZero())
46350     return SDValue();
46351 
46352   if (CC == X86::COND_S && Addend == 1)
46353     CC = X86::COND_LE;
46354   else if (CC == X86::COND_NS && Addend == 1)
46355     CC = X86::COND_G;
46356   else if (CC == X86::COND_G && Addend == -1)
46357     CC = X86::COND_GE;
46358   else if (CC == X86::COND_LE && Addend == -1)
46359     CC = X86::COND_L;
46360   else
46361     return SDValue();
46362 
46363   SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
46364   DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
46365   DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
46366   return LockOp;
46367 }
46368 
46369 // Check whether a boolean test is testing a boolean value generated by
46370 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
46371 // code.
46372 //
46373 // Simplify the following patterns:
46374 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
46375 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
46376 // to (Op EFLAGS Cond)
46377 //
46378 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
46379 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
46380 // to (Op EFLAGS !Cond)
46381 //
46382 // where Op could be BRCOND or CMOV.
46383 //
46384 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
46385   // This combine only operates on CMP-like nodes.
46386   if (!(Cmp.getOpcode() == X86ISD::CMP ||
46387         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
46388     return SDValue();
46389 
46390   // Quit if not used as a boolean value.
46391   if (CC != X86::COND_E && CC != X86::COND_NE)
46392     return SDValue();
46393 
46394   // Check CMP operands. One of them should be 0 or 1 and the other should be
46395   // a SetCC or a value extended from it.
46396   SDValue Op1 = Cmp.getOperand(0);
46397   SDValue Op2 = Cmp.getOperand(1);
46398 
46399   SDValue SetCC;
46400   const ConstantSDNode* C = nullptr;
46401   bool needOppositeCond = (CC == X86::COND_E);
46402   bool checkAgainstTrue = false; // Is it a comparison against 1?
46403 
46404   if ((C = dyn_cast<ConstantSDNode>(Op1)))
46405     SetCC = Op2;
46406   else if ((C = dyn_cast<ConstantSDNode>(Op2)))
46407     SetCC = Op1;
46408   else // Quit if neither operand is a constant.
46409     return SDValue();
46410 
46411   if (C->getZExtValue() == 1) {
46412     needOppositeCond = !needOppositeCond;
46413     checkAgainstTrue = true;
46414   } else if (C->getZExtValue() != 0)
46415     // Quit if the constant is neither 0 nor 1.
46416     return SDValue();
46417 
46418   bool truncatedToBoolWithAnd = false;
46419   // Skip (zext $x), (trunc $x), or (and $x, 1) node.
46420   while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
46421          SetCC.getOpcode() == ISD::TRUNCATE ||
46422          SetCC.getOpcode() == ISD::AND) {
46423     if (SetCC.getOpcode() == ISD::AND) {
46424       int OpIdx = -1;
46425       if (isOneConstant(SetCC.getOperand(0)))
46426         OpIdx = 1;
46427       if (isOneConstant(SetCC.getOperand(1)))
46428         OpIdx = 0;
46429       if (OpIdx < 0)
46430         break;
46431       SetCC = SetCC.getOperand(OpIdx);
46432       truncatedToBoolWithAnd = true;
46433     } else
46434       SetCC = SetCC.getOperand(0);
46435   }
46436 
46437   switch (SetCC.getOpcode()) {
46438   case X86ISD::SETCC_CARRY:
46439     // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
46440     // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
46441     // i.e. it's a comparison against true but the result of SETCC_CARRY is not
46442     // truncated to i1 using 'and'.
46443     if (checkAgainstTrue && !truncatedToBoolWithAnd)
46444       break;
46445     assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
46446            "Invalid use of SETCC_CARRY!");
46447     [[fallthrough]];
46448   case X86ISD::SETCC:
46449     // Set the condition code or opposite one if necessary.
46450     CC = X86::CondCode(SetCC.getConstantOperandVal(0));
46451     if (needOppositeCond)
46452       CC = X86::GetOppositeBranchCondition(CC);
46453     return SetCC.getOperand(1);
46454   case X86ISD::CMOV: {
46455     // Check whether the false/true values are canonical, i.e. 0 or 1.
46456     ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
46457     ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
46458     // Quit if true value is not a constant.
46459     if (!TVal)
46460       return SDValue();
46461     // Quit if false value is not a constant.
46462     if (!FVal) {
46463       SDValue Op = SetCC.getOperand(0);
46464       // Skip 'zext' or 'trunc' node.
46465       if (Op.getOpcode() == ISD::ZERO_EXTEND ||
46466           Op.getOpcode() == ISD::TRUNCATE)
46467         Op = Op.getOperand(0);
46468       // A special case for rdrand/rdseed, where 0 is produced when the false
46469       // condition is found.
46470       if ((Op.getOpcode() != X86ISD::RDRAND &&
46471            Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
46472         return SDValue();
46473     }
46474     // Quit if false value is not the constant 0 or 1.
46475     bool FValIsFalse = true;
46476     if (FVal && FVal->getZExtValue() != 0) {
46477       if (FVal->getZExtValue() != 1)
46478         return SDValue();
46479       // If FVal is 1, opposite cond is needed.
46480       needOppositeCond = !needOppositeCond;
46481       FValIsFalse = false;
46482     }
46483     // Quit if TVal is not the constant opposite of FVal.
46484     if (FValIsFalse && TVal->getZExtValue() != 1)
46485       return SDValue();
46486     if (!FValIsFalse && TVal->getZExtValue() != 0)
46487       return SDValue();
46488     CC = X86::CondCode(SetCC.getConstantOperandVal(2));
46489     if (needOppositeCond)
46490       CC = X86::GetOppositeBranchCondition(CC);
46491     return SetCC.getOperand(3);
46492   }
46493   }
46494 
46495   return SDValue();
46496 }
46497 
46498 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
46499 /// Match:
46500 ///   (X86or (X86setcc) (X86setcc))
46501 ///   (X86cmp (and (X86setcc) (X86setcc)), 0)
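/// On success, returns the two condition codes, the shared EFLAGS value, and
/// whether the combination was an AND (as opposed to an OR).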
46502 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
46503                                            X86::CondCode &CC1, SDValue &Flags,
46504                                            bool &isAnd) {
46505   if (Cond->getOpcode() == X86ISD::CMP) {
46506     if (!isNullConstant(Cond->getOperand(1)))
46507       return false;
46508 
46509     Cond = Cond->getOperand(0);
46510   }
46511 
46512   isAnd = false;
46513 
46514   SDValue SetCC0, SetCC1;
46515   switch (Cond->getOpcode()) {
46516   default: return false;
46517   case ISD::AND:
46518   case X86ISD::AND:
46519     isAnd = true;
46520     [[fallthrough]];
46521   case ISD::OR:
46522   case X86ISD::OR:
46523     SetCC0 = Cond->getOperand(0);
46524     SetCC1 = Cond->getOperand(1);
46525     break;
46526   }
46527 
46528   // Make sure we have SETCC nodes, using the same flags value.
46529   if (SetCC0.getOpcode() != X86ISD::SETCC ||
46530       SetCC1.getOpcode() != X86ISD::SETCC ||
46531       SetCC0->getOperand(1) != SetCC1->getOperand(1))
46532     return false;
46533 
46534   CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
46535   CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
46536   Flags = SetCC0->getOperand(1);
46537   return true;
46538 }
46539 
46540 // When legalizing carry, we create carries via add X, -1.
46541 // If that comes from an actual carry, via setcc, we use the
46542 // carry directly.
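// For example, (X86add (zext (setcc COND_B, Flags)), -1) produces CF exactly
// when the setcc result is nonzero, so the original Flags value can stand in
// for the ADD's EFLAGS result.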
46543 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
46544   if (EFLAGS.getOpcode() == X86ISD::ADD) {
46545     if (isAllOnesConstant(EFLAGS.getOperand(1))) {
46546       bool FoundAndLSB = false;
46547       SDValue Carry = EFLAGS.getOperand(0);
46548       while (Carry.getOpcode() == ISD::TRUNCATE ||
46549              Carry.getOpcode() == ISD::ZERO_EXTEND ||
46550              (Carry.getOpcode() == ISD::AND &&
46551               isOneConstant(Carry.getOperand(1)))) {
46552         FoundAndLSB |= Carry.getOpcode() == ISD::AND;
46553         Carry = Carry.getOperand(0);
46554       }
46555       if (Carry.getOpcode() == X86ISD::SETCC ||
46556           Carry.getOpcode() == X86ISD::SETCC_CARRY) {
46557         // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
46558         uint64_t CarryCC = Carry.getConstantOperandVal(0);
46559         SDValue CarryOp1 = Carry.getOperand(1);
46560         if (CarryCC == X86::COND_B)
46561           return CarryOp1;
46562         if (CarryCC == X86::COND_A) {
46563           // Try to convert COND_A into COND_B in an attempt to facilitate
46564           // materializing "setb reg".
46565           //
46566           // Do not flip "e > c", where "c" is a constant, because the CMP
46567           // instruction cannot take an immediate as its first operand.
46568           //
46569           if (CarryOp1.getOpcode() == X86ISD::SUB &&
46570               CarryOp1.getNode()->hasOneUse() &&
46571               CarryOp1.getValueType().isInteger() &&
46572               !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
46573             SDValue SubCommute =
46574                 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
46575                             CarryOp1.getOperand(1), CarryOp1.getOperand(0));
46576             return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
46577           }
46578         }
46579         // If this is a check of the z flag of an add with 1, switch to the
46580         // C flag.
46581         if (CarryCC == X86::COND_E &&
46582             CarryOp1.getOpcode() == X86ISD::ADD &&
46583             isOneConstant(CarryOp1.getOperand(1)))
46584           return CarryOp1;
46585       } else if (FoundAndLSB) {
46586         SDLoc DL(Carry);
46587         SDValue BitNo = DAG.getConstant(0, DL, Carry.getValueType());
46588         if (Carry.getOpcode() == ISD::SRL) {
46589           BitNo = Carry.getOperand(1);
46590           Carry = Carry.getOperand(0);
46591         }
46592         return getBT(Carry, BitNo, DL, DAG);
46593       }
46594     }
46595   }
46596 
46597   return SDValue();
46598 }
46599 
46600 /// If we are inverting a PTEST/TESTP operand, attempt to adjust the CC
46601 /// to avoid the inversion.
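/// PTEST/TESTP compute ZF from (Op0 & Op1) and CF from (~Op0 & Op1), so
/// folding away a NOT on one operand amounts to swapping which flag is
/// tested; the condition code is remapped to match.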
46602 static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
46603                               SelectionDAG &DAG,
46604                               const X86Subtarget &Subtarget) {
46605   // TODO: Handle X86ISD::KTEST/X86ISD::KORTEST.
46606   if (EFLAGS.getOpcode() != X86ISD::PTEST &&
46607       EFLAGS.getOpcode() != X86ISD::TESTP)
46608     return SDValue();
46609 
46610   // PTEST/TESTP sets EFLAGS as:
46611   // TESTZ: ZF = (Op0 & Op1) == 0
46612   // TESTC: CF = (~Op0 & Op1) == 0
46613   // TESTNZC: ZF == 0 && CF == 0
46614   EVT VT = EFLAGS.getValueType();
46615   SDValue Op0 = EFLAGS.getOperand(0);
46616   SDValue Op1 = EFLAGS.getOperand(1);
46617   EVT OpVT = Op0.getValueType();
46618 
46619   // TEST*(~X,Y) == TEST*(X,Y)
46620   if (SDValue NotOp0 = IsNOT(Op0, DAG)) {
46621     X86::CondCode InvCC;
46622     switch (CC) {
46623     case X86::COND_B:
46624       // testc -> testz.
46625       InvCC = X86::COND_E;
46626       break;
46627     case X86::COND_AE:
46628       // !testc -> !testz.
46629       InvCC = X86::COND_NE;
46630       break;
46631     case X86::COND_E:
46632       // testz -> testc.
46633       InvCC = X86::COND_B;
46634       break;
46635     case X86::COND_NE:
46636       // !testz -> !testc.
46637       InvCC = X86::COND_AE;
46638       break;
46639     case X86::COND_A:
46640     case X86::COND_BE:
46641       // testnzc -> testnzc (no change).
46642       InvCC = CC;
46643       break;
46644     default:
46645       InvCC = X86::COND_INVALID;
46646       break;
46647     }
46648 
46649     if (InvCC != X86::COND_INVALID) {
46650       CC = InvCC;
46651       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
46652                          DAG.getBitcast(OpVT, NotOp0), Op1);
46653     }
46654   }
46655 
46656   if (CC == X86::COND_E || CC == X86::COND_NE) {
46657     // TESTZ(X,~Y) == TESTC(Y,X)
46658     if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
46659       CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
46660       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
46661                          DAG.getBitcast(OpVT, NotOp1), Op0);
46662     }
46663 
46664     if (Op0 == Op1) {
46665       SDValue BC = peekThroughBitcasts(Op0);
46666       EVT BCVT = BC.getValueType();
46667       assert(BCVT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
46668              "Unexpected vector type");
46669 
46670       // TESTZ(AND(X,Y),AND(X,Y)) == TESTZ(X,Y)
46671       if (BC.getOpcode() == ISD::AND || BC.getOpcode() == X86ISD::FAND) {
46672         return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
46673                            DAG.getBitcast(OpVT, BC.getOperand(0)),
46674                            DAG.getBitcast(OpVT, BC.getOperand(1)));
46675       }
46676 
46677       // TESTZ(AND(~X,Y),AND(~X,Y)) == TESTC(X,Y)
46678       if (BC.getOpcode() == X86ISD::ANDNP || BC.getOpcode() == X86ISD::FANDN) {
46679         CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
46680         return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
46681                            DAG.getBitcast(OpVT, BC.getOperand(0)),
46682                            DAG.getBitcast(OpVT, BC.getOperand(1)));
46683       }
46684 
46685       // If every element is an all-sign value, see if we can use MOVMSK to
46686       // more efficiently extract the sign bits and compare that.
46687       // TODO: Handle TESTC with comparison inversion.
46688       // TODO: Can we remove SimplifyMultipleUseDemandedBits and rely on
46689       // MOVMSK combines to make sure it's never worse than PTEST?
46690       unsigned EltBits = BCVT.getScalarSizeInBits();
46691       if (DAG.ComputeNumSignBits(BC) == EltBits) {
46692         assert(VT == MVT::i32 && "Expected i32 EFLAGS comparison result");
46693         APInt SignMask = APInt::getSignMask(EltBits);
46694         const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46695         if (SDValue Res =
46696                 TLI.SimplifyMultipleUseDemandedBits(BC, SignMask, DAG)) {
46697           // For vXi16 cases we need to use pmovmskb and extract every other
46698           // sign bit.
46699           SDLoc DL(EFLAGS);
46700           if (EltBits == 16) {
46701             MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
46702             Res = DAG.getBitcast(MovmskVT, Res);
46703             Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
46704             Res = DAG.getNode(ISD::AND, DL, MVT::i32, Res,
46705                               DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
46706           } else {
46707             Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
46708           }
46709           return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Res,
46710                              DAG.getConstant(0, DL, MVT::i32));
46711         }
46712       }
46713     }
46714 
46715     // TESTZ(-1,X) == TESTZ(X,X)
46716     if (ISD::isBuildVectorAllOnes(Op0.getNode()))
46717       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op1, Op1);
46718 
46719     // TESTZ(X,-1) == TESTZ(X,X)
46720     if (ISD::isBuildVectorAllOnes(Op1.getNode()))
46721       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
46722 
46723     // TESTZ(OR(LO(X),HI(X)),OR(LO(Y),HI(Y))) -> TESTZ(X,Y)
46724     // TODO: Add COND_NE handling?
46725     if (CC == X86::COND_E && OpVT.is128BitVector() && Subtarget.hasAVX()) {
46726       SDValue Src0 = peekThroughBitcasts(Op0);
46727       SDValue Src1 = peekThroughBitcasts(Op1);
46728       if (Src0.getOpcode() == ISD::OR && Src1.getOpcode() == ISD::OR) {
46729         Src0 = getSplitVectorSrc(peekThroughBitcasts(Src0.getOperand(0)),
46730                                  peekThroughBitcasts(Src0.getOperand(1)), true);
46731         Src1 = getSplitVectorSrc(peekThroughBitcasts(Src1.getOperand(0)),
46732                                  peekThroughBitcasts(Src1.getOperand(1)), true);
46733         if (Src0 && Src1)
46734           return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
46735                              DAG.getBitcast(MVT::v4i64, Src0),
46736                              DAG.getBitcast(MVT::v4i64, Src1));
46737       }
46738     }
46739   }
46740 
46741   return SDValue();
46742 }
46743 
46744 // Attempt to simplify the MOVMSK input based on the comparison type.
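// any_of patterns compare the mask against 0 and all_of patterns compare it
// against the all-ones mask ((1 << NumElts) - 1), in each case for EQ/NE.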
46745 static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
46746                                   SelectionDAG &DAG,
46747                                   const X86Subtarget &Subtarget) {
46748   // Handle eq/ne against zero (any_of).
46749   // Handle eq/ne against -1 (all_of).
46750   if (!(CC == X86::COND_E || CC == X86::COND_NE))
46751     return SDValue();
46752   if (EFLAGS.getValueType() != MVT::i32)
46753     return SDValue();
46754   unsigned CmpOpcode = EFLAGS.getOpcode();
46755   if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
46756     return SDValue();
46757   auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
46758   if (!CmpConstant)
46759     return SDValue();
46760   const APInt &CmpVal = CmpConstant->getAPIntValue();
46761 
46762   SDValue CmpOp = EFLAGS.getOperand(0);
46763   unsigned CmpBits = CmpOp.getValueSizeInBits();
46764   assert(CmpBits == CmpVal.getBitWidth() && "Value size mismatch");
46765 
46766   // Peek through any truncate.
46767   if (CmpOp.getOpcode() == ISD::TRUNCATE)
46768     CmpOp = CmpOp.getOperand(0);
46769 
46770   // Bail if we don't find a MOVMSK.
46771   if (CmpOp.getOpcode() != X86ISD::MOVMSK)
46772     return SDValue();
46773 
46774   SDValue Vec = CmpOp.getOperand(0);
46775   MVT VecVT = Vec.getSimpleValueType();
46776   assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
46777          "Unexpected MOVMSK operand");
46778   unsigned NumElts = VecVT.getVectorNumElements();
46779   unsigned NumEltBits = VecVT.getScalarSizeInBits();
46780 
46781   bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isZero();
46782   bool IsAllOf = (CmpOpcode == X86ISD::SUB || CmpOpcode == X86ISD::CMP) &&
46783                  NumElts <= CmpBits && CmpVal.isMask(NumElts);
46784   if (!IsAnyOf && !IsAllOf)
46785     return SDValue();
46786 
46787   // TODO: Check more combining cases.
46788   // We check the MOVMSK operand's use count to decide whether to combine.
46789   // Currently only the "MOVMSK(CONCAT(..))" and "MOVMSK(PCMPEQ(..))"
46790   // combines below are restricted to the single-use case.
46791   bool IsOneUse = CmpOp.getNode()->hasOneUse();
46792 
46793   // See if we can peek through to a vector with a wider element type, if the
46794   // signbits extend down to all the sub-elements as well.
46795   // Calling MOVMSK with the wider type, avoiding the bitcast, helps expose
46796   // potential SimplifyDemandedBits/Elts cases.
46797   // If we looked through a truncate that discards bits, we can't do this
46798   // transform.
46799   // FIXME: We could do this transform for truncates that discarded bits by
46800   // inserting an AND mask between the new MOVMSK and the CMP.
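  // For example, a v16i8 mask bitcast from a v4i32 value whose elements are
  // all sign bits can MOVMSK the v4i32 value directly and compare against 0
  // (any_of) or 0xF (all_of).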
46801   if (Vec.getOpcode() == ISD::BITCAST && NumElts <= CmpBits) {
46802     SDValue BC = peekThroughBitcasts(Vec);
46803     MVT BCVT = BC.getSimpleValueType();
46804     unsigned BCNumElts = BCVT.getVectorNumElements();
46805     unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
46806     if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
46807         BCNumEltBits > NumEltBits &&
46808         DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
46809       SDLoc DL(EFLAGS);
46810       APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : BCNumElts);
46811       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
46812                          DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
46813                          DAG.getConstant(CmpMask, DL, MVT::i32));
46814     }
46815   }
46816 
46817   // MOVMSK(CONCAT(X,Y)) == 0 ->  MOVMSK(OR(X,Y)).
46818   // MOVMSK(CONCAT(X,Y)) != 0 ->  MOVMSK(OR(X,Y)).
46819   // MOVMSK(CONCAT(X,Y)) == -1 ->  MOVMSK(AND(X,Y)).
46820   // MOVMSK(CONCAT(X,Y)) != -1 ->  MOVMSK(AND(X,Y)).
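  // i.e. a 256-bit any_of/all_of test reduces to a single 128-bit OR/AND
  // followed by one MOVMSK on the narrower vector.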
46821   if (VecVT.is256BitVector() && NumElts <= CmpBits && IsOneUse) {
46822     SmallVector<SDValue> Ops;
46823     if (collectConcatOps(peekThroughBitcasts(Vec).getNode(), Ops, DAG) &&
46824         Ops.size() == 2) {
46825       SDLoc DL(EFLAGS);
46826       EVT SubVT = Ops[0].getValueType().changeTypeToInteger();
46827       APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : NumElts / 2);
46828       SDValue V = DAG.getNode(IsAnyOf ? ISD::OR : ISD::AND, DL, SubVT,
46829                               DAG.getBitcast(SubVT, Ops[0]),
46830                               DAG.getBitcast(SubVT, Ops[1]));
46831       V = DAG.getBitcast(VecVT.getHalfNumVectorElementsVT(), V);
46832       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
46833                          DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V),
46834                          DAG.getConstant(CmpMask, DL, MVT::i32));
46835     }
46836   }
46837 
46838   // MOVMSK(PCMPEQ(X,0)) == -1 -> PTESTZ(X,X).
46839   // MOVMSK(PCMPEQ(X,0)) != -1 -> !PTESTZ(X,X).
46840   // MOVMSK(PCMPEQ(X,Y)) == -1 -> PTESTZ(SUB(X,Y),SUB(X,Y)).
46841   // MOVMSK(PCMPEQ(X,Y)) != -1 -> !PTESTZ(SUB(X,Y),SUB(X,Y)).
46842   if (IsAllOf && Subtarget.hasSSE41() && IsOneUse) {
46843     MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
46844     SDValue BC = peekThroughBitcasts(Vec);
46845     // Ensure MOVMSK was testing every signbit of BC.
46846     if (BC.getValueType().getVectorNumElements() <= NumElts) {
46847       if (BC.getOpcode() == X86ISD::PCMPEQ) {
46848         SDValue V = DAG.getNode(ISD::SUB, SDLoc(BC), BC.getValueType(),
46849                                 BC.getOperand(0), BC.getOperand(1));
46850         V = DAG.getBitcast(TestVT, V);
46851         return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
46852       }
46853       // Check for 256-bit split vector cases.
46854       if (BC.getOpcode() == ISD::AND &&
46855           BC.getOperand(0).getOpcode() == X86ISD::PCMPEQ &&
46856           BC.getOperand(1).getOpcode() == X86ISD::PCMPEQ) {
46857         SDValue LHS = BC.getOperand(0);
46858         SDValue RHS = BC.getOperand(1);
46859         LHS = DAG.getNode(ISD::SUB, SDLoc(LHS), LHS.getValueType(),
46860                           LHS.getOperand(0), LHS.getOperand(1));
46861         RHS = DAG.getNode(ISD::SUB, SDLoc(RHS), RHS.getValueType(),
46862                           RHS.getOperand(0), RHS.getOperand(1));
46863         LHS = DAG.getBitcast(TestVT, LHS);
46864         RHS = DAG.getBitcast(TestVT, RHS);
46865         SDValue V = DAG.getNode(ISD::OR, SDLoc(EFLAGS), TestVT, LHS, RHS);
46866         return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
46867       }
46868     }
46869   }
46870 
46871   // See if we can avoid a PACKSS by calling MOVMSK on the sources.
46872   // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
46873   // sign bits prior to the comparison with zero unless we know that
46874   // the vXi16 splats the sign bit down to the lower i8 half.
46875   // TODO: Handle all_of patterns.
46876   if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
46877     SDValue VecOp0 = Vec.getOperand(0);
46878     SDValue VecOp1 = Vec.getOperand(1);
46879     bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
46880     bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
46881     // PMOVMSKB(PACKSSWB(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
46882     if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
46883       SDLoc DL(EFLAGS);
46884       SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
46885       Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
46886       Result = DAG.getZExtOrTrunc(Result, DL, MVT::i16);
46887       if (!SignExt0) {
46888         Result = DAG.getNode(ISD::AND, DL, MVT::i16, Result,
46889                              DAG.getConstant(0xAAAA, DL, MVT::i16));
46890       }
46891       return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46892                          DAG.getConstant(0, DL, MVT::i16));
46893     }
46894     // PMOVMSKB(PACKSSWB(LO(X), HI(X)))
46895     // -> PMOVMSKB(BITCAST_v32i8(X)) & 0xAAAAAAAA.
46896     if (CmpBits >= 16 && Subtarget.hasInt256() &&
46897         (IsAnyOf || (SignExt0 && SignExt1))) {
46898       if (SDValue Src = getSplitVectorSrc(VecOp0, VecOp1, true)) {
46899         SDLoc DL(EFLAGS);
46900         SDValue Result = peekThroughBitcasts(Src);
46901         if (IsAllOf && Result.getOpcode() == X86ISD::PCMPEQ &&
46902             Result.getValueType().getVectorNumElements() <= NumElts) {
46903           SDValue V = DAG.getNode(ISD::SUB, DL, Result.getValueType(),
46904                                   Result.getOperand(0), Result.getOperand(1));
46905           V = DAG.getBitcast(MVT::v4i64, V);
46906           return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
46907         }
46908         Result = DAG.getBitcast(MVT::v32i8, Result);
46909         Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
46910         unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
46911         if (!SignExt0 || !SignExt1) {
46912           assert(IsAnyOf &&
46913                  "Only perform v16i16 signmasks for any_of patterns");
46914           Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
46915                                DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
46916         }
46917         return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46918                            DAG.getConstant(CmpMask, DL, MVT::i32));
46919       }
46920     }
46921   }
46922 
46923   // MOVMSK(SHUFFLE(X,u)) -> MOVMSK(X) iff every element is referenced.
46924   SmallVector<int, 32> ShuffleMask;
46925   SmallVector<SDValue, 2> ShuffleInputs;
46926   if (NumElts <= CmpBits &&
46927       getTargetShuffleInputs(peekThroughBitcasts(Vec), ShuffleInputs,
46928                              ShuffleMask, DAG) &&
46929       ShuffleInputs.size() == 1 && !isAnyZeroOrUndef(ShuffleMask) &&
46930       ShuffleInputs[0].getValueSizeInBits() == VecVT.getSizeInBits()) {
46931     unsigned NumShuffleElts = ShuffleMask.size();
46932     APInt DemandedElts = APInt::getZero(NumShuffleElts);
46933     for (int M : ShuffleMask) {
46934       assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
46935       DemandedElts.setBit(M);
46936     }
46937     if (DemandedElts.isAllOnes()) {
46938       SDLoc DL(EFLAGS);
46939       SDValue Result = DAG.getBitcast(VecVT, ShuffleInputs[0]);
46940       Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
46941       Result =
46942           DAG.getZExtOrTrunc(Result, DL, EFLAGS.getOperand(0).getValueType());
46943       return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46944                          EFLAGS.getOperand(1));
46945     }
46946   }
46947 
46948   return SDValue();
46949 }
46950 
46951 /// Optimize an EFLAGS definition used according to the condition code \p CC
46952 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
46953 /// uses of chain values.
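/// The individual combines are tried in order: reusing a carry routed through
/// an ADD, boolean SETCC tests, PTEST/TESTP condition adjustment, MOVMSK
/// simplification, and finally atomic-arith EFLAGS reuse.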
46954 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
46955                                   SelectionDAG &DAG,
46956                                   const X86Subtarget &Subtarget) {
46957   if (CC == X86::COND_B)
46958     if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
46959       return Flags;
46960 
46961   if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
46962     return R;
46963 
46964   if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG, Subtarget))
46965     return R;
46966 
46967   if (SDValue R = combineSetCCMOVMSK(EFLAGS, CC, DAG, Subtarget))
46968     return R;
46969 
46970   return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
46971 }
46972 
46973 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
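/// For example, a CMOV selecting between the constants 0 and 8 becomes
/// zext(setcc) shifted left by 3, and a select between constants that differ
/// by 1 becomes zext(setcc) plus the smaller constant (see below).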
46974 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
46975                            TargetLowering::DAGCombinerInfo &DCI,
46976                            const X86Subtarget &Subtarget) {
46977   SDLoc DL(N);
46978 
46979   SDValue FalseOp = N->getOperand(0);
46980   SDValue TrueOp = N->getOperand(1);
46981   X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
46982   SDValue Cond = N->getOperand(3);
46983 
46984   // cmov X, X, ?, ? --> X
46985   if (TrueOp == FalseOp)
46986     return TrueOp;
46987 
46988   // Try to simplify the EFLAGS and condition code operands.
46989   // We can't always do this as FCMOV only supports a subset of X86 cond.
46990   if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
46991     if (!(FalseOp.getValueType() == MVT::f80 ||
46992           (FalseOp.getValueType() == MVT::f64 && !Subtarget.hasSSE2()) ||
46993           (FalseOp.getValueType() == MVT::f32 && !Subtarget.hasSSE1())) ||
46994         !Subtarget.canUseCMOV() || hasFPCMov(CC)) {
46995       SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
46996                        Flags};
46997       return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46998     }
46999   }
47000 
47001   // If this is a select between two integer constants, try to do some
47002   // optimizations.  Note that the operands are ordered the opposite of SELECT
47003   // operands.
47004   if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
47005     if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
47006       // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
47007       // larger than FalseC (the false value).
47008       if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
47009         CC = X86::GetOppositeBranchCondition(CC);
47010         std::swap(TrueC, FalseC);
47011         std::swap(TrueOp, FalseOp);
47012       }
47013 
47014       // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3.  Likewise for any pow2/0.
47015       // This is efficient for any integer data type (including i8/i16) and
47016       // shift amount.
47017       if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
47018         Cond = getSETCC(CC, Cond, DL, DAG);
47019 
47020         // Zero extend the condition if needed.
47021         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
47022 
47023         unsigned ShAmt = TrueC->getAPIntValue().logBase2();
47024         Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
47025                            DAG.getConstant(ShAmt, DL, MVT::i8));
47026         return Cond;
47027       }
47028 
47029       // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.  This is efficient
47030       // for any integer data type, including i8/i16.
47031       if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
47032         Cond = getSETCC(CC, Cond, DL, DAG);
47033 
47034         // Zero extend the condition if needed.
47035         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
47036                            FalseC->getValueType(0), Cond);
47037         Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
47038                            SDValue(FalseC, 0));
47039         return Cond;
47040       }
47041 
47042       // Optimize cases that will turn into an LEA instruction.  This requires
47043       // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
47044       if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
47045         APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
47046         assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
47047                "Implicit constant truncation");
47048 
47049         bool isFastMultiplier = false;
47050         if (Diff.ult(10)) {
47051           switch (Diff.getZExtValue()) {
47052           default: break;
47053           case 1:  // result = add base, cond
47054           case 2:  // result = lea base(    , cond*2)
47055           case 3:  // result = lea base(cond, cond*2)
47056           case 4:  // result = lea base(    , cond*4)
47057           case 5:  // result = lea base(cond, cond*4)
47058           case 8:  // result = lea base(    , cond*8)
47059           case 9:  // result = lea base(cond, cond*8)
47060             isFastMultiplier = true;
47061             break;
47062           }
47063         }
47064 
47065         if (isFastMultiplier) {
47066           Cond = getSETCC(CC, Cond, DL ,DAG);
47067           // Zero extend the condition if needed.
47068           Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
47069                              Cond);
47070           // Scale the condition by the difference.
47071           if (Diff != 1)
47072             Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
47073                                DAG.getConstant(Diff, DL, Cond.getValueType()));
47074 
47075           // Add the base if non-zero.
47076           if (FalseC->getAPIntValue() != 0)
47077             Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
47078                                SDValue(FalseC, 0));
47079           return Cond;
47080         }
47081       }
47082     }
47083   }
47084 
47085   // Handle these cases:
47086   //   (select (x != c), e, c) -> (select (x != c), e, x),
47087   //   (select (x == c), c, e) -> (select (x == c), x, e)
47088   // where c is an integer constant, and the "select" is the combination
47089   // of CMOV and CMP.
47090   //
47091   // The rationale for this change is that a conditional-move from a constant
47092   // needs two instructions; a conditional-move from a register needs only
47093   // one instruction.
47094   //
47095   // CAVEAT: Replacing a constant with a symbolic value may obscure
47096   //  some instruction-combining opportunities. This opt needs to be
47097   //  postponed as late as possible.
47098   //
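  // For example, (CMOV e, 42, COND_E, (CMP x, 42)) becomes
  // (CMOV e, x, COND_E, (CMP x, 42)), so the cmov reads a register rather
  // than needing the immediate materialized separately.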
47099   if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
47100     // The DCI.xxxx conditions are provided to postpone the optimization as
47101     // late as possible.
47102 
47103     ConstantSDNode *CmpAgainst = nullptr;
47104     if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
47105         (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
47106         !isa<ConstantSDNode>(Cond.getOperand(0))) {
47107 
47108       if (CC == X86::COND_NE &&
47109           CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
47110         CC = X86::GetOppositeBranchCondition(CC);
47111         std::swap(TrueOp, FalseOp);
47112       }
47113 
47114       if (CC == X86::COND_E &&
47115           CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
47116         SDValue Ops[] = {FalseOp, Cond.getOperand(0),
47117                          DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
47118         return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
47119       }
47120     }
47121   }
47122 
47123   // Fold and/or of setcc's to double CMOV:
47124   //   (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
47125   //   (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
47126   //
47127   // This combine lets us generate:
47128   //   cmovcc1 (jcc1 if we don't have CMOV)
47129   //   cmovcc2 (same)
47130   // instead of:
47131   //   setcc1
47132   //   setcc2
47133   //   and/or
47134   //   cmovne (jne if we don't have CMOV)
47135   // When we can't use the CMOV instruction, it might increase branch
47136   // mispredicts.
47137   // When we can use CMOV, or when there is no mispredict, this improves
47138   // throughput and reduces register pressure.
47139   //
47140   if (CC == X86::COND_NE) {
47141     SDValue Flags;
47142     X86::CondCode CC0, CC1;
47143     bool isAndSetCC;
47144     if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
47145       if (isAndSetCC) {
47146         std::swap(FalseOp, TrueOp);
47147         CC0 = X86::GetOppositeBranchCondition(CC0);
47148         CC1 = X86::GetOppositeBranchCondition(CC1);
47149       }
47150 
47151       SDValue LOps[] = {FalseOp, TrueOp,
47152                         DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
47153       SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
47154       SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
47155                        Flags};
47156       SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
47157       return CMOV;
47158     }
47159   }
47160 
47161   // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
47162   //      (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
47163   // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
47164   //    (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
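  // i.e. the constant addend C2 is pulled out of the cmov so it selects
  // between (C1 - C2) and the raw CTTZ, and C2 is added back afterwards.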
47165   if ((CC == X86::COND_NE || CC == X86::COND_E) &&
47166       Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
47167     SDValue Add = TrueOp;
47168     SDValue Const = FalseOp;
47169     // Canonicalize the condition code for easier matching and output.
47170     if (CC == X86::COND_E)
47171       std::swap(Add, Const);
47172 
47173     // We might have replaced the constant in the cmov with the LHS of the
47174     // compare. If so change it to the RHS of the compare.
47175     if (Const == Cond.getOperand(0))
47176       Const = Cond.getOperand(1);
47177 
47178     // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
47179     if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
47180         Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
47181         (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
47182          Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
47183         Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
47184       EVT VT = N->getValueType(0);
47185       // This should constant fold.
47186       SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
47187       SDValue CMov =
47188           DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
47189                       DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
47190       return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
47191     }
47192   }
47193 
47194   return SDValue();
47195 }
47196 
47197 /// Different mul shrinking modes.
47198 enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
47199 
47200 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
47201   EVT VT = N->getOperand(0).getValueType();
47202   if (VT.getScalarSizeInBits() != 32)
47203     return false;
47204 
47205   assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
47206   unsigned SignBits[2] = {1, 1};
47207   bool IsPositive[2] = {false, false};
47208   for (unsigned i = 0; i < 2; i++) {
47209     SDValue Opd = N->getOperand(i);
47210 
47211     SignBits[i] = DAG.ComputeNumSignBits(Opd);
47212     IsPositive[i] = DAG.SignBitIsZero(Opd);
47213   }
47214 
47215   bool AllPositive = IsPositive[0] && IsPositive[1];
47216   unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
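  // For a 32-bit element, 25+ sign bits mean the value is sign-representable
  // in 8 bits and 17+ sign bits mean it fits in 16 bits; with a known-zero
  // sign bit, 24+ (16+) sign bits make it zero-extendable from 8 (16) bits.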
47217   // When ranges are from -128 ~ 127, use MULS8 mode.
47218   if (MinSignBits >= 25)
47219     Mode = ShrinkMode::MULS8;
47220   // When ranges are from 0 ~ 255, use MULU8 mode.
47221   else if (AllPositive && MinSignBits >= 24)
47222     Mode = ShrinkMode::MULU8;
47223   // When ranges are from -32768 ~ 32767, use MULS16 mode.
47224   else if (MinSignBits >= 17)
47225     Mode = ShrinkMode::MULS16;
47226   // When ranges are from 0 ~ 65535, use MULU16 mode.
47227   else if (AllPositive && MinSignBits >= 16)
47228     Mode = ShrinkMode::MULU16;
47229   else
47230     return false;
47231   return true;
47232 }
47233 
47234 /// When the operands of vector mul are extended from smaller size values,
47235 /// like i8 and i16, the type of mul may be shrunk to generate more
47236 /// efficient code. Two typical patterns are handled:
47237 /// Pattern1:
47238 ///     %2 = sext/zext <N x i8> %1 to <N x i32>
47239 ///     %4 = sext/zext <N x i8> %3 to <N x i32>
47240 ///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
47241 ///     %5 = mul <N x i32> %2, %4
47242 ///
47243 /// Pattern2:
47244 ///     %2 = zext/sext <N x i16> %1 to <N x i32>
47245 ///     %4 = zext/sext <N x i16> %3 to <N x i32>
47246 ///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
47247 ///     %5 = mul <N x i32> %2, %4
47248 ///
47249 /// There are four mul shrinking modes:
47250 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
47251 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
47252 /// generate pmullw+sext32 for it (MULS8 mode).
47253 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
47254 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
47255 /// generate pmullw+zext32 for it (MULU8 mode).
47256 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
47257 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
47258 /// generate pmullw+pmulhw for it (MULS16 mode).
47259 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
47260 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
47261 /// generate pmullw+pmulhuw for it (MULU16 mode).
47262 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
47263                                const X86Subtarget &Subtarget) {
47264   // Check for legality:
47265   // pmullw/pmulhw require SSE2.
47266   if (!Subtarget.hasSSE2())
47267     return SDValue();
47268 
47269   // Check for profitability
47270   // pmulld is supported since SSE41. It is better to use pmulld
47271   // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
47272   // the expansion.
47273   bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
47274   if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
47275     return SDValue();
47276 
47277   ShrinkMode Mode;
47278   if (!canReduceVMulWidth(N, DAG, Mode))
47279     return SDValue();
47280 
47281   SDLoc DL(N);
47282   SDValue N0 = N->getOperand(0);
47283   SDValue N1 = N->getOperand(1);
47284   EVT VT = N->getOperand(0).getValueType();
47285   unsigned NumElts = VT.getVectorNumElements();
47286   if ((NumElts % 2) != 0)
47287     return SDValue();
47288 
47289   EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
47290 
47291   // Shrink the operands of mul.
47292   SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
47293   SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
47294 
47295   // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
47296   // lower part is needed.
47297   SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
47298   if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
47299     return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
47300                                                    : ISD::SIGN_EXTEND,
47301                        DL, VT, MulLo);
47302 
47303   EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts / 2);
47304   // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
47305   // the higher part is also needed.
47306   SDValue MulHi =
47307       DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
47308                   ReducedVT, NewN0, NewN1);
47309 
47310   // Repack the low and high parts of the multiply into full 32-bit
47311   // results.
47312   // Generate shuffle functioning as punpcklwd.
47313   SmallVector<int, 16> ShuffleMask(NumElts);
47314   for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
47315     ShuffleMask[2 * i] = i;
47316     ShuffleMask[2 * i + 1] = i + NumElts;
47317   }
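  // Interleaving the first NumElts / 2 elements of MulLo and MulHi reassembles
  // their full 32-bit products; the second shuffle below handles the rest.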
47318   SDValue ResLo =
47319       DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
47320   ResLo = DAG.getBitcast(ResVT, ResLo);
47321   // Generate shuffle functioning as punpckhwd.
47322   for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
47323     ShuffleMask[2 * i] = i + NumElts / 2;
47324     ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
47325   }
47326   SDValue ResHi =
47327       DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
47328   ResHi = DAG.getBitcast(ResVT, ResHi);
47329   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
47330 }
47331 
47332 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
47333                                  EVT VT, const SDLoc &DL) {
47334 
47335   auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
47336     SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
47337                                  DAG.getConstant(Mult, DL, VT));
47338     Result = DAG.getNode(ISD::SHL, DL, VT, Result,
47339                          DAG.getConstant(Shift, DL, MVT::i8));
47340     Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
47341                          N->getOperand(0));
47342     return Result;
47343   };
47344 
47345   auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
47346     SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
47347                                  DAG.getConstant(Mul1, DL, VT));
47348     Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
47349                          DAG.getConstant(Mul2, DL, VT));
47350     Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
47351                          N->getOperand(0));
47352     return Result;
47353   };
47354 
47355   switch (MulAmt) {
47356   default:
47357     break;
47358   case 11:
47359     // mul x, 11 => add ((shl (mul x, 5), 1), x)
47360     return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
47361   case 21:
47362     // mul x, 21 => add ((shl (mul x, 5), 2), x)
47363     return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
47364   case 41:
47365     // mul x, 41 => add ((shl (mul x, 5), 3), x)
47366     return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
47367   case 22:
47368     // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
47369     return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
47370                        combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
47371   case 19:
47372     // mul x, 19 => add ((shl (mul x, 9), 1), x)
47373     return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
47374   case 37:
47375     // mul x, 37 => add ((shl (mul x, 9), 2), x)
47376     return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
47377   case 73:
47378     // mul x, 73 => add ((shl (mul x, 9), 3), x)
47379     return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
47380   case 13:
47381     // mul x, 13 => add ((shl (mul x, 3), 2), x)
47382     return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
47383   case 23:
47384     // mul x, 23 => sub ((shl (mul x, 3), 3), x)
47385     return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
47386   case 26:
47387     // mul x, 26 => add ((mul (mul x, 5), 5), x)
47388     return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
47389   case 28:
47390     // mul x, 28 => add ((mul (mul x, 9), 3), x)
47391     return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
47392   case 29:
47393     // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
47394     return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
47395                        combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
47396   }
47397 
47398   // Another trick. If this is a power of 2 + 2/4/8, we can use a shift followed
47399   // by a single LEA.
47400   // First check if this is a sum of two powers of 2 because that's easy. Then
47401   // count the trailing zeros to locate the lowest set bit.
47402   // TODO: We can do this even without LEA at a cost of two shifts and an add.
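  // For example, mul x, 40 (= 32 + 8) becomes (shl x, 5) + (shl x, 3), where
  // the second shift folds into the LEA scale.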
47403   if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
47404     unsigned ScaleShift = countTrailingZeros(MulAmt);
47405     if (ScaleShift >= 1 && ScaleShift < 4) {
47406       unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
47407       SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47408                                    DAG.getConstant(ShiftAmt, DL, MVT::i8));
47409       SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47410                                    DAG.getConstant(ScaleShift, DL, MVT::i8));
47411       return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
47412     }
47413   }
47414 
47415   return SDValue();
47416 }
47417 
47418 // If the upper 17 bits of one operand's elements are zero and the other
47419 // operand's upper bits are all sign bits, then we can use PMADDWD, which is
47420 // always at least as quick as PMULLD, except on KNL.
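// PMADDWD computes lo*lo + hi*hi per 32-bit lane, treating each lane as two
// signed i16s; when one operand's high half is known zero and the other is
// sign-extended from i16, that sum reduces to the plain 32-bit multiply.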
47421 static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
47422                                    const X86Subtarget &Subtarget) {
47423   if (!Subtarget.hasSSE2())
47424     return SDValue();
47425 
47426   if (Subtarget.isPMADDWDSlow())
47427     return SDValue();
47428 
47429   EVT VT = N->getValueType(0);
47430 
47431   // Only support vXi32 vectors.
47432   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
47433     return SDValue();
47434 
47435   // Make sure the type is legal or can split/widen to a legal type.
47436   // With AVX512 but without BWI, we would need to split v32i16.
47437   unsigned NumElts = VT.getVectorNumElements();
47438   if (NumElts == 1 || !isPowerOf2_32(NumElts))
47439     return SDValue();
47440 
47441   // With AVX512 but without BWI, we would need to split v32i16.
47442   if (32 <= (2 * NumElts) && Subtarget.hasAVX512() && !Subtarget.hasBWI())
47443     return SDValue();
47444 
47445   SDValue N0 = N->getOperand(0);
47446   SDValue N1 = N->getOperand(1);
47447 
47448   // If we are zero/sign extending two steps without SSE4.1, it's better to
47449   // reduce the vmul width instead.
47450   if (!Subtarget.hasSSE41() &&
47451       (((N0.getOpcode() == ISD::ZERO_EXTEND &&
47452          N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
47453         (N1.getOpcode() == ISD::ZERO_EXTEND &&
47454          N1.getOperand(0).getScalarValueSizeInBits() <= 8)) ||
47455        ((N0.getOpcode() == ISD::SIGN_EXTEND &&
47456          N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
47457         (N1.getOpcode() == ISD::SIGN_EXTEND &&
47458          N1.getOperand(0).getScalarValueSizeInBits() <= 8))))
47459     return SDValue();
47460 
47461   // If we are sign extending a wide vector without SSE4.1, it's better to reduce
47462   // the vmul width instead.
47463   if (!Subtarget.hasSSE41() &&
47464       (N0.getOpcode() == ISD::SIGN_EXTEND &&
47465        N0.getOperand(0).getValueSizeInBits() > 128) &&
47466       (N1.getOpcode() == ISD::SIGN_EXTEND &&
47467        N1.getOperand(0).getValueSizeInBits() > 128))
47468     return SDValue();
47469 
47470   // Sign bits must extend down to the lowest i16.
47471   if (DAG.ComputeMaxSignificantBits(N1) > 16 ||
47472       DAG.ComputeMaxSignificantBits(N0) > 16)
47473     return SDValue();
47474 
47475   // At least one of the elements must be zero in the upper 17 bits, or can be
47476   // safely made zero without altering the final result.
47477   auto GetZeroableOp = [&](SDValue Op) {
47478     APInt Mask17 = APInt::getHighBitsSet(32, 17);
47479     if (DAG.MaskedValueIsZero(Op, Mask17))
47480       return Op;
47481     // Mask off upper 16-bits of sign-extended constants.
47482     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()))
47483       return DAG.getNode(ISD::AND, SDLoc(N), VT, Op,
47484                          DAG.getConstant(0xFFFF, SDLoc(N), VT));
47485     if (Op.getOpcode() == ISD::SIGN_EXTEND && N->isOnlyUserOf(Op.getNode())) {
47486       SDValue Src = Op.getOperand(0);
47487       // Convert sext(vXi16) to zext(vXi16).
47488       if (Src.getScalarValueSizeInBits() == 16 && VT.getSizeInBits() <= 128)
47489         return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
47490       // Convert sext(vXi8) to zext(vXi16 sext(vXi8)) on pre-SSE41 targets
47491       // which will expand the extension.
47492       if (Src.getScalarValueSizeInBits() < 16 && !Subtarget.hasSSE41()) {
47493         EVT ExtVT = VT.changeVectorElementType(MVT::i16);
47494         Src = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), ExtVT, Src);
47495         return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
47496       }
47497     }
47498     // Convert SIGN_EXTEND_VECTOR_INREG to ZERO_EXTEND_VECTOR_INREG.
47499     if (Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
47500         N->isOnlyUserOf(Op.getNode())) {
47501       SDValue Src = Op.getOperand(0);
47502       if (Src.getScalarValueSizeInBits() == 16)
47503         return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(N), VT, Src);
47504     }
47505     // Convert VSRAI(Op, 16) to VSRLI(Op, 16).
47506     if (Op.getOpcode() == X86ISD::VSRAI && Op.getConstantOperandVal(1) == 16 &&
47507         N->isOnlyUserOf(Op.getNode())) {
47508       return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, Op.getOperand(0),
47509                          Op.getOperand(1));
47510     }
47511     return SDValue();
47512   };
47513   SDValue ZeroN0 = GetZeroableOp(N0);
47514   SDValue ZeroN1 = GetZeroableOp(N1);
47515   if (!ZeroN0 && !ZeroN1)
47516     return SDValue();
47517   N0 = ZeroN0 ? ZeroN0 : N0;
47518   N1 = ZeroN1 ? ZeroN1 : N1;
47519 
47520   // Use SplitOpsAndApply to handle AVX splitting.
47521   auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
47522                            ArrayRef<SDValue> Ops) {
47523     MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
47524     MVT OpVT = MVT::getVectorVT(MVT::i16, Ops[0].getValueSizeInBits() / 16);
47525     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
47526                        DAG.getBitcast(OpVT, Ops[0]),
47527                        DAG.getBitcast(OpVT, Ops[1]));
47528   };
47529   return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
47530                           PMADDWDBuilder);
47531 }
47532 
47533 static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
47534                                   const X86Subtarget &Subtarget) {
47535   if (!Subtarget.hasSSE2())
47536     return SDValue();
47537 
47538   EVT VT = N->getValueType(0);
47539 
47540   // Only support vXi64 vectors.
47541   if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
47542       VT.getVectorNumElements() < 2 ||
47543       !isPowerOf2_32(VT.getVectorNumElements()))
47544     return SDValue();
47545 
47546   SDValue N0 = N->getOperand(0);
47547   SDValue N1 = N->getOperand(1);
47548 
47549   // PMULDQ returns the 64-bit result of the signed multiplication of the lower
47550   // 32 bits. We can lower with this if the sign bits stretch that far.
47551   if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
47552       DAG.ComputeNumSignBits(N1) > 32) {
47553     auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
47554                             ArrayRef<SDValue> Ops) {
47555       return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
47556     };
47557     return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
47558                             PMULDQBuilder, /*CheckBWI*/false);
47559   }
47560 
47561   // If the upper bits are zero we can use a single pmuludq.
47562   APInt Mask = APInt::getHighBitsSet(64, 32);
47563   if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
47564     auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
47565                              ArrayRef<SDValue> Ops) {
47566       return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
47567     };
47568     return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
47569                             PMULUDQBuilder, /*CheckBWI*/false);
47570   }
47571 
47572   return SDValue();
47573 }
47574 
47575 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
47576                           TargetLowering::DAGCombinerInfo &DCI,
47577                           const X86Subtarget &Subtarget) {
47578   EVT VT = N->getValueType(0);
47579 
47580   if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
47581     return V;
47582 
47583   if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
47584     return V;
47585 
47586   if (DCI.isBeforeLegalize() && VT.isVector())
47587     return reduceVMULWidth(N, DAG, Subtarget);
47588 
47589   // Optimize a single multiply with constant into two operations in order to
47590   // implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
47591   if (!MulConstantOptimization)
47592     return SDValue();
47593 
47594   // An imul is usually smaller than the alternative sequence.
47595   if (DAG.getMachineFunction().getFunction().hasMinSize())
47596     return SDValue();
47597 
47598   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
47599     return SDValue();
47600 
47601   if (VT != MVT::i64 && VT != MVT::i32)
47602     return SDValue();
47603 
47604   ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
47605   if (!C)
47606     return SDValue();
47607   if (isPowerOf2_64(C->getZExtValue()))
47608     return SDValue();
47609 
47610   int64_t SignMulAmt = C->getSExtValue();
47611   assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
47612   uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
47613 
47614   SDLoc DL(N);
47615   if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
47616     SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
47617                                  DAG.getConstant(AbsMulAmt, DL, VT));
47618     if (SignMulAmt < 0)
47619       NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
47620                            NewMul);
47621 
47622     return NewMul;
47623   }
47624 
47625   uint64_t MulAmt1 = 0;
47626   uint64_t MulAmt2 = 0;
47627   if ((AbsMulAmt % 9) == 0) {
47628     MulAmt1 = 9;
47629     MulAmt2 = AbsMulAmt / 9;
47630   } else if ((AbsMulAmt % 5) == 0) {
47631     MulAmt1 = 5;
47632     MulAmt2 = AbsMulAmt / 5;
47633   } else if ((AbsMulAmt % 3) == 0) {
47634     MulAmt1 = 3;
47635     MulAmt2 = AbsMulAmt / 3;
47636   }
47637 
47638   SDValue NewMul;
47639   // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
47640   if (MulAmt2 &&
47641       (isPowerOf2_64(MulAmt2) ||
47642        (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
47643 
47644     if (isPowerOf2_64(MulAmt2) &&
47645         !(SignMulAmt >= 0 && N->hasOneUse() &&
47646           N->use_begin()->getOpcode() == ISD::ADD))
47647       // If the second multiplier is pow2, issue it first. We want the multiply by
47648       // 3, 5, or 9 to be folded into the addressing mode unless the lone use
47649       // is an add. Only do this for positive multiply amounts since the
47650       // negate would prevent it from being used as an address mode anyway.
47651       std::swap(MulAmt1, MulAmt2);
47652 
47653     if (isPowerOf2_64(MulAmt1))
47654       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47655                            DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
47656     else
47657       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
47658                            DAG.getConstant(MulAmt1, DL, VT));
47659 
47660     if (isPowerOf2_64(MulAmt2))
47661       NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
47662                            DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
47663     else
47664       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
47665                            DAG.getConstant(MulAmt2, DL, VT));
47666 
47667     // Negate the result.
47668     if (SignMulAmt < 0)
47669       NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
47670                            NewMul);
47671   } else if (!Subtarget.slowLEA())
47672     NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
47673 
47674   if (!NewMul) {
47675     assert(C->getZExtValue() != 0 &&
47676            C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
47677            "Both cases that could cause potential overflows should have "
47678            "already been handled.");
47679     if (isPowerOf2_64(AbsMulAmt - 1)) {
47680       // (mul x, 2^N + 1) => (add (shl x, N), x)
47681       NewMul = DAG.getNode(
47682           ISD::ADD, DL, VT, N->getOperand(0),
47683           DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47684                       DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
47685                                       MVT::i8)));
47686       // To negate, subtract the number from zero
47687       if (SignMulAmt < 0)
47688         NewMul = DAG.getNode(ISD::SUB, DL, VT,
47689                              DAG.getConstant(0, DL, VT), NewMul);
47690     } else if (isPowerOf2_64(AbsMulAmt + 1)) {
47691       // (mul x, 2^N - 1) => (sub (shl x, N), x)
47692       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47693                            DAG.getConstant(Log2_64(AbsMulAmt + 1),
47694                                            DL, MVT::i8));
47695       // To negate, reverse the operands of the subtract.
47696       if (SignMulAmt < 0)
47697         NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
47698       else
47699         NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
47700     } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
47701       // (mul x, 2^N + 2) => (add (shl x, N), (add x, x))
47702       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47703                            DAG.getConstant(Log2_64(AbsMulAmt - 2),
47704                                            DL, MVT::i8));
47705       NewMul = DAG.getNode(
47706           ISD::ADD, DL, VT, NewMul,
47707           DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
47708     } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
47709       // (mul x, 2^N - 2) => (sub (shl x, N), (add x, x))
47710       NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
47711                            DAG.getConstant(Log2_64(AbsMulAmt + 2),
47712                                            DL, MVT::i8));
47713       NewMul = DAG.getNode(
47714           ISD::SUB, DL, VT, NewMul,
47715           DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
47716     }
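    // Worked examples for the fallback cases above (assuming no earlier fold
    // applied):
    //   (mul x, 17) -> (add (shl x, 4), x)          since 17 == 2^4 + 1
    //   (mul x, 31) -> (sub (shl x, 5), x)          since 31 == 2^5 - 1
    //   (mul x, 62) -> (sub (shl x, 6), (add x, x)) since 62 == 2^6 - 2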
47717   }
47718 
47719   return NewMul;
47720 }
47721 
47722 // Try to form a MULHU or MULHS node by looking for
47723 // (srl (mul ext, ext), 16)
47724 // TODO: This is X86 specific because we want to be able to handle wide types
47725 // before type legalization. But we can only do it if the vector will be
47726 // legalized via widening/splitting. Type legalization can't handle promotion
47727 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
47728 // combiner.
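// For example, (srl (mul (zext v8i16 X to v8i32), (zext v8i16 Y to v8i32)),
// splat 16) is recognized below and becomes (zext (mulhu X, Y) to v8i32);
// the sign-extended/SRA variant becomes MULHS with a sign extension instead.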
47729 static SDValue combineShiftToPMULH(SDNode *N, SelectionDAG &DAG,
47730                                    const X86Subtarget &Subtarget) {
47731   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
47732            "SRL or SRA node is required here!");
47733   SDLoc DL(N);
47734 
47735   if (!Subtarget.hasSSE2())
47736     return SDValue();
47737 
47738   // The operation feeding into the shift must be a multiply.
47739   SDValue ShiftOperand = N->getOperand(0);
47740   if (ShiftOperand.getOpcode() != ISD::MUL || !ShiftOperand.hasOneUse())
47741     return SDValue();
47742 
47743   // Input type should be at least vXi32.
47744   EVT VT = N->getValueType(0);
47745   if (!VT.isVector() || VT.getVectorElementType().getSizeInBits() < 32)
47746     return SDValue();
47747 
47748   // Need a shift by 16.
47749   APInt ShiftAmt;
47750   if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), ShiftAmt) ||
47751       ShiftAmt != 16)
47752     return SDValue();
47753 
47754   SDValue LHS = ShiftOperand.getOperand(0);
47755   SDValue RHS = ShiftOperand.getOperand(1);
47756 
47757   unsigned ExtOpc = LHS.getOpcode();
47758   if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
47759       RHS.getOpcode() != ExtOpc)
47760     return SDValue();
47761 
47762   // Peek through the extends.
47763   LHS = LHS.getOperand(0);
47764   RHS = RHS.getOperand(0);
47765 
47766   // Ensure the input types match.
47767   EVT MulVT = LHS.getValueType();
47768   if (MulVT.getVectorElementType() != MVT::i16 || RHS.getValueType() != MulVT)
47769     return SDValue();
47770 
47771   unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
47772   SDValue Mulh = DAG.getNode(Opc, DL, MulVT, LHS, RHS);
47773 
47774   ExtOpc = N->getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
47775   return DAG.getNode(ExtOpc, DL, VT, Mulh);
47776 }
47777 
47778 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
47779   SDValue N0 = N->getOperand(0);
47780   SDValue N1 = N->getOperand(1);
47781   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
47782   EVT VT = N0.getValueType();
47783 
47784   // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
47785   // since the result of setcc_c is all zero's or all ones.
47786   if (VT.isInteger() && !VT.isVector() &&
47787       N1C && N0.getOpcode() == ISD::AND &&
47788       N0.getOperand(1).getOpcode() == ISD::Constant) {
47789     SDValue N00 = N0.getOperand(0);
47790     APInt Mask = N0.getConstantOperandAPInt(1);
47791     Mask <<= N1C->getAPIntValue();
47792     bool MaskOK = false;
47793     // We can handle cases concerning bit-widening nodes containing setcc_c if
47794     // we carefully interrogate the mask to make sure the transform is
47795     // semantics-preserving.
47796     // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
47797     // of the underlying setcc_c operation if the setcc_c was zero extended.
47798     // Consider the following example:
47799     //   zext(setcc_c)                 -> i32 0x0000FFFF
47800     //   c1                            -> i32 0x0000FFFF
47801     //   c2                            -> i32 0x00000001
47802     //   (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
47803     //   (and setcc_c, (c1 << c2))     -> i32 0x0000FFFE
47804     if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
47805       MaskOK = true;
47806     } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
47807                N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
47808       MaskOK = true;
47809     } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
47810                 N00.getOpcode() == ISD::ANY_EXTEND) &&
47811                N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
47812       MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
47813     }
47814     if (MaskOK && Mask != 0) {
47815       SDLoc DL(N);
47816       return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
47817     }
47818   }
47819 
47820   return SDValue();
47821 }
47822 
47823 static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
47824                                            const X86Subtarget &Subtarget) {
47825   SDValue N0 = N->getOperand(0);
47826   SDValue N1 = N->getOperand(1);
47827   EVT VT = N0.getValueType();
47828   unsigned Size = VT.getSizeInBits();
47829 
47830   if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
47831     return V;
47832 
47833   // fold (ashr (shl a, [56,48,32,24,16]), SarConst)
47834   // into (shl (sext_inreg a), [56,48,32,24,16] - SarConst) or
47835   // into (sra (sext_inreg a), SarConst - [56,48,32,24,16])
47836   // depending on the sign of (SarConst - [56,48,32,24,16])
47837 
47838   // sexts on X86 are MOVs (MOVSX). They have the same code size as the
47839   // SHIFTs above (only a shift by 1 has a smaller encoding).
47840   // However, a MOV has two advantages over a SHIFT:
47841   // 1. it can write to a register that differs from the source
47842   // 2. it accepts memory operands
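  // For example, on i64:
  //   (sra (shl x, 56), 60) -> (sra (sext_inreg x, i8), 4)
  //   (sra (shl x, 56), 50) -> (shl (sext_inreg x, i8), 6)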
47843 
47844   if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
47845       N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
47846       N0.getOperand(1).getOpcode() != ISD::Constant)
47847     return SDValue();
47848 
47849   SDValue N00 = N0.getOperand(0);
47850   SDValue N01 = N0.getOperand(1);
47851   APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
47852   APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
47853   EVT CVT = N1.getValueType();
47854 
47855   if (SarConst.isNegative())
47856     return SDValue();
47857 
47858   for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
47859     unsigned ShiftSize = SVT.getSizeInBits();
47860     // Skip types without a corresponding sext/zext and cases where ShlConst
47861     // is not one of [56,48,32,24,16].
47862     if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
47863       continue;
47864     SDLoc DL(N);
47865     SDValue NN =
47866         DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
47867     SarConst = SarConst - (Size - ShiftSize);
47868     if (SarConst == 0)
47869       return NN;
47870     if (SarConst.isNegative())
47871       return DAG.getNode(ISD::SHL, DL, VT, NN,
47872                          DAG.getConstant(-SarConst, DL, CVT));
47873     return DAG.getNode(ISD::SRA, DL, VT, NN,
47874                        DAG.getConstant(SarConst, DL, CVT));
47875   }
47876   return SDValue();
47877 }
47878 
47879 static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
47880                                         TargetLowering::DAGCombinerInfo &DCI,
47881                                         const X86Subtarget &Subtarget) {
47882   SDValue N0 = N->getOperand(0);
47883   SDValue N1 = N->getOperand(1);
47884   EVT VT = N0.getValueType();
47885 
47886   if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
47887     return V;
47888 
47889   // Only do this on the last DAG combine as it can interfere with other
47890   // combines.
47891   if (!DCI.isAfterLegalizeDAG())
47892     return SDValue();
47893 
47894   // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
47895   // TODO: This is a generic DAG combine that became an x86-only combine to
47896   // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
47897   // and-not ('andn').
47898   if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
47899     return SDValue();
47900 
47901   auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
47902   auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
47903   if (!ShiftC || !AndC)
47904     return SDValue();
47905 
47906   // If we can shrink the constant mask below 8-bits or 32-bits, then this
47907   // transform should reduce code size. It may also enable secondary transforms
47908   // from improved known-bits analysis or instruction selection.
47909   APInt MaskVal = AndC->getAPIntValue();
47910 
47911   // If this can be matched by a zero extend, don't optimize.
47912   if (MaskVal.isMask()) {
47913     unsigned TO = MaskVal.countTrailingOnes();
47914     if (TO >= 8 && isPowerOf2_32(TO))
47915       return SDValue();
47916   }
47917 
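  // For example, (srl (and X, 0x7F0), 4) becomes (and (srl X, 4), 0x7F): the
  // shifted mask fits in 8 bits, so the AND can use a shorter immediate
  // encoding.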
47918   APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
47919   unsigned OldMaskSize = MaskVal.getMinSignedBits();
47920   unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
47921   if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
47922       (OldMaskSize > 32 && NewMaskSize <= 32)) {
47923     // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
47924     SDLoc DL(N);
47925     SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
47926     SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
47927     return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
47928   }
47929   return SDValue();
47930 }
47931 
47932 static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
47933                                          const X86Subtarget &Subtarget) {
47934   unsigned Opcode = N->getOpcode();
47935   assert(isHorizOp(Opcode) && "Unexpected hadd/hsub/pack opcode");
47936 
47937   SDLoc DL(N);
47938   EVT VT = N->getValueType(0);
47939   SDValue N0 = N->getOperand(0);
47940   SDValue N1 = N->getOperand(1);
47941   EVT SrcVT = N0.getValueType();
47942 
47943   SDValue BC0 =
47944       N->isOnlyUserOf(N0.getNode()) ? peekThroughOneUseBitcasts(N0) : N0;
47945   SDValue BC1 =
47946       N->isOnlyUserOf(N1.getNode()) ? peekThroughOneUseBitcasts(N1) : N1;
47947 
47948   // Attempt to fold HOP(LOSUBVECTOR(SHUFFLE(X)),HISUBVECTOR(SHUFFLE(X)))
47949   // to SHUFFLE(HOP(LOSUBVECTOR(X),HISUBVECTOR(X))), this is mainly for
47950   // truncation trees that help us avoid lane crossing shuffles.
47951   // TODO: There's a lot more we can do for PACK/HADD style shuffle combines.
47952   // TODO: We don't handle vXf64 shuffles yet.
47953   if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
47954     if (SDValue BCSrc = getSplitVectorSrc(BC0, BC1, false)) {
47955       SmallVector<SDValue> ShuffleOps;
47956       SmallVector<int> ShuffleMask, ScaledMask;
47957       SDValue Vec = peekThroughBitcasts(BCSrc);
47958       if (getTargetShuffleInputs(Vec, ShuffleOps, ShuffleMask, DAG)) {
47959         resolveTargetShuffleInputsAndMask(ShuffleOps, ShuffleMask);
47960         // To keep the HOP LHS/RHS coherency, we must be able to scale the unary
47961         // shuffle to a v4X64 width - we can probably relax this in the future.
47962         if (!isAnyZero(ShuffleMask) && ShuffleOps.size() == 1 &&
47963             ShuffleOps[0].getValueType().is256BitVector() &&
47964             scaleShuffleElements(ShuffleMask, 4, ScaledMask)) {
47965           SDValue Lo, Hi;
47966           MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
47967           std::tie(Lo, Hi) = DAG.SplitVector(ShuffleOps[0], DL);
47968           Lo = DAG.getBitcast(SrcVT, Lo);
47969           Hi = DAG.getBitcast(SrcVT, Hi);
47970           SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
47971           Res = DAG.getBitcast(ShufVT, Res);
47972           Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ScaledMask);
47973           return DAG.getBitcast(VT, Res);
47974         }
47975       }
47976     }
47977   }
47978 
47979   // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(Z,W)) -> SHUFFLE(HOP()).
47980   if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
47981     // If either/both ops are a shuffle that can scale to v2x64,
47982     // then see if we can perform this as a v4x32 post shuffle.
47983     SmallVector<SDValue> Ops0, Ops1;
47984     SmallVector<int> Mask0, Mask1, ScaledMask0, ScaledMask1;
47985     bool IsShuf0 =
47986         getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
47987         scaleShuffleElements(Mask0, 2, ScaledMask0) &&
47988         all_of(Ops0, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
47989     bool IsShuf1 =
47990         getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
47991         scaleShuffleElements(Mask1, 2, ScaledMask1) &&
47992         all_of(Ops1, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
47993     if (IsShuf0 || IsShuf1) {
47994       if (!IsShuf0) {
47995         Ops0.assign({BC0});
47996         ScaledMask0.assign({0, 1});
47997       }
47998       if (!IsShuf1) {
47999         Ops1.assign({BC1});
48000         ScaledMask1.assign({0, 1});
48001       }
48002 
48003       SDValue LHS, RHS;
48004       int PostShuffle[4] = {-1, -1, -1, -1};
48005       auto FindShuffleOpAndIdx = [&](int M, int &Idx, ArrayRef<SDValue> Ops) {
48006         if (M < 0)
48007           return true;
48008         Idx = M % 2;
48009         SDValue Src = Ops[M / 2];
48010         if (!LHS || LHS == Src) {
48011           LHS = Src;
48012           return true;
48013         }
48014         if (!RHS || RHS == Src) {
48015           Idx += 2;
48016           RHS = Src;
48017           return true;
48018         }
48019         return false;
48020       };
48021       if (FindShuffleOpAndIdx(ScaledMask0[0], PostShuffle[0], Ops0) &&
48022           FindShuffleOpAndIdx(ScaledMask0[1], PostShuffle[1], Ops0) &&
48023           FindShuffleOpAndIdx(ScaledMask1[0], PostShuffle[2], Ops1) &&
48024           FindShuffleOpAndIdx(ScaledMask1[1], PostShuffle[3], Ops1)) {
48025         LHS = DAG.getBitcast(SrcVT, LHS);
48026         RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
48027         MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
48028         SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
48029         Res = DAG.getBitcast(ShufVT, Res);
48030         Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, PostShuffle);
48031         return DAG.getBitcast(VT, Res);
48032       }
48033     }
48034   }
48035 
48036   // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y)).
48037   if (VT.is256BitVector() && Subtarget.hasInt256()) {
48038     SmallVector<int> Mask0, Mask1;
48039     SmallVector<SDValue> Ops0, Ops1;
48040     SmallVector<int, 2> ScaledMask0, ScaledMask1;
48041     if (getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
48042         getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
48043         !Ops0.empty() && !Ops1.empty() &&
48044         all_of(Ops0,
48045                [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
48046         all_of(Ops1,
48047                [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
48048         scaleShuffleElements(Mask0, 2, ScaledMask0) &&
48049         scaleShuffleElements(Mask1, 2, ScaledMask1)) {
48050       SDValue Op00 = peekThroughBitcasts(Ops0.front());
48051       SDValue Op10 = peekThroughBitcasts(Ops1.front());
48052       SDValue Op01 = peekThroughBitcasts(Ops0.back());
48053       SDValue Op11 = peekThroughBitcasts(Ops1.back());
48054       if ((Op00 == Op11) && (Op01 == Op10)) {
48055         std::swap(Op10, Op11);
48056         ShuffleVectorSDNode::commuteMask(ScaledMask1);
48057       }
48058       if ((Op00 == Op10) && (Op01 == Op11)) {
48059         const int Map[4] = {0, 2, 1, 3};
48060         SmallVector<int, 4> ShuffleMask(
48061             {Map[ScaledMask0[0]], Map[ScaledMask1[0]], Map[ScaledMask0[1]],
48062              Map[ScaledMask1[1]]});
48063         MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
48064         SDValue Res = DAG.getNode(Opcode, DL, VT, DAG.getBitcast(SrcVT, Op00),
48065                                   DAG.getBitcast(SrcVT, Op01));
48066         Res = DAG.getBitcast(ShufVT, Res);
48067         Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ShuffleMask);
48068         return DAG.getBitcast(VT, Res);
48069       }
48070     }
48071   }
48072 
48073   return SDValue();
48074 }
48075 
48076 static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
48077                                  TargetLowering::DAGCombinerInfo &DCI,
48078                                  const X86Subtarget &Subtarget) {
48079   unsigned Opcode = N->getOpcode();
48080   assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
48081          "Unexpected pack opcode");
48082 
48083   EVT VT = N->getValueType(0);
48084   SDValue N0 = N->getOperand(0);
48085   SDValue N1 = N->getOperand(1);
48086   unsigned NumDstElts = VT.getVectorNumElements();
48087   unsigned DstBitsPerElt = VT.getScalarSizeInBits();
48088   unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
48089   assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
48090          N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
48091          "Unexpected PACKSS/PACKUS input type");
48092 
48093   bool IsSigned = (X86ISD::PACKSS == Opcode);
48094 
48095   // Constant Folding.
48096   APInt UndefElts0, UndefElts1;
48097   SmallVector<APInt, 32> EltBits0, EltBits1;
48098   if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
48099       (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
48100       getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
48101       getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
48102     unsigned NumLanes = VT.getSizeInBits() / 128;
48103     unsigned NumSrcElts = NumDstElts / 2;
48104     unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
48105     unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
48106 
48107     APInt Undefs(NumDstElts, 0);
48108     SmallVector<APInt, 32> Bits(NumDstElts, APInt::getZero(DstBitsPerElt));
48109     for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
48110       for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
48111         unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
48112         auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
48113         auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
48114 
48115         if (UndefElts[SrcIdx]) {
48116           Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
48117           continue;
48118         }
48119 
48120         APInt &Val = EltBits[SrcIdx];
48121         if (IsSigned) {
48122           // PACKSS: Truncate signed value with signed saturation.
48123           // Source values less than dst minint are saturated to minint.
48124           // Source values greater than dst maxint are saturated to maxint.
48125           if (Val.isSignedIntN(DstBitsPerElt))
48126             Val = Val.trunc(DstBitsPerElt);
48127           else if (Val.isNegative())
48128             Val = APInt::getSignedMinValue(DstBitsPerElt);
48129           else
48130             Val = APInt::getSignedMaxValue(DstBitsPerElt);
48131         } else {
48132           // PACKUS: Truncate signed value with unsigned saturation.
48133           // Source values less than zero are saturated to zero.
48134           // Source values greater than dst maxuint are saturated to maxuint.
48135           if (Val.isIntN(DstBitsPerElt))
48136             Val = Val.trunc(DstBitsPerElt);
48137           else if (Val.isNegative())
48138             Val = APInt::getZero(DstBitsPerElt);
48139           else
48140             Val = APInt::getAllOnes(DstBitsPerElt);
48141         }
48142         Bits[Lane * NumDstEltsPerLane + Elt] = Val;
48143       }
48144     }
48145 
48146     return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
48147   }
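  // Worked example of the saturation rules above: packsswb maps i16 300 to
  // i8 127 and i16 -300 to i8 -128, while packuswb maps i16 -5 to i8 0 and
  // i16 300 to i8 255; values already in range are simply truncated.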
48148 
48149   // Try to fold PACK(SHUFFLE(),SHUFFLE()) -> SHUFFLE(PACK()).
48150   if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
48151     return V;
48152 
48153   // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
48154   // truncate to create a larger truncate.
48155   if (Subtarget.hasAVX512() &&
48156       N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
48157       N0.getOperand(0).getValueType() == MVT::v8i32) {
48158     if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
48159         (!IsSigned &&
48160          DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
48161       if (Subtarget.hasVLX())
48162         return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
48163 
48164       // Widen input to v16i32 so we can truncate that.
48165       SDLoc dl(N);
48166       SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
48167                                    N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
48168       return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
48169     }
48170   }
48171 
48172   // Try to fold PACK(EXTEND(X),EXTEND(Y)) -> CONCAT(X,Y) subvectors.
48173   if (VT.is128BitVector()) {
48174     unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
48175     SDValue Src0, Src1;
48176     if (N0.getOpcode() == ExtOpc &&
48177         N0.getOperand(0).getValueType().is64BitVector() &&
48178         N0.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
48179       Src0 = N0.getOperand(0);
48180     }
48181     if (N1.getOpcode() == ExtOpc &&
48182         N1.getOperand(0).getValueType().is64BitVector() &&
48183         N1.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
48184       Src1 = N1.getOperand(0);
48185     }
48186     if ((Src0 || N0.isUndef()) && (Src1 || N1.isUndef())) {
48187       assert((Src0 || Src1) && "Found PACK(UNDEF,UNDEF)");
48188       Src0 = Src0 ? Src0 : DAG.getUNDEF(Src1.getValueType());
48189       Src1 = Src1 ? Src1 : DAG.getUNDEF(Src0.getValueType());
48190       return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Src0, Src1);
48191     }
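    // For example, (packuswb (zext v8i8 X to v8i16), (zext v8i8 Y to v8i16))
    // becomes (concat_vectors X, Y) : v16i8; the extended values are already
    // in range, so the pack's saturation is a no-op.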
48192 
48193     // Try again with pack(*_extend_vector_inreg, undef).
48194     unsigned VecInRegOpc = IsSigned ? ISD::SIGN_EXTEND_VECTOR_INREG
48195                                     : ISD::ZERO_EXTEND_VECTOR_INREG;
48196     if (N0.getOpcode() == VecInRegOpc && N1.isUndef() &&
48197         N0.getOperand(0).getScalarValueSizeInBits() < DstBitsPerElt)
48198       return getEXTEND_VECTOR_INREG(ExtOpc, SDLoc(N), VT, N0.getOperand(0),
48199                                     DAG);
48200   }
48201 
48202   // Attempt to combine as shuffle.
48203   SDValue Op(N, 0);
48204   if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48205     return Res;
48206 
48207   return SDValue();
48208 }
48209 
48210 static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
48211                                     TargetLowering::DAGCombinerInfo &DCI,
48212                                     const X86Subtarget &Subtarget) {
48213   assert((X86ISD::HADD == N->getOpcode() || X86ISD::FHADD == N->getOpcode() ||
48214           X86ISD::HSUB == N->getOpcode() || X86ISD::FHSUB == N->getOpcode()) &&
48215          "Unexpected horizontal add/sub opcode");
48216 
48217   if (!shouldUseHorizontalOp(true, DAG, Subtarget)) {
48218     MVT VT = N->getSimpleValueType(0);
48219     SDValue LHS = N->getOperand(0);
48220     SDValue RHS = N->getOperand(1);
48221 
48222     // HOP(HOP'(X,X),HOP'(Y,Y)) -> HOP(PERMUTE(HOP'(X,Y)),PERMUTE(HOP'(X,Y)).
48223     if (LHS != RHS && LHS.getOpcode() == N->getOpcode() &&
48224         LHS.getOpcode() == RHS.getOpcode() &&
48225         LHS.getValueType() == RHS.getValueType() &&
48226         N->isOnlyUserOf(LHS.getNode()) && N->isOnlyUserOf(RHS.getNode())) {
48227       SDValue LHS0 = LHS.getOperand(0);
48228       SDValue LHS1 = LHS.getOperand(1);
48229       SDValue RHS0 = RHS.getOperand(0);
48230       SDValue RHS1 = RHS.getOperand(1);
48231       if ((LHS0 == LHS1 || LHS0.isUndef() || LHS1.isUndef()) &&
48232           (RHS0 == RHS1 || RHS0.isUndef() || RHS1.isUndef())) {
48233         SDLoc DL(N);
48234         SDValue Res = DAG.getNode(LHS.getOpcode(), DL, LHS.getValueType(),
48235                                   LHS0.isUndef() ? LHS1 : LHS0,
48236                                   RHS0.isUndef() ? RHS1 : RHS0);
48237         MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
48238         Res = DAG.getBitcast(ShufVT, Res);
48239         SDValue NewLHS =
48240             DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
48241                         getV4X86ShuffleImm8ForMask({0, 1, 0, 1}, DL, DAG));
48242         SDValue NewRHS =
48243             DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
48244                         getV4X86ShuffleImm8ForMask({2, 3, 2, 3}, DL, DAG));
48245         return DAG.getNode(N->getOpcode(), DL, VT, DAG.getBitcast(VT, NewLHS),
48246                            DAG.getBitcast(VT, NewRHS));
48247       }
48248     }
48249   }
48250 
48251   // Try to fold HOP(SHUFFLE(),SHUFFLE()) -> SHUFFLE(HOP()).
48252   if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
48253     return V;
48254 
48255   return SDValue();
48256 }
48257 
48258 static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
48259                                      TargetLowering::DAGCombinerInfo &DCI,
48260                                      const X86Subtarget &Subtarget) {
48261   assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
48262           X86ISD::VSRL == N->getOpcode()) &&
48263          "Unexpected shift opcode");
48264   EVT VT = N->getValueType(0);
48265   SDValue N0 = N->getOperand(0);
48266   SDValue N1 = N->getOperand(1);
48267 
48268   // Shift zero -> zero.
48269   if (ISD::isBuildVectorAllZeros(N0.getNode()))
48270     return DAG.getConstant(0, SDLoc(N), VT);
48271 
48272   // Detect constant shift amounts.
48273   APInt UndefElts;
48274   SmallVector<APInt, 32> EltBits;
48275   if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
48276     unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
48277     return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
48278                                       EltBits[0].getZExtValue(), DAG);
48279   }
48280 
48281   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48282   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
48283   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
48284     return SDValue(N, 0);
48285 
48286   return SDValue();
48287 }
48288 
48289 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
48290                                      TargetLowering::DAGCombinerInfo &DCI,
48291                                      const X86Subtarget &Subtarget) {
48292   unsigned Opcode = N->getOpcode();
48293   assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
48294           X86ISD::VSRLI == Opcode) &&
48295          "Unexpected shift opcode");
48296   bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
48297   EVT VT = N->getValueType(0);
48298   SDValue N0 = N->getOperand(0);
48299   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
48300   assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
48301          "Unexpected value type");
48302   assert(N->getOperand(1).getValueType() == MVT::i8 &&
48303          "Unexpected shift amount type");
48304 
48305   // (shift undef, X) -> 0
48306   if (N0.isUndef())
48307     return DAG.getConstant(0, SDLoc(N), VT);
48308 
48309   // Out of range logical bit shifts are guaranteed to be zero.
48310   // Out of range arithmetic bit shifts splat the sign bit.
48311   unsigned ShiftVal = N->getConstantOperandVal(1);
48312   if (ShiftVal >= NumBitsPerElt) {
48313     if (LogicalShift)
48314       return DAG.getConstant(0, SDLoc(N), VT);
48315     ShiftVal = NumBitsPerElt - 1;
48316   }
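  // For example, with v4i32 elements a VSRLI or VSHLI by 33 folds to zero,
  // while a VSRAI by 33 is clamped to a VSRAI by 31 (splatting the sign bit).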
48317 
48318   // (shift X, 0) -> X
48319   if (!ShiftVal)
48320     return N0;
48321 
48322   // (shift 0, C) -> 0
48323   if (ISD::isBuildVectorAllZeros(N0.getNode()))
48324     // N0 is all zeros or undef. We guarantee that the bits shifted into the
48325     // result are all zeros, not undef.
48326     return DAG.getConstant(0, SDLoc(N), VT);
48327 
48328   // (VSRAI -1, C) -> -1
48329   if (!LogicalShift && ISD::isBuildVectorAllOnes(N0.getNode()))
48330     // N0 is all ones or undef. We guarantee that the bits shifted into the
48331     // result are all ones, not undef.
48332     return DAG.getConstant(-1, SDLoc(N), VT);
48333 
48334   auto MergeShifts = [&](SDValue X, uint64_t Amt0, uint64_t Amt1) {
48335     unsigned NewShiftVal = Amt0 + Amt1;
48336     if (NewShiftVal >= NumBitsPerElt) {
48337       // Out of range logical bit shifts are guaranteed to be zero.
48338       // Out of range arithmetic bit shifts splat the sign bit.
48339       if (LogicalShift)
48340         return DAG.getConstant(0, SDLoc(N), VT);
48341       NewShiftVal = NumBitsPerElt - 1;
48342     }
48343     return DAG.getNode(Opcode, SDLoc(N), VT, N0.getOperand(0),
48344                        DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
48345   };
48346 
48347   // (shift (shift X, C2), C1) -> (shift X, (C1 + C2))
48348   if (Opcode == N0.getOpcode())
48349     return MergeShifts(N0.getOperand(0), ShiftVal, N0.getConstantOperandVal(1));
48350 
48351   // (shl (add X, X), C) -> (shl X, (C + 1))
48352   if (Opcode == X86ISD::VSHLI && N0.getOpcode() == ISD::ADD &&
48353       N0.getOperand(0) == N0.getOperand(1))
48354     return MergeShifts(N0.getOperand(0), ShiftVal, 1);
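  // For example, (VSRLI (VSRLI X, 3), 2) becomes (VSRLI X, 5), and
  // (VSHLI (add X, X), 4) becomes (VSHLI X, 5); if the combined amount reaches
  // the element width, the logic above folds logical shifts to zero and clamps
  // arithmetic shifts to NumBitsPerElt - 1.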
48355 
48356   // We can decode 'whole byte' logical bit shifts as shuffles.
48357   if (LogicalShift && (ShiftVal % 8) == 0) {
48358     SDValue Op(N, 0);
48359     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48360       return Res;
48361   }
48362 
48363   // Constant Folding.
48364   APInt UndefElts;
48365   SmallVector<APInt, 32> EltBits;
48366   if (N->isOnlyUserOf(N0.getNode()) &&
48367       getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
48368     assert(EltBits.size() == VT.getVectorNumElements() &&
48369            "Unexpected shift value type");
48370     // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
48371     // created an undef input due to no input bits being demanded, but user
48372     // still expects 0 in other bits.
48373     for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
48374       APInt &Elt = EltBits[i];
48375       if (UndefElts[i])
48376         Elt = 0;
48377       else if (X86ISD::VSHLI == Opcode)
48378         Elt <<= ShiftVal;
48379       else if (X86ISD::VSRAI == Opcode)
48380         Elt.ashrInPlace(ShiftVal);
48381       else
48382         Elt.lshrInPlace(ShiftVal);
48383     }
48384     // Reset undef elements since they were zeroed above.
48385     UndefElts = 0;
48386     return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
48387   }
48388 
48389   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48390   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBitsPerElt),
48391                                DCI))
48392     return SDValue(N, 0);
48393 
48394   return SDValue();
48395 }
48396 
48397 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
48398                                    TargetLowering::DAGCombinerInfo &DCI,
48399                                    const X86Subtarget &Subtarget) {
48400   EVT VT = N->getValueType(0);
48401   unsigned Opcode = N->getOpcode();
48402   assert(((Opcode == X86ISD::PINSRB && VT == MVT::v16i8) ||
48403           (Opcode == X86ISD::PINSRW && VT == MVT::v8i16) ||
48404           Opcode == ISD::INSERT_VECTOR_ELT) &&
48405          "Unexpected vector insertion");
48406 
48407   // Fold insert_vector_elt(undef, elt, 0) --> scalar_to_vector(elt).
48408   if (Opcode == ISD::INSERT_VECTOR_ELT && N->getOperand(0).isUndef() &&
48409       isNullConstant(N->getOperand(2)))
48410     return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, N->getOperand(1));
48411 
48412   if (Opcode == X86ISD::PINSRB || Opcode == X86ISD::PINSRW) {
48413     unsigned NumBitsPerElt = VT.getScalarSizeInBits();
48414     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48415     if (TLI.SimplifyDemandedBits(SDValue(N, 0),
48416                                  APInt::getAllOnes(NumBitsPerElt), DCI))
48417       return SDValue(N, 0);
48418   }
48419 
48420   // Attempt to combine insertion patterns to a shuffle.
48421   if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
48422     SDValue Op(N, 0);
48423     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48424       return Res;
48425   }
48426 
48427   return SDValue();
48428 }
48429 
48430 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
48431 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
48432 /// OR -> CMPNEQSS.
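/// For example, (and (setcc_e CMP), (setcc_np CMP)) with a shared
/// CMP = (X86ISD::FCMP a, b) is the scalar "ordered and equal" pattern; on
/// the pre-AVX512 path it becomes a CMPEQSS-style FSETCC whose all-ones/zero
/// result is masked with 1 and truncated to i8.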
48433 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
48434                                    TargetLowering::DAGCombinerInfo &DCI,
48435                                    const X86Subtarget &Subtarget) {
48436   unsigned opcode;
48437 
48438   // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
48439   // we're requiring SSE2 for both.
48440   if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
48441     SDValue N0 = N->getOperand(0);
48442     SDValue N1 = N->getOperand(1);
48443     SDValue CMP0 = N0.getOperand(1);
48444     SDValue CMP1 = N1.getOperand(1);
48445     SDLoc DL(N);
48446 
48447     // The SETCCs should both refer to the same CMP.
48448     if (CMP0.getOpcode() != X86ISD::FCMP || CMP0 != CMP1)
48449       return SDValue();
48450 
48451     SDValue CMP00 = CMP0->getOperand(0);
48452     SDValue CMP01 = CMP0->getOperand(1);
48453     EVT     VT    = CMP00.getValueType();
48454 
48455     if (VT == MVT::f32 || VT == MVT::f64 ||
48456         (VT == MVT::f16 && Subtarget.hasFP16())) {
48457       bool ExpectingFlags = false;
48458       // Check for any users that want flags:
48459       for (const SDNode *U : N->uses()) {
48460         if (ExpectingFlags)
48461           break;
48462 
48463         switch (U->getOpcode()) {
48464         default:
48465         case ISD::BR_CC:
48466         case ISD::BRCOND:
48467         case ISD::SELECT:
48468           ExpectingFlags = true;
48469           break;
48470         case ISD::CopyToReg:
48471         case ISD::SIGN_EXTEND:
48472         case ISD::ZERO_EXTEND:
48473         case ISD::ANY_EXTEND:
48474           break;
48475         }
48476       }
48477 
48478       if (!ExpectingFlags) {
48479         enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
48480         enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
48481 
48482         if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
48483           X86::CondCode tmp = cc0;
48484           cc0 = cc1;
48485           cc1 = tmp;
48486         }
48487 
48488         if ((cc0 == X86::COND_E  && cc1 == X86::COND_NP) ||
48489             (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
48490           // FIXME: need symbolic constants for these magic numbers.
48491           // See X86ATTInstPrinter.cpp:printSSECC().
48492           unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
48493           if (Subtarget.hasAVX512()) {
48494             SDValue FSetCC =
48495                 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
48496                             DAG.getTargetConstant(x86cc, DL, MVT::i8));
48497             // Need to fill with zeros to ensure the bitcast will produce zeroes
48498             // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
48499             SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
48500                                       DAG.getConstant(0, DL, MVT::v16i1),
48501                                       FSetCC, DAG.getIntPtrConstant(0, DL));
48502             return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
48503                                       N->getSimpleValueType(0));
48504           }
48505           SDValue OnesOrZeroesF =
48506               DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
48507                           CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
48508 
48509           bool is64BitFP = (CMP00.getValueType() == MVT::f64);
48510           MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
48511 
48512           if (is64BitFP && !Subtarget.is64Bit()) {
48513             // On a 32-bit target, we cannot bitcast the 64-bit float to a
48514             // 64-bit integer, since that's not a legal type. Since
48515             // OnesOrZeroesF is all ones or all zeroes, we don't need all the
48516             // bits, but can do this little dance to extract the lowest 32 bits
48517             // and work with those going forward.
48518             SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
48519                                            OnesOrZeroesF);
48520             SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
48521             OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
48522                                         Vector32, DAG.getIntPtrConstant(0, DL));
48523             IntVT = MVT::i32;
48524           }
48525 
48526           SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
48527           SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
48528                                       DAG.getConstant(1, DL, IntVT));
48529           SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
48530                                               ANDed);
48531           return OneBitOfTruth;
48532         }
48533       }
48534     }
48535   }
48536   return SDValue();
48537 }
48538 
48539 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
48540 static SDValue combineAndNotIntoANDNP(SDNode *N, SelectionDAG &DAG) {
48541   assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");
48542 
48543   MVT VT = N->getSimpleValueType(0);
48544   if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
48545     return SDValue();
48546 
48547   SDValue X, Y;
48548   SDValue N0 = N->getOperand(0);
48549   SDValue N1 = N->getOperand(1);
48550 
48551   if (SDValue Not = IsNOT(N0, DAG)) {
48552     X = Not;
48553     Y = N1;
48554   } else if (SDValue Not = IsNOT(N1, DAG)) {
48555     X = Not;
48556     Y = N0;
48557   } else
48558     return SDValue();
48559 
48560   X = DAG.getBitcast(VT, X);
48561   Y = DAG.getBitcast(VT, Y);
48562   return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
48563 }
48564 
48565 /// Try to fold:
48566 ///   and (vector_shuffle<Z,...,Z>
48567 ///            (insert_vector_elt undef, (xor X, -1), Z), undef), Y
48568 ///   ->
48569 ///   andnp (vector_shuffle<Z,...,Z>
48570 ///              (insert_vector_elt undef, X, Z), undef), Y
48571 static SDValue combineAndShuffleNot(SDNode *N, SelectionDAG &DAG,
48572                                     const X86Subtarget &Subtarget) {
48573   assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");
48574 
48575   EVT VT = N->getValueType(0);
48576   // Do not split 256 and 512 bit vectors with SSE2 as they overwrite the
48577   // original value and require extra moves.
48578   if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
48579         ((VT.is256BitVector() || VT.is512BitVector()) && Subtarget.hasAVX())))
48580     return SDValue();
48581 
48582   auto GetNot = [&DAG](SDValue V) {
48583     auto *SVN = dyn_cast<ShuffleVectorSDNode>(peekThroughOneUseBitcasts(V));
48584     // TODO: SVN->hasOneUse() is a strong condition. It can be relaxed if all
48585     // end-users are ISD::AND including cases
48586     // (and(extract_vector_element(SVN), Y)).
48587     if (!SVN || !SVN->hasOneUse() || !SVN->isSplat() ||
48588         !SVN->getOperand(1).isUndef()) {
48589       return SDValue();
48590     }
48591     SDValue IVEN = SVN->getOperand(0);
48592     if (IVEN.getOpcode() != ISD::INSERT_VECTOR_ELT ||
48593         !IVEN.getOperand(0).isUndef() || !IVEN.hasOneUse())
48594       return SDValue();
48595     if (!isa<ConstantSDNode>(IVEN.getOperand(2)) ||
48596         IVEN.getConstantOperandAPInt(2) != SVN->getSplatIndex())
48597       return SDValue();
48598     SDValue Src = IVEN.getOperand(1);
48599     if (SDValue Not = IsNOT(Src, DAG)) {
48600       SDValue NotSrc = DAG.getBitcast(Src.getValueType(), Not);
48601       SDValue NotIVEN =
48602           DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(IVEN), IVEN.getValueType(),
48603                       IVEN.getOperand(0), NotSrc, IVEN.getOperand(2));
48604       return DAG.getVectorShuffle(SVN->getValueType(0), SDLoc(SVN), NotIVEN,
48605                                   SVN->getOperand(1), SVN->getMask());
48606     }
48607     return SDValue();
48608   };
48609 
48610   SDValue X, Y;
48611   SDValue N0 = N->getOperand(0);
48612   SDValue N1 = N->getOperand(1);
48613 
48614   if (SDValue Not = GetNot(N0)) {
48615     X = Not;
48616     Y = N1;
48617   } else if (SDValue Not = GetNot(N1)) {
48618     X = Not;
48619     Y = N0;
48620   } else
48621     return SDValue();
48622 
48623   X = DAG.getBitcast(VT, X);
48624   Y = DAG.getBitcast(VT, Y);
48625   SDLoc DL(N);
48626   // We do not split for SSE at all, but we need to split vectors for AVX1 and
48627   // AVX2.
48628   if (!Subtarget.useAVX512Regs() && VT.is512BitVector()) {
48629     SDValue LoX, HiX;
48630     std::tie(LoX, HiX) = splitVector(X, DAG, DL);
48631     SDValue LoY, HiY;
48632     std::tie(LoY, HiY) = splitVector(Y, DAG, DL);
48633     EVT SplitVT = LoX.getValueType();
48634     SDValue LoV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {LoX, LoY});
48635     SDValue HiV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {HiX, HiY});
48636     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, {LoV, HiV});
48637   }
48638   return DAG.getNode(X86ISD::ANDNP, DL, VT, {X, Y});
48639 }
48640 
48641 // Try to widen AND, OR and XOR nodes to VT in order to remove casts around
48642 // logical operations, like in the example below.
48643 //   or (and (truncate x), (truncate y)),
48644 //      (xor (truncate z), build_vector (constants))
48645 // Given a target type \p VT, we generate
48646 //   or (and x, y), (xor z, zext(build_vector (constants)))
48647 // given x, y and z are of type \p VT. We can do so if each operand is either
48648 // a truncate from VT, a build_vector of constants (second operand only), or
48649 // can itself be recursively promoted.
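// For example, with VT = v8i32, (and (truncate X to v8i16), (truncate Y to
// v8i16)) is rebuilt as (and X, Y) : v8i32, and a constant build_vector on
// the right-hand side is zero-extended to v8i32.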
48650 static SDValue PromoteMaskArithmetic(SDNode *N, EVT VT, SelectionDAG &DAG,
48651                                      unsigned Depth) {
48652   // Limit recursion to avoid excessive compile times.
48653   if (Depth >= SelectionDAG::MaxRecursionDepth)
48654     return SDValue();
48655 
48656   if (N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND &&
48657       N->getOpcode() != ISD::OR)
48658     return SDValue();
48659 
48660   SDValue N0 = N->getOperand(0);
48661   SDValue N1 = N->getOperand(1);
48662   SDLoc DL(N);
48663 
48664   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48665   if (!TLI.isOperationLegalOrPromote(N->getOpcode(), VT))
48666     return SDValue();
48667 
48668   if (SDValue NN0 = PromoteMaskArithmetic(N0.getNode(), VT, DAG, Depth + 1))
48669     N0 = NN0;
48670   else {
48671     // The Left side has to be a trunc.
48672     if (N0.getOpcode() != ISD::TRUNCATE)
48673       return SDValue();
48674 
48675     // The type of the truncated inputs.
48676     if (N0.getOperand(0).getValueType() != VT)
48677       return SDValue();
48678 
48679     N0 = N0.getOperand(0);
48680   }
48681 
48682   if (SDValue NN1 = PromoteMaskArithmetic(N1.getNode(), VT, DAG, Depth + 1))
48683     N1 = NN1;
48684   else {
48685     // The right side has to be a 'trunc' or a constant vector.
48686     bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
48687                     N1.getOperand(0).getValueType() == VT;
48688     if (!RHSTrunc && !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
48689       return SDValue();
48690 
48691     if (RHSTrunc)
48692       N1 = N1.getOperand(0);
48693     else
48694       N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
48695   }
48696 
48697   return DAG.getNode(N->getOpcode(), DL, VT, N0, N1);
48698 }
48699 
48700 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
48701 // register. In most cases we actually compare or select YMM-sized registers
48702 // and mixing the two types creates horrible code. This method optimizes
48703 // some of the transition sequences.
48704 // Even with AVX-512 this is still useful for removing casts around logical
48705 // operations on vXi1 mask types.
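// For example, (sign_extend (and (truncate A), (truncate B)) to v8i32), with
// A and B of type v8i32, becomes (sign_extend_inreg (and A, B), v8i16), so
// the logic stays in the wide type and only the extension is re-applied.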
48706 static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
48707                                      const X86Subtarget &Subtarget) {
48708   EVT VT = N->getValueType(0);
48709   assert(VT.isVector() && "Expected vector type");
48710 
48711   SDLoc DL(N);
48712   assert((N->getOpcode() == ISD::ANY_EXTEND ||
48713           N->getOpcode() == ISD::ZERO_EXTEND ||
48714           N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
48715 
48716   SDValue Narrow = N->getOperand(0);
48717   EVT NarrowVT = Narrow.getValueType();
48718 
48719   // Generate the wide operation.
48720   SDValue Op = PromoteMaskArithmetic(Narrow.getNode(), VT, DAG, 0);
48721   if (!Op)
48722     return SDValue();
48723   switch (N->getOpcode()) {
48724   default: llvm_unreachable("Unexpected opcode");
48725   case ISD::ANY_EXTEND:
48726     return Op;
48727   case ISD::ZERO_EXTEND:
48728     return DAG.getZeroExtendInReg(Op, DL, NarrowVT);
48729   case ISD::SIGN_EXTEND:
48730     return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
48731                        Op, DAG.getValueType(NarrowVT));
48732   }
48733 }
48734 
48735 static unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {
48736   unsigned FPOpcode;
48737   switch (Opcode) {
48738   default: llvm_unreachable("Unexpected input node for FP logic conversion");
48739   case ISD::AND: FPOpcode = X86ISD::FAND; break;
48740   case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
48741   case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
48742   }
48743   return FPOpcode;
48744 }
48745 
48746 /// If both input operands of a logic op are being cast from floating-point
48747 /// types or FP compares, try to convert this into a floating-point logic node
48748 /// to avoid unnecessary moves from SSE to integer registers.
48749 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
48750                                         TargetLowering::DAGCombinerInfo &DCI,
48751                                         const X86Subtarget &Subtarget) {
48752   EVT VT = N->getValueType(0);
48753   SDValue N0 = N->getOperand(0);
48754   SDValue N1 = N->getOperand(1);
48755   SDLoc DL(N);
48756 
48757   if (!((N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) ||
48758         (N0.getOpcode() == ISD::SETCC && N1.getOpcode() == ISD::SETCC)))
48759     return SDValue();
48760 
48761   SDValue N00 = N0.getOperand(0);
48762   SDValue N10 = N1.getOperand(0);
48763   EVT N00Type = N00.getValueType();
48764   EVT N10Type = N10.getValueType();
48765 
48766   // Ensure that both types are the same and are legal scalar fp types.
48767   if (N00Type != N10Type || !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
48768                               (Subtarget.hasSSE2() && N00Type == MVT::f64) ||
48769                               (Subtarget.hasFP16() && N00Type == MVT::f16)))
48770     return SDValue();
48771 
48772   if (N0.getOpcode() == ISD::BITCAST && !DCI.isBeforeLegalizeOps()) {
48773     unsigned FPOpcode = convertIntLogicToFPLogicOpcode(N->getOpcode());
48774     SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
48775     return DAG.getBitcast(VT, FPLogic);
48776   }
48777 
48778   if (VT != MVT::i1 || N0.getOpcode() != ISD::SETCC || !N0.hasOneUse() ||
48779       !N1.hasOneUse())
48780     return SDValue();
48781 
48782   ISD::CondCode CC0 = cast<CondCodeSDNode>(N0.getOperand(2))->get();
48783   ISD::CondCode CC1 = cast<CondCodeSDNode>(N1.getOperand(2))->get();
48784 
48785   // The vector ISA for FP predicates is incomplete before AVX, so converting
48786   // COMIS* to CMPS* may not be a win before AVX.
48787   if (!Subtarget.hasAVX() &&
48788       !(cheapX86FSETCC_SSE(CC0) && cheapX86FSETCC_SSE(CC1)))
48789     return SDValue();
48790 
48791   // Convert scalar FP compares and logic to vector compares (COMIS* to CMPS*)
48792   // and vector logic:
48793   // logic (setcc N00, N01), (setcc N10, N11) -->
48794   // extelt (logic (setcc (s2v N00), (s2v N01)), setcc (s2v N10), (s2v N11))), 0
48795   unsigned NumElts = 128 / N00Type.getSizeInBits();
48796   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), N00Type, NumElts);
48797   EVT BoolVecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
48798   SDValue ZeroIndex = DAG.getVectorIdxConstant(0, DL);
48799   SDValue N01 = N0.getOperand(1);
48800   SDValue N11 = N1.getOperand(1);
48801   SDValue Vec00 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N00);
48802   SDValue Vec01 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N01);
48803   SDValue Vec10 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N10);
48804   SDValue Vec11 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N11);
48805   SDValue Setcc0 = DAG.getSetCC(DL, BoolVecVT, Vec00, Vec01, CC0);
48806   SDValue Setcc1 = DAG.getSetCC(DL, BoolVecVT, Vec10, Vec11, CC1);
48807   SDValue Logic = DAG.getNode(N->getOpcode(), DL, BoolVecVT, Setcc0, Setcc1);
48808   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Logic, ZeroIndex);
48809 }
48810 
48811 // Attempt to fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
48812 // to reduce XMM->GPR traffic.
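// For example, (and (movmsk V0 : v4f32), (movmsk V1 : v4f32)) becomes
// (movmsk (fand V0, V1)), performing the logic op in the vector domain and
// leaving a single XMM->GPR transfer.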
48813 static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
48814   unsigned Opc = N->getOpcode();
48815   assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48816          "Unexpected bit opcode");
48817 
48818   SDValue N0 = N->getOperand(0);
48819   SDValue N1 = N->getOperand(1);
48820 
48821   // Both operands must be single use MOVMSK.
48822   if (N0.getOpcode() != X86ISD::MOVMSK || !N0.hasOneUse() ||
48823       N1.getOpcode() != X86ISD::MOVMSK || !N1.hasOneUse())
48824     return SDValue();
48825 
48826   SDValue Vec0 = N0.getOperand(0);
48827   SDValue Vec1 = N1.getOperand(0);
48828   EVT VecVT0 = Vec0.getValueType();
48829   EVT VecVT1 = Vec1.getValueType();
48830 
48831   // Both MOVMSK operands must be from vectors of the same size and same element
48832   // size, but it's OK for an fp/int difference.
48833   if (VecVT0.getSizeInBits() != VecVT1.getSizeInBits() ||
48834       VecVT0.getScalarSizeInBits() != VecVT1.getScalarSizeInBits())
48835     return SDValue();
48836 
48837   SDLoc DL(N);
48838   unsigned VecOpc =
48839       VecVT0.isFloatingPoint() ? convertIntLogicToFPLogicOpcode(Opc) : Opc;
48840   SDValue Result =
48841       DAG.getNode(VecOpc, DL, VecVT0, Vec0, DAG.getBitcast(VecVT0, Vec1));
48842   return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
48843 }
48844 
48845 // Attempt to fold BITOP(SHIFT(X,Z),SHIFT(Y,Z)) -> SHIFT(BITOP(X,Y),Z).
48846 // NOTE: This is a very limited case of what SimplifyUsingDistributiveLaws
48847 // handles in InstCombine.
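// For example (a sketch with an arbitrary shift amount), when both operands are
// single-use shifts by the same immediate:
//   xor (vsrli X, 4), (vsrli Y, 4) --> vsrli (xor X, Y), 4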
48848 static SDValue combineBitOpWithShift(SDNode *N, SelectionDAG &DAG) {
48849   unsigned Opc = N->getOpcode();
48850   assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48851          "Unexpected bit opcode");
48852 
48853   SDValue N0 = N->getOperand(0);
48854   SDValue N1 = N->getOperand(1);
48855   EVT VT = N->getValueType(0);
48856 
48857   // Both operands must be single use.
48858   if (!N0.hasOneUse() || !N1.hasOneUse())
48859     return SDValue();
48860 
48861   // Search for matching shifts.
48862   SDValue BC0 = peekThroughOneUseBitcasts(N0);
48863   SDValue BC1 = peekThroughOneUseBitcasts(N1);
48864 
48865   unsigned BCOpc = BC0.getOpcode();
48866   EVT BCVT = BC0.getValueType();
48867   if (BCOpc != BC1->getOpcode() || BCVT != BC1.getValueType())
48868     return SDValue();
48869 
48870   switch (BCOpc) {
48871   case X86ISD::VSHLI:
48872   case X86ISD::VSRLI:
48873   case X86ISD::VSRAI: {
48874     if (BC0.getOperand(1) != BC1.getOperand(1))
48875       return SDValue();
48876 
48877     SDLoc DL(N);
48878     SDValue BitOp =
48879         DAG.getNode(Opc, DL, BCVT, BC0.getOperand(0), BC1.getOperand(0));
48880     SDValue Shift = DAG.getNode(BCOpc, DL, BCVT, BitOp, BC0.getOperand(1));
48881     return DAG.getBitcast(VT, Shift);
48882   }
48883   }
48884 
48885   return SDValue();
48886 }
48887 
48888 /// If this is a zero/all-bits result that is bitwise-anded with a low-bits
48889 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
48890 /// with a shift-right to eliminate loading the vector constant mask value.
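/// For illustration (v4i32, an assumed case): if every element of X is known to
/// be all-zeros or all-ones (e.g. a compare result), then
///   and X, <1,1,1,1> --> vpsrld X, 31
/// which avoids materializing the constant mask.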
48891 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
48892                                      const X86Subtarget &Subtarget) {
48893   SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
48894   SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
48895   EVT VT = Op0.getValueType();
48896   if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
48897     return SDValue();
48898 
48899   // Try to convert an "is positive" signbit masking operation into arithmetic
48900   // shift and "andn". This saves a materialization of a -1 vector constant.
48901   // The "is negative" variant should be handled more generally because it only
48902   // requires "and" rather than "andn":
48903   // and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth - 1), Y
48904   //
48905   // This is limited to the original type to avoid producing even more bitcasts.
48906   // If the bitcasts can't be eliminated, then it is unlikely that this fold
48907   // will be profitable.
48908   if (N->getValueType(0) == VT &&
48909       supportedVectorShiftWithImm(VT.getSimpleVT(), Subtarget, ISD::SRA)) {
48910     SDValue X, Y;
48911     if (Op1.hasOneUse() && Op1.getOpcode() == X86ISD::PCMPGT &&
48912         isAllOnesOrAllOnesSplat(Op1.getOperand(1))) {
48913       X = Op1.getOperand(0);
48914       Y = Op0;
48915     } else if (Op0.hasOneUse() && Op0.getOpcode() == X86ISD::PCMPGT &&
48916                isAllOnesOrAllOnesSplat(Op0.getOperand(1))) {
48917       X = Op0.getOperand(0);
48918       Y = Op1;
48919     }
48920     if (X && Y) {
48921       SDLoc DL(N);
48922       SDValue Sra =
48923           getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
48924                                      VT.getScalarSizeInBits() - 1, DAG);
48925       return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
48926     }
48927   }
48928 
48929   APInt SplatVal;
48930   if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
48931       !SplatVal.isMask())
48932     return SDValue();
48933 
48934   // Don't prevent creation of ANDN.
48935   if (isBitwiseNot(Op0))
48936     return SDValue();
48937 
48938   if (!supportedVectorShiftWithImm(VT.getSimpleVT(), Subtarget, ISD::SRL))
48939     return SDValue();
48940 
48941   unsigned EltBitWidth = VT.getScalarSizeInBits();
48942   if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
48943     return SDValue();
48944 
48945   SDLoc DL(N);
48946   unsigned ShiftVal = SplatVal.countTrailingOnes();
48947   SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
48948   SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT, Op0, ShAmt);
48949   return DAG.getBitcast(N->getValueType(0), Shift);
48950 }
48951 
48952 // Get the index node from the lowered DAG of a GEP IR instruction with one
48953 // indexing dimension.
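// For example (a sketch of the expected address shape), a non-indexed load
// whose base pointer is
//   add (shl %idx, %amt), %base
// yields %idx; any other shape returns SDValue().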
48954 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
48955   if (Ld->isIndexed())
48956     return SDValue();
48957 
48958   SDValue Base = Ld->getBasePtr();
48959 
48960   if (Base.getOpcode() != ISD::ADD)
48961     return SDValue();
48962 
48963   SDValue ShiftedIndex = Base.getOperand(0);
48964 
48965   if (ShiftedIndex.getOpcode() != ISD::SHL)
48966     return SDValue();
48967 
48968   return ShiftedIndex.getOperand(0);
48969 
48970 }
48971 
48972 static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
48973   if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
48974     switch (VT.getSizeInBits()) {
48975     default: return false;
48976     case 64: return Subtarget.is64Bit();
48977     case 32: return true;
48978     }
48979   }
48980   return false;
48981 }
48982 
48983 // This function recognizes cases where the X86 BZHI instruction can replace an
48984 // 'and-load' sequence.
48985 // When an integer value is loaded from an array of constants defined as
48986 // follows:
48987 //
48988 //   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
48989 //
48990 // and the loaded value is then bitwise-anded with another input, the whole
48991 // sequence is equivalent to performing BZHI (zero high bits) on that input,
48992 // using the same index as the load.
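// A conceptual C sketch of the pattern being matched (names are hypothetical):
//   static const uint32_t MaskTable[32] = {0x0, 0x1, 0x3, 0x7, /* 2^j - 1 */};
//   uint32_t f(uint32_t In, unsigned Idx) { return In & MaskTable[Idx]; }
// Masking with MaskTable[Idx] keeps only the low Idx bits of In, i.e. it is a
// single BZHI of In by Idx.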
48993 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
48994                                     const X86Subtarget &Subtarget) {
48995   MVT VT = Node->getSimpleValueType(0);
48996   SDLoc dl(Node);
48997 
48998   // Check if subtarget has BZHI instruction for the node's type
48999   if (!hasBZHI(Subtarget, VT))
49000     return SDValue();
49001 
49002   // Try matching the pattern for both operands.
49003   for (unsigned i = 0; i < 2; i++) {
49004     SDValue N = Node->getOperand(i);
49005     LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
49006 
49007     // Bail out if the operand is not a load instruction.
49008     if (!Ld)
49009       return SDValue();
49010 
49011     const Value *MemOp = Ld->getMemOperand()->getValue();
49012 
49013     if (!MemOp)
49014       return SDValue();
49015 
49016     if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
49017       if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
49018         if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
49019 
49020           Constant *Init = GV->getInitializer();
49021           Type *Ty = Init->getType();
49022           if (!isa<ConstantDataArray>(Init) ||
49023               !Ty->getArrayElementType()->isIntegerTy() ||
49024               Ty->getArrayElementType()->getScalarSizeInBits() !=
49025                   VT.getSizeInBits() ||
49026               Ty->getArrayNumElements() >
49027                   Ty->getArrayElementType()->getScalarSizeInBits())
49028             continue;
49029 
49030           // Check if the array's constant elements are suitable to our case.
49031           uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
49032           bool ConstantsMatch = true;
49033           for (uint64_t j = 0; j < ArrayElementCount; j++) {
49034             auto *Elem = cast<ConstantInt>(Init->getAggregateElement(j));
49035             if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
49036               ConstantsMatch = false;
49037               break;
49038             }
49039           }
49040           if (!ConstantsMatch)
49041             continue;
49042 
49043           // Do the transformation (For 32-bit type):
49044           // -> (and (load arr[idx]), inp)
49045           // <- (and (srl 0xFFFFFFFF, (sub 32, idx)))
49046           //    that will be replaced with one bzhi instruction.
49047           SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
49048           SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
49049 
49050           // Get the Node which indexes into the array.
49051           SDValue Index = getIndexFromUnindexedLoad(Ld);
49052           if (!Index)
49053             return SDValue();
49054           Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
49055 
49056           SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
49057           Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
49058 
49059           SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
49060           SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
49061 
49062           return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
49063         }
49064       }
49065     }
49066   }
49067   return SDValue();
49068 }
49069 
49070 // Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef, ...))), C)
49071 // where C is a mask containing the same number of bits as the setcc and
49072 // where the setcc will freely zero the upper bits of the k-register. We can
49073 // replace the undef in the concat with 0s and remove the AND. This mainly
49074 // helps with v2i1/v4i1 setcc being cast to scalar.
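// For illustration (an assumed v2i1 case with an i8 result and C == 0x3):
//   and (bitcast (v8i1 concat (v2i1 setcc), undef, undef, undef)), 3
// is rebuilt as
//   bitcast (v8i1 concat (v2i1 setcc), zero, zero, zero)
// and the AND is dropped, since the zeroed lanes already clear the bits the
// mask was clearing.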
49075 static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
49076                                              const X86Subtarget &Subtarget) {
49077   assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
49078 
49079   EVT VT = N->getValueType(0);
49080 
49081   // Make sure this is an AND with a constant. We will check the value of the
49082   // constant later.
49083   auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
49084   if (!C1)
49085     return SDValue();
49086 
49087   // This is implied by the ConstantSDNode.
49088   assert(!VT.isVector() && "Expected scalar VT!");
49089 
49090   SDValue Src = N->getOperand(0);
49091   if (!Src.hasOneUse())
49092     return SDValue();
49093 
49094   // (Optionally) peek through any_extend().
49095   if (Src.getOpcode() == ISD::ANY_EXTEND) {
49096     if (!Src.getOperand(0).hasOneUse())
49097       return SDValue();
49098     Src = Src.getOperand(0);
49099   }
49100 
49101   if (Src.getOpcode() != ISD::BITCAST || !Src.getOperand(0).hasOneUse())
49102     return SDValue();
49103 
49104   Src = Src.getOperand(0);
49105   EVT SrcVT = Src.getValueType();
49106 
49107   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49108   if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
49109       !TLI.isTypeLegal(SrcVT))
49110     return SDValue();
49111 
49112   if (Src.getOpcode() != ISD::CONCAT_VECTORS)
49113     return SDValue();
49114 
49115   // We only care about the first subvector of the concat; we expect the
49116   // other subvectors to be ignored due to the AND if we make the change.
49117   SDValue SubVec = Src.getOperand(0);
49118   EVT SubVecVT = SubVec.getValueType();
49119 
49120   // The RHS of the AND should be a mask with as many bits as SubVec.
49121   if (!TLI.isTypeLegal(SubVecVT) ||
49122       !C1->getAPIntValue().isMask(SubVecVT.getVectorNumElements()))
49123     return SDValue();
49124 
49125   // The first subvector should be a setcc with a legal result type or an
49126   // AND containing at least one setcc with a legal result type.
49127   auto IsLegalSetCC = [&](SDValue V) {
49128     if (V.getOpcode() != ISD::SETCC)
49129       return false;
49130     EVT SetccVT = V.getOperand(0).getValueType();
49131     if (!TLI.isTypeLegal(SetccVT) ||
49132         !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
49133       return false;
49134     if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
49135       return false;
49136     return true;
49137   };
49138   if (!(IsLegalSetCC(SubVec) || (SubVec.getOpcode() == ISD::AND &&
49139                                  (IsLegalSetCC(SubVec.getOperand(0)) ||
49140                                   IsLegalSetCC(SubVec.getOperand(1))))))
49141     return SDValue();
49142 
49143   // We passed all the checks. Rebuild the concat_vectors with zeroes
49144   // and cast it back to VT.
49145   SDLoc dl(N);
49146   SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
49147                               DAG.getConstant(0, dl, SubVecVT));
49148   Ops[0] = SubVec;
49149   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT,
49150                                Ops);
49151   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcVT.getSizeInBits());
49152   return DAG.getZExtOrTrunc(DAG.getBitcast(IntVT, Concat), dl, VT);
49153 }
49154 
49155 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
49156                           TargetLowering::DAGCombinerInfo &DCI,
49157                           const X86Subtarget &Subtarget) {
49158   SDValue N0 = N->getOperand(0);
49159   SDValue N1 = N->getOperand(1);
49160   EVT VT = N->getValueType(0);
49161   SDLoc dl(N);
49162   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49163 
49164   // If this is SSE1-only, convert to FAND to avoid scalarization.
49165   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
49166     return DAG.getBitcast(MVT::v4i32,
49167                           DAG.getNode(X86ISD::FAND, dl, MVT::v4f32,
49168                                       DAG.getBitcast(MVT::v4f32, N0),
49169                                       DAG.getBitcast(MVT::v4f32, N1)));
49170   }
49171 
49172   // Use a 32-bit and+zext if upper bits known zero.
49173   if (VT == MVT::i64 && Subtarget.is64Bit() && !isa<ConstantSDNode>(N1)) {
49174     APInt HiMask = APInt::getHighBitsSet(64, 32);
49175     if (DAG.MaskedValueIsZero(N1, HiMask) ||
49176         DAG.MaskedValueIsZero(N0, HiMask)) {
49177       SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N0);
49178       SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N1);
49179       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
49180                          DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
49181     }
49182   }
49183 
49184   // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
49185   // TODO: Support multiple SrcOps.
49186   if (VT == MVT::i1) {
49187     SmallVector<SDValue, 2> SrcOps;
49188     SmallVector<APInt, 2> SrcPartials;
49189     if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps, &SrcPartials) &&
49190         SrcOps.size() == 1) {
49191       unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
49192       EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
49193       SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
49194       if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
49195         Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
49196       if (Mask) {
49197         assert(SrcPartials[0].getBitWidth() == NumElts &&
49198                "Unexpected partial reduction mask");
49199         SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
49200         Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
49201         return DAG.getSetCC(dl, MVT::i1, Mask, PartialBits, ISD::SETEQ);
49202       }
49203     }
49204   }
49205 
49206   if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
49207     return V;
49208 
49209   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
49210     return R;
49211 
49212   if (SDValue R = combineBitOpWithShift(N, DAG))
49213     return R;
49214 
49215   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
49216     return FPLogic;
49217 
49218   if (SDValue R = combineAndShuffleNot(N, DAG, Subtarget))
49219     return R;
49220 
49221   if (DCI.isBeforeLegalizeOps())
49222     return SDValue();
49223 
49224   if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
49225     return R;
49226 
49227   if (SDValue R = combineAndNotIntoANDNP(N, DAG))
49228     return R;
49229 
49230   if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
49231     return ShiftRight;
49232 
49233   if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
49234     return R;
49235 
49236   // fold (and (mul x, c1), c2) -> (mul x, (and c1, c2))
49237   // iff c2 is all/no bits mask - i.e. a select-with-zero mask.
49238   // TODO: Handle PMULDQ/PMULUDQ/VPMADDWD/VPMADDUBSW?
49239   if (VT.isVector() && getTargetConstantFromNode(N1)) {
49240     unsigned Opc0 = N0.getOpcode();
49241     if ((Opc0 == ISD::MUL || Opc0 == ISD::MULHU || Opc0 == ISD::MULHS) &&
49242         getTargetConstantFromNode(N0.getOperand(1)) &&
49243         DAG.ComputeNumSignBits(N1) == VT.getScalarSizeInBits() &&
49244         N0->hasOneUse() && N0.getOperand(1)->hasOneUse()) {
49245       SDValue MaskMul = DAG.getNode(ISD::AND, dl, VT, N0.getOperand(1), N1);
49246       return DAG.getNode(Opc0, dl, VT, N0.getOperand(0), MaskMul);
49247     }
49248   }
49249 
49250   // Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y), COND_B) iff Y is not a constant.
49251   // This avoids a slow variable shift (moving the shift amount to ECX etc.).
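  // For example (a sketch): testing bit Y of X,
  //   and (srl X, Y), 1           --> setcc(COND_B,  bt X, Y)
  //   and (srl (not X), Y), 1     --> setcc(COND_AE, bt X, Y)
  // with the condition flipping once for each NOT that is peeked through.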
49252   if (isOneConstant(N1) && N0->hasOneUse()) {
49253     SDValue Src = N0;
49254     while ((Src.getOpcode() == ISD::ZERO_EXTEND ||
49255             Src.getOpcode() == ISD::TRUNCATE) &&
49256            Src.getOperand(0)->hasOneUse())
49257       Src = Src.getOperand(0);
49258     bool ContainsNOT = false;
49259     X86::CondCode X86CC = X86::COND_B;
49260     // Peek through AND(NOT(SRL(X,Y)),1).
49261     if (isBitwiseNot(Src)) {
49262       Src = Src.getOperand(0);
49263       X86CC = X86::COND_AE;
49264       ContainsNOT = true;
49265     }
49266     if (Src.getOpcode() == ISD::SRL &&
49267         !isa<ConstantSDNode>(Src.getOperand(1))) {
49268       SDValue BitNo = Src.getOperand(1);
49269       Src = Src.getOperand(0);
49270       // Peek through AND(SRL(NOT(X),Y),1).
49271       if (isBitwiseNot(Src)) {
49272         Src = Src.getOperand(0);
49273         X86CC = X86CC == X86::COND_AE ? X86::COND_B : X86::COND_AE;
49274         ContainsNOT = true;
49275       }
49276       // If we have BMI2 then SHRX should be faster for i32/i64 cases.
49277       if (!(Subtarget.hasBMI2() && !ContainsNOT && VT.getSizeInBits() >= 32))
49278         if (SDValue BT = getBT(Src, BitNo, dl, DAG))
49279           return DAG.getZExtOrTrunc(getSETCC(X86CC, BT, dl, DAG), dl, VT);
49280     }
49281   }
49282 
49283   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
49284     // Attempt to recursively combine a bitmask AND with shuffles.
49285     SDValue Op(N, 0);
49286     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
49287       return Res;
49288 
49289     // If either operand is a constant mask, then only the elements that aren't
49290     // zero are actually demanded by the other operand.
49291     auto GetDemandedMasks = [&](SDValue Op) {
49292       APInt UndefElts;
49293       SmallVector<APInt> EltBits;
49294       int NumElts = VT.getVectorNumElements();
49295       int EltSizeInBits = VT.getScalarSizeInBits();
49296       APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
49297       APInt DemandedElts = APInt::getAllOnes(NumElts);
49298       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
49299                                         EltBits)) {
49300         DemandedBits.clearAllBits();
49301         DemandedElts.clearAllBits();
49302         for (int I = 0; I != NumElts; ++I) {
49303           if (UndefElts[I]) {
49304             // We can't assume an undef src element gives an undef dst - the
49305             // other src might be zero.
49306             DemandedBits.setAllBits();
49307             DemandedElts.setBit(I);
49308           } else if (!EltBits[I].isZero()) {
49309             DemandedBits |= EltBits[I];
49310             DemandedElts.setBit(I);
49311           }
49312         }
49313       }
49314       return std::make_pair(DemandedBits, DemandedElts);
49315     };
49316     APInt Bits0, Elts0;
49317     APInt Bits1, Elts1;
49318     std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
49319     std::tie(Bits1, Elts1) = GetDemandedMasks(N0);
49320 
49321     if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
49322         TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
49323         TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
49324         TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
49325       if (N->getOpcode() != ISD::DELETED_NODE)
49326         DCI.AddToWorklist(N);
49327       return SDValue(N, 0);
49328     }
49329 
49330     SDValue NewN0 = TLI.SimplifyMultipleUseDemandedBits(N0, Bits0, Elts0, DAG);
49331     SDValue NewN1 = TLI.SimplifyMultipleUseDemandedBits(N1, Bits1, Elts1, DAG);
49332     if (NewN0 || NewN1)
49333       return DAG.getNode(ISD::AND, dl, VT, NewN0 ? NewN0 : N0,
49334                          NewN1 ? NewN1 : N1);
49335   }
49336 
49337   // Attempt to combine a scalar bitmask AND with an extracted shuffle.
49338   if ((VT.getScalarSizeInBits() % 8) == 0 &&
49339       N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
49340       isa<ConstantSDNode>(N0.getOperand(1))) {
49341     SDValue BitMask = N1;
49342     SDValue SrcVec = N0.getOperand(0);
49343     EVT SrcVecVT = SrcVec.getValueType();
49344 
49345     // Check that the constant bitmask masks whole bytes.
49346     APInt UndefElts;
49347     SmallVector<APInt, 64> EltBits;
49348     if (VT == SrcVecVT.getScalarType() && N0->isOnlyUserOf(SrcVec.getNode()) &&
49349         getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
49350         llvm::all_of(EltBits, [](const APInt &M) {
49351           return M.isZero() || M.isAllOnes();
49352         })) {
49353       unsigned NumElts = SrcVecVT.getVectorNumElements();
49354       unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
49355       unsigned Idx = N0.getConstantOperandVal(1);
49356 
49357       // Create a root shuffle mask from the byte mask and the extracted index.
49358       SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
49359       for (unsigned i = 0; i != Scale; ++i) {
49360         if (UndefElts[i])
49361           continue;
49362         int VecIdx = Scale * Idx + i;
49363         ShuffleMask[VecIdx] = EltBits[i].isZero() ? SM_SentinelZero : VecIdx;
49364       }
49365 
49366       if (SDValue Shuffle = combineX86ShufflesRecursively(
49367               {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
49368               X86::MaxShuffleCombineDepth,
49369               /*HasVarMask*/ false, /*AllowVarCrossLaneMask*/ true,
49370               /*AllowVarPerLaneMask*/ true, DAG, Subtarget))
49371         return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Shuffle,
49372                            N0.getOperand(1));
49373     }
49374   }
49375 
49376   return SDValue();
49377 }
49378 
49379 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
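// For illustration (a sketch with an assumed constant byte mask M):
//   or (and X, M), (and Y, ~M) --> or (and X, M), (andnp M, Y)
// On AVX512 targets the routine below instead emits a single vpternlog
// (immediate 0xCA) directly, and XOP targets lower the select to PCMOV.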
49380 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
49381                                      const X86Subtarget &Subtarget) {
49382   assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
49383 
49384   MVT VT = N->getSimpleValueType(0);
49385   unsigned EltSizeInBits = VT.getScalarSizeInBits();
49386   if (!VT.isVector() || (EltSizeInBits % 8) != 0)
49387     return SDValue();
49388 
49389   SDValue N0 = peekThroughBitcasts(N->getOperand(0));
49390   SDValue N1 = peekThroughBitcasts(N->getOperand(1));
49391   if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
49392     return SDValue();
49393 
49394   // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
49395   // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
49396   if (!(Subtarget.hasXOP() || useVPTERNLOG(Subtarget, VT) ||
49397         !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
49398     return SDValue();
49399 
49400   // Attempt to extract constant byte masks.
49401   APInt UndefElts0, UndefElts1;
49402   SmallVector<APInt, 32> EltBits0, EltBits1;
49403   if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
49404                                      false, false))
49405     return SDValue();
49406   if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
49407                                      false, false))
49408     return SDValue();
49409 
49410   for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
49411     // TODO - add UNDEF elts support.
49412     if (UndefElts0[i] || UndefElts1[i])
49413       return SDValue();
49414     if (EltBits0[i] != ~EltBits1[i])
49415       return SDValue();
49416   }
49417 
49418   SDLoc DL(N);
49419 
49420   if (useVPTERNLOG(Subtarget, VT)) {
49421     // Emit a VPTERNLOG node directly - 0xCA is the imm code for A?B:C.
49422     // VPTERNLOG is only available for vXi32/vXi64 types.
49423     MVT OpSVT = EltSizeInBits == 32 ? MVT::i32 : MVT::i64;
49424     MVT OpVT =
49425         MVT::getVectorVT(OpSVT, VT.getSizeInBits() / OpSVT.getSizeInBits());
49426     SDValue A = DAG.getBitcast(OpVT, N0.getOperand(1));
49427     SDValue B = DAG.getBitcast(OpVT, N0.getOperand(0));
49428     SDValue C = DAG.getBitcast(OpVT, N1.getOperand(0));
49429     SDValue Imm = DAG.getTargetConstant(0xCA, DL, MVT::i8);
49430     SDValue Res = getAVX512Node(X86ISD::VPTERNLOG, DL, OpVT, {A, B, C, Imm},
49431                                 DAG, Subtarget);
49432     return DAG.getBitcast(VT, Res);
49433   }
49434 
49435   SDValue X = N->getOperand(0);
49436   SDValue Y =
49437       DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
49438                   DAG.getBitcast(VT, N1.getOperand(0)));
49439   return DAG.getNode(ISD::OR, DL, VT, X, Y);
49440 }
49441 
49442 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
49443 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
49444   if (N->getOpcode() != ISD::OR)
49445     return false;
49446 
49447   SDValue N0 = N->getOperand(0);
49448   SDValue N1 = N->getOperand(1);
49449 
49450   // Canonicalize AND to LHS.
49451   if (N1.getOpcode() == ISD::AND)
49452     std::swap(N0, N1);
49453 
49454   // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
49455   if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
49456     return false;
49457 
49458   Mask = N1.getOperand(0);
49459   X = N1.getOperand(1);
49460 
49461   // Check to see if the mask appeared in both the AND and ANDNP.
49462   if (N0.getOperand(0) == Mask)
49463     Y = N0.getOperand(1);
49464   else if (N0.getOperand(1) == Mask)
49465     Y = N0.getOperand(0);
49466   else
49467     return false;
49468 
49469   // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
49470   // ANDNP combine allows other combines to happen that prevent matching.
49471   return true;
49472 }
49473 
49474 // Try to fold:
49475 //   (or (and (m, y), (pandn m, x)))
49476 // into:
49477 //   (vselect m, y, x)
49478 // As a special case, try to fold:
49479 //   (or (and (m, (sub 0, x)), (pandn m, x)))
49480 // into:
49481 //   (sub (xor X, M), M)
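// For illustration (an assumed v16i8 case where every mask element is all-ones
// or all-zeros):
//   or (and M, Y), (pandn M, X) --> vselect M, Y, X   (PBLENDVB on SSE4.1)
// i.e. each lane takes Y where the mask is set and X where it is clear. For
// the special case, note that when M is all-ones, (xor X, M) - M == -X, and
// when M is zero it is just X, giving the conditional negate.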
49482 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
49483                                             const X86Subtarget &Subtarget) {
49484   assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
49485 
49486   EVT VT = N->getValueType(0);
49487   if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
49488         (VT.is256BitVector() && Subtarget.hasInt256())))
49489     return SDValue();
49490 
49491   SDValue X, Y, Mask;
49492   if (!matchLogicBlend(N, X, Y, Mask))
49493     return SDValue();
49494 
49495   // Validate that X, Y, and Mask are bitcasts, and see through them.
49496   Mask = peekThroughBitcasts(Mask);
49497   X = peekThroughBitcasts(X);
49498   Y = peekThroughBitcasts(Y);
49499 
49500   EVT MaskVT = Mask.getValueType();
49501   unsigned EltBits = MaskVT.getScalarSizeInBits();
49502 
49503   // TODO: Attempt to handle floating point cases as well?
49504   if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
49505     return SDValue();
49506 
49507   SDLoc DL(N);
49508 
49509   // Attempt to combine to conditional negate: (sub (xor X, M), M)
49510   if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
49511                                                            DAG, Subtarget))
49512     return Res;
49513 
49514   // PBLENDVB is only available on SSE 4.1.
49515   if (!Subtarget.hasSSE41())
49516     return SDValue();
49517 
49518   // If we have VPTERNLOG we should prefer that since PBLENDVB is multiple uops.
49519   if (Subtarget.hasVLX())
49520     return SDValue();
49521 
49522   MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
49523 
49524   X = DAG.getBitcast(BlendVT, X);
49525   Y = DAG.getBitcast(BlendVT, Y);
49526   Mask = DAG.getBitcast(BlendVT, Mask);
49527   Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
49528   return DAG.getBitcast(VT, Mask);
49529 }
49530 
49531 // Helper function for combineOrCmpEqZeroToCtlzSrl
49532 // Transforms:
49533 //   seteq(cmp x, 0)
49534 //   into:
49535 //   srl(ctlz x), log2(bitsize(x))
49536 // Input pattern is checked by caller.
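// For example (i32, a sketch): ctlz(x) is 32 iff x == 0, and 32 is the only
// value in the range [0, 32] with bit 5 set, so (ctlz(x) >> 5) is 1 exactly
// when x == 0 and 0 otherwise.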
49537 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) {
49538   SDValue Cmp = Op.getOperand(1);
49539   EVT VT = Cmp.getOperand(0).getValueType();
49540   unsigned Log2b = Log2_32(VT.getSizeInBits());
49541   SDLoc dl(Op);
49542   SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
49543   // The result of the shift is true or false, and on X86, the 32-bit
49544   // encoding of shr and lzcnt is more desirable.
49545   SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
49546   SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
49547                             DAG.getConstant(Log2b, dl, MVT::i8));
49548   return Scc;
49549 }
49550 
49551 // Try to transform:
49552 //   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
49553 //   into:
49554 //   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
49555 // Will also attempt to match more generic cases, eg:
49556 //   zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
49557 // Only applies if the target supports the FastLZCNT feature.
49558 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
49559                                            TargetLowering::DAGCombinerInfo &DCI,
49560                                            const X86Subtarget &Subtarget) {
49561   if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
49562     return SDValue();
49563 
49564   auto isORCandidate = [](SDValue N) {
49565     return (N->getOpcode() == ISD::OR && N->hasOneUse());
49566   };
49567 
49568   // Check that the zero extend is extending to 32 bits or more. The code generated by
49569   // srl(ctlz) for 16-bit or less variants of the pattern would require extra
49570   // instructions to clear the upper bits.
49571   if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
49572       !isORCandidate(N->getOperand(0)))
49573     return SDValue();
49574 
49575   // Check the node matches: setcc(eq, cmp 0)
49576   auto isSetCCCandidate = [](SDValue N) {
49577     return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
49578            X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
49579            N->getOperand(1).getOpcode() == X86ISD::CMP &&
49580            isNullConstant(N->getOperand(1).getOperand(1)) &&
49581            N->getOperand(1).getValueType().bitsGE(MVT::i32);
49582   };
49583 
49584   SDNode *OR = N->getOperand(0).getNode();
49585   SDValue LHS = OR->getOperand(0);
49586   SDValue RHS = OR->getOperand(1);
49587 
49588   // Save nodes matching or(or, setcc(eq, cmp 0)).
49589   SmallVector<SDNode *, 2> ORNodes;
49590   while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
49591           (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
49592     ORNodes.push_back(OR);
49593     OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
49594     LHS = OR->getOperand(0);
49595     RHS = OR->getOperand(1);
49596   }
49597 
49598   // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
49599   if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
49600       !isORCandidate(SDValue(OR, 0)))
49601     return SDValue();
49602 
49603   // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern; try to lower it
49604   // to
49605   // or(srl(ctlz),srl(ctlz)).
49606   // The dag combiner can then fold it into:
49607   // srl(or(ctlz, ctlz)).
49608   SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, DAG);
49609   SDValue Ret, NewRHS;
49610   if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG)))
49611     Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, NewLHS, NewRHS);
49612 
49613   if (!Ret)
49614     return SDValue();
49615 
49616   // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
49617   while (ORNodes.size() > 0) {
49618     OR = ORNodes.pop_back_val();
49619     LHS = OR->getOperand(0);
49620     RHS = OR->getOperand(1);
49621     // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
49622     if (RHS->getOpcode() == ISD::OR)
49623       std::swap(LHS, RHS);
49624     NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG);
49625     if (!NewRHS)
49626       return SDValue();
49627     Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, Ret, NewRHS);
49628   }
49629 
49630   return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
49631 }
49632 
49633 static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R,
49634                                    SDValue And1_L, SDValue And1_R,
49635                                    const SDLoc &DL, SelectionDAG &DAG) {
49636   if (!isBitwiseNot(And0_L, true) || !And0_L->hasOneUse())
49637     return SDValue();
49638   SDValue NotOp = And0_L->getOperand(0);
49639   if (NotOp == And1_R)
49640     std::swap(And1_R, And1_L);
49641   if (NotOp != And1_L)
49642     return SDValue();
49643 
49644   // (~(NotOp) & And0_R) | (NotOp & And1_R)
49645   // --> ((And0_R ^ And1_R) & NotOp) ^ And0_R
49646   EVT VT = And1_L->getValueType(0);
49647   SDValue Freeze_And0_R = DAG.getNode(ISD::FREEZE, SDLoc(), VT, And0_R);
49648   SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, And1_R, Freeze_And0_R);
49649   SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
49650   SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, Freeze_And0_R);
49651   return Xor1;
49652 }
49653 
49654 /// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
49655 /// equivalent `((x ^ y) & m) ^ y` pattern.
49656 /// This is typically a better representation for targets without a fused
49657 /// "and-not" operation. This function is intended to be called from a
49658 /// `TargetLowering::PerformDAGCombine` callback on `ISD::OR` nodes.
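/// A quick per-bit check of the identity: if the m bit is 1 then
/// ((x ^ y) & 1) ^ y == x, and if it is 0 then (0 & ...) ^ y == y, which is
/// exactly what (m & x) | (~m & y) selects.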
49659 static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {
49660   // Note that masked-merge variants using XOR or ADD expressions are
49661   // normalized to OR by InstCombine so we only check for OR.
49662   assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
49663   SDValue N0 = Node->getOperand(0);
49664   if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
49665     return SDValue();
49666   SDValue N1 = Node->getOperand(1);
49667   if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
49668     return SDValue();
49669 
49670   SDLoc DL(Node);
49671   SDValue N00 = N0->getOperand(0);
49672   SDValue N01 = N0->getOperand(1);
49673   SDValue N10 = N1->getOperand(0);
49674   SDValue N11 = N1->getOperand(1);
49675   if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
49676     return Result;
49677   if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
49678     return Result;
49679   if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
49680     return Result;
49681   if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
49682     return Result;
49683   return SDValue();
49684 }
49685 
49686 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
49687                          TargetLowering::DAGCombinerInfo &DCI,
49688                          const X86Subtarget &Subtarget) {
49689   SDValue N0 = N->getOperand(0);
49690   SDValue N1 = N->getOperand(1);
49691   EVT VT = N->getValueType(0);
49692   SDLoc dl(N);
49693   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49694 
49695   // If this is SSE1-only, convert to FOR to avoid scalarization.
49696   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
49697     return DAG.getBitcast(MVT::v4i32,
49698                           DAG.getNode(X86ISD::FOR, dl, MVT::v4f32,
49699                                       DAG.getBitcast(MVT::v4f32, N0),
49700                                       DAG.getBitcast(MVT::v4f32, N1)));
49701   }
49702 
49703   // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
49704   // TODO: Support multiple SrcOps.
49705   if (VT == MVT::i1) {
49706     SmallVector<SDValue, 2> SrcOps;
49707     SmallVector<APInt, 2> SrcPartials;
49708     if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps, &SrcPartials) &&
49709         SrcOps.size() == 1) {
49710       unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
49711       EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
49712       SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
49713       if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
49714         Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
49715       if (Mask) {
49716         assert(SrcPartials[0].getBitWidth() == NumElts &&
49717                "Unexpected partial reduction mask");
49718         SDValue ZeroBits = DAG.getConstant(0, dl, MaskVT);
49719         SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
49720         Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
49721         return DAG.getSetCC(dl, MVT::i1, Mask, ZeroBits, ISD::SETNE);
49722       }
49723     }
49724   }
49725 
49726   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
49727     return R;
49728 
49729   if (SDValue R = combineBitOpWithShift(N, DAG))
49730     return R;
49731 
49732   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
49733     return FPLogic;
49734 
49735   if (DCI.isBeforeLegalizeOps())
49736     return SDValue();
49737 
49738   if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
49739     return R;
49740 
49741   if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
49742     return R;
49743 
49744   if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
49745     return R;
49746 
49747   // (0 - SetCC) | C -> (zext (not SetCC)) * (C + 1) - 1 if we can get a LEA out of it.
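  // A quick check of the identity: if SetCC == 1 then (0 - 1) | C == -1 and
  // (zext !SetCC) * (C + 1) - 1 == 0 - 1 == -1; if SetCC == 0 then both sides
  // equal C.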
49748   if ((VT == MVT::i32 || VT == MVT::i64) &&
49749       N0.getOpcode() == ISD::SUB && N0.hasOneUse() &&
49750       isNullConstant(N0.getOperand(0))) {
49751     SDValue Cond = N0.getOperand(1);
49752     if (Cond.getOpcode() == ISD::ZERO_EXTEND && Cond.hasOneUse())
49753       Cond = Cond.getOperand(0);
49754 
49755     if (Cond.getOpcode() == X86ISD::SETCC && Cond.hasOneUse()) {
49756       if (auto *CN = dyn_cast<ConstantSDNode>(N1)) {
49757         uint64_t Val = CN->getZExtValue();
49758         if (Val == 1 || Val == 2 || Val == 3 || Val == 4 || Val == 7 || Val == 8) {
49759           X86::CondCode CCode = (X86::CondCode)Cond.getConstantOperandVal(0);
49760           CCode = X86::GetOppositeBranchCondition(CCode);
49761           SDValue NotCond = getSETCC(CCode, Cond.getOperand(1), SDLoc(Cond), DAG);
49762 
49763           SDValue R = DAG.getZExtOrTrunc(NotCond, dl, VT);
49764           R = DAG.getNode(ISD::MUL, dl, VT, R, DAG.getConstant(Val + 1, dl, VT));
49765           R = DAG.getNode(ISD::SUB, dl, VT, R, DAG.getConstant(1, dl, VT));
49766           return R;
49767         }
49768       }
49769     }
49770   }
49771 
49772   // Combine OR(X,KSHIFTL(Y,Elts/2)) -> CONCAT_VECTORS(X,Y) == KUNPCK(X,Y).
49773   // Combine OR(KSHIFTL(X,Elts/2),Y) -> CONCAT_VECTORS(Y,X) == KUNPCK(Y,X).
49774   // iff the upper elements of the non-shifted arg are zero.
49775   // KUNPCK requires 16+ bool vector elements.
49776   if (N0.getOpcode() == X86ISD::KSHIFTL || N1.getOpcode() == X86ISD::KSHIFTL) {
49777     unsigned NumElts = VT.getVectorNumElements();
49778     unsigned HalfElts = NumElts / 2;
49779     APInt UpperElts = APInt::getHighBitsSet(NumElts, HalfElts);
49780     if (NumElts >= 16 && N1.getOpcode() == X86ISD::KSHIFTL &&
49781         N1.getConstantOperandAPInt(1) == HalfElts &&
49782         DAG.MaskedVectorIsZero(N0, UpperElts)) {
49783       return DAG.getNode(
49784           ISD::CONCAT_VECTORS, dl, VT,
49785           extractSubVector(N0, 0, DAG, dl, HalfElts),
49786           extractSubVector(N1.getOperand(0), 0, DAG, dl, HalfElts));
49787     }
49788     if (NumElts >= 16 && N0.getOpcode() == X86ISD::KSHIFTL &&
49789         N0.getConstantOperandAPInt(1) == HalfElts &&
49790         DAG.MaskedVectorIsZero(N1, UpperElts)) {
49791       return DAG.getNode(
49792           ISD::CONCAT_VECTORS, dl, VT,
49793           extractSubVector(N1, 0, DAG, dl, HalfElts),
49794           extractSubVector(N0.getOperand(0), 0, DAG, dl, HalfElts));
49795     }
49796   }
49797 
49798   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
49799     // Attempt to recursively combine an OR of shuffles.
49800     SDValue Op(N, 0);
49801     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
49802       return Res;
49803 
49804     // If either operand is a constant mask, then only the elements that aren't
49805     // allones are actually demanded by the other operand.
49806     auto SimplifyUndemandedElts = [&](SDValue Op, SDValue OtherOp) {
49807       APInt UndefElts;
49808       SmallVector<APInt> EltBits;
49809       int NumElts = VT.getVectorNumElements();
49810       int EltSizeInBits = VT.getScalarSizeInBits();
49811       if (!getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts, EltBits))
49812         return false;
49813 
49814       APInt DemandedElts = APInt::getZero(NumElts);
49815       for (int I = 0; I != NumElts; ++I)
49816         if (!EltBits[I].isAllOnes())
49817           DemandedElts.setBit(I);
49818 
49819       return TLI.SimplifyDemandedVectorElts(OtherOp, DemandedElts, DCI);
49820     };
49821     if (SimplifyUndemandedElts(N0, N1) || SimplifyUndemandedElts(N1, N0)) {
49822       if (N->getOpcode() != ISD::DELETED_NODE)
49823         DCI.AddToWorklist(N);
49824       return SDValue(N, 0);
49825     }
49826   }
49827 
49828   // We should fold "masked merge" patterns when `andn` is not available.
49829   if (!Subtarget.hasBMI() && VT.isScalarInteger() && VT != MVT::i1)
49830     if (SDValue R = foldMaskedMerge(N, DAG))
49831       return R;
49832 
49833   return SDValue();
49834 }
49835 
49836 /// Try to turn tests against the signbit in the form of:
49837 ///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
49838 /// into:
49839 ///   SETGT(X, -1)
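/// For example (i64 X with an i8 result, a sketch):
///   xor (trunc (srl X, 63)), 1
/// computes "X is non-negative", which is exactly setgt X, -1.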
49840 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
49841   // This is only worth doing if the output type is i8 or i1.
49842   EVT ResultType = N->getValueType(0);
49843   if (ResultType != MVT::i8 && ResultType != MVT::i1)
49844     return SDValue();
49845 
49846   SDValue N0 = N->getOperand(0);
49847   SDValue N1 = N->getOperand(1);
49848 
49849   // We should be performing an xor against a truncated shift.
49850   if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
49851     return SDValue();
49852 
49853   // Make sure we are performing an xor against one.
49854   if (!isOneConstant(N1))
49855     return SDValue();
49856 
49857   // SetCC on x86 zero extends so only act on this if it's a logical shift.
49858   SDValue Shift = N0.getOperand(0);
49859   if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
49860     return SDValue();
49861 
49862   // Make sure we are truncating from one of i16, i32 or i64.
49863   EVT ShiftTy = Shift.getValueType();
49864   if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
49865     return SDValue();
49866 
49867   // Make sure the shift amount extracts the sign bit.
49868   if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
49869       Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
49870     return SDValue();
49871 
49872   // Create a greater-than comparison against -1.
49873   // N.B. Using SETGE against 0 works, but we want a canonical-looking
49874   // comparison; using SETGT matches up with what TranslateX86CC expects.
49875   SDLoc DL(N);
49876   SDValue ShiftOp = Shift.getOperand(0);
49877   EVT ShiftOpTy = ShiftOp.getValueType();
49878   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49879   EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
49880                                                *DAG.getContext(), ResultType);
49881   SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
49882                               DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
49883   if (SetCCResultType != ResultType)
49884     Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
49885   return Cond;
49886 }
49887 
49888 /// Turn vector tests of the signbit in the form of:
49889 ///   xor (sra X, elt_size(X)-1), -1
49890 /// into:
49891 ///   pcmpgt X, -1
49892 ///
49893 /// This should be called before type legalization because the pattern may not
49894 /// persist after that.
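/// For example (v4i32, a sketch):
///   xor (sra X, 31), splat(-1) --> pcmpgt X, splat(-1)
/// The sra smears each element's sign bit, and the xor with -1 inverts it,
/// which is the same as computing "X > -1" per element.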
49895 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
49896                                          const X86Subtarget &Subtarget) {
49897   EVT VT = N->getValueType(0);
49898   if (!VT.isSimple())
49899     return SDValue();
49900 
49901   switch (VT.getSimpleVT().SimpleTy) {
49902   default: return SDValue();
49903   case MVT::v16i8:
49904   case MVT::v8i16:
49905   case MVT::v4i32:
49906   case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
49907   case MVT::v32i8:
49908   case MVT::v16i16:
49909   case MVT::v8i32:
49910   case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
49911   }
49912 
49913   // There must be an arithmetic shift right before the xor, and the xor must be
49914   // a 'not' operation.
49915   SDValue Shift = N->getOperand(0);
49916   SDValue Ones = N->getOperand(1);
49917   if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
49918       !ISD::isBuildVectorAllOnes(Ones.getNode()))
49919     return SDValue();
49920 
49921   // The shift should be smearing the sign bit across each vector element.
49922   auto *ShiftAmt =
49923       isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
49924   if (!ShiftAmt ||
49925       ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
49926     return SDValue();
49927 
49928   // Create a greater-than comparison against -1. We don't use the more obvious
49929   // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
49930   return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
49931 }
49932 
49933 /// Detect patterns of truncation with unsigned saturation:
49934 ///
49935 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
49936 ///   Return the source value x to be truncated or SDValue() if the pattern was
49937 ///   not matched.
49938 ///
49939 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
49940 ///   where C1 >= 0 and C2 is unsigned max of destination type.
49941 ///
49942 ///    (truncate (smax (smin (x, C2), C1)) to dest_type)
49943 ///   where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
49944 ///
49945 ///   These two patterns are equivalent to:
49946 ///   (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
49947 ///   So return the smax(x, C1) value to be truncated or SDValue() if the
49948 ///   pattern was not matched.
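/// For example (i32 elements truncated to i8, a sketch): pattern 1 matches
///   trunc (umin (x, 255))
/// and pattern 2 matches trunc (smin (smax (x, C1), 255)) with C1 >= 0.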
49949 static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
49950                                  const SDLoc &DL) {
49951   EVT InVT = In.getValueType();
49952 
49953   // Saturation with truncation. We truncate from InVT to VT.
49954   assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
49955          "Unexpected types for truncate operation");
49956 
49957   // Match min/max and return limit value as a parameter.
49958   auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
49959     if (V.getOpcode() == Opcode &&
49960         ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
49961       return V.getOperand(0);
49962     return SDValue();
49963   };
49964 
49965   APInt C1, C2;
49966   if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
49967     // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
49968     // the element size of the destination type.
49969     if (C2.isMask(VT.getScalarSizeInBits()))
49970       return UMin;
49971 
49972   if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
49973     if (MatchMinMax(SMin, ISD::SMAX, C1))
49974       if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
49975         return SMin;
49976 
49977   if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
49978     if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
49979       if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
49980           C2.uge(C1)) {
49981         return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
49982       }
49983 
49984   return SDValue();
49985 }
49986 
49987 /// Detect patterns of truncation with signed saturation:
49988 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
49989 ///                  signed_max_of_dest_type)) to dest_type)
49990 /// or:
49991 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
49992 ///                  signed_min_of_dest_type)) to dest_type).
49993 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
49994 /// Return the source value to be truncated or SDValue() if the pattern was not
49995 /// matched.
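/// For example (i32 elements truncated to i8, a sketch): this matches
///   trunc (smin (smax (x, -128), 127))
/// and, with MatchPackUS, trunc (smin (smax (x, 0), 255)).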
49996 static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
49997   unsigned NumDstBits = VT.getScalarSizeInBits();
49998   unsigned NumSrcBits = In.getScalarValueSizeInBits();
49999   assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
50000 
50001   auto MatchMinMax = [](SDValue V, unsigned Opcode,
50002                         const APInt &Limit) -> SDValue {
50003     APInt C;
50004     if (V.getOpcode() == Opcode &&
50005         ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
50006       return V.getOperand(0);
50007     return SDValue();
50008   };
50009 
50010   APInt SignedMax, SignedMin;
50011   if (MatchPackUS) {
50012     SignedMax = APInt::getAllOnes(NumDstBits).zext(NumSrcBits);
50013     SignedMin = APInt(NumSrcBits, 0);
50014   } else {
50015     SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
50016     SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
50017   }
50018 
50019   if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
50020     if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
50021       return SMax;
50022 
50023   if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
50024     if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
50025       return SMin;
50026 
50027   return SDValue();
50028 }
50029 
50030 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
50031                                       SelectionDAG &DAG,
50032                                       const X86Subtarget &Subtarget) {
50033   if (!Subtarget.hasSSE2() || !VT.isVector())
50034     return SDValue();
50035 
50036   EVT SVT = VT.getVectorElementType();
50037   EVT InVT = In.getValueType();
50038   EVT InSVT = InVT.getVectorElementType();
50039 
50040   // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
50041   // split across two registers, we can use a packusdw+perm to clamp to 0-65535
50042   // and concatenate at the same time. Then we can use a final vpmovuswb to
50043   // clip to 0-255.
50044   if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
50045       InVT == MVT::v16i32 && VT == MVT::v16i8) {
50046     if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
50047       // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
50048       SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
50049                                            DL, DAG, Subtarget);
50050       assert(Mid && "Failed to pack!");
50051       return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
50052     }
50053   }
50054 
50055   // vXi32 truncate instructions are available with AVX512F.
50056   // vXi16 truncate instructions are only available with AVX512BW.
50057   // For 256-bit or smaller vectors, we require VLX.
50058   // FIXME: We could widen truncates to 512 to remove the VLX restriction.
50059   // If the result type is 256 bits or larger and we have disabled 512-bit
50060   // registers, we should go ahead and use the pack instructions if possible.
50061   bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
50062                        (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
50063                       (InVT.getSizeInBits() > 128) &&
50064                       (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
50065                       !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
50066 
50067   if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
50068       VT.getSizeInBits() >= 64 &&
50069       (SVT == MVT::i8 || SVT == MVT::i16) &&
50070       (InSVT == MVT::i16 || InSVT == MVT::i32)) {
50071     if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
50072       // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
50073       // Only do this when the result is at least 64 bits, or we'd be leaving
50074       // dangling PACKSSDW nodes.
50075       if (SVT == MVT::i8 && InSVT == MVT::i32) {
50076         EVT MidVT = VT.changeVectorElementType(MVT::i16);
50077         SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
50078                                              DAG, Subtarget);
50079         assert(Mid && "Failed to pack!");
50080         SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
50081                                            Subtarget);
50082         assert(V && "Failed to pack!");
50083         return V;
50084       } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
50085         return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
50086                                       Subtarget);
50087     }
50088     if (SDValue SSatVal = detectSSatPattern(In, VT))
50089       return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
50090                                     Subtarget);
50091   }
50092 
50093   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50094   if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
50095       Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI()) &&
50096       (SVT == MVT::i32 || SVT == MVT::i16 || SVT == MVT::i8)) {
50097     unsigned TruncOpc = 0;
50098     SDValue SatVal;
50099     if (SDValue SSatVal = detectSSatPattern(In, VT)) {
50100       SatVal = SSatVal;
50101       TruncOpc = X86ISD::VTRUNCS;
50102     } else if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL)) {
50103       SatVal = USatVal;
50104       TruncOpc = X86ISD::VTRUNCUS;
50105     }
50106     if (SatVal) {
50107       unsigned ResElts = VT.getVectorNumElements();
50108       // If the input type is less than 512 bits and we don't have VLX, we need
50109       // to widen to 512 bits.
50110       if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
50111         unsigned NumConcats = 512 / InVT.getSizeInBits();
50112         ResElts *= NumConcats;
50113         SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
50114         ConcatOps[0] = SatVal;
50115         InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
50116                                 NumConcats * InVT.getVectorNumElements());
50117         SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
50118       }
50119       // Widen the result if it's narrower than 128 bits.
50120       if (ResElts * SVT.getSizeInBits() < 128)
50121         ResElts = 128 / SVT.getSizeInBits();
50122       EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
50123       SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
50124       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
50125                          DAG.getIntPtrConstant(0, DL));
50126     }
50127   }
50128 
50129   return SDValue();
50130 }
50131 
50132 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
50133 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
50134 /// ISD::AVGCEILU (AVG) instruction.
50135 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
50136                                 const X86Subtarget &Subtarget,
50137                                 const SDLoc &DL) {
50138   if (!VT.isVector())
50139     return SDValue();
50140   EVT InVT = In.getValueType();
50141   unsigned NumElems = VT.getVectorNumElements();
50142 
50143   EVT ScalarVT = VT.getVectorElementType();
50144   if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) && NumElems >= 2))
50145     return SDValue();
50146 
50147   // InScalarVT is the intermediate type in the AVG pattern and should be wider
50148   // than the original input type (i8/i16).
50149   EVT InScalarVT = InVT.getVectorElementType();
50150   if (InScalarVT.getFixedSizeInBits() <= ScalarVT.getFixedSizeInBits())
50151     return SDValue();
50152 
50153   if (!Subtarget.hasSSE2())
50154     return SDValue();
50155 
50156   // Detect the following pattern:
50157   //
50158   //   %1 = zext <N x i8> %a to <N x i32>
50159   //   %2 = zext <N x i8> %b to <N x i32>
50160   //   %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
50161   //   %4 = add nuw nsw <N x i32> %3, %2
50162   //   %5 = lshr <N x i32> %4, <i32 1 x N>
50163   //   %6 = trunc <N x i32> %5 to <N x i8>
50164   //
50165   // In AVX512, the last instruction can also be a trunc store.
50166   if (In.getOpcode() != ISD::SRL)
50167     return SDValue();
50168 
50169   // A lambda that checks whether the given SDValue is a constant vector with
50170   // each element in the range [Min, Max].
50171   auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
50172     return ISD::matchUnaryPredicate(V, [Min, Max](ConstantSDNode *C) {
50173       return !(C->getAPIntValue().ult(Min) || C->getAPIntValue().ugt(Max));
50174     });
50175   };
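  // For example, IsConstVectorInRange(X, 1, 1) matches a constant vector whose
  // elements are all 1, such as the '+1' addend and the shift amount below.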
50176 
50177   auto IsZExtLike = [DAG = &DAG, ScalarVT](SDValue V) {
50178     unsigned MaxActiveBits = DAG->computeKnownBits(V).countMaxActiveBits();
50179     return MaxActiveBits <= ScalarVT.getSizeInBits();
50180   };
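  // In other words, V is treated as zext-like when its known bits already fit
  // in the narrow element type ScalarVT.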
50181 
50182   // Check if each element of the vector is right-shifted by one.
50183   SDValue LHS = In.getOperand(0);
50184   SDValue RHS = In.getOperand(1);
50185   if (!IsConstVectorInRange(RHS, 1, 1))
50186     return SDValue();
50187   if (LHS.getOpcode() != ISD::ADD)
50188     return SDValue();
50189 
50190   // Detect a pattern of a + b + 1 where the order doesn't matter.
50191   SDValue Operands[3];
50192   Operands[0] = LHS.getOperand(0);
50193   Operands[1] = LHS.getOperand(1);
50194 
50195   auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
50196                        ArrayRef<SDValue> Ops) {
50197     return DAG.getNode(ISD::AVGCEILU, DL, Ops[0].getValueType(), Ops);
50198   };
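  // ISD::AVGCEILU computes the element-wise unsigned rounding average,
  // (a + b + 1) >> 1, without requiring a wider intermediate type.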
50199 
50200   auto AVGSplitter = [&](std::array<SDValue, 2> Ops) {
50201     for (SDValue &Op : Ops)
50202       if (Op.getValueType() != VT)
50203         Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
50204     // Pad to a power-of-2 vector, split+apply and extract the original vector.
50205     unsigned NumElemsPow2 = PowerOf2Ceil(NumElems);
50206     EVT Pow2VT = EVT::getVectorVT(*DAG.getContext(), ScalarVT, NumElemsPow2);
50207     if (NumElemsPow2 != NumElems) {
50208       for (SDValue &Op : Ops) {
50209         SmallVector<SDValue, 32> EltsOfOp(NumElemsPow2, DAG.getUNDEF(ScalarVT));
50210         for (unsigned i = 0; i != NumElems; ++i) {
50211           SDValue Idx = DAG.getIntPtrConstant(i, DL);
50212           EltsOfOp[i] =
50213               DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, Op, Idx);
50214         }
50215         Op = DAG.getBuildVector(Pow2VT, DL, EltsOfOp);
50216       }
50217     }
50218     SDValue Res = SplitOpsAndApply(DAG, Subtarget, DL, Pow2VT, Ops, AVGBuilder);
50219     if (NumElemsPow2 == NumElems)
50220       return Res;
50221     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
50222                        DAG.getIntPtrConstant(0, DL));
50223   };
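  // For instance, a v6i8 average is padded to v8i8, split and lowered, and the
  // low v6i8 of the result is extracted back out.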
50224 
50225   // Take care of the case when one of the operands is a constant vector whose
50226   // elements are in the range [1, 256] (or [1, 65536] for i16 elements).
50227   if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
50228       IsZExtLike(Operands[0])) {
50229     // The pattern is detected. Subtract one from the constant vector, then
50230     // demote it and emit the ISD::AVGCEILU node.
50231     SDValue VecOnes = DAG.getConstant(1, DL, InVT);
50232     Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
50233     return AVGSplitter({Operands[0], Operands[1]});
50234   }
50235 
50236   // Matches 'add-like' patterns: add(Op0,Op1) and zext(or(Op0,Op1)).
50237   // Match the or case only if it's 'add-like', i.e. it can be replaced by an add.
50238   auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
50239     if (ISD::ADD == V.getOpcode()) {
50240       Op0 = V.getOperand(0);
50241       Op1 = V.getOperand(1);
50242       return true;
50243     }
50244     if (ISD::ZERO_EXTEND != V.getOpcode())
50245       return false;
50246     V = V.getOperand(0);
50247     if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
50248         !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
50249       return false;
50250     Op0 = V.getOperand(0);
50251     Op1 = V.getOperand(1);
50252     return true;
50253   };
50254 
50255   SDValue Op0, Op1;
50256   if (FindAddLike(Operands[0], Op0, Op1))
50257     std::swap(Operands[0], Operands[1]);
50258   else if (!FindAddLike(Operands[1], Op0, Op1))
50259     return SDValue();
50260   Operands[2] = Op0;
50261   Operands[1] = Op1;
50262 
50263   // Now we have three operands of two additions. Check that one of them is a
50264   // constant vector with ones, and the other two can be promoted from i8/i16.
50265   for (SDValue &Op : Operands) {
50266     if (!IsConstVectorInRange(Op, 1, 1))
50267       continue;
50268     std::swap(Op, Operands[2]);
50269 
50270     // Check if Operands[0] and Operands[1] are results of type promotion.
50271     for (int j = 0; j < 2; ++j)
50272       if (Operands[j].getValueType() != VT)
50273         if (!IsZExtLike(Operands[j]))
50274           return SDValue();
50275 
50276     // The pattern is detected; emit the ISD::AVGCEILU node(s).
50277     return AVGSplitter({Operands[0], Operands[1]});
50278   }
50279 
50280   return SDValue();
50281 }
50282 
50283 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
50284                            TargetLowering::DAGCombinerInfo &DCI,
50285                            const X86Subtarget &Subtarget) {
50286   LoadSDNode *Ld = cast<LoadSDNode>(N);
50287   EVT RegVT = Ld->getValueType(0);
50288   EVT MemVT = Ld->getMemoryVT();
50289   SDLoc dl(Ld);
50290   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50291 
50292   // For chips with slow 32-byte unaligned loads, break the 32-byte operation
50293   // into two 16-byte operations. Also split non-temporal aligned loads on
50294   // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
50295   ISD::LoadExtType Ext = Ld->getExtensionType();
50296   unsigned Fast;
50297   if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
50298       Ext == ISD::NON_EXTLOAD &&
50299       ((Ld->isNonTemporal() && !Subtarget.hasInt256() &&
50300         Ld->getAlign() >= Align(16)) ||
50301        (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
50302                                *Ld->getMemOperand(), &Fast) &&
50303         !Fast))) {
50304     unsigned NumElems = RegVT.getVectorNumElements();
50305     if (NumElems < 2)
50306       return SDValue();
50307 
50308     unsigned HalfOffset = 16;
50309     SDValue Ptr1 = Ld->getBasePtr();
50310     SDValue Ptr2 =
50311         DAG.getMemBasePlusOffset(Ptr1, TypeSize::Fixed(HalfOffset), dl);
50312     EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
50313                                   NumElems / 2);
50314     SDValue Load1 =
50315         DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
50316                     Ld->getOriginalAlign(),
50317                     Ld->getMemOperand()->getFlags());
50318     SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
50319                                 Ld->getPointerInfo().getWithOffset(HalfOffset),
50320                                 Ld->getOriginalAlign(),
50321                                 Ld->getMemOperand()->getFlags());
50322     SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
50323                              Load1.getValue(1), Load2.getValue(1));
50324 
50325     SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
50326     return DCI.CombineTo(N, NewVec, TF, true);
50327   }
50328 
50329   // Bool vector load - attempt to cast to an integer, as we have good
50330   // (vXiY *ext(vXi1 bitcast(iX))) handling.
50331   if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
50332       RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
50333     unsigned NumElts = RegVT.getVectorNumElements();
50334     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
50335     if (TLI.isTypeLegal(IntVT)) {
50336       SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
50337                                     Ld->getPointerInfo(),
50338                                     Ld->getOriginalAlign(),
50339                                     Ld->getMemOperand()->getFlags());
50340       SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
50341       return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
50342     }
50343   }
50344 
50345   // If we also broadcast this as a subvector to a wider type, then just extract
50346   // the lowest subvector.
50347   if (Ext == ISD::NON_EXTLOAD && Subtarget.hasAVX() && Ld->isSimple() &&
50348       (RegVT.is128BitVector() || RegVT.is256BitVector())) {
50349     SDValue Ptr = Ld->getBasePtr();
50350     SDValue Chain = Ld->getChain();
50351     for (SDNode *User : Ptr->uses()) {
50352       if (User != N && User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
50353           cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
50354           cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
50355           cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
50356               MemVT.getSizeInBits() &&
50357           !User->hasAnyUseOfValue(1) &&
50358           User->getValueSizeInBits(0).getFixedValue() >
50359               RegVT.getFixedSizeInBits()) {
50360         SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
50361                                            RegVT.getSizeInBits());
50362         Extract = DAG.getBitcast(RegVT, Extract);
50363         return DCI.CombineTo(N, Extract, SDValue(User, 1));
50364       }
50365     }
50366   }
50367 
50368   // Cast ptr32 and ptr64 pointers to the default address space before a load.
50369   unsigned AddrSpace = Ld->getAddressSpace();
50370   if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
50371       AddrSpace == X86AS::PTR32_UPTR) {
50372     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
50373     if (PtrVT != Ld->getBasePtr().getSimpleValueType()) {
50374       SDValue Cast =
50375           DAG.getAddrSpaceCast(dl, PtrVT, Ld->getBasePtr(), AddrSpace, 0);
50376       return DAG.getLoad(RegVT, dl, Ld->getChain(), Cast, Ld->getPointerInfo(),
50377                          Ld->getOriginalAlign(),
50378                          Ld->getMemOperand()->getFlags());
50379     }
50380   }
50381 
50382   return SDValue();
50383 }
50384 
50385 /// If V is a build vector of boolean constants and exactly one of those
50386 /// constants is true, return the operand index of that true element.
50387 /// Otherwise, return -1.
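/// For example, <i1 0, i1 0, i1 1, i1 0> yields 2, while an all-ones mask or a
/// non-constant mask yields -1.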
50388 static int getOneTrueElt(SDValue V) {
50389   // This needs to be a build vector of booleans.
50390   // TODO: Checking for the i1 type matches the IR definition for the mask,
50391   // but the mask check could be loosened to i8 or other types. That might
50392 /// also require checking more than 'allOnesValue'; e.g., the x86 HW
50393   // instructions only require that the MSB is set for each mask element.
50394   // The ISD::MSTORE comments/definition do not specify how the mask operand
50395   // is formatted.
50396   auto *BV = dyn_cast<BuildVectorSDNode>(V);
50397   if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
50398     return -1;
50399 
50400   int TrueIndex = -1;
50401   unsigned NumElts = BV->getValueType(0).getVectorNumElements();
50402   for (unsigned i = 0; i < NumElts; ++i) {
50403     const SDValue &Op = BV->getOperand(i);
50404     if (Op.isUndef())
50405       continue;
50406     auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
50407     if (!ConstNode)
50408       return -1;
50409     if (ConstNode->getAPIntValue().countTrailingOnes() >= 1) {
50410       // If we already found a one, this is too many.
50411       if (TrueIndex >= 0)
50412         return -1;
50413       TrueIndex = i;
50414     }
50415   }
50416   return TrueIndex;
50417 }
50418 
50419 /// Given a masked memory load/store operation, return true if it has one mask
50420 /// bit set. If it has one mask bit set, then also return the memory address of
50421 /// the scalar element to load/store, the vector index to insert/extract that
50422 /// scalar element, and the alignment for the scalar memory access.
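/// For example, a masked v8i16 operation whose mask has only element 3 set
/// yields Addr = BasePtr + 6, Index = 3 and Offset = 6.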
50423 static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
50424                                          SelectionDAG &DAG, SDValue &Addr,
50425                                          SDValue &Index, Align &Alignment,
50426                                          unsigned &Offset) {
50427   int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
50428   if (TrueMaskElt < 0)
50429     return false;
50430 
50431   // Get the address of the one scalar element that is specified by the mask
50432   // using the appropriate offset from the base pointer.
50433   EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
50434   Offset = 0;
50435   Addr = MaskedOp->getBasePtr();
50436   if (TrueMaskElt != 0) {
50437     Offset = TrueMaskElt * EltVT.getStoreSize();
50438     Addr = DAG.getMemBasePlusOffset(Addr, TypeSize::Fixed(Offset),
50439                                     SDLoc(MaskedOp));
50440   }
50441 
50442   Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
50443   Alignment = commonAlignment(MaskedOp->getOriginalAlign(),
50444                               EltVT.getStoreSize());
50445   return true;
50446 }
50447 
50448 /// If exactly one element of the mask is set for a non-extending masked load,
50449 /// it is a scalar load and vector insert.
50450 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
50451 /// mask have already been optimized in IR, so we don't bother with those here.
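/// For example, a masked load of v4f32 with mask <0,0,1,0> becomes a scalar
/// f32 load from BasePtr + 8 inserted into the pass-through vector at index 2.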
50452 static SDValue
50453 reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
50454                              TargetLowering::DAGCombinerInfo &DCI,
50455                              const X86Subtarget &Subtarget) {
50456   assert(ML->isUnindexed() && "Unexpected indexed masked load!");
50457   // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
50458   // However, some target hooks may need to be added to know when the transform
50459   // is profitable. Endianness would also have to be considered.
50460 
50461   SDValue Addr, VecIndex;
50462   Align Alignment;
50463   unsigned Offset;
50464   if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment, Offset))
50465     return SDValue();
50466 
50467   // Load the one scalar element that is specified by the mask using the
50468   // appropriate offset from the base pointer.
50469   SDLoc DL(ML);
50470   EVT VT = ML->getValueType(0);
50471   EVT EltVT = VT.getVectorElementType();
50472 
50473   EVT CastVT = VT;
50474   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
50475     EltVT = MVT::f64;
50476     CastVT = VT.changeVectorElementType(EltVT);
50477   }
50478 
50479   SDValue Load =
50480       DAG.getLoad(EltVT, DL, ML->getChain(), Addr,
50481                   ML->getPointerInfo().getWithOffset(Offset),
50482                   Alignment, ML->getMemOperand()->getFlags());
50483 
50484   SDValue PassThru = DAG.getBitcast(CastVT, ML->getPassThru());
50485 
50486   // Insert the loaded element into the appropriate place in the vector.
50487   SDValue Insert =
50488       DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, CastVT, PassThru, Load, VecIndex);
50489   Insert = DAG.getBitcast(VT, Insert);
50490   return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
50491 }
50492 
50493 static SDValue
50494 combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
50495                               TargetLowering::DAGCombinerInfo &DCI) {
50496   assert(ML->isUnindexed() && "Unexpected indexed masked load!");
50497   if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
50498     return SDValue();
50499 
50500   SDLoc DL(ML);
50501   EVT VT = ML->getValueType(0);
50502 
50503   // If we are loading the first and last elements of a vector, it is safe and
50504   // always faster to load the whole vector. Replace the masked load with a
50505   // vector load and select.
50506   unsigned NumElts = VT.getVectorNumElements();
50507   BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
50508   bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
50509   bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
50510   if (LoadFirstElt && LoadLastElt) {
50511     SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
50512                                 ML->getMemOperand());
50513     SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
50514                                   ML->getPassThru());
50515     return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
50516   }
50517 
50518   // Convert a masked load with a constant mask into a masked load and a select.
50519   // This allows the select operation to use a faster kind of select instruction
50520   // (for example, vblendvps -> vblendps).
50521 
50522   // Don't try this if the pass-through operand is already undefined. That would
50523   // cause an infinite loop because that's what we're about to create.
50524   if (ML->getPassThru().isUndef())
50525     return SDValue();
50526 
50527   if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
50528     return SDValue();
50529 
50530   // The new masked load has an undef pass-through operand. The select uses the
50531   // original pass-through operand.
50532   SDValue NewML = DAG.getMaskedLoad(
50533       VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
50534       DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
50535       ML->getAddressingMode(), ML->getExtensionType());
50536   SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
50537                                 ML->getPassThru());
50538 
50539   return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
50540 }
50541 
50542 static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
50543                                  TargetLowering::DAGCombinerInfo &DCI,
50544                                  const X86Subtarget &Subtarget) {
50545   auto *Mld = cast<MaskedLoadSDNode>(N);
50546 
50547   // TODO: Expanding load with constant mask may be optimized as well.
50548   if (Mld->isExpandingLoad())
50549     return SDValue();
50550 
50551   if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
50552     if (SDValue ScalarLoad =
50553             reduceMaskedLoadToScalarLoad(Mld, DAG, DCI, Subtarget))
50554       return ScalarLoad;
50555 
50556     // TODO: Do some AVX512 subsets benefit from this transform?
50557     if (!Subtarget.hasAVX512())
50558       if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
50559         return Blend;
50560   }
50561 
50562   // If the mask value has been legalized to a non-boolean vector, try to
50563   // simplify ops leading up to it. We only demand the MSB of each lane.
50564   SDValue Mask = Mld->getMask();
50565   if (Mask.getScalarValueSizeInBits() != 1) {
50566     EVT VT = Mld->getValueType(0);
50567     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50568     APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
50569     if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
50570       if (N->getOpcode() != ISD::DELETED_NODE)
50571         DCI.AddToWorklist(N);
50572       return SDValue(N, 0);
50573     }
50574     if (SDValue NewMask =
50575             TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
50576       return DAG.getMaskedLoad(
50577           VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
50578           NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
50579           Mld->getAddressingMode(), Mld->getExtensionType());
50580   }
50581 
50582   return SDValue();
50583 }
50584 
50585 /// If exactly one element of the mask is set for a non-truncating masked store,
50586 /// it is a vector extract and scalar store.
50587 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
50588 /// mask have already been optimized in IR, so we don't bother with those here.
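/// For example, a masked store of v4i32 with mask <0,1,0,0> becomes an
/// extract of element 1 followed by a 32-bit scalar store to BasePtr + 4.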
50589 static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
50590                                               SelectionDAG &DAG,
50591                                               const X86Subtarget &Subtarget) {
50592   // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
50593   // However, some target hooks may need to be added to know when the transform
50594   // is profitable. Endianness would also have to be considered.
50595 
50596   SDValue Addr, VecIndex;
50597   Align Alignment;
50598   unsigned Offset;
50599   if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment, Offset))
50600     return SDValue();
50601 
50602   // Extract the one scalar element that is actually being stored.
50603   SDLoc DL(MS);
50604   SDValue Value = MS->getValue();
50605   EVT VT = Value.getValueType();
50606   EVT EltVT = VT.getVectorElementType();
50607   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
50608     EltVT = MVT::f64;
50609     EVT CastVT = VT.changeVectorElementType(EltVT);
50610     Value = DAG.getBitcast(CastVT, Value);
50611   }
50612   SDValue Extract =
50613       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Value, VecIndex);
50614 
50615   // Store that element at the appropriate offset from the base pointer.
50616   return DAG.getStore(MS->getChain(), DL, Extract, Addr,
50617                       MS->getPointerInfo().getWithOffset(Offset),
50618                       Alignment, MS->getMemOperand()->getFlags());
50619 }
50620 
50621 static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
50622                                   TargetLowering::DAGCombinerInfo &DCI,
50623                                   const X86Subtarget &Subtarget) {
50624   MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
50625   if (Mst->isCompressingStore())
50626     return SDValue();
50627 
50628   EVT VT = Mst->getValue().getValueType();
50629   SDLoc dl(Mst);
50630   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50631 
50632   if (Mst->isTruncatingStore())
50633     return SDValue();
50634 
50635   if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG, Subtarget))
50636     return ScalarStore;
50637 
50638   // If the mask value has been legalized to a non-boolean vector, try to
50639   // simplify ops leading up to it. We only demand the MSB of each lane.
50640   SDValue Mask = Mst->getMask();
50641   if (Mask.getScalarValueSizeInBits() != 1) {
50642     APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
50643     if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
50644       if (N->getOpcode() != ISD::DELETED_NODE)
50645         DCI.AddToWorklist(N);
50646       return SDValue(N, 0);
50647     }
50648     if (SDValue NewMask =
50649             TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
50650       return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Mst->getValue(),
50651                                 Mst->getBasePtr(), Mst->getOffset(), NewMask,
50652                                 Mst->getMemoryVT(), Mst->getMemOperand(),
50653                                 Mst->getAddressingMode());
50654   }
50655 
50656   SDValue Value = Mst->getValue();
50657   if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
50658       TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
50659                             Mst->getMemoryVT())) {
50660     return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
50661                               Mst->getBasePtr(), Mst->getOffset(), Mask,
50662                               Mst->getMemoryVT(), Mst->getMemOperand(),
50663                               Mst->getAddressingMode(), true);
50664   }
50665 
50666   return SDValue();
50667 }
50668 
50669 static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
50670                             TargetLowering::DAGCombinerInfo &DCI,
50671                             const X86Subtarget &Subtarget) {
50672   StoreSDNode *St = cast<StoreSDNode>(N);
50673   EVT StVT = St->getMemoryVT();
50674   SDLoc dl(St);
50675   SDValue StoredVal = St->getValue();
50676   EVT VT = StoredVal.getValueType();
50677   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50678 
50679   // Convert a store of vXi1 into a store of iX and a bitcast.
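  // For example, a store of v16i1 becomes a bitcast to i16 and a normal 16-bit
  // scalar store.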
50680   if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
50681       VT.getVectorElementType() == MVT::i1) {
50682 
50683     EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
50684     StoredVal = DAG.getBitcast(NewVT, StoredVal);
50685 
50686     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50687                         St->getPointerInfo(), St->getOriginalAlign(),
50688                         St->getMemOperand()->getFlags());
50689   }
50690 
50691   // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
50692   // This will avoid a copy to k-register.
50693   if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
50694       StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
50695       StoredVal.getOperand(0).getValueType() == MVT::i8) {
50696     SDValue Val = StoredVal.getOperand(0);
50697     // We must store zeros to the unused bits.
50698     Val = DAG.getZeroExtendInReg(Val, dl, MVT::i1);
50699     return DAG.getStore(St->getChain(), dl, Val,
50700                         St->getBasePtr(), St->getPointerInfo(),
50701                         St->getOriginalAlign(),
50702                         St->getMemOperand()->getFlags());
50703   }
50704 
50705   // Widen v2i1/v4i1 stores to v8i1.
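  // For example, a v2i1 value is concatenated with three zero v2i1 vectors to
  // form the v8i1 that is actually stored.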
50706   if ((VT == MVT::v1i1 || VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
50707       Subtarget.hasAVX512()) {
50708     unsigned NumConcats = 8 / VT.getVectorNumElements();
50709     // We must store zeros to the unused bits.
50710     SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, VT));
50711     Ops[0] = StoredVal;
50712     StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
50713     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50714                         St->getPointerInfo(), St->getOriginalAlign(),
50715                         St->getMemOperand()->getFlags());
50716   }
50717 
50718   // Turn vXi1 stores of constants into a scalar store.
50719   if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
50720        VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
50721       ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
50722     // If it's a v64i1 store without 64-bit support, we need two stores.
50723     if (!DCI.isBeforeLegalize() && VT == MVT::v64i1 && !Subtarget.is64Bit()) {
50724       SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
50725                                       StoredVal->ops().slice(0, 32));
50726       Lo = combinevXi1ConstantToInteger(Lo, DAG);
50727       SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
50728                                       StoredVal->ops().slice(32, 32));
50729       Hi = combinevXi1ConstantToInteger(Hi, DAG);
50730 
50731       SDValue Ptr0 = St->getBasePtr();
50732       SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, TypeSize::Fixed(4), dl);
50733 
50734       SDValue Ch0 =
50735           DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
50736                        St->getOriginalAlign(),
50737                        St->getMemOperand()->getFlags());
50738       SDValue Ch1 =
50739           DAG.getStore(St->getChain(), dl, Hi, Ptr1,
50740                        St->getPointerInfo().getWithOffset(4),
50741                        St->getOriginalAlign(),
50742                        St->getMemOperand()->getFlags());
50743       return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
50744     }
50745 
50746     StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
50747     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50748                         St->getPointerInfo(), St->getOriginalAlign(),
50749                         St->getMemOperand()->getFlags());
50750   }
50751 
50752   // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
50753   // Sandy Bridge, perform two 16-byte stores.
50754   unsigned Fast;
50755   if (VT.is256BitVector() && StVT == VT &&
50756       TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
50757                              *St->getMemOperand(), &Fast) &&
50758       !Fast) {
50759     unsigned NumElems = VT.getVectorNumElements();
50760     if (NumElems < 2)
50761       return SDValue();
50762 
50763     return splitVectorStore(St, DAG);
50764   }
50765 
50766   // Split under-aligned vector non-temporal stores.
50767   if (St->isNonTemporal() && StVT == VT &&
50768       St->getAlign().value() < VT.getStoreSize()) {
50769     // ZMM/YMM nt-stores - either it can be stored as a series of shorter
50770     // vectors or the legalizer can scalarize it to use MOVNTI.
50771     if (VT.is256BitVector() || VT.is512BitVector()) {
50772       unsigned NumElems = VT.getVectorNumElements();
50773       if (NumElems < 2)
50774         return SDValue();
50775       return splitVectorStore(St, DAG);
50776     }
50777 
50778     // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
50779     // to use MOVNTI.
50780     if (VT.is128BitVector() && Subtarget.hasSSE2()) {
50781       MVT NTVT = Subtarget.hasSSE4A()
50782                      ? MVT::v2f64
50783                      : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
50784       return scalarizeVectorStore(St, NTVT, DAG);
50785     }
50786   }
50787 
50788   // Try to optimize v16i16->v16i8 truncating stores when BWI is not supported
50789   // but AVX512F is, by extending to v16i32 and truncating.
50790   if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
50791       St->getValue().getOpcode() == ISD::TRUNCATE &&
50792       St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
50793       TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
50794       St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
50795     SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32,
50796                               St->getValue().getOperand(0));
50797     return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
50798                              MVT::v16i8, St->getMemOperand());
50799   }
50800 
50801   // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
50802   if (!St->isTruncatingStore() &&
50803       (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
50804        StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
50805       StoredVal.hasOneUse() &&
50806       TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
50807     bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
50808     return EmitTruncSStore(IsSigned, St->getChain(),
50809                            dl, StoredVal.getOperand(0), St->getBasePtr(),
50810                            VT, St->getMemOperand(), DAG);
50811   }
50812 
50813   // Try to fold an extract_element(VTRUNC) pattern into a truncating store.
50814   if (!St->isTruncatingStore()) {
50815     auto IsExtractedElement = [](SDValue V) {
50816       if (V.getOpcode() == ISD::TRUNCATE && V.hasOneUse())
50817         V = V.getOperand(0);
50818       unsigned Opc = V.getOpcode();
50819       if ((Opc == ISD::EXTRACT_VECTOR_ELT || Opc == X86ISD::PEXTRW) &&
50820           isNullConstant(V.getOperand(1)) && V.hasOneUse() &&
50821           V.getOperand(0).hasOneUse())
50822         return V.getOperand(0);
50823       return SDValue();
50824     };
50825     if (SDValue Extract = IsExtractedElement(StoredVal)) {
50826       SDValue Trunc = peekThroughOneUseBitcasts(Extract);
50827       if (Trunc.getOpcode() == X86ISD::VTRUNC) {
50828         SDValue Src = Trunc.getOperand(0);
50829         MVT DstVT = Trunc.getSimpleValueType();
50830         MVT SrcVT = Src.getSimpleValueType();
50831         unsigned NumSrcElts = SrcVT.getVectorNumElements();
50832         unsigned NumTruncBits = DstVT.getScalarSizeInBits() * NumSrcElts;
50833         MVT TruncVT = MVT::getVectorVT(DstVT.getScalarType(), NumSrcElts);
50834         if (NumTruncBits == VT.getSizeInBits() &&
50835             TLI.isTruncStoreLegal(SrcVT, TruncVT)) {
50836           return DAG.getTruncStore(St->getChain(), dl, Src, St->getBasePtr(),
50837                                    TruncVT, St->getMemOperand());
50838         }
50839       }
50840     }
50841   }
50842 
50843   // Optimize trunc store (of multiple scalars) to shuffle and store.
50844   // First, pack all of the elements in one place. Next, store to memory
50845   // in fewer chunks.
50846   if (St->isTruncatingStore() && VT.isVector()) {
50847     // Check if we can detect an AVG pattern from the truncation. If yes,
50848     // replace the trunc store with a normal store of the result of the
50849     // ISD::AVGCEILU node.
50850     if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
50851       if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
50852                                          Subtarget, dl))
50853         return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
50854                             St->getPointerInfo(), St->getOriginalAlign(),
50855                             St->getMemOperand()->getFlags());
50856 
50857     if (TLI.isTruncStoreLegal(VT, StVT)) {
50858       if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
50859         return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
50860                                dl, Val, St->getBasePtr(),
50861                                St->getMemoryVT(), St->getMemOperand(), DAG);
50862       if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
50863                                           DAG, dl))
50864         return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
50865                                dl, Val, St->getBasePtr(),
50866                                St->getMemoryVT(), St->getMemOperand(), DAG);
50867     }
50868 
50869     return SDValue();
50870   }
50871 
50872   // Cast ptr32 and ptr64 pointers to the default address space before a store.
50873   unsigned AddrSpace = St->getAddressSpace();
50874   if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
50875       AddrSpace == X86AS::PTR32_UPTR) {
50876     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
50877     if (PtrVT != St->getBasePtr().getSimpleValueType()) {
50878       SDValue Cast =
50879           DAG.getAddrSpaceCast(dl, PtrVT, St->getBasePtr(), AddrSpace, 0);
50880       return DAG.getStore(St->getChain(), dl, StoredVal, Cast,
50881                           St->getPointerInfo(), St->getOriginalAlign(),
50882                           St->getMemOperand()->getFlags(), St->getAAInfo());
50883     }
50884   }
50885 
50886   // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
50887   // the FP state in cases where an emms may be missing.
50888   // A preferable solution to the general problem is to figure out the right
50889   // places to insert EMMS.  This qualifies as a quick hack.
50890 
50891   // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
50892   if (VT.getSizeInBits() != 64)
50893     return SDValue();
50894 
50895   const Function &F = DAG.getMachineFunction().getFunction();
50896   bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
50897   bool F64IsLegal =
50898       !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
50899   if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
50900       isa<LoadSDNode>(St->getValue()) &&
50901       cast<LoadSDNode>(St->getValue())->isSimple() &&
50902       St->getChain().hasOneUse() && St->isSimple()) {
50903     LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());
50904 
50905     if (!ISD::isNormalLoad(Ld))
50906       return SDValue();
50907 
50908     // Avoid the transformation if there are multiple uses of the loaded value.
50909     if (!Ld->hasNUsesOfValue(1, 0))
50910       return SDValue();
50911 
50912     SDLoc LdDL(Ld);
50913     SDLoc StDL(N);
50914     // Lower to a single movq load/store pair.
50915     SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
50916                                 Ld->getBasePtr(), Ld->getMemOperand());
50917 
50918     // Make sure new load is placed in same chain order.
50919     DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
50920     return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
50921                         St->getMemOperand());
50922   }
50923 
50924   // This is similar to the above case, but here we handle a scalar 64-bit
50925   // integer store that is extracted from a vector on a 32-bit target.
50926   // If we have SSE2, then we can treat it like a floating-point double
50927   // to get past legalization. The execution dependencies fixup pass will
50928   // choose the optimal machine instruction for the store if this really is
50929   // an integer or v2f32 rather than an f64.
50930   if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
50931       St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
50932     SDValue OldExtract = St->getOperand(1);
50933     SDValue ExtOp0 = OldExtract.getOperand(0);
50934     unsigned VecSize = ExtOp0.getValueSizeInBits();
50935     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
50936     SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
50937     SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
50938                                      BitCast, OldExtract.getOperand(1));
50939     return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
50940                         St->getPointerInfo(), St->getOriginalAlign(),
50941                         St->getMemOperand()->getFlags());
50942   }
50943 
50944   return SDValue();
50945 }
50946 
50947 static SDValue combineVEXTRACT_STORE(SDNode *N, SelectionDAG &DAG,
50948                                      TargetLowering::DAGCombinerInfo &DCI,
50949                                      const X86Subtarget &Subtarget) {
50950   auto *St = cast<MemIntrinsicSDNode>(N);
50951 
50952   SDValue StoredVal = N->getOperand(1);
50953   MVT VT = StoredVal.getSimpleValueType();
50954   EVT MemVT = St->getMemoryVT();
50955 
50956   // Figure out which elements we demand.
50957   unsigned StElts = MemVT.getSizeInBits() / VT.getScalarSizeInBits();
50958   APInt DemandedElts = APInt::getLowBitsSet(VT.getVectorNumElements(), StElts);
50959 
50960   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50961   if (TLI.SimplifyDemandedVectorElts(StoredVal, DemandedElts, DCI)) {
50962     if (N->getOpcode() != ISD::DELETED_NODE)
50963       DCI.AddToWorklist(N);
50964     return SDValue(N, 0);
50965   }
50966 
50967   return SDValue();
50968 }
50969 
50970 /// Return 'true' if this vector operation is "horizontal"
50971 /// and return the operands for the horizontal operation in LHS and RHS.  A
50972 /// horizontal operation performs the binary operation on successive elements
50973 /// of its first operand, then on successive elements of its second operand,
50974 /// returning the resulting values in a vector.  For example, if
50975 ///   A = < float a0, float a1, float a2, float a3 >
50976 /// and
50977 ///   B = < float b0, float b1, float b2, float b3 >
50978 /// then the result of doing a horizontal operation on A and B is
50979 ///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
50980 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
50981 /// A horizontal-op B, for some already available A and B, and if so then LHS is
50982 /// set to A, RHS to B, and the routine returns 'true'.
50983 static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS,
50984                               SelectionDAG &DAG, const X86Subtarget &Subtarget,
50985                               bool IsCommutative,
50986                               SmallVectorImpl<int> &PostShuffleMask) {
50987   // If either operand is undef, bail out. The binop should be simplified.
50988   if (LHS.isUndef() || RHS.isUndef())
50989     return false;
50990 
50991   // Look for the following pattern:
50992   //   A = < float a0, float a1, float a2, float a3 >
50993   //   B = < float b0, float b1, float b2, float b3 >
50994   // and
50995   //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
50996   //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
50997   // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
50998   // which is A horizontal-op B.
50999 
51000   MVT VT = LHS.getSimpleValueType();
51001   assert((VT.is128BitVector() || VT.is256BitVector()) &&
51002          "Unsupported vector type for horizontal add/sub");
51003   unsigned NumElts = VT.getVectorNumElements();
51004 
51005   auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
51006                         SmallVectorImpl<int> &ShuffleMask) {
51007     bool UseSubVector = false;
51008     if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
51009         Op.getOperand(0).getValueType().is256BitVector() &&
51010         llvm::isNullConstant(Op.getOperand(1))) {
51011       Op = Op.getOperand(0);
51012       UseSubVector = true;
51013     }
51014     SmallVector<SDValue, 2> SrcOps;
51015     SmallVector<int, 16> SrcMask, ScaledMask;
51016     SDValue BC = peekThroughBitcasts(Op);
51017     if (getTargetShuffleInputs(BC, SrcOps, SrcMask, DAG) &&
51018         !isAnyZero(SrcMask) && all_of(SrcOps, [BC](SDValue Op) {
51019           return Op.getValueSizeInBits() == BC.getValueSizeInBits();
51020         })) {
51021       resolveTargetShuffleInputsAndMask(SrcOps, SrcMask);
51022       if (!UseSubVector && SrcOps.size() <= 2 &&
51023           scaleShuffleElements(SrcMask, NumElts, ScaledMask)) {
51024         N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
51025         N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
51026         ShuffleMask.assign(ScaledMask.begin(), ScaledMask.end());
51027       }
51028       if (UseSubVector && SrcOps.size() == 1 &&
51029           scaleShuffleElements(SrcMask, 2 * NumElts, ScaledMask)) {
51030         std::tie(N0, N1) = DAG.SplitVector(SrcOps[0], SDLoc(Op));
51031         ArrayRef<int> Mask = ArrayRef<int>(ScaledMask).slice(0, NumElts);
51032         ShuffleMask.assign(Mask.begin(), Mask.end());
51033       }
51034     }
51035   };
51036 
51037   // View LHS in the form
51038   //   LHS = VECTOR_SHUFFLE A, B, LMask
51039   // If LHS is not a shuffle, then pretend it is the identity shuffle:
51040   //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
51041   // NOTE: A default initialized SDValue represents an UNDEF of type VT.
51042   SDValue A, B;
51043   SmallVector<int, 16> LMask;
51044   GetShuffle(LHS, A, B, LMask);
51045 
51046   // Likewise, view RHS in the form
51047   //   RHS = VECTOR_SHUFFLE C, D, RMask
51048   SDValue C, D;
51049   SmallVector<int, 16> RMask;
51050   GetShuffle(RHS, C, D, RMask);
51051 
51052   // At least one of the operands should be a vector shuffle.
51053   unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
51054   if (NumShuffles == 0)
51055     return false;
51056 
51057   if (LMask.empty()) {
51058     A = LHS;
51059     for (unsigned i = 0; i != NumElts; ++i)
51060       LMask.push_back(i);
51061   }
51062 
51063   if (RMask.empty()) {
51064     C = RHS;
51065     for (unsigned i = 0; i != NumElts; ++i)
51066       RMask.push_back(i);
51067   }
51068 
51069   // If we have a unary mask, ensure the other op is set to null.
51070   if (isUndefOrInRange(LMask, 0, NumElts))
51071     B = SDValue();
51072   else if (isUndefOrInRange(LMask, NumElts, NumElts * 2))
51073     A = SDValue();
51074 
51075   if (isUndefOrInRange(RMask, 0, NumElts))
51076     D = SDValue();
51077   else if (isUndefOrInRange(RMask, NumElts, NumElts * 2))
51078     C = SDValue();
51079 
51080   // If A and B occur in reverse order in RHS, then canonicalize by commuting
51081   // RHS operands and shuffle mask.
51082   if (A != C) {
51083     std::swap(C, D);
51084     ShuffleVectorSDNode::commuteMask(RMask);
51085   }
51086   // Check that the shuffles are both shuffling the same vectors.
51087   if (!(A == C && B == D))
51088     return false;
51089 
51090   PostShuffleMask.clear();
51091   PostShuffleMask.append(NumElts, SM_SentinelUndef);
51092 
51093   // LHS and RHS are now:
51094   //   LHS = shuffle A, B, LMask
51095   //   RHS = shuffle A, B, RMask
51096   // Check that the masks correspond to performing a horizontal operation.
51097   // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
51098   // so we just repeat the inner loop if this is a 256-bit op.
51099   unsigned Num128BitChunks = VT.getSizeInBits() / 128;
51100   unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
51101   unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
51102   assert((NumEltsPer128BitChunk % 2 == 0) &&
51103          "Vector type should have an even number of elements in each lane");
51104   for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
51105     for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
51106       // Ignore undefined components.
51107       int LIdx = LMask[i + j], RIdx = RMask[i + j];
51108       if (LIdx < 0 || RIdx < 0 ||
51109           (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
51110           (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
51111         continue;
51112 
51113       // Check that successive odd/even elements are being operated on. If not,
51114       // this is not a horizontal operation.
51115       if (!((RIdx & 1) == 1 && (LIdx + 1) == RIdx) &&
51116           !((LIdx & 1) == 1 && (RIdx + 1) == LIdx && IsCommutative))
51117         return false;
51118 
51119       // Compute the post-shuffle mask index based on where the element
51120       // is stored in the HOP result, and where it needs to be moved to.
51121       int Base = LIdx & ~1u;
51122       int Index = ((Base % NumEltsPer128BitChunk) / 2) +
51123                   ((Base % NumElts) & ~(NumEltsPer128BitChunk - 1));
51124 
51125       // The low half of the 128-bit result must choose from A.
51126       // The high half of the 128-bit result must choose from B,
51127       // unless B is undef. In that case, we are always choosing from A.
51128       if ((B && Base >= (int)NumElts) || (!B && i >= NumEltsPer64BitChunk))
51129         Index += NumEltsPer64BitChunk;
51130       PostShuffleMask[i + j] = Index;
51131     }
51132   }
51133 
51134   SDValue NewLHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
51135   SDValue NewRHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
51136 
51137   bool IsIdentityPostShuffle =
51138       isSequentialOrUndefInRange(PostShuffleMask, 0, NumElts, 0);
51139   if (IsIdentityPostShuffle)
51140     PostShuffleMask.clear();
51141 
51142   // Avoid 128-bit multi lane shuffles if pre-AVX2 and FP (integer will split).
51143   if (!IsIdentityPostShuffle && !Subtarget.hasAVX2() && VT.isFloatingPoint() &&
51144       isMultiLaneShuffleMask(128, VT.getScalarSizeInBits(), PostShuffleMask))
51145     return false;
51146 
51147   // If the source nodes are already used in HorizOps then always accept this.
51148   // Shuffle folding should merge these back together.
51149   bool FoundHorizLHS = llvm::any_of(NewLHS->uses(), [&](SDNode *User) {
51150     return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
51151   });
51152   bool FoundHorizRHS = llvm::any_of(NewRHS->uses(), [&](SDNode *User) {
51153     return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
51154   });
51155   bool ForceHorizOp = FoundHorizLHS && FoundHorizRHS;
51156 
51157   // Assume a SingleSource HOP if we only shuffle one input and don't need to
51158   // shuffle the result.
51159   if (!ForceHorizOp &&
51160       !shouldUseHorizontalOp(NewLHS == NewRHS &&
51161                                  (NumShuffles < 2 || !IsIdentityPostShuffle),
51162                              DAG, Subtarget))
51163     return false;
51164 
51165   LHS = DAG.getBitcast(VT, NewLHS);
51166   RHS = DAG.getBitcast(VT, NewRHS);
51167   return true;
51168 }
51169 
51170 // Try to synthesize horizontal (f)hadd/hsub from (f)adds/subs of shuffles.
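// For example, (fadd (shuffle A, B, <0,2,4,6>), (shuffle A, B, <1,3,5,7>))
// becomes X86ISD::FHADD A, B.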
51171 static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG,
51172                                          const X86Subtarget &Subtarget) {
51173   EVT VT = N->getValueType(0);
51174   unsigned Opcode = N->getOpcode();
51175   bool IsAdd = (Opcode == ISD::FADD) || (Opcode == ISD::ADD);
51176   SmallVector<int, 8> PostShuffleMask;
51177 
51178   switch (Opcode) {
51179   case ISD::FADD:
51180   case ISD::FSUB:
51181     if ((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
51182         (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
51183       SDValue LHS = N->getOperand(0);
51184       SDValue RHS = N->getOperand(1);
51185       auto HorizOpcode = IsAdd ? X86ISD::FHADD : X86ISD::FHSUB;
51186       if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
51187                             PostShuffleMask)) {
51188         SDValue HorizBinOp = DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
51189         if (!PostShuffleMask.empty())
51190           HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
51191                                             DAG.getUNDEF(VT), PostShuffleMask);
51192         return HorizBinOp;
51193       }
51194     }
51195     break;
51196   case ISD::ADD:
51197   case ISD::SUB:
51198     if (Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
51199                                  VT == MVT::v16i16 || VT == MVT::v8i32)) {
51200       SDValue LHS = N->getOperand(0);
51201       SDValue RHS = N->getOperand(1);
51202       auto HorizOpcode = IsAdd ? X86ISD::HADD : X86ISD::HSUB;
51203       if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
51204                             PostShuffleMask)) {
51205         auto HOpBuilder = [HorizOpcode](SelectionDAG &DAG, const SDLoc &DL,
51206                                         ArrayRef<SDValue> Ops) {
51207           return DAG.getNode(HorizOpcode, DL, Ops[0].getValueType(), Ops);
51208         };
51209         SDValue HorizBinOp = SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
51210                                               {LHS, RHS}, HOpBuilder);
51211         if (!PostShuffleMask.empty())
51212           HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
51213                                             DAG.getUNDEF(VT), PostShuffleMask);
51214         return HorizBinOp;
51215       }
51216     }
51217     break;
51218   }
51219 
51220   return SDValue();
51221 }
51222 
51223 //  Try to combine the following nodes
51224 //  t29: i64 = X86ISD::Wrapper TargetConstantPool:i64
51225 //    <i32 -2147483648[float -0.000000e+00]> 0
51226 //  t27: v16i32[v16f32],ch = X86ISD::VBROADCAST_LOAD
51227 //    <(load 4 from constant-pool)> t0, t29
51228 //  [t30: v16i32 = bitcast t27]
51229 //  t6: v16i32 = xor t7, t27[t30]
51230 //  t11: v16f32 = bitcast t6
51231 //  t21: v16f32 = X86ISD::VFMULC[X86ISD::VCFMULC] t11, t8
51232 //  into X86ISD::VFCMULC[X86ISD::VFMULC] if possible:
51233 //  t22: v16f32 = bitcast t7
51234 //  t23: v16f32 = X86ISD::VFCMULC[X86ISD::VFMULC] t8, t22
51235 //  t24: v32f16 = bitcast t23
51236 static SDValue combineFMulcFCMulc(SDNode *N, SelectionDAG &DAG,
51237                                   const X86Subtarget &Subtarget) {
51238   EVT VT = N->getValueType(0);
51239   SDValue LHS = N->getOperand(0);
51240   SDValue RHS = N->getOperand(1);
51241   int CombineOpcode =
51242       N->getOpcode() == X86ISD::VFCMULC ? X86ISD::VFMULC : X86ISD::VFCMULC;
51243   auto isConjugationConstant = [](const Constant *c) {
51244     if (const auto *CI = dyn_cast<ConstantInt>(c)) {
51245       APInt ConjugationInt32 = APInt(32, 0x80000000, true);
51246       APInt ConjugationInt64 = APInt(64, 0x8000000080000000ULL, true);
51247       switch (CI->getBitWidth()) {
51248       case 16:
51249         return false;
51250       case 32:
51251         return CI->getValue() == ConjugationInt32;
51252       case 64:
51253         return CI->getValue() == ConjugationInt64;
51254       default:
51255         llvm_unreachable("Unexpected bit width");
51256       }
51257     }
51258     if (const auto *CF = dyn_cast<ConstantFP>(c))
51259       return CF->isNegativeZeroValue();
51260     return false;
51261   };
51262   auto combineConjugation = [&](SDValue &r) {
51263     if (LHS->getOpcode() == ISD::BITCAST && RHS.hasOneUse()) {
51264       SDValue XOR = LHS.getOperand(0);
51265       if (XOR->getOpcode() == ISD::XOR && XOR.hasOneUse()) {
51266         SDValue XORRHS = XOR.getOperand(1);
51267         if (XORRHS.getOpcode() == ISD::BITCAST && XORRHS.hasOneUse())
51268           XORRHS = XORRHS.getOperand(0);
51269         if (XORRHS.getOpcode() == X86ISD::VBROADCAST_LOAD &&
51270             XORRHS.getOperand(1).getNumOperands()) {
51271           ConstantPoolSDNode *CP =
51272               dyn_cast<ConstantPoolSDNode>(XORRHS.getOperand(1).getOperand(0));
51273           if (CP && isConjugationConstant(CP->getConstVal())) {
51274             SelectionDAG::FlagInserter FlagsInserter(DAG, N);
51275             SDValue I2F = DAG.getBitcast(VT, LHS.getOperand(0).getOperand(0));
51276             SDValue FCMulC = DAG.getNode(CombineOpcode, SDLoc(N), VT, RHS, I2F);
51277             r = DAG.getBitcast(VT, FCMulC);
51278             return true;
51279           }
51280         }
51281       }
51282     }
51283     return false;
51284   };
51285   SDValue Res;
51286   if (combineConjugation(Res))
51287     return Res;
51288   std::swap(LHS, RHS);
51289   if (combineConjugation(Res))
51290     return Res;
51291   return Res;
51292 }
51293 
51294 //  Try to combine the following nodes:
51295 //  FADD(A, FMA(B, C, 0)) and FADD(A, FMUL(B, C)) to FMA(B, C, A)
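//  Here the multiplies are the packed complex forms (X86ISD::VFMULC/VFCMULC and
//  X86ISD::VFMADDC/VFCMADDC), so the operands are vXf16 values bitcast to f32
//  vectors with half as many elements, each f32 lane holding one complex f16 pair.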
51296 static SDValue combineFaddCFmul(SDNode *N, SelectionDAG &DAG,
51297                                 const X86Subtarget &Subtarget) {
51298   auto AllowContract = [&DAG](const SDNodeFlags &Flags) {
51299     return DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
51300            Flags.hasAllowContract();
51301   };
51302 
51303   auto HasNoSignedZero = [&DAG](const SDNodeFlags &Flags) {
51304     return DAG.getTarget().Options.NoSignedZerosFPMath ||
51305            Flags.hasNoSignedZeros();
51306   };
51307   auto IsVectorAllNegativeZero = [](const SDNode *N) {
51308     if (N->getOpcode() != X86ISD::VBROADCAST_LOAD)
51309       return false;
51310     assert(N->getSimpleValueType(0).getScalarType() == MVT::f32 &&
51311            "Unexpected vector type!");
51312     if (ConstantPoolSDNode *CP =
51313             dyn_cast<ConstantPoolSDNode>(N->getOperand(1)->getOperand(0))) {
51314       APInt AI = APInt(32, 0x80008000, true);
51315       if (const auto *CI = dyn_cast<ConstantInt>(CP->getConstVal()))
51316         return CI->getValue() == AI;
51317       if (const auto *CF = dyn_cast<ConstantFP>(CP->getConstVal()))
51318         return CF->getValue() == APFloat(APFloat::IEEEsingle(), AI);
51319     }
51320     return false;
51321   };
51322 
51323   if (N->getOpcode() != ISD::FADD || !Subtarget.hasFP16() ||
51324       !AllowContract(N->getFlags()))
51325     return SDValue();
51326 
51327   EVT VT = N->getValueType(0);
51328   if (VT != MVT::v8f16 && VT != MVT::v16f16 && VT != MVT::v32f16)
51329     return SDValue();
51330 
51331   SDValue LHS = N->getOperand(0);
51332   SDValue RHS = N->getOperand(1);
51333   bool IsConj;
51334   SDValue FAddOp1, MulOp0, MulOp1;
51335   auto GetCFmulFrom = [&MulOp0, &MulOp1, &IsConj, &AllowContract,
51336                        &IsVectorAllNegativeZero,
51337                        &HasNoSignedZero](SDValue N) -> bool {
51338     if (!N.hasOneUse() || N.getOpcode() != ISD::BITCAST)
51339       return false;
51340     SDValue Op0 = N.getOperand(0);
51341     unsigned Opcode = Op0.getOpcode();
51342     if (Op0.hasOneUse() && AllowContract(Op0->getFlags())) {
51343       if ((Opcode == X86ISD::VFMULC || Opcode == X86ISD::VFCMULC)) {
51344         MulOp0 = Op0.getOperand(0);
51345         MulOp1 = Op0.getOperand(1);
51346         IsConj = Opcode == X86ISD::VFCMULC;
51347         return true;
51348       }
51349       if ((Opcode == X86ISD::VFMADDC || Opcode == X86ISD::VFCMADDC) &&
51350           ((ISD::isBuildVectorAllZeros(Op0->getOperand(2).getNode()) &&
51351             HasNoSignedZero(Op0->getFlags())) ||
51352            IsVectorAllNegativeZero(Op0->getOperand(2).getNode()))) {
51353         MulOp0 = Op0.getOperand(0);
51354         MulOp1 = Op0.getOperand(1);
51355         IsConj = Opcode == X86ISD::VFCMADDC;
51356         return true;
51357       }
51358     }
51359     return false;
51360   };
51361 
51362   if (GetCFmulFrom(LHS))
51363     FAddOp1 = RHS;
51364   else if (GetCFmulFrom(RHS))
51365     FAddOp1 = LHS;
51366   else
51367     return SDValue();
51368 
51369   MVT CVT = MVT::getVectorVT(MVT::f32, VT.getVectorNumElements() / 2);
51370   FAddOp1 = DAG.getBitcast(CVT, FAddOp1);
51371   unsigned NewOp = IsConj ? X86ISD::VFCMADDC : X86ISD::VFMADDC;
51372   // FIXME: How do we handle when fast math flags of FADD are different from
51373   // CFMUL's?
51374   SDValue CFmul =
51375       DAG.getNode(NewOp, SDLoc(N), CVT, MulOp0, MulOp1, FAddOp1, N->getFlags());
51376   return DAG.getBitcast(VT, CFmul);
51377 }
51378 
51379 /// Do target-specific dag combines on floating-point adds/subs.
51380 static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
51381                                const X86Subtarget &Subtarget) {
51382   if (SDValue HOp = combineToHorizontalAddSub(N, DAG, Subtarget))
51383     return HOp;
51384 
51385   if (SDValue COp = combineFaddCFmul(N, DAG, Subtarget))
51386     return COp;
51387 
51388   return SDValue();
51389 }
51390 
51391 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
51392 /// the codegen.
51393 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
51394 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
51395 ///       anything that is guaranteed to be transformed by DAGCombiner.
51396 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
51397                                           const X86Subtarget &Subtarget,
51398                                           const SDLoc &DL) {
51399   assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
51400   SDValue Src = N->getOperand(0);
51401   unsigned SrcOpcode = Src.getOpcode();
51402   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51403 
51404   EVT VT = N->getValueType(0);
51405   EVT SrcVT = Src.getValueType();
51406 
51407   auto IsFreeTruncation = [VT](SDValue Op) {
51408     unsigned TruncSizeInBits = VT.getScalarSizeInBits();
51409 
51410     // See if this has been extended from a smaller/equal size to
51411     // the truncation size, allowing a truncation to combine with the extend.
51412     unsigned Opcode = Op.getOpcode();
51413     if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
51414          Opcode == ISD::ZERO_EXTEND) &&
51415         Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
51416       return true;
51417 
51418     // See if this is a single use constant which can be constant folded.
51419     // NOTE: We don't peek through bitcasts here because there is currently
51420     // no support for constant folding truncate+bitcast+vector_of_constants. So
51421     // we'll just end up with a truncate on both operands which will
51422     // get turned back into (truncate (binop)), causing an infinite loop.
51423     return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
51424   };
51425 
51426   auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
51427     SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
51428     SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
51429     return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
51430   };
51431 
51432   // Don't combine if the operation has other uses.
51433   if (!Src.hasOneUse())
51434     return SDValue();
51435 
51436   // Only support vector truncation for now.
51437   // TODO: i64 scalar math would benefit as well.
51438   if (!VT.isVector())
51439     return SDValue();
51440 
51441   // In most cases it's only worth pre-truncating if we're only facing the cost
51442   // of one truncation.
51443   // i.e. if one of the inputs will constant fold or the input is repeated.
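  // e.g. (v8i16 (trunc (add (zext v8i16 X), C))) is rewritten as
  // (add (trunc (zext X)), (trunc C)): the first truncate folds with the zext
  // and the second constant folds, so only the narrower add remains.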
51444   switch (SrcOpcode) {
51445   case ISD::MUL:
51446     // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
51447     // better to truncate if we have the chance.
51448     if (SrcVT.getScalarType() == MVT::i64 &&
51449         TLI.isOperationLegal(SrcOpcode, VT) &&
51450         !TLI.isOperationLegal(SrcOpcode, SrcVT))
51451       return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
51452     [[fallthrough]];
51453   case ISD::AND:
51454   case ISD::XOR:
51455   case ISD::OR:
51456   case ISD::ADD:
51457   case ISD::SUB: {
51458     SDValue Op0 = Src.getOperand(0);
51459     SDValue Op1 = Src.getOperand(1);
51460     if (TLI.isOperationLegal(SrcOpcode, VT) &&
51461         (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
51462       return TruncateArithmetic(Op0, Op1);
51463     break;
51464   }
51465   }
51466 
51467   return SDValue();
51468 }
51469 
51470 /// Truncate using ISD::AND mask and X86ISD::PACKUS.
51471 /// e.g. trunc <8 x i32> X to <8 x i16> -->
51472 /// MaskX = X & 0xffff (clear high bits to prevent saturation)
51473 /// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
51474 static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
51475                                                  const X86Subtarget &Subtarget,
51476                                                  SelectionDAG &DAG) {
51477   SDValue In = N->getOperand(0);
51478   EVT InVT = In.getValueType();
51479   EVT OutVT = N->getValueType(0);
51480 
51481   APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
51482                                     OutVT.getScalarSizeInBits());
51483   In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
51484   return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
51485 }
51486 
51487 /// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
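/// e.g. trunc <8 x i32> X to <8 x i16> -->
/// SignedX = sext_in_reg X, i16 (so PACKSS reproduces the value exactly)
/// packss (extract_subv SignedX, 0), (extract_subv SignedX, 1)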
51488 static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
51489                                                  const X86Subtarget &Subtarget,
51490                                                  SelectionDAG &DAG) {
51491   SDValue In = N->getOperand(0);
51492   EVT InVT = In.getValueType();
51493   EVT OutVT = N->getValueType(0);
51494   In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
51495                    DAG.getValueType(OutVT));
51496   return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
51497 }
51498 
51499 /// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
51500 /// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
51501 /// legalization the truncation will be translated into a BUILD_VECTOR whose
51502 /// elements are individually extracted from a vector and then truncated, and
51503 /// it is difficult to perform this optimization on that form.
51504 static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
51505                                        const X86Subtarget &Subtarget) {
51506   EVT OutVT = N->getValueType(0);
51507   if (!OutVT.isVector())
51508     return SDValue();
51509 
51510   SDValue In = N->getOperand(0);
51511   if (!In.getValueType().isSimple())
51512     return SDValue();
51513 
51514   EVT InVT = In.getValueType();
51515   unsigned NumElems = OutVT.getVectorNumElements();
51516 
51517   // AVX512 provides fast truncate ops.
51518   if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
51519     return SDValue();
51520 
51521   EVT OutSVT = OutVT.getVectorElementType();
51522   EVT InSVT = InVT.getVectorElementType();
51523   if (!((InSVT == MVT::i16 || InSVT == MVT::i32 || InSVT == MVT::i64) &&
51524         (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
51525         NumElems >= 8))
51526     return SDValue();
51527 
51528   // SSSE3's pshufb results in fewer instructions in the cases below.
51529   if (Subtarget.hasSSSE3() && NumElems == 8) {
51530     if (InSVT == MVT::i16)
51531       return SDValue();
51532     if (InSVT == MVT::i32 &&
51533         (OutSVT == MVT::i8 || !Subtarget.hasSSE41() || Subtarget.hasInt256()))
51534       return SDValue();
51535   }
51536 
51537   SDLoc DL(N);
51538   // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
51539   // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
51540   // truncate 2 x v4i32 to v8i16.
51541   if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
51542     return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
51543   if (InSVT == MVT::i32)
51544     return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
51545 
51546   return SDValue();
51547 }
51548 
51549 /// This function transforms vector truncation of 'extended sign-bits' or
51550 /// 'extended zero-bits' values, i.e. truncations from vXi16/vXi32/vXi64 to
51551 /// vXi8/vXi16/vXi32, into X86ISD::PACKSS/PACKUS operations.
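/// e.g. each element of a vXi32 comparison result is all-zeros or all-ones,
/// i.e. every bit is a sign bit, so a truncation to vXi16 can be lowered as a
/// PACKSS of its two halves.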
51552 static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
51553                                                SelectionDAG &DAG,
51554                                                const X86Subtarget &Subtarget) {
51555   // Requires SSE2.
51556   if (!Subtarget.hasSSE2())
51557     return SDValue();
51558 
51559   if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
51560     return SDValue();
51561 
51562   SDValue In = N->getOperand(0);
51563   if (!In.getValueType().isSimple())
51564     return SDValue();
51565 
51566   MVT VT = N->getValueType(0).getSimpleVT();
51567   MVT SVT = VT.getScalarType();
51568 
51569   MVT InVT = In.getValueType().getSimpleVT();
51570   MVT InSVT = InVT.getScalarType();
51571 
51572   // Check we have a truncation suited for PACKSS/PACKUS.
51573   if (!isPowerOf2_32(VT.getVectorNumElements()))
51574     return SDValue();
51575   if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
51576     return SDValue();
51577   if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
51578     return SDValue();
51579 
51580   // Truncation to sub-128bit vXi32 can be better handled with shuffles.
51581   if (SVT == MVT::i32 && VT.getSizeInBits() < 128)
51582     return SDValue();
51583 
51584   // AVX512 has fast truncate, but if the input is already going to be split,
51585   // there's no harm in trying pack.
51586   if (Subtarget.hasAVX512() &&
51587       !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
51588         InVT.is512BitVector())) {
51589     // PACK should still be worth it for 128-bit vectors if the sources were
51590     // originally concatenated from subvectors.
51591     SmallVector<SDValue> ConcatOps;
51592     if (VT.getSizeInBits() > 128 ||
51593         !collectConcatOps(In.getNode(), ConcatOps, DAG))
51594       return SDValue();
51595   }
51596 
51597   unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
51598   unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
51599 
51600   // Use PACKUS if the input has zero-bits that extend all the way to the
51601   // packed/truncated value. e.g. masks, zext_in_reg, etc.
51602   KnownBits Known = DAG.computeKnownBits(In);
51603   unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
51604   if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
51605     return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
51606 
51607   // Use PACKSS if the input has sign-bits that extend all the way to the
51608   // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
51609   unsigned NumSignBits = DAG.ComputeNumSignBits(In);
51610 
51611   // Don't use PACKSS for vXi64 -> vXi32 truncations unless we're dealing with
51612   // a sign splat. ComputeNumSignBits struggles to see through BITCASTs later
51613   // on and combines/simplifications can't then use it.
51614   if (SVT == MVT::i32 && NumSignBits != InSVT.getSizeInBits())
51615     return SDValue();
51616 
51617   unsigned MinSignBits = InSVT.getSizeInBits() - NumPackedSignBits;
51618   if (NumSignBits > MinSignBits)
51619     return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
51620 
51621   // If we have a srl that only generates signbits that we will discard in
51622   // the truncation then we can use PACKSS by converting the srl to a sra.
51623   // SimplifyDemandedBits often relaxes sra to srl so we need to reverse it.
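  // e.g. for a vXi32 -> vXi16 truncation, (srl X, 16) and (sra X, 16) have
  // identical low 16 bits, but the sra form also provides the extended sign
  // bits that PACKSS requires.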
51624   if (In.getOpcode() == ISD::SRL && N->isOnlyUserOf(In.getNode()))
51625     if (const APInt *ShAmt = DAG.getValidShiftAmountConstant(
51626             In, APInt::getAllOnes(VT.getVectorNumElements()))) {
51627       if (*ShAmt == MinSignBits) {
51628         SDValue NewIn = DAG.getNode(ISD::SRA, DL, InVT, In->ops());
51629         return truncateVectorWithPACK(X86ISD::PACKSS, VT, NewIn, DL, DAG,
51630                                       Subtarget);
51631       }
51632     }
51633 
51634   return SDValue();
51635 }
51636 
51637 // Try to form a MULHU or MULHS node by looking for
51638 // (trunc (srl (mul ext, ext), 16))
51639 // TODO: This is X86 specific because we want to be able to handle wide types
51640 // before type legalization. But we can only do it if the vector will be
51641 // legalized via widening/splitting. Type legalization can't handle promotion
51642 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
51643 // combiner.
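// e.g. with both inputs zero-extended from vXi16:
//   (v8i16 (trunc (srl (mul (zext v8i16 A), (zext v8i16 B)), 16)))
//     --> (v8i16 (mulhu A, B))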
51644 static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
51645                             SelectionDAG &DAG, const X86Subtarget &Subtarget) {
51646   // First instruction should be a right shift of a multiply.
51647   if (Src.getOpcode() != ISD::SRL ||
51648       Src.getOperand(0).getOpcode() != ISD::MUL)
51649     return SDValue();
51650 
51651   if (!Subtarget.hasSSE2())
51652     return SDValue();
51653 
51654   // Only handle vXi16 types that are at least 128-bits unless they will be
51655   // widened.
51656   if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
51657     return SDValue();
51658 
51659   // Input type should be at least vXi32.
51660   EVT InVT = Src.getValueType();
51661   if (InVT.getVectorElementType().getSizeInBits() < 32)
51662     return SDValue();
51663 
51664   // Need a shift by 16.
51665   APInt ShiftAmt;
51666   if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
51667       ShiftAmt != 16)
51668     return SDValue();
51669 
51670   SDValue LHS = Src.getOperand(0).getOperand(0);
51671   SDValue RHS = Src.getOperand(0).getOperand(1);
51672 
51673   // Count leading sign/zero bits on both inputs - if there are enough then
51674   // truncation back to vXi16 will be cheap - either as a pack/shuffle
51675   // sequence or using AVX512 truncations. If the inputs are sext/zext then the
51676   // truncations may actually be free by peeking through to the ext source.
51677   auto IsSext = [&DAG](SDValue V) {
51678     return DAG.ComputeMaxSignificantBits(V) <= 16;
51679   };
51680   auto IsZext = [&DAG](SDValue V) {
51681     return DAG.computeKnownBits(V).countMaxActiveBits() <= 16;
51682   };
51683 
51684   bool IsSigned = IsSext(LHS) && IsSext(RHS);
51685   bool IsUnsigned = IsZext(LHS) && IsZext(RHS);
51686   if (!IsSigned && !IsUnsigned)
51687     return SDValue();
51688 
51689   // Check if both inputs are extensions, which will be removed by truncation.
51690   bool IsTruncateFree = (LHS.getOpcode() == ISD::SIGN_EXTEND ||
51691                          LHS.getOpcode() == ISD::ZERO_EXTEND) &&
51692                         (RHS.getOpcode() == ISD::SIGN_EXTEND ||
51693                          RHS.getOpcode() == ISD::ZERO_EXTEND) &&
51694                         LHS.getOperand(0).getScalarValueSizeInBits() <= 16 &&
51695                         RHS.getOperand(0).getScalarValueSizeInBits() <= 16;
51696 
51697   // For AVX2+ targets, with the upper bits known zero, we can perform MULHU on
51698   // the (bitcasted) inputs directly, and then cheaply pack/truncate the result
51699   // (upper elts will be zero). Don't attempt this with just AVX512F as MULHU
51700   // will have to split anyway.
51701   unsigned InSizeInBits = InVT.getSizeInBits();
51702   if (IsUnsigned && !IsTruncateFree && Subtarget.hasInt256() &&
51703       !(Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.is256BitVector()) &&
51704       (InSizeInBits % 16) == 0) {
51705     EVT BCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
51706                                 InVT.getSizeInBits() / 16);
51707     SDValue Res = DAG.getNode(ISD::MULHU, DL, BCVT, DAG.getBitcast(BCVT, LHS),
51708                               DAG.getBitcast(BCVT, RHS));
51709     return DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getBitcast(InVT, Res));
51710   }
51711 
51712   // Truncate back to source type.
51713   LHS = DAG.getNode(ISD::TRUNCATE, DL, VT, LHS);
51714   RHS = DAG.getNode(ISD::TRUNCATE, DL, VT, RHS);
51715 
51716   unsigned Opc = IsSigned ? ISD::MULHS : ISD::MULHU;
51717   return DAG.getNode(Opc, DL, VT, LHS, RHS);
51718 }
51719 
51720 // Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
51721 // from one vector with signed bytes from another vector, adds together
51722 // adjacent pairs of 16-bit products, and saturates the result before
51723 // truncating to 16-bits.
51724 //
51725 // Which looks something like this:
51726 // (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
51727 //                 (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
51728 static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
51729                                const X86Subtarget &Subtarget,
51730                                const SDLoc &DL) {
51731   if (!VT.isVector() || !Subtarget.hasSSSE3())
51732     return SDValue();
51733 
51734   unsigned NumElems = VT.getVectorNumElements();
51735   EVT ScalarVT = VT.getVectorElementType();
51736   if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
51737     return SDValue();
51738 
51739   SDValue SSatVal = detectSSatPattern(In, VT);
51740   if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
51741     return SDValue();
51742 
51743   // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
51744   // of multiplies from even/odd elements.
51745   SDValue N0 = SSatVal.getOperand(0);
51746   SDValue N1 = SSatVal.getOperand(1);
51747 
51748   if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
51749     return SDValue();
51750 
51751   SDValue N00 = N0.getOperand(0);
51752   SDValue N01 = N0.getOperand(1);
51753   SDValue N10 = N1.getOperand(0);
51754   SDValue N11 = N1.getOperand(1);
51755 
51756   // TODO: Handle constant vectors and use knownbits/computenumsignbits?
51757   // Canonicalize zero_extend to LHS.
51758   if (N01.getOpcode() == ISD::ZERO_EXTEND)
51759     std::swap(N00, N01);
51760   if (N11.getOpcode() == ISD::ZERO_EXTEND)
51761     std::swap(N10, N11);
51762 
51763   // Ensure we have a zero_extend and a sign_extend.
51764   if (N00.getOpcode() != ISD::ZERO_EXTEND ||
51765       N01.getOpcode() != ISD::SIGN_EXTEND ||
51766       N10.getOpcode() != ISD::ZERO_EXTEND ||
51767       N11.getOpcode() != ISD::SIGN_EXTEND)
51768     return SDValue();
51769 
51770   // Peek through the extends.
51771   N00 = N00.getOperand(0);
51772   N01 = N01.getOperand(0);
51773   N10 = N10.getOperand(0);
51774   N11 = N11.getOperand(0);
51775 
51776   // Ensure the extend is from vXi8.
51777   if (N00.getValueType().getVectorElementType() != MVT::i8 ||
51778       N01.getValueType().getVectorElementType() != MVT::i8 ||
51779       N10.getValueType().getVectorElementType() != MVT::i8 ||
51780       N11.getValueType().getVectorElementType() != MVT::i8)
51781     return SDValue();
51782 
51783   // All inputs should be build_vectors.
51784   if (N00.getOpcode() != ISD::BUILD_VECTOR ||
51785       N01.getOpcode() != ISD::BUILD_VECTOR ||
51786       N10.getOpcode() != ISD::BUILD_VECTOR ||
51787       N11.getOpcode() != ISD::BUILD_VECTOR)
51788     return SDValue();
51789 
51790   // N00/N10 are zero extended. N01/N11 are sign extended.
51791 
51792   // For each element, we need to ensure we have an odd element from one vector
51793   // multiplied by the odd element of another vector, and the even element from
51794   // one of the same vectors multiplied by the even element from the other
51795   // vector. So for each element i, we need to make sure this computation is
51796   // being performed:
51797   //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
51798   SDValue ZExtIn, SExtIn;
51799   for (unsigned i = 0; i != NumElems; ++i) {
51800     SDValue N00Elt = N00.getOperand(i);
51801     SDValue N01Elt = N01.getOperand(i);
51802     SDValue N10Elt = N10.getOperand(i);
51803     SDValue N11Elt = N11.getOperand(i);
51804     // TODO: Be more tolerant to undefs.
51805     if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51806         N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51807         N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51808         N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
51809       return SDValue();
51810     auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
51811     auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
51812     auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
51813     auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
51814     if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
51815       return SDValue();
51816     unsigned IdxN00 = ConstN00Elt->getZExtValue();
51817     unsigned IdxN01 = ConstN01Elt->getZExtValue();
51818     unsigned IdxN10 = ConstN10Elt->getZExtValue();
51819     unsigned IdxN11 = ConstN11Elt->getZExtValue();
51820     // Add is commutative so indices can be reordered.
51821     if (IdxN00 > IdxN10) {
51822       std::swap(IdxN00, IdxN10);
51823       std::swap(IdxN01, IdxN11);
51824     }
51825     // N0 indices must be the even element. N1 indices must be the next odd element.
51826     if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
51827         IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
51828       return SDValue();
51829     SDValue N00In = N00Elt.getOperand(0);
51830     SDValue N01In = N01Elt.getOperand(0);
51831     SDValue N10In = N10Elt.getOperand(0);
51832     SDValue N11In = N11Elt.getOperand(0);
51833     // The first time we find an input, capture it.
51834     if (!ZExtIn) {
51835       ZExtIn = N00In;
51836       SExtIn = N01In;
51837     }
51838     if (ZExtIn != N00In || SExtIn != N01In ||
51839         ZExtIn != N10In || SExtIn != N11In)
51840       return SDValue();
51841   }
51842 
51843   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
51844                          ArrayRef<SDValue> Ops) {
51845     // Shrink by adding truncate nodes and let DAGCombine fold with the
51846     // sources.
51847     EVT InVT = Ops[0].getValueType();
51848     assert(InVT.getScalarType() == MVT::i8 &&
51849            "Unexpected scalar element type");
51850     assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
51851     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
51852                                  InVT.getVectorNumElements() / 2);
51853     return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
51854   };
51855   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
51856                           PMADDBuilder);
51857 }
51858 
51859 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
51860                                const X86Subtarget &Subtarget) {
51861   EVT VT = N->getValueType(0);
51862   SDValue Src = N->getOperand(0);
51863   SDLoc DL(N);
51864 
51865   // Attempt to pre-truncate inputs to arithmetic ops instead.
51866   if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
51867     return V;
51868 
51869   // Try to detect AVG pattern first.
51870   if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
51871     return Avg;
51872 
51873   // Try to detect PMADD
51874   if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
51875     return PMAdd;
51876 
51877   // Try to combine truncation with signed/unsigned saturation.
51878   if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
51879     return Val;
51880 
51881   // Try to combine PMULHUW/PMULHW for vXi16.
51882   if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
51883     return V;
51884 
51885   // Detect truncation to i32 of a bitcast whose source is a direct MMX result,
51886   // and use MMX_MOVD2W to extract its low 32 bits.
51887   if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
51888     SDValue BCSrc = Src.getOperand(0);
51889     if (BCSrc.getValueType() == MVT::x86mmx)
51890       return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
51891   }
51892 
51893   // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
51894   if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
51895     return V;
51896 
51897   return combineVectorTruncation(N, DAG, Subtarget);
51898 }
51899 
51900 static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
51901                              TargetLowering::DAGCombinerInfo &DCI) {
51902   EVT VT = N->getValueType(0);
51903   SDValue In = N->getOperand(0);
51904   SDLoc DL(N);
51905 
51906   if (SDValue SSatVal = detectSSatPattern(In, VT))
51907     return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
51908   if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL))
51909     return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
51910 
51911   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51912   APInt DemandedMask(APInt::getAllOnes(VT.getScalarSizeInBits()));
51913   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
51914     return SDValue(N, 0);
51915 
51916   return SDValue();
51917 }
51918 
51919 /// Returns the negated value if the node \p N flips sign of FP value.
51920 ///
51921 /// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
51922 /// or FSUB(0, x)
51923 /// AVX512F does not have FXOR, so FNEG is lowered as
51924 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
51925 /// In this case we go through all bitcasts.
51926 /// This also recognizes splat of a negated value and returns the splat of that
51927 /// value.
51928 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
51929   if (N->getOpcode() == ISD::FNEG)
51930     return N->getOperand(0);
51931 
51932   // Don't recurse exponentially.
51933   if (Depth > SelectionDAG::MaxRecursionDepth)
51934     return SDValue();
51935 
51936   unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
51937 
51938   SDValue Op = peekThroughBitcasts(SDValue(N, 0));
51939   EVT VT = Op->getValueType(0);
51940 
51941   // Make sure the element size doesn't change.
51942   if (VT.getScalarSizeInBits() != ScalarSize)
51943     return SDValue();
51944 
51945   unsigned Opc = Op.getOpcode();
51946   switch (Opc) {
51947   case ISD::VECTOR_SHUFFLE: {
51948     // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
51949     // of this is VECTOR_SHUFFLE(-VEC1, UNDEF).  The mask can be anything here.
51950     if (!Op.getOperand(1).isUndef())
51951       return SDValue();
51952     if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
51953       if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
51954         return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
51955                                     cast<ShuffleVectorSDNode>(Op)->getMask());
51956     break;
51957   }
51958   case ISD::INSERT_VECTOR_ELT: {
51959     // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
51960     // -V, INDEX).
51961     SDValue InsVector = Op.getOperand(0);
51962     SDValue InsVal = Op.getOperand(1);
51963     if (!InsVector.isUndef())
51964       return SDValue();
51965     if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
51966       if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
51967         return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
51968                            NegInsVal, Op.getOperand(2));
51969     break;
51970   }
51971   case ISD::FSUB:
51972   case ISD::XOR:
51973   case X86ISD::FXOR: {
51974     SDValue Op1 = Op.getOperand(1);
51975     SDValue Op0 = Op.getOperand(0);
51976 
51977     // For XOR and FXOR, we want to check if constant
51978     // bits of Op1 are sign bit masks. For FSUB, we
51979     // have to check if constant bits of Op0 are sign
51980     // bit masks and hence we swap the operands.
51981     if (Opc == ISD::FSUB)
51982       std::swap(Op0, Op1);
51983 
51984     APInt UndefElts;
51985     SmallVector<APInt, 16> EltBits;
51986     // Extract constant bits and see if they are all
51987     // sign bit masks. Ignore the undef elements.
51988     if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
51989                                       /* AllowWholeUndefs */ true,
51990                                       /* AllowPartialUndefs */ false)) {
51991       for (unsigned I = 0, E = EltBits.size(); I < E; I++)
51992         if (!UndefElts[I] && !EltBits[I].isSignMask())
51993           return SDValue();
51994 
51995       // Only allow bitcast from correctly-sized constant.
51996       Op0 = peekThroughBitcasts(Op0);
51997       if (Op0.getScalarValueSizeInBits() == ScalarSize)
51998         return Op0;
51999     }
52000     break;
52001   } // case
52002   } // switch
52003 
52004   return SDValue();
52005 }
52006 
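/// Return the FMA-family opcode that results from negating the multiplicands
/// (NegMul), the accumulator (NegAcc) and/or the final result (NegRes) of the
/// given FMA-family opcode.
/// e.g. negateFMAOpcode(ISD::FMA, /*NegMul=*/true, false, false) == X86ISD::FNMADD.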
52007 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
52008                                 bool NegRes) {
52009   if (NegMul) {
52010     switch (Opcode) {
52011     default: llvm_unreachable("Unexpected opcode");
52012     case ISD::FMA:              Opcode = X86ISD::FNMADD;        break;
52013     case ISD::STRICT_FMA:       Opcode = X86ISD::STRICT_FNMADD; break;
52014     case X86ISD::FMADD_RND:     Opcode = X86ISD::FNMADD_RND;    break;
52015     case X86ISD::FMSUB:         Opcode = X86ISD::FNMSUB;        break;
52016     case X86ISD::STRICT_FMSUB:  Opcode = X86ISD::STRICT_FNMSUB; break;
52017     case X86ISD::FMSUB_RND:     Opcode = X86ISD::FNMSUB_RND;    break;
52018     case X86ISD::FNMADD:        Opcode = ISD::FMA;              break;
52019     case X86ISD::STRICT_FNMADD: Opcode = ISD::STRICT_FMA;       break;
52020     case X86ISD::FNMADD_RND:    Opcode = X86ISD::FMADD_RND;     break;
52021     case X86ISD::FNMSUB:        Opcode = X86ISD::FMSUB;         break;
52022     case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FMSUB;  break;
52023     case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FMSUB_RND;     break;
52024     }
52025   }
52026 
52027   if (NegAcc) {
52028     switch (Opcode) {
52029     default: llvm_unreachable("Unexpected opcode");
52030     case ISD::FMA:              Opcode = X86ISD::FMSUB;         break;
52031     case ISD::STRICT_FMA:       Opcode = X86ISD::STRICT_FMSUB;  break;
52032     case X86ISD::FMADD_RND:     Opcode = X86ISD::FMSUB_RND;     break;
52033     case X86ISD::FMSUB:         Opcode = ISD::FMA;              break;
52034     case X86ISD::STRICT_FMSUB:  Opcode = ISD::STRICT_FMA;       break;
52035     case X86ISD::FMSUB_RND:     Opcode = X86ISD::FMADD_RND;     break;
52036     case X86ISD::FNMADD:        Opcode = X86ISD::FNMSUB;        break;
52037     case X86ISD::STRICT_FNMADD: Opcode = X86ISD::STRICT_FNMSUB; break;
52038     case X86ISD::FNMADD_RND:    Opcode = X86ISD::FNMSUB_RND;    break;
52039     case X86ISD::FNMSUB:        Opcode = X86ISD::FNMADD;        break;
52040     case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FNMADD; break;
52041     case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FNMADD_RND;    break;
52042     case X86ISD::FMADDSUB:      Opcode = X86ISD::FMSUBADD;      break;
52043     case X86ISD::FMADDSUB_RND:  Opcode = X86ISD::FMSUBADD_RND;  break;
52044     case X86ISD::FMSUBADD:      Opcode = X86ISD::FMADDSUB;      break;
52045     case X86ISD::FMSUBADD_RND:  Opcode = X86ISD::FMADDSUB_RND;  break;
52046     }
52047   }
52048 
52049   if (NegRes) {
52050     switch (Opcode) {
52051     // For accuracy reasons, we never combine fneg and fma under strict FP.
52052     default: llvm_unreachable("Unexpected opcode");
52053     case ISD::FMA:             Opcode = X86ISD::FNMSUB;       break;
52054     case X86ISD::FMADD_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
52055     case X86ISD::FMSUB:        Opcode = X86ISD::FNMADD;       break;
52056     case X86ISD::FMSUB_RND:    Opcode = X86ISD::FNMADD_RND;   break;
52057     case X86ISD::FNMADD:       Opcode = X86ISD::FMSUB;        break;
52058     case X86ISD::FNMADD_RND:   Opcode = X86ISD::FMSUB_RND;    break;
52059     case X86ISD::FNMSUB:       Opcode = ISD::FMA;             break;
52060     case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FMADD_RND;    break;
52061     }
52062   }
52063 
52064   return Opcode;
52065 }
52066 
52067 /// Do target-specific dag combines on floating point negations.
52068 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
52069                            TargetLowering::DAGCombinerInfo &DCI,
52070                            const X86Subtarget &Subtarget) {
52071   EVT OrigVT = N->getValueType(0);
52072   SDValue Arg = isFNEG(DAG, N);
52073   if (!Arg)
52074     return SDValue();
52075 
52076   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52077   EVT VT = Arg.getValueType();
52078   EVT SVT = VT.getScalarType();
52079   SDLoc DL(N);
52080 
52081   // Let legalize expand this if it isn't a legal type yet.
52082   if (!TLI.isTypeLegal(VT))
52083     return SDValue();
52084 
52085   // If we're negating a FMUL node on a target with FMA, then we can avoid the
52086   // use of a constant by performing (-0 - A*B) instead.
52087   // FIXME: Check rounding control flags as well once it becomes available.
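  // i.e. fneg (fmul A, B) --> (X86ISD::FNMSUB A, B, 0.0) == -(A * B) - 0.0,
  // which is safe because the FMUL is known to have no signed zeros.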
52088   if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
52089       Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
52090     SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
52091     SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
52092                                   Arg.getOperand(1), Zero);
52093     return DAG.getBitcast(OrigVT, NewNode);
52094   }
52095 
52096   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
52097   bool LegalOperations = !DCI.isBeforeLegalizeOps();
52098   if (SDValue NegArg =
52099           TLI.getNegatedExpression(Arg, DAG, LegalOperations, CodeSize))
52100     return DAG.getBitcast(OrigVT, NegArg);
52101 
52102   return SDValue();
52103 }
52104 
52105 SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
52106                                                 bool LegalOperations,
52107                                                 bool ForCodeSize,
52108                                                 NegatibleCost &Cost,
52109                                                 unsigned Depth) const {
52110   // fneg patterns are removable even if they have multiple uses.
52111   if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth)) {
52112     Cost = NegatibleCost::Cheaper;
52113     return DAG.getBitcast(Op.getValueType(), Arg);
52114   }
52115 
52116   EVT VT = Op.getValueType();
52117   EVT SVT = VT.getScalarType();
52118   unsigned Opc = Op.getOpcode();
52119   SDNodeFlags Flags = Op.getNode()->getFlags();
52120   switch (Opc) {
52121   case ISD::FMA:
52122   case X86ISD::FMSUB:
52123   case X86ISD::FNMADD:
52124   case X86ISD::FNMSUB:
52125   case X86ISD::FMADD_RND:
52126   case X86ISD::FMSUB_RND:
52127   case X86ISD::FNMADD_RND:
52128   case X86ISD::FNMSUB_RND: {
52129     if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
52130         !(SVT == MVT::f32 || SVT == MVT::f64) ||
52131         !isOperationLegal(ISD::FMA, VT))
52132       break;
52133 
52134     // Don't fold (fneg (fma (fneg x), y, (fneg z))) to (fma x, y, z)
52135     // if it may have signed zeros.
52136     if (!Flags.hasNoSignedZeros())
52137       break;
52138 
52139     // This is always negatible for free but we might be able to remove some
52140     // extra operand negations as well.
52141     SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
52142     for (int i = 0; i != 3; ++i)
52143       NewOps[i] = getCheaperNegatedExpression(
52144           Op.getOperand(i), DAG, LegalOperations, ForCodeSize, Depth + 1);
52145 
52146     bool NegA = !!NewOps[0];
52147     bool NegB = !!NewOps[1];
52148     bool NegC = !!NewOps[2];
52149     unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
52150 
52151     Cost = (NegA || NegB || NegC) ? NegatibleCost::Cheaper
52152                                   : NegatibleCost::Neutral;
52153 
52154     // Fill in the non-negated ops with the original values.
52155     for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
52156       if (!NewOps[i])
52157         NewOps[i] = Op.getOperand(i);
52158     return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
52159   }
52160   case X86ISD::FRCP:
52161     if (SDValue NegOp0 =
52162             getNegatedExpression(Op.getOperand(0), DAG, LegalOperations,
52163                                  ForCodeSize, Cost, Depth + 1))
52164       return DAG.getNode(Opc, SDLoc(Op), VT, NegOp0);
52165     break;
52166   }
52167 
52168   return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
52169                                               ForCodeSize, Cost, Depth);
52170 }
52171 
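/// Lower the vector forms of X86ISD::FAND/FANDN/FOR/FXOR to the equivalent
/// integer logic ops (AND/ANDNP/OR/XOR) via bitcasts, when SSE2 integer
/// vector types are available.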
52172 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
52173                                  const X86Subtarget &Subtarget) {
52174   MVT VT = N->getSimpleValueType(0);
52175   // If we have integer vector types available, use the integer opcodes.
52176   if (!VT.isVector() || !Subtarget.hasSSE2())
52177     return SDValue();
52178 
52179   SDLoc dl(N);
52180 
52181   unsigned IntBits = VT.getScalarSizeInBits();
52182   MVT IntSVT = MVT::getIntegerVT(IntBits);
52183   MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
52184 
52185   SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
52186   SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
52187   unsigned IntOpcode;
52188   switch (N->getOpcode()) {
52189   default: llvm_unreachable("Unexpected FP logic op");
52190   case X86ISD::FOR:   IntOpcode = ISD::OR; break;
52191   case X86ISD::FXOR:  IntOpcode = ISD::XOR; break;
52192   case X86ISD::FAND:  IntOpcode = ISD::AND; break;
52193   case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
52194   }
52195   SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
52196   return DAG.getBitcast(VT, IntOp);
52197 }
52198 
52199 
52200 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
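/// e.g. (xor (X86ISD::SETCC COND_E, EFLAGS), 1) --> (X86ISD::SETCC COND_NE, EFLAGS)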
52201 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
52202   if (N->getOpcode() != ISD::XOR)
52203     return SDValue();
52204 
52205   SDValue LHS = N->getOperand(0);
52206   if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
52207     return SDValue();
52208 
52209   X86::CondCode NewCC = X86::GetOppositeBranchCondition(
52210       X86::CondCode(LHS->getConstantOperandVal(0)));
52211   SDLoc DL(N);
52212   return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
52213 }
52214 
52215 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
52216                           TargetLowering::DAGCombinerInfo &DCI,
52217                           const X86Subtarget &Subtarget) {
52218   SDValue N0 = N->getOperand(0);
52219   SDValue N1 = N->getOperand(1);
52220   EVT VT = N->getValueType(0);
52221 
52222   // If this is SSE1 only convert to FXOR to avoid scalarization.
52223   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
52224     return DAG.getBitcast(MVT::v4i32,
52225                           DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
52226                                       DAG.getBitcast(MVT::v4f32, N0),
52227                                       DAG.getBitcast(MVT::v4f32, N1)));
52228   }
52229 
52230   if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
52231     return Cmp;
52232 
52233   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
52234     return R;
52235 
52236   if (SDValue R = combineBitOpWithShift(N, DAG))
52237     return R;
52238 
52239   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
52240     return FPLogic;
52241 
52242   if (DCI.isBeforeLegalizeOps())
52243     return SDValue();
52244 
52245   if (SDValue SetCC = foldXor1SetCC(N, DAG))
52246     return SetCC;
52247 
52248   if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
52249     return RV;
52250 
52251   // Fold not(iX bitcast(vXi1)) -> (iX bitcast(not(vec))) for legal boolvecs.
52252   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52253   if (llvm::isAllOnesConstant(N1) && N0.getOpcode() == ISD::BITCAST &&
52254       N0.getOperand(0).getValueType().isVector() &&
52255       N0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
52256       TLI.isTypeLegal(N0.getOperand(0).getValueType()) && N0.hasOneUse()) {
52257     return DAG.getBitcast(VT, DAG.getNOT(SDLoc(N), N0.getOperand(0),
52258                                          N0.getOperand(0).getValueType()));
52259   }
52260 
52261   // Handle AVX512 mask widening.
52262   // Fold not(insert_subvector(undef,sub)) -> insert_subvector(undef,not(sub))
52263   if (ISD::isBuildVectorAllOnes(N1.getNode()) && VT.isVector() &&
52264       VT.getVectorElementType() == MVT::i1 &&
52265       N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.getOperand(0).isUndef() &&
52266       TLI.isTypeLegal(N0.getOperand(1).getValueType())) {
52267     return DAG.getNode(
52268         ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
52269         DAG.getNOT(SDLoc(N), N0.getOperand(1), N0.getOperand(1).getValueType()),
52270         N0.getOperand(2));
52271   }
52272 
52273   // Fold xor(zext(xor(x,c1)),c2) -> xor(zext(x),xor(zext(c1),c2))
52274   // Fold xor(truncate(xor(x,c1)),c2) -> xor(truncate(x),xor(truncate(c1),c2))
52275   // TODO: Under what circumstances could this be performed in DAGCombine?
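  // e.g. with X: i8, (i32 (xor (zext (xor X, 1)), 2)) --> (xor (zext X), 3).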
52276   if ((N0.getOpcode() == ISD::TRUNCATE || N0.getOpcode() == ISD::ZERO_EXTEND) &&
52277       N0.getOperand(0).getOpcode() == N->getOpcode()) {
52278     SDValue TruncExtSrc = N0.getOperand(0);
52279     auto *N1C = dyn_cast<ConstantSDNode>(N1);
52280     auto *N001C = dyn_cast<ConstantSDNode>(TruncExtSrc.getOperand(1));
52281     if (N1C && !N1C->isOpaque() && N001C && !N001C->isOpaque()) {
52282       SDLoc DL(N);
52283       SDValue LHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(0), DL, VT);
52284       SDValue RHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(1), DL, VT);
52285       return DAG.getNode(ISD::XOR, DL, VT, LHS,
52286                          DAG.getNode(ISD::XOR, DL, VT, RHS, N1));
52287     }
52288   }
52289 
52290   return combineFneg(N, DAG, DCI, Subtarget);
52291 }
52292 
52293 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
52294                             TargetLowering::DAGCombinerInfo &DCI,
52295                             const X86Subtarget &Subtarget) {
52296   EVT VT = N->getValueType(0);
52297   unsigned NumBits = VT.getSizeInBits();
52298 
52299   // TODO - Constant Folding.
52300 
52301   // Simplify the inputs.
52302   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52303   APInt DemandedMask(APInt::getAllOnes(NumBits));
52304   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
52305     return SDValue(N, 0);
52306 
52307   return SDValue();
52308 }
52309 
52310 static bool isNullFPScalarOrVectorConst(SDValue V) {
52311   return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
52312 }
52313 
52314 /// If a value is a scalar FP zero or a vector FP zero (potentially including
52315 /// undefined elements), return a zero constant that may be used to fold away
52316 /// that value. In the case of a vector, the returned constant will not contain
52317 /// undefined elements even if the input parameter does. This makes it suitable
52318 /// to be used as a replacement operand with operations (eg, bitwise-and) where
52319 /// an undef should not propagate.
52320 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
52321                                         const X86Subtarget &Subtarget) {
52322   if (!isNullFPScalarOrVectorConst(V))
52323     return SDValue();
52324 
52325   if (V.getValueType().isVector())
52326     return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
52327 
52328   return V;
52329 }
52330 
52331 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
52332                                       const X86Subtarget &Subtarget) {
52333   SDValue N0 = N->getOperand(0);
52334   SDValue N1 = N->getOperand(1);
52335   EVT VT = N->getValueType(0);
52336   SDLoc DL(N);
52337 
52338   // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
52339   if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
52340         (VT == MVT::f64 && Subtarget.hasSSE2()) ||
52341         (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
52342     return SDValue();
52343 
52344   auto isAllOnesConstantFP = [](SDValue V) {
52345     if (V.getSimpleValueType().isVector())
52346       return ISD::isBuildVectorAllOnes(V.getNode());
52347     auto *C = dyn_cast<ConstantFPSDNode>(V);
52348     return C && C->getConstantFPValue()->isAllOnesValue();
52349   };
52350 
52351   // fand (fxor X, -1), Y --> fandn X, Y
52352   if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
52353     return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
52354 
52355   // fand X, (fxor Y, -1) --> fandn Y, X
52356   if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
52357     return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
52358 
52359   return SDValue();
52360 }
52361 
52362 /// Do target-specific dag combines on X86ISD::FAND nodes.
52363 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
52364                            const X86Subtarget &Subtarget) {
52365   // FAND(0.0, x) -> 0.0
52366   if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
52367     return V;
52368 
52369   // FAND(x, 0.0) -> 0.0
52370   if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
52371     return V;
52372 
52373   if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
52374     return V;
52375 
52376   return lowerX86FPLogicOp(N, DAG, Subtarget);
52377 }
52378 
52379 /// Do target-specific dag combines on X86ISD::FANDN nodes.
52380 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
52381                             const X86Subtarget &Subtarget) {
52382   // FANDN(0.0, x) -> x
52383   if (isNullFPScalarOrVectorConst(N->getOperand(0)))
52384     return N->getOperand(1);
52385 
52386   // FANDN(x, 0.0) -> 0.0
52387   if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
52388     return V;
52389 
52390   return lowerX86FPLogicOp(N, DAG, Subtarget);
52391 }
52392 
52393 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
52394 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
52395                           TargetLowering::DAGCombinerInfo &DCI,
52396                           const X86Subtarget &Subtarget) {
52397   assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
52398 
52399   // F[X]OR(0.0, x) -> x
52400   if (isNullFPScalarOrVectorConst(N->getOperand(0)))
52401     return N->getOperand(1);
52402 
52403   // F[X]OR(x, 0.0) -> x
52404   if (isNullFPScalarOrVectorConst(N->getOperand(1)))
52405     return N->getOperand(0);
52406 
52407   if (SDValue NewVal = combineFneg(N, DAG, DCI, Subtarget))
52408     return NewVal;
52409 
52410   return lowerX86FPLogicOp(N, DAG, Subtarget);
52411 }
52412 
52413 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
52414 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
52415   assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
52416 
52417   // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
52418   if (!DAG.getTarget().Options.NoNaNsFPMath ||
52419       !DAG.getTarget().Options.NoSignedZerosFPMath)
52420     return SDValue();
52421 
52422   // With no NaNs and no signed zeros we can convert the FMIN and FMAX nodes
52423   // into FMINC and FMAXC, which are commutative operations.
52424   unsigned NewOp = 0;
52425   switch (N->getOpcode()) {
52426     default: llvm_unreachable("unknown opcode");
52427     case X86ISD::FMIN:  NewOp = X86ISD::FMINC; break;
52428     case X86ISD::FMAX:  NewOp = X86ISD::FMAXC; break;
52429   }
52430 
52431   return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
52432                      N->getOperand(0), N->getOperand(1));
52433 }
52434 
52435 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
52436                                      const X86Subtarget &Subtarget) {
52437   EVT VT = N->getValueType(0);
52438   if (Subtarget.useSoftFloat() || isSoftFP16(VT, Subtarget))
52439     return SDValue();
52440 
52441   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52442 
52443   if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
52444         (Subtarget.hasSSE2() && VT == MVT::f64) ||
52445         (Subtarget.hasFP16() && VT == MVT::f16) ||
52446         (VT.isVector() && TLI.isTypeLegal(VT))))
52447     return SDValue();
52448 
52449   SDValue Op0 = N->getOperand(0);
52450   SDValue Op1 = N->getOperand(1);
52451   SDLoc DL(N);
52452   auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
52453 
52454   // If we don't have to respect NaN inputs, this is a direct translation to x86
52455   // min/max instructions.
52456   if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
52457     return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
52458 
52459   // If one of the operands is known non-NaN, use the native min/max
52460   // instructions with the non-NaN input as the second operand.
52461   if (DAG.isKnownNeverNaN(Op1))
52462     return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
52463   if (DAG.isKnownNeverNaN(Op0))
52464     return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
52465 
52466   // If we have to respect NaN inputs, this takes at least 3 instructions.
52467   // Favor a library call when operating on a scalar and minimizing code size.
52468   if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
52469     return SDValue();
52470 
52471   EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
52472                                          VT);
52473 
52474   // There are 4 possibilities involving NaN inputs, and these are the required
52475   // outputs:
52476   //                   Op1
52477   //               Num     NaN
52478   //            ----------------
52479   //       Num  |  Max  |  Op0 |
52480   // Op0        ----------------
52481   //       NaN  |  Op1  |  NaN |
52482   //            ----------------
52483   //
52484   // The SSE FP max/min instructions were not designed for this case, but rather
52485   // to implement:
52486   //   Min = Op1 < Op0 ? Op1 : Op0
52487   //   Max = Op1 > Op0 ? Op1 : Op0
52488   //
52489   // So they always return Op0 if either input is a NaN. However, we can still
52490   // use those instructions for fmaxnum by selecting away a NaN input.
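  //
  // As an illustrative sketch of the sequence built below, fmaxnum(x, y) with
  // full NaN semantics becomes roughly:
  //   MinOrMax = FMAX y, x      // x is returned if either input is NaN
  //   IsOp0Nan = SETUO x, x     // true iff x is NaN
  //   Result   = select IsOp0Nan, y, MinOrMax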
52491 
52492   // If either operand is NaN, the 2nd source operand (Op0) is passed through.
52493   SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
52494   SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
52495 
52496   // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
52497   // are NaN, the NaN value of Op1 is the result.
52498   return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
52499 }
52500 
52501 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
52502                                    TargetLowering::DAGCombinerInfo &DCI) {
52503   EVT VT = N->getValueType(0);
52504   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52505 
52506   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
52507   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
52508     return SDValue(N, 0);
52509 
52510   // Convert a full vector load into vzload when not all bits are needed.
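  // For example (illustrative), a conversion that consumes a v4i32 load but
  // produces only a v2f64 result needs just the low 64 bits, so the 128-bit
  // load can be narrowed to a vzload of i64 and bitcast back to v4i32.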
52511   SDValue In = N->getOperand(0);
52512   MVT InVT = In.getSimpleValueType();
52513   if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
52514       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
52515     assert(InVT.is128BitVector() && "Expected 128-bit input vector");
52516     LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
52517     unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
52518     MVT MemVT = MVT::getIntegerVT(NumBits);
52519     MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
52520     if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
52521       SDLoc dl(N);
52522       SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
52523                                     DAG.getBitcast(InVT, VZLoad));
52524       DCI.CombineTo(N, Convert);
52525       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52526       DCI.recursivelyDeleteUnusedNodes(LN);
52527       return SDValue(N, 0);
52528     }
52529   }
52530 
52531   return SDValue();
52532 }
52533 
52534 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
52535                                      TargetLowering::DAGCombinerInfo &DCI) {
52536   bool IsStrict = N->isTargetStrictFPOpcode();
52537   EVT VT = N->getValueType(0);
52538 
52539   // Convert a full vector load into vzload when not all bits are needed.
52540   SDValue In = N->getOperand(IsStrict ? 1 : 0);
52541   MVT InVT = In.getSimpleValueType();
52542   if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
52543       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
52544     assert(InVT.is128BitVector() && "Expected 128-bit input vector");
52545     LoadSDNode *LN = cast<LoadSDNode>(In);
52546     unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
52547     MVT MemVT = MVT::getFloatingPointVT(NumBits);
52548     MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
52549     if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
52550       SDLoc dl(N);
52551       if (IsStrict) {
52552         SDValue Convert =
52553             DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
52554                         {N->getOperand(0), DAG.getBitcast(InVT, VZLoad)});
52555         DCI.CombineTo(N, Convert, Convert.getValue(1));
52556       } else {
52557         SDValue Convert =
52558             DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(InVT, VZLoad));
52559         DCI.CombineTo(N, Convert);
52560       }
52561       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52562       DCI.recursivelyDeleteUnusedNodes(LN);
52563       return SDValue(N, 0);
52564     }
52565   }
52566 
52567   return SDValue();
52568 }
52569 
52570 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
52571 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
52572                             TargetLowering::DAGCombinerInfo &DCI,
52573                             const X86Subtarget &Subtarget) {
52574   SDValue N0 = N->getOperand(0);
52575   SDValue N1 = N->getOperand(1);
52576   MVT VT = N->getSimpleValueType(0);
52577   int NumElts = VT.getVectorNumElements();
52578   unsigned EltSizeInBits = VT.getScalarSizeInBits();
52579 
52580   // ANDNP(undef, x) -> 0
52581   // ANDNP(x, undef) -> 0
52582   if (N0.isUndef() || N1.isUndef())
52583     return DAG.getConstant(0, SDLoc(N), VT);
52584 
52585   // ANDNP(0, x) -> x
52586   if (ISD::isBuildVectorAllZeros(N0.getNode()))
52587     return N1;
52588 
52589   // ANDNP(x, 0) -> 0
52590   if (ISD::isBuildVectorAllZeros(N1.getNode()))
52591     return DAG.getConstant(0, SDLoc(N), VT);
52592 
52593   // Turn ANDNP back to AND if input is inverted.
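  // e.g. (illustrative) ANDNP (xor X, -1), Y --> AND X, Y, since ANDNP
  // computes ~Op0 & Op1 and the NOT cancels out.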
52594   if (SDValue Not = IsNOT(N0, DAG))
52595     return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not), N1);
52596 
52597   // Constant Folding
52598   APInt Undefs0, Undefs1;
52599   SmallVector<APInt> EltBits0, EltBits1;
52600   if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0)) {
52601     SDLoc DL(N);
52602     APInt ResultUndefs = APInt::getZero(NumElts);
52603 
52604     if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1)) {
52605       SmallVector<APInt> ResultBits;
52606       for (int I = 0; I != NumElts; ++I)
52607         ResultBits.push_back(~EltBits0[I] & EltBits1[I]);
52608       return getConstVector(ResultBits, ResultUndefs, VT, DAG, DL);
52609     }
52610 
52611     // Constant fold NOT(N0) to allow us to use AND.
52612     // Ensure this is only performed if we can confirm that the bitcasted source
52613     // has one use to prevent an infinite loop with canonicalizeBitSelect.
52614     if (N0->hasOneUse()) {
52615       SDValue BC0 = peekThroughOneUseBitcasts(N0);
52616       if (BC0.getOpcode() != ISD::BITCAST) {
52617         for (APInt &Elt : EltBits0)
52618           Elt = ~Elt;
52619         SDValue Not = getConstVector(EltBits0, ResultUndefs, VT, DAG, DL);
52620         return DAG.getNode(ISD::AND, DL, VT, Not, N1);
52621       }
52622     }
52623   }
52624 
52625   // Attempt to recursively combine a bitmask ANDNP with shuffles.
52626   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
52627     SDValue Op(N, 0);
52628     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
52629       return Res;
52630 
52631     // If either operand is a constant mask, then only the elements that aren't
52632     // zero are actually demanded by the other operand.
52633     auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
52634       APInt UndefElts;
52635       SmallVector<APInt> EltBits;
52636       APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
52637       APInt DemandedElts = APInt::getAllOnes(NumElts);
52638       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
52639                                         EltBits)) {
52640         DemandedBits.clearAllBits();
52641         DemandedElts.clearAllBits();
52642         for (int I = 0; I != NumElts; ++I) {
52643           if (UndefElts[I]) {
52644             // We can't assume an undef src element gives an undef dst - the
52645             // other src might be zero.
52646             DemandedBits.setAllBits();
52647             DemandedElts.setBit(I);
52648           } else if ((Invert && !EltBits[I].isAllOnes()) ||
52649                      (!Invert && !EltBits[I].isZero())) {
52650             DemandedBits |= Invert ? ~EltBits[I] : EltBits[I];
52651             DemandedElts.setBit(I);
52652           }
52653         }
52654       }
52655       return std::make_pair(DemandedBits, DemandedElts);
52656     };
52657     APInt Bits0, Elts0;
52658     APInt Bits1, Elts1;
52659     std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
52660     std::tie(Bits1, Elts1) = GetDemandedMasks(N0, true);
52661 
52662     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52663     if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
52664         TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
52665         TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
52666         TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
52667       if (N->getOpcode() != ISD::DELETED_NODE)
52668         DCI.AddToWorklist(N);
52669       return SDValue(N, 0);
52670     }
52671   }
52672 
52673   return SDValue();
52674 }
52675 
52676 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
52677                          TargetLowering::DAGCombinerInfo &DCI) {
52678   SDValue N1 = N->getOperand(1);
52679 
52680   // BT ignores high bits in the bit index operand.
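  // e.g. (illustrative) for an i32 bit-index operand only the low
  // Log2(32) = 5 bits matter, so any higher index bits can be simplified away.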
52681   unsigned BitWidth = N1.getValueSizeInBits();
52682   APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
52683   if (DAG.getTargetLoweringInfo().SimplifyDemandedBits(N1, DemandedMask, DCI)) {
52684     if (N->getOpcode() != ISD::DELETED_NODE)
52685       DCI.AddToWorklist(N);
52686     return SDValue(N, 0);
52687   }
52688 
52689   return SDValue();
52690 }
52691 
52692 static SDValue combineCVTPH2PS(SDNode *N, SelectionDAG &DAG,
52693                                TargetLowering::DAGCombinerInfo &DCI) {
52694   bool IsStrict = N->getOpcode() == X86ISD::STRICT_CVTPH2PS;
52695   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
52696 
52697   if (N->getValueType(0) == MVT::v4f32 && Src.getValueType() == MVT::v8i16) {
52698     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52699     APInt DemandedElts = APInt::getLowBitsSet(8, 4);
52700     if (TLI.SimplifyDemandedVectorElts(Src, DemandedElts, DCI)) {
52701       if (N->getOpcode() != ISD::DELETED_NODE)
52702         DCI.AddToWorklist(N);
52703       return SDValue(N, 0);
52704     }
52705 
52706     // Convert a full vector load into vzload when not all bits are needed.
52707     if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
52708       LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(IsStrict ? 1 : 0));
52709       if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::i64, MVT::v2i64, DAG)) {
52710         SDLoc dl(N);
52711         if (IsStrict) {
52712           SDValue Convert = DAG.getNode(
52713               N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
52714               {N->getOperand(0), DAG.getBitcast(MVT::v8i16, VZLoad)});
52715           DCI.CombineTo(N, Convert, Convert.getValue(1));
52716         } else {
52717           SDValue Convert = DAG.getNode(N->getOpcode(), dl, MVT::v4f32,
52718                                         DAG.getBitcast(MVT::v8i16, VZLoad));
52719           DCI.CombineTo(N, Convert);
52720         }
52721 
52722         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52723         DCI.recursivelyDeleteUnusedNodes(LN);
52724         return SDValue(N, 0);
52725       }
52726     }
52727   }
52728 
52729   return SDValue();
52730 }
52731 
52732 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
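// For example (illustrative):
//   (sext_in_reg (cmov C1, C2, cc), i8)
//     --> (cmov (sext_in_reg C1, i8), (sext_in_reg C2, i8), cc)
// where the extended constants fold away at compile time.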
52733 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
52734   assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
52735 
52736   EVT DstVT = N->getValueType(0);
52737 
52738   SDValue N0 = N->getOperand(0);
52739   SDValue N1 = N->getOperand(1);
52740   EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
52741 
52742   if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
52743     return SDValue();
52744 
52745   // Look through single use any_extends / truncs.
52746   SDValue IntermediateBitwidthOp;
52747   if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
52748       N0.hasOneUse()) {
52749     IntermediateBitwidthOp = N0;
52750     N0 = N0.getOperand(0);
52751   }
52752 
52753   // See if we have a single use cmov.
52754   if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
52755     return SDValue();
52756 
52757   SDValue CMovOp0 = N0.getOperand(0);
52758   SDValue CMovOp1 = N0.getOperand(1);
52759 
52760   // Make sure both operands are constants.
52761   if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
52762       !isa<ConstantSDNode>(CMovOp1.getNode()))
52763     return SDValue();
52764 
52765   SDLoc DL(N);
52766 
52767   // If we looked through an any_extend/trunc above, re-apply it to the constants.
52768   if (IntermediateBitwidthOp) {
52769     unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
52770     CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
52771     CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
52772   }
52773 
52774   CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
52775   CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
52776 
52777   EVT CMovVT = DstVT;
52778   // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
52779   if (DstVT == MVT::i16) {
52780     CMovVT = MVT::i32;
52781     CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
52782     CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
52783   }
52784 
52785   SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
52786                              N0.getOperand(2), N0.getOperand(3));
52787 
52788   if (CMovVT != DstVT)
52789     CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
52790 
52791   return CMov;
52792 }
52793 
52794 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
52795                                       const X86Subtarget &Subtarget) {
52796   assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
52797 
52798   if (SDValue V = combineSextInRegCmov(N, DAG))
52799     return V;
52800 
52801   EVT VT = N->getValueType(0);
52802   SDValue N0 = N->getOperand(0);
52803   SDValue N1 = N->getOperand(1);
52804   EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
52805   SDLoc dl(N);
52806 
52807   // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
52808   // SSE and AVX2 since there is no sign-extended shift right
52809   // operation on a vector with 64-bit elements.
52810   // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
52811   // (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
52812   if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
52813                            N0.getOpcode() == ISD::SIGN_EXTEND)) {
52814     SDValue N00 = N0.getOperand(0);
52815 
52816     // EXTLOAD has a better solution on AVX2,
52817     // it may be replaced with X86ISD::VSEXT node.
52818     if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
52819       if (!ISD::isNormalLoad(N00.getNode()))
52820         return SDValue();
52821 
52822     // Attempt to promote any comparison mask ops before the
52823     // SIGN_EXTEND_INREG gets in the way.
52824     if (SDValue Promote = PromoteMaskArithmetic(N0.getNode(), DAG, Subtarget))
52825       return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Promote, N1);
52826 
52827     if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
52828       SDValue Tmp =
52829           DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1);
52830       return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
52831     }
52832   }
52833   return SDValue();
52834 }
52835 
52836 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
52837 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
52838 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
52839 /// opportunities to combine math ops, use an LEA, or use a complex addressing
52840 /// mode. This can eliminate extend, add, and shift instructions.
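/// For example (illustrative), i64 (sext (add nsw i32 %x, 42)) becomes
/// i64 (add nsw (sext %x), 42), which a dependent shl or add can then fold
/// into an LEA or a complex addressing mode.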
52841 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
52842                                    const X86Subtarget &Subtarget) {
52843   if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
52844       Ext->getOpcode() != ISD::ZERO_EXTEND)
52845     return SDValue();
52846 
52847   // TODO: This should be valid for other integer types.
52848   EVT VT = Ext->getValueType(0);
52849   if (VT != MVT::i64)
52850     return SDValue();
52851 
52852   SDValue Add = Ext->getOperand(0);
52853   if (Add.getOpcode() != ISD::ADD)
52854     return SDValue();
52855 
52856   bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
52857   bool NSW = Add->getFlags().hasNoSignedWrap();
52858   bool NUW = Add->getFlags().hasNoUnsignedWrap();
52859 
52860   // We need an 'add nsw' feeding into the 'sext' or an 'add nuw' feeding
52861   // into the 'zext'.
52862   if ((Sext && !NSW) || (!Sext && !NUW))
52863     return SDValue();
52864 
52865   // Having a constant operand to the 'add' ensures that we are not increasing
52866   // the instruction count because the constant is extended for free below.
52867   // A constant operand can also become the displacement field of an LEA.
52868   auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
52869   if (!AddOp1)
52870     return SDValue();
52871 
52872   // Don't make the 'add' bigger if there's no hope of combining it with some
52873   // other 'add' or 'shl' instruction.
52874   // TODO: It may be profitable to generate simpler LEA instructions in place
52875   // of single 'add' instructions, but the cost model for selecting an LEA
52876   // currently has a high threshold.
52877   bool HasLEAPotential = false;
52878   for (auto *User : Ext->uses()) {
52879     if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
52880       HasLEAPotential = true;
52881       break;
52882     }
52883   }
52884   if (!HasLEAPotential)
52885     return SDValue();
52886 
52887   // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
52888   int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
52889   SDValue AddOp0 = Add.getOperand(0);
52890   SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
52891   SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
52892 
52893   // The wider add is guaranteed to not wrap because both operands are
52894   // sign-extended.
52895   SDNodeFlags Flags;
52896   Flags.setNoSignedWrap(NSW);
52897   Flags.setNoUnsignedWrap(NUW);
52898   return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
52899 }
52900 
52901 // If an {ANY,SIGN,ZERO}_EXTEND is applied to a CMOV with constant operands
52902 // and the result of the CMOV is not used anywhere else, promote the CMOV
52903 // itself instead of promoting its result. This could be beneficial because:
52904 //     1) X86TargetLowering::EmitLoweredSelect later can do merging of two
52905 //        (or more) pseudo-CMOVs only when they go one-after-another and
52906 //        getting rid of result extension code after CMOV will help that.
52907 //     2) Promotion of constant CMOV arguments is free, hence the
52908 //        {ANY,SIGN,ZERO}_EXTEND will just be deleted.
52909 //     3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3 bytes, so this
52910 //        promotion is also good in terms of code size.
52911 //        (64-bit CMOV is 4 bytes, which is why we don't do 32-bit => 64-bit
52912 //         promotion).
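// For example (illustrative):
//   (zext i32 (cmov i16 C1, C2, cc)) --> (cmov i32 (zext C1), (zext C2), cc)
// where the extended constants fold away and the extend disappears.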
52913 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
52914   SDValue CMovN = Extend->getOperand(0);
52915   if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
52916     return SDValue();
52917 
52918   EVT TargetVT = Extend->getValueType(0);
52919   unsigned ExtendOpcode = Extend->getOpcode();
52920   SDLoc DL(Extend);
52921 
52922   EVT VT = CMovN.getValueType();
52923   SDValue CMovOp0 = CMovN.getOperand(0);
52924   SDValue CMovOp1 = CMovN.getOperand(1);
52925 
52926   if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
52927       !isa<ConstantSDNode>(CMovOp1.getNode()))
52928     return SDValue();
52929 
52930   // Only extend to i32 or i64.
52931   if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
52932     return SDValue();
52933 
52934   // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
52935   // are free.
52936   if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
52937     return SDValue();
52938 
52939   // If this is a zero extend to i64, we should only extend to i32 and use a
52940   // free zero extend to finish.
52941   EVT ExtendVT = TargetVT;
52942   if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
52943     ExtendVT = MVT::i32;
52944 
52945   CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
52946   CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
52947 
52948   SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
52949                             CMovN.getOperand(2), CMovN.getOperand(3));
52950 
52951   // Finish extending if needed.
52952   if (ExtendVT != TargetVT)
52953     Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
52954 
52955   return Res;
52956 }
52957 
52958 // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
52959 // result type.
52960 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
52961                                const X86Subtarget &Subtarget) {
52962   SDValue N0 = N->getOperand(0);
52963   EVT VT = N->getValueType(0);
52964   SDLoc dl(N);
52965 
52966   // Only do this combine with AVX512 for vector extends.
52967   if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
52968     return SDValue();
52969 
52970   // Only combine legal element types.
52971   EVT SVT = VT.getVectorElementType();
52972   if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
52973       SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
52974     return SDValue();
52975 
52976   // We don't have a CMPP instruction for vXf16.
52977   if (N0.getOperand(0).getValueType().getVectorElementType() == MVT::f16)
52978     return SDValue();
52979   // We can only do this if the vector size is 256 bits or less.
52980   unsigned Size = VT.getSizeInBits();
52981   if (Size > 256 && Subtarget.useAVX512Regs())
52982     return SDValue();
52983 
52984   // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
52985   // those are the only integer compares we have.
52986   ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
52987   if (ISD::isUnsignedIntSetCC(CC))
52988     return SDValue();
52989 
52990   // Only do this combine if the extension will be fully consumed by the setcc.
52991   EVT N00VT = N0.getOperand(0).getValueType();
52992   EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
52993   if (Size != MatchingVecType.getSizeInBits())
52994     return SDValue();
52995 
52996   SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
52997 
52998   if (N->getOpcode() == ISD::ZERO_EXTEND)
52999     Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType());
53000 
53001   return Res;
53002 }
53003 
53004 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
53005                            TargetLowering::DAGCombinerInfo &DCI,
53006                            const X86Subtarget &Subtarget) {
53007   SDValue N0 = N->getOperand(0);
53008   EVT VT = N->getValueType(0);
53009   SDLoc DL(N);
53010 
53011   // (i32 (sext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
53012   if (!DCI.isBeforeLegalizeOps() &&
53013       N0.getOpcode() == X86ISD::SETCC_CARRY) {
53014     SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, N0->getOperand(0),
53015                                  N0->getOperand(1));
53016     bool ReplaceOtherUses = !N0.hasOneUse();
53017     DCI.CombineTo(N, Setcc);
53018     // Replace other uses with a truncate of the widened setcc_carry.
53019     if (ReplaceOtherUses) {
53020       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
53021                                   N0.getValueType(), Setcc);
53022       DCI.CombineTo(N0.getNode(), Trunc);
53023     }
53024 
53025     return SDValue(N, 0);
53026   }
53027 
53028   if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
53029     return NewCMov;
53030 
53031   if (!DCI.isBeforeLegalizeOps())
53032     return SDValue();
53033 
53034   if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
53035     return V;
53036 
53037   if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), DL, VT, N0,
53038                                                  DAG, DCI, Subtarget))
53039     return V;
53040 
53041   if (VT.isVector()) {
53042     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
53043       return R;
53044 
53045     if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG)
53046       return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0));
53047   }
53048 
53049   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
53050     return NewAdd;
53051 
53052   return SDValue();
53053 }
53054 
53055 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
53056                           TargetLowering::DAGCombinerInfo &DCI,
53057                           const X86Subtarget &Subtarget) {
53058   SDLoc dl(N);
53059   EVT VT = N->getValueType(0);
53060   bool IsStrict = N->isStrictFPOpcode() || N->isTargetStrictFPOpcode();
53061 
53062   // Let legalize expand this if it isn't a legal type yet.
53063   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53064   if (!TLI.isTypeLegal(VT))
53065     return SDValue();
53066 
53067   SDValue A = N->getOperand(IsStrict ? 1 : 0);
53068   SDValue B = N->getOperand(IsStrict ? 2 : 1);
53069   SDValue C = N->getOperand(IsStrict ? 3 : 2);
53070 
53071   // If the operation allows fast-math and the target does not support FMA,
53072   // split this into mul+add to avoid libcall(s).
53073   SDNodeFlags Flags = N->getFlags();
53074   if (!IsStrict && Flags.hasAllowReassociation() &&
53075       TLI.isOperationExpand(ISD::FMA, VT)) {
53076     SDValue Fmul = DAG.getNode(ISD::FMUL, dl, VT, A, B, Flags);
53077     return DAG.getNode(ISD::FADD, dl, VT, Fmul, C, Flags);
53078   }
53079 
53080   EVT ScalarVT = VT.getScalarType();
53081   if (((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
53082        !Subtarget.hasAnyFMA()) &&
53083       !(ScalarVT == MVT::f16 && Subtarget.hasFP16()))
53084     return SDValue();
53085 
53086   auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
53087     bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
53088     bool LegalOperations = !DCI.isBeforeLegalizeOps();
53089     if (SDValue NegV = TLI.getCheaperNegatedExpression(V, DAG, LegalOperations,
53090                                                        CodeSize)) {
53091       V = NegV;
53092       return true;
53093     }
53094     // Look through extract_vector_elts. If it comes from an FNEG, create a
53095     // new extract from the FNEG input.
53096     if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
53097         isNullConstant(V.getOperand(1))) {
53098       SDValue Vec = V.getOperand(0);
53099       if (SDValue NegV = TLI.getCheaperNegatedExpression(
53100               Vec, DAG, LegalOperations, CodeSize)) {
53101         V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
53102                         NegV, V.getOperand(1));
53103         return true;
53104       }
53105     }
53106 
53107     return false;
53108   };
53109 
53110   // Do not convert the passthru input of scalar intrinsics.
53111   // FIXME: We could allow negations of the lower element only.
53112   bool NegA = invertIfNegative(A);
53113   bool NegB = invertIfNegative(B);
53114   bool NegC = invertIfNegative(C);
53115 
53116   if (!NegA && !NegB && !NegC)
53117     return SDValue();
53118 
53119   unsigned NewOpcode =
53120       negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
53121 
53122   // Propagate fast-math-flags to new FMA node.
53123   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
53124   if (IsStrict) {
53125     assert(N->getNumOperands() == 4 && "Shouldn't be greater than 4");
53126     return DAG.getNode(NewOpcode, dl, {VT, MVT::Other},
53127                        {N->getOperand(0), A, B, C});
53128   } else {
53129     if (N->getNumOperands() == 4)
53130       return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
53131     return DAG.getNode(NewOpcode, dl, VT, A, B, C);
53132   }
53133 }
53134 
53135 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
53136 // Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
53137 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
53138                                TargetLowering::DAGCombinerInfo &DCI) {
53139   SDLoc dl(N);
53140   EVT VT = N->getValueType(0);
53141   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53142   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
53143   bool LegalOperations = !DCI.isBeforeLegalizeOps();
53144 
53145   SDValue N2 = N->getOperand(2);
53146 
53147   SDValue NegN2 =
53148       TLI.getCheaperNegatedExpression(N2, DAG, LegalOperations, CodeSize);
53149   if (!NegN2)
53150     return SDValue();
53151   unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
53152 
53153   if (N->getNumOperands() == 4)
53154     return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
53155                        NegN2, N->getOperand(3));
53156   return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
53157                      NegN2);
53158 }
53159 
53160 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
53161                            TargetLowering::DAGCombinerInfo &DCI,
53162                            const X86Subtarget &Subtarget) {
53163   SDLoc dl(N);
53164   SDValue N0 = N->getOperand(0);
53165   EVT VT = N->getValueType(0);
53166 
53167   // (i32 (aext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
53168   // FIXME: Is this needed? We don't seem to have any tests for it.
53169   if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ANY_EXTEND &&
53170       N0.getOpcode() == X86ISD::SETCC_CARRY) {
53171     SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, N0->getOperand(0),
53172                                  N0->getOperand(1));
53173     bool ReplaceOtherUses = !N0.hasOneUse();
53174     DCI.CombineTo(N, Setcc);
53175     // Replace other uses with a truncate of the widened setcc_carry.
53176     if (ReplaceOtherUses) {
53177       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
53178                                   N0.getValueType(), Setcc);
53179       DCI.CombineTo(N0.getNode(), Trunc);
53180     }
53181 
53182     return SDValue(N, 0);
53183   }
53184 
53185   if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
53186     return NewCMov;
53187 
53188   if (DCI.isBeforeLegalizeOps())
53189     if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
53190       return V;
53191 
53192   if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), dl, VT, N0,
53193                                                  DAG, DCI, Subtarget))
53194     return V;
53195 
53196   if (VT.isVector())
53197     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
53198       return R;
53199 
53200   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
53201     return NewAdd;
53202 
53203   if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
53204     return R;
53205 
53206   // TODO: Combine with any target/faux shuffle.
53207   if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
53208       VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
53209     SDValue N00 = N0.getOperand(0);
53210     SDValue N01 = N0.getOperand(1);
53211     unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
53212     APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
53213     if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
53214         (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
53215       return concatSubVectors(N00, N01, DAG, dl);
53216     }
53217   }
53218 
53219   return SDValue();
53220 }
53221 
53222 /// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
53223 /// recognizable memcmp expansion.
53224 static bool isOrXorXorTree(SDValue X, bool Root = true) {
53225   if (X.getOpcode() == ISD::OR)
53226     return isOrXorXorTree(X.getOperand(0), false) &&
53227            isOrXorXorTree(X.getOperand(1), false);
53228   if (Root)
53229     return false;
53230   return X.getOpcode() == ISD::XOR;
53231 }
53232 
53233 /// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
53234 /// expansion.
53235 template <typename F>
53236 static SDValue emitOrXorXorTree(SDValue X, SDLoc &DL, SelectionDAG &DAG,
53237                                 EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
53238   SDValue Op0 = X.getOperand(0);
53239   SDValue Op1 = X.getOperand(1);
53240   if (X.getOpcode() == ISD::OR) {
53241     SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
53242     SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
53243     if (VecVT != CmpVT)
53244       return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
53245     if (HasPT)
53246       return DAG.getNode(ISD::OR, DL, VecVT, A, B);
53247     return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
53248   }
53249   if (X.getOpcode() == ISD::XOR) {
53250     SDValue A = SToV(Op0);
53251     SDValue B = SToV(Op1);
53252     if (VecVT != CmpVT)
53253       return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
53254     if (HasPT)
53255       return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
53256     return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
53257   }
53258   llvm_unreachable("Impossible");
53259 }
53260 
53261 /// Try to map a 128-bit or larger integer comparison to vector instructions
53262 /// before type legalization splits it up into chunks.
53263 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
53264                                                const X86Subtarget &Subtarget) {
53265   ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
53266   assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
53267 
53268   // We're looking for an oversized integer equality comparison.
53269   SDValue X = SetCC->getOperand(0);
53270   SDValue Y = SetCC->getOperand(1);
53271   EVT OpVT = X.getValueType();
53272   unsigned OpSize = OpVT.getSizeInBits();
53273   if (!OpVT.isScalarInteger() || OpSize < 128)
53274     return SDValue();
53275 
53276   // Ignore a comparison with zero because that gets special treatment in
53277   // EmitTest(). But make an exception for the special case of a pair of
53278   // logically-combined vector-sized operands compared to zero. This pattern may
53279   // be generated by the memcmp expansion pass with oversized integer compares
53280   // (see PR33325).
53281   bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
53282   if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
53283     return SDValue();
53284 
53285   // Don't perform this combine if constructing the vector will be expensive.
53286   auto IsVectorBitCastCheap = [](SDValue X) {
53287     X = peekThroughBitcasts(X);
53288     return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
53289            X.getOpcode() == ISD::LOAD;
53290   };
53291   if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
53292       !IsOrXorXorTreeCCZero)
53293     return SDValue();
53294 
53295   EVT VT = SetCC->getValueType(0);
53296   SDLoc DL(SetCC);
53297 
53298   // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
53299   // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
53300   // Otherwise use PCMPEQ (plus AND) and mask testing.
53301   bool NoImplicitFloatOps =
53302       DAG.getMachineFunction().getFunction().hasFnAttribute(
53303           Attribute::NoImplicitFloat);
53304   if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
53305       ((OpSize == 128 && Subtarget.hasSSE2()) ||
53306        (OpSize == 256 && Subtarget.hasAVX()) ||
53307        (OpSize == 512 && Subtarget.useAVX512Regs()))) {
53308     bool HasPT = Subtarget.hasSSE41();
53309 
53310     // PTEST and MOVMSK are slow on Knights Landing and Knights Mill and widened
53311     // vector registers are essentially free. (Technically, widening registers
53312     // prevents load folding, but the tradeoff is worth it.)
53313     bool PreferKOT = Subtarget.preferMaskRegisters();
53314     bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
53315 
53316     EVT VecVT = MVT::v16i8;
53317     EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
53318     if (OpSize == 256) {
53319       VecVT = MVT::v32i8;
53320       CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
53321     }
53322     EVT CastVT = VecVT;
53323     bool NeedsAVX512FCast = false;
53324     if (OpSize == 512 || NeedZExt) {
53325       if (Subtarget.hasBWI()) {
53326         VecVT = MVT::v64i8;
53327         CmpVT = MVT::v64i1;
53328         if (OpSize == 512)
53329           CastVT = VecVT;
53330       } else {
53331         VecVT = MVT::v16i32;
53332         CmpVT = MVT::v16i1;
53333         CastVT = OpSize == 512 ? VecVT :
53334                  OpSize == 256 ? MVT::v8i32 : MVT::v4i32;
53335         NeedsAVX512FCast = true;
53336       }
53337     }
53338 
53339     auto ScalarToVector = [&](SDValue X) -> SDValue {
53340       bool TmpZext = false;
53341       EVT TmpCastVT = CastVT;
53342       if (X.getOpcode() == ISD::ZERO_EXTEND) {
53343         SDValue OrigX = X.getOperand(0);
53344         unsigned OrigSize = OrigX.getScalarValueSizeInBits();
53345         if (OrigSize < OpSize) {
53346           if (OrigSize == 128) {
53347             TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
53348             X = OrigX;
53349             TmpZext = true;
53350           } else if (OrigSize == 256) {
53351             TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
53352             X = OrigX;
53353             TmpZext = true;
53354           }
53355         }
53356       }
53357       X = DAG.getBitcast(TmpCastVT, X);
53358       if (!NeedZExt && !TmpZext)
53359         return X;
53360       return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
53361                          DAG.getConstant(0, DL, VecVT), X,
53362                          DAG.getVectorIdxConstant(0, DL));
53363     };
53364 
53365     SDValue Cmp;
53366     if (IsOrXorXorTreeCCZero) {
53367       // This is a bitwise-combined equality comparison of 2 pairs of vectors:
53368       // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
53369       // Use 2 vector equality compares and 'and' the results before doing a
53370       // MOVMSK.
53371       Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
53372     } else {
53373       SDValue VecX = ScalarToVector(X);
53374       SDValue VecY = ScalarToVector(Y);
53375       if (VecVT != CmpVT) {
53376         Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
53377       } else if (HasPT) {
53378         Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
53379       } else {
53380         Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
53381       }
53382     }
53383     // AVX512 should emit a setcc that will lower to kortest.
53384     if (VecVT != CmpVT) {
53385       EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 :
53386                    CmpVT == MVT::v32i1 ? MVT::i32 : MVT::i16;
53387       return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
53388                           DAG.getConstant(0, DL, KRegVT), CC);
53389     }
53390     if (HasPT) {
53391       SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
53392                                      Cmp);
53393       SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
53394       X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
53395       SDValue X86SetCC = getSETCC(X86CC, PT, DL, DAG);
53396       return DAG.getNode(ISD::TRUNCATE, DL, VT, X86SetCC.getValue(0));
53397     }
53398     // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
53399     // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
53400     // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
53401     assert(Cmp.getValueType() == MVT::v16i8 &&
53402            "Non 128-bit vector on pre-SSE41 target");
53403     SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
53404     SDValue FFFFs = DAG.getConstant(0xFFFF, DL, MVT::i32);
53405     return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
53406   }
53407 
53408   return SDValue();
53409 }
53410 
53411 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
53412                             TargetLowering::DAGCombinerInfo &DCI,
53413                             const X86Subtarget &Subtarget) {
53414   const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
53415   const SDValue LHS = N->getOperand(0);
53416   const SDValue RHS = N->getOperand(1);
53417   EVT VT = N->getValueType(0);
53418   EVT OpVT = LHS.getValueType();
53419   SDLoc DL(N);
53420 
53421   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
53422     if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
53423       return V;
53424 
53425     if (VT == MVT::i1 && isNullConstant(RHS)) {
53426       SDValue X86CC;
53427       if (SDValue V =
53428               MatchVectorAllZeroTest(LHS, CC, DL, Subtarget, DAG, X86CC))
53429         return DAG.getNode(ISD::TRUNCATE, DL, VT,
53430                            DAG.getNode(X86ISD::SETCC, DL, MVT::i8, X86CC, V));
53431     }
53432 
53433     if (OpVT.isScalarInteger()) {
53434       // cmpeq(or(X,Y),X) --> cmpeq(and(~X,Y),0)
53435       // cmpne(or(X,Y),X) --> cmpne(and(~X,Y),0)
53436       auto MatchOrCmpEq = [&](SDValue N0, SDValue N1) {
53437         if (N0.getOpcode() == ISD::OR && N0->hasOneUse()) {
53438           if (N0.getOperand(0) == N1)
53439             return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
53440                                N0.getOperand(1));
53441           if (N0.getOperand(1) == N1)
53442             return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
53443                                N0.getOperand(0));
53444         }
53445         return SDValue();
53446       };
53447       if (SDValue AndN = MatchOrCmpEq(LHS, RHS))
53448         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
53449       if (SDValue AndN = MatchOrCmpEq(RHS, LHS))
53450         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
53451 
53452       // cmpeq(and(X,Y),Y) --> cmpeq(and(~X,Y),0)
53453       // cmpne(and(X,Y),Y) --> cmpne(and(~X,Y),0)
53454       auto MatchAndCmpEq = [&](SDValue N0, SDValue N1) {
53455         if (N0.getOpcode() == ISD::AND && N0->hasOneUse()) {
53456           if (N0.getOperand(0) == N1)
53457             return DAG.getNode(ISD::AND, DL, OpVT, N1,
53458                                DAG.getNOT(DL, N0.getOperand(1), OpVT));
53459           if (N0.getOperand(1) == N1)
53460             return DAG.getNode(ISD::AND, DL, OpVT, N1,
53461                                DAG.getNOT(DL, N0.getOperand(0), OpVT));
53462         }
53463         return SDValue();
53464       };
53465       if (SDValue AndN = MatchAndCmpEq(LHS, RHS))
53466         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
53467       if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
53468         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
53469 
53470       // cmpeq(trunc(x),0) --> cmpeq(x,0)
53471       // cmpne(trunc(x),0) --> cmpne(x,0)
53472       // iff x upper bits are zero.
53473       // TODO: Add support for RHS to be truncate as well?
53474       if (LHS.getOpcode() == ISD::TRUNCATE &&
53475           LHS.getOperand(0).getScalarValueSizeInBits() >= 32 &&
53476           isNullConstant(RHS) && !DCI.isBeforeLegalize()) {
53477         EVT SrcVT = LHS.getOperand(0).getValueType();
53478         APInt UpperBits = APInt::getBitsSetFrom(SrcVT.getScalarSizeInBits(),
53479                                                 OpVT.getScalarSizeInBits());
53480         const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53481         if (DAG.MaskedValueIsZero(LHS.getOperand(0), UpperBits) &&
53482             TLI.isTypeLegal(LHS.getOperand(0).getValueType()))
53483           return DAG.getSetCC(DL, VT, LHS.getOperand(0),
53484                               DAG.getConstant(0, DL, SrcVT), CC);
53485       }
53486     }
53487   }
53488 
53489   if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
53490       (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
53491     // Using temporaries to avoid messing up operand ordering for later
53492     // transformations if this doesn't work.
53493     SDValue Op0 = LHS;
53494     SDValue Op1 = RHS;
53495     ISD::CondCode TmpCC = CC;
53496     // Put build_vector on the right.
53497     if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
53498       std::swap(Op0, Op1);
53499       TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
53500     }
53501 
53502     bool IsSEXT0 =
53503         (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
53504         (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
53505     bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
53506 
53507     if (IsSEXT0 && IsVZero1) {
53508       assert(VT == Op0.getOperand(0).getValueType() &&
53509              "Unexpected operand type");
53510       if (TmpCC == ISD::SETGT)
53511         return DAG.getConstant(0, DL, VT);
53512       if (TmpCC == ISD::SETLE)
53513         return DAG.getConstant(1, DL, VT);
53514       if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
53515         return DAG.getNOT(DL, Op0.getOperand(0), VT);
53516 
53517       assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
53518              "Unexpected condition code!");
53519       return Op0.getOperand(0);
53520     }
53521   }
53522 
53523   // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
53524   // pre-promote its result type since vXi1 vectors don't get promoted
53525   // during type legalization.
53526   // NOTE: The element count check is to ignore operand types that need to
53527   // go through type promotion to a 128-bit vector.
53528   if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
53529       VT.getVectorElementType() == MVT::i1 &&
53530       (OpVT.getVectorElementType() == MVT::i8 ||
53531        OpVT.getVectorElementType() == MVT::i16)) {
53532     SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
53533     return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
53534   }
53535 
53536   // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
53537   // to avoid scalarization via legalization because v4i32 is not a legal type.
53538   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
53539       LHS.getValueType() == MVT::v4f32)
53540     return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
53541 
53542   // X pred 0.0 --> X pred -X
53543   // If the negation of X already exists, use it in the comparison. This removes
53544   // the need to materialize 0.0 and allows matching to SSE's MIN/MAX
53545   // instructions in patterns with a 'select' node.
53546   if (isNullFPScalarOrVectorConst(RHS)) {
53547     SDVTList FNegVT = DAG.getVTList(OpVT);
53548     if (SDNode *FNeg = DAG.getNodeIfExists(ISD::FNEG, FNegVT, {LHS}))
53549       return DAG.getSetCC(DL, VT, LHS, SDValue(FNeg, 0), CC);
53550   }
53551 
53552   return SDValue();
53553 }
53554 
53555 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
53556                              TargetLowering::DAGCombinerInfo &DCI,
53557                              const X86Subtarget &Subtarget) {
53558   SDValue Src = N->getOperand(0);
53559   MVT SrcVT = Src.getSimpleValueType();
53560   MVT VT = N->getSimpleValueType(0);
53561   unsigned NumBits = VT.getScalarSizeInBits();
53562   unsigned NumElts = SrcVT.getVectorNumElements();
53563   unsigned NumBitsPerElt = SrcVT.getScalarSizeInBits();
53564   assert(VT == MVT::i32 && NumElts <= NumBits && "Unexpected MOVMSK types");
53565 
53566   // Perform constant folding.
53567   APInt UndefElts;
53568   SmallVector<APInt, 32> EltBits;
53569   if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits)) {
53570     APInt Imm(32, 0);
53571     for (unsigned Idx = 0; Idx != NumElts; ++Idx)
53572       if (!UndefElts[Idx] && EltBits[Idx].isNegative())
53573         Imm.setBit(Idx);
53574 
53575     return DAG.getConstant(Imm, SDLoc(N), VT);
53576   }
53577 
53578   // Look through int->fp bitcasts that don't change the element width.
53579   unsigned EltWidth = SrcVT.getScalarSizeInBits();
53580   if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
53581       Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
53582     return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
53583 
53584   // Fold movmsk(not(x)) -> not(movmsk(x)) to improve folding of movmsk results
53585   // with scalar comparisons.
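  // e.g. (illustrative) for v4f32, movmsk(not(x)) == movmsk(x) ^ 0xF, i.e. an
  // XOR with a mask covering the low NumElts result bits.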
53586   if (SDValue NotSrc = IsNOT(Src, DAG)) {
53587     SDLoc DL(N);
53588     APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
53589     NotSrc = DAG.getBitcast(SrcVT, NotSrc);
53590     return DAG.getNode(ISD::XOR, DL, VT,
53591                        DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
53592                        DAG.getConstant(NotMask, DL, VT));
53593   }
53594 
53595   // Fold movmsk(icmp_sgt(x,-1)) -> not(movmsk(x)) to improve folding of movmsk
53596   // results with scalar comparisons.
53597   if (Src.getOpcode() == X86ISD::PCMPGT &&
53598       ISD::isBuildVectorAllOnes(Src.getOperand(1).getNode())) {
53599     SDLoc DL(N);
53600     APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
53601     return DAG.getNode(ISD::XOR, DL, VT,
53602                        DAG.getNode(X86ISD::MOVMSK, DL, VT, Src.getOperand(0)),
53603                        DAG.getConstant(NotMask, DL, VT));
53604   }
53605 
53606   // Fold movmsk(icmp_eq(and(x,c1),0)) -> movmsk(not(shl(x,c2)))
53607   // iff pow2splat(c1).
53608   if (Src.getOpcode() == X86ISD::PCMPEQ &&
53609       Src.getOperand(0).getOpcode() == ISD::AND &&
53610       ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
53611     SDValue LHS = Src.getOperand(0).getOperand(0);
53612     SDValue RHS = Src.getOperand(0).getOperand(1);
53613     KnownBits KnownRHS = DAG.computeKnownBits(RHS);
53614     if (KnownRHS.isConstant() && KnownRHS.getConstant().isPowerOf2()) {
53615       SDLoc DL(N);
53616       MVT ShiftVT = SrcVT;
53617       if (ShiftVT.getScalarType() == MVT::i8) {
53618         // vXi8 shifts - we only care about the sign bit so we can use PSLLW.
53619         ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
53620         LHS = DAG.getBitcast(ShiftVT, LHS);
53621       }
53622       unsigned ShiftAmt = KnownRHS.getConstant().countLeadingZeros();
53623       LHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT, LHS,
53624                                        ShiftAmt, DAG);
53625       LHS = DAG.getNOT(DL, DAG.getBitcast(SrcVT, LHS), SrcVT);
53626       return DAG.getNode(X86ISD::MOVMSK, DL, VT, LHS);
53627     }
53628   }
53629 
53630   // Simplify the inputs.
53631   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53632   APInt DemandedMask(APInt::getAllOnes(NumBits));
53633   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
53634     return SDValue(N, 0);
53635 
53636   return SDValue();
53637 }
53638 
53639 static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
53640                                        TargetLowering::DAGCombinerInfo &DCI,
53641                                        const X86Subtarget &Subtarget) {
53642   auto *MemOp = cast<X86MaskedGatherScatterSDNode>(N);
53643   SDValue BasePtr = MemOp->getBasePtr();
53644   SDValue Index = MemOp->getIndex();
53645   SDValue Scale = MemOp->getScale();
53646   SDValue Mask = MemOp->getMask();
53647 
53648   // Attempt to fold an index scale into the scale value directly.
53649   // For smaller indices, implicit sext is performed BEFORE scale, preventing
53650   // this fold under most circumstances.
53651   // TODO: Move this into X86DAGToDAGISel::matchVectorAddressRecursively?
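  // e.g. an index of vshli(x,1) (or add(x,x)) with scale 2 becomes an index of
  // x with scale 4, provided the new scale is still a power of 2 and <= 8.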
53652   if ((Index.getOpcode() == X86ISD::VSHLI ||
53653        (Index.getOpcode() == ISD::ADD &&
53654         Index.getOperand(0) == Index.getOperand(1))) &&
53655       isa<ConstantSDNode>(Scale) &&
53656       BasePtr.getScalarValueSizeInBits() == Index.getScalarValueSizeInBits()) {
53657     unsigned ShiftAmt =
53658         Index.getOpcode() == ISD::ADD ? 1 : Index.getConstantOperandVal(1);
53659     uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
53660     uint64_t NewScaleAmt = ScaleAmt * (1ULL << ShiftAmt);
53661     if (isPowerOf2_64(NewScaleAmt) && NewScaleAmt <= 8) {
53662       SDValue NewIndex = Index.getOperand(0);
53663       SDValue NewScale =
53664           DAG.getTargetConstant(NewScaleAmt, SDLoc(N), Scale.getValueType());
53665       if (N->getOpcode() == X86ISD::MGATHER)
53666         return getAVX2GatherNode(N->getOpcode(), SDValue(N, 0), DAG,
53667                                  MemOp->getOperand(1), Mask,
53668                                  MemOp->getBasePtr(), NewIndex, NewScale,
53669                                  MemOp->getChain(), Subtarget);
53670       if (N->getOpcode() == X86ISD::MSCATTER)
53671         return getScatterNode(N->getOpcode(), SDValue(N, 0), DAG,
53672                               MemOp->getOperand(1), Mask, MemOp->getBasePtr(),
53673                               NewIndex, NewScale, MemOp->getChain(), Subtarget);
53674     }
53675   }
53676 
53677   // With vector masks we only demand the upper bit of the mask.
53678   if (Mask.getScalarValueSizeInBits() != 1) {
53679     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53680     APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
53681     if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
53682       if (N->getOpcode() != ISD::DELETED_NODE)
53683         DCI.AddToWorklist(N);
53684       return SDValue(N, 0);
53685     }
53686   }
53687 
53688   return SDValue();
53689 }
53690 
53691 static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
53692                                     SDValue Index, SDValue Base, SDValue Scale,
53693                                     SelectionDAG &DAG) {
53694   SDLoc DL(GorS);
53695 
53696   if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
53697     SDValue Ops[] = { Gather->getChain(), Gather->getPassThru(),
53698                       Gather->getMask(), Base, Index, Scale };
53699     return DAG.getMaskedGather(Gather->getVTList(),
53700                                Gather->getMemoryVT(), DL, Ops,
53701                                Gather->getMemOperand(),
53702                                Gather->getIndexType(),
53703                                Gather->getExtensionType());
53704   }
53705   auto *Scatter = cast<MaskedScatterSDNode>(GorS);
53706   SDValue Ops[] = { Scatter->getChain(), Scatter->getValue(),
53707                     Scatter->getMask(), Base, Index, Scale };
53708   return DAG.getMaskedScatter(Scatter->getVTList(),
53709                               Scatter->getMemoryVT(), DL,
53710                               Ops, Scatter->getMemOperand(),
53711                               Scatter->getIndexType(),
53712                               Scatter->isTruncatingStore());
53713 }
53714 
53715 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
53716                                     TargetLowering::DAGCombinerInfo &DCI) {
53717   SDLoc DL(N);
53718   auto *GorS = cast<MaskedGatherScatterSDNode>(N);
53719   SDValue Index = GorS->getIndex();
53720   SDValue Base = GorS->getBasePtr();
53721   SDValue Scale = GorS->getScale();
53722 
53723   if (DCI.isBeforeLegalize()) {
53724     unsigned IndexWidth = Index.getScalarValueSizeInBits();
53725 
53726     // Shrink constant indices if they are larger than 32-bits.
53727     // Only do this before legalize types since v2i64 could become v2i32.
53728     // FIXME: We could check that the type is legal if we're after legalize
53729     // types, but then we would need to construct test cases where that happens.
53730     // FIXME: We could support more than just constant vectors, but we need to be
53731     // careful with costing. A truncate that can be optimized out would be fine.
53732     // Otherwise we might only want to create a truncate if it avoids a split.
53733     if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
53734       if (BV->isConstant() && IndexWidth > 32 &&
53735           DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
53736         EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
53737         Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
53738         return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53739       }
53740     }
53741 
53742     // Shrink any sign/zero extends from 32 or smaller to larger than 32 if
53743     // there are sufficient sign bits. Only do this before legalize types to
53744     // avoid creating illegal types in truncate.
53745     if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
53746          Index.getOpcode() == ISD::ZERO_EXTEND) &&
53747         IndexWidth > 32 &&
53748         Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
53749         DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
53750       EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
53751       Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
53752       return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53753     }
53754   }
53755 
53756   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53757   EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
53758   // Try to move splat constant adders from the index operand to the base
53759   // pointer operand, taking care to multiply by the scale. We can only do
53760   // this when the index element type is the same as the pointer type.
53761   // Otherwise we need to be sure the math doesn't wrap before the scale.
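  // e.g. gather(Base, add(Index, splat(C)), Scale)
  //        -> gather(Base + C * Scale, Index, Scale)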
53762   if (Index.getOpcode() == ISD::ADD &&
53763       Index.getValueType().getVectorElementType() == PtrVT &&
53764       isa<ConstantSDNode>(Scale)) {
53765     uint64_t ScaleAmt = cast<ConstantSDNode>(Scale)->getZExtValue();
53766     if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
53767       BitVector UndefElts;
53768       if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
53769         // FIXME: Allow non-constant?
53770         if (UndefElts.none()) {
53771           // Apply the scale.
53772           APInt Adder = C->getAPIntValue() * ScaleAmt;
53773           // Add it to the existing base.
53774           Base = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
53775                              DAG.getConstant(Adder, DL, PtrVT));
53776           Index = Index.getOperand(0);
53777           return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53778         }
53779       }
53780 
53781       // It's also possible base is just a constant. In that case, just
53782       // replace it with 0 and move the displacement into the index.
53783       if (BV->isConstant() && isa<ConstantSDNode>(Base) &&
53784           isOneConstant(Scale)) {
53785         SDValue Splat = DAG.getSplatBuildVector(Index.getValueType(), DL, Base);
53786         // Combine the constant build_vector and the constant base.
53787         Splat = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
53788                             Index.getOperand(1), Splat);
53789         // Add to the LHS of the original Index add.
53790         Index = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
53791                             Index.getOperand(0), Splat);
53792         Base = DAG.getConstant(0, DL, Base.getValueType());
53793         return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53794       }
53795     }
53796   }
53797 
53798   if (DCI.isBeforeLegalizeOps()) {
53799     unsigned IndexWidth = Index.getScalarValueSizeInBits();
53800 
53801     // Make sure the index is either i32 or i64
53802     if (IndexWidth != 32 && IndexWidth != 64) {
53803       MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
53804       EVT IndexVT = Index.getValueType().changeVectorElementType(EltVT);
53805       Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
53806       return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53807     }
53808   }
53809 
53810   // With vector masks we only demand the upper bit of the mask.
53811   SDValue Mask = GorS->getMask();
53812   if (Mask.getScalarValueSizeInBits() != 1) {
53813     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53814     APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
53815     if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
53816       if (N->getOpcode() != ISD::DELETED_NODE)
53817         DCI.AddToWorklist(N);
53818       return SDValue(N, 0);
53819     }
53820   }
53821 
53822   return SDValue();
53823 }
53824 
53825 // Optimize  RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
53826 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
53827                                const X86Subtarget &Subtarget) {
53828   SDLoc DL(N);
53829   X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
53830   SDValue EFLAGS = N->getOperand(1);
53831 
53832   // Try to simplify the EFLAGS and condition code operands.
53833   if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
53834     return getSETCC(CC, Flags, DL, DAG);
53835 
53836   return SDValue();
53837 }
53838 
53839 /// Optimize branch condition evaluation.
53840 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
53841                              const X86Subtarget &Subtarget) {
53842   SDLoc DL(N);
53843   SDValue EFLAGS = N->getOperand(3);
53844   X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
53845 
53846   // Try to simplify the EFLAGS and condition code operands.
53847   // Make sure to not keep references to operands, as combineSetCCEFLAGS can
53848   // RAUW them under us.
53849   if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
53850     SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
53851     return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
53852                        N->getOperand(1), Cond, Flags);
53853   }
53854 
53855   return SDValue();
53856 }
53857 
53858 // TODO: Could we move this to DAGCombine?
53859 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
53860                                                   SelectionDAG &DAG) {
53861   // Take advantage of vector comparisons (etc.) producing 0 or -1 in each lane
53862   // to optimize away operation when it's from a constant.
53863   //
53864   // The general transformation is:
53865   //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
53866   //       AND(VECTOR_CMP(x,y), constant2)
53867   //    constant2 = UNARYOP(constant)
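  //
  // e.g. sint_to_fp(and(vector_cmp(x,y), <4 x i32> <1,1,1,1>)) becomes
  //      and(vector_cmp(x,y), bitcast(<4 x float> <1.0,1.0,1.0,1.0>)),
  // which works because each compare lane is either all-ones or all-zeros.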
53868 
53869   // Early exit if this isn't a vector operation, the operand of the
53870   // unary operation isn't a bitwise AND, or if the sizes of the operations
53871   // aren't the same.
53872   EVT VT = N->getValueType(0);
53873   bool IsStrict = N->isStrictFPOpcode();
53874   unsigned NumEltBits = VT.getScalarSizeInBits();
53875   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53876   if (!VT.isVector() || Op0.getOpcode() != ISD::AND ||
53877       DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits ||
53878       VT.getSizeInBits() != Op0.getValueSizeInBits())
53879     return SDValue();
53880 
53881   // Now check that the other operand of the AND is a constant. We could
53882   // make the transformation for non-constant splats as well, but it's unclear
53883   // that would be a benefit as it would not eliminate any operations, just
53884   // perform one more step in scalar code before moving to the vector unit.
53885   if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
53886     // Bail out if the vector isn't a constant.
53887     if (!BV->isConstant())
53888       return SDValue();
53889 
53890     // Everything checks out. Build up the new and improved node.
53891     SDLoc DL(N);
53892     EVT IntVT = BV->getValueType(0);
53893     // Create a new constant of the appropriate type for the transformed
53894     // DAG.
53895     SDValue SourceConst;
53896     if (IsStrict)
53897       SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
53898                                 {N->getOperand(0), SDValue(BV, 0)});
53899     else
53900       SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
53901     // The AND node needs bitcasts to/from an integer vector type around it.
53902     SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
53903     SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
53904                                  MaskConst);
53905     SDValue Res = DAG.getBitcast(VT, NewAnd);
53906     if (IsStrict)
53907       return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
53908     return Res;
53909   }
53910 
53911   return SDValue();
53912 }
53913 
53914 /// If we are converting a value to floating-point, try to replace scalar
53915 /// truncate of an extracted vector element with a bitcast. This tries to keep
53916 /// the sequence on XMM registers rather than moving between vector and GPRs.
53917 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
53918   // TODO: This is currently only used by combineSIntToFP, but it is generalized
53919   //       to allow being called by any similar cast opcode.
53920   // TODO: Consider merging this into lowering: vectorizeExtractedCast().
53921   SDValue Trunc = N->getOperand(0);
53922   if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
53923     return SDValue();
53924 
53925   SDValue ExtElt = Trunc.getOperand(0);
53926   if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
53927       !isNullConstant(ExtElt.getOperand(1)))
53928     return SDValue();
53929 
53930   EVT TruncVT = Trunc.getValueType();
53931   EVT SrcVT = ExtElt.getValueType();
53932   unsigned DestWidth = TruncVT.getSizeInBits();
53933   unsigned SrcWidth = SrcVT.getSizeInBits();
53934   if (SrcWidth % DestWidth != 0)
53935     return SDValue();
53936 
53937   // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
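  // e.g. sitofp(trunc(extractelt(v2i64 X, 0)) to i32)
  //        -> sitofp(extractelt(bitcast X to v4i32, 0))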
53938   EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
53939   unsigned VecWidth = SrcVecVT.getSizeInBits();
53940   unsigned NumElts = VecWidth / DestWidth;
53941   EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
53942   SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
53943   SDLoc DL(N);
53944   SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
53945                                   BitcastVec, ExtElt.getOperand(1));
53946   return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
53947 }
53948 
53949 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
53950                                const X86Subtarget &Subtarget) {
53951   bool IsStrict = N->isStrictFPOpcode();
53952   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53953   EVT VT = N->getValueType(0);
53954   EVT InVT = Op0.getValueType();
53955 
53956   // UINT_TO_FP(vXi1~15)  -> UINT_TO_FP(ZEXT(vXi1~15  to vXi16))
53957   // UINT_TO_FP(vXi17~31) -> UINT_TO_FP(ZEXT(vXi17~31 to vXi32))
53958   // UINT_TO_FP(vXi33~63) -> UINT_TO_FP(ZEXT(vXi33~63 to vXi64))
53959   if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
53960     unsigned ScalarSize = InVT.getScalarSizeInBits();
53961     if (ScalarSize == 16 || ScalarSize == 32 || ScalarSize >= 64)
53962       return SDValue();
53963     SDLoc dl(N);
53964     EVT DstVT = EVT::getVectorVT(*DAG.getContext(),
53965                                  ScalarSize < 16   ? MVT::i16
53966                                  : ScalarSize < 32 ? MVT::i32
53967                                                    : MVT::i64,
53968                                  InVT.getVectorNumElements());
53969     SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
53970     if (IsStrict)
53971       return DAG.getNode(ISD::STRICT_UINT_TO_FP, dl, {VT, MVT::Other},
53972                          {N->getOperand(0), P});
53973     return DAG.getNode(ISD::UINT_TO_FP, dl, VT, P);
53974   }
53975 
53976   // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
53977   // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
53978   // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
53979   if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
53980       VT.getScalarType() != MVT::f16) {
53981     SDLoc dl(N);
53982     EVT DstVT = InVT.changeVectorElementType(MVT::i32);
53983     SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
53984 
53985     // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
53986     if (IsStrict)
53987       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53988                          {N->getOperand(0), P});
53989     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53990   }
53991 
53992   // Since UINT_TO_FP is legal (it's marked custom), the DAG combiner won't
53993   // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
53994   // the optimization here.
53995   if (DAG.SignBitIsZero(Op0)) {
53996     if (IsStrict)
53997       return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
53998                          {N->getOperand(0), Op0});
53999     return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
54000   }
54001 
54002   return SDValue();
54003 }
54004 
54005 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
54006                                TargetLowering::DAGCombinerInfo &DCI,
54007                                const X86Subtarget &Subtarget) {
54008   // First try to optimize away the conversion entirely when it's
54009   // conditionally from a constant. Vectors only.
54010   bool IsStrict = N->isStrictFPOpcode();
54011   if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
54012     return Res;
54013 
54014   // Now move on to more general possibilities.
54015   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
54016   EVT VT = N->getValueType(0);
54017   EVT InVT = Op0.getValueType();
54018 
54019   // SINT_TO_FP(vXi1~15)  -> SINT_TO_FP(SEXT(vXi1~15  to vXi16))
54020   // SINT_TO_FP(vXi17~31) -> SINT_TO_FP(SEXT(vXi17~31 to vXi32))
54021   // SINT_TO_FP(vXi33~63) -> SINT_TO_FP(SEXT(vXi33~63 to vXi64))
54022   if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
54023     unsigned ScalarSize = InVT.getScalarSizeInBits();
54024     if (ScalarSize == 16 || ScalarSize == 32 || ScalarSize >= 64)
54025       return SDValue();
54026     SDLoc dl(N);
54027     EVT DstVT = EVT::getVectorVT(*DAG.getContext(),
54028                                  ScalarSize < 16   ? MVT::i16
54029                                  : ScalarSize < 32 ? MVT::i32
54030                                                    : MVT::i64,
54031                                  InVT.getVectorNumElements());
54032     SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
54033     if (IsStrict)
54034       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
54035                          {N->getOperand(0), P});
54036     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
54037   }
54038 
54039   // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
54040   // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
54041   // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
54042   if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
54043       VT.getScalarType() != MVT::f16) {
54044     SDLoc dl(N);
54045     EVT DstVT = InVT.changeVectorElementType(MVT::i32);
54046     SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
54047     if (IsStrict)
54048       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
54049                          {N->getOperand(0), P});
54050     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
54051   }
54052 
54053   // Without AVX512DQ we only support i64 to float scalar conversion. For both
54054   // vectors and scalars, see if we know that the upper bits are all the sign
54055   // bit, in which case we can truncate the input to i32 and convert from that.
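  // e.g. an i64 source with at least 33 sign bits fits in a signed i32, so
  // sitofp(i64 X) can be rewritten as sitofp(trunc(X to i32)).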
54056   if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
54057     unsigned BitWidth = InVT.getScalarSizeInBits();
54058     unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
54059     if (NumSignBits >= (BitWidth - 31)) {
54060       EVT TruncVT = MVT::i32;
54061       if (InVT.isVector())
54062         TruncVT = InVT.changeVectorElementType(TruncVT);
54063       SDLoc dl(N);
54064       if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
54065         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
54066         if (IsStrict)
54067           return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
54068                              {N->getOperand(0), Trunc});
54069         return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
54070       }
54071       // If we're after legalize and the type is v2i32 we need to shuffle and
54072       // use CVTSI2P.
54073       assert(InVT == MVT::v2i64 && "Unexpected VT!");
54074       SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
54075       SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
54076                                           { 0, 2, -1, -1 });
54077       if (IsStrict)
54078         return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
54079                            {N->getOperand(0), Shuf});
54080       return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
54081     }
54082   }
54083 
54084   // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
54085   // a 32-bit target where SSE doesn't support i64->FP operations.
54086   if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
54087       Op0.getOpcode() == ISD::LOAD) {
54088     LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
54089 
54090     // This transformation is not supported if the result type is f16 or f128.
54091     if (VT == MVT::f16 || VT == MVT::f128)
54092       return SDValue();
54093 
54094     // If we have AVX512DQ we can use packed conversion instructions unless
54095     // the VT is f80.
54096     if (Subtarget.hasDQI() && VT != MVT::f80)
54097       return SDValue();
54098 
54099     if (Ld->isSimple() && !VT.isVector() && ISD::isNormalLoad(Op0.getNode()) &&
54100         Op0.hasOneUse() && !Subtarget.is64Bit() && InVT == MVT::i64) {
54101       std::pair<SDValue, SDValue> Tmp =
54102           Subtarget.getTargetLowering()->BuildFILD(
54103               VT, InVT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(),
54104               Ld->getPointerInfo(), Ld->getOriginalAlign(), DAG);
54105       DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
54106       return Tmp.first;
54107     }
54108   }
54109 
54110   if (IsStrict)
54111     return SDValue();
54112 
54113   if (SDValue V = combineToFPTruncExtElt(N, DAG))
54114     return V;
54115 
54116   return SDValue();
54117 }
54118 
54119 static bool needCarryOrOverflowFlag(SDValue Flags) {
54120   assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
54121 
54122   for (const SDNode *User : Flags->uses()) {
54123     X86::CondCode CC;
54124     switch (User->getOpcode()) {
54125     default:
54126       // Be conservative.
54127       return true;
54128     case X86ISD::SETCC:
54129     case X86ISD::SETCC_CARRY:
54130       CC = (X86::CondCode)User->getConstantOperandVal(0);
54131       break;
54132     case X86ISD::BRCOND:
54133     case X86ISD::CMOV:
54134       CC = (X86::CondCode)User->getConstantOperandVal(2);
54135       break;
54136     }
54137 
54138     switch (CC) {
54139     default: break;
54140     case X86::COND_A: case X86::COND_AE:
54141     case X86::COND_B: case X86::COND_BE:
54142     case X86::COND_O: case X86::COND_NO:
54143     case X86::COND_G: case X86::COND_GE:
54144     case X86::COND_L: case X86::COND_LE:
54145       return true;
54146     }
54147   }
54148 
54149   return false;
54150 }
54151 
54152 static bool onlyZeroFlagUsed(SDValue Flags) {
54153   assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
54154 
54155   for (const SDNode *User : Flags->uses()) {
54156     unsigned CCOpNo;
54157     switch (User->getOpcode()) {
54158     default:
54159       // Be conservative.
54160       return false;
54161     case X86ISD::SETCC:
54162     case X86ISD::SETCC_CARRY:
54163       CCOpNo = 0;
54164       break;
54165     case X86ISD::BRCOND:
54166     case X86ISD::CMOV:
54167       CCOpNo = 2;
54168       break;
54169     }
54170 
54171     X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
54172     if (CC != X86::COND_E && CC != X86::COND_NE)
54173       return false;
54174   }
54175 
54176   return true;
54177 }
54178 
54179 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
54180 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
54181 /// with CMP+{ADC, SBB}.
54182 /// Also try (ADD/SUB)+(AND(SRL,1)) bit extraction pattern with BT+{ADC, SBB}.
54183 static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT,
54184                                          SDValue X, SDValue Y,
54185                                          SelectionDAG &DAG,
54186                                          bool ZeroSecondOpOnly = false) {
54187   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
54188     return SDValue();
54189 
54190   // Look through a one-use zext.
54191   if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse())
54192     Y = Y.getOperand(0);
54193 
54194   X86::CondCode CC;
54195   SDValue EFLAGS;
54196   if (Y.getOpcode() == X86ISD::SETCC && Y.hasOneUse()) {
54197     CC = (X86::CondCode)Y.getConstantOperandVal(0);
54198     EFLAGS = Y.getOperand(1);
54199   } else if (Y.getOpcode() == ISD::AND && isOneConstant(Y.getOperand(1)) &&
54200              Y.hasOneUse()) {
54201     EFLAGS = LowerAndToBT(Y, ISD::SETNE, DL, DAG, CC);
54202   }
54203 
54204   if (!EFLAGS)
54205     return SDValue();
54206 
54207   // If X is -1 or 0, then we have an opportunity to avoid constants required in
54208   // the general case below.
54209   auto *ConstantX = dyn_cast<ConstantSDNode>(X);
54210   if (ConstantX && !ZeroSecondOpOnly) {
54211     if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnes()) ||
54212         (IsSub && CC == X86::COND_B && ConstantX->isZero())) {
54213       // This is a complicated way to get -1 or 0 from the carry flag:
54214       // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
54215       //  0 - SETB  -->  0 -  (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
54216       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54217                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
54218                          EFLAGS);
54219     }
54220 
54221     if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnes()) ||
54222         (IsSub && CC == X86::COND_A && ConstantX->isZero())) {
54223       if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
54224           EFLAGS.getValueType().isInteger() &&
54225           !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
54226         // Swap the operands of a SUB, and we have the same pattern as above.
54227         // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
54228         //  0 - SETA  (SUB A, B) -->  0 - SETB  (SUB B, A) --> SUB + SBB
54229         SDValue NewSub = DAG.getNode(
54230             X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
54231             EFLAGS.getOperand(1), EFLAGS.getOperand(0));
54232         SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
54233         return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54234                            DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
54235                            NewEFLAGS);
54236       }
54237     }
54238   }
54239 
54240   if (CC == X86::COND_B) {
54241     // X + SETB Z --> adc X, 0
54242     // X - SETB Z --> sbb X, 0
54243     return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
54244                        DAG.getVTList(VT, MVT::i32), X,
54245                        DAG.getConstant(0, DL, VT), EFLAGS);
54246   }
54247 
54248   if (ZeroSecondOpOnly)
54249     return SDValue();
54250 
54251   if (CC == X86::COND_A) {
54252     // Try to convert COND_A into COND_B in an attempt to facilitate
54253     // materializing "setb reg".
54254     //
54255     // Do not flip "e > c", where "c" is a constant, because Cmp instruction
54256     // cannot take an immediate as its first operand.
54257     //
54258     if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
54259         EFLAGS.getValueType().isInteger() &&
54260         !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
54261       SDValue NewSub =
54262           DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
54263                       EFLAGS.getOperand(1), EFLAGS.getOperand(0));
54264       SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
54265       return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
54266                          DAG.getVTList(VT, MVT::i32), X,
54267                          DAG.getConstant(0, DL, VT), NewEFLAGS);
54268     }
54269   }
54270 
54271   if (CC == X86::COND_AE) {
54272     // X + SETAE --> sbb X, -1
54273     // X - SETAE --> adc X, -1
54274     return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
54275                        DAG.getVTList(VT, MVT::i32), X,
54276                        DAG.getConstant(-1, DL, VT), EFLAGS);
54277   }
54278 
54279   if (CC == X86::COND_BE) {
54280     // X + SETBE --> sbb X, -1
54281     // X - SETBE --> adc X, -1
54282     // Try to convert COND_BE into COND_AE in an attempt to facilitate
54283     // materializing "setae reg".
54284     //
54285     // Do not flip "e <= c", where "c" is a constant, because Cmp instruction
54286     // cannot take an immediate as its first operand.
54287     //
54288     if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
54289         EFLAGS.getValueType().isInteger() &&
54290         !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
54291       SDValue NewSub =
54292           DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
54293                       EFLAGS.getOperand(1), EFLAGS.getOperand(0));
54294       SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
54295       return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
54296                          DAG.getVTList(VT, MVT::i32), X,
54297                          DAG.getConstant(-1, DL, VT), NewEFLAGS);
54298     }
54299   }
54300 
54301   if (CC != X86::COND_E && CC != X86::COND_NE)
54302     return SDValue();
54303 
54304   if (EFLAGS.getOpcode() != X86ISD::CMP || !EFLAGS.hasOneUse() ||
54305       !X86::isZeroNode(EFLAGS.getOperand(1)) ||
54306       !EFLAGS.getOperand(0).getValueType().isInteger())
54307     return SDValue();
54308 
54309   SDValue Z = EFLAGS.getOperand(0);
54310   EVT ZVT = Z.getValueType();
54311 
54312   // If X is -1 or 0, then we have an opportunity to avoid constants required in
54313   // the general case below.
54314   if (ConstantX) {
54315     // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
54316     // fake operands:
54317     //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
54318     // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
54319     if ((IsSub && CC == X86::COND_NE && ConstantX->isZero()) ||
54320         (!IsSub && CC == X86::COND_E && ConstantX->isAllOnes())) {
54321       SDValue Zero = DAG.getConstant(0, DL, ZVT);
54322       SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
54323       SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
54324       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54325                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
54326                          SDValue(Neg.getNode(), 1));
54327     }
54328 
54329     // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
54330     // with fake operands:
54331     //  0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
54332     // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
54333     if ((IsSub && CC == X86::COND_E && ConstantX->isZero()) ||
54334         (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnes())) {
54335       SDValue One = DAG.getConstant(1, DL, ZVT);
54336       SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
54337       SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
54338       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54339                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
54340                          Cmp1.getValue(1));
54341     }
54342   }
54343 
54344   // (cmp Z, 1) sets the carry flag if Z is 0.
54345   SDValue One = DAG.getConstant(1, DL, ZVT);
54346   SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
54347   SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
54348 
54349   // Add the flags type for ADC/SBB nodes.
54350   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
54351 
54352   // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
54353   // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
54354   if (CC == X86::COND_NE)
54355     return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
54356                        DAG.getConstant(-1ULL, DL, VT), Cmp1.getValue(1));
54357 
54358   // X - (Z == 0) --> sub X, (zext(sete  Z, 0)) --> sbb X, 0, (cmp Z, 1)
54359   // X + (Z == 0) --> add X, (zext(sete  Z, 0)) --> adc X, 0, (cmp Z, 1)
54360   return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
54361                      DAG.getConstant(0, DL, VT), Cmp1.getValue(1));
54362 }
54363 
54364 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
54365 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
54366 /// with CMP+{ADC, SBB}.
54367 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
54368   bool IsSub = N->getOpcode() == ISD::SUB;
54369   SDValue X = N->getOperand(0);
54370   SDValue Y = N->getOperand(1);
54371   EVT VT = N->getValueType(0);
54372   SDLoc DL(N);
54373 
54374   if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, X, Y, DAG))
54375     return ADCOrSBB;
54376 
54377   // Commute and try again (negate the result for subtracts).
54378   if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, Y, X, DAG)) {
54379     if (IsSub)
54380       ADCOrSBB =
54381           DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), ADCOrSBB);
54382     return ADCOrSBB;
54383   }
54384 
54385   return SDValue();
54386 }
54387 
54388 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
54389   // Only handle test patterns.
54390   if (!isNullConstant(N->getOperand(1)))
54391     return SDValue();
54392 
54393   // If we have a CMP of a truncated binop, see if we can make a smaller binop
54394   // and use its flags directly.
54395   // TODO: Maybe we should try promoting compares that only use the zero flag
54396   // first if we can prove the upper bits with computeKnownBits?
54397   SDLoc dl(N);
54398   SDValue Op = N->getOperand(0);
54399   EVT VT = Op.getValueType();
54400 
54401   // If we have a constant logical shift that's only used in a comparison
54402   // against zero turn it into an equivalent AND. This allows turning it into
54403   // a TEST instruction later.
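  // e.g. testing (srl X, 8) == 0 on an i32 becomes (X & 0xFFFFFF00) == 0,
  // which isel can later match as a TEST instruction.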
54404   if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
54405       Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
54406       onlyZeroFlagUsed(SDValue(N, 0))) {
54407     unsigned BitWidth = VT.getSizeInBits();
54408     const APInt &ShAmt = Op.getConstantOperandAPInt(1);
54409     if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
54410       unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
54411       APInt Mask = Op.getOpcode() == ISD::SRL
54412                        ? APInt::getHighBitsSet(BitWidth, MaskBits)
54413                        : APInt::getLowBitsSet(BitWidth, MaskBits);
54414       if (Mask.isSignedIntN(32)) {
54415         Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
54416                          DAG.getConstant(Mask, dl, VT));
54417         return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
54418                            DAG.getConstant(0, dl, VT));
54419       }
54420     }
54421   }
54422 
54423   // Peek through any zero-extend if we're only testing for a zero result.
54424   if (Op.getOpcode() == ISD::ZERO_EXTEND && onlyZeroFlagUsed(SDValue(N, 0))) {
54425     SDValue Src = Op.getOperand(0);
54426     EVT SrcVT = Src.getValueType();
54427     if (SrcVT.getScalarSizeInBits() >= 8 &&
54428         DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
54429       return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Src,
54430                          DAG.getConstant(0, dl, SrcVT));
54431   }
54432 
54433   // Look for a truncate.
54434   if (Op.getOpcode() != ISD::TRUNCATE)
54435     return SDValue();
54436 
54437   SDValue Trunc = Op;
54438   Op = Op.getOperand(0);
54439 
54440   // See if we can compare with zero against the truncation source,
54441   // which should help using the Z flag from many ops. Only do this for
54442   // i32 truncated op to prevent partial-reg compares of promoted ops.
54443   EVT OpVT = Op.getValueType();
54444   APInt UpperBits =
54445       APInt::getBitsSetFrom(OpVT.getSizeInBits(), VT.getSizeInBits());
54446   if (OpVT == MVT::i32 && DAG.MaskedValueIsZero(Op, UpperBits) &&
54447       onlyZeroFlagUsed(SDValue(N, 0))) {
54448     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
54449                        DAG.getConstant(0, dl, OpVT));
54450   }
54451 
54452   // After this the truncate and arithmetic op must have a single use.
54453   if (!Trunc.hasOneUse() || !Op.hasOneUse())
54454     return SDValue();
54455 
54456   unsigned NewOpc;
54457   switch (Op.getOpcode()) {
54458   default: return SDValue();
54459   case ISD::AND:
54460     // Skip AND with a constant. We have special handling for AND with an
54461     // immediate during isel to generate TEST instructions.
54462     if (isa<ConstantSDNode>(Op.getOperand(1)))
54463       return SDValue();
54464     NewOpc = X86ISD::AND;
54465     break;
54466   case ISD::OR:  NewOpc = X86ISD::OR;  break;
54467   case ISD::XOR: NewOpc = X86ISD::XOR; break;
54468   case ISD::ADD:
54469     // If the carry or overflow flag is used, we can't truncate.
54470     if (needCarryOrOverflowFlag(SDValue(N, 0)))
54471       return SDValue();
54472     NewOpc = X86ISD::ADD;
54473     break;
54474   case ISD::SUB:
54475     // If the carry or overflow flag is used, we can't truncate.
54476     if (needCarryOrOverflowFlag(SDValue(N, 0)))
54477       return SDValue();
54478     NewOpc = X86ISD::SUB;
54479     break;
54480   }
54481 
54482   // We found an op we can narrow. Truncate its inputs.
54483   SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
54484   SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
54485 
54486   // Use a X86 specific opcode to avoid DAG combine messing with it.
54487   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
54488   Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
54489 
54490   // For AND, keep a CMP so that we can match the test pattern.
54491   if (NewOpc == X86ISD::AND)
54492     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
54493                        DAG.getConstant(0, dl, VT));
54494 
54495   // Return the flags.
54496   return Op.getValue(1);
54497 }
54498 
54499 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
54500                                 TargetLowering::DAGCombinerInfo &DCI) {
54501   assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
54502          "Expected X86ISD::ADD or X86ISD::SUB");
54503 
54504   SDLoc DL(N);
54505   SDValue LHS = N->getOperand(0);
54506   SDValue RHS = N->getOperand(1);
54507   MVT VT = LHS.getSimpleValueType();
54508   bool IsSub = X86ISD::SUB == N->getOpcode();
54509   unsigned GenericOpc = IsSub ? ISD::SUB : ISD::ADD;
54510 
54511   // If we don't use the flag result, simplify back to a generic ADD/SUB.
54512   if (!N->hasAnyUseOfValue(1)) {
54513     SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
54514     return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
54515   }
54516 
54517   // Fold any similar generic ADD/SUB opcodes to reuse this node.
54518   auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
54519     SDValue Ops[] = {N0, N1};
54520     SDVTList VTs = DAG.getVTList(N->getValueType(0));
54521     if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
54522       SDValue Op(N, 0);
54523       if (Negate)
54524         Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
54525       DCI.CombineTo(GenericAddSub, Op);
54526     }
54527   };
54528   MatchGeneric(LHS, RHS, false);
54529   MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
54530 
54531   // TODO: Can we drop the ZeroSecondOpOnly limit? This is to guarantee that the
54532   // EFLAGS result doesn't change.
54533   return combineAddOrSubToADCOrSBB(IsSub, DL, VT, LHS, RHS, DAG,
54534                                    /*ZeroSecondOpOnly*/ true);
54535 }
54536 
54537 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
54538   SDValue LHS = N->getOperand(0);
54539   SDValue RHS = N->getOperand(1);
54540   SDValue BorrowIn = N->getOperand(2);
54541 
54542   if (SDValue Flags = combineCarryThroughADD(BorrowIn, DAG)) {
54543     MVT VT = N->getSimpleValueType(0);
54544     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
54545     return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs, LHS, RHS, Flags);
54546   }
54547 
54548   // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
54549   // iff the flag result is dead.
54550   if (LHS.getOpcode() == ISD::SUB && isNullConstant(RHS) &&
54551       !N->hasAnyUseOfValue(1))
54552     return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), LHS.getOperand(0),
54553                        LHS.getOperand(1), BorrowIn);
54554 
54555   return SDValue();
54556 }
54557 
54558 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
54559 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
54560                           TargetLowering::DAGCombinerInfo &DCI) {
54561   SDValue LHS = N->getOperand(0);
54562   SDValue RHS = N->getOperand(1);
54563   SDValue CarryIn = N->getOperand(2);
54564   auto *LHSC = dyn_cast<ConstantSDNode>(LHS);
54565   auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
54566 
54567   // Canonicalize constant to RHS.
54568   if (LHSC && !RHSC)
54569     return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), RHS, LHS,
54570                        CarryIn);
54571 
54572   // If the LHS and RHS of the ADC node are zero, then it can't overflow and
54573   // the result is either zero or one (depending on the input carry bit).
54574   // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
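  // i.e. adc 0, 0 simply materializes the incoming carry bit:
  // (CF ? -1 : 0) & 1 == CF.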
54575   if (LHSC && RHSC && LHSC->isZero() && RHSC->isZero() &&
54576       // We don't have a good way to replace an EFLAGS use, so only do this when
54577       // dead right now.
54578       SDValue(N, 1).use_empty()) {
54579     SDLoc DL(N);
54580     EVT VT = N->getValueType(0);
54581     SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
54582     SDValue Res1 = DAG.getNode(
54583         ISD::AND, DL, VT,
54584         DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54585                     DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), CarryIn),
54586         DAG.getConstant(1, DL, VT));
54587     return DCI.CombineTo(N, Res1, CarryOut);
54588   }
54589 
54590   // Fold ADC(C1,C2,Carry) -> ADC(0,C1+C2,Carry)
54591   // iff the flag result is dead.
54592   // TODO: Allow flag result if C1+C2 doesn't signed/unsigned overflow.
54593   if (LHSC && RHSC && !LHSC->isZero() && !N->hasAnyUseOfValue(1)) {
54594     SDLoc DL(N);
54595     APInt Sum = LHSC->getAPIntValue() + RHSC->getAPIntValue();
54596     return DAG.getNode(X86ISD::ADC, DL, N->getVTList(),
54597                        DAG.getConstant(0, DL, LHS.getValueType()),
54598                        DAG.getConstant(Sum, DL, LHS.getValueType()), CarryIn);
54599   }
54600 
54601   if (SDValue Flags = combineCarryThroughADD(CarryIn, DAG)) {
54602     MVT VT = N->getSimpleValueType(0);
54603     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
54604     return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs, LHS, RHS, Flags);
54605   }
54606 
54607   // Fold ADC(ADD(X,Y),0,Carry) -> ADC(X,Y,Carry)
54608   // iff the flag result is dead.
54609   if (LHS.getOpcode() == ISD::ADD && RHSC && RHSC->isZero() &&
54610       !N->hasAnyUseOfValue(1))
54611     return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), LHS.getOperand(0),
54612                        LHS.getOperand(1), CarryIn);
54613 
54614   return SDValue();
54615 }
54616 
54617 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
54618                             const SDLoc &DL, EVT VT,
54619                             const X86Subtarget &Subtarget) {
54620   // Example of pattern we try to detect:
54621   // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
54622   //(add (build_vector (extract_elt t, 0),
54623   //                   (extract_elt t, 2),
54624   //                   (extract_elt t, 4),
54625   //                   (extract_elt t, 6)),
54626   //     (build_vector (extract_elt t, 1),
54627   //                   (extract_elt t, 3),
54628   //                   (extract_elt t, 5),
54629   //                   (extract_elt t, 7)))
54630 
54631   if (!Subtarget.hasSSE2())
54632     return SDValue();
54633 
54634   if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
54635       Op1.getOpcode() != ISD::BUILD_VECTOR)
54636     return SDValue();
54637 
54638   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
54639       VT.getVectorNumElements() < 4 ||
54640       !isPowerOf2_32(VT.getVectorNumElements()))
54641     return SDValue();
54642 
54643   // Check if one of Op0,Op1 is of the form:
54644   // (build_vector (extract_elt Mul, 0),
54645   //               (extract_elt Mul, 2),
54646   //               (extract_elt Mul, 4),
54647   //                   ...
54648   // the other is of the form:
54649   // (build_vector (extract_elt Mul, 1),
54650   //               (extract_elt Mul, 3),
54651   //               (extract_elt Mul, 5),
54652   //                   ...
54653   // and identify Mul.
54654   SDValue Mul;
54655   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
54656     SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
54657             Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
54658     // TODO: Be more tolerant to undefs.
54659     if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54660         Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54661         Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54662         Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
54663       return SDValue();
54664     auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
54665     auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
54666     auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
54667     auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
54668     if (!Const0L || !Const1L || !Const0H || !Const1H)
54669       return SDValue();
54670     unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
54671              Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
54672     // Commutativity of mul allows factors of a product to reorder.
54673     if (Idx0L > Idx1L)
54674       std::swap(Idx0L, Idx1L);
54675     if (Idx0H > Idx1H)
54676       std::swap(Idx0H, Idx1H);
54677     // Commutativity of add allows pairs of factors to reorder.
54678     if (Idx0L > Idx0H) {
54679       std::swap(Idx0L, Idx0H);
54680       std::swap(Idx1L, Idx1H);
54681     }
54682     if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
54683         Idx1H != 2 * i + 3)
54684       return SDValue();
54685     if (!Mul) {
54686       // First time an extract_elt's source vector is visited. It must be a MUL
54687       // with twice the number of vector elements as the BUILD_VECTOR.
54688       // Both extracts must be from the same MUL.
54689       Mul = Op0L->getOperand(0);
54690       if (Mul->getOpcode() != ISD::MUL ||
54691           Mul.getValueType().getVectorNumElements() != 2 * e)
54692         return SDValue();
54693     }
54694     // Check that the extract is from the same MUL previously seen.
54695     if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
54696         Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
54697       return SDValue();
54698   }
54699 
54700   // Check if the Mul source can be safely shrunk.
54701   ShrinkMode Mode;
54702   if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
54703       Mode == ShrinkMode::MULU16)
54704     return SDValue();
54705 
54706   EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54707                                  VT.getVectorNumElements() * 2);
54708   SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(0));
54709   SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(1));
54710 
54711   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
54712                          ArrayRef<SDValue> Ops) {
54713     EVT InVT = Ops[0].getValueType();
54714     assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
54715     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
54716                                  InVT.getVectorNumElements() / 2);
54717     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
54718   };
54719   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { N0, N1 }, PMADDBuilder);
54720 }
54721 
54722 // Attempt to turn this pattern into PMADDWD.
54723 // (add (mul (sext (build_vector)), (sext (build_vector))),
54724 //      (mul (sext (build_vector)), (sext (build_vector))))
54725 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
54726                               const SDLoc &DL, EVT VT,
54727                               const X86Subtarget &Subtarget) {
54728   if (!Subtarget.hasSSE2())
54729     return SDValue();
54730 
54731   if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
54732     return SDValue();
54733 
54734   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
54735       VT.getVectorNumElements() < 4 ||
54736       !isPowerOf2_32(VT.getVectorNumElements()))
54737     return SDValue();
54738 
54739   SDValue N00 = N0.getOperand(0);
54740   SDValue N01 = N0.getOperand(1);
54741   SDValue N10 = N1.getOperand(0);
54742   SDValue N11 = N1.getOperand(1);
54743 
54744   // All inputs need to be sign extends.
54745   // TODO: Support ZERO_EXTEND from known positive?
54746   if (N00.getOpcode() != ISD::SIGN_EXTEND ||
54747       N01.getOpcode() != ISD::SIGN_EXTEND ||
54748       N10.getOpcode() != ISD::SIGN_EXTEND ||
54749       N11.getOpcode() != ISD::SIGN_EXTEND)
54750     return SDValue();
54751 
54752   // Peek through the extends.
54753   N00 = N00.getOperand(0);
54754   N01 = N01.getOperand(0);
54755   N10 = N10.getOperand(0);
54756   N11 = N11.getOperand(0);
54757 
54758   // Must be extending from vXi16.
54759   EVT InVT = N00.getValueType();
54760   if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
54761       N10.getValueType() != InVT || N11.getValueType() != InVT)
54762     return SDValue();
54763 
54764   // All inputs should be build_vectors.
54765   if (N00.getOpcode() != ISD::BUILD_VECTOR ||
54766       N01.getOpcode() != ISD::BUILD_VECTOR ||
54767       N10.getOpcode() != ISD::BUILD_VECTOR ||
54768       N11.getOpcode() != ISD::BUILD_VECTOR)
54769     return SDValue();
54770 
54771   // For each result element, we need an odd element from one vector
54772   // multiplied by the odd element of the other vector, added to the even
54773   // element from the same vector multiplied by the even element from the
54774   // other vector. That is, for each element i, the following operation
54775   // is being performed:
54776   //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
54777   SDValue In0, In1;
54778   for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
54779     SDValue N00Elt = N00.getOperand(i);
54780     SDValue N01Elt = N01.getOperand(i);
54781     SDValue N10Elt = N10.getOperand(i);
54782     SDValue N11Elt = N11.getOperand(i);
54783     // TODO: Be more tolerant to undefs.
54784     if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54785         N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54786         N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54787         N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
54788       return SDValue();
54789     auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
54790     auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
54791     auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
54792     auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
54793     if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
54794       return SDValue();
54795     unsigned IdxN00 = ConstN00Elt->getZExtValue();
54796     unsigned IdxN01 = ConstN01Elt->getZExtValue();
54797     unsigned IdxN10 = ConstN10Elt->getZExtValue();
54798     unsigned IdxN11 = ConstN11Elt->getZExtValue();
54799     // Add is commutative so indices can be reordered.
54800     if (IdxN00 > IdxN10) {
54801       std::swap(IdxN00, IdxN10);
54802       std::swap(IdxN01, IdxN11);
54803     }
54804     // N0 indices must be the even elements, N1 indices the next odd elements.
54805     if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
54806         IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
54807       return SDValue();
54808     SDValue N00In = N00Elt.getOperand(0);
54809     SDValue N01In = N01Elt.getOperand(0);
54810     SDValue N10In = N10Elt.getOperand(0);
54811     SDValue N11In = N11Elt.getOperand(0);
54812 
54813     // First time we find an input capture it.
54814     if (!In0) {
54815       In0 = N00In;
54816       In1 = N01In;
54817 
54818       // The input vectors must be at least as wide as the output.
54819       // If they are larger than the output, we extract a subvector below.
54820       if (In0.getValueSizeInBits() < VT.getSizeInBits() ||
54821           In1.getValueSizeInBits() < VT.getSizeInBits())
54822         return SDValue();
54823     }
54824     // Mul is commutative so the input vectors can be in any order.
54825     // Canonicalize to make the compares easier.
54826     if (In0 != N00In)
54827       std::swap(N00In, N01In);
54828     if (In0 != N10In)
54829       std::swap(N10In, N11In);
54830     if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
54831       return SDValue();
54832   }
54833 
54834   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
54835                          ArrayRef<SDValue> Ops) {
54836     EVT OpVT = Ops[0].getValueType();
54837     assert(OpVT.getScalarType() == MVT::i16 &&
54838            "Unexpected scalar element type");
54839     assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
54840     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
54841                                  OpVT.getVectorNumElements() / 2);
54842     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
54843   };
54844 
54845   // If the output is narrower than an input, extract the low part of the input
54846   // vector.
54847   EVT OutVT16 = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54848                                VT.getVectorNumElements() * 2);
54849   if (OutVT16.bitsLT(In0.getValueType())) {
54850     In0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In0,
54851                       DAG.getIntPtrConstant(0, DL));
54852   }
54853   if (OutVT16.bitsLT(In1.getValueType())) {
54854     In1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In1,
54855                       DAG.getIntPtrConstant(0, DL));
54856   }
54857   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
54858                           PMADDBuilder);
54859 }
54860 
54861 // ADD(VPMADDWD(X,Y),VPMADDWD(Z,W)) -> VPMADDWD(SHUFFLE(X,Z), SHUFFLE(Y,W))
54862 // If the upper element in each pair of both VPMADDWD nodes is known zero, then
54863 // we can merge the operand elements and use the implicit add of VPMADDWD.
54864 // TODO: Add support for VPMADDUBSW (which isn't commutable).
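// Illustrative sketch: when the odd lane of each pair in X (or Y) and in Z (or
// W) is known zero, VPMADDWD(X,Y)[i] == X[2i]*Y[2i] and
// VPMADDWD(Z,W)[i] == Z[2i]*W[2i], so interleaving the even lanes as
// SHUFFLE(X,Z) and SHUFFLE(Y,W) lets a single VPMADDWD compute
// X[2i]*Y[2i] + Z[2i]*W[2i], absorbing the outer ADD.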
54865 static SDValue combineAddOfPMADDWD(SelectionDAG &DAG, SDValue N0, SDValue N1,
54866                                    const SDLoc &DL, EVT VT) {
54867   if (N0.getOpcode() != N1.getOpcode() || N0.getOpcode() != X86ISD::VPMADDWD)
54868     return SDValue();
54869 
54870   // TODO: Add 256/512-bit support once VPMADDWD combines with shuffles.
54871   if (VT.getSizeInBits() > 128)
54872     return SDValue();
54873 
54874   unsigned NumElts = VT.getVectorNumElements();
54875   MVT OpVT = N0.getOperand(0).getSimpleValueType();
54876   APInt DemandedBits = APInt::getAllOnes(OpVT.getScalarSizeInBits());
54877   APInt DemandedHiElts = APInt::getSplat(2 * NumElts, APInt(2, 2));
54878 
54879   bool Op0HiZero =
54880       DAG.MaskedValueIsZero(N0.getOperand(0), DemandedBits, DemandedHiElts) ||
54881       DAG.MaskedValueIsZero(N0.getOperand(1), DemandedBits, DemandedHiElts);
54882   bool Op1HiZero =
54883       DAG.MaskedValueIsZero(N1.getOperand(0), DemandedBits, DemandedHiElts) ||
54884       DAG.MaskedValueIsZero(N1.getOperand(1), DemandedBits, DemandedHiElts);
54885 
54886   // TODO: Check for zero lower elements once we have actual codegen that
54887   // creates them.
54888   if (!Op0HiZero || !Op1HiZero)
54889     return SDValue();
54890 
54891   // Create a shuffle mask packing the lower elements from each VPMADDWD.
54892   SmallVector<int> Mask;
54893   for (int i = 0; i != (int)NumElts; ++i) {
54894     Mask.push_back(2 * i);
54895     Mask.push_back(2 * (i + NumElts));
54896   }
54897 
54898   SDValue LHS =
54899       DAG.getVectorShuffle(OpVT, DL, N0.getOperand(0), N1.getOperand(0), Mask);
54900   SDValue RHS =
54901       DAG.getVectorShuffle(OpVT, DL, N0.getOperand(1), N1.getOperand(1), Mask);
54902   return DAG.getNode(X86ISD::VPMADDWD, DL, VT, LHS, RHS);
54903 }
54904 
54905 /// CMOV of constants requires materializing constant operands in registers.
54906 /// Try to fold those constants into an 'add' instruction to reduce instruction
54907 /// count. We do this with CMOV rather than the generic 'select' because there are
54908 /// earlier folds that may be used to turn select-of-constants into logic hacks.
54909 static SDValue pushAddIntoCmovOfConsts(SDNode *N, SelectionDAG &DAG,
54910                                        const X86Subtarget &Subtarget) {
54911   // If an operand is zero, add-of-0 gets simplified away, so that's clearly
54912   // better because we eliminate 1-2 instructions. This transform is still
54913   // an improvement without zero operands because we trade 2 constant moves and
54914   // 1 add for 2 adds (LEAs) as long as the constants can be represented as
54915   // immediate asm operands (fit in 32-bits).
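  // For example (illustrative only): "add (cmov 0, C), x" becomes
  // "cmov x, (add x, C)" with both constant materializations gone, and
  // "add (cmov C1, C2), x" becomes "cmov (add x, C1), (add x, C2)" where each
  // add of a 32-bit immediate can be an LEA.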
54916   auto isSuitableCmov = [](SDValue V) {
54917     if (V.getOpcode() != X86ISD::CMOV || !V.hasOneUse())
54918       return false;
54919     if (!isa<ConstantSDNode>(V.getOperand(0)) ||
54920         !isa<ConstantSDNode>(V.getOperand(1)))
54921       return false;
54922     return isNullConstant(V.getOperand(0)) || isNullConstant(V.getOperand(1)) ||
54923            (V.getConstantOperandAPInt(0).isSignedIntN(32) &&
54924             V.getConstantOperandAPInt(1).isSignedIntN(32));
54925   };
54926 
54927   // Match an appropriate CMOV as the first operand of the add.
54928   SDValue Cmov = N->getOperand(0);
54929   SDValue OtherOp = N->getOperand(1);
54930   if (!isSuitableCmov(Cmov))
54931     std::swap(Cmov, OtherOp);
54932   if (!isSuitableCmov(Cmov))
54933     return SDValue();
54934 
54935   // Don't remove a load folding opportunity for the add. That would neutralize
54936   // any improvements from removing constant materializations.
54937   if (X86::mayFoldLoad(OtherOp, Subtarget))
54938     return SDValue();
54939 
54940   EVT VT = N->getValueType(0);
54941   SDLoc DL(N);
54942   SDValue FalseOp = Cmov.getOperand(0);
54943   SDValue TrueOp = Cmov.getOperand(1);
54944 
54945   // We will push the add through the select, but we can potentially do better
54946   // if we know there is another add in the sequence and this is pointer math.
54947   // In that case, we can absorb an add into the trailing memory op and avoid
54948   // a 3-operand LEA which is likely slower than a 2-operand LEA.
54949   // TODO: If target has "slow3OpsLEA", do this even without the trailing memop?
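  // For example (illustrative only): for pointer math feeding a memory op,
  // "load (add (add base, idx), (cmov C1, C2))" folds better as
  // "load (add (cmov (add base, C1), (add base, C2)), idx)", since each inner
  // add is a 2-operand LEA and the final add folds into the addressing mode.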
54950   if (OtherOp.getOpcode() == ISD::ADD && OtherOp.hasOneUse() &&
54951       !isa<ConstantSDNode>(OtherOp.getOperand(0)) &&
54952       all_of(N->uses(), [&](SDNode *Use) {
54953         auto *MemNode = dyn_cast<MemSDNode>(Use);
54954         return MemNode && MemNode->getBasePtr().getNode() == N;
54955       })) {
54956     // add (cmov C1, C2), add (X, Y) --> add (cmov (add X, C1), (add X, C2)), Y
54957     // TODO: We are arbitrarily choosing op0 as the 1st piece of the sum, but
54958     //       it is possible that choosing op1 might be better.
54959     SDValue X = OtherOp.getOperand(0), Y = OtherOp.getOperand(1);
54960     FalseOp = DAG.getNode(ISD::ADD, DL, VT, X, FalseOp);
54961     TrueOp = DAG.getNode(ISD::ADD, DL, VT, X, TrueOp);
54962     Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp,
54963                        Cmov.getOperand(2), Cmov.getOperand(3));
54964     return DAG.getNode(ISD::ADD, DL, VT, Cmov, Y);
54965   }
54966 
54967   // add (cmov C1, C2), OtherOp --> cmov (add OtherOp, C1), (add OtherOp, C2)
54968   FalseOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, FalseOp);
54969   TrueOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, TrueOp);
54970   return DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp, Cmov.getOperand(2),
54971                      Cmov.getOperand(3));
54972 }
54973 
54974 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
54975                           TargetLowering::DAGCombinerInfo &DCI,
54976                           const X86Subtarget &Subtarget) {
54977   EVT VT = N->getValueType(0);
54978   SDValue Op0 = N->getOperand(0);
54979   SDValue Op1 = N->getOperand(1);
54980   SDLoc DL(N);
54981 
54982   if (SDValue Select = pushAddIntoCmovOfConsts(N, DAG, Subtarget))
54983     return Select;
54984 
54985   if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, DL, VT, Subtarget))
54986     return MAdd;
54987   if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, DL, VT, Subtarget))
54988     return MAdd;
54989   if (SDValue MAdd = combineAddOfPMADDWD(DAG, Op0, Op1, DL, VT))
54990     return MAdd;
54991 
54992   // Try to synthesize horizontal adds from adds of shuffles.
54993   if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
54994     return V;
54995 
54996   // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
54997   // (sub Y, (sext (vXi1 X))).
54998   // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
54999   // generic DAG combine without a legal type check, but adding this there
55000   // caused regressions.
55001   if (VT.isVector()) {
55002     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55003     if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
55004         Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
55005         TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
55006       SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
55007       return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
55008     }
55009 
55010     if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
55011         Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
55012         TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
55013       SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
55014       return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
55015     }
55016   }
55017 
55018   // Fold ADD(ADC(Y,0,W),X) -> ADC(X,Y,W)
55019   if (Op0.getOpcode() == X86ISD::ADC && Op0->hasOneUse() &&
55020       X86::isZeroNode(Op0.getOperand(1))) {
55021     assert(!Op0->hasAnyUseOfValue(1) && "Overflow bit in use");
55022     return DAG.getNode(X86ISD::ADC, SDLoc(Op0), Op0->getVTList(), Op1,
55023                        Op0.getOperand(0), Op0.getOperand(2));
55024   }
55025 
55026   return combineAddOrSubToADCOrSBB(N, DAG);
55027 }
55028 
55029 // Try to fold (sub Y, cmovns X, -X) -> (add Y, cmovns -X, X) if the cmov
55030 // condition comes from the subtract node that produced -X. This matches the
55031 // cmov expansion for absolute value. By swapping the operands we convert abs
55032 // to nabs.
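// Illustrative sketch: the cmov abs expansion computes "NegX = sub 0, X" and
// selects between X and NegX on the sign flags of that subtract. Swapping the
// two cmov operands (keeping the same condition) negates the selected value,
// which is what lets the outer "sub N0, cmov" become "add N0, cmov" below.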
55033 static SDValue combineSubABS(SDNode *N, SelectionDAG &DAG) {
55034   SDValue N0 = N->getOperand(0);
55035   SDValue N1 = N->getOperand(1);
55036 
55037   if (N1.getOpcode() != X86ISD::CMOV || !N1.hasOneUse())
55038     return SDValue();
55039 
55040   X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
55041   if (CC != X86::COND_S && CC != X86::COND_NS)
55042     return SDValue();
55043 
55044   // Condition should come from a negate operation.
55045   SDValue Cond = N1.getOperand(3);
55046   if (Cond.getOpcode() != X86ISD::SUB || !isNullConstant(Cond.getOperand(0)))
55047     return SDValue();
55048   assert(Cond.getResNo() == 1 && "Unexpected result number");
55049 
55050   // Get the X and -X from the negate.
55051   SDValue NegX = Cond.getValue(0);
55052   SDValue X = Cond.getOperand(1);
55053 
55054   SDValue FalseOp = N1.getOperand(0);
55055   SDValue TrueOp = N1.getOperand(1);
55056 
55057   // Cmov operands should be X and NegX. Order doesn't matter.
55058   if (!(TrueOp == X && FalseOp == NegX) && !(TrueOp == NegX && FalseOp == X))
55059     return SDValue();
55060 
55061   // Build a new CMOV with the operands swapped.
55062   SDLoc DL(N);
55063   MVT VT = N->getSimpleValueType(0);
55064   SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp,
55065                              N1.getOperand(2), Cond);
55066   // Convert sub to add.
55067   return DAG.getNode(ISD::ADD, DL, VT, N0, Cmov);
55068 }
55069 
55070 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
55071                           TargetLowering::DAGCombinerInfo &DCI,
55072                           const X86Subtarget &Subtarget) {
55073   SDValue Op0 = N->getOperand(0);
55074   SDValue Op1 = N->getOperand(1);
55075 
55076   // TODO: Add NoOpaque handling to isConstantIntBuildVectorOrConstantInt.
55077   auto IsNonOpaqueConstant = [&](SDValue Op) {
55078     if (SDNode *C = DAG.isConstantIntBuildVectorOrConstantInt(Op)) {
55079       if (auto *Cst = dyn_cast<ConstantSDNode>(C))
55080         return !Cst->isOpaque();
55081       return true;
55082     }
55083     return false;
55084   };
55085 
55086   // X86 can't encode an immediate LHS of a sub. See if we can push the
55087   // negation into a preceding instruction. If the RHS of the sub is an XOR with
55088   // one use and a constant, invert the immediate, saving one register.
55089   // sub(C1, xor(X, C2)) -> add(xor(X, ~C2), C1+1)
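  // Why this holds: xor(X, ~C2) == ~xor(X, C2), and in two's complement
  // ~V == -V - 1, so xor(X, ~C2) + (C1 + 1) == C1 - xor(X, C2).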
55090   if (Op1.getOpcode() == ISD::XOR && IsNonOpaqueConstant(Op0) &&
55091       IsNonOpaqueConstant(Op1.getOperand(1)) && Op1->hasOneUse()) {
55092     SDLoc DL(N);
55093     EVT VT = Op0.getValueType();
55094     SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, Op1.getOperand(0),
55095                                  DAG.getNOT(SDLoc(Op1), Op1.getOperand(1), VT));
55096     SDValue NewAdd =
55097         DAG.getNode(ISD::ADD, DL, VT, Op0, DAG.getConstant(1, DL, VT));
55098     return DAG.getNode(ISD::ADD, DL, VT, NewXor, NewAdd);
55099   }
55100 
55101   if (SDValue V = combineSubABS(N, DAG))
55102     return V;
55103 
55104   // Try to synthesize horizontal subs from subs of shuffles.
55105   if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
55106     return V;
55107 
55108   // Fold SUB(X,ADC(Y,0,W)) -> SBB(X,Y,W)
55109   if (Op1.getOpcode() == X86ISD::ADC && Op1->hasOneUse() &&
55110       X86::isZeroNode(Op1.getOperand(1))) {
55111     assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
55112     return DAG.getNode(X86ISD::SBB, SDLoc(Op1), Op1->getVTList(), Op0,
55113                        Op1.getOperand(0), Op1.getOperand(2));
55114   }
55115 
55116   // Fold SUB(X,SBB(Y,Z,W)) -> SUB(ADC(X,Z,W),Y)
55117   // Don't fold to ADC(0,0,W)/SETCC_CARRY pattern which will prevent more folds.
55118   if (Op1.getOpcode() == X86ISD::SBB && Op1->hasOneUse() &&
55119       !(X86::isZeroNode(Op0) && X86::isZeroNode(Op1.getOperand(1)))) {
55120     assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
55121     SDValue ADC = DAG.getNode(X86ISD::ADC, SDLoc(Op1), Op1->getVTList(), Op0,
55122                               Op1.getOperand(1), Op1.getOperand(2));
55123     return DAG.getNode(ISD::SUB, SDLoc(N), Op0.getValueType(), ADC.getValue(0),
55124                        Op1.getOperand(0));
55125   }
55126 
55127   return combineAddOrSubToADCOrSBB(N, DAG);
55128 }
55129 
55130 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
55131                                     const X86Subtarget &Subtarget) {
55132   MVT VT = N->getSimpleValueType(0);
55133   SDLoc DL(N);
55134 
55135   if (N->getOperand(0) == N->getOperand(1)) {
55136     if (N->getOpcode() == X86ISD::PCMPEQ)
55137       return DAG.getConstant(-1, DL, VT);
55138     if (N->getOpcode() == X86ISD::PCMPGT)
55139       return DAG.getConstant(0, DL, VT);
55140   }
55141 
55142   return SDValue();
55143 }
55144 
55145 /// Helper that combines an array of subvector ops as if they were the operands
55146 /// of a ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
55147 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
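// Illustrative sketch: many of the folds below rewrite
//   concat(op(a0, b0), op(a1, b1)) --> op(concat(a0, a1), concat(b0, b1))
// for x86 ops that act per lane, so one wider instruction replaces two narrow
// ones plus the concatenation.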
55148 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
55149                                       ArrayRef<SDValue> Ops, SelectionDAG &DAG,
55150                                       TargetLowering::DAGCombinerInfo &DCI,
55151                                       const X86Subtarget &Subtarget) {
55152   assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
55153   unsigned EltSizeInBits = VT.getScalarSizeInBits();
55154 
55155   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
55156     return DAG.getUNDEF(VT);
55157 
55158   if (llvm::all_of(Ops, [](SDValue Op) {
55159         return ISD::isBuildVectorAllZeros(Op.getNode());
55160       }))
55161     return getZeroVector(VT, Subtarget, DAG, DL);
55162 
55163   SDValue Op0 = Ops[0];
55164   bool IsSplat = llvm::all_equal(Ops);
55165 
55166   // Repeated subvectors.
55167   if (IsSplat &&
55168       (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
55169     // If this broadcast is inserted into both halves, use a larger broadcast.
55170     if (Op0.getOpcode() == X86ISD::VBROADCAST)
55171       return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
55172 
55173     // If this simple subvector or scalar/subvector broadcast_load is inserted
55174     // into both halves, use a larger broadcast_load. Update other uses to use
55175     // an extracted subvector.
55176     if (ISD::isNormalLoad(Op0.getNode()) ||
55177         Op0.getOpcode() == X86ISD::VBROADCAST_LOAD ||
55178         Op0.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
55179       auto *Mem = cast<MemSDNode>(Op0);
55180       unsigned Opc = Op0.getOpcode() == X86ISD::VBROADCAST_LOAD
55181                          ? X86ISD::VBROADCAST_LOAD
55182                          : X86ISD::SUBV_BROADCAST_LOAD;
55183       if (SDValue BcastLd =
55184               getBROADCAST_LOAD(Opc, DL, VT, Mem->getMemoryVT(), Mem, 0, DAG)) {
55185         SDValue BcastSrc =
55186             extractSubVector(BcastLd, 0, DAG, DL, Op0.getValueSizeInBits());
55187         DAG.ReplaceAllUsesOfValueWith(Op0, BcastSrc);
55188         return BcastLd;
55189       }
55190     }
55191 
55192     // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
55193     if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
55194         (Subtarget.hasAVX2() ||
55195          X86::mayFoldLoadIntoBroadcastFromMem(Op0.getOperand(0),
55196                                               VT.getScalarType(), Subtarget)))
55197       return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
55198                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
55199                                      Op0.getOperand(0),
55200                                      DAG.getIntPtrConstant(0, DL)));
55201 
55202     // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
55203     if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
55204         (Subtarget.hasAVX2() ||
55205          (EltSizeInBits >= 32 &&
55206           X86::mayFoldLoad(Op0.getOperand(0), Subtarget))) &&
55207         Op0.getOperand(0).getValueType() == VT.getScalarType())
55208       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
55209 
55210     // concat_vectors(extract_subvector(broadcast(x)),
55211     //                extract_subvector(broadcast(x))) -> broadcast(x)
55212     if (Op0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
55213         Op0.getOperand(0).getValueType() == VT) {
55214       if (Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST ||
55215           Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST_LOAD)
55216         return Op0.getOperand(0);
55217     }
55218   }
55219 
55220   // concat(extract_subvector(v0,c0), extract_subvector(v1,c1)) -> vperm2x128.
55221   // Only handle concats of subvector high halves, which vperm2x128 is best at.
55222   // TODO: This should go in combineX86ShufflesRecursively eventually.
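  // Note on the immediate used below: for VPERM2X128, imm bits [1:0] pick the
  // 128-bit half written to the low lane and bits [5:4] the half written to
  // the high lane (0/1 = low/high half of the first source, 2/3 = low/high
  // half of the second), so 0x31 concatenates the two high halves.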
55223   if (VT.is256BitVector() && Ops.size() == 2) {
55224     SDValue Src0 = peekThroughBitcasts(Ops[0]);
55225     SDValue Src1 = peekThroughBitcasts(Ops[1]);
55226     if (Src0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
55227         Src1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
55228       EVT SrcVT0 = Src0.getOperand(0).getValueType();
55229       EVT SrcVT1 = Src1.getOperand(0).getValueType();
55230       unsigned NumSrcElts0 = SrcVT0.getVectorNumElements();
55231       unsigned NumSrcElts1 = SrcVT1.getVectorNumElements();
55232       if (SrcVT0.is256BitVector() && SrcVT1.is256BitVector() &&
55233           Src0.getConstantOperandAPInt(1) == (NumSrcElts0 / 2) &&
55234           Src1.getConstantOperandAPInt(1) == (NumSrcElts1 / 2)) {
55235         return DAG.getNode(X86ISD::VPERM2X128, DL, VT,
55236                            DAG.getBitcast(VT, Src0.getOperand(0)),
55237                            DAG.getBitcast(VT, Src1.getOperand(0)),
55238                            DAG.getTargetConstant(0x31, DL, MVT::i8));
55239       }
55240     }
55241   }
55242 
55243   // Repeated opcode.
55244   // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
55245   // but it currently struggles with different vector widths.
55246   if (llvm::all_of(Ops, [Op0](SDValue Op) {
55247         return Op.getOpcode() == Op0.getOpcode();
55248       })) {
55249     auto ConcatSubOperand = [&](MVT VT, ArrayRef<SDValue> SubOps, unsigned I) {
55250       SmallVector<SDValue> Subs;
55251       for (SDValue SubOp : SubOps)
55252         Subs.push_back(SubOp.getOperand(I));
55253       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
55254     };
55255     auto IsConcatFree = [](MVT VT, ArrayRef<SDValue> SubOps, unsigned Op) {
55256       for (unsigned I = 0, E = SubOps.size(); I != E; ++I) {
55257         SDValue Sub = SubOps[I].getOperand(Op);
55258         unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
55259         if (Sub.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
55260             Sub.getOperand(0).getValueType() != VT ||
55261             Sub.getConstantOperandAPInt(1) != (I * NumSubElts))
55262           return false;
55263       }
55264       return true;
55265     };
55266 
55267     unsigned NumOps = Ops.size();
55268     switch (Op0.getOpcode()) {
55269     case X86ISD::VBROADCAST: {
55270       if (!IsSplat && llvm::all_of(Ops, [](SDValue Op) {
55271             return Op.getOperand(0).getValueType().is128BitVector();
55272           })) {
55273         if (VT == MVT::v4f64 || VT == MVT::v4i64)
55274           return DAG.getNode(X86ISD::UNPCKL, DL, VT,
55275                              ConcatSubOperand(VT, Ops, 0),
55276                              ConcatSubOperand(VT, Ops, 0));
55277         // TODO: Add pseudo v8i32 PSHUFD handling to AVX1Only targets.
55278         if (VT == MVT::v8f32 || (VT == MVT::v8i32 && Subtarget.hasInt256()))
55279           return DAG.getNode(VT == MVT::v8f32 ? X86ISD::VPERMILPI
55280                                               : X86ISD::PSHUFD,
55281                              DL, VT, ConcatSubOperand(VT, Ops, 0),
55282                              getV4X86ShuffleImm8ForMask({0, 0, 0, 0}, DL, DAG));
55283       }
55284       break;
55285     }
55286     case X86ISD::MOVDDUP:
55287     case X86ISD::MOVSHDUP:
55288     case X86ISD::MOVSLDUP: {
55289       if (!IsSplat)
55290         return DAG.getNode(Op0.getOpcode(), DL, VT,
55291                            ConcatSubOperand(VT, Ops, 0));
55292       break;
55293     }
55294     case X86ISD::SHUFP: {
55295       // Add SHUFPD support if/when necessary.
55296       if (!IsSplat && VT.getScalarType() == MVT::f32 &&
55297           llvm::all_of(Ops, [Op0](SDValue Op) {
55298             return Op.getOperand(2) == Op0.getOperand(2);
55299           })) {
55300         return DAG.getNode(Op0.getOpcode(), DL, VT,
55301                            ConcatSubOperand(VT, Ops, 0),
55302                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
55303       }
55304       break;
55305     }
55306     case X86ISD::PSHUFHW:
55307     case X86ISD::PSHUFLW:
55308     case X86ISD::PSHUFD:
55309       if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
55310           Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
55311         return DAG.getNode(Op0.getOpcode(), DL, VT,
55312                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
55313       }
55314       [[fallthrough]];
55315     case X86ISD::VPERMILPI:
55316       if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
55317           Op0.getOperand(1) == Ops[1].getOperand(1)) {
55318         SDValue Res = DAG.getBitcast(MVT::v8f32, ConcatSubOperand(VT, Ops, 0));
55319         Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
55320                           Op0.getOperand(1));
55321         return DAG.getBitcast(VT, Res);
55322       }
55323       if (!IsSplat && NumOps == 2 && VT == MVT::v4f64) {
55324         uint64_t Idx0 = Ops[0].getConstantOperandVal(1);
55325         uint64_t Idx1 = Ops[1].getConstantOperandVal(1);
55326         uint64_t Idx = ((Idx1 & 3) << 2) | (Idx0 & 3);
55327         return DAG.getNode(Op0.getOpcode(), DL, VT,
55328                            ConcatSubOperand(VT, Ops, 0),
55329                            DAG.getTargetConstant(Idx, DL, MVT::i8));
55330       }
55331       break;
55332     case X86ISD::PSHUFB:
55333       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55334                        (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
55335         return DAG.getNode(Op0.getOpcode(), DL, VT,
55336                            ConcatSubOperand(VT, Ops, 0),
55337                            ConcatSubOperand(VT, Ops, 1));
55338       }
55339       break;
55340     case X86ISD::VPERMV3:
55341       if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
55342         MVT OpVT = Op0.getSimpleValueType();
55343         int NumSrcElts = OpVT.getVectorNumElements();
55344         SmallVector<int, 64> ConcatMask;
55345         for (unsigned i = 0; i != NumOps; ++i) {
55346           SmallVector<int, 64> SubMask;
55347           SmallVector<SDValue, 2> SubOps;
55348           if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
55349                                     SubMask))
55350             break;
55351           for (int M : SubMask) {
55352             if (0 <= M) {
55353               M += M < NumSrcElts ? 0 : NumSrcElts;
55354               M += i * NumSrcElts;
55355             }
55356             ConcatMask.push_back(M);
55357           }
55358         }
55359         if (ConcatMask.size() == (NumOps * NumSrcElts)) {
55360           SDValue Src0 = concatSubVectors(Ops[0].getOperand(0),
55361                                           Ops[1].getOperand(0), DAG, DL);
55362           SDValue Src1 = concatSubVectors(Ops[0].getOperand(2),
55363                                           Ops[1].getOperand(2), DAG, DL);
55364           MVT IntMaskSVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
55365           MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
55366           SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
55367           return DAG.getNode(X86ISD::VPERMV3, DL, VT, Src0, Mask, Src1);
55368         }
55369       }
55370       break;
55371     case X86ISD::VSHLI:
55372     case X86ISD::VSRLI:
55373       // Special case: SHL/SRL AVX1 V4i64 by 32-bits can lower as a shuffle.
55374       // TODO: Move this to LowerShiftByScalarImmediate?
55375       if (VT == MVT::v4i64 && !Subtarget.hasInt256() &&
55376           llvm::all_of(Ops, [](SDValue Op) {
55377             return Op.getConstantOperandAPInt(1) == 32;
55378           })) {
55379         SDValue Res = DAG.getBitcast(MVT::v8i32, ConcatSubOperand(VT, Ops, 0));
55380         SDValue Zero = getZeroVector(MVT::v8i32, Subtarget, DAG, DL);
55381         if (Op0.getOpcode() == X86ISD::VSHLI) {
55382           Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
55383                                      {8, 0, 8, 2, 8, 4, 8, 6});
55384         } else {
55385           Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
55386                                      {1, 8, 3, 8, 5, 8, 7, 8});
55387         }
55388         return DAG.getBitcast(VT, Res);
55389       }
55390       [[fallthrough]];
55391     case X86ISD::VSRAI:
55392     case X86ISD::VSHL:
55393     case X86ISD::VSRL:
55394     case X86ISD::VSRA:
55395       if (((VT.is256BitVector() && Subtarget.hasInt256()) ||
55396            (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
55397             (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
55398           llvm::all_of(Ops, [Op0](SDValue Op) {
55399             return Op0.getOperand(1) == Op.getOperand(1);
55400           })) {
55401         return DAG.getNode(Op0.getOpcode(), DL, VT,
55402                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
55403       }
55404       break;
55405     case X86ISD::VPERMI:
55406     case X86ISD::VROTLI:
55407     case X86ISD::VROTRI:
55408       if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
55409           llvm::all_of(Ops, [Op0](SDValue Op) {
55410             return Op0.getOperand(1) == Op.getOperand(1);
55411           })) {
55412         return DAG.getNode(Op0.getOpcode(), DL, VT,
55413                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
55414       }
55415       break;
55416     case ISD::AND:
55417     case ISD::OR:
55418     case ISD::XOR:
55419     case X86ISD::ANDNP:
55420       // TODO: Add 256-bit support.
55421       if (!IsSplat && VT.is512BitVector()) {
55422         MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
55423         SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
55424                                  NumOps * SrcVT.getVectorNumElements());
55425         return DAG.getNode(Op0.getOpcode(), DL, VT,
55426                            ConcatSubOperand(SrcVT, Ops, 0),
55427                            ConcatSubOperand(SrcVT, Ops, 1));
55428       }
55429       break;
55430     case X86ISD::GF2P8AFFINEQB:
55431       if (!IsSplat &&
55432           (VT.is256BitVector() ||
55433            (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
55434           llvm::all_of(Ops, [Op0](SDValue Op) {
55435             return Op0.getOperand(2) == Op.getOperand(2);
55436           })) {
55437         return DAG.getNode(Op0.getOpcode(), DL, VT,
55438                            ConcatSubOperand(VT, Ops, 0),
55439                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
55440       }
55441       break;
55442     case X86ISD::HADD:
55443     case X86ISD::HSUB:
55444     case X86ISD::FHADD:
55445     case X86ISD::FHSUB:
55446     case X86ISD::PACKSS:
55447     case X86ISD::PACKUS:
55448       if (!IsSplat && VT.is256BitVector() &&
55449           (VT.isFloatingPoint() || Subtarget.hasInt256())) {
55450         MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
55451         SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
55452                                  NumOps * SrcVT.getVectorNumElements());
55453         return DAG.getNode(Op0.getOpcode(), DL, VT,
55454                            ConcatSubOperand(SrcVT, Ops, 0),
55455                            ConcatSubOperand(SrcVT, Ops, 1));
55456       }
55457       break;
55458     case X86ISD::PALIGNR:
55459       if (!IsSplat &&
55460           ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55461            (VT.is512BitVector() && Subtarget.useBWIRegs())) &&
55462           llvm::all_of(Ops, [Op0](SDValue Op) {
55463             return Op0.getOperand(2) == Op.getOperand(2);
55464           })) {
55465         return DAG.getNode(Op0.getOpcode(), DL, VT,
55466                            ConcatSubOperand(VT, Ops, 0),
55467                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
55468       }
55469       break;
55470     case ISD::VSELECT:
55471     case X86ISD::BLENDV:
55472       if (!IsSplat && VT.is256BitVector() && Ops.size() == 2 &&
55473           (VT.getScalarSizeInBits() >= 32 || Subtarget.hasInt256()) &&
55474           IsConcatFree(VT, Ops, 1) && IsConcatFree(VT, Ops, 2)) {
55475         EVT SelVT = Ops[0].getOperand(0).getValueType();
55476         SelVT = SelVT.getDoubleNumVectorElementsVT(*DAG.getContext());
55477         if (DAG.getTargetLoweringInfo().isTypeLegal(SelVT))
55478           return DAG.getNode(Op0.getOpcode(), DL, VT,
55479                              ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
55480                              ConcatSubOperand(VT, Ops, 1),
55481                              ConcatSubOperand(VT, Ops, 2));
55482       }
55483       break;
55484     }
55485   }
55486 
55487   // Fold subvector loads into one.
55488   // If needed, look through bitcasts to get to the load.
55489   if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
55490     unsigned Fast;
55491     const X86TargetLowering *TLI = Subtarget.getTargetLowering();
55492     if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
55493                                 *FirstLd->getMemOperand(), &Fast) &&
55494         Fast) {
55495       if (SDValue Ld =
55496               EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
55497         return Ld;
55498     }
55499   }
55500 
55501   // Attempt to fold target constant loads.
55502   if (all_of(Ops, [](SDValue Op) { return getTargetConstantFromNode(Op); })) {
55503     SmallVector<APInt> EltBits;
55504     APInt UndefElts = APInt::getNullValue(VT.getVectorNumElements());
55505     for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
55506       APInt OpUndefElts;
55507       SmallVector<APInt> OpEltBits;
55508       if (!getTargetConstantBitsFromNode(Ops[I], EltSizeInBits, OpUndefElts,
55509                                         OpEltBits, true, false))
55510         break;
55511       EltBits.append(OpEltBits);
55512       UndefElts.insertBits(OpUndefElts, I * OpUndefElts.getBitWidth());
55513     }
55514     if (EltBits.size() == VT.getVectorNumElements())
55515       return getConstVector(EltBits, UndefElts, VT, DAG, DL);
55516   }
55517 
55518   return SDValue();
55519 }
55520 
55521 static SDValue combineCONCAT_VECTORS(SDNode *N, SelectionDAG &DAG,
55522                                      TargetLowering::DAGCombinerInfo &DCI,
55523                                      const X86Subtarget &Subtarget) {
55524   EVT VT = N->getValueType(0);
55525   EVT SrcVT = N->getOperand(0).getValueType();
55526   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55527 
55528   // Don't do anything for i1 vectors.
55529   if (VT.getVectorElementType() == MVT::i1)
55530     return SDValue();
55531 
55532   if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
55533     SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
55534     if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
55535                                            DCI, Subtarget))
55536       return R;
55537   }
55538 
55539   return SDValue();
55540 }
55541 
55542 static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
55543                                        TargetLowering::DAGCombinerInfo &DCI,
55544                                        const X86Subtarget &Subtarget) {
55545   if (DCI.isBeforeLegalizeOps())
55546     return SDValue();
55547 
55548   MVT OpVT = N->getSimpleValueType(0);
55549 
55550   bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
55551 
55552   SDLoc dl(N);
55553   SDValue Vec = N->getOperand(0);
55554   SDValue SubVec = N->getOperand(1);
55555 
55556   uint64_t IdxVal = N->getConstantOperandVal(2);
55557   MVT SubVecVT = SubVec.getSimpleValueType();
55558 
55559   if (Vec.isUndef() && SubVec.isUndef())
55560     return DAG.getUNDEF(OpVT);
55561 
55562   // Inserting undefs/zeros into zeros/undefs is a zero vector.
55563   if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
55564       (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
55565     return getZeroVector(OpVT, Subtarget, DAG, dl);
55566 
55567   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
55568     // If we're inserting into a zero vector and then into a larger zero vector,
55569     // just insert into the larger zero vector directly.
55570     if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
55571         ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
55572       uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
55573       return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55574                          getZeroVector(OpVT, Subtarget, DAG, dl),
55575                          SubVec.getOperand(1),
55576                          DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
55577     }
55578 
55579     // If we're inserting into a zero vector, our input was extracted from an
55580     // insert into a zero vector of the same type, and the extraction was at
55581     // least as large as the original insertion, then just insert the original
55582     // subvector into a zero vector.
55583     if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
55584         isNullConstant(SubVec.getOperand(1)) &&
55585         SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
55586       SDValue Ins = SubVec.getOperand(0);
55587       if (isNullConstant(Ins.getOperand(2)) &&
55588           ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
55589           Ins.getOperand(1).getValueSizeInBits().getFixedValue() <=
55590               SubVecVT.getFixedSizeInBits())
55591           return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55592                              getZeroVector(OpVT, Subtarget, DAG, dl),
55593                              Ins.getOperand(1), N->getOperand(2));
55594     }
55595   }
55596 
55597   // Stop here if this is an i1 vector.
55598   if (IsI1Vector)
55599     return SDValue();
55600 
55601   // If this is an insert of an extract, combine to a shuffle. Don't do this
55602   // if the insert or extract can be represented with a subregister operation.
55603   if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
55604       SubVec.getOperand(0).getSimpleValueType() == OpVT &&
55605       (IdxVal != 0 ||
55606        !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
55607     int ExtIdxVal = SubVec.getConstantOperandVal(1);
55608     if (ExtIdxVal != 0) {
55609       int VecNumElts = OpVT.getVectorNumElements();
55610       int SubVecNumElts = SubVecVT.getVectorNumElements();
55611       SmallVector<int, 64> Mask(VecNumElts);
55612       // First create an identity shuffle mask.
55613       for (int i = 0; i != VecNumElts; ++i)
55614         Mask[i] = i;
55615       // Now insert the extracted portion.
55616       for (int i = 0; i != SubVecNumElts; ++i)
55617         Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
55618 
55619       return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
55620     }
55621   }
55622 
55623   // Match concat_vector style patterns.
55624   SmallVector<SDValue, 2> SubVectorOps;
55625   if (collectConcatOps(N, SubVectorOps, DAG)) {
55626     if (SDValue Fold =
55627             combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
55628       return Fold;
55629 
55630     // If we're inserting all zeros into the upper half, change this to
55631     // a concat with zero. We will match this to a move
55632     // with implicit upper bit zeroing during isel.
55633     // We do this here because we don't want combineConcatVectorOps to
55634     // create INSERT_SUBVECTOR from CONCAT_VECTORS.
55635     if (SubVectorOps.size() == 2 &&
55636         ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
55637       return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55638                          getZeroVector(OpVT, Subtarget, DAG, dl),
55639                          SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
55640   }
55641 
55642   // If this is a broadcast insert into an upper undef, use a larger broadcast.
55643   if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
55644     return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
55645 
55646   // If this is a broadcast load inserted into an upper undef, use a larger
55647   // broadcast load.
55648   if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
55649       SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
55650     auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
55651     SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
55652     SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
55653     SDValue BcastLd =
55654         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
55655                                 MemIntr->getMemoryVT(),
55656                                 MemIntr->getMemOperand());
55657     DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
55658     return BcastLd;
55659   }
55660 
55661   // If we're splatting the lower half subvector of a full vector load into the
55662   // upper half, attempt to create a subvector broadcast.
55663   if (IdxVal == (OpVT.getVectorNumElements() / 2) && SubVec.hasOneUse() &&
55664       Vec.getValueSizeInBits() == (2 * SubVec.getValueSizeInBits())) {
55665     auto *VecLd = dyn_cast<LoadSDNode>(Vec);
55666     auto *SubLd = dyn_cast<LoadSDNode>(SubVec);
55667     if (VecLd && SubLd &&
55668         DAG.areNonVolatileConsecutiveLoads(SubLd, VecLd,
55669                                            SubVec.getValueSizeInBits() / 8, 0))
55670       return getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, dl, OpVT, SubVecVT,
55671                                SubLd, 0, DAG);
55672   }
55673 
55674   return SDValue();
55675 }
55676 
55677 /// If we are extracting a subvector of a vector select and the select condition
55678 /// is composed of concatenated vectors, try to narrow the select width. This
55679 /// is a common pattern for AVX1 integer code because 256-bit selects may be
55680 /// legal, but there is almost no integer math/logic available for 256-bit.
55681 /// This function should only be called with legal types (otherwise, the calls
55682 /// to get simple value types will assert).
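// Illustrative sketch (for the index-0 case): given
//   extract_subvector (vselect (concat C0, C1), T, F), 0
// with 256-bit operands, the extracted half equals
//   vselect C0, (extract T, 0), (extract F, 0)
// so the select can be performed at 128 bits, where AVX1 integer ops exist.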
55683 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
55684   SDValue Sel = Ext->getOperand(0);
55685   SmallVector<SDValue, 4> CatOps;
55686   if (Sel.getOpcode() != ISD::VSELECT ||
55687       !collectConcatOps(Sel.getOperand(0).getNode(), CatOps, DAG))
55688     return SDValue();
55689 
55690   // Note: We assume simple value types because this should only be called with
55691   //       legal operations/types.
55692   // TODO: This can be extended to handle extraction to 256-bits.
55693   MVT VT = Ext->getSimpleValueType(0);
55694   if (!VT.is128BitVector())
55695     return SDValue();
55696 
55697   MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
55698   if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
55699     return SDValue();
55700 
55701   MVT WideVT = Ext->getOperand(0).getSimpleValueType();
55702   MVT SelVT = Sel.getSimpleValueType();
55703   assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
55704          "Unexpected vector type with legal operations");
55705 
55706   unsigned SelElts = SelVT.getVectorNumElements();
55707   unsigned CastedElts = WideVT.getVectorNumElements();
55708   unsigned ExtIdx = Ext->getConstantOperandVal(1);
55709   if (SelElts % CastedElts == 0) {
55710     // The select has the same or more (narrower) elements than the extract
55711     // operand. The extraction index gets scaled by that factor.
55712     ExtIdx *= (SelElts / CastedElts);
55713   } else if (CastedElts % SelElts == 0) {
55714     // The select has fewer (wider) elements than the extract operand. Make sure
55715     // that the extraction index can be divided evenly.
55716     unsigned IndexDivisor = CastedElts / SelElts;
55717     if (ExtIdx % IndexDivisor != 0)
55718       return SDValue();
55719     ExtIdx /= IndexDivisor;
55720   } else {
55721     llvm_unreachable("Element count of simple vector types are not divisible?");
55722   }
55723 
55724   unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
55725   unsigned NarrowElts = SelElts / NarrowingFactor;
55726   MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
55727   SDLoc DL(Ext);
55728   SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
55729   SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
55730   SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
55731   SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
55732   return DAG.getBitcast(VT, NarrowSel);
55733 }
55734 
55735 static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
55736                                         TargetLowering::DAGCombinerInfo &DCI,
55737                                         const X86Subtarget &Subtarget) {
55738   // For AVX1 only, if we are extracting from a 256-bit and+not (which will
55739   // eventually get combined/lowered into ANDNP) with a concatenated operand,
55740   // split the 'and' into 128-bit ops to avoid the concatenate and extract.
55741   // We let generic combining take over from there to simplify the
55742   // insert/extract and 'not'.
55743   // This pattern emerges during AVX1 legalization. We handle it before lowering
55744   // to avoid complications like splitting constant vector loads.
55745 
55746   // Capture the original wide type in the likely case that we need to bitcast
55747   // back to this type.
55748   if (!N->getValueType(0).isSimple())
55749     return SDValue();
55750 
55751   MVT VT = N->getSimpleValueType(0);
55752   SDValue InVec = N->getOperand(0);
55753   unsigned IdxVal = N->getConstantOperandVal(1);
55754   SDValue InVecBC = peekThroughBitcasts(InVec);
55755   EVT InVecVT = InVec.getValueType();
55756   unsigned SizeInBits = VT.getSizeInBits();
55757   unsigned InSizeInBits = InVecVT.getSizeInBits();
55758   unsigned NumSubElts = VT.getVectorNumElements();
55759   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55760 
55761   if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
55762       TLI.isTypeLegal(InVecVT) &&
55763       InSizeInBits == 256 && InVecBC.getOpcode() == ISD::AND) {
55764     auto isConcatenatedNot = [](SDValue V) {
55765       V = peekThroughBitcasts(V);
55766       if (!isBitwiseNot(V))
55767         return false;
55768       SDValue NotOp = V->getOperand(0);
55769       return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
55770     };
55771     if (isConcatenatedNot(InVecBC.getOperand(0)) ||
55772         isConcatenatedNot(InVecBC.getOperand(1))) {
55773       // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
55774       SDValue Concat = splitVectorIntBinary(InVecBC, DAG);
55775       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
55776                          DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
55777     }
55778   }
55779 
55780   if (DCI.isBeforeLegalizeOps())
55781     return SDValue();
55782 
55783   if (SDValue V = narrowExtractedVectorSelect(N, DAG))
55784     return V;
55785 
55786   if (ISD::isBuildVectorAllZeros(InVec.getNode()))
55787     return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
55788 
55789   if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
55790     if (VT.getScalarType() == MVT::i1)
55791       return DAG.getConstant(1, SDLoc(N), VT);
55792     return getOnesVector(VT, DAG, SDLoc(N));
55793   }
55794 
55795   if (InVec.getOpcode() == ISD::BUILD_VECTOR)
55796     return DAG.getBuildVector(VT, SDLoc(N),
55797                               InVec->ops().slice(IdxVal, NumSubElts));
55798 
55799   // If we are extracting from an insert into a larger vector, replace with a
55800   // smaller insert, provided the extracted region is at least as large as the
55801   // original inserted subvector. Don't do this for i1 vectors.
55802   // TODO: Relax the matching indices requirement?
55803   if (VT.getVectorElementType() != MVT::i1 &&
55804       InVec.getOpcode() == ISD::INSERT_SUBVECTOR && InVec.hasOneUse() &&
55805       IdxVal == InVec.getConstantOperandVal(2) &&
55806       InVec.getOperand(1).getValueSizeInBits() <= SizeInBits) {
55807     SDLoc DL(N);
55808     SDValue NewExt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT,
55809                                  InVec.getOperand(0), N->getOperand(1));
55810     unsigned NewIdxVal = InVec.getConstantOperandVal(2) - IdxVal;
55811     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, NewExt,
55812                        InVec.getOperand(1),
55813                        DAG.getVectorIdxConstant(NewIdxVal, DL));
55814   }
55815 
55816   // If we're extracting an upper subvector from a broadcast, we should just
55817   // extract the lowest subvector instead, which should allow
55818   // SimplifyDemandedVectorElts to do more simplifications.
55819   if (IdxVal != 0 && (InVec.getOpcode() == X86ISD::VBROADCAST ||
55820                       InVec.getOpcode() == X86ISD::VBROADCAST_LOAD ||
55821                       DAG.isSplatValue(InVec, /*AllowUndefs*/ false)))
55822     return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
55823 
55824   // If we're extracting a broadcasted subvector, just use the lowest subvector.
55825   if (IdxVal != 0 && InVec.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
55826       cast<MemIntrinsicSDNode>(InVec)->getMemoryVT() == VT)
55827     return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
55828 
55829   // Attempt to extract from the source of a shuffle vector.
55830   if ((InSizeInBits % SizeInBits) == 0 && (IdxVal % NumSubElts) == 0) {
55831     SmallVector<int, 32> ShuffleMask;
55832     SmallVector<int, 32> ScaledMask;
55833     SmallVector<SDValue, 2> ShuffleInputs;
55834     unsigned NumSubVecs = InSizeInBits / SizeInBits;
55835     // Decode the shuffle mask and scale it so that it shuffles whole subvectors.
55836     if (getTargetShuffleInputs(InVecBC, ShuffleInputs, ShuffleMask, DAG) &&
55837         scaleShuffleElements(ShuffleMask, NumSubVecs, ScaledMask)) {
55838       unsigned SubVecIdx = IdxVal / NumSubElts;
55839       if (ScaledMask[SubVecIdx] == SM_SentinelUndef)
55840         return DAG.getUNDEF(VT);
55841       if (ScaledMask[SubVecIdx] == SM_SentinelZero)
55842         return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
55843       SDValue Src = ShuffleInputs[ScaledMask[SubVecIdx] / NumSubVecs];
55844       if (Src.getValueSizeInBits() == InSizeInBits) {
55845         unsigned SrcSubVecIdx = ScaledMask[SubVecIdx] % NumSubVecs;
55846         unsigned SrcEltIdx = SrcSubVecIdx * NumSubElts;
55847         return extractSubVector(DAG.getBitcast(InVecVT, Src), SrcEltIdx, DAG,
55848                                 SDLoc(N), SizeInBits);
55849       }
55850     }
55851   }
55852 
55853   // If we're extracting the lowest subvector and we're the only user,
55854   // we may be able to perform this with a smaller vector width.
55855   unsigned InOpcode = InVec.getOpcode();
55856   if (InVec.hasOneUse()) {
55857     if (IdxVal == 0 && VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
55858       // v2f64 CVTDQ2PD(v4i32).
55859       if (InOpcode == ISD::SINT_TO_FP &&
55860           InVec.getOperand(0).getValueType() == MVT::v4i32) {
55861         return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
55862       }
55863       // v2f64 CVTUDQ2PD(v4i32).
55864       if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
55865           InVec.getOperand(0).getValueType() == MVT::v4i32) {
55866         return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
55867       }
55868       // v2f64 CVTPS2PD(v4f32).
55869       if (InOpcode == ISD::FP_EXTEND &&
55870           InVec.getOperand(0).getValueType() == MVT::v4f32) {
55871         return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
55872       }
55873     }
55874     if (IdxVal == 0 &&
55875         (InOpcode == ISD::ANY_EXTEND ||
55876          InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
55877          InOpcode == ISD::ZERO_EXTEND ||
55878          InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
55879          InOpcode == ISD::SIGN_EXTEND ||
55880          InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
55881         (SizeInBits == 128 || SizeInBits == 256) &&
55882         InVec.getOperand(0).getValueSizeInBits() >= SizeInBits) {
55883       SDLoc DL(N);
55884       SDValue Ext = InVec.getOperand(0);
55885       if (Ext.getValueSizeInBits() > SizeInBits)
55886         Ext = extractSubVector(Ext, 0, DAG, DL, SizeInBits);
55887       unsigned ExtOp = DAG.getOpcode_EXTEND_VECTOR_INREG(InOpcode);
55888       return DAG.getNode(ExtOp, DL, VT, Ext);
55889     }
55890     if (IdxVal == 0 && InOpcode == ISD::VSELECT &&
55891         InVec.getOperand(0).getValueType().is256BitVector() &&
55892         InVec.getOperand(1).getValueType().is256BitVector() &&
55893         InVec.getOperand(2).getValueType().is256BitVector()) {
55894       SDLoc DL(N);
55895       SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
55896       SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
55897       SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
55898       return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
55899     }
55900     if (IdxVal == 0 && InOpcode == ISD::TRUNCATE && Subtarget.hasVLX() &&
55901         (VT.is128BitVector() || VT.is256BitVector())) {
55902       SDLoc DL(N);
55903       SDValue InVecSrc = InVec.getOperand(0);
55904       unsigned Scale = InVecSrc.getValueSizeInBits() / InSizeInBits;
55905       SDValue Ext = extractSubVector(InVecSrc, 0, DAG, DL, Scale * SizeInBits);
55906       return DAG.getNode(InOpcode, DL, VT, Ext);
55907     }
55908     if (InOpcode == X86ISD::MOVDDUP &&
55909         (VT.is128BitVector() || VT.is256BitVector())) {
55910       SDLoc DL(N);
55911       SDValue Ext0 =
55912           extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
55913       return DAG.getNode(InOpcode, DL, VT, Ext0);
55914     }
55915   }
55916 
55917   // Always split vXi64 logical shifts where we're extracting the upper 32-bits
55918   // as this is very likely to fold into a shuffle/truncation.
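  // (A logical shift of a 64-bit lane by 32 just moves one 32-bit half of the
  // lane into the other half and zeros the rest, which a 32-bit shuffle
  // against a zero vector can express on the narrower type.)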
55919   if ((InOpcode == X86ISD::VSHLI || InOpcode == X86ISD::VSRLI) &&
55920       InVecVT.getScalarSizeInBits() == 64 &&
55921       InVec.getConstantOperandAPInt(1) == 32) {
55922     SDLoc DL(N);
55923     SDValue Ext =
55924         extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
55925     return DAG.getNode(InOpcode, DL, VT, Ext, InVec.getOperand(1));
55926   }
55927 
55928   return SDValue();
55929 }
55930 
55931 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
55932   EVT VT = N->getValueType(0);
55933   SDValue Src = N->getOperand(0);
55934   SDLoc DL(N);
55935 
55936   // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
55937   // This occurs frequently in our masked scalar intrinsic code and our
55938   // floating point select lowering with AVX512.
55939   // TODO: SimplifyDemandedBits instead?
55940   if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
55941     if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
55942       if (C->getAPIntValue().isOne())
55943         return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
55944                            Src.getOperand(0));
55945 
55946   // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
55947   if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
55948       Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
55949       Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
55950     if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
55951       if (C->isZero())
55952         return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
55953                            Src.getOperand(1));
55954 
55955   // Reduce v2i64 to v4i32 if we don't need the upper bits or they are known zero.
55956   // TODO: Move to DAGCombine/SimplifyDemandedBits?
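  // For example (illustrative only): scalar_to_vector v2i64 (zext i32 X) has
  // only 32 meaningful bits, so it can instead be built as a v4i32
  // scalar_to_vector of X with the other lanes zeroed (VZEXT_MOVL), then
  // bitcast back to v2i64.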
55957   if ((VT == MVT::v2i64 || VT == MVT::v2f64) && Src.hasOneUse()) {
55958     auto IsExt64 = [&DAG](SDValue Op, bool IsZeroExt) {
55959       if (Op.getValueType() != MVT::i64)
55960         return SDValue();
55961       unsigned Opc = IsZeroExt ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND;
55962       if (Op.getOpcode() == Opc &&
55963           Op.getOperand(0).getScalarValueSizeInBits() <= 32)
55964         return Op.getOperand(0);
55965       unsigned Ext = IsZeroExt ? ISD::ZEXTLOAD : ISD::EXTLOAD;
55966       if (auto *Ld = dyn_cast<LoadSDNode>(Op))
55967         if (Ld->getExtensionType() == Ext &&
55968             Ld->getMemoryVT().getScalarSizeInBits() <= 32)
55969           return Op;
55970       if (IsZeroExt) {
55971         KnownBits Known = DAG.computeKnownBits(Op);
55972         if (!Known.isConstant() && Known.countMinLeadingZeros() >= 32)
55973           return Op;
55974       }
55975       return SDValue();
55976     };
55977 
55978     if (SDValue AnyExt = IsExt64(peekThroughOneUseBitcasts(Src), false))
55979       return DAG.getBitcast(
55980           VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
55981                           DAG.getAnyExtOrTrunc(AnyExt, DL, MVT::i32)));
55982 
55983     if (SDValue ZeroExt = IsExt64(peekThroughOneUseBitcasts(Src), true))
55984       return DAG.getBitcast(
55985           VT,
55986           DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v4i32,
55987                       DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
55988                                   DAG.getZExtOrTrunc(ZeroExt, DL, MVT::i32))));
55989   }
55990 
55991   // Combine (v2i64 (scalar_to_vector (i64 (bitconvert (mmx))))) to MOVQ2DQ.
55992   if (VT == MVT::v2i64 && Src.getOpcode() == ISD::BITCAST &&
55993       Src.getOperand(0).getValueType() == MVT::x86mmx)
55994     return DAG.getNode(X86ISD::MOVQ2DQ, DL, VT, Src.getOperand(0));
55995 
55996   // See if we're broadcasting the scalar value, in which case just reuse that.
55997   // Ensure the same SDValue from the SDNode use is being used.
55998   if (VT.getScalarType() == Src.getValueType())
55999     for (SDNode *User : Src->uses())
56000       if (User->getOpcode() == X86ISD::VBROADCAST &&
56001           Src == User->getOperand(0)) {
56002         unsigned SizeInBits = VT.getFixedSizeInBits();
56003         unsigned BroadcastSizeInBits =
56004             User->getValueSizeInBits(0).getFixedValue();
56005         if (BroadcastSizeInBits == SizeInBits)
56006           return SDValue(User, 0);
56007         if (BroadcastSizeInBits > SizeInBits)
56008           return extractSubVector(SDValue(User, 0), 0, DAG, DL, SizeInBits);
56009         // TODO: Handle BroadcastSizeInBits < SizeInBits when we have test
56010         // coverage.
56011       }
56012 
56013   return SDValue();
56014 }
56015 
56016 // Simplify PMULDQ and PMULUDQ operations.
static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
56018                              TargetLowering::DAGCombinerInfo &DCI,
56019                              const X86Subtarget &Subtarget) {
56020   SDValue LHS = N->getOperand(0);
56021   SDValue RHS = N->getOperand(1);
56022 
56023   // Canonicalize constant to RHS.
56024   if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
56025       !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
56026     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
56027 
56028   // Multiply by zero.
56029   // Don't return RHS as it may contain UNDEFs.
56030   if (ISD::isBuildVectorAllZeros(RHS.getNode()))
56031     return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
56032 
56033   // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
56034   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56035   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(64), DCI))
56036     return SDValue(N, 0);
56037 
56038   // If the input is an extend_invec and the SimplifyDemandedBits call didn't
56039   // convert it to any_extend_invec, due to the LegalOperations check, do the
56040   // conversion directly to a vector shuffle manually. This exposes combine
56041   // opportunities missed by combineEXTEND_VECTOR_INREG not calling
56042   // combineX86ShufflesRecursively on SSE4.1 targets.
56043   // FIXME: This is basically a hack around several other issues related to
56044   // ANY_EXTEND_VECTOR_INREG.
56045   if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
56046       (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
56047        LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
56048       LHS.getOperand(0).getValueType() == MVT::v4i32) {
56049     SDLoc dl(N);
56050     LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
56051                                LHS.getOperand(0), { 0, -1, 1, -1 });
56052     LHS = DAG.getBitcast(MVT::v2i64, LHS);
56053     return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
56054   }
56055   if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
56056       (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
56057        RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
56058       RHS.getOperand(0).getValueType() == MVT::v4i32) {
56059     SDLoc dl(N);
56060     RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
56061                                RHS.getOperand(0), { 0, -1, 1, -1 });
56062     RHS = DAG.getBitcast(MVT::v2i64, RHS);
56063     return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
56064   }
56065 
56066   return SDValue();
56067 }
56068 
56069 // Simplify VPMADDUBSW/VPMADDWD operations.
static SDValue combineVPMADD(SDNode *N, SelectionDAG &DAG,
56071                              TargetLowering::DAGCombinerInfo &DCI) {
56072   EVT VT = N->getValueType(0);
56073   SDValue LHS = N->getOperand(0);
56074   SDValue RHS = N->getOperand(1);
56075 
56076   // Multiply by zero.
56077   // Don't return LHS/RHS as it may contain UNDEFs.
56078   if (ISD::isBuildVectorAllZeros(LHS.getNode()) ||
56079       ISD::isBuildVectorAllZeros(RHS.getNode()))
56080     return DAG.getConstant(0, SDLoc(N), VT);
56081 
56082   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56083   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
56084   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
56085     return SDValue(N, 0);
56086 
56087   return SDValue();
56088 }
56089 
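// Simplify *_EXTEND_VECTOR_INREG operations: merge with loads into extloads,
// fold nested or extracted extensions, expand zext of build_vectors, and
// attempt shuffle combining on SSE41+ targets.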
static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
56091                                           TargetLowering::DAGCombinerInfo &DCI,
56092                                           const X86Subtarget &Subtarget) {
56093   EVT VT = N->getValueType(0);
56094   SDValue In = N->getOperand(0);
56095   unsigned Opcode = N->getOpcode();
56096   unsigned InOpcode = In.getOpcode();
56097   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56098   SDLoc DL(N);
56099 
56100   // Try to merge vector loads and extend_inreg to an extload.
56101   if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
56102       In.hasOneUse()) {
56103     auto *Ld = cast<LoadSDNode>(In);
56104     if (Ld->isSimple()) {
56105       MVT SVT = In.getSimpleValueType().getVectorElementType();
56106       ISD::LoadExtType Ext = Opcode == ISD::SIGN_EXTEND_VECTOR_INREG
56107                                  ? ISD::SEXTLOAD
56108                                  : ISD::ZEXTLOAD;
56109       EVT MemVT = VT.changeVectorElementType(SVT);
56110       if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
56111         SDValue Load = DAG.getExtLoad(
56112             Ext, DL, VT, Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
56113             MemVT, Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags());
56114         DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
56115         return Load;
56116       }
56117     }
56118   }
56119 
56120   // Fold EXTEND_VECTOR_INREG(EXTEND_VECTOR_INREG(X)) -> EXTEND_VECTOR_INREG(X).
56121   if (Opcode == InOpcode)
56122     return DAG.getNode(Opcode, DL, VT, In.getOperand(0));
56123 
56124   // Fold EXTEND_VECTOR_INREG(EXTRACT_SUBVECTOR(EXTEND(X),0))
56125   // -> EXTEND_VECTOR_INREG(X).
56126   // TODO: Handle non-zero subvector indices.
56127   if (InOpcode == ISD::EXTRACT_SUBVECTOR && In.getConstantOperandVal(1) == 0 &&
56128       In.getOperand(0).getOpcode() == DAG.getOpcode_EXTEND(Opcode) &&
56129       In.getOperand(0).getOperand(0).getValueSizeInBits() ==
56130           In.getValueSizeInBits())
56131     return DAG.getNode(Opcode, DL, VT, In.getOperand(0).getOperand(0));
56132 
56133   // Fold EXTEND_VECTOR_INREG(BUILD_VECTOR(X,Y,?,?)) -> BUILD_VECTOR(X,0,Y,0).
56134   // TODO: Move to DAGCombine?
56135   if (!DCI.isBeforeLegalizeOps() && Opcode == ISD::ZERO_EXTEND_VECTOR_INREG &&
56136       In.getOpcode() == ISD::BUILD_VECTOR && In.hasOneUse() &&
56137       In.getValueSizeInBits() == VT.getSizeInBits()) {
56138     unsigned NumElts = VT.getVectorNumElements();
56139     unsigned Scale = VT.getScalarSizeInBits() / In.getScalarValueSizeInBits();
56140     EVT EltVT = In.getOperand(0).getValueType();
56141     SmallVector<SDValue> Elts(Scale * NumElts, DAG.getConstant(0, DL, EltVT));
56142     for (unsigned I = 0; I != NumElts; ++I)
56143       Elts[I * Scale] = In.getOperand(I);
56144     return DAG.getBitcast(VT, DAG.getBuildVector(In.getValueType(), DL, Elts));
56145   }
56146 
56147   // Attempt to combine as a shuffle on SSE41+ targets.
56148   if ((Opcode == ISD::ANY_EXTEND_VECTOR_INREG ||
56149        Opcode == ISD::ZERO_EXTEND_VECTOR_INREG) &&
56150       Subtarget.hasSSE41()) {
56151     SDValue Op(N, 0);
56152     if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
56153       if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
56154         return Res;
56155   }
56156 
56157   return SDValue();
56158 }
56159 
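// Simplify KSHIFTL/KSHIFTR mask shift operations.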
static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
56161                              TargetLowering::DAGCombinerInfo &DCI) {
56162   EVT VT = N->getValueType(0);
56163 
56164   if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
56165     return DAG.getConstant(0, SDLoc(N), VT);
56166 
56167   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56168   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
56169   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
56170     return SDValue(N, 0);
56171 
56172   return SDValue();
56173 }
56174 
56175 // Optimize (fp16_to_fp (fp_to_fp16 X)) to VCVTPS2PH followed by VCVTPH2PS.
// Done as a combine because the lowering for fp16_to_fp and fp_to_fp16 produces
// extra instructions between the conversions due to going through a scalar and
// back.
static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
56179                                  const X86Subtarget &Subtarget) {
56180   if (Subtarget.useSoftFloat() || !Subtarget.hasF16C())
56181     return SDValue();
56182 
56183   if (N->getOperand(0).getOpcode() != ISD::FP_TO_FP16)
56184     return SDValue();
56185 
56186   if (N->getValueType(0) != MVT::f32 ||
56187       N->getOperand(0).getOperand(0).getValueType() != MVT::f32)
56188     return SDValue();
56189 
56190   SDLoc dl(N);
56191   SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
56192                             N->getOperand(0).getOperand(0));
56193   Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
56194                     DAG.getTargetConstant(4, dl, MVT::i32));
56195   Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
56196   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
56197                      DAG.getIntPtrConstant(0, dl));
56198 }
56199 
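// Convert a vector fp_extend from f16 into CVTPH2PS on F16C-only targets by
// widening the input to at least 8 elements.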
static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
56201                                 const X86Subtarget &Subtarget) {
56202   if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
56203     return SDValue();
56204 
56205   if (Subtarget.hasFP16())
56206     return SDValue();
56207 
56208   bool IsStrict = N->isStrictFPOpcode();
56209   EVT VT = N->getValueType(0);
56210   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
56211   EVT SrcVT = Src.getValueType();
56212 
56213   if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::f16)
56214     return SDValue();
56215 
56216   if (VT.getVectorElementType() != MVT::f32 &&
56217       VT.getVectorElementType() != MVT::f64)
56218     return SDValue();
56219 
56220   unsigned NumElts = VT.getVectorNumElements();
56221   if (NumElts == 1 || !isPowerOf2_32(NumElts))
56222     return SDValue();
56223 
56224   SDLoc dl(N);
56225 
56226   // Convert the input to vXi16.
56227   EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
56228   Src = DAG.getBitcast(IntVT, Src);
56229 
56230   // Widen to at least 8 input elements.
56231   if (NumElts < 8) {
56232     unsigned NumConcats = 8 / NumElts;
56233     SDValue Fill = NumElts == 4 ? DAG.getUNDEF(IntVT)
56234                                 : DAG.getConstant(0, dl, IntVT);
56235     SmallVector<SDValue, 4> Ops(NumConcats, Fill);
56236     Ops[0] = Src;
56237     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, Ops);
56238   }
56239 
56240   // Destination is vXf32 with at least 4 elements.
56241   EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32,
56242                                std::max(4U, NumElts));
56243   SDValue Cvt, Chain;
56244   if (IsStrict) {
56245     Cvt = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {CvtVT, MVT::Other},
56246                       {N->getOperand(0), Src});
56247     Chain = Cvt.getValue(1);
56248   } else {
56249     Cvt = DAG.getNode(X86ISD::CVTPH2PS, dl, CvtVT, Src);
56250   }
56251 
56252   if (NumElts < 4) {
56253     assert(NumElts == 2 && "Unexpected size");
56254     Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Cvt,
56255                       DAG.getIntPtrConstant(0, dl));
56256   }
56257 
56258   if (IsStrict) {
56259     // Extend to the original VT if necessary.
56260     if (Cvt.getValueType() != VT) {
56261       Cvt = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {VT, MVT::Other},
56262                         {Chain, Cvt});
56263       Chain = Cvt.getValue(1);
56264     }
56265     return DAG.getMergeValues({Cvt, Chain}, dl);
56266   }
56267 
56268   // Extend to the original VT if necessary.
56269   return DAG.getNode(ISD::FP_EXTEND, dl, VT, Cvt);
56270 }
56271 
56272 // Try to find a larger VBROADCAST_LOAD/SUBV_BROADCAST_LOAD that we can extract
56273 // from. Limit this to cases where the loads have the same input chain and the
56274 // output chains are unused. This avoids any memory ordering issues.
static SDValue combineBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG,
56276                                      TargetLowering::DAGCombinerInfo &DCI) {
56277   assert((N->getOpcode() == X86ISD::VBROADCAST_LOAD ||
56278           N->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) &&
56279          "Unknown broadcast load type");
56280 
56281   // Only do this if the chain result is unused.
56282   if (N->hasAnyUseOfValue(1))
56283     return SDValue();
56284 
56285   auto *MemIntrin = cast<MemIntrinsicSDNode>(N);
56286 
56287   SDValue Ptr = MemIntrin->getBasePtr();
56288   SDValue Chain = MemIntrin->getChain();
56289   EVT VT = N->getSimpleValueType(0);
56290   EVT MemVT = MemIntrin->getMemoryVT();
56291 
56292   // Look at other users of our base pointer and try to find a wider broadcast.
56293   // The input chain and the size of the memory VT must match.
56294   for (SDNode *User : Ptr->uses())
56295     if (User != N && User->getOpcode() == N->getOpcode() &&
56296         cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
56297         cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
56298         cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
56299             MemVT.getSizeInBits() &&
56300         !User->hasAnyUseOfValue(1) &&
56301         User->getValueSizeInBits(0).getFixedValue() > VT.getFixedSizeInBits()) {
56302       SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
56303                                          VT.getSizeInBits());
56304       Extract = DAG.getBitcast(VT, Extract);
56305       return DCI.CombineTo(N, Extract, SDValue(User, 1));
56306     }
56307 
56308   return SDValue();
56309 }
56310 
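// Convert a vector fp_round from f32 to f16 into CVTPS2PH on F16C-only
// targets by widening the input to at least 4 elements.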
static SDValue combineFP_ROUND(SDNode *N, SelectionDAG &DAG,
56312                                const X86Subtarget &Subtarget) {
56313   if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
56314     return SDValue();
56315 
56316   if (Subtarget.hasFP16())
56317     return SDValue();
56318 
56319   bool IsStrict = N->isStrictFPOpcode();
56320   EVT VT = N->getValueType(0);
56321   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
56322   EVT SrcVT = Src.getValueType();
56323 
56324   if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
56325       SrcVT.getVectorElementType() != MVT::f32)
56326     return SDValue();
56327 
56328   unsigned NumElts = VT.getVectorNumElements();
56329   if (NumElts == 1 || !isPowerOf2_32(NumElts))
56330     return SDValue();
56331 
56332   SDLoc dl(N);
56333 
56334   // Widen to at least 4 input elements.
56335   if (NumElts < 4)
56336     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
56337                       DAG.getConstantFP(0.0, dl, SrcVT));
56338 
56339   // Destination is v8i16 with at least 8 elements.
56340   EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
56341                                std::max(8U, NumElts));
56342   SDValue Cvt, Chain;
56343   SDValue Rnd = DAG.getTargetConstant(4, dl, MVT::i32);
56344   if (IsStrict) {
56345     Cvt = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {CvtVT, MVT::Other},
56346                       {N->getOperand(0), Src, Rnd});
56347     Chain = Cvt.getValue(1);
56348   } else {
56349     Cvt = DAG.getNode(X86ISD::CVTPS2PH, dl, CvtVT, Src, Rnd);
56350   }
56351 
56352   // Extract down to real number of elements.
56353   if (NumElts < 8) {
56354     EVT IntVT = VT.changeVectorElementTypeToInteger();
56355     Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, IntVT, Cvt,
56356                       DAG.getIntPtrConstant(0, dl));
56357   }
56358 
56359   Cvt = DAG.getBitcast(VT, Cvt);
56360 
56361   if (IsStrict)
56362     return DAG.getMergeValues({Cvt, Chain}, dl);
56363 
56364   return Cvt;
56365 }
56366 
static SDValue combineMOVDQ2Q(SDNode *N, SelectionDAG &DAG) {
56368   SDValue Src = N->getOperand(0);
56369 
56370   // Turn MOVDQ2Q+simple_load into an mmx load.
56371   if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
56372     LoadSDNode *LN = cast<LoadSDNode>(Src.getNode());
56373 
56374     if (LN->isSimple()) {
56375       SDValue NewLd = DAG.getLoad(MVT::x86mmx, SDLoc(N), LN->getChain(),
56376                                   LN->getBasePtr(),
56377                                   LN->getPointerInfo(),
56378                                   LN->getOriginalAlign(),
56379                                   LN->getMemOperand()->getFlags());
56380       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), NewLd.getValue(1));
56381       return NewLd;
56382     }
56383   }
56384 
56385   return SDValue();
56386 }
56387 
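// Simplify PDEP operations using SimplifyDemandedBits.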
static SDValue combinePDEP(SDNode *N, SelectionDAG &DAG,
56389                            TargetLowering::DAGCombinerInfo &DCI) {
56390   unsigned NumBits = N->getSimpleValueType(0).getSizeInBits();
56391   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56392   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBits), DCI))
56393     return SDValue(N, 0);
56394 
56395   return SDValue();
56396 }
56397 
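// Dispatch X86 target-specific DAG combines based on the node's opcode.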
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
56399                                              DAGCombinerInfo &DCI) const {
56400   SelectionDAG &DAG = DCI.DAG;
56401   switch (N->getOpcode()) {
56402   default: break;
56403   case ISD::SCALAR_TO_VECTOR:
56404     return combineScalarToVector(N, DAG);
56405   case ISD::EXTRACT_VECTOR_ELT:
56406   case X86ISD::PEXTRW:
56407   case X86ISD::PEXTRB:
56408     return combineExtractVectorElt(N, DAG, DCI, Subtarget);
56409   case ISD::CONCAT_VECTORS:
56410     return combineCONCAT_VECTORS(N, DAG, DCI, Subtarget);
56411   case ISD::INSERT_SUBVECTOR:
56412     return combineINSERT_SUBVECTOR(N, DAG, DCI, Subtarget);
56413   case ISD::EXTRACT_SUBVECTOR:
56414     return combineEXTRACT_SUBVECTOR(N, DAG, DCI, Subtarget);
56415   case ISD::VSELECT:
56416   case ISD::SELECT:
56417   case X86ISD::BLENDV:      return combineSelect(N, DAG, DCI, Subtarget);
56418   case ISD::BITCAST:        return combineBitcast(N, DAG, DCI, Subtarget);
56419   case X86ISD::CMOV:        return combineCMov(N, DAG, DCI, Subtarget);
56420   case X86ISD::CMP:         return combineCMP(N, DAG);
56421   case ISD::ADD:            return combineAdd(N, DAG, DCI, Subtarget);
56422   case ISD::SUB:            return combineSub(N, DAG, DCI, Subtarget);
56423   case X86ISD::ADD:
56424   case X86ISD::SUB:         return combineX86AddSub(N, DAG, DCI);
56425   case X86ISD::SBB:         return combineSBB(N, DAG);
56426   case X86ISD::ADC:         return combineADC(N, DAG, DCI);
56427   case ISD::MUL:            return combineMul(N, DAG, DCI, Subtarget);
56428   case ISD::SHL:            return combineShiftLeft(N, DAG);
56429   case ISD::SRA:            return combineShiftRightArithmetic(N, DAG, Subtarget);
56430   case ISD::SRL:            return combineShiftRightLogical(N, DAG, DCI, Subtarget);
56431   case ISD::AND:            return combineAnd(N, DAG, DCI, Subtarget);
56432   case ISD::OR:             return combineOr(N, DAG, DCI, Subtarget);
56433   case ISD::XOR:            return combineXor(N, DAG, DCI, Subtarget);
56434   case X86ISD::BEXTR:
56435   case X86ISD::BEXTRI:      return combineBEXTR(N, DAG, DCI, Subtarget);
56436   case ISD::LOAD:           return combineLoad(N, DAG, DCI, Subtarget);
56437   case ISD::MLOAD:          return combineMaskedLoad(N, DAG, DCI, Subtarget);
56438   case ISD::STORE:          return combineStore(N, DAG, DCI, Subtarget);
56439   case ISD::MSTORE:         return combineMaskedStore(N, DAG, DCI, Subtarget);
56440   case X86ISD::VEXTRACT_STORE:
56441     return combineVEXTRACT_STORE(N, DAG, DCI, Subtarget);
56442   case ISD::SINT_TO_FP:
56443   case ISD::STRICT_SINT_TO_FP:
56444     return combineSIntToFP(N, DAG, DCI, Subtarget);
56445   case ISD::UINT_TO_FP:
56446   case ISD::STRICT_UINT_TO_FP:
56447     return combineUIntToFP(N, DAG, Subtarget);
56448   case ISD::FADD:
56449   case ISD::FSUB:           return combineFaddFsub(N, DAG, Subtarget);
56450   case X86ISD::VFCMULC:
56451   case X86ISD::VFMULC:      return combineFMulcFCMulc(N, DAG, Subtarget);
56452   case ISD::FNEG:           return combineFneg(N, DAG, DCI, Subtarget);
56453   case ISD::TRUNCATE:       return combineTruncate(N, DAG, Subtarget);
56454   case X86ISD::VTRUNC:      return combineVTRUNC(N, DAG, DCI);
56455   case X86ISD::ANDNP:       return combineAndnp(N, DAG, DCI, Subtarget);
56456   case X86ISD::FAND:        return combineFAnd(N, DAG, Subtarget);
56457   case X86ISD::FANDN:       return combineFAndn(N, DAG, Subtarget);
56458   case X86ISD::FXOR:
56459   case X86ISD::FOR:         return combineFOr(N, DAG, DCI, Subtarget);
56460   case X86ISD::FMIN:
56461   case X86ISD::FMAX:        return combineFMinFMax(N, DAG);
56462   case ISD::FMINNUM:
56463   case ISD::FMAXNUM:        return combineFMinNumFMaxNum(N, DAG, Subtarget);
56464   case X86ISD::CVTSI2P:
56465   case X86ISD::CVTUI2P:     return combineX86INT_TO_FP(N, DAG, DCI);
56466   case X86ISD::CVTP2SI:
56467   case X86ISD::CVTP2UI:
56468   case X86ISD::STRICT_CVTTP2SI:
56469   case X86ISD::CVTTP2SI:
56470   case X86ISD::STRICT_CVTTP2UI:
56471   case X86ISD::CVTTP2UI:
56472                             return combineCVTP2I_CVTTP2I(N, DAG, DCI);
56473   case X86ISD::STRICT_CVTPH2PS:
56474   case X86ISD::CVTPH2PS:    return combineCVTPH2PS(N, DAG, DCI);
56475   case X86ISD::BT:          return combineBT(N, DAG, DCI);
56476   case ISD::ANY_EXTEND:
56477   case ISD::ZERO_EXTEND:    return combineZext(N, DAG, DCI, Subtarget);
56478   case ISD::SIGN_EXTEND:    return combineSext(N, DAG, DCI, Subtarget);
56479   case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
56480   case ISD::ANY_EXTEND_VECTOR_INREG:
56481   case ISD::SIGN_EXTEND_VECTOR_INREG:
56482   case ISD::ZERO_EXTEND_VECTOR_INREG:
56483     return combineEXTEND_VECTOR_INREG(N, DAG, DCI, Subtarget);
56484   case ISD::SETCC:          return combineSetCC(N, DAG, DCI, Subtarget);
56485   case X86ISD::SETCC:       return combineX86SetCC(N, DAG, Subtarget);
56486   case X86ISD::BRCOND:      return combineBrCond(N, DAG, Subtarget);
56487   case X86ISD::PACKSS:
56488   case X86ISD::PACKUS:      return combineVectorPack(N, DAG, DCI, Subtarget);
56489   case X86ISD::HADD:
56490   case X86ISD::HSUB:
56491   case X86ISD::FHADD:
56492   case X86ISD::FHSUB:       return combineVectorHADDSUB(N, DAG, DCI, Subtarget);
56493   case X86ISD::VSHL:
56494   case X86ISD::VSRA:
56495   case X86ISD::VSRL:
56496     return combineVectorShiftVar(N, DAG, DCI, Subtarget);
56497   case X86ISD::VSHLI:
56498   case X86ISD::VSRAI:
56499   case X86ISD::VSRLI:
56500     return combineVectorShiftImm(N, DAG, DCI, Subtarget);
56501   case ISD::INSERT_VECTOR_ELT:
56502   case X86ISD::PINSRB:
56503   case X86ISD::PINSRW:      return combineVectorInsert(N, DAG, DCI, Subtarget);
56504   case X86ISD::SHUFP:       // Handle all target specific shuffles
56505   case X86ISD::INSERTPS:
56506   case X86ISD::EXTRQI:
56507   case X86ISD::INSERTQI:
56508   case X86ISD::VALIGN:
56509   case X86ISD::PALIGNR:
56510   case X86ISD::VSHLDQ:
56511   case X86ISD::VSRLDQ:
56512   case X86ISD::BLENDI:
56513   case X86ISD::UNPCKH:
56514   case X86ISD::UNPCKL:
56515   case X86ISD::MOVHLPS:
56516   case X86ISD::MOVLHPS:
56517   case X86ISD::PSHUFB:
56518   case X86ISD::PSHUFD:
56519   case X86ISD::PSHUFHW:
56520   case X86ISD::PSHUFLW:
56521   case X86ISD::MOVSHDUP:
56522   case X86ISD::MOVSLDUP:
56523   case X86ISD::MOVDDUP:
56524   case X86ISD::MOVSS:
56525   case X86ISD::MOVSD:
56526   case X86ISD::MOVSH:
56527   case X86ISD::VBROADCAST:
56528   case X86ISD::VPPERM:
56529   case X86ISD::VPERMI:
56530   case X86ISD::VPERMV:
56531   case X86ISD::VPERMV3:
56532   case X86ISD::VPERMIL2:
56533   case X86ISD::VPERMILPI:
56534   case X86ISD::VPERMILPV:
56535   case X86ISD::VPERM2X128:
56536   case X86ISD::SHUF128:
56537   case X86ISD::VZEXT_MOVL:
56538   case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI,Subtarget);
56539   case X86ISD::FMADD_RND:
56540   case X86ISD::FMSUB:
56541   case X86ISD::STRICT_FMSUB:
56542   case X86ISD::FMSUB_RND:
56543   case X86ISD::FNMADD:
56544   case X86ISD::STRICT_FNMADD:
56545   case X86ISD::FNMADD_RND:
56546   case X86ISD::FNMSUB:
56547   case X86ISD::STRICT_FNMSUB:
56548   case X86ISD::FNMSUB_RND:
56549   case ISD::FMA:
56550   case ISD::STRICT_FMA:     return combineFMA(N, DAG, DCI, Subtarget);
56551   case X86ISD::FMADDSUB_RND:
56552   case X86ISD::FMSUBADD_RND:
56553   case X86ISD::FMADDSUB:
56554   case X86ISD::FMSUBADD:    return combineFMADDSUB(N, DAG, DCI);
56555   case X86ISD::MOVMSK:      return combineMOVMSK(N, DAG, DCI, Subtarget);
56556   case X86ISD::MGATHER:
56557   case X86ISD::MSCATTER:
56558     return combineX86GatherScatter(N, DAG, DCI, Subtarget);
56559   case ISD::MGATHER:
56560   case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI);
56561   case X86ISD::PCMPEQ:
56562   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
56563   case X86ISD::PMULDQ:
56564   case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
56565   case X86ISD::VPMADDUBSW:
56566   case X86ISD::VPMADDWD:    return combineVPMADD(N, DAG, DCI);
56567   case X86ISD::KSHIFTL:
56568   case X86ISD::KSHIFTR:     return combineKSHIFT(N, DAG, DCI);
56569   case ISD::FP16_TO_FP:     return combineFP16_TO_FP(N, DAG, Subtarget);
56570   case ISD::STRICT_FP_EXTEND:
56571   case ISD::FP_EXTEND:      return combineFP_EXTEND(N, DAG, Subtarget);
56572   case ISD::STRICT_FP_ROUND:
56573   case ISD::FP_ROUND:       return combineFP_ROUND(N, DAG, Subtarget);
56574   case X86ISD::VBROADCAST_LOAD:
56575   case X86ISD::SUBV_BROADCAST_LOAD: return combineBROADCAST_LOAD(N, DAG, DCI);
56576   case X86ISD::MOVDQ2Q:     return combineMOVDQ2Q(N, DAG);
56577   case X86ISD::PDEP:        return combinePDEP(N, DAG, DCI);
56578   }
56579 
56580   return SDValue();
56581 }
56582 
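// Return true if the given legal type is desirable for the given opcode; most
// i8/i16 operations are rejected to avoid partial register stalls and longer
// encodings.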
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
56584   if (!isTypeLegal(VT))
56585     return false;
56586 
56587   // There are no vXi8 shifts.
56588   if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
56589     return false;
56590 
56591   // TODO: Almost no 8-bit ops are desirable because they have no actual
56592   //       size/speed advantages vs. 32-bit ops, but they do have a major
56593   //       potential disadvantage by causing partial register stalls.
56594   //
56595   // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
56596   // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
56597   // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
56598   // check for a constant operand to the multiply.
56599   if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
56600     return false;
56601 
56602   // i16 instruction encodings are longer and some i16 instructions are slow,
56603   // so those are not desirable.
56604   if (VT == MVT::i16) {
56605     switch (Opc) {
56606     default:
56607       break;
56608     case ISD::LOAD:
56609     case ISD::SIGN_EXTEND:
56610     case ISD::ZERO_EXTEND:
56611     case ISD::ANY_EXTEND:
56612     case ISD::SHL:
56613     case ISD::SRA:
56614     case ISD::SRL:
56615     case ISD::SUB:
56616     case ISD::ADD:
56617     case ISD::MUL:
56618     case ISD::AND:
56619     case ISD::OR:
56620     case ISD::XOR:
56621       return false;
56622     }
56623   }
56624 
56625   // Any legal type not explicitly accounted for above here is desirable.
56626   return true;
56627 }
56628 
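// Expand an indirect jump-table branch, emitting an NT_BRIND (notrack) branch
// when the cf-protection-branch module flag is set.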
SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
56630                                                   SDValue Value, SDValue Addr,
56631                                                   SelectionDAG &DAG) const {
56632   const Module *M = DAG.getMachineFunction().getMMI().getModule();
56633   Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
56634   if (IsCFProtectionSupported) {
    // When control-flow branch protection is enabled, we need to add a notrack
    // prefix to the indirect branch. To do that we create an NT_BRIND SDNode;
    // during instruction selection the pattern converts it to a jmp with the
    // NoTrack prefix.
56639     return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
56640   }
56641 
56642   return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
56643 }
56644 
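// Return true if it is profitable to promote this op to i32; this applies to
// i16 ops and 8-bit multiplies by a constant, unless promotion would block a
// foldable load or atomic RMW.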
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
56646   EVT VT = Op.getValueType();
56647   bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
56648                              isa<ConstantSDNode>(Op.getOperand(1));
56649 
56650   // i16 is legal, but undesirable since i16 instruction encodings are longer
56651   // and some i16 instructions are slow.
56652   // 8-bit multiply-by-constant can usually be expanded to something cheaper
56653   // using LEA and/or other ALU ops.
56654   if (VT != MVT::i16 && !Is8BitMulByConstant)
56655     return false;
56656 
56657   auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
56658     if (!Op.hasOneUse())
56659       return false;
56660     SDNode *User = *Op->use_begin();
56661     if (!ISD::isNormalStore(User))
56662       return false;
56663     auto *Ld = cast<LoadSDNode>(Load);
56664     auto *St = cast<StoreSDNode>(User);
56665     return Ld->getBasePtr() == St->getBasePtr();
56666   };
56667 
56668   auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
56669     if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
56670       return false;
56671     if (!Op.hasOneUse())
56672       return false;
56673     SDNode *User = *Op->use_begin();
56674     if (User->getOpcode() != ISD::ATOMIC_STORE)
56675       return false;
56676     auto *Ld = cast<AtomicSDNode>(Load);
56677     auto *St = cast<AtomicSDNode>(User);
56678     return Ld->getBasePtr() == St->getBasePtr();
56679   };
56680 
56681   bool Commute = false;
56682   switch (Op.getOpcode()) {
56683   default: return false;
56684   case ISD::SIGN_EXTEND:
56685   case ISD::ZERO_EXTEND:
56686   case ISD::ANY_EXTEND:
56687     break;
56688   case ISD::SHL:
56689   case ISD::SRA:
56690   case ISD::SRL: {
56691     SDValue N0 = Op.getOperand(0);
56692     // Look out for (store (shl (load), x)).
56693     if (X86::mayFoldLoad(N0, Subtarget) && IsFoldableRMW(N0, Op))
56694       return false;
56695     break;
56696   }
56697   case ISD::ADD:
56698   case ISD::MUL:
56699   case ISD::AND:
56700   case ISD::OR:
56701   case ISD::XOR:
56702     Commute = true;
56703     [[fallthrough]];
56704   case ISD::SUB: {
56705     SDValue N0 = Op.getOperand(0);
56706     SDValue N1 = Op.getOperand(1);
56707     // Avoid disabling potential load folding opportunities.
56708     if (X86::mayFoldLoad(N1, Subtarget) &&
56709         (!Commute || !isa<ConstantSDNode>(N0) ||
56710          (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
56711       return false;
56712     if (X86::mayFoldLoad(N0, Subtarget) &&
56713         ((Commute && !isa<ConstantSDNode>(N1)) ||
56714          (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
56715       return false;
56716     if (IsFoldableAtomicRMW(N0, Op) ||
56717         (Commute && IsFoldableAtomicRMW(N1, Op)))
56718       return false;
56719   }
56720   }
56721 
56722   PVT = MVT::i32;
56723   return true;
56724 }
56725 
56726 //===----------------------------------------------------------------------===//
56727 //                           X86 Inline Assembly Support
56728 //===----------------------------------------------------------------------===//
56729 
56730 // Helper to match a string separated by whitespace.
static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
56732   S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
56733 
56734   for (StringRef Piece : Pieces) {
56735     if (!S.startswith(Piece)) // Check if the piece matches.
56736       return false;
56737 
56738     S = S.substr(Piece.size());
56739     StringRef::size_type Pos = S.find_first_not_of(" \t");
56740     if (Pos == 0) // We matched a prefix.
56741       return false;
56742 
56743     S = S.substr(Pos);
56744   }
56745 
56746   return S.empty();
56747 }
56748 
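// Return true if the constraint clobber list consists of the flag register
// clobbers ~{cc}, ~{flags}, ~{fpsr} (and ~{dirflag} when there are four).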
static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
56750 
56751   if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
56752     if (llvm::is_contained(AsmPieces, "~{cc}") &&
56753         llvm::is_contained(AsmPieces, "~{flags}") &&
56754         llvm::is_contained(AsmPieces, "~{fpsr}")) {
56755 
56756       if (AsmPieces.size() == 3)
56757         return true;
56758       else if (llvm::is_contained(AsmPieces, "~{dirflag}"))
56759         return true;
56760     }
56761   }
56762   return false;
56763 }
56764 
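// Recognize simple bswap/rotate inline asm patterns and expand them to
// llvm.bswap intrinsic calls.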
bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
56766   InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
56767 
56768   const std::string &AsmStr = IA->getAsmString();
56769 
56770   IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
56771   if (!Ty || Ty->getBitWidth() % 16 != 0)
56772     return false;
56773 
56774   // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
56775   SmallVector<StringRef, 4> AsmPieces;
56776   SplitString(AsmStr, AsmPieces, ";\n");
56777 
56778   switch (AsmPieces.size()) {
56779   default: return false;
56780   case 1:
56781     // FIXME: this should verify that we are targeting a 486 or better.  If not,
56782     // we will turn this bswap into something that will be lowered to logical
56783     // ops instead of emitting the bswap asm.  For now, we don't support 486 or
56784     // lower so don't worry about this.
56785     // bswap $0
56786     if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
56787         matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
56788         matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
56789         matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
56790         matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
56791         matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
      // No need to check constraints; nothing other than the equivalent of
      // "=r,0" would be valid here.
56794       return IntrinsicLowering::LowerToByteSwap(CI);
56795     }
56796 
56797     // rorw $$8, ${0:w}  -->  llvm.bswap.i16
56798     if (CI->getType()->isIntegerTy(16) &&
56799         IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
56800         (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
56801          matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
56802       AsmPieces.clear();
56803       StringRef ConstraintsStr = IA->getConstraintString();
56804       SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
56805       array_pod_sort(AsmPieces.begin(), AsmPieces.end());
56806       if (clobbersFlagRegisters(AsmPieces))
56807         return IntrinsicLowering::LowerToByteSwap(CI);
56808     }
56809     break;
56810   case 3:
56811     if (CI->getType()->isIntegerTy(32) &&
56812         IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
56813         matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
56814         matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
56815         matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
56816       AsmPieces.clear();
56817       StringRef ConstraintsStr = IA->getConstraintString();
56818       SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
56819       array_pod_sort(AsmPieces.begin(), AsmPieces.end());
56820       if (clobbersFlagRegisters(AsmPieces))
56821         return IntrinsicLowering::LowerToByteSwap(CI);
56822     }
56823 
56824     if (CI->getType()->isIntegerTy(64)) {
56825       InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
56826       if (Constraints.size() >= 2 &&
56827           Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
56828           Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
56829         // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
56830         if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
56831             matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
56832             matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
56833           return IntrinsicLowering::LowerToByteSwap(CI);
56834       }
56835     }
56836     break;
56837   }
56838   return false;
56839 }
56840 
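// Map an "{@cc<cond>}" flag output constraint to the corresponding X86
// condition code, or COND_INVALID if it is not a recognized condition.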
static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
56842   X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
56843                            .Case("{@cca}", X86::COND_A)
56844                            .Case("{@ccae}", X86::COND_AE)
56845                            .Case("{@ccb}", X86::COND_B)
56846                            .Case("{@ccbe}", X86::COND_BE)
56847                            .Case("{@ccc}", X86::COND_B)
56848                            .Case("{@cce}", X86::COND_E)
56849                            .Case("{@ccz}", X86::COND_E)
56850                            .Case("{@ccg}", X86::COND_G)
56851                            .Case("{@ccge}", X86::COND_GE)
56852                            .Case("{@ccl}", X86::COND_L)
56853                            .Case("{@ccle}", X86::COND_LE)
56854                            .Case("{@ccna}", X86::COND_BE)
56855                            .Case("{@ccnae}", X86::COND_B)
56856                            .Case("{@ccnb}", X86::COND_AE)
56857                            .Case("{@ccnbe}", X86::COND_A)
56858                            .Case("{@ccnc}", X86::COND_AE)
56859                            .Case("{@ccne}", X86::COND_NE)
56860                            .Case("{@ccnz}", X86::COND_NE)
56861                            .Case("{@ccng}", X86::COND_LE)
56862                            .Case("{@ccnge}", X86::COND_L)
56863                            .Case("{@ccnl}", X86::COND_GE)
56864                            .Case("{@ccnle}", X86::COND_G)
56865                            .Case("{@ccno}", X86::COND_NO)
56866                            .Case("{@ccnp}", X86::COND_NP)
56867                            .Case("{@ccns}", X86::COND_NS)
56868                            .Case("{@cco}", X86::COND_O)
56869                            .Case("{@ccp}", X86::COND_P)
56870                            .Case("{@ccs}", X86::COND_S)
56871                            .Default(X86::COND_INVALID);
56872   return Cond;
56873 }
56874 
56875 /// Given a constraint letter, return the type of constraint for this target.
56876 X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(StringRef Constraint) const {
56878   if (Constraint.size() == 1) {
56879     switch (Constraint[0]) {
56880     case 'R':
56881     case 'q':
56882     case 'Q':
56883     case 'f':
56884     case 't':
56885     case 'u':
56886     case 'y':
56887     case 'x':
56888     case 'v':
56889     case 'l':
56890     case 'k': // AVX512 masking registers.
56891       return C_RegisterClass;
56892     case 'a':
56893     case 'b':
56894     case 'c':
56895     case 'd':
56896     case 'S':
56897     case 'D':
56898     case 'A':
56899       return C_Register;
56900     case 'I':
56901     case 'J':
56902     case 'K':
56903     case 'N':
56904     case 'G':
56905     case 'L':
56906     case 'M':
56907       return C_Immediate;
56908     case 'C':
56909     case 'e':
56910     case 'Z':
56911       return C_Other;
56912     default:
56913       break;
56914     }
56915   }
56916   else if (Constraint.size() == 2) {
56917     switch (Constraint[0]) {
56918     default:
56919       break;
56920     case 'Y':
56921       switch (Constraint[1]) {
56922       default:
56923         break;
56924       case 'z':
56925         return C_Register;
56926       case 'i':
56927       case 'm':
56928       case 'k':
56929       case 't':
56930       case '2':
56931         return C_RegisterClass;
56932       }
56933     }
56934   } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
56935     return C_Other;
56936   return TargetLowering::getConstraintType(Constraint);
56937 }
56938 
56939 /// Examine constraint type and operand type and determine a weight value.
56940 /// This object must already have been set up with the operand type
56941 /// and the current alternative constraint selected.
56942 TargetLowering::ConstraintWeight
  X86TargetLowering::getSingleConstraintMatchWeight(
56944     AsmOperandInfo &info, const char *constraint) const {
56945   ConstraintWeight weight = CW_Invalid;
56946   Value *CallOperandVal = info.CallOperandVal;
56947     // If we don't have a value, we can't do a match,
56948     // but allow it at the lowest weight.
56949   if (!CallOperandVal)
56950     return CW_Default;
56951   Type *type = CallOperandVal->getType();
56952   // Look at the constraint type.
56953   switch (*constraint) {
56954   default:
56955     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
56956     [[fallthrough]];
56957   case 'R':
56958   case 'q':
56959   case 'Q':
56960   case 'a':
56961   case 'b':
56962   case 'c':
56963   case 'd':
56964   case 'S':
56965   case 'D':
56966   case 'A':
56967     if (CallOperandVal->getType()->isIntegerTy())
56968       weight = CW_SpecificReg;
56969     break;
56970   case 'f':
56971   case 't':
56972   case 'u':
56973     if (type->isFloatingPointTy())
56974       weight = CW_SpecificReg;
56975     break;
56976   case 'y':
56977     if (type->isX86_MMXTy() && Subtarget.hasMMX())
56978       weight = CW_SpecificReg;
56979     break;
56980   case 'Y':
56981     if (StringRef(constraint).size() != 2)
56982       break;
56983     switch (constraint[1]) {
56984       default:
56985         return CW_Invalid;
56986       // XMM0
56987       case 'z':
56988         if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
56989             ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()) ||
56990             ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()))
56991           return CW_SpecificReg;
56992         return CW_Invalid;
56993       // Conditional OpMask regs (AVX512)
56994       case 'k':
56995         if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
56996           return CW_Register;
56997         return CW_Invalid;
56998       // Any MMX reg
56999       case 'm':
57000         if (type->isX86_MMXTy() && Subtarget.hasMMX())
57001           return weight;
57002         return CW_Invalid;
57003       // Any SSE reg when ISA >= SSE2, same as 'x'
57004       case 'i':
57005       case 't':
57006       case '2':
57007         if (!Subtarget.hasSSE2())
57008           return CW_Invalid;
57009         break;
57010     }
57011     break;
57012   case 'v':
57013     if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
57014       weight = CW_Register;
57015     [[fallthrough]];
57016   case 'x':
57017     if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
57018         ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
57019       weight = CW_Register;
57020     break;
57021   case 'k':
57022     // Enable conditional vector operations using %k<#> registers.
57023     if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
57024       weight = CW_Register;
57025     break;
57026   case 'I':
57027     if (auto *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
57028       if (C->getZExtValue() <= 31)
57029         weight = CW_Constant;
57030     }
57031     break;
57032   case 'J':
57033     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57034       if (C->getZExtValue() <= 63)
57035         weight = CW_Constant;
57036     }
57037     break;
57038   case 'K':
57039     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57040       if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
57041         weight = CW_Constant;
57042     }
57043     break;
57044   case 'L':
57045     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57046       if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
57047         weight = CW_Constant;
57048     }
57049     break;
57050   case 'M':
57051     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57052       if (C->getZExtValue() <= 3)
57053         weight = CW_Constant;
57054     }
57055     break;
57056   case 'N':
57057     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57058       if (C->getZExtValue() <= 0xff)
57059         weight = CW_Constant;
57060     }
57061     break;
57062   case 'G':
57063   case 'C':
57064     if (isa<ConstantFP>(CallOperandVal)) {
57065       weight = CW_Constant;
57066     }
57067     break;
57068   case 'e':
57069     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57070       if ((C->getSExtValue() >= -0x80000000LL) &&
57071           (C->getSExtValue() <= 0x7fffffffLL))
57072         weight = CW_Constant;
57073     }
57074     break;
57075   case 'Z':
57076     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal)) {
57077       if (C->getZExtValue() <= 0xffffffff)
57078         weight = CW_Constant;
57079     }
57080     break;
57081   }
57082   return weight;
57083 }
57084 
57085 /// Try to replace an X constraint, which matches anything, with another that
57086 /// has more specific requirements based on the type of the corresponding
57087 /// operand.
57088 const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
57090   // FP X constraints get lowered to SSE1/2 registers if available, otherwise
57091   // 'f' like normal targets.
57092   if (ConstraintVT.isFloatingPoint()) {
57093     if (Subtarget.hasSSE1())
57094       return "x";
57095   }
57096 
57097   return TargetLowering::LowerXConstraint(ConstraintVT);
57098 }
57099 
57100 // Lower @cc targets via setcc.
SDValue X86TargetLowering::LowerAsmOutputForConstraint(
57102     SDValue &Chain, SDValue &Flag, const SDLoc &DL,
57103     const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
57104   X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
57105   if (Cond == X86::COND_INVALID)
57106     return SDValue();
57107   // Check that return type is valid.
57108   if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
57109       OpInfo.ConstraintVT.getSizeInBits() < 8)
57110     report_fatal_error("Flag output operand is of invalid type");
57111 
57112   // Get EFLAGS register. Only update chain when copyfrom is glued.
57113   if (Flag.getNode()) {
57114     Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
57115     Chain = Flag.getValue(1);
57116   } else
57117     Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
57118   // Extract CC code.
57119   SDValue CC = getSETCC(Cond, Flag, DL, DAG);
  // Zero-extend the CC result to the constraint's integer type.
57121   SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
57122 
57123   return Result;
57124 }
57125 
57126 /// Lower the specified operand into the Ops vector.
57127 /// If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
57129                                                      std::string &Constraint,
57130                                                      std::vector<SDValue>&Ops,
57131                                                      SelectionDAG &DAG) const {
57132   SDValue Result;
57133 
57134   // Only support length 1 constraints for now.
57135   if (Constraint.length() > 1) return;
57136 
57137   char ConstraintLetter = Constraint[0];
57138   switch (ConstraintLetter) {
57139   default: break;
57140   case 'I':
57141     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57142       if (C->getZExtValue() <= 31) {
57143         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57144                                        Op.getValueType());
57145         break;
57146       }
57147     }
57148     return;
57149   case 'J':
57150     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57151       if (C->getZExtValue() <= 63) {
57152         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57153                                        Op.getValueType());
57154         break;
57155       }
57156     }
57157     return;
57158   case 'K':
57159     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57160       if (isInt<8>(C->getSExtValue())) {
57161         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57162                                        Op.getValueType());
57163         break;
57164       }
57165     }
57166     return;
57167   case 'L':
57168     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57169       if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
57170           (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
57171         Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
57172                                        Op.getValueType());
57173         break;
57174       }
57175     }
57176     return;
57177   case 'M':
57178     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57179       if (C->getZExtValue() <= 3) {
57180         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57181                                        Op.getValueType());
57182         break;
57183       }
57184     }
57185     return;
57186   case 'N':
57187     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57188       if (C->getZExtValue() <= 255) {
57189         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57190                                        Op.getValueType());
57191         break;
57192       }
57193     }
57194     return;
57195   case 'O':
57196     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57197       if (C->getZExtValue() <= 127) {
57198         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57199                                        Op.getValueType());
57200         break;
57201       }
57202     }
57203     return;
57204   case 'e': {
57205     // 32-bit signed value
57206     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57207       if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
57208                                            C->getSExtValue())) {
57209         // Widen to 64 bits here to get it sign extended.
57210         Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
57211         break;
57212       }
57213     // FIXME gcc accepts some relocatable values here too, but only in certain
57214     // memory models; it's complicated.
57215     }
57216     return;
57217   }
57218   case 'Z': {
57219     // 32-bit unsigned value
57220     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57221       if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
57222                                            C->getZExtValue())) {
57223         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57224                                        Op.getValueType());
57225         break;
57226       }
57227     }
57228     // FIXME gcc accepts some relocatable values here too, but only in certain
57229     // memory models; it's complicated.
57230     return;
57231   }
57232   case 'i': {
57233     // Literal immediates are always ok.
57234     if (auto *CST = dyn_cast<ConstantSDNode>(Op)) {
57235       bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
57236       BooleanContent BCont = getBooleanContents(MVT::i64);
57237       ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
57238                                     : ISD::SIGN_EXTEND;
57239       int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
57240                                                   : CST->getSExtValue();
57241       Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
57242       break;
57243     }
57244 
57245     // In any sort of PIC mode addresses need to be computed at runtime by
57246     // adding in a register or some sort of table lookup.  These can't
57247     // be used as immediates. BlockAddresses and BasicBlocks are fine though.
57248     if ((Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC()) &&
57249         !(isa<BlockAddressSDNode>(Op) || isa<BasicBlockSDNode>(Op)))
57250       return;
57251 
57252     // If we are in non-pic codegen mode, we allow the address of a global (with
57253     // an optional displacement) to be used with 'i'.
57254     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
57255       // If we require an extra load to get this address, as in PIC mode, we
57256       // can't accept it.
57257       if (isGlobalStubReference(
57258               Subtarget.classifyGlobalReference(GA->getGlobal())))
57259         return;
57260     break;
57261   }
57262   }
57263 
57264   if (Result.getNode()) {
57265     Ops.push_back(Result);
57266     return;
57267   }
57268   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
57269 }
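
// Illustrative only (not part of this file): the immediate constraints above
// come from GCC-style inline asm operands such as
//   asm("andq %1, %0" : "+r"(x) : "e"(-4096));  // 'e': sign-extended imm32
// Operands that fail the checks fall through to the generic
// TargetLowering::LowerAsmOperandForConstraint handling.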
57270 
57271 /// Check if \p RC is a general purpose register class.
57272 /// I.e., GR* or one of their variant.
57273 static bool isGRClass(const TargetRegisterClass &RC) {
57274   return RC.hasSuperClassEq(&X86::GR8RegClass) ||
57275          RC.hasSuperClassEq(&X86::GR16RegClass) ||
57276          RC.hasSuperClassEq(&X86::GR32RegClass) ||
57277          RC.hasSuperClassEq(&X86::GR64RegClass) ||
57278          RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
57279 }
57280 
57281 /// Check if \p RC is a vector register class.
57282 /// I.e., FR* / VR* or one of their variant.
57283 static bool isFRClass(const TargetRegisterClass &RC) {
57284   return RC.hasSuperClassEq(&X86::FR16XRegClass) ||
57285          RC.hasSuperClassEq(&X86::FR32XRegClass) ||
57286          RC.hasSuperClassEq(&X86::FR64XRegClass) ||
57287          RC.hasSuperClassEq(&X86::VR128XRegClass) ||
57288          RC.hasSuperClassEq(&X86::VR256XRegClass) ||
57289          RC.hasSuperClassEq(&X86::VR512RegClass);
57290 }
57291 
57292 /// Check if \p RC is a mask register class.
57293 /// I.e., VK* or one of their variant.
57294 static bool isVKClass(const TargetRegisterClass &RC) {
57295   return RC.hasSuperClassEq(&X86::VK1RegClass) ||
57296          RC.hasSuperClassEq(&X86::VK2RegClass) ||
57297          RC.hasSuperClassEq(&X86::VK4RegClass) ||
57298          RC.hasSuperClassEq(&X86::VK8RegClass) ||
57299          RC.hasSuperClassEq(&X86::VK16RegClass) ||
57300          RC.hasSuperClassEq(&X86::VK32RegClass) ||
57301          RC.hasSuperClassEq(&X86::VK64RegClass);
57302 }
57303 
57304 std::pair<unsigned, const TargetRegisterClass *>
57305 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
57306                                                 StringRef Constraint,
57307                                                 MVT VT) const {
57308   // First, see if this is a constraint that directly corresponds to an LLVM
57309   // register class.
57310   if (Constraint.size() == 1) {
57311     // GCC Constraint Letters
57312     switch (Constraint[0]) {
57313     default: break;
57314     // 'A' means [ER]AX + [ER]DX.
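    // (In 32-bit code "=A" is the classic way to receive the EDX:EAX result of
    // instructions such as rdtsc.)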
57315     case 'A':
57316       if (Subtarget.is64Bit())
57317         return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
57318       assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
57319              "Expecting 64, 32 or 16 bit subtarget");
57320       return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
57321 
57322       // TODO: Slight differences here in allocation order and leaving
57323       // RIP in the class. Do they matter any more here than they do
57324       // in the normal allocation?
57325     case 'k':
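      // 'k' selects any AVX-512 mask register (k0-k7); the 'Yk' variant
      // handled further below excludes k0 so the result can be used as a
      // write mask.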
57326       if (Subtarget.hasAVX512()) {
57327         if (VT == MVT::i1)
57328           return std::make_pair(0U, &X86::VK1RegClass);
57329         if (VT == MVT::i8)
57330           return std::make_pair(0U, &X86::VK8RegClass);
57331         if (VT == MVT::i16)
57332           return std::make_pair(0U, &X86::VK16RegClass);
57333       }
57334       if (Subtarget.hasBWI()) {
57335         if (VT == MVT::i32)
57336           return std::make_pair(0U, &X86::VK32RegClass);
57337         if (VT == MVT::i64)
57338           return std::make_pair(0U, &X86::VK64RegClass);
57339       }
57340       break;
57341     case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
57342       if (Subtarget.is64Bit()) {
57343         if (VT == MVT::i8 || VT == MVT::i1)
57344           return std::make_pair(0U, &X86::GR8RegClass);
57345         if (VT == MVT::i16)
57346           return std::make_pair(0U, &X86::GR16RegClass);
57347         if (VT == MVT::i32 || VT == MVT::f32)
57348           return std::make_pair(0U, &X86::GR32RegClass);
57349         if (VT != MVT::f80 && !VT.isVector())
57350           return std::make_pair(0U, &X86::GR64RegClass);
57351         break;
57352       }
57353       [[fallthrough]];
57354       // 32-bit fallthrough
57355     case 'Q':   // Q_REGS
57356       if (VT == MVT::i8 || VT == MVT::i1)
57357         return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
57358       if (VT == MVT::i16)
57359         return std::make_pair(0U, &X86::GR16_ABCDRegClass);
57360       if (VT == MVT::i32 || VT == MVT::f32 ||
57361           (!VT.isVector() && !Subtarget.is64Bit()))
57362         return std::make_pair(0U, &X86::GR32_ABCDRegClass);
57363       if (VT != MVT::f80 && !VT.isVector())
57364         return std::make_pair(0U, &X86::GR64_ABCDRegClass);
57365       break;
57366     case 'r':   // GENERAL_REGS
57367     case 'l':   // INDEX_REGS
57368       if (VT == MVT::i8 || VT == MVT::i1)
57369         return std::make_pair(0U, &X86::GR8RegClass);
57370       if (VT == MVT::i16)
57371         return std::make_pair(0U, &X86::GR16RegClass);
57372       if (VT == MVT::i32 || VT == MVT::f32 ||
57373           (!VT.isVector() && !Subtarget.is64Bit()))
57374         return std::make_pair(0U, &X86::GR32RegClass);
57375       if (VT != MVT::f80 && !VT.isVector())
57376         return std::make_pair(0U, &X86::GR64RegClass);
57377       break;
57378     case 'R':   // LEGACY_REGS
57379       if (VT == MVT::i8 || VT == MVT::i1)
57380         return std::make_pair(0U, &X86::GR8_NOREXRegClass);
57381       if (VT == MVT::i16)
57382         return std::make_pair(0U, &X86::GR16_NOREXRegClass);
57383       if (VT == MVT::i32 || VT == MVT::f32 ||
57384           (!VT.isVector() && !Subtarget.is64Bit()))
57385         return std::make_pair(0U, &X86::GR32_NOREXRegClass);
57386       if (VT != MVT::f80 && !VT.isVector())
57387         return std::make_pair(0U, &X86::GR64_NOREXRegClass);
57388       break;
57389     case 'f':  // FP Stack registers.
57390       // If SSE is enabled for this VT, use f80 to ensure the isel moves the
57391       // value to the correct fpstack register class.
57392       if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
57393         return std::make_pair(0U, &X86::RFP32RegClass);
57394       if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
57395         return std::make_pair(0U, &X86::RFP64RegClass);
57396       if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80)
57397         return std::make_pair(0U, &X86::RFP80RegClass);
57398       break;
57399     case 'y':   // MMX_REGS if MMX allowed.
57400       if (!Subtarget.hasMMX()) break;
57401       return std::make_pair(0U, &X86::VR64RegClass);
57402     case 'v':
57403     case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
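      // 'x' allows the first 16 SSE/AVX registers; 'v' additionally allows the
      // EVEX-only registers (e.g. xmm16-xmm31) when AVX-512/VLX is available,
      // which is why it prefers the extended (*X) register classes below.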
57404       if (!Subtarget.hasSSE1()) break;
57405       bool VConstraint = (Constraint[0] == 'v');
57406 
57407       switch (VT.SimpleTy) {
57408       default: break;
57409       // Scalar SSE types.
57410       case MVT::f16:
57411         if (VConstraint && Subtarget.hasFP16())
57412           return std::make_pair(0U, &X86::FR16XRegClass);
57413         break;
57414       case MVT::f32:
57415       case MVT::i32:
57416         if (VConstraint && Subtarget.hasVLX())
57417           return std::make_pair(0U, &X86::FR32XRegClass);
57418         return std::make_pair(0U, &X86::FR32RegClass);
57419       case MVT::f64:
57420       case MVT::i64:
57421         if (VConstraint && Subtarget.hasVLX())
57422           return std::make_pair(0U, &X86::FR64XRegClass);
57423         return std::make_pair(0U, &X86::FR64RegClass);
57424       case MVT::i128:
57425         if (Subtarget.is64Bit()) {
57426           if (VConstraint && Subtarget.hasVLX())
57427             return std::make_pair(0U, &X86::VR128XRegClass);
57428           return std::make_pair(0U, &X86::VR128RegClass);
57429         }
57430         break;
57431       // Vector types and fp128.
57432       case MVT::v8f16:
57433         if (!Subtarget.hasFP16())
57434           break;
57435         [[fallthrough]];
57436       case MVT::f128:
57437       case MVT::v16i8:
57438       case MVT::v8i16:
57439       case MVT::v4i32:
57440       case MVT::v2i64:
57441       case MVT::v4f32:
57442       case MVT::v2f64:
57443         if (VConstraint && Subtarget.hasVLX())
57444           return std::make_pair(0U, &X86::VR128XRegClass);
57445         return std::make_pair(0U, &X86::VR128RegClass);
57446       // AVX types.
57447       case MVT::v16f16:
57448         if (!Subtarget.hasFP16())
57449           break;
57450         [[fallthrough]];
57451       case MVT::v32i8:
57452       case MVT::v16i16:
57453       case MVT::v8i32:
57454       case MVT::v4i64:
57455       case MVT::v8f32:
57456       case MVT::v4f64:
57457         if (VConstraint && Subtarget.hasVLX())
57458           return std::make_pair(0U, &X86::VR256XRegClass);
57459         if (Subtarget.hasAVX())
57460           return std::make_pair(0U, &X86::VR256RegClass);
57461         break;
57462       case MVT::v32f16:
57463         if (!Subtarget.hasFP16())
57464           break;
57465         [[fallthrough]];
57466       case MVT::v64i8:
57467       case MVT::v32i16:
57468       case MVT::v8f64:
57469       case MVT::v16f32:
57470       case MVT::v16i32:
57471       case MVT::v8i64:
57472         if (!Subtarget.hasAVX512()) break;
57473         if (VConstraint)
57474           return std::make_pair(0U, &X86::VR512RegClass);
57475         return std::make_pair(0U, &X86::VR512_0_15RegClass);
57476       }
57477       break;
57478     }
57479   } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
57480     switch (Constraint[1]) {
57481     default:
57482       break;
57483     case 'i':
57484     case 't':
57485     case '2':
57486       return getRegForInlineAsmConstraint(TRI, "x", VT);
57487     case 'm':
57488       if (!Subtarget.hasMMX()) break;
57489       return std::make_pair(0U, &X86::VR64RegClass);
57490     case 'z':
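      // 'Yz' pins the operand to the first SSE register (xmm0/ymm0/zmm0),
      // e.g. for instructions such as blendv that implicitly read xmm0.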
57491       if (!Subtarget.hasSSE1()) break;
57492       switch (VT.SimpleTy) {
57493       default: break;
57494       // Scalar SSE types.
57495       case MVT::f16:
57496         if (!Subtarget.hasFP16())
57497           break;
57498         return std::make_pair(X86::XMM0, &X86::FR16XRegClass);
57499       case MVT::f32:
57500       case MVT::i32:
57501         return std::make_pair(X86::XMM0, &X86::FR32RegClass);
57502       case MVT::f64:
57503       case MVT::i64:
57504         return std::make_pair(X86::XMM0, &X86::FR64RegClass);
57505       case MVT::v8f16:
57506         if (!Subtarget.hasFP16())
57507           break;
57508         [[fallthrough]];
57509       case MVT::f128:
57510       case MVT::v16i8:
57511       case MVT::v8i16:
57512       case MVT::v4i32:
57513       case MVT::v2i64:
57514       case MVT::v4f32:
57515       case MVT::v2f64:
57516         return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57517       // AVX types.
57518       case MVT::v16f16:
57519         if (!Subtarget.hasFP16())
57520           break;
57521         [[fallthrough]];
57522       case MVT::v32i8:
57523       case MVT::v16i16:
57524       case MVT::v8i32:
57525       case MVT::v4i64:
57526       case MVT::v8f32:
57527       case MVT::v4f64:
57528         if (Subtarget.hasAVX())
57529           return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57530         break;
57531       case MVT::v32f16:
57532         if (!Subtarget.hasFP16())
57533           break;
57534         [[fallthrough]];
57535       case MVT::v64i8:
57536       case MVT::v32i16:
57537       case MVT::v8f64:
57538       case MVT::v16f32:
57539       case MVT::v16i32:
57540       case MVT::v8i64:
57541         if (Subtarget.hasAVX512())
57542           return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57543         break;
57544       }
57545       break;
57546     case 'k':
57547       // These register classes don't allocate k0 for masked vector operations.
57548       if (Subtarget.hasAVX512()) {
57549         if (VT == MVT::i1)
57550           return std::make_pair(0U, &X86::VK1WMRegClass);
57551         if (VT == MVT::i8)
57552           return std::make_pair(0U, &X86::VK8WMRegClass);
57553         if (VT == MVT::i16)
57554           return std::make_pair(0U, &X86::VK16WMRegClass);
57555       }
57556       if (Subtarget.hasBWI()) {
57557         if (VT == MVT::i32)
57558           return std::make_pair(0U, &X86::VK32WMRegClass);
57559         if (VT == MVT::i64)
57560           return std::make_pair(0U, &X86::VK64WMRegClass);
57561       }
57562       break;
57563     }
57564   }
57565 
57566   if (parseConstraintCode(Constraint) != X86::COND_INVALID)
57567     return std::make_pair(0U, &X86::GR32RegClass);
57568 
57569   // Use the default implementation in TargetLowering to convert the register
57570   // constraint into a member of a register class.
57571   std::pair<Register, const TargetRegisterClass*> Res;
57572   Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
57573 
57574   // Not found as a standard register?
57575   if (!Res.second) {
57576     // Only match x87 registers if the VT is one SelectionDAGBuilder can convert
57577     // to/from f80.
57578     if (VT == MVT::Other || VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80) {
57579       // Map st(0) .. st(7) onto FP0 .. FP7.
57580       if (Constraint.size() == 7 && Constraint[0] == '{' &&
57581           tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
57582           Constraint[3] == '(' &&
57583           (Constraint[4] >= '0' && Constraint[4] <= '7') &&
57584           Constraint[5] == ')' && Constraint[6] == '}') {
57585         // st(7) is not allocatable and thus not a member of RFP80. Return a
57586         // singleton class in cases where we have a reference to it.
57587         if (Constraint[4] == '7')
57588           return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
57589         return std::make_pair(X86::FP0 + Constraint[4] - '0',
57590                               &X86::RFP80RegClass);
57591       }
57592 
57593       // GCC allows "st(0)" to be called just plain "st".
57594       if (StringRef("{st}").equals_insensitive(Constraint))
57595         return std::make_pair(X86::FP0, &X86::RFP80RegClass);
57596     }
57597 
57598     // flags -> EFLAGS
57599     if (StringRef("{flags}").equals_insensitive(Constraint))
57600       return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
57601 
57602     // dirflag -> DF
57603     // Only allow for clobber.
57604     if (StringRef("{dirflag}").equals_insensitive(Constraint) &&
57605         VT == MVT::Other)
57606       return std::make_pair(X86::DF, &X86::DFCCRRegClass);
57607 
57608     // fpsr -> FPSW
57609     if (StringRef("{fpsr}").equals_insensitive(Constraint))
57610       return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
57611 
57612     return Res;
57613   }
57614 
57615   // Make sure it isn't a register that requires 64-bit mode.
57616   if (!Subtarget.is64Bit() &&
57617       (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
57618       TRI->getEncodingValue(Res.first) >= 8) {
57619     // Register requires REX prefix, but we're in 32-bit mode.
57620     return std::make_pair(0, nullptr);
57621   }
57622 
57623   // Make sure it isn't a register that requires AVX512.
57624   if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
57625       TRI->getEncodingValue(Res.first) & 0x10) {
57626     // Register requires EVEX prefix.
57627     return std::make_pair(0, nullptr);
57628   }
57629 
57630   // Otherwise, check to see if this is a register class of the wrong value
57631   // type.  For example, we want to map "{ax},i32" -> {eax}, we don't want it to
57632   // turn into {ax},{dx}.
57633   // MVT::Other is used to specify clobber names.
57634   if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
57635     return Res;   // Correct type already, nothing to do.
57636 
57637   // Get a matching integer register of the correct size, e.g. "ax" with
57638   // MVT::i32 should return "eax". This should even work for things like
57639   // getting 64-bit integer registers when given an f64 type.
57640   const TargetRegisterClass *Class = Res.second;
57641   // The generic code will match the first register class that contains the
57642   // given register. Thus, based on the ordering of the tablegened file,
57643   // the "plain" GR classes might not come first.
57644   // Therefore, use a helper method.
57645   if (isGRClass(*Class)) {
57646     unsigned Size = VT.getSizeInBits();
57647     if (Size == 1) Size = 8;
57648     Register DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
57649     if (DestReg > 0) {
57650       bool is64Bit = Subtarget.is64Bit();
57651       const TargetRegisterClass *RC =
57652           Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
57653         : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
57654         : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
57655         : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
57656         : nullptr;
57657       if (Size == 64 && !is64Bit) {
57658         // Model GCC's behavior here and select a fixed pair of 32-bit
57659         // registers.
57660         switch (DestReg) {
57661         case X86::RAX:
57662           return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
57663         case X86::RDX:
57664           return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
57665         case X86::RCX:
57666           return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
57667         case X86::RBX:
57668           return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
57669         case X86::RSI:
57670           return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
57671         case X86::RDI:
57672           return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
57673         case X86::RBP:
57674           return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
57675         default:
57676           return std::make_pair(0, nullptr);
57677         }
57678       }
57679       if (RC && RC->contains(DestReg))
57680         return std::make_pair(DestReg, RC);
57681       return Res;
57682     }
57683     // No register found/type mismatch.
57684     return std::make_pair(0, nullptr);
57685   } else if (isFRClass(*Class)) {
57686     // Handle references to XMM physical registers that got mapped into the
57687     // wrong class.  This can happen with constraints like {xmm0} where the
57688     // target independent register mapper will just pick the first match it can
57689     // find, ignoring the required type.
57690 
57691     // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
57692     if (VT == MVT::f16)
57693       Res.second = &X86::FR16XRegClass;
57694     else if (VT == MVT::f32 || VT == MVT::i32)
57695       Res.second = &X86::FR32XRegClass;
57696     else if (VT == MVT::f64 || VT == MVT::i64)
57697       Res.second = &X86::FR64XRegClass;
57698     else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
57699       Res.second = &X86::VR128XRegClass;
57700     else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
57701       Res.second = &X86::VR256XRegClass;
57702     else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
57703       Res.second = &X86::VR512RegClass;
57704     else {
57705       // Type mismatch and not a clobber: return an error.
57706       Res.first = 0;
57707       Res.second = nullptr;
57708     }
57709   } else if (isVKClass(*Class)) {
57710     if (VT == MVT::i1)
57711       Res.second = &X86::VK1RegClass;
57712     else if (VT == MVT::i8)
57713       Res.second = &X86::VK8RegClass;
57714     else if (VT == MVT::i16)
57715       Res.second = &X86::VK16RegClass;
57716     else if (VT == MVT::i32)
57717       Res.second = &X86::VK32RegClass;
57718     else if (VT == MVT::i64)
57719       Res.second = &X86::VK64RegClass;
57720     else {
57721       // Type mismatch and not a clobber: return an error.
57722       Res.first = 0;
57723       Res.second = nullptr;
57724     }
57725   }
57726 
57727   return Res;
57728 }
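
// Illustrative only (not part of this file): the sub/super-register and class
// fixups above are what make a constraint such as "{ax}" with an i32 operand
// resolve to EAX, and "{xmm0}" with a v4f32 operand resolve to a VR128 class,
// rather than whatever register class the generic matcher picked first.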
57729 
57730 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
57731   // Integer division on x86 is expensive. However, when aggressively optimizing
57732   // for code size, we prefer to use a div instruction, as it is usually smaller
57733   // than the alternative sequence.
57734   // The exception to this is vector division. Since x86 doesn't have vector
57735   // integer division, leaving the division as-is is a loss even in terms of
57736   // size, because it will have to be scalarized, while the alternative code
57737   // sequence can be performed in vector form.
57738   bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
57739   return OptSize && !VT.isVector();
57740 }
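
// Illustrative: under minsize a scalar 'udiv i32 %x, 10' keeps the div
// instruction, while a '<4 x i32>' division by a constant still reports "not
// cheap" and is expanded to the vector multiply-based sequence instead of
// being scalarized.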
57741 
57742 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
57743   if (!Subtarget.is64Bit())
57744     return;
57745 
57746   // Update IsSplitCSR in X86MachineFunctionInfo.
57747   X86MachineFunctionInfo *AFI =
57748       Entry->getParent()->getInfo<X86MachineFunctionInfo>();
57749   AFI->setIsSplitCSR(true);
57750 }
57751 
57752 void X86TargetLowering::insertCopiesSplitCSR(
57753     MachineBasicBlock *Entry,
57754     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
57755   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
57756   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
57757   if (!IStart)
57758     return;
57759 
57760   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
57761   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
57762   MachineBasicBlock::iterator MBBI = Entry->begin();
57763   for (const MCPhysReg *I = IStart; *I; ++I) {
57764     const TargetRegisterClass *RC = nullptr;
57765     if (X86::GR64RegClass.contains(*I))
57766       RC = &X86::GR64RegClass;
57767     else
57768       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
57769 
57770     Register NewVR = MRI->createVirtualRegister(RC);
57771     // Create copy from CSR to a virtual register.
57772     // FIXME: this currently does not emit CFI pseudo-instructions, it works
57773     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
57774     // nounwind. If we want to generalize this later, we may need to emit
57775     // CFI pseudo-instructions.
57776     assert(
57777         Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
57778         "Function should be nounwind in insertCopiesSplitCSR!");
57779     Entry->addLiveIn(*I);
57780     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
57781         .addReg(*I);
57782 
57783     // Insert the copy-back instructions right before the terminator.
57784     for (auto *Exit : Exits)
57785       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
57786               TII->get(TargetOpcode::COPY), *I)
57787           .addReg(NewVR);
57788   }
57789 }
57790 
57791 bool X86TargetLowering::supportSwiftError() const {
57792   return Subtarget.is64Bit();
57793 }
57794 
57795 /// Returns true if stack probing through a function call is requested.
57796 bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {
57797   return !getStackProbeSymbolName(MF).empty();
57798 }
57799 
57800 /// Returns true if stack probing through inline assembly is requested.
57801 bool X86TargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
57802 
57803   // No inline stack probe for Windows; they have their own mechanism.
57804   if (Subtarget.isOSWindows() ||
57805       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
57806     return false;
57807 
57808   // If the function specifically requests inline stack probes, emit them.
57809   if (MF.getFunction().hasFnAttribute("probe-stack"))
57810     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
57811            "inline-asm";
57812 
57813   return false;
57814 }
57815 
57816 /// Returns the name of the symbol used to emit stack probes or the empty
57817 /// string if not applicable.
57818 StringRef
57819 X86TargetLowering::getStackProbeSymbolName(const MachineFunction &MF) const {
57820   // Inline stack probes disable the stack probe call.
57821   if (hasInlineStackProbe(MF))
57822     return "";
57823 
57824   // If the function specifically requests stack probes, emit them.
57825   if (MF.getFunction().hasFnAttribute("probe-stack"))
57826     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
57827 
57828   // Generally, if we aren't on Windows, the platform ABI does not include
57829   // support for stack probes, so don't emit them.
57830   if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
57831       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
57832     return "";
57833 
57834   // We need a stack probe to conform to the Windows ABI. Choose the right
57835   // symbol.
57836   if (Subtarget.is64Bit())
57837     return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
57838   return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
57839 }
57840 
57841 unsigned
57842 X86TargetLowering::getStackProbeSize(const MachineFunction &MF) const {
57843   // The default stack probe size is 4096 if the function has no
57844   // "stack-probe-size" attribute.
57845   return MF.getFunction().getFnAttributeAsParsedInteger("stack-probe-size",
57846                                                         4096);
57847 }
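
// Illustrative: a function carrying the IR attribute
// "stack-probe-size"="8192" is probed every 8 KiB instead of the 4096-byte
// default returned here.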
57848 
57849 Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
57850   if (ML->isInnermost() &&
57851       ExperimentalPrefInnermostLoopAlignment.getNumOccurrences())
57852     return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
57853   return TargetLowering::getPrefLoopAlignment();
57854 }
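
// Illustrative: setting ExperimentalPrefInnermostLoopAlignment to 6 aligns
// innermost loops to 1 << 6 = 64 bytes; otherwise the target's default
// preferred loop alignment is used.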
57855