//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "MCTargetDesc/X86ShuffleDecode.h"
#include "X86.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ObjCARCUtil.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cctype>
#include <numeric>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

static cl::opt<int> ExperimentalPrefInnermostLoopAlignment(
    "x86-experimental-pref-innermost-loop-alignment", cl::init(4),
    cl::desc(
        "Sets the preferable loop alignment for experiments (as log2 bytes) "
        "for innermost loops only. If specified, this option overrides "
        "alignment set by x86-experimental-pref-loop-alignment."),
    cl::Hidden);

static cl::opt<bool> MulConstantOptimization(
    "mul-constant-optimization", cl::init(true),
    cl::desc("Replace 'mul x, Const' with more effective instructions like "
             "SHIFT, LEA, etc."),
    cl::Hidden);

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget.isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget.is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides and use cheaper ones.
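  // addBypassSlowDiv(SlowBits, FastBits) asks CodeGenPrepare to emit a runtime
  // check and fall back to the narrower (faster) divide whenever both operands
  // fit in FastBits.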
  if (TM.getOptLevel() >= CodeGenOptLevel::Default) {
    if (Subtarget.hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
      addBypassSlowDiv(64, 32);
  }

  // Setup Windows compiler runtime calls.
  if (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::SDIV_I64, "_alldiv", CallingConv::X86_StdCall },
      { RTLIB::UDIV_I64, "_aulldiv", CallingConv::X86_StdCall },
      { RTLIB::SREM_I64, "_allrem", CallingConv::X86_StdCall },
      { RTLIB::UREM_I64, "_aullrem", CallingConv::X86_StdCall },
      { RTLIB::MUL_I64, "_allmul", CallingConv::X86_StdCall },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget.getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }

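  // Atomic operations wider than the maximum supported size are expanded into
  // __atomic_* library calls by AtomicExpandPass.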
  if (Subtarget.canUseCMPXCHG16B())
    setMaxAtomicSizeInBitsSupported(128);
  else if (Subtarget.canUseCMPXCHG8B())
    setMaxAtomicSizeInBitsSupported(64);
  else
    setMaxAtomicSizeInBitsSupported(32);

  setMaxDivRemBitWidthSupported(Subtarget.is64Bit() ? 128 : 64);

  setMaxLargeFPConvertBitWidthSupported(128);

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget.is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) {
    setCondCodeAction(ISD::SETOEQ, VT, Expand);
    setCondCodeAction(ISD::SETUNE, VT, Expand);
  }

  // Integer absolute.
  if (Subtarget.canUseCMOV()) {
    setOperationAction(ISD::ABS            , MVT::i16  , Custom);
    setOperationAction(ISD::ABS            , MVT::i32  , Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS          , MVT::i64  , Custom);
  }

  // Absolute difference.
  for (auto Op : {ISD::ABDS, ISD::ABDU}) {
    setOperationAction(Op                  , MVT::i8   , Custom);
    setOperationAction(Op                  , MVT::i16  , Custom);
    setOperationAction(Op                  , MVT::i32  , Custom);
    if (Subtarget.is64Bit())
      setOperationAction(Op                , MVT::i64  , Custom);
  }

  // Signed saturation subtraction.
  setOperationAction(ISD::SSUBSAT          , MVT::i8   , Custom);
  setOperationAction(ISD::SSUBSAT          , MVT::i16  , Custom);
  setOperationAction(ISD::SSUBSAT          , MVT::i32  , Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SSUBSAT        , MVT::i64  , Custom);

  // Funnel shifts.
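  // i32/i64 funnel shifts can be selected directly as SHLD/SHRD (unless SHLD
  // is slow on this subtarget); the narrower i8/i16 cases are always
  // custom-lowered.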
  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
    // For slow shld targets we only lower for code size.
    LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal;

    setOperationAction(ShiftOp             , MVT::i8   , Custom);
    setOperationAction(ShiftOp             , MVT::i16  , Custom);
    setOperationAction(ShiftOp             , MVT::i32  , ShiftDoubleAction);
    if (Subtarget.is64Bit())
      setOperationAction(ShiftOp           , MVT::i64  , ShiftDoubleAction);
  }

  if (!Subtarget.useSoftFloat()) {
    // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
    // operation.
    setOperationAction(ISD::UINT_TO_FP,        MVT::i8, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::UINT_TO_FP,        MVT::i16, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD or VCVTUSI2SS/SD for other targets.
    setOperationAction(ISD::UINT_TO_FP,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);

    // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::SINT_TO_FP,        MVT::i8, Promote);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
    // SSE has no i16 to fp conversion, only i32. We promote in the handler
    // so that f80 can use i16, and f64 can use i16 when only SSE1 is available.
    setOperationAction(ISD::SINT_TO_FP,        MVT::i16, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
    // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not
    setOperationAction(ISD::SINT_TO_FP,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::SINT_TO_FP,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);

    // Promote i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::FP_TO_SINT,        MVT::i8,  Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8,  Promote);
    setOperationAction(ISD::FP_TO_SINT,        MVT::i16, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered.  In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);

    // Handle FP_TO_UINT by promoting the destination to a larger signed
    // conversion.
    setOperationAction(ISD::FP_TO_UINT,        MVT::i8,  Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8,  Promote);
    setOperationAction(ISD::FP_TO_UINT,        MVT::i16, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT,        MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT,        MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);

    setOperationAction(ISD::LRINT,             MVT::f32, Custom);
    setOperationAction(ISD::LRINT,             MVT::f64, Custom);
    setOperationAction(ISD::LLRINT,            MVT::f32, Custom);
    setOperationAction(ISD::LLRINT,            MVT::f64, Custom);

    if (!Subtarget.is64Bit()) {
      setOperationAction(ISD::LRINT,  MVT::i64, Custom);
      setOperationAction(ISD::LLRINT, MVT::i64, Custom);
    }
  }

  if (Subtarget.hasSSE2()) {
    // Custom lowering for saturating float to int conversions.
    // We handle promotion to larger result types manually.
    for (MVT VT : { MVT::i8, MVT::i16, MVT::i32 }) {
      setOperationAction(ISD::FP_TO_UINT_SAT, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT_SAT, VT, Custom);
    }
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FP_TO_UINT_SAT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT_SAT, MVT::i64, Custom);
    }
  }

  // Handle address space casts between mixed sized pointers.
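  // These cover x86's mixed-size pointer address spaces: ptr32 (address
  // spaces 270/271) and ptr64 (address space 272).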
  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!Subtarget.hasSSE2()) {
    setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
    setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
    }
  } else if (!Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST      , MVT::i64  , Custom);

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
  }

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
                   MVT::i8,  MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::BR_CC,     VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);

  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
  setOperationAction(ISD::FREM             , MVT::f128 , Expand);

  if (!Subtarget.useSoftFloat() && Subtarget.hasX87()) {
    setOperationAction(ISD::GET_ROUNDING   , MVT::i32  , Custom);
    setOperationAction(ISD::SET_ROUNDING   , MVT::Other, Custom);
    setOperationAction(ISD::GET_FPENV_MEM  , MVT::Other, Custom);
    setOperationAction(ISD::SET_FPENV_MEM  , MVT::Other, Custom);
    setOperationAction(ISD::RESET_FPENV    , MVT::Other, Custom);
  }

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationPromotedToType(ISD::CTTZ           , MVT::i8   , MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
  // Promoted i16. tzcntw has a false dependency on Intel CPUs. For BSF, we emit
  // a REP prefix to encode it as TZCNT for modern CPUs so it makes sense to
  // promote that too.
  setOperationPromotedToType(ISD::CTTZ           , MVT::i16  , MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , MVT::i32);

  if (!Subtarget.hasBMI()) {
    setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Legal);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
    }
  }

  if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationPromotedToType(ISD::CTLZ           , MVT::i8   , MVT::i32);
    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8   , MVT::i32);
  } else {
    for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
      if (VT == MVT::i64 && !Subtarget.is64Bit())
        continue;
      setOperationAction(ISD::CTLZ           , VT, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
    }
  }

  for (auto Op : {ISD::FP16_TO_FP, ISD::STRICT_FP16_TO_FP, ISD::FP_TO_FP16,
                  ISD::STRICT_FP_TO_FP16}) {
    // Special handling for half-precision floating point conversions.
    // If we don't have F16C support, then lower half float conversions
    // into library calls.
    setOperationAction(
        Op, MVT::f32,
        (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
    // There's never any support for operations beyond MVT::f32.
    setOperationAction(Op, MVT::f64, Expand);
    setOperationAction(Op, MVT::f80, Expand);
    setOperationAction(Op, MVT::f128, Expand);
  }

  for (MVT VT : {MVT::f32, MVT::f64, MVT::f80, MVT::f128}) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::bf16, Expand);
    setTruncStoreAction(VT, MVT::f16, Expand);
    setTruncStoreAction(VT, MVT::bf16, Expand);

    setOperationAction(ISD::BF16_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_BF16, VT, Custom);
  }

  setOperationAction(ISD::PARITY, MVT::i8, Custom);
  setOperationAction(ISD::PARITY, MVT::i16, Custom);
  setOperationAction(ISD::PARITY, MVT::i32, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::PARITY, MVT::i64, Custom);
  if (Subtarget.hasPOPCNT()) {
    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
    // popcntw is longer to encode than popcntl and also has a false dependency
    // on the dest that popcntl hasn't had since Cannon Lake.
    setOperationPromotedToType(ISD::CTPOP, MVT::i16, MVT::i32);
  } else {
    setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
    else
      setOperationAction(ISD::CTPOP        , MVT::i64  , Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);

  if (!Subtarget.hasMOVBE())
    setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);

  // X86 wants to expand cmov itself.
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
  }
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC,  VT, Custom);
  }

  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);

  setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

  // Darwin ABI issue.
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::ConstantPool    , VT, Custom);
    setOperationAction(ISD::JumpTable       , VT, Custom);
    setOperationAction(ISD::GlobalAddress   , VT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
    setOperationAction(ISD::ExternalSymbol  , VT, Custom);
    setOperationAction(ISD::BlockAddress    , VT, Custom);
  }

  // 64-bit shl, sra, srl (iff 32-bit x86)
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SHL_PARTS, VT, Custom);
    setOperationAction(ISD::SRA_PARTS, VT, Custom);
    setOperationAction(ISD::SRL_PARTS, VT, Custom);
  }

  if (Subtarget.hasSSEPrefetch() || Subtarget.hasThreeDNow())
    setOperationAction(ISD::PREFETCH      , MVT::Other, Custom);

  setOperationAction(ISD::ATOMIC_FENCE  , MVT::Other, Custom);

  // Expand certain atomics
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (!Subtarget.is64Bit())
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);

  if (Subtarget.canUseCMPXCHG16B())
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);

  // FIXME - use subtarget debug flags
  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  if (Subtarget.isTargetPS())
    setOperationAction(ISD::UBSANTRAP, MVT::Other, Expand);
  else
    setOperationAction(ISD::UBSANTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  bool Is64Bit = Subtarget.is64Bit();
  setOperationAction(ISD::VAARG,  MVT::Other, Is64Bit ? Custom : Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);

  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);

  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);

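  // Helper that applies a single legalize action to the long tail of scalar FP
  // operations on an f16-like type; FCOPYSIGN is always expanded and SELECT is
  // always custom-lowered, regardless of the requested Action.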
  auto setF16Action = [&] (MVT VT, LegalizeAction Action) {
    setOperationAction(ISD::FABS, VT, Action);
    setOperationAction(ISD::FNEG, VT, Action);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FREM, VT, Action);
    setOperationAction(ISD::FMA, VT, Action);
    setOperationAction(ISD::FMINNUM, VT, Action);
    setOperationAction(ISD::FMAXNUM, VT, Action);
    setOperationAction(ISD::FMINIMUM, VT, Action);
    setOperationAction(ISD::FMAXIMUM, VT, Action);
    setOperationAction(ISD::FSIN, VT, Action);
    setOperationAction(ISD::FCOS, VT, Action);
    setOperationAction(ISD::FSINCOS, VT, Action);
    setOperationAction(ISD::FSQRT, VT, Action);
    setOperationAction(ISD::FPOW, VT, Action);
    setOperationAction(ISD::FLOG, VT, Action);
    setOperationAction(ISD::FLOG2, VT, Action);
    setOperationAction(ISD::FLOG10, VT, Action);
    setOperationAction(ISD::FEXP, VT, Action);
    setOperationAction(ISD::FEXP2, VT, Action);
    setOperationAction(ISD::FEXP10, VT, Action);
    setOperationAction(ISD::FCEIL, VT, Action);
    setOperationAction(ISD::FFLOOR, VT, Action);
    setOperationAction(ISD::FNEARBYINT, VT, Action);
    setOperationAction(ISD::FRINT, VT, Action);
    setOperationAction(ISD::BR_CC, VT, Action);
    setOperationAction(ISD::SETCC, VT, Action);
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SELECT_CC, VT, Action);
    setOperationAction(ISD::FROUND, VT, Action);
    setOperationAction(ISD::FROUNDEVEN, VT, Action);
    setOperationAction(ISD::FTRUNC, VT, Action);
    setOperationAction(ISD::FLDEXP, VT, Action);
  };

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    // f16, f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f16, Subtarget.hasAVX512() ? &X86::FR16XRegClass
                                                     : &X86::FR16RegClass);
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                     : &X86::FR64RegClass);

    // Disable f32->f64 extload as we can only generate this in one instruction
    // under optsize. So it's easier to pattern match (fpext (load)) for that
    // case instead of needing to emit 2 instructions for extload in the
    // non-optsize case.
601     setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
602 
603     for (auto VT : { MVT::f32, MVT::f64 }) {
604       // Use ANDPD to simulate FABS.
605       setOperationAction(ISD::FABS, VT, Custom);
606 
607       // Use XORP to simulate FNEG.
608       setOperationAction(ISD::FNEG, VT, Custom);
609 
610       // Use ANDPD and ORPD to simulate FCOPYSIGN.
611       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
612 
613       // These might be better off as horizontal vector ops.
614       setOperationAction(ISD::FADD, VT, Custom);
615       setOperationAction(ISD::FSUB, VT, Custom);
616 
617       // We don't support sin/cos/fmod
618       setOperationAction(ISD::FSIN   , VT, Expand);
619       setOperationAction(ISD::FCOS   , VT, Expand);
620       setOperationAction(ISD::FSINCOS, VT, Expand);
621     }
622 
623     // Half type will be promoted by default.
624     setF16Action(MVT::f16, Promote);
625     setOperationAction(ISD::FADD, MVT::f16, Promote);
626     setOperationAction(ISD::FSUB, MVT::f16, Promote);
627     setOperationAction(ISD::FMUL, MVT::f16, Promote);
628     setOperationAction(ISD::FDIV, MVT::f16, Promote);
629     setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
630     setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
631     setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
632 
633     setOperationAction(ISD::STRICT_FADD, MVT::f16, Promote);
634     setOperationAction(ISD::STRICT_FSUB, MVT::f16, Promote);
635     setOperationAction(ISD::STRICT_FMUL, MVT::f16, Promote);
636     setOperationAction(ISD::STRICT_FDIV, MVT::f16, Promote);
637     setOperationAction(ISD::STRICT_FMA, MVT::f16, Promote);
638     setOperationAction(ISD::STRICT_FMINNUM, MVT::f16, Promote);
639     setOperationAction(ISD::STRICT_FMAXNUM, MVT::f16, Promote);
640     setOperationAction(ISD::STRICT_FMINIMUM, MVT::f16, Promote);
641     setOperationAction(ISD::STRICT_FMAXIMUM, MVT::f16, Promote);
642     setOperationAction(ISD::STRICT_FSQRT, MVT::f16, Promote);
643     setOperationAction(ISD::STRICT_FPOW, MVT::f16, Promote);
644     setOperationAction(ISD::STRICT_FLDEXP, MVT::f16, Promote);
645     setOperationAction(ISD::STRICT_FLOG, MVT::f16, Promote);
646     setOperationAction(ISD::STRICT_FLOG2, MVT::f16, Promote);
647     setOperationAction(ISD::STRICT_FLOG10, MVT::f16, Promote);
648     setOperationAction(ISD::STRICT_FEXP, MVT::f16, Promote);
649     setOperationAction(ISD::STRICT_FEXP2, MVT::f16, Promote);
650     setOperationAction(ISD::STRICT_FCEIL, MVT::f16, Promote);
651     setOperationAction(ISD::STRICT_FFLOOR, MVT::f16, Promote);
652     setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f16, Promote);
653     setOperationAction(ISD::STRICT_FRINT, MVT::f16, Promote);
654     setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Promote);
655     setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Promote);
656     setOperationAction(ISD::STRICT_FROUND, MVT::f16, Promote);
657     setOperationAction(ISD::STRICT_FROUNDEVEN, MVT::f16, Promote);
658     setOperationAction(ISD::STRICT_FTRUNC, MVT::f16, Promote);
659     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
660     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
661     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
662 
663     setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
664     setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
665 
666     // Lower this to MOVMSK plus an AND.
667     setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
668     setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);
669 
670   } else if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1() &&
671              (UseX87 || Is64Bit)) {
672     // Use SSE for f32, x87 for f64.
673     // Set up the FP register classes.
674     addRegisterClass(MVT::f32, &X86::FR32RegClass);
675     if (UseX87)
676       addRegisterClass(MVT::f64, &X86::RFP64RegClass);
677 
678     // Use ANDPS to simulate FABS.
679     setOperationAction(ISD::FABS , MVT::f32, Custom);
680 
681     // Use XORP to simulate FNEG.
682     setOperationAction(ISD::FNEG , MVT::f32, Custom);
683 
684     if (UseX87)
685       setOperationAction(ISD::UNDEF, MVT::f64, Expand);
686 
687     // Use ANDPS and ORPS to simulate FCOPYSIGN.
688     if (UseX87)
689       setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
690     setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
691 
692     // We don't support sin/cos/fmod
693     setOperationAction(ISD::FSIN   , MVT::f32, Expand);
694     setOperationAction(ISD::FCOS   , MVT::f32, Expand);
695     setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
696 
697     if (UseX87) {
698       // Always expand sin/cos functions even though x87 has an instruction.
699       setOperationAction(ISD::FSIN, MVT::f64, Expand);
700       setOperationAction(ISD::FCOS, MVT::f64, Expand);
701       setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
702     }
703   } else if (UseX87) {
704     // f32 and f64 in x87.
705     // Set up the FP register classes.
706     addRegisterClass(MVT::f64, &X86::RFP64RegClass);
707     addRegisterClass(MVT::f32, &X86::RFP32RegClass);
708 
709     for (auto VT : { MVT::f32, MVT::f64 }) {
710       setOperationAction(ISD::UNDEF,     VT, Expand);
711       setOperationAction(ISD::FCOPYSIGN, VT, Expand);
712 
713       // Always expand sin/cos functions even though x87 has an instruction.
714       setOperationAction(ISD::FSIN   , VT, Expand);
715       setOperationAction(ISD::FCOS   , VT, Expand);
716       setOperationAction(ISD::FSINCOS, VT, Expand);
717     }
718   }
719 
720   // Expand FP32 immediates into loads from the stack, save special cases.
721   if (isTypeLegal(MVT::f32)) {
722     if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
723       addLegalFPImmediate(APFloat(+0.0f)); // FLD0
724       addLegalFPImmediate(APFloat(+1.0f)); // FLD1
725       addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
726       addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
727     } else // SSE immediates.
728       addLegalFPImmediate(APFloat(+0.0f)); // xorps
729   }
730   // Expand FP64 immediates into loads from the stack, save special cases.
731   if (isTypeLegal(MVT::f64)) {
732     if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
733       addLegalFPImmediate(APFloat(+0.0)); // FLD0
734       addLegalFPImmediate(APFloat(+1.0)); // FLD1
735       addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
736       addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
737     } else // SSE immediates.
738       addLegalFPImmediate(APFloat(+0.0)); // xorpd
739   }
740   // Support fp16 0 immediate.
741   if (isTypeLegal(MVT::f16))
742     addLegalFPImmediate(APFloat::getZero(APFloat::IEEEhalf()));
743 
744   // Handle constrained floating-point operations of scalar.
745   setOperationAction(ISD::STRICT_FADD,      MVT::f32, Legal);
746   setOperationAction(ISD::STRICT_FADD,      MVT::f64, Legal);
747   setOperationAction(ISD::STRICT_FSUB,      MVT::f32, Legal);
748   setOperationAction(ISD::STRICT_FSUB,      MVT::f64, Legal);
749   setOperationAction(ISD::STRICT_FMUL,      MVT::f32, Legal);
750   setOperationAction(ISD::STRICT_FMUL,      MVT::f64, Legal);
751   setOperationAction(ISD::STRICT_FDIV,      MVT::f32, Legal);
752   setOperationAction(ISD::STRICT_FDIV,      MVT::f64, Legal);
753   setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f32, Legal);
754   setOperationAction(ISD::STRICT_FP_ROUND,  MVT::f64, Legal);
755   setOperationAction(ISD::STRICT_FSQRT,     MVT::f32, Legal);
756   setOperationAction(ISD::STRICT_FSQRT,     MVT::f64, Legal);
757 
758   // We don't support FMA.
759   setOperationAction(ISD::FMA, MVT::f64, Expand);
760   setOperationAction(ISD::FMA, MVT::f32, Expand);
761 
762   // f80 always uses X87.
763   if (UseX87) {
764     addRegisterClass(MVT::f80, &X86::RFP80RegClass);
765     setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
766     setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
767     {
768       APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
769       addLegalFPImmediate(TmpFlt);  // FLD0
770       TmpFlt.changeSign();
771       addLegalFPImmediate(TmpFlt);  // FLD0/FCHS
772 
773       bool ignored;
774       APFloat TmpFlt2(+1.0);
775       TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
776                       &ignored);
777       addLegalFPImmediate(TmpFlt2);  // FLD1
778       TmpFlt2.changeSign();
779       addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
780     }
781 
782     // Always expand sin/cos functions even though x87 has an instruction.
783     setOperationAction(ISD::FSIN   , MVT::f80, Expand);
784     setOperationAction(ISD::FCOS   , MVT::f80, Expand);
785     setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
786 
787     setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
788     setOperationAction(ISD::FCEIL,  MVT::f80, Expand);
789     setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
790     setOperationAction(ISD::FRINT,  MVT::f80, Expand);
791     setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
792     setOperationAction(ISD::FROUNDEVEN, MVT::f80, Expand);
793     setOperationAction(ISD::FMA, MVT::f80, Expand);
794     setOperationAction(ISD::LROUND, MVT::f80, Expand);
795     setOperationAction(ISD::LLROUND, MVT::f80, Expand);
796     setOperationAction(ISD::LRINT, MVT::f80, Custom);
797     setOperationAction(ISD::LLRINT, MVT::f80, Custom);
798 
799     // Handle constrained floating-point operations of scalar.
800     setOperationAction(ISD::STRICT_FADD     , MVT::f80, Legal);
801     setOperationAction(ISD::STRICT_FSUB     , MVT::f80, Legal);
802     setOperationAction(ISD::STRICT_FMUL     , MVT::f80, Legal);
803     setOperationAction(ISD::STRICT_FDIV     , MVT::f80, Legal);
804     setOperationAction(ISD::STRICT_FSQRT    , MVT::f80, Legal);
805     if (isTypeLegal(MVT::f16)) {
806       setOperationAction(ISD::FP_EXTEND, MVT::f80, Custom);
807       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Custom);
808     } else {
809       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
810     }
811     // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
812     // as Custom.
813     setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
814   }
815 
816   // f128 uses xmm registers, but most operations require libcalls.
817   if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
818     addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
819                                                    : &X86::VR128RegClass);
820 
821     addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps
822 
823     setOperationAction(ISD::FADD,        MVT::f128, LibCall);
824     setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
825     setOperationAction(ISD::FSUB,        MVT::f128, LibCall);
826     setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
827     setOperationAction(ISD::FDIV,        MVT::f128, LibCall);
828     setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
829     setOperationAction(ISD::FMUL,        MVT::f128, LibCall);
830     setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
831     setOperationAction(ISD::FMA,         MVT::f128, LibCall);
832     setOperationAction(ISD::STRICT_FMA,  MVT::f128, LibCall);
833 
834     setOperationAction(ISD::FABS, MVT::f128, Custom);
835     setOperationAction(ISD::FNEG, MVT::f128, Custom);
836     setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);
837 
838     setOperationAction(ISD::FSIN,         MVT::f128, LibCall);
839     setOperationAction(ISD::STRICT_FSIN,  MVT::f128, LibCall);
840     setOperationAction(ISD::FCOS,         MVT::f128, LibCall);
841     setOperationAction(ISD::STRICT_FCOS,  MVT::f128, LibCall);
842     setOperationAction(ISD::FSINCOS,      MVT::f128, LibCall);
843     // No STRICT_FSINCOS
844     setOperationAction(ISD::FSQRT,        MVT::f128, LibCall);
845     setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);
846 
847     setOperationAction(ISD::FP_EXTEND,        MVT::f128, Custom);
848     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
849     // We need to custom handle any FP_ROUND with an f128 input, but
850     // LegalizeDAG uses the result type to know when to run a custom handler.
851     // So we have to list all legal floating point result types here.
852     if (isTypeLegal(MVT::f32)) {
853       setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
854       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
855     }
856     if (isTypeLegal(MVT::f64)) {
857       setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
858       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
859     }
860     if (isTypeLegal(MVT::f80)) {
861       setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
862       setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
863     }
864 
865     setOperationAction(ISD::SETCC, MVT::f128, Custom);
866 
867     setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
868     setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
869     setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
870     setTruncStoreAction(MVT::f128, MVT::f32, Expand);
871     setTruncStoreAction(MVT::f128, MVT::f64, Expand);
872     setTruncStoreAction(MVT::f128, MVT::f80, Expand);
873   }
874 
875   // Always use a library call for pow.
876   setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
877   setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
878   setOperationAction(ISD::FPOW             , MVT::f80  , Expand);
879   setOperationAction(ISD::FPOW             , MVT::f128 , Expand);
880 
881   setOperationAction(ISD::FLOG, MVT::f80, Expand);
882   setOperationAction(ISD::FLOG2, MVT::f80, Expand);
883   setOperationAction(ISD::FLOG10, MVT::f80, Expand);
884   setOperationAction(ISD::FEXP, MVT::f80, Expand);
885   setOperationAction(ISD::FEXP2, MVT::f80, Expand);
886   setOperationAction(ISD::FEXP10, MVT::f80, Expand);
887   setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
888   setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);
889 
890   // Some FP actions are always expanded for vector types.
891   for (auto VT : { MVT::v8f16, MVT::v16f16, MVT::v32f16,
892                    MVT::v4f32, MVT::v8f32,  MVT::v16f32,
893                    MVT::v2f64, MVT::v4f64,  MVT::v8f64 }) {
894     setOperationAction(ISD::FSIN,      VT, Expand);
895     setOperationAction(ISD::FSINCOS,   VT, Expand);
896     setOperationAction(ISD::FCOS,      VT, Expand);
897     setOperationAction(ISD::FREM,      VT, Expand);
898     setOperationAction(ISD::FCOPYSIGN, VT, Expand);
899     setOperationAction(ISD::FPOW,      VT, Expand);
900     setOperationAction(ISD::FLOG,      VT, Expand);
901     setOperationAction(ISD::FLOG2,     VT, Expand);
902     setOperationAction(ISD::FLOG10,    VT, Expand);
903     setOperationAction(ISD::FEXP,      VT, Expand);
904     setOperationAction(ISD::FEXP2,     VT, Expand);
905     setOperationAction(ISD::FEXP10,    VT, Expand);
906   }
907 
908   // First set operation action for all vector types to either promote
909   // (for widening) or expand (for scalarization). Then we will selectively
910   // turn on ones that can be effectively codegen'd.
911   for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
912     setOperationAction(ISD::SDIV, VT, Expand);
913     setOperationAction(ISD::UDIV, VT, Expand);
914     setOperationAction(ISD::SREM, VT, Expand);
915     setOperationAction(ISD::UREM, VT, Expand);
916     setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT,Expand);
917     setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
918     setOperationAction(ISD::EXTRACT_SUBVECTOR, VT,Expand);
919     setOperationAction(ISD::INSERT_SUBVECTOR, VT,Expand);
920     setOperationAction(ISD::FMA,  VT, Expand);
921     setOperationAction(ISD::FFLOOR, VT, Expand);
922     setOperationAction(ISD::FCEIL, VT, Expand);
923     setOperationAction(ISD::FTRUNC, VT, Expand);
924     setOperationAction(ISD::FRINT, VT, Expand);
925     setOperationAction(ISD::FNEARBYINT, VT, Expand);
926     setOperationAction(ISD::FROUNDEVEN, VT, Expand);
927     setOperationAction(ISD::SMUL_LOHI, VT, Expand);
928     setOperationAction(ISD::MULHS, VT, Expand);
929     setOperationAction(ISD::UMUL_LOHI, VT, Expand);
930     setOperationAction(ISD::MULHU, VT, Expand);
931     setOperationAction(ISD::SDIVREM, VT, Expand);
932     setOperationAction(ISD::UDIVREM, VT, Expand);
933     setOperationAction(ISD::CTPOP, VT, Expand);
934     setOperationAction(ISD::CTTZ, VT, Expand);
935     setOperationAction(ISD::CTLZ, VT, Expand);
936     setOperationAction(ISD::ROTL, VT, Expand);
937     setOperationAction(ISD::ROTR, VT, Expand);
938     setOperationAction(ISD::BSWAP, VT, Expand);
939     setOperationAction(ISD::SETCC, VT, Expand);
940     setOperationAction(ISD::FP_TO_UINT, VT, Expand);
941     setOperationAction(ISD::FP_TO_SINT, VT, Expand);
942     setOperationAction(ISD::UINT_TO_FP, VT, Expand);
943     setOperationAction(ISD::SINT_TO_FP, VT, Expand);
944     setOperationAction(ISD::SIGN_EXTEND_INREG, VT,Expand);
945     setOperationAction(ISD::TRUNCATE, VT, Expand);
946     setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
947     setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
948     setOperationAction(ISD::ANY_EXTEND, VT, Expand);
949     setOperationAction(ISD::SELECT_CC, VT, Expand);
950     for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
951       setTruncStoreAction(InnerVT, VT, Expand);
952 
953       setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
954       setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);
955 
956       // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
957       // types, we have to deal with them whether we ask for Expansion or not.
958       // Setting Expand causes its own optimisation problems though, so leave
959       // them legal.
960       if (VT.getVectorElementType() == MVT::i1)
961         setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
962 
963       // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
964       // split/scalarized right now.
965       if (VT.getVectorElementType() == MVT::f16 ||
966           VT.getVectorElementType() == MVT::bf16)
967         setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
968     }
969   }
970 
971   // FIXME: In order to prevent SSE instructions being expanded to MMX ones
972   // with -msoft-float, disable use of MMX as well.
973   if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
974     addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
975     // No operations on x86mmx supported, everything uses intrinsics.
976   }
977 
978   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
979     addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
980                                                     : &X86::VR128RegClass);
981 
982     setOperationAction(ISD::FMAXIMUM,           MVT::f32, Custom);
983     setOperationAction(ISD::FMINIMUM,           MVT::f32, Custom);
984 
985     setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
986     setOperationAction(ISD::FABS,               MVT::v4f32, Custom);
987     setOperationAction(ISD::FCOPYSIGN,          MVT::v4f32, Custom);
988     setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
989     setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
990     setOperationAction(ISD::VSELECT,            MVT::v4f32, Custom);
991     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
992     setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
993 
994     setOperationAction(ISD::LOAD,               MVT::v2f32, Custom);
995     setOperationAction(ISD::STORE,              MVT::v2f32, Custom);
996 
997     setOperationAction(ISD::STRICT_FADD,        MVT::v4f32, Legal);
998     setOperationAction(ISD::STRICT_FSUB,        MVT::v4f32, Legal);
999     setOperationAction(ISD::STRICT_FMUL,        MVT::v4f32, Legal);
1000     setOperationAction(ISD::STRICT_FDIV,        MVT::v4f32, Legal);
1001     setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f32, Legal);
1002   }
1003 
1004   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
1005     addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
1006                                                     : &X86::VR128RegClass);
1007 
1008     // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
1009     // registers cannot be used even for integer operations.
1010     addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
1011                                                     : &X86::VR128RegClass);
1012     addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
1013                                                     : &X86::VR128RegClass);
1014     addRegisterClass(MVT::v8f16, Subtarget.hasVLX() ? &X86::VR128XRegClass
1015                                                     : &X86::VR128RegClass);
1016     addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
1017                                                     : &X86::VR128RegClass);
1018     addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
1019                                                     : &X86::VR128RegClass);
1020 
1021     for (auto VT : { MVT::f64, MVT::v4f32, MVT::v2f64 }) {
1022       setOperationAction(ISD::FMAXIMUM, VT, Custom);
1023       setOperationAction(ISD::FMINIMUM, VT, Custom);
1024     }
1025 
1026     for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
1027                      MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
1028       setOperationAction(ISD::SDIV, VT, Custom);
1029       setOperationAction(ISD::SREM, VT, Custom);
1030       setOperationAction(ISD::UDIV, VT, Custom);
1031       setOperationAction(ISD::UREM, VT, Custom);
1032     }
1033 
1034     setOperationAction(ISD::MUL,                MVT::v2i8,  Custom);
1035     setOperationAction(ISD::MUL,                MVT::v4i8,  Custom);
1036     setOperationAction(ISD::MUL,                MVT::v8i8,  Custom);
1037 
1038     setOperationAction(ISD::MUL,                MVT::v16i8, Custom);
1039     setOperationAction(ISD::MUL,                MVT::v4i32, Custom);
1040     setOperationAction(ISD::MUL,                MVT::v2i64, Custom);
1041     setOperationAction(ISD::MULHU,              MVT::v4i32, Custom);
1042     setOperationAction(ISD::MULHS,              MVT::v4i32, Custom);
1043     setOperationAction(ISD::MULHU,              MVT::v16i8, Custom);
1044     setOperationAction(ISD::MULHS,              MVT::v16i8, Custom);
1045     setOperationAction(ISD::MULHU,              MVT::v8i16, Legal);
1046     setOperationAction(ISD::MULHS,              MVT::v8i16, Legal);
1047     setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
1048     setOperationAction(ISD::AVGCEILU,           MVT::v16i8, Legal);
1049     setOperationAction(ISD::AVGCEILU,           MVT::v8i16, Legal);
1050 
1051     setOperationAction(ISD::SMULO,              MVT::v16i8, Custom);
1052     setOperationAction(ISD::UMULO,              MVT::v16i8, Custom);
1053     setOperationAction(ISD::UMULO,              MVT::v2i32, Custom);
1054 
1055     setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
1056     setOperationAction(ISD::FABS,               MVT::v2f64, Custom);
1057     setOperationAction(ISD::FCOPYSIGN,          MVT::v2f64, Custom);
1058 
1059     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1060       setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
1061       setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
1062       setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
1063       setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
1064     }
1065 
1066     setOperationAction(ISD::ABDU,               MVT::v16i8, Custom);
1067     setOperationAction(ISD::ABDS,               MVT::v16i8, Custom);
1068     setOperationAction(ISD::ABDU,               MVT::v8i16, Custom);
1069     setOperationAction(ISD::ABDS,               MVT::v8i16, Custom);
1070     setOperationAction(ISD::ABDU,               MVT::v4i32, Custom);
1071     setOperationAction(ISD::ABDS,               MVT::v4i32, Custom);
1072 
1073     setOperationAction(ISD::UADDSAT,            MVT::v16i8, Legal);
1074     setOperationAction(ISD::SADDSAT,            MVT::v16i8, Legal);
1075     setOperationAction(ISD::USUBSAT,            MVT::v16i8, Legal);
1076     setOperationAction(ISD::SSUBSAT,            MVT::v16i8, Legal);
1077     setOperationAction(ISD::UADDSAT,            MVT::v8i16, Legal);
1078     setOperationAction(ISD::SADDSAT,            MVT::v8i16, Legal);
1079     setOperationAction(ISD::USUBSAT,            MVT::v8i16, Legal);
1080     setOperationAction(ISD::SSUBSAT,            MVT::v8i16, Legal);
1081     setOperationAction(ISD::USUBSAT,            MVT::v4i32, Custom);
1082     setOperationAction(ISD::USUBSAT,            MVT::v2i64, Custom);
1083 
1084     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
1085     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
1086     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
1087     setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);
1088 
1089     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1090       setOperationAction(ISD::SETCC,              VT, Custom);
1091       setOperationAction(ISD::CTPOP,              VT, Custom);
1092       setOperationAction(ISD::ABS,                VT, Custom);
1093 
1094       // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1095       // setcc all the way to isel and prefer SETGT in some isel patterns.
1096       setCondCodeAction(ISD::SETLT, VT, Custom);
1097       setCondCodeAction(ISD::SETLE, VT, Custom);
1098     }
1099 
1100     setOperationAction(ISD::SETCC,          MVT::v2f64, Custom);
1101     setOperationAction(ISD::SETCC,          MVT::v4f32, Custom);
1102     setOperationAction(ISD::STRICT_FSETCC,  MVT::v2f64, Custom);
1103     setOperationAction(ISD::STRICT_FSETCC,  MVT::v4f32, Custom);
1104     setOperationAction(ISD::STRICT_FSETCCS, MVT::v2f64, Custom);
1105     setOperationAction(ISD::STRICT_FSETCCS, MVT::v4f32, Custom);
1106 
1107     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
1108       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1109       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1110       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1111       setOperationAction(ISD::VSELECT,            VT, Custom);
1112       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1113     }
1114 
1115     for (auto VT : { MVT::v8f16, MVT::v2f64, MVT::v2i64 }) {
1116       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1117       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1118       setOperationAction(ISD::VSELECT,            VT, Custom);
1119 
1120       if (VT == MVT::v2i64 && !Subtarget.is64Bit())
1121         continue;
1122 
1123       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1124       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1125     }
1126     setF16Action(MVT::v8f16, Expand);
1127     setOperationAction(ISD::FADD, MVT::v8f16, Expand);
1128     setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
1129     setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
1130     setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
1131     setOperationAction(ISD::FNEG, MVT::v8f16, Custom);
1132     setOperationAction(ISD::FABS, MVT::v8f16, Custom);
1133     setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Custom);
1134 
1135     // Custom lower v2i64 and v2f64 selects.
1136     setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
1137     setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);
1138     setOperationAction(ISD::SELECT,             MVT::v4i32, Custom);
1139     setOperationAction(ISD::SELECT,             MVT::v8i16, Custom);
1140     setOperationAction(ISD::SELECT,             MVT::v8f16, Custom);
1141     setOperationAction(ISD::SELECT,             MVT::v16i8, Custom);
1142 
1143     setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Custom);
1144     setOperationAction(ISD::FP_TO_UINT,         MVT::v4i32, Custom);
1145     setOperationAction(ISD::FP_TO_SINT,         MVT::v2i32, Custom);
1146     setOperationAction(ISD::FP_TO_UINT,         MVT::v2i32, Custom);
1147     setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v4i32, Custom);
1148     setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v2i32, Custom);
1149 
1150     // Custom legalize these to avoid over-promotion or custom promotion.
1151     for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
1152       setOperationAction(ISD::FP_TO_SINT,        VT, Custom);
1153       setOperationAction(ISD::FP_TO_UINT,        VT, Custom);
1154       setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
1155       setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
1156     }
1157 
1158     setOperationAction(ISD::SINT_TO_FP,         MVT::v4i32, Custom);
1159     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v4i32, Custom);
1160     setOperationAction(ISD::SINT_TO_FP,         MVT::v2i32, Custom);
1161     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2i32, Custom);
1162 
1163     setOperationAction(ISD::UINT_TO_FP,         MVT::v2i32, Custom);
1164     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2i32, Custom);
1165 
1166     setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Custom);
1167     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v4i32, Custom);
1168 
1169     // Fast v2f32 SINT_TO_FP/UINT_TO_FP( v2i32 ) custom conversions.
1170     setOperationAction(ISD::SINT_TO_FP,         MVT::v2f32, Custom);
1171     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v2f32, Custom);
1172     setOperationAction(ISD::UINT_TO_FP,         MVT::v2f32, Custom);
1173     setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v2f32, Custom);
1174 
1175     setOperationAction(ISD::FP_EXTEND,          MVT::v2f32, Custom);
1176     setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v2f32, Custom);
1177     setOperationAction(ISD::FP_ROUND,           MVT::v2f32, Custom);
1178     setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v2f32, Custom);
1179 
1180     // We want to legalize this to an f64 load rather than an i64 load on
1181     // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
1182     // store.
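         // (A sketch of the intent, assuming SSE2: a load of <2 x i32> should
         // become a single 64-bit vector-domain load (e.g. a MOVQ/MOVSD-style
         // instruction) rather than an i64 GPR load or two scalar i32 loads.)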
1183     setOperationAction(ISD::LOAD,               MVT::v2i32, Custom);
1184     setOperationAction(ISD::LOAD,               MVT::v4i16, Custom);
1185     setOperationAction(ISD::LOAD,               MVT::v8i8,  Custom);
1186     setOperationAction(ISD::STORE,              MVT::v2i32, Custom);
1187     setOperationAction(ISD::STORE,              MVT::v4i16, Custom);
1188     setOperationAction(ISD::STORE,              MVT::v8i8,  Custom);
1189 
1190     // Add 32-bit vector stores to help vectorization opportunities.
1191     setOperationAction(ISD::STORE,              MVT::v2i16, Custom);
1192     setOperationAction(ISD::STORE,              MVT::v4i8,  Custom);
1193 
1194     setOperationAction(ISD::BITCAST,            MVT::v2i32, Custom);
1195     setOperationAction(ISD::BITCAST,            MVT::v4i16, Custom);
1196     setOperationAction(ISD::BITCAST,            MVT::v8i8,  Custom);
1197     if (!Subtarget.hasAVX512())
1198       setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);
1199 
1200     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
1201     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
1202     setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);
1203 
1204     setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
1205 
1206     setOperationAction(ISD::TRUNCATE,    MVT::v2i8,  Custom);
1207     setOperationAction(ISD::TRUNCATE,    MVT::v2i16, Custom);
1208     setOperationAction(ISD::TRUNCATE,    MVT::v2i32, Custom);
1209     setOperationAction(ISD::TRUNCATE,    MVT::v2i64, Custom);
1210     setOperationAction(ISD::TRUNCATE,    MVT::v4i8,  Custom);
1211     setOperationAction(ISD::TRUNCATE,    MVT::v4i16, Custom);
1212     setOperationAction(ISD::TRUNCATE,    MVT::v4i32, Custom);
1213     setOperationAction(ISD::TRUNCATE,    MVT::v4i64, Custom);
1214     setOperationAction(ISD::TRUNCATE,    MVT::v8i8,  Custom);
1215     setOperationAction(ISD::TRUNCATE,    MVT::v8i16, Custom);
1216     setOperationAction(ISD::TRUNCATE,    MVT::v8i32, Custom);
1217     setOperationAction(ISD::TRUNCATE,    MVT::v8i64, Custom);
1218     setOperationAction(ISD::TRUNCATE,    MVT::v16i8, Custom);
1219     setOperationAction(ISD::TRUNCATE,    MVT::v16i16, Custom);
1220     setOperationAction(ISD::TRUNCATE,    MVT::v16i32, Custom);
1221     setOperationAction(ISD::TRUNCATE,    MVT::v16i64, Custom);
1222 
1223     // The customized shift lowering still recognizes the v4i32/v2i64 cases
1224     // that are legal with AVX2.
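         // (Roughly: shifts by a splatted/uniform amount can use the immediate
         // or xmm-count PSLL/PSRL/PSRA forms directly, while genuinely
         // per-element shift amounts have to be emulated until AVX2 provides
         // variable-shift instructions.)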
1225     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1226       setOperationAction(ISD::SRL,              VT, Custom);
1227       setOperationAction(ISD::SHL,              VT, Custom);
1228       setOperationAction(ISD::SRA,              VT, Custom);
1229       if (VT == MVT::v2i64) continue;
1230       setOperationAction(ISD::ROTL,             VT, Custom);
1231       setOperationAction(ISD::ROTR,             VT, Custom);
1232       setOperationAction(ISD::FSHL,             VT, Custom);
1233       setOperationAction(ISD::FSHR,             VT, Custom);
1234     }
1235 
1236     setOperationAction(ISD::STRICT_FSQRT,       MVT::v2f64, Legal);
1237     setOperationAction(ISD::STRICT_FADD,        MVT::v2f64, Legal);
1238     setOperationAction(ISD::STRICT_FSUB,        MVT::v2f64, Legal);
1239     setOperationAction(ISD::STRICT_FMUL,        MVT::v2f64, Legal);
1240     setOperationAction(ISD::STRICT_FDIV,        MVT::v2f64, Legal);
1241   }
1242 
1243   if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
1244     setOperationAction(ISD::ABS,                MVT::v16i8, Legal);
1245     setOperationAction(ISD::ABS,                MVT::v8i16, Legal);
1246     setOperationAction(ISD::ABS,                MVT::v4i32, Legal);
1247     setOperationAction(ISD::BITREVERSE,         MVT::v16i8, Custom);
1248     setOperationAction(ISD::CTLZ,               MVT::v16i8, Custom);
1249     setOperationAction(ISD::CTLZ,               MVT::v8i16, Custom);
1250     setOperationAction(ISD::CTLZ,               MVT::v4i32, Custom);
1251     setOperationAction(ISD::CTLZ,               MVT::v2i64, Custom);
1252 
1253     // These might be better off as horizontal vector ops.
1254     setOperationAction(ISD::ADD,                MVT::i16, Custom);
1255     setOperationAction(ISD::ADD,                MVT::i32, Custom);
1256     setOperationAction(ISD::SUB,                MVT::i16, Custom);
1257     setOperationAction(ISD::SUB,                MVT::i32, Custom);
1258   }
1259 
1260   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
1261     for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
1262       setOperationAction(ISD::FFLOOR,            RoundedTy,  Legal);
1263       setOperationAction(ISD::STRICT_FFLOOR,     RoundedTy,  Legal);
1264       setOperationAction(ISD::FCEIL,             RoundedTy,  Legal);
1265       setOperationAction(ISD::STRICT_FCEIL,      RoundedTy,  Legal);
1266       setOperationAction(ISD::FTRUNC,            RoundedTy,  Legal);
1267       setOperationAction(ISD::STRICT_FTRUNC,     RoundedTy,  Legal);
1268       setOperationAction(ISD::FRINT,             RoundedTy,  Legal);
1269       setOperationAction(ISD::STRICT_FRINT,      RoundedTy,  Legal);
1270       setOperationAction(ISD::FNEARBYINT,        RoundedTy,  Legal);
1271       setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy,  Legal);
1272       setOperationAction(ISD::FROUNDEVEN,        RoundedTy,  Legal);
1273       setOperationAction(ISD::STRICT_FROUNDEVEN, RoundedTy,  Legal);
1274 
1275       setOperationAction(ISD::FROUND,            RoundedTy,  Custom);
1276     }
1277 
1278     setOperationAction(ISD::SMAX,               MVT::v16i8, Legal);
1279     setOperationAction(ISD::SMAX,               MVT::v4i32, Legal);
1280     setOperationAction(ISD::UMAX,               MVT::v8i16, Legal);
1281     setOperationAction(ISD::UMAX,               MVT::v4i32, Legal);
1282     setOperationAction(ISD::SMIN,               MVT::v16i8, Legal);
1283     setOperationAction(ISD::SMIN,               MVT::v4i32, Legal);
1284     setOperationAction(ISD::UMIN,               MVT::v8i16, Legal);
1285     setOperationAction(ISD::UMIN,               MVT::v4i32, Legal);
1286 
1287     for (auto VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64}) {
1288       setOperationAction(ISD::ABDS,             VT, Custom);
1289       setOperationAction(ISD::ABDU,             VT, Custom);
1290     }
1291 
1292     setOperationAction(ISD::UADDSAT,            MVT::v4i32, Custom);
1293     setOperationAction(ISD::SADDSAT,            MVT::v2i64, Custom);
1294     setOperationAction(ISD::SSUBSAT,            MVT::v2i64, Custom);
1295 
1296     // FIXME: Do we need to handle scalar-to-vector here?
1297     setOperationAction(ISD::MUL,                MVT::v4i32, Legal);
1298     setOperationAction(ISD::SMULO,              MVT::v2i32, Custom);
1299 
1300     // We directly match byte blends in the backend as they match the VSELECT
1301     // condition form.
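         // (PBLENDVB selects each result byte from the sign bit of the
         // corresponding mask byte, which lines up with how the vector select
         // condition is represented here, so it can stay Legal.)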
1302     setOperationAction(ISD::VSELECT,            MVT::v16i8, Legal);
1303 
1304     // SSE41 brings specific instructions for doing vector sign extend even in
1305     // cases where we don't have SRA.
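         // (For example, a v16i8 -> v8i16 in-register sign extension can be a
         // single PMOVSXBW instead of an unpack followed by an arithmetic
         // shift.)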
1306     for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1307       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
1308       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
1309     }
1310 
1311     // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
1312     for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1313       setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8,  Legal);
1314       setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8,  Legal);
1315       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8,  Legal);
1316       setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
1317       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
1318       setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
1319     }
1320 
1321     if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
1322       // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we can
1323       // do the pre- and post-work in the vector domain.
1324       setOperationAction(ISD::UINT_TO_FP,        MVT::v4i64, Custom);
1325       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
1326       // We need to mark SINT_TO_FP as Custom even though we want to expand it
1327       // so that DAG combine doesn't try to turn it into uint_to_fp.
1328       setOperationAction(ISD::SINT_TO_FP,        MVT::v4i64, Custom);
1329       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
1330     }
1331   }
1332 
1333   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE42()) {
1334     setOperationAction(ISD::UADDSAT,            MVT::v2i64, Custom);
1335   }
1336 
1337   if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
1338     for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1339                      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1340       setOperationAction(ISD::ROTL, VT, Custom);
1341       setOperationAction(ISD::ROTR, VT, Custom);
1342     }
1343 
1344     // XOP can efficiently perform BITREVERSE with VPPERM.
1345     for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
1346       setOperationAction(ISD::BITREVERSE, VT, Custom);
1347 
1348     for (auto VT : { MVT::v16i8, MVT::v8i16,  MVT::v4i32, MVT::v2i64,
1349                      MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
1350       setOperationAction(ISD::BITREVERSE, VT, Custom);
1351   }
1352 
1353   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
1354     bool HasInt256 = Subtarget.hasInt256();
1355 
1356     addRegisterClass(MVT::v32i8,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1357                                                      : &X86::VR256RegClass);
1358     addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1359                                                      : &X86::VR256RegClass);
1360     addRegisterClass(MVT::v16f16, Subtarget.hasVLX() ? &X86::VR256XRegClass
1361                                                      : &X86::VR256RegClass);
1362     addRegisterClass(MVT::v8i32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1363                                                      : &X86::VR256RegClass);
1364     addRegisterClass(MVT::v8f32,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1365                                                      : &X86::VR256RegClass);
1366     addRegisterClass(MVT::v4i64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1367                                                      : &X86::VR256RegClass);
1368     addRegisterClass(MVT::v4f64,  Subtarget.hasVLX() ? &X86::VR256XRegClass
1369                                                      : &X86::VR256RegClass);
1370 
1371     for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
1372       setOperationAction(ISD::FFLOOR,            VT, Legal);
1373       setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1374       setOperationAction(ISD::FCEIL,             VT, Legal);
1375       setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1376       setOperationAction(ISD::FTRUNC,            VT, Legal);
1377       setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1378       setOperationAction(ISD::FRINT,             VT, Legal);
1379       setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1380       setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1381       setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1382       setOperationAction(ISD::FROUNDEVEN,        VT, Legal);
1383       setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1384 
1385       setOperationAction(ISD::FROUND,            VT, Custom);
1386 
1387       setOperationAction(ISD::FNEG,              VT, Custom);
1388       setOperationAction(ISD::FABS,              VT, Custom);
1389       setOperationAction(ISD::FCOPYSIGN,         VT, Custom);
1390 
1391       setOperationAction(ISD::FMAXIMUM,          VT, Custom);
1392       setOperationAction(ISD::FMINIMUM,          VT, Custom);
1393     }
1394 
1395     // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
1396     // even though v8i16 is a legal type.
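         // (Sketch of the idea: the hardware conversions (CVTTPS2DQ and
         // friends) produce 32-bit elements, so the v8i16 result is computed
         // as a v8i32 conversion and then truncated.)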
1397     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i16, MVT::v8i32);
1398     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i16, MVT::v8i32);
1399     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
1400     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
1401     setOperationAction(ISD::FP_TO_SINT,                MVT::v8i32, Custom);
1402     setOperationAction(ISD::FP_TO_UINT,                MVT::v8i32, Custom);
1403     setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v8i32, Custom);
1404 
1405     setOperationAction(ISD::SINT_TO_FP,         MVT::v8i32, Custom);
1406     setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v8i32, Custom);
1407     setOperationAction(ISD::FP_EXTEND,          MVT::v8f32, Expand);
1408     setOperationAction(ISD::FP_ROUND,           MVT::v8f16, Expand);
1409     setOperationAction(ISD::FP_EXTEND,          MVT::v4f64, Custom);
1410     setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v4f64, Custom);
1411 
1412     setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v4f32, Legal);
1413     setOperationAction(ISD::STRICT_FADD,        MVT::v8f32, Legal);
1414     setOperationAction(ISD::STRICT_FADD,        MVT::v4f64, Legal);
1415     setOperationAction(ISD::STRICT_FSUB,        MVT::v8f32, Legal);
1416     setOperationAction(ISD::STRICT_FSUB,        MVT::v4f64, Legal);
1417     setOperationAction(ISD::STRICT_FMUL,        MVT::v8f32, Legal);
1418     setOperationAction(ISD::STRICT_FMUL,        MVT::v4f64, Legal);
1419     setOperationAction(ISD::STRICT_FDIV,        MVT::v8f32, Legal);
1420     setOperationAction(ISD::STRICT_FDIV,        MVT::v4f64, Legal);
1421     setOperationAction(ISD::STRICT_FSQRT,       MVT::v8f32, Legal);
1422     setOperationAction(ISD::STRICT_FSQRT,       MVT::v4f64, Legal);
1423 
1424     if (!Subtarget.hasAVX512())
1425       setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);
1426 
1427     // The customized shift lowering still recognizes the v8i32/v4i64 cases
1428     // that are legal with AVX2.
1429     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1430       setOperationAction(ISD::SRL,             VT, Custom);
1431       setOperationAction(ISD::SHL,             VT, Custom);
1432       setOperationAction(ISD::SRA,             VT, Custom);
1433       setOperationAction(ISD::ABDS,            VT, Custom);
1434       setOperationAction(ISD::ABDU,            VT, Custom);
1435       if (VT == MVT::v4i64) continue;
1436       setOperationAction(ISD::ROTL,            VT, Custom);
1437       setOperationAction(ISD::ROTR,            VT, Custom);
1438       setOperationAction(ISD::FSHL,            VT, Custom);
1439       setOperationAction(ISD::FSHR,            VT, Custom);
1440     }
1441 
1442     // These types need custom splitting if their input is a 128-bit vector.
1443     setOperationAction(ISD::SIGN_EXTEND,       MVT::v8i64,  Custom);
1444     setOperationAction(ISD::SIGN_EXTEND,       MVT::v16i32, Custom);
1445     setOperationAction(ISD::ZERO_EXTEND,       MVT::v8i64,  Custom);
1446     setOperationAction(ISD::ZERO_EXTEND,       MVT::v16i32, Custom);
1447 
1448     setOperationAction(ISD::SELECT,            MVT::v4f64, Custom);
1449     setOperationAction(ISD::SELECT,            MVT::v4i64, Custom);
1450     setOperationAction(ISD::SELECT,            MVT::v8i32, Custom);
1451     setOperationAction(ISD::SELECT,            MVT::v16i16, Custom);
1452     setOperationAction(ISD::SELECT,            MVT::v16f16, Custom);
1453     setOperationAction(ISD::SELECT,            MVT::v32i8, Custom);
1454     setOperationAction(ISD::SELECT,            MVT::v8f32, Custom);
1455 
1456     for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1457       setOperationAction(ISD::SIGN_EXTEND,     VT, Custom);
1458       setOperationAction(ISD::ZERO_EXTEND,     VT, Custom);
1459       setOperationAction(ISD::ANY_EXTEND,      VT, Custom);
1460     }
1461 
1462     setOperationAction(ISD::TRUNCATE,          MVT::v32i8, Custom);
1463     setOperationAction(ISD::TRUNCATE,          MVT::v32i16, Custom);
1464     setOperationAction(ISD::TRUNCATE,          MVT::v32i32, Custom);
1465     setOperationAction(ISD::TRUNCATE,          MVT::v32i64, Custom);
1466 
1467     setOperationAction(ISD::BITREVERSE,        MVT::v32i8, Custom);
1468 
1469     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1470       setOperationAction(ISD::SETCC,           VT, Custom);
1471       setOperationAction(ISD::CTPOP,           VT, Custom);
1472       setOperationAction(ISD::CTLZ,            VT, Custom);
1473 
1474       // These condition codes aren't legal in SSE/AVX; under AVX512 we keep the
1475       // setcc all the way to isel and prefer SETGT in some isel patterns.
1476       setCondCodeAction(ISD::SETLT, VT, Custom);
1477       setCondCodeAction(ISD::SETLE, VT, Custom);
1478     }
1479 
1480     setOperationAction(ISD::SETCC,          MVT::v4f64, Custom);
1481     setOperationAction(ISD::SETCC,          MVT::v8f32, Custom);
1482     setOperationAction(ISD::STRICT_FSETCC,  MVT::v4f64, Custom);
1483     setOperationAction(ISD::STRICT_FSETCC,  MVT::v8f32, Custom);
1484     setOperationAction(ISD::STRICT_FSETCCS, MVT::v4f64, Custom);
1485     setOperationAction(ISD::STRICT_FSETCCS, MVT::v8f32, Custom);
1486 
1487     if (Subtarget.hasAnyFMA()) {
1488       for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
1489                        MVT::v2f64, MVT::v4f64 }) {
1490         setOperationAction(ISD::FMA, VT, Legal);
1491         setOperationAction(ISD::STRICT_FMA, VT, Legal);
1492       }
1493     }
1494 
1495     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1496       setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1497       setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1498     }
1499 
1500     setOperationAction(ISD::MUL,       MVT::v4i64,  Custom);
1501     setOperationAction(ISD::MUL,       MVT::v8i32,  HasInt256 ? Legal : Custom);
1502     setOperationAction(ISD::MUL,       MVT::v16i16, HasInt256 ? Legal : Custom);
1503     setOperationAction(ISD::MUL,       MVT::v32i8,  Custom);
1504 
1505     setOperationAction(ISD::MULHU,     MVT::v8i32,  Custom);
1506     setOperationAction(ISD::MULHS,     MVT::v8i32,  Custom);
1507     setOperationAction(ISD::MULHU,     MVT::v16i16, HasInt256 ? Legal : Custom);
1508     setOperationAction(ISD::MULHS,     MVT::v16i16, HasInt256 ? Legal : Custom);
1509     setOperationAction(ISD::MULHU,     MVT::v32i8,  Custom);
1510     setOperationAction(ISD::MULHS,     MVT::v32i8,  Custom);
1511     setOperationAction(ISD::AVGCEILU,  MVT::v16i16, HasInt256 ? Legal : Custom);
1512     setOperationAction(ISD::AVGCEILU,  MVT::v32i8,  HasInt256 ? Legal : Custom);
1513 
1514     setOperationAction(ISD::SMULO,     MVT::v32i8, Custom);
1515     setOperationAction(ISD::UMULO,     MVT::v32i8, Custom);
1516 
1517     setOperationAction(ISD::ABS,       MVT::v4i64,  Custom);
1518     setOperationAction(ISD::SMAX,      MVT::v4i64,  Custom);
1519     setOperationAction(ISD::UMAX,      MVT::v4i64,  Custom);
1520     setOperationAction(ISD::SMIN,      MVT::v4i64,  Custom);
1521     setOperationAction(ISD::UMIN,      MVT::v4i64,  Custom);
1522 
1523     setOperationAction(ISD::UADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1524     setOperationAction(ISD::SADDSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1525     setOperationAction(ISD::USUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1526     setOperationAction(ISD::SSUBSAT,   MVT::v32i8,  HasInt256 ? Legal : Custom);
1527     setOperationAction(ISD::UADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1528     setOperationAction(ISD::SADDSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1529     setOperationAction(ISD::USUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1530     setOperationAction(ISD::SSUBSAT,   MVT::v16i16, HasInt256 ? Legal : Custom);
1531     setOperationAction(ISD::UADDSAT,   MVT::v8i32, Custom);
1532     setOperationAction(ISD::USUBSAT,   MVT::v8i32, Custom);
1533     setOperationAction(ISD::UADDSAT,   MVT::v4i64, Custom);
1534     setOperationAction(ISD::USUBSAT,   MVT::v4i64, Custom);
1535 
1536     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1537       setOperationAction(ISD::ABS,  VT, HasInt256 ? Legal : Custom);
1538       setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1539       setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1540       setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1541       setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1542     }
1543 
1544     for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1545       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1546       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1547     }
1548 
1549     if (HasInt256) {
1550       // The custom lowering of UINT_TO_FP for v8i32 becomes interesting
1551       // when we have a 256-bit-wide blend with immediate.
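           // (A hedged sketch of the usual trick rather than a spec of this
           // lowering: the low and high 16-bit halves of each lane are blended
           // into the mantissas of known float constants and then recombined
           // with FP adds/subs, and the 256-bit integer blend makes building
           // those halves cheap.)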
1552       setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1553       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1554 
1555       // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1556       for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1557         setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1558         setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i8,  Legal);
1559         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i8,  Legal);
1560         setLoadExtAction(LoadExtOp, MVT::v8i32,  MVT::v8i16, Legal);
1561         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i16, Legal);
1562         setLoadExtAction(LoadExtOp, MVT::v4i64,  MVT::v4i32, Legal);
1563       }
1564     }
1565 
1566     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1567                      MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1568       setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
1569       setOperationAction(ISD::MSTORE, VT, Legal);
1570     }
1571 
1572     // Extract subvector is special because the value type
1573     // (result) is 128-bit but the source is 256-bit wide.
1574     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1575                      MVT::v8f16, MVT::v4f32, MVT::v2f64 }) {
1576       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1577     }
1578 
1579     // Custom lower several nodes for 256-bit types.
1580     for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1581                     MVT::v16f16, MVT::v8f32, MVT::v4f64 }) {
1582       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1583       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1584       setOperationAction(ISD::VSELECT,            VT, Custom);
1585       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1586       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1587       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1588       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
1589       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
1590       setOperationAction(ISD::STORE,              VT, Custom);
1591     }
1592     setF16Action(MVT::v16f16, Expand);
1593     setOperationAction(ISD::FNEG, MVT::v16f16, Custom);
1594     setOperationAction(ISD::FABS, MVT::v16f16, Custom);
1595     setOperationAction(ISD::FCOPYSIGN, MVT::v16f16, Custom);
1596     setOperationAction(ISD::FADD, MVT::v16f16, Expand);
1597     setOperationAction(ISD::FSUB, MVT::v16f16, Expand);
1598     setOperationAction(ISD::FMUL, MVT::v16f16, Expand);
1599     setOperationAction(ISD::FDIV, MVT::v16f16, Expand);
1600 
1601     if (HasInt256) {
1602       setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1603 
1604       // Custom legalize 2x32 to get a little better code.
1605       setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1606       setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1607 
1608       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1609                        MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1610         setOperationAction(ISD::MGATHER,  VT, Custom);
1611     }
1612   }
1613 
1614   if (!Subtarget.useSoftFloat() && !Subtarget.hasFP16() &&
1615       Subtarget.hasF16C()) {
1616     for (MVT VT : { MVT::f16, MVT::v2f16, MVT::v4f16, MVT::v8f16 }) {
1617       setOperationAction(ISD::FP_ROUND,           VT, Custom);
1618       setOperationAction(ISD::STRICT_FP_ROUND,    VT, Custom);
1619     }
1620     for (MVT VT : { MVT::f32, MVT::v2f32, MVT::v4f32, MVT::v8f32 }) {
1621       setOperationAction(ISD::FP_EXTEND,          VT, Custom);
1622       setOperationAction(ISD::STRICT_FP_EXTEND,   VT, Custom);
1623     }
1624     for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1625       setOperationPromotedToType(Opc, MVT::v8f16, MVT::v8f32);
1626       setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1627     }
1628   }
1629 
1630   // This block controls legalization of the mask vector sizes that are
1631   // available with AVX512. 512-bit vectors are in a separate block controlled
1632   // by useAVX512Regs.
1633   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1634     addRegisterClass(MVT::v1i1,   &X86::VK1RegClass);
1635     addRegisterClass(MVT::v2i1,   &X86::VK2RegClass);
1636     addRegisterClass(MVT::v4i1,   &X86::VK4RegClass);
1637     addRegisterClass(MVT::v8i1,   &X86::VK8RegClass);
1638     addRegisterClass(MVT::v16i1,  &X86::VK16RegClass);
1639 
1640     setOperationAction(ISD::SELECT,             MVT::v1i1, Custom);
1641     setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1642     setOperationAction(ISD::BUILD_VECTOR,       MVT::v1i1, Custom);
1643 
1644     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v8i1,  MVT::v8i32);
1645     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v8i1,  MVT::v8i32);
1646     setOperationPromotedToType(ISD::FP_TO_SINT,        MVT::v4i1,  MVT::v4i32);
1647     setOperationPromotedToType(ISD::FP_TO_UINT,        MVT::v4i1,  MVT::v4i32);
1648     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1,  MVT::v8i32);
1649     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1,  MVT::v8i32);
1650     setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1,  MVT::v4i32);
1651     setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1,  MVT::v4i32);
1652     setOperationAction(ISD::FP_TO_SINT,                MVT::v2i1,  Custom);
1653     setOperationAction(ISD::FP_TO_UINT,                MVT::v2i1,  Custom);
1654     setOperationAction(ISD::STRICT_FP_TO_SINT,         MVT::v2i1,  Custom);
1655     setOperationAction(ISD::STRICT_FP_TO_UINT,         MVT::v2i1,  Custom);
1656 
1657     // There is no byte-sized k-register load or store without AVX512DQ.
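         // (Concretely: KMOVW exists with plain AVX512F, but the byte-sized
         // KMOVB only arrives with AVX512DQ, so the narrow mask loads/stores
         // below are custom-lowered instead.)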
1658     if (!Subtarget.hasDQI()) {
1659       setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1660       setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1661       setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1662       setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1663 
1664       setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1665       setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1666       setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1667       setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1668     }
1669 
1670     // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1671     for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1672       setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1673       setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1674       setOperationAction(ISD::ANY_EXTEND,  VT, Custom);
1675     }
1676 
1677     for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 })
1678       setOperationAction(ISD::VSELECT,          VT, Expand);
1679 
1680     for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1681       setOperationAction(ISD::SETCC,            VT, Custom);
1682       setOperationAction(ISD::SELECT,           VT, Custom);
1683       setOperationAction(ISD::TRUNCATE,         VT, Custom);
1684 
1685       setOperationAction(ISD::BUILD_VECTOR,     VT, Custom);
1686       setOperationAction(ISD::CONCAT_VECTORS,   VT, Custom);
1687       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1688       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1689       setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1690       setOperationAction(ISD::VECTOR_SHUFFLE,   VT,  Custom);
1691     }
1692 
1693     for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1694       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1695   }
1696 
1697   // This block controls legalization for 512-bit operations with 8/16/32/64 bit
1698   // elements. 512-bits can be disabled based on prefer-vector-width and
1699   // required-vector-width function attributes.
1700   if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1701     bool HasBWI = Subtarget.hasBWI();
1702 
1703     addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1704     addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1705     addRegisterClass(MVT::v8i64,  &X86::VR512RegClass);
1706     addRegisterClass(MVT::v8f64,  &X86::VR512RegClass);
1707     addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1708     addRegisterClass(MVT::v32f16, &X86::VR512RegClass);
1709     addRegisterClass(MVT::v64i8,  &X86::VR512RegClass);
1710 
1711     for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1712       setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8,  Legal);
1713       setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1714       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i8,   Legal);
1715       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i16,  Legal);
1716       setLoadExtAction(ExtType, MVT::v8i64,  MVT::v8i32,  Legal);
1717       if (HasBWI)
1718         setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1719     }
1720 
1721     for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1722       setOperationAction(ISD::FMAXIMUM, VT, Custom);
1723       setOperationAction(ISD::FMINIMUM, VT, Custom);
1724       setOperationAction(ISD::FNEG,  VT, Custom);
1725       setOperationAction(ISD::FABS,  VT, Custom);
1726       setOperationAction(ISD::FMA,   VT, Legal);
1727       setOperationAction(ISD::STRICT_FMA, VT, Legal);
1728       setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1729     }
1730 
1731     for (MVT VT : { MVT::v16i1, MVT::v16i8 }) {
1732       setOperationPromotedToType(ISD::FP_TO_SINT       , VT, MVT::v16i32);
1733       setOperationPromotedToType(ISD::FP_TO_UINT       , VT, MVT::v16i32);
1734       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1735       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1736     }
1737 
1738     for (MVT VT : { MVT::v16i16, MVT::v16i32 }) {
1739       setOperationAction(ISD::FP_TO_SINT,        VT, Custom);
1740       setOperationAction(ISD::FP_TO_UINT,        VT, Custom);
1741       setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
1742       setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
1743     }
1744 
1745     setOperationAction(ISD::SINT_TO_FP,        MVT::v16i32, Custom);
1746     setOperationAction(ISD::UINT_TO_FP,        MVT::v16i32, Custom);
1747     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Custom);
1748     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Custom);
1749     setOperationAction(ISD::FP_EXTEND,         MVT::v8f64,  Custom);
1750     setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v8f64,  Custom);
1751 
1752     setOperationAction(ISD::STRICT_FADD,      MVT::v16f32, Legal);
1753     setOperationAction(ISD::STRICT_FADD,      MVT::v8f64,  Legal);
1754     setOperationAction(ISD::STRICT_FSUB,      MVT::v16f32, Legal);
1755     setOperationAction(ISD::STRICT_FSUB,      MVT::v8f64,  Legal);
1756     setOperationAction(ISD::STRICT_FMUL,      MVT::v16f32, Legal);
1757     setOperationAction(ISD::STRICT_FMUL,      MVT::v8f64,  Legal);
1758     setOperationAction(ISD::STRICT_FDIV,      MVT::v16f32, Legal);
1759     setOperationAction(ISD::STRICT_FDIV,      MVT::v8f64,  Legal);
1760     setOperationAction(ISD::STRICT_FSQRT,     MVT::v16f32, Legal);
1761     setOperationAction(ISD::STRICT_FSQRT,     MVT::v8f64,  Legal);
1762     setOperationAction(ISD::STRICT_FP_ROUND,  MVT::v8f32,  Legal);
1763 
1764     setTruncStoreAction(MVT::v8i64,   MVT::v8i8,   Legal);
1765     setTruncStoreAction(MVT::v8i64,   MVT::v8i16,  Legal);
1766     setTruncStoreAction(MVT::v8i64,   MVT::v8i32,  Legal);
1767     setTruncStoreAction(MVT::v16i32,  MVT::v16i8,  Legal);
1768     setTruncStoreAction(MVT::v16i32,  MVT::v16i16, Legal);
1769     if (HasBWI)
1770       setTruncStoreAction(MVT::v32i16,  MVT::v32i8, Legal);
1771 
1772     // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1773     // to 512-bit rather than use the AVX2 instructions so that we can use
1774     // k-masks.
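         // (A widened 512-bit masked load/store can be predicated with a
         // k-register, whereas the 128/256-bit AVX2 forms would have to use
         // VPMASKMOV/VMASKMOVPS-style vector masks.)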
1775     if (!Subtarget.hasVLX()) {
1776       for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1777            MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1778         setOperationAction(ISD::MLOAD,  VT, Custom);
1779         setOperationAction(ISD::MSTORE, VT, Custom);
1780       }
1781     }
1782 
1783     setOperationAction(ISD::TRUNCATE,    MVT::v8i32,  Legal);
1784     setOperationAction(ISD::TRUNCATE,    MVT::v16i16, Legal);
1785     setOperationAction(ISD::TRUNCATE,    MVT::v32i8,  HasBWI ? Legal : Custom);
1786     setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1787     setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1788     setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64,  Custom);
1789     setOperationAction(ISD::ANY_EXTEND,  MVT::v32i16, Custom);
1790     setOperationAction(ISD::ANY_EXTEND,  MVT::v16i32, Custom);
1791     setOperationAction(ISD::ANY_EXTEND,  MVT::v8i64,  Custom);
1792     setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1793     setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1794     setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64,  Custom);
1795 
1796     if (HasBWI) {
1797       // Extends from v64i1 masks to 512-bit vectors.
1798       setOperationAction(ISD::SIGN_EXTEND,        MVT::v64i8, Custom);
1799       setOperationAction(ISD::ZERO_EXTEND,        MVT::v64i8, Custom);
1800       setOperationAction(ISD::ANY_EXTEND,         MVT::v64i8, Custom);
1801     }
1802 
1803     for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1804       setOperationAction(ISD::FFLOOR,            VT, Legal);
1805       setOperationAction(ISD::STRICT_FFLOOR,     VT, Legal);
1806       setOperationAction(ISD::FCEIL,             VT, Legal);
1807       setOperationAction(ISD::STRICT_FCEIL,      VT, Legal);
1808       setOperationAction(ISD::FTRUNC,            VT, Legal);
1809       setOperationAction(ISD::STRICT_FTRUNC,     VT, Legal);
1810       setOperationAction(ISD::FRINT,             VT, Legal);
1811       setOperationAction(ISD::STRICT_FRINT,      VT, Legal);
1812       setOperationAction(ISD::FNEARBYINT,        VT, Legal);
1813       setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1814       setOperationAction(ISD::FROUNDEVEN,        VT, Legal);
1815       setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
1816 
1817       setOperationAction(ISD::FROUND,            VT, Custom);
1818     }
1819 
1820     for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1821       setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1822       setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1823     }
1824 
1825     setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom);
1826     setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom);
1827     setOperationAction(ISD::ADD, MVT::v64i8,  HasBWI ? Legal : Custom);
1828     setOperationAction(ISD::SUB, MVT::v64i8,  HasBWI ? Legal : Custom);
1829 
1830     setOperationAction(ISD::MUL, MVT::v8i64,  Custom);
1831     setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1832     setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom);
1833     setOperationAction(ISD::MUL, MVT::v64i8,  Custom);
1834 
1835     setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1836     setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1837     setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom);
1838     setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
1839     setOperationAction(ISD::MULHS, MVT::v64i8,  Custom);
1840     setOperationAction(ISD::MULHU, MVT::v64i8,  Custom);
1841     setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom);
1842     setOperationAction(ISD::AVGCEILU, MVT::v64i8,  HasBWI ? Legal : Custom);
1843 
1844     setOperationAction(ISD::SMULO, MVT::v64i8, Custom);
1845     setOperationAction(ISD::UMULO, MVT::v64i8, Custom);
1846 
1847     setOperationAction(ISD::BITREVERSE, MVT::v64i8,  Custom);
1848 
1849     for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1850       setOperationAction(ISD::SRL,              VT, Custom);
1851       setOperationAction(ISD::SHL,              VT, Custom);
1852       setOperationAction(ISD::SRA,              VT, Custom);
1853       setOperationAction(ISD::ROTL,             VT, Custom);
1854       setOperationAction(ISD::ROTR,             VT, Custom);
1855       setOperationAction(ISD::SETCC,            VT, Custom);
1856       setOperationAction(ISD::ABDS,             VT, Custom);
1857       setOperationAction(ISD::ABDU,             VT, Custom);
1858 
1859       // These condition codes aren't legal in SSE/AVX; under AVX512 we keep the
1860       // setcc all the way to isel and prefer SETGT in some isel patterns.
1861       setCondCodeAction(ISD::SETLT, VT, Custom);
1862       setCondCodeAction(ISD::SETLE, VT, Custom);
1863     }
1864 
1865     setOperationAction(ISD::SETCC,          MVT::v8f64, Custom);
1866     setOperationAction(ISD::SETCC,          MVT::v16f32, Custom);
1867     setOperationAction(ISD::STRICT_FSETCC,  MVT::v8f64, Custom);
1868     setOperationAction(ISD::STRICT_FSETCC,  MVT::v16f32, Custom);
1869     setOperationAction(ISD::STRICT_FSETCCS, MVT::v8f64, Custom);
1870     setOperationAction(ISD::STRICT_FSETCCS, MVT::v16f32, Custom);
1871 
1872     for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1873       setOperationAction(ISD::SMAX,             VT, Legal);
1874       setOperationAction(ISD::UMAX,             VT, Legal);
1875       setOperationAction(ISD::SMIN,             VT, Legal);
1876       setOperationAction(ISD::UMIN,             VT, Legal);
1877       setOperationAction(ISD::ABS,              VT, Legal);
1878       setOperationAction(ISD::CTPOP,            VT, Custom);
1879     }
1880 
1881     for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1882       setOperationAction(ISD::ABS,     VT, HasBWI ? Legal : Custom);
1883       setOperationAction(ISD::CTPOP,   VT, Subtarget.hasBITALG() ? Legal : Custom);
1884       setOperationAction(ISD::CTLZ,    VT, Custom);
1885       setOperationAction(ISD::SMAX,    VT, HasBWI ? Legal : Custom);
1886       setOperationAction(ISD::UMAX,    VT, HasBWI ? Legal : Custom);
1887       setOperationAction(ISD::SMIN,    VT, HasBWI ? Legal : Custom);
1888       setOperationAction(ISD::UMIN,    VT, HasBWI ? Legal : Custom);
1889       setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom);
1890       setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom);
1891       setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom);
1892       setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
1893     }
1894 
1895     setOperationAction(ISD::FSHL,       MVT::v64i8, Custom);
1896     setOperationAction(ISD::FSHR,       MVT::v64i8, Custom);
1897     setOperationAction(ISD::FSHL,      MVT::v32i16, Custom);
1898     setOperationAction(ISD::FSHR,      MVT::v32i16, Custom);
1899     setOperationAction(ISD::FSHL,      MVT::v16i32, Custom);
1900     setOperationAction(ISD::FSHR,      MVT::v16i32, Custom);
1901 
1902     if (Subtarget.hasDQI()) {
1903       for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
1904                        ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
1905                        ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT})
1906         setOperationAction(Opc,           MVT::v8i64, Custom);
1907       setOperationAction(ISD::MUL,        MVT::v8i64, Legal);
1908     }
1909 
1910     if (Subtarget.hasCDI()) {
1911       // Non-VLX subtargets extend 128/256-bit vectors to use the 512-bit version.
1912       for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1913         setOperationAction(ISD::CTLZ,            VT, Legal);
1914       }
1915     } // Subtarget.hasCDI()
1916 
1917     if (Subtarget.hasVPOPCNTDQ()) {
1918       for (auto VT : { MVT::v16i32, MVT::v8i64 })
1919         setOperationAction(ISD::CTPOP, VT, Legal);
1920     }
1921 
1922     // Extract subvector is special because the value type
1923     // (result) is 256-bit but the source is 512-bit wide.
1924     // 128-bit was made Legal under AVX1.
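         // (For instance, extracting the v4f64 half of a v8f64 maps onto a
         // single 256-bit extract, so keeping this Legal avoids any custom
         // lowering; the exact opcode choice is left to isel.)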
1925     for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1926                      MVT::v16f16, MVT::v8f32, MVT::v4f64 })
1927       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1928 
1929     for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
1930                      MVT::v32f16, MVT::v16f32, MVT::v8f64 }) {
1931       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
1932       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Legal);
1933       setOperationAction(ISD::SELECT,             VT, Custom);
1934       setOperationAction(ISD::VSELECT,            VT, Custom);
1935       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
1936       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1937       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
1938       setOperationAction(ISD::SCALAR_TO_VECTOR,   VT, Custom);
1939       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
1940     }
1941     setF16Action(MVT::v32f16, Expand);
1942     setOperationAction(ISD::FP_ROUND, MVT::v16f16, Custom);
1943     setOperationAction(ISD::STRICT_FP_ROUND, MVT::v16f16, Custom);
1944     setOperationAction(ISD::FP_EXTEND, MVT::v16f32, Custom);
1945     setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v16f32, Custom);
1946     for (unsigned Opc : {ISD::FADD, ISD::FSUB, ISD::FMUL, ISD::FDIV}) {
1947       setOperationPromotedToType(Opc, MVT::v16f16, MVT::v16f32);
1948       setOperationPromotedToType(Opc, MVT::v32f16, MVT::v32f32);
1949     }
1950 
1951     for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1952       setOperationAction(ISD::MLOAD,               VT, Legal);
1953       setOperationAction(ISD::MSTORE,              VT, Legal);
1954       setOperationAction(ISD::MGATHER,             VT, Custom);
1955       setOperationAction(ISD::MSCATTER,            VT, Custom);
1956     }
1957     if (HasBWI) {
1958       for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1959         setOperationAction(ISD::MLOAD,        VT, Legal);
1960         setOperationAction(ISD::MSTORE,       VT, Legal);
1961       }
1962     } else {
1963       setOperationAction(ISD::STORE, MVT::v32i16, Custom);
1964       setOperationAction(ISD::STORE, MVT::v64i8,  Custom);
1965     }
1966 
1967     if (Subtarget.hasVBMI2()) {
1968       for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1969         setOperationAction(ISD::FSHL, VT, Custom);
1970         setOperationAction(ISD::FSHR, VT, Custom);
1971       }
1972 
1973       setOperationAction(ISD::ROTL, MVT::v32i16, Custom);
1974       setOperationAction(ISD::ROTR, MVT::v32i16, Custom);
1975     }
1976   } // useAVX512Regs
1977 
1978   if (!Subtarget.useSoftFloat() && Subtarget.hasVBMI2()) {
1979     for (auto VT : {MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v16i16, MVT::v8i32,
1980                     MVT::v4i64}) {
1981       setOperationAction(ISD::FSHL, VT, Custom);
1982       setOperationAction(ISD::FSHR, VT, Custom);
1983     }
1984   }
1985 
1986   // This block controls legalization for operations that don't have
1987   // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1988   // narrower widths.
1989   if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1990     // These operations are handled on non-VLX by artificially widening in
1991     // isel patterns.
1992 
1993     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v8i32, Custom);
1994     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v4i32, Custom);
1995     setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v2i32, Custom);
1996 
1997     if (Subtarget.hasDQI()) {
1998       // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1999       // v2f32 UINT_TO_FP is already custom under SSE2.
2000       assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
2001              isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
2002              "Unexpected operation action!");
2003       // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
2004       setOperationAction(ISD::FP_TO_SINT,        MVT::v2f32, Custom);
2005       setOperationAction(ISD::FP_TO_UINT,        MVT::v2f32, Custom);
2006       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
2007       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
2008     }
2009 
2010     for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
2011       setOperationAction(ISD::SMAX, VT, Legal);
2012       setOperationAction(ISD::UMAX, VT, Legal);
2013       setOperationAction(ISD::SMIN, VT, Legal);
2014       setOperationAction(ISD::UMIN, VT, Legal);
2015       setOperationAction(ISD::ABS,  VT, Legal);
2016     }
2017 
2018     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
2019       setOperationAction(ISD::ROTL,     VT, Custom);
2020       setOperationAction(ISD::ROTR,     VT, Custom);
2021     }
2022 
2023     // Custom legalize 2x32 to get a little better code.
2024     setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
2025     setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
2026 
2027     for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
2028                      MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
2029       setOperationAction(ISD::MSCATTER, VT, Custom);
2030 
2031     if (Subtarget.hasDQI()) {
2032       for (auto Opc : {ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::STRICT_SINT_TO_FP,
2033                        ISD::STRICT_UINT_TO_FP, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
2034                        ISD::STRICT_FP_TO_SINT, ISD::STRICT_FP_TO_UINT}) {
2035         setOperationAction(Opc, MVT::v2i64, Custom);
2036         setOperationAction(Opc, MVT::v4i64, Custom);
2037       }
2038       setOperationAction(ISD::MUL, MVT::v2i64, Legal);
2039       setOperationAction(ISD::MUL, MVT::v4i64, Legal);
2040     }
2041 
2042     if (Subtarget.hasCDI()) {
2043       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
2044         setOperationAction(ISD::CTLZ,            VT, Legal);
2045       }
2046     } // Subtarget.hasCDI()
2047 
2048     if (Subtarget.hasVPOPCNTDQ()) {
2049       for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
2050         setOperationAction(ISD::CTPOP, VT, Legal);
2051     }
2052     setOperationAction(ISD::FNEG, MVT::v32f16, Custom);
2053     setOperationAction(ISD::FABS, MVT::v32f16, Custom);
2054     setOperationAction(ISD::FCOPYSIGN, MVT::v32f16, Custom);
2055   }
2056 
2057   // This block controls legalization of v32i1/v64i1, which are available with
2058   // AVX512BW.
2059   if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
2060     addRegisterClass(MVT::v32i1,  &X86::VK32RegClass);
2061     addRegisterClass(MVT::v64i1,  &X86::VK64RegClass);
2062 
2063     for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
2064       setOperationAction(ISD::VSELECT,            VT, Expand);
2065       setOperationAction(ISD::TRUNCATE,           VT, Custom);
2066       setOperationAction(ISD::SETCC,              VT, Custom);
2067       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2068       setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Custom);
2069       setOperationAction(ISD::SELECT,             VT, Custom);
2070       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
2071       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
2072       setOperationAction(ISD::CONCAT_VECTORS,     VT, Custom);
2073       setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Custom);
2074     }
2075 
2076     for (auto VT : { MVT::v16i1, MVT::v32i1 })
2077       setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
2078 
2079     // Extends from v32i1 masks to 256-bit vectors.
2080     setOperationAction(ISD::SIGN_EXTEND,        MVT::v32i8, Custom);
2081     setOperationAction(ISD::ZERO_EXTEND,        MVT::v32i8, Custom);
2082     setOperationAction(ISD::ANY_EXTEND,         MVT::v32i8, Custom);
2083 
2084     for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
2085       setOperationAction(ISD::MLOAD,  VT, Subtarget.hasVLX() ? Legal : Custom);
2086       setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
2087     }
2088 
2089     // These operations are handled on non-VLX by artificially widening in
2090     // isel patterns.
2091     // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
2092 
2093     if (Subtarget.hasBITALG()) {
2094       for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
2095         setOperationAction(ISD::CTPOP, VT, Legal);
2096     }
2097   }
2098 
2099   if (!Subtarget.useSoftFloat() && Subtarget.hasFP16()) {
2100     auto setGroup = [&] (MVT VT) {
2101       setOperationAction(ISD::FADD,               VT, Legal);
2102       setOperationAction(ISD::STRICT_FADD,        VT, Legal);
2103       setOperationAction(ISD::FSUB,               VT, Legal);
2104       setOperationAction(ISD::STRICT_FSUB,        VT, Legal);
2105       setOperationAction(ISD::FMUL,               VT, Legal);
2106       setOperationAction(ISD::STRICT_FMUL,        VT, Legal);
2107       setOperationAction(ISD::FDIV,               VT, Legal);
2108       setOperationAction(ISD::STRICT_FDIV,        VT, Legal);
2109       setOperationAction(ISD::FSQRT,              VT, Legal);
2110       setOperationAction(ISD::STRICT_FSQRT,       VT, Legal);
2111 
2112       setOperationAction(ISD::FFLOOR,             VT, Legal);
2113       setOperationAction(ISD::STRICT_FFLOOR,      VT, Legal);
2114       setOperationAction(ISD::FCEIL,              VT, Legal);
2115       setOperationAction(ISD::STRICT_FCEIL,       VT, Legal);
2116       setOperationAction(ISD::FTRUNC,             VT, Legal);
2117       setOperationAction(ISD::STRICT_FTRUNC,      VT, Legal);
2118       setOperationAction(ISD::FRINT,              VT, Legal);
2119       setOperationAction(ISD::STRICT_FRINT,       VT, Legal);
2120       setOperationAction(ISD::FNEARBYINT,         VT, Legal);
2121       setOperationAction(ISD::STRICT_FNEARBYINT,  VT, Legal);
2122       setOperationAction(ISD::FROUNDEVEN, VT, Legal);
2123       setOperationAction(ISD::STRICT_FROUNDEVEN, VT, Legal);
2124 
2125       setOperationAction(ISD::FROUND,             VT, Custom);
2126 
2127       setOperationAction(ISD::LOAD,               VT, Legal);
2128       setOperationAction(ISD::STORE,              VT, Legal);
2129 
2130       setOperationAction(ISD::FMA,                VT, Legal);
2131       setOperationAction(ISD::STRICT_FMA,         VT, Legal);
2132       setOperationAction(ISD::VSELECT,            VT, Legal);
2133       setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
2134       setOperationAction(ISD::SELECT,             VT, Custom);
2135 
2136       setOperationAction(ISD::FNEG,               VT, Custom);
2137       setOperationAction(ISD::FABS,               VT, Custom);
2138       setOperationAction(ISD::FCOPYSIGN,          VT, Custom);
2139       setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
2140       setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
2141 
2142       setOperationAction(ISD::SETCC,              VT, Custom);
2143       setOperationAction(ISD::STRICT_FSETCC,      VT, Custom);
2144       setOperationAction(ISD::STRICT_FSETCCS,     VT, Custom);
2145     };
2146 
2147     // AVX512_FP16 scalar operations
2148     setGroup(MVT::f16);
2149     setOperationAction(ISD::FREM,                 MVT::f16, Promote);
2150     setOperationAction(ISD::STRICT_FREM,          MVT::f16, Promote);
2151     setOperationAction(ISD::SELECT_CC,            MVT::f16, Expand);
2152     setOperationAction(ISD::BR_CC,                MVT::f16, Expand);
2153     setOperationAction(ISD::STRICT_FROUND,        MVT::f16, Promote);
2154     setOperationAction(ISD::FROUNDEVEN,           MVT::f16, Legal);
2155     setOperationAction(ISD::STRICT_FROUNDEVEN,    MVT::f16, Legal);
2156     setOperationAction(ISD::FP_ROUND,             MVT::f16, Custom);
2157     setOperationAction(ISD::STRICT_FP_ROUND,      MVT::f16, Custom);
2158     setOperationAction(ISD::FMAXIMUM,             MVT::f16, Custom);
2159     setOperationAction(ISD::FMINIMUM,             MVT::f16, Custom);
2160     setOperationAction(ISD::FP_EXTEND,            MVT::f32, Legal);
2161     setOperationAction(ISD::STRICT_FP_EXTEND,     MVT::f32, Legal);
2162 
2163     setCondCodeAction(ISD::SETOEQ, MVT::f16, Expand);
2164     setCondCodeAction(ISD::SETUNE, MVT::f16, Expand);
2165 
2166     if (Subtarget.useAVX512Regs()) {
2167       setGroup(MVT::v32f16);
2168       setOperationAction(ISD::SCALAR_TO_VECTOR,       MVT::v32f16, Custom);
2169       setOperationAction(ISD::SINT_TO_FP,             MVT::v32i16, Legal);
2170       setOperationAction(ISD::STRICT_SINT_TO_FP,      MVT::v32i16, Legal);
2171       setOperationAction(ISD::UINT_TO_FP,             MVT::v32i16, Legal);
2172       setOperationAction(ISD::STRICT_UINT_TO_FP,      MVT::v32i16, Legal);
2173       setOperationAction(ISD::FP_ROUND,               MVT::v16f16, Legal);
2174       setOperationAction(ISD::STRICT_FP_ROUND,        MVT::v16f16, Legal);
2175       setOperationAction(ISD::FP_EXTEND,              MVT::v16f32, Custom);
2176       setOperationAction(ISD::STRICT_FP_EXTEND,       MVT::v16f32, Legal);
2177       setOperationAction(ISD::FP_EXTEND,              MVT::v8f64,  Custom);
2178       setOperationAction(ISD::STRICT_FP_EXTEND,       MVT::v8f64,  Legal);
2179       setOperationAction(ISD::INSERT_VECTOR_ELT,      MVT::v32f16, Custom);
2180 
2181       setOperationAction(ISD::FP_TO_SINT,             MVT::v32i16, Custom);
2182       setOperationAction(ISD::STRICT_FP_TO_SINT,      MVT::v32i16, Custom);
2183       setOperationAction(ISD::FP_TO_UINT,             MVT::v32i16, Custom);
2184       setOperationAction(ISD::STRICT_FP_TO_UINT,      MVT::v32i16, Custom);
2185       setOperationPromotedToType(ISD::FP_TO_SINT,     MVT::v32i8,  MVT::v32i16);
2186       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i8,
2187                                  MVT::v32i16);
2188       setOperationPromotedToType(ISD::FP_TO_UINT,     MVT::v32i8,  MVT::v32i16);
2189       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i8,
2190                                  MVT::v32i16);
2191       setOperationPromotedToType(ISD::FP_TO_SINT,     MVT::v32i1,  MVT::v32i16);
2192       setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v32i1,
2193                                  MVT::v32i16);
2194       setOperationPromotedToType(ISD::FP_TO_UINT,     MVT::v32i1,  MVT::v32i16);
2195       setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v32i1,
2196                                  MVT::v32i16);
2197 
2198       setOperationAction(ISD::EXTRACT_SUBVECTOR,      MVT::v16f16, Legal);
2199       setOperationAction(ISD::INSERT_SUBVECTOR,       MVT::v32f16, Legal);
2200       setOperationAction(ISD::CONCAT_VECTORS,         MVT::v32f16, Custom);
2201 
2202       setLoadExtAction(ISD::EXTLOAD, MVT::v8f64,  MVT::v8f16,  Legal);
2203       setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Legal);
2204     }
2205 
2206     if (Subtarget.hasVLX()) {
2207       setGroup(MVT::v8f16);
2208       setGroup(MVT::v16f16);
2209 
2210       setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8f16,  Legal);
2211       setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16f16, Custom);
2212       setOperationAction(ISD::SINT_TO_FP,         MVT::v16i16, Legal);
2213       setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v16i16, Legal);
2214       setOperationAction(ISD::SINT_TO_FP,         MVT::v8i16,  Legal);
2215       setOperationAction(ISD::STRICT_SINT_TO_FP,  MVT::v8i16,  Legal);
2216       setOperationAction(ISD::UINT_TO_FP,         MVT::v16i16, Legal);
2217       setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v16i16, Legal);
2218       setOperationAction(ISD::UINT_TO_FP,         MVT::v8i16,  Legal);
2219       setOperationAction(ISD::STRICT_UINT_TO_FP,  MVT::v8i16,  Legal);
2220 
2221       setOperationAction(ISD::FP_TO_SINT,         MVT::v8i16, Custom);
2222       setOperationAction(ISD::STRICT_FP_TO_SINT,  MVT::v8i16, Custom);
2223       setOperationAction(ISD::FP_TO_UINT,         MVT::v8i16, Custom);
2224       setOperationAction(ISD::STRICT_FP_TO_UINT,  MVT::v8i16, Custom);
2225       setOperationAction(ISD::FP_ROUND,           MVT::v8f16, Legal);
2226       setOperationAction(ISD::STRICT_FP_ROUND,    MVT::v8f16, Legal);
2227       setOperationAction(ISD::FP_EXTEND,          MVT::v8f32, Custom);
2228       setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v8f32, Legal);
2229       setOperationAction(ISD::FP_EXTEND,          MVT::v4f64, Custom);
2230       setOperationAction(ISD::STRICT_FP_EXTEND,   MVT::v4f64, Legal);
2231 
2232       // INSERT_VECTOR_ELT v8f16 extended to VECTOR_SHUFFLE
2233       setOperationAction(ISD::INSERT_VECTOR_ELT,    MVT::v8f16,  Custom);
2234       setOperationAction(ISD::INSERT_VECTOR_ELT,    MVT::v16f16, Custom);
2235 
2236       setOperationAction(ISD::EXTRACT_SUBVECTOR,    MVT::v8f16, Legal);
2237       setOperationAction(ISD::INSERT_SUBVECTOR,     MVT::v16f16, Legal);
2238       setOperationAction(ISD::CONCAT_VECTORS,       MVT::v16f16, Custom);
2239 
2240       setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Legal);
2241       setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Legal);
2242       setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Legal);
2243       setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Legal);
2244 
2245       // Need to custom widen these to prevent scalarization.
2246       setOperationAction(ISD::LOAD,  MVT::v4f16, Custom);
2247       setOperationAction(ISD::STORE, MVT::v4f16, Custom);
2248     }
2249   }
2250 
2251   if (!Subtarget.useSoftFloat() &&
2252       (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16())) {
2253     addRegisterClass(MVT::v8bf16, Subtarget.hasAVX512() ? &X86::VR128XRegClass
2254                                                         : &X86::VR128RegClass);
2255     addRegisterClass(MVT::v16bf16, Subtarget.hasAVX512() ? &X86::VR256XRegClass
2256                                                          : &X86::VR256RegClass);
2257     // We set the type action of bf16 to TypeSoftPromoteHalf, but that path
2258     // cannot promote BUILD_VECTOR and INSERT_VECTOR_ELT, so mark those
2259     // operations Custom and handle them in custom lowering later.
2260     setOperationAction(ISD::BUILD_VECTOR, MVT::bf16, Custom);
2261     setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::bf16, Custom);
2262     for (auto VT : {MVT::v8bf16, MVT::v16bf16}) {
2263       setF16Action(VT, Expand);
2264       setOperationAction(ISD::FADD, VT, Expand);
2265       setOperationAction(ISD::FSUB, VT, Expand);
2266       setOperationAction(ISD::FMUL, VT, Expand);
2267       setOperationAction(ISD::FDIV, VT, Expand);
2268       setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
2269       setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
2270       setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
2271       setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
2272     }
2273     setOperationAction(ISD::FP_ROUND, MVT::v8bf16, Custom);
2274     addLegalFPImmediate(APFloat::getZero(APFloat::BFloat()));
2275   }
2276 
2277   if (!Subtarget.useSoftFloat() && Subtarget.hasBF16()) {
2278     addRegisterClass(MVT::v32bf16, &X86::VR512RegClass);
2279     setF16Action(MVT::v32bf16, Expand);
2280     setOperationAction(ISD::FADD, MVT::v32bf16, Expand);
2281     setOperationAction(ISD::FSUB, MVT::v32bf16, Expand);
2282     setOperationAction(ISD::FMUL, MVT::v32bf16, Expand);
2283     setOperationAction(ISD::FDIV, MVT::v32bf16, Expand);
2284     setOperationAction(ISD::BUILD_VECTOR, MVT::v32bf16, Custom);
2285     setOperationAction(ISD::FP_ROUND, MVT::v16bf16, Custom);
2286     setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32bf16, Custom);
2287     setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v32bf16, Legal);
2288     setOperationAction(ISD::CONCAT_VECTORS, MVT::v32bf16, Custom);
2289   }
2290 
2291   if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
2292     setTruncStoreAction(MVT::v4i64, MVT::v4i8,  Legal);
2293     setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
2294     setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
2295     setTruncStoreAction(MVT::v8i32, MVT::v8i8,  Legal);
2296     setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
2297 
2298     setTruncStoreAction(MVT::v2i64, MVT::v2i8,  Legal);
2299     setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
2300     setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
2301     setTruncStoreAction(MVT::v4i32, MVT::v4i8,  Legal);
2302     setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
2303 
2304     if (Subtarget.hasBWI()) {
2305       setTruncStoreAction(MVT::v16i16,  MVT::v16i8, Legal);
2306       setTruncStoreAction(MVT::v8i16,   MVT::v8i8,  Legal);
2307     }
2308 
2309     if (Subtarget.hasFP16()) {
2310       // vcvttph2[u]dq v4f16 -> v4i32/64, v2f16 -> v2i32/64
2311       setOperationAction(ISD::FP_TO_SINT,        MVT::v2f16, Custom);
2312       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f16, Custom);
2313       setOperationAction(ISD::FP_TO_UINT,        MVT::v2f16, Custom);
2314       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f16, Custom);
2315       setOperationAction(ISD::FP_TO_SINT,        MVT::v4f16, Custom);
2316       setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4f16, Custom);
2317       setOperationAction(ISD::FP_TO_UINT,        MVT::v4f16, Custom);
2318       setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4f16, Custom);
2319       // vcvt[u]dq2ph v4i32/64 -> v4f16, v2i32/64 -> v2f16
2320       setOperationAction(ISD::SINT_TO_FP,        MVT::v2f16, Custom);
2321       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f16, Custom);
2322       setOperationAction(ISD::UINT_TO_FP,        MVT::v2f16, Custom);
2323       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f16, Custom);
2324       setOperationAction(ISD::SINT_TO_FP,        MVT::v4f16, Custom);
2325       setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4f16, Custom);
2326       setOperationAction(ISD::UINT_TO_FP,        MVT::v4f16, Custom);
2327       setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4f16, Custom);
2328       // vcvtps2phx v4f32 -> v4f16, v2f32 -> v2f16
2329       setOperationAction(ISD::FP_ROUND,          MVT::v2f16, Custom);
2330       setOperationAction(ISD::STRICT_FP_ROUND,   MVT::v2f16, Custom);
2331       setOperationAction(ISD::FP_ROUND,          MVT::v4f16, Custom);
2332       setOperationAction(ISD::STRICT_FP_ROUND,   MVT::v4f16, Custom);
2333       // vcvtph2psx v4f16 -> v4f32, v2f16 -> v2f32
2334       setOperationAction(ISD::FP_EXTEND,         MVT::v2f16, Custom);
2335       setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v2f16, Custom);
2336       setOperationAction(ISD::FP_EXTEND,         MVT::v4f16, Custom);
2337       setOperationAction(ISD::STRICT_FP_EXTEND,  MVT::v4f16, Custom);
2338     }
2339   }
2340 
2341   if (!Subtarget.useSoftFloat() && Subtarget.hasAMXTILE()) {
2342     addRegisterClass(MVT::x86amx, &X86::TILERegClass);
2343   }
2344 
2345   // We want to custom lower some of our intrinsics.
2346   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
2347   setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
2348   setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
2349   if (!Subtarget.is64Bit()) {
2350     setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
2351   }
2352 
2353   // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
2354   // handle type legalization for these operations here.
2355   //
2356   // FIXME: We really should do custom legalization for addition and
2357   // subtraction on x86-32 once PR3203 is fixed.  We really can't do much better
2358   // than generic legalization for 64-bit multiplication-with-overflow, though.
2359   for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
2360     if (VT == MVT::i64 && !Subtarget.is64Bit())
2361       continue;
2362     // Add/Sub/Mul with overflow operations are custom lowered.
2363     setOperationAction(ISD::SADDO, VT, Custom);
2364     setOperationAction(ISD::UADDO, VT, Custom);
2365     setOperationAction(ISD::SSUBO, VT, Custom);
2366     setOperationAction(ISD::USUBO, VT, Custom);
2367     setOperationAction(ISD::SMULO, VT, Custom);
2368     setOperationAction(ISD::UMULO, VT, Custom);
2369 
2370     // Support carry in as value rather than glue.
2371     setOperationAction(ISD::UADDO_CARRY, VT, Custom);
2372     setOperationAction(ISD::USUBO_CARRY, VT, Custom);
2373     setOperationAction(ISD::SETCCCARRY, VT, Custom);
2374     setOperationAction(ISD::SADDO_CARRY, VT, Custom);
2375     setOperationAction(ISD::SSUBO_CARRY, VT, Custom);
2376   }
2377 
2378   if (!Subtarget.is64Bit()) {
2379     // These libcalls are not available in 32-bit mode.
2380     setLibcallName(RTLIB::SHL_I128, nullptr);
2381     setLibcallName(RTLIB::SRL_I128, nullptr);
2382     setLibcallName(RTLIB::SRA_I128, nullptr);
2383     setLibcallName(RTLIB::MUL_I128, nullptr);
2384     // The MULO libcall is not part of libgcc, only compiler-rt.
2385     setLibcallName(RTLIB::MULO_I64, nullptr);
2386   }
2387   // The MULO libcall is not part of libgcc, only compiler-rt.
2388   setLibcallName(RTLIB::MULO_I128, nullptr);
2389 
2390   // Combine sin / cos into _sincos_stret if it is available.
2391   if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
2392       getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
2393     setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
2394     setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
2395   }
2396 
2397   if (Subtarget.isTargetWin64()) {
2398     setOperationAction(ISD::SDIV, MVT::i128, Custom);
2399     setOperationAction(ISD::UDIV, MVT::i128, Custom);
2400     setOperationAction(ISD::SREM, MVT::i128, Custom);
2401     setOperationAction(ISD::UREM, MVT::i128, Custom);
2402     setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
2403     setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
2404     setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
2405     setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
2406     setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i128, Custom);
2407     setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i128, Custom);
2408     setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i128, Custom);
2409     setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i128, Custom);
2410   }
2411 
2412   // On 32-bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)` is. We
2413   // should promote the value to 64 bits to solve this.
2414   // This is what the CRT headers do - `fmodf` is an inline header function
2415   // casting to f64 and calling `fmod`.
2416   if (Subtarget.is32Bit() &&
2417       (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
2418     for (ISD::NodeType Op :
2419          {ISD::FCEIL,  ISD::STRICT_FCEIL,
2420           ISD::FCOS,   ISD::STRICT_FCOS,
2421           ISD::FEXP,   ISD::STRICT_FEXP,
2422           ISD::FFLOOR, ISD::STRICT_FFLOOR,
2423           ISD::FREM,   ISD::STRICT_FREM,
2424           ISD::FLOG,   ISD::STRICT_FLOG,
2425           ISD::FLOG10, ISD::STRICT_FLOG10,
2426           ISD::FPOW,   ISD::STRICT_FPOW,
2427           ISD::FSIN,   ISD::STRICT_FSIN})
2428       if (isOperationExpand(Op, MVT::f32))
2429         setOperationAction(Op, MVT::f32, Promote);
2430 
2431   // We have target-specific dag combine patterns for the following nodes:
2432   setTargetDAGCombine({ISD::VECTOR_SHUFFLE,
2433                        ISD::SCALAR_TO_VECTOR,
2434                        ISD::INSERT_VECTOR_ELT,
2435                        ISD::EXTRACT_VECTOR_ELT,
2436                        ISD::CONCAT_VECTORS,
2437                        ISD::INSERT_SUBVECTOR,
2438                        ISD::EXTRACT_SUBVECTOR,
2439                        ISD::BITCAST,
2440                        ISD::VSELECT,
2441                        ISD::SELECT,
2442                        ISD::SHL,
2443                        ISD::SRA,
2444                        ISD::SRL,
2445                        ISD::OR,
2446                        ISD::AND,
2447                        ISD::BITREVERSE,
2448                        ISD::ADD,
2449                        ISD::FADD,
2450                        ISD::FSUB,
2451                        ISD::FNEG,
2452                        ISD::FMA,
2453                        ISD::STRICT_FMA,
2454                        ISD::FMINNUM,
2455                        ISD::FMAXNUM,
2456                        ISD::SUB,
2457                        ISD::LOAD,
2458                        ISD::MLOAD,
2459                        ISD::STORE,
2460                        ISD::MSTORE,
2461                        ISD::TRUNCATE,
2462                        ISD::ZERO_EXTEND,
2463                        ISD::ANY_EXTEND,
2464                        ISD::SIGN_EXTEND,
2465                        ISD::SIGN_EXTEND_INREG,
2466                        ISD::ANY_EXTEND_VECTOR_INREG,
2467                        ISD::SIGN_EXTEND_VECTOR_INREG,
2468                        ISD::ZERO_EXTEND_VECTOR_INREG,
2469                        ISD::SINT_TO_FP,
2470                        ISD::UINT_TO_FP,
2471                        ISD::STRICT_SINT_TO_FP,
2472                        ISD::STRICT_UINT_TO_FP,
2473                        ISD::SETCC,
2474                        ISD::MUL,
2475                        ISD::XOR,
2476                        ISD::MSCATTER,
2477                        ISD::MGATHER,
2478                        ISD::FP16_TO_FP,
2479                        ISD::FP_EXTEND,
2480                        ISD::STRICT_FP_EXTEND,
2481                        ISD::FP_ROUND,
2482                        ISD::STRICT_FP_ROUND});
2483 
2484   computeRegisterProperties(Subtarget.getRegisterInfo());
2485 
2486   MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2487   MaxStoresPerMemsetOptSize = 8;
2488   MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2489   MaxStoresPerMemcpyOptSize = 4;
2490   MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2491   MaxStoresPerMemmoveOptSize = 4;
2492 
2493   // TODO: These control memcmp expansion in CGP and could be raised higher, but
2494   // that needs to be benchmarked and balanced with the potential use of vector
2495   // load/store types (PR33329, PR33914).
2496   MaxLoadsPerMemcmp = 2;
2497   MaxLoadsPerMemcmpOptSize = 2;
2498 
2499   // Default loop alignment, which can be overridden by -align-loops.
2500   setPrefLoopAlignment(Align(16));
2501 
2502   // An out-of-order CPU can speculatively execute past a predictable branch,
2503   // but a conditional move could be stalled by an expensive earlier operation.
2504   PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2505   EnableExtLdPromotion = true;
2506   setPrefFunctionAlignment(Align(16));
2507 
2508   verifyIntrinsicTables();
2509 
2510   // Default to having -disable-strictnode-mutation on
2511   IsStrictFPEnabled = true;
2512 }
2513 
2514 // This has so far only been implemented for 64-bit MachO.
2515 bool X86TargetLowering::useLoadStackGuardNode() const {
2516   return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2517 }
2518 
2519 bool X86TargetLowering::useStackGuardXorFP() const {
2520   // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2521   return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2522 }
2523 
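// Wrap the stack-guard value in an XOR32_FP/XOR64_FP pseudo so that the frame
// pointer can be XORed into it, matching the MSVC CRT scheme noted in
// useStackGuardXorFP() above.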
2524 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2525                                                const SDLoc &DL) const {
2526   EVT PtrTy = getPointerTy(DAG.getDataLayout());
2527   unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2528   MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2529   return SDValue(Node, 0);
2530 }
2531 
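// Decide how an illegal vector type should be legalized: split v32i1/v64i1
// masks when AVX512 lacks BWI, split f16 vectors without F16C support, widen
// other multi-element non-i1 vectors, and otherwise use the generic default.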
2532 TargetLoweringBase::LegalizeTypeAction
2533 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
2534   if ((VT == MVT::v32i1 || VT == MVT::v64i1) && Subtarget.hasAVX512() &&
2535       !Subtarget.hasBWI())
2536     return TypeSplitVector;
2537 
2538   if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2539       !Subtarget.hasF16C() && VT.getVectorElementType() == MVT::f16)
2540     return TypeSplitVector;
2541 
2542   if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
2543       VT.getVectorElementType() != MVT::i1)
2544     return TypeWidenVector;
2545 
2546   return TargetLoweringBase::getPreferredVectorAction(VT);
2547 }
2548 
2549 FastISel *
2550 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
2551                                   const TargetLibraryInfo *libInfo) const {
2552   return X86::createFastISel(funcInfo, libInfo);
2553 }
2554 
2555 //===----------------------------------------------------------------------===//
2556 //                           Other Lowering Hooks
2557 //===----------------------------------------------------------------------===//
2558 
2559 bool X86::mayFoldLoad(SDValue Op, const X86Subtarget &Subtarget,
2560                       bool AssumeSingleUse) {
2561   if (!AssumeSingleUse && !Op.hasOneUse())
2562     return false;
2563   if (!ISD::isNormalLoad(Op.getNode()))
2564     return false;
2565 
2566   // If this is an unaligned vector load, make sure the target supports folding it.
2567   auto *Ld = cast<LoadSDNode>(Op.getNode());
2568   if (!Subtarget.hasAVX() && !Subtarget.hasSSEUnalignedMem() &&
2569       Ld->getValueSizeInBits(0) == 128 && Ld->getAlign() < Align(16))
2570     return false;
2571 
2572   // TODO: If this is a non-temporal load and the target has an instruction
2573   //       for it, it should not be folded. See "useNonTemporalLoad()".
2574 
2575   return true;
2576 }
2577 
2578 bool X86::mayFoldLoadIntoBroadcastFromMem(SDValue Op, MVT EltVT,
2579                                           const X86Subtarget &Subtarget,
2580                                           bool AssumeSingleUse) {
2581   assert(Subtarget.hasAVX() && "Expected AVX for broadcast from memory");
2582   if (!X86::mayFoldLoad(Op, Subtarget, AssumeSingleUse))
2583     return false;
2584 
2585   // We cannot replace a wide volatile load with a broadcast-from-memory,
2586   // because that would narrow the load, which isn't legal for volatiles.
2587   auto *Ld = cast<LoadSDNode>(Op.getNode());
2588   return !Ld->isVolatile() ||
2589          Ld->getValueSizeInBits(0) == EltVT.getScalarSizeInBits();
2590 }
2591 
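/// Returns true if Op's single use is a normal store, i.e. the value may be
/// foldable into that store.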
2592 bool X86::mayFoldIntoStore(SDValue Op) {
2593   return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
2594 }
2595 
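/// Returns true if Op has exactly one use and that use is a ZERO_EXTEND.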
2596 bool X86::mayFoldIntoZeroExtend(SDValue Op) {
2597   if (Op.hasOneUse()) {
2598     unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
2599     return (ISD::ZERO_EXTEND == Opcode);
2600   }
2601   return false;
2602 }
2603 
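/// Returns true if Opcode is one of the X86-specific target shuffle opcodes
/// (as opposed to a generic ISD::VECTOR_SHUFFLE).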
2604 static bool isTargetShuffle(unsigned Opcode) {
2605   switch(Opcode) {
2606   default: return false;
2607   case X86ISD::BLENDI:
2608   case X86ISD::PSHUFB:
2609   case X86ISD::PSHUFD:
2610   case X86ISD::PSHUFHW:
2611   case X86ISD::PSHUFLW:
2612   case X86ISD::SHUFP:
2613   case X86ISD::INSERTPS:
2614   case X86ISD::EXTRQI:
2615   case X86ISD::INSERTQI:
2616   case X86ISD::VALIGN:
2617   case X86ISD::PALIGNR:
2618   case X86ISD::VSHLDQ:
2619   case X86ISD::VSRLDQ:
2620   case X86ISD::MOVLHPS:
2621   case X86ISD::MOVHLPS:
2622   case X86ISD::MOVSHDUP:
2623   case X86ISD::MOVSLDUP:
2624   case X86ISD::MOVDDUP:
2625   case X86ISD::MOVSS:
2626   case X86ISD::MOVSD:
2627   case X86ISD::MOVSH:
2628   case X86ISD::UNPCKL:
2629   case X86ISD::UNPCKH:
2630   case X86ISD::VBROADCAST:
2631   case X86ISD::VPERMILPI:
2632   case X86ISD::VPERMILPV:
2633   case X86ISD::VPERM2X128:
2634   case X86ISD::SHUF128:
2635   case X86ISD::VPERMIL2:
2636   case X86ISD::VPERMI:
2637   case X86ISD::VPPERM:
2638   case X86ISD::VPERMV:
2639   case X86ISD::VPERMV3:
2640   case X86ISD::VZEXT_MOVL:
2641     return true;
2642   }
2643 }
2644 
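/// Returns true if Opcode is a target shuffle whose mask is supplied in a
/// variable operand rather than an immediate, including the logic ops treated
/// as 'faux' shuffles below.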
2645 static bool isTargetShuffleVariableMask(unsigned Opcode) {
2646   switch (Opcode) {
2647   default: return false;
2648   // Target Shuffles.
2649   case X86ISD::PSHUFB:
2650   case X86ISD::VPERMILPV:
2651   case X86ISD::VPERMIL2:
2652   case X86ISD::VPPERM:
2653   case X86ISD::VPERMV:
2654   case X86ISD::VPERMV3:
2655     return true;
2656   // 'Faux' Target Shuffles.
2657   case ISD::OR:
2658   case ISD::AND:
2659   case X86ISD::ANDNP:
2660     return true;
2661   }
2662 }
2663 
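// Lazily create a fixed frame object for the return-address slot, cache its
// index in X86MachineFunctionInfo, and return it as a frame-index node.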
2664 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
2665   MachineFunction &MF = DAG.getMachineFunction();
2666   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
2667   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2668   int ReturnAddrIndex = FuncInfo->getRAIndex();
2669 
2670   if (ReturnAddrIndex == 0) {
2671     // Set up a frame object for the return address.
2672     unsigned SlotSize = RegInfo->getSlotSize();
2673     ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
2674                                                           -(int64_t)SlotSize,
2675                                                           false);
2676     FuncInfo->setRAIndex(ReturnAddrIndex);
2677   }
2678 
2679   return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
2680 }
2681 
2682 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model CM,
2683                                        bool HasSymbolicDisplacement) {
2684   // Offset should fit into 32 bit immediate field.
2685   if (!isInt<32>(Offset))
2686     return false;
2687 
2688   // If we don't have a symbolic displacement - we don't have any extra
2689   // restrictions.
2690   if (!HasSymbolicDisplacement)
2691     return true;
2692 
2693   // We can fold large offsets in the large code model because we always use
2694   // 64-bit offsets.
2695   if (CM == CodeModel::Large)
2696     return true;
2697 
2698   // For the kernel code model we know that all objects reside in the negative
2699   // half of the 32-bit address space, so we must not accept negative offsets
2700   // (they could fall just outside that range), but large positive ones are fine.
2701   if (CM == CodeModel::Kernel)
2702     return Offset >= 0;
2703 
2704   // For the other non-large code models we assume that the last small object
2705   // ends at least 16MB below the 2^31 boundary. We may also accept fairly large
2706   // negative constants, knowing that all objects lie in the positive half of
2707   // the address space.
2708   return Offset < 16 * 1024 * 1024;
2709 }
2710 
2711 /// Return true if the condition is a signed comparison operation.
2712 static bool isX86CCSigned(unsigned X86CC) {
2713   switch (X86CC) {
2714   default:
2715     llvm_unreachable("Invalid integer condition!");
2716   case X86::COND_E:
2717   case X86::COND_NE:
2718   case X86::COND_B:
2719   case X86::COND_A:
2720   case X86::COND_BE:
2721   case X86::COND_AE:
2722     return false;
2723   case X86::COND_G:
2724   case X86::COND_GE:
2725   case X86::COND_L:
2726   case X86::COND_LE:
2727     return true;
2728   }
2729 }
2730 
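/// Map an integer ISD::CondCode to the corresponding X86 condition code, e.g.
/// SETGT -> COND_G for signed compares and SETUGT -> COND_A for unsigned ones.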
2731 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
2732   switch (SetCCOpcode) {
2733   default: llvm_unreachable("Invalid integer condition!");
2734   case ISD::SETEQ:  return X86::COND_E;
2735   case ISD::SETGT:  return X86::COND_G;
2736   case ISD::SETGE:  return X86::COND_GE;
2737   case ISD::SETLT:  return X86::COND_L;
2738   case ISD::SETLE:  return X86::COND_LE;
2739   case ISD::SETNE:  return X86::COND_NE;
2740   case ISD::SETULT: return X86::COND_B;
2741   case ISD::SETUGT: return X86::COND_A;
2742   case ISD::SETULE: return X86::COND_BE;
2743   case ISD::SETUGE: return X86::COND_AE;
2744   }
2745 }
2746 
2747 /// Do a one-to-one translation of an ISD::CondCode to the X86-specific
2748 /// condition code, returning the condition code and the LHS/RHS of the
2749 /// comparison to make.
2750 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
2751                                     bool isFP, SDValue &LHS, SDValue &RHS,
2752                                     SelectionDAG &DAG) {
2753   if (!isFP) {
2754     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
2755       if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnes()) {
2756         // X > -1   -> X == 0, jump !sign.
2757         RHS = DAG.getConstant(0, DL, RHS.getValueType());
2758         return X86::COND_NS;
2759       }
2760       if (SetCCOpcode == ISD::SETLT && RHSC->isZero()) {
2761         // X < 0   -> X == 0, jump on sign.
2762         return X86::COND_S;
2763       }
2764       if (SetCCOpcode == ISD::SETGE && RHSC->isZero()) {
2765         // X >= 0   -> X == 0, jump on !sign.
2766         return X86::COND_NS;
2767       }
2768       if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
2769         // X < 1   -> X <= 0
2770         RHS = DAG.getConstant(0, DL, RHS.getValueType());
2771         return X86::COND_LE;
2772       }
2773     }
2774 
2775     return TranslateIntegerX86CC(SetCCOpcode);
2776   }
2777 
2778   // First determine if it is required or is profitable to flip the operands.
2779 
2780   // If LHS is a foldable load, but RHS is not, flip the condition.
2781   if (ISD::isNON_EXTLoad(LHS.getNode()) &&
2782       !ISD::isNON_EXTLoad(RHS.getNode())) {
2783     SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
2784     std::swap(LHS, RHS);
2785   }
2786 
2787   switch (SetCCOpcode) {
2788   default: break;
2789   case ISD::SETOLT:
2790   case ISD::SETOLE:
2791   case ISD::SETUGT:
2792   case ISD::SETUGE:
2793     std::swap(LHS, RHS);
2794     break;
2795   }
2796 
2797   // On a floating point condition, the flags are set as follows:
2798   // ZF  PF  CF   op
2799   //  0 | 0 | 0 | X > Y
2800   //  0 | 0 | 1 | X < Y
2801   //  1 | 0 | 0 | X == Y
2802   //  1 | 1 | 1 | unordered
2803   switch (SetCCOpcode) {
2804   default: llvm_unreachable("Condcode should be pre-legalized away");
2805   case ISD::SETUEQ:
2806   case ISD::SETEQ:   return X86::COND_E;
2807   case ISD::SETOLT:              // flipped
2808   case ISD::SETOGT:
2809   case ISD::SETGT:   return X86::COND_A;
2810   case ISD::SETOLE:              // flipped
2811   case ISD::SETOGE:
2812   case ISD::SETGE:   return X86::COND_AE;
2813   case ISD::SETUGT:              // flipped
2814   case ISD::SETULT:
2815   case ISD::SETLT:   return X86::COND_B;
2816   case ISD::SETUGE:              // flipped
2817   case ISD::SETULE:
2818   case ISD::SETLE:   return X86::COND_BE;
2819   case ISD::SETONE:
2820   case ISD::SETNE:   return X86::COND_NE;
2821   case ISD::SETUO:   return X86::COND_P;
2822   case ISD::SETO:    return X86::COND_NP;
2823   case ISD::SETOEQ:
2824   case ISD::SETUNE:  return X86::COND_INVALID;
2825   }
2826 }
2827 
2828 /// Is there a floating point cmov for the specific X86 condition code?
2829 /// The current x86 ISA includes the following FP cmov instructions:
2830 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
2831 static bool hasFPCMov(unsigned X86CC) {
2832   switch (X86CC) {
2833   default:
2834     return false;
2835   case X86::COND_B:
2836   case X86::COND_BE:
2837   case X86::COND_E:
2838   case X86::COND_P:
2839   case X86::COND_A:
2840   case X86::COND_AE:
2841   case X86::COND_NE:
2842   case X86::COND_NP:
2843     return true;
2844   }
2845 }
2846 
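// Whether VPTERNLOG can be used for this type: requires AVX512VL for
// 128/256-bit vectors, the ability to extend to 512-bit DQ vectors, or a VT
// that is already 512 bits wide.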
2847 static bool useVPTERNLOG(const X86Subtarget &Subtarget, MVT VT) {
2848   return Subtarget.hasVLX() || Subtarget.canExtendTo512DQ() ||
2849          VT.is512BitVector();
2850 }
2851 
2852 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
2853                                            const CallInst &I,
2854                                            MachineFunction &MF,
2855                                            unsigned Intrinsic) const {
2856   Info.flags = MachineMemOperand::MONone;
2857   Info.offset = 0;
2858 
2859   const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
2860   if (!IntrData) {
2861     switch (Intrinsic) {
2862     case Intrinsic::x86_aesenc128kl:
2863     case Intrinsic::x86_aesdec128kl:
2864       Info.opc = ISD::INTRINSIC_W_CHAIN;
2865       Info.ptrVal = I.getArgOperand(1);
2866       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
2867       Info.align = Align(1);
2868       Info.flags |= MachineMemOperand::MOLoad;
2869       return true;
2870     case Intrinsic::x86_aesenc256kl:
2871     case Intrinsic::x86_aesdec256kl:
2872       Info.opc = ISD::INTRINSIC_W_CHAIN;
2873       Info.ptrVal = I.getArgOperand(1);
2874       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
2875       Info.align = Align(1);
2876       Info.flags |= MachineMemOperand::MOLoad;
2877       return true;
2878     case Intrinsic::x86_aesencwide128kl:
2879     case Intrinsic::x86_aesdecwide128kl:
2880       Info.opc = ISD::INTRINSIC_W_CHAIN;
2881       Info.ptrVal = I.getArgOperand(0);
2882       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 48);
2883       Info.align = Align(1);
2884       Info.flags |= MachineMemOperand::MOLoad;
2885       return true;
2886     case Intrinsic::x86_aesencwide256kl:
2887     case Intrinsic::x86_aesdecwide256kl:
2888       Info.opc = ISD::INTRINSIC_W_CHAIN;
2889       Info.ptrVal = I.getArgOperand(0);
2890       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), 64);
2891       Info.align = Align(1);
2892       Info.flags |= MachineMemOperand::MOLoad;
2893       return true;
2894     case Intrinsic::x86_cmpccxadd32:
2895     case Intrinsic::x86_cmpccxadd64:
2896     case Intrinsic::x86_atomic_bts:
2897     case Intrinsic::x86_atomic_btc:
2898     case Intrinsic::x86_atomic_btr: {
2899       Info.opc = ISD::INTRINSIC_W_CHAIN;
2900       Info.ptrVal = I.getArgOperand(0);
2901       unsigned Size = I.getType()->getScalarSizeInBits();
2902       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
2903       Info.align = Align(Size);
2904       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2905                     MachineMemOperand::MOVolatile;
2906       return true;
2907     }
2908     case Intrinsic::x86_atomic_bts_rm:
2909     case Intrinsic::x86_atomic_btc_rm:
2910     case Intrinsic::x86_atomic_btr_rm: {
2911       Info.opc = ISD::INTRINSIC_W_CHAIN;
2912       Info.ptrVal = I.getArgOperand(0);
2913       unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
2914       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
2915       Info.align = Align(Size);
2916       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2917                     MachineMemOperand::MOVolatile;
2918       return true;
2919     }
2920     case Intrinsic::x86_aadd32:
2921     case Intrinsic::x86_aadd64:
2922     case Intrinsic::x86_aand32:
2923     case Intrinsic::x86_aand64:
2924     case Intrinsic::x86_aor32:
2925     case Intrinsic::x86_aor64:
2926     case Intrinsic::x86_axor32:
2927     case Intrinsic::x86_axor64:
2928     case Intrinsic::x86_atomic_add_cc:
2929     case Intrinsic::x86_atomic_sub_cc:
2930     case Intrinsic::x86_atomic_or_cc:
2931     case Intrinsic::x86_atomic_and_cc:
2932     case Intrinsic::x86_atomic_xor_cc: {
2933       Info.opc = ISD::INTRINSIC_W_CHAIN;
2934       Info.ptrVal = I.getArgOperand(0);
2935       unsigned Size = I.getArgOperand(1)->getType()->getScalarSizeInBits();
2936       Info.memVT = EVT::getIntegerVT(I.getType()->getContext(), Size);
2937       Info.align = Align(Size);
2938       Info.flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
2939                     MachineMemOperand::MOVolatile;
2940       return true;
2941     }
2942     }
2943     return false;
2944   }
2945 
2946   switch (IntrData->Type) {
2947   case TRUNCATE_TO_MEM_VI8:
2948   case TRUNCATE_TO_MEM_VI16:
2949   case TRUNCATE_TO_MEM_VI32: {
2950     Info.opc = ISD::INTRINSIC_VOID;
2951     Info.ptrVal = I.getArgOperand(0);
2952     MVT VT  = MVT::getVT(I.getArgOperand(1)->getType());
2953     MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
2954     if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
2955       ScalarVT = MVT::i8;
2956     else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
2957       ScalarVT = MVT::i16;
2958     else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
2959       ScalarVT = MVT::i32;
2960 
2961     Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
2962     Info.align = Align(1);
2963     Info.flags |= MachineMemOperand::MOStore;
2964     break;
2965   }
2966   case GATHER:
2967   case GATHER_AVX2: {
2968     Info.opc = ISD::INTRINSIC_W_CHAIN;
2969     Info.ptrVal = nullptr;
2970     MVT DataVT = MVT::getVT(I.getType());
2971     MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
2972     unsigned NumElts = std::min(DataVT.getVectorNumElements(),
2973                                 IndexVT.getVectorNumElements());
2974     Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
2975     Info.align = Align(1);
2976     Info.flags |= MachineMemOperand::MOLoad;
2977     break;
2978   }
2979   case SCATTER: {
2980     Info.opc = ISD::INTRINSIC_VOID;
2981     Info.ptrVal = nullptr;
2982     MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
2983     MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
2984     unsigned NumElts = std::min(DataVT.getVectorNumElements(),
2985                                 IndexVT.getVectorNumElements());
2986     Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
2987     Info.align = Align(1);
2988     Info.flags |= MachineMemOperand::MOStore;
2989     break;
2990   }
2991   default:
2992     return false;
2993   }
2994 
2995   return true;
2996 }
2997 
2998 /// Returns true if the target can instruction select the
2999 /// specified FP immediate natively. If false, the legalizer will
3000 /// materialize the FP immediate as a load from a constant pool.
3001 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
3002                                      bool ForCodeSize) const {
3003   for (const APFloat &FPImm : LegalFPImmediates)
3004     if (Imm.bitwiseIsEqual(FPImm))
3005       return true;
3006   return false;
3007 }
3008 
3009 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3010                                               ISD::LoadExtType ExtTy,
3011                                               EVT NewVT) const {
3012   assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
3013 
3014   // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3015   // relocations must target a movq or addq instruction: don't let the load shrink.
3016   SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3017   if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3018     if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3019       return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3020 
3021   // If this is (1) an AVX vector load with (2) multiple uses and (3) all of
3022   // those uses are extracted directly into a store, then the extract + store
3023   // can be store-folded. Therefore, it's probably not worth splitting the load.
3024   EVT VT = Load->getValueType(0);
3025   if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
3026     for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
3027       // Skip uses of the chain value. Result 0 of the node is the load value.
3028       if (UI.getUse().getResNo() != 0)
3029         continue;
3030 
3031       // If this use is not an extract + store, it's probably worth splitting.
3032       if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
3033           UI->use_begin()->getOpcode() != ISD::STORE)
3034         return true;
3035     }
3036     // All non-chain uses are extract + store.
3037     return false;
3038   }
3039 
3040   return true;
3041 }
3042 
3043 /// Returns true if it is beneficial to convert a load of a constant
3044 /// to just the constant itself.
3045 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3046                                                           Type *Ty) const {
3047   assert(Ty->isIntegerTy());
3048 
3049   unsigned BitSize = Ty->getPrimitiveSizeInBits();
3050   if (BitSize == 0 || BitSize > 64)
3051     return false;
3052   return true;
3053 }
3054 
3055 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
3056   // If we are using XMM registers in the ABI and the condition of the select is
3057   // a floating-point compare and we have blendv or conditional move, then it is
3058   // cheaper to select instead of doing a cross-register move and creating a
3059   // load that depends on the compare result.
3060   bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
3061   return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
3062 }
3063 
3064 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
3065   // TODO: It might be a win to ease or lift this restriction, but the generic
3066   // folds in DAGCombiner conflict with vector folds for an AVX512 target.
3067   if (VT.isVector() && Subtarget.hasAVX512())
3068     return false;
3069 
3070   return true;
3071 }
3072 
3073 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
3074                                                SDValue C) const {
3075   // TODO: We handle scalars using custom code, but generic combining could make
3076   // that unnecessary.
3077   APInt MulC;
3078   if (!ISD::isConstantSplatVector(C.getNode(), MulC))
3079     return false;
3080 
3081   // Find the type this will be legalized to. Otherwise we might prematurely
3082   // convert this to shl+add/sub and then still have to type legalize those ops.
3083   // Another choice would be to defer the decision for illegal types until
3084   // after type legalization. But constant splat vectors of i64 can't make it
3085   // through type legalization on 32-bit targets so we would need to special
3086   // case vXi64.
3087   while (getTypeAction(Context, VT) != TypeLegal)
3088     VT = getTypeToTransformTo(Context, VT);
3089 
3090   // If vector multiply is legal, assume that's faster than shl + add/sub.
3091   // Multiply is a complex op with higher latency and lower throughput in most
3092   // implementations, but sub-vXi32 vector multiplies are always fast, vXi32
3093   // must not be on a target with a slow PMULLD implementation, and anything
3094   // larger (vXi64) is always going to be slow.
3095   unsigned EltSizeInBits = VT.getScalarSizeInBits();
3096   if (isOperationLegal(ISD::MUL, VT) && EltSizeInBits <= 32 &&
3097       (EltSizeInBits != 32 || !Subtarget.isPMULLDSlow()))
3098     return false;
3099 
3100   // shl+add, shl+sub, shl+add+neg
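  // For example: 9*X -> (X << 3) + X, 7*X -> (X << 3) - X, and
  // -9*X -> -((X << 3) + X).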
3101   return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
3102          (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
3103 }
3104 
3105 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
3106                                                 unsigned Index) const {
3107   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3108     return false;
3109 
3110   // Mask vectors support all subregister combinations and operations that
3111   // extract half of a vector.
3112   if (ResVT.getVectorElementType() == MVT::i1)
3113     return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
3114                           (Index == ResVT.getVectorNumElements()));
3115 
3116   return (Index % ResVT.getVectorNumElements()) == 0;
3117 }
3118 
3119 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
3120   unsigned Opc = VecOp.getOpcode();
3121 
3122   // Assume target opcodes can't be scalarized.
3123   // TODO - do we have any exceptions?
3124   if (Opc >= ISD::BUILTIN_OP_END)
3125     return false;
3126 
3127   // If the vector op is not supported, try to convert to scalar.
3128   EVT VecVT = VecOp.getValueType();
3129   if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
3130     return true;
3131 
3132   // If the vector op is supported, but the scalar op is not, the transform may
3133   // not be worthwhile.
3134   EVT ScalarVT = VecVT.getScalarType();
3135   return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
3136 }
3137 
3138 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
3139                                              bool) const {
3140   // TODO: Allow vectors?
3141   if (VT.isVector())
3142     return false;
3143   return VT.isSimple() || !isOperationExpand(Opcode, VT);
3144 }
3145 
3146 bool X86TargetLowering::isCheapToSpeculateCttz(Type *Ty) const {
3147   // Speculate cttz only if we can directly use TZCNT or can promote to i32.
3148   return Subtarget.hasBMI() ||
3149          (!Ty->isVectorTy() && Ty->getScalarSizeInBits() < 32);
3150 }
3151 
3152 bool X86TargetLowering::isCheapToSpeculateCtlz(Type *Ty) const {
3153   // Speculate ctlz only if we can directly use LZCNT.
3154   return Subtarget.hasLZCNT();
3155 }
3156 
3157 bool X86TargetLowering::ShouldShrinkFPConstant(EVT VT) const {
3158   // Don't shrink FP constant pool entries if SSE2 is available, since cvtss2sd
3159   // is more expensive than a straight movsd. On the other hand, it's important
3160   // to shrink long double FP constants since fldt is very slow.
3161   return !Subtarget.hasSSE2() || VT == MVT::f80;
3162 }
3163 
3164 bool X86TargetLowering::isScalarFPTypeInSSEReg(EVT VT) const {
3165   return (VT == MVT::f64 && Subtarget.hasSSE2()) ||
3166          (VT == MVT::f32 && Subtarget.hasSSE1()) || VT == MVT::f16;
3167 }
3168 
3169 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
3170                                                 const SelectionDAG &DAG,
3171                                                 const MachineMemOperand &MMO) const {
3172   if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
3173       BitcastVT.getVectorElementType() == MVT::i1)
3174     return false;
3175 
3176   if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
3177     return false;
3178 
3179   // If both types are legal vectors, it's always ok to convert them.
3180   if (LoadVT.isVector() && BitcastVT.isVector() &&
3181       isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
3182     return true;
3183 
3184   return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
3185 }
3186 
3187 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
3188                                          const MachineFunction &MF) const {
3189   // Do not merge to float/vector value sizes if the NoImplicitFloat
3190   // attribute is set.
3191   bool NoFloat = MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat);
3192 
3193   if (NoFloat) {
3194     unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
3195     return (MemVT.getSizeInBits() <= MaxIntSize);
3196   }
3197   // Make sure we don't merge greater than our preferred vector
3198   // width.
3199   if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
3200     return false;
3201 
3202   return true;
3203 }
3204 
3205 bool X86TargetLowering::isCtlzFast() const {
3206   return Subtarget.hasFastLZCNT();
3207 }
3208 
3209 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
3210     const Instruction &AndI) const {
3211   return true;
3212 }
3213 
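// Only allow and-not compare formation where BMI 'andn' can be used: scalar
// i32/i64 with a non-constant mask operand.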
3214 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
3215   EVT VT = Y.getValueType();
3216 
3217   if (VT.isVector())
3218     return false;
3219 
3220   if (!Subtarget.hasBMI())
3221     return false;
3222 
3223   // There are only 32-bit and 64-bit forms for 'andn'.
3224   if (VT != MVT::i32 && VT != MVT::i64)
3225     return false;
3226 
3227   return !isa<ConstantSDNode>(Y);
3228 }
3229 
3230 bool X86TargetLowering::hasAndNot(SDValue Y) const {
3231   EVT VT = Y.getValueType();
3232 
3233   if (!VT.isVector())
3234     return hasAndNotCompare(Y);
3235 
3236   // Vector.
3237 
3238   if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
3239     return false;
3240 
3241   if (VT == MVT::v4i32)
3242     return true;
3243 
3244   return Subtarget.hasSSE2();
3245 }
3246 
3247 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
3248   return X.getValueType().isScalarInteger(); // 'bt'
3249 }
3250 
3251 bool X86TargetLowering::
3252     shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3253         SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
3254         unsigned OldShiftOpcode, unsigned NewShiftOpcode,
3255         SelectionDAG &DAG) const {
3256   // Does baseline recommend not to perform the fold by default?
3257   if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
3258           X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
3259     return false;
3260   // For scalars this transform is always beneficial.
3261   if (X.getValueType().isScalarInteger())
3262     return true;
3263   // If all the shift amounts are identical, then the transform is beneficial even
3264   // with rudimentary SSE2 shifts.
3265   if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
3266     return true;
3267   // If we have AVX2 with its powerful shift operations, then it's also good.
3268   if (Subtarget.hasAVX2())
3269     return true;
3270   // Pre-AVX2 vector codegen for this pattern is best for the variant with 'shl'.
3271   return NewShiftOpcode == ISD::SHL;
3272 }
3273 
3274 unsigned X86TargetLowering::preferedOpcodeForCmpEqPiecesOfOperand(
3275     EVT VT, unsigned ShiftOpc, bool MayTransformRotate,
3276     const APInt &ShiftOrRotateAmt, const std::optional<APInt> &AndMask) const {
3277   if (!VT.isInteger())
3278     return ShiftOpc;
3279 
3280   bool PreferRotate = false;
3281   if (VT.isVector()) {
3282     // For vectors, if we have rotate instruction support, then it is definitely
3283     // best. Otherwise it is not clear which is best, so just don't make changes.
3284     PreferRotate = Subtarget.hasAVX512() && (VT.getScalarType() == MVT::i32 ||
3285                                              VT.getScalarType() == MVT::i64);
3286   } else {
3287     // For scalars, if we have BMI2, prefer rotate for rorx. Otherwise prefer
3288     // rotate unless we would end up with a zext mask+shr.
3289     PreferRotate = Subtarget.hasBMI2();
3290     if (!PreferRotate) {
3291       unsigned MaskBits =
3292           VT.getScalarSizeInBits() - ShiftOrRotateAmt.getZExtValue();
3293       PreferRotate = (MaskBits != 8) && (MaskBits != 16) && (MaskBits != 32);
3294     }
3295   }
3296 
3297   if (ShiftOpc == ISD::SHL || ShiftOpc == ISD::SRL) {
3298     assert(AndMask.has_value() && "Null andmask when querying about shift+and");
3299 
3300     if (PreferRotate && MayTransformRotate)
3301       return ISD::ROTL;
3302 
3303     // For vectors we don't really get much benefit from swapping constants
3304     // around. Maybe in the future we could check whether the DAG already has
3305     // the flipped node.
3306     if (VT.isVector())
3307       return ShiftOpc;
3308 
3309     // See if it is beneficial to swap the shift type.
3310     if (ShiftOpc == ISD::SHL) {
3311       // If the current setup has an imm64 mask, then the inverse will have
3312       // at least an imm32 mask (or be a zext i32 -> i64).
3313       if (VT == MVT::i64)
3314         return AndMask->getSignificantBits() > 32 ? (unsigned)ISD::SRL
3315                                                   : ShiftOpc;
3316 
3317       // We can only benefit if the mask requires at least 7 bits. We don't
3318       // want to replace shl of 1, 2, or 3, as those can be implemented with
3319       // lea/add.
3320       return ShiftOrRotateAmt.uge(7) ? (unsigned)ISD::SRL : ShiftOpc;
3321     }
3322 
3323     if (VT == MVT::i64)
3324       // Keep an exactly 32-bit imm64 mask; that is a zext i32 -> i64, which is
3325       // extremely efficient.
3326       return AndMask->getSignificantBits() > 33 ? (unsigned)ISD::SHL : ShiftOpc;
3327 
3328     // Keep small shifts as shl so we can generate add/lea.
3329     return ShiftOrRotateAmt.ult(7) ? (unsigned)ISD::SHL : ShiftOpc;
3330   }
3331 
3332   // We prefer rotate for vectors, or if we won't get a zext mask with SRL
3333   // (PreferRotate will be set in the latter case).
3334   if (PreferRotate || VT.isVector())
3335     return ShiftOpc;
3336 
3337   // Non-vector type and we have a zext mask with SRL.
3338   return ISD::SRL;
3339 }
3340 
3341 bool X86TargetLowering::preferScalarizeSplat(SDNode *N) const {
3342   return N->getOpcode() != ISD::FP_EXTEND;
3343 }
3344 
3345 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
3346     const SDNode *N, CombineLevel Level) const {
3347   assert(((N->getOpcode() == ISD::SHL &&
3348            N->getOperand(0).getOpcode() == ISD::SRL) ||
3349           (N->getOpcode() == ISD::SRL &&
3350            N->getOperand(0).getOpcode() == ISD::SHL)) &&
3351          "Expected shift-shift mask");
3352   // TODO: Should we always create i64 masks? Or only folded immediates?
3353   EVT VT = N->getValueType(0);
3354   if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
3355       (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
3356     // Only fold if the shift values are equal - so it folds to AND.
3357     // TODO - we should fold if either is a non-uniform vector but we don't do
3358     // the fold for non-splats yet.
3359     return N->getOperand(1) == N->getOperand(0).getOperand(1);
3360   }
3361   return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
3362 }
3363 
3364 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
3365   EVT VT = Y.getValueType();
3366 
3367   // For vectors, we don't have a preference, but we probably want a mask.
3368   if (VT.isVector())
3369     return false;
3370 
3371   // 64-bit shifts on 32-bit targets produce really bad bloated code.
3372   if (VT == MVT::i64 && !Subtarget.is64Bit())
3373     return false;
3374 
3375   return true;
3376 }
3377 
3378 TargetLowering::ShiftLegalizationStrategy
3379 X86TargetLowering::preferredShiftLegalizationStrategy(
3380     SelectionDAG &DAG, SDNode *N, unsigned ExpansionFactor) const {
3381   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
3382       !Subtarget.isOSWindows())
3383     return ShiftLegalizationStrategy::LowerToLibcall;
3384   return TargetLowering::preferredShiftLegalizationStrategy(DAG, N,
3385                                                             ExpansionFactor);
3386 }
3387 
3388 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
3389   // Any legal vector type can be splatted more efficiently than
3390   // loading/spilling from memory.
3391   return isTypeLegal(VT);
3392 }
3393 
3394 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
3395   MVT VT = MVT::getIntegerVT(NumBits);
3396   if (isTypeLegal(VT))
3397     return VT;
3398 
3399   // PMOVMSKB can handle this.
3400   if (NumBits == 128 && isTypeLegal(MVT::v16i8))
3401     return MVT::v16i8;
3402 
3403   // VPMOVMSKB can handle this.
3404   if (NumBits == 256 && isTypeLegal(MVT::v32i8))
3405     return MVT::v32i8;
3406 
3407   // TODO: Allow 64-bit type for 32-bit target.
3408   // TODO: 512-bit types should be allowed, but make sure that those
3409   // cases are handled in combineVectorSizedSetCCEquality().
3410 
3411   return MVT::INVALID_SIMPLE_VALUE_TYPE;
3412 }
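// Illustrative note: with SSE2 the v16i8 path above lets a 128-bit equality
// test lower to roughly (AT&T syntax; registers are just an example):
//   movdqu (%rdi), %xmm0
//   movdqu (%rsi), %xmm1
//   pcmpeqb %xmm1, %xmm0
//   pmovmskb %xmm0, %eax
//   cmpl $0xffff, %eax        # equal iff all 16 byte-compare bits are set
// The v32i8 case is the same idea with VPCMPEQB/VPMOVMSKB and 0xffffffff.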
3413 
3414 /// Val is the undef sentinel value or equal to the specified value.
3415 static bool isUndefOrEqual(int Val, int CmpVal) {
3416   return ((Val == SM_SentinelUndef) || (Val == CmpVal));
3417 }
3418 
3419 /// Return true if every element in Mask is the undef sentinel value or equal to
3420 /// the specified value.
3421 static bool isUndefOrEqual(ArrayRef<int> Mask, int CmpVal) {
3422   return llvm::all_of(Mask, [CmpVal](int M) {
3423     return (M == SM_SentinelUndef) || (M == CmpVal);
3424   });
3425 }
3426 
3427 /// Return true if every element in Mask, beginning from position Pos and ending
3428 /// in Pos+Size is the undef sentinel value or equal to the specified value.
3429 static bool isUndefOrEqualInRange(ArrayRef<int> Mask, int CmpVal, unsigned Pos,
3430                                   unsigned Size) {
3431   return llvm::all_of(Mask.slice(Pos, Size),
3432                       [CmpVal](int M) { return isUndefOrEqual(M, CmpVal); });
3433 }
3434 
3435 /// Val is either the undef or zero sentinel value.
3436 static bool isUndefOrZero(int Val) {
3437   return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
3438 }
3439 
3440 /// Return true if every element in Mask, beginning from position Pos and ending
3441 /// in Pos+Size is the undef sentinel value.
3442 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
3443   return llvm::all_of(Mask.slice(Pos, Size),
3444                       [](int M) { return M == SM_SentinelUndef; });
3445 }
3446 
3447 /// Return true if the mask creates a vector whose lower half is undefined.
3448 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
3449   unsigned NumElts = Mask.size();
3450   return isUndefInRange(Mask, 0, NumElts / 2);
3451 }
3452 
3453 /// Return true if the mask creates a vector whose upper half is undefined.
3454 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
3455   unsigned NumElts = Mask.size();
3456   return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
3457 }
3458 
3459 /// Return true if Val falls within the half-open range [Low, Hi).
3460 static bool isInRange(int Val, int Low, int Hi) {
3461   return (Val >= Low && Val < Hi);
3462 }
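// For illustration, the interval is half-open: isInRange(0, 0, 4) and
// isInRange(3, 0, 4) return true, while isInRange(4, 0, 4) returns false.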
3463 
3464 /// Return true if the value of any element in Mask falls within the
3465 /// half-open range [Low, Hi).
3466 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
3467   return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
3468 }
3469 
3470 /// Return true if the value of any element in Mask is the zero sentinel value.
3471 static bool isAnyZero(ArrayRef<int> Mask) {
3472   return llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
3473 }
3474 
3475 /// Return true if the value of any element in Mask is the zero or undef
3476 /// sentinel values.
3477 static bool isAnyZeroOrUndef(ArrayRef<int> Mask) {
3478   return llvm::any_of(Mask, [](int M) {
3479     return M == SM_SentinelZero || M == SM_SentinelUndef;
3480   });
3481 }
3482 
3483 /// Return true if Val is undef or if its value falls within the
3484 /// half-open range [Low, Hi).
3485 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3486   return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
3487 }
3488 
3489 /// Return true if every element in Mask is undef or if its value
3490 /// falls within the half-open range [Low, Hi).
3491 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
3492   return llvm::all_of(
3493       Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
3494 }
3495 
3496 /// Return true if Val is undef, zero, or if its value falls within the
3497 /// half-open range [Low, Hi).
3498 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
3499   return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
3500 }
3501 
3502 /// Return true if every element in Mask is undef, zero, or if its value
3503 /// falls within the half-open range [Low, Hi).
3504 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
3505   return llvm::all_of(
3506       Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
3507 }
3508 
3509 /// Return true if every element in Mask, beginning
3510 /// from position Pos and ending in Pos + Size, falls within the specified
3511 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
3512 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
3513                                        unsigned Size, int Low, int Step = 1) {
3514   for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
3515     if (!isUndefOrEqual(Mask[i], Low))
3516       return false;
3517   return true;
3518 }
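// Example (illustrative), writing the undef sentinel as U:
//   isSequentialOrUndefInRange({4, U, 6, 7}, /*Pos=*/0, /*Size=*/4, /*Low=*/4)
// returns true (the defined elements match 4, 5, 6, 7), whereas
//   isSequentialOrUndefInRange({4, 6, U, U}, /*Pos=*/0, /*Size=*/4, /*Low=*/4)
// returns false because the element at index 1 would have to be 5.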
3519 
3520 /// Return true if every element in Mask, beginning
3521 /// from position Pos and ending in Pos+Size, matches the specified
3522 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step), or is undef or zero.
3523 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
3524                                              unsigned Size, int Low,
3525                                              int Step = 1) {
3526   for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
3527     if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
3528       return false;
3529   return true;
3530 }
3531 
3532 /// Return true if every element in Mask, beginning
3533 /// from position Pos and ending in Pos+Size is undef or is zero.
3534 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
3535                                  unsigned Size) {
3536   return llvm::all_of(Mask.slice(Pos, Size), isUndefOrZero);
3537 }
3538 
3539 /// Helper function to test whether a shuffle mask could be
3540 /// simplified by widening the elements being shuffled.
3541 ///
3542 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
3543 /// leaves it in an unspecified state.
3544 ///
3545 /// NOTE: This must handle normal vector shuffle masks and *target* vector
3546 /// shuffle masks. The latter have the special property of a '-2' representing
3547 /// a zero-ed lane of a vector.
3548 static bool canWidenShuffleElements(ArrayRef<int> Mask,
3549                                     SmallVectorImpl<int> &WidenedMask) {
3550   WidenedMask.assign(Mask.size() / 2, 0);
3551   for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
3552     int M0 = Mask[i];
3553     int M1 = Mask[i + 1];
3554 
3555     // If both elements are undef, it's trivial.
3556     if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
3557       WidenedMask[i / 2] = SM_SentinelUndef;
3558       continue;
3559     }
3560 
3561     // Check for an undef mask and a mask value properly aligned to fit with
3562     // a pair of values. If we find such a case, use the non-undef mask's value.
3563     if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
3564       WidenedMask[i / 2] = M1 / 2;
3565       continue;
3566     }
3567     if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
3568       WidenedMask[i / 2] = M0 / 2;
3569       continue;
3570     }
3571 
3572     // When zeroing, we need to spread the zeroing across both lanes to widen.
3573     if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
3574       if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
3575           (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
3576         WidenedMask[i / 2] = SM_SentinelZero;
3577         continue;
3578       }
3579       return false;
3580     }
3581 
3582     // Finally check if the two mask values are adjacent and aligned with
3583     // a pair.
3584     if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
3585       WidenedMask[i / 2] = M0 / 2;
3586       continue;
3587     }
3588 
3589     // Otherwise we can't safely widen the elements used in this shuffle.
3590     return false;
3591   }
3592   assert(WidenedMask.size() == Mask.size() / 2 &&
3593          "Incorrect size of mask after widening the elements!");
3594 
3595   return true;
3596 }
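// Worked examples (illustrative), with U = SM_SentinelUndef, Z = SM_SentinelZero:
//   {0, 1, 6, 7} -> {0, 3}   both pairs are even-aligned and adjacent
//   {U, 1, 2, U} -> {0, 1}   undefs adopt their partner's widened index
//   {Z, Z, 4, 5} -> {Z, 2}   zeroing covers a whole pair, so it widens
//   {1, 2, 4, 5} -> fails    1,2 straddles a pair boundary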
3597 
3598 static bool canWidenShuffleElements(ArrayRef<int> Mask,
3599                                     const APInt &Zeroable,
3600                                     bool V2IsZero,
3601                                     SmallVectorImpl<int> &WidenedMask) {
3602   // Create an alternative mask with info about zeroable elements.
3603   // Here we do not set undef elements as zeroable.
3604   SmallVector<int, 64> ZeroableMask(Mask);
3605   if (V2IsZero) {
3606     assert(!Zeroable.isZero() && "V2's non-undef elements are used?!");
3607     for (int i = 0, Size = Mask.size(); i != Size; ++i)
3608       if (Mask[i] != SM_SentinelUndef && Zeroable[i])
3609         ZeroableMask[i] = SM_SentinelZero;
3610   }
3611   return canWidenShuffleElements(ZeroableMask, WidenedMask);
3612 }
3613 
3614 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
3615   SmallVector<int, 32> WidenedMask;
3616   return canWidenShuffleElements(Mask, WidenedMask);
3617 }
3618 
3619 // Attempt to narrow/widen shuffle mask until it matches the target number of
3620 // elements.
3621 static bool scaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts,
3622                                  SmallVectorImpl<int> &ScaledMask) {
3623   unsigned NumSrcElts = Mask.size();
3624   assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
3625          "Illegal shuffle scale factor");
3626 
3627   // Narrowing is guaranteed to work.
3628   if (NumDstElts >= NumSrcElts) {
3629     int Scale = NumDstElts / NumSrcElts;
3630     llvm::narrowShuffleMaskElts(Scale, Mask, ScaledMask);
3631     return true;
3632   }
3633 
3634   // We have to repeat the widening until we reach the target size, but we can
3635   // split out the first widening as it sets up ScaledMask for us.
3636   if (canWidenShuffleElements(Mask, ScaledMask)) {
3637     while (ScaledMask.size() > NumDstElts) {
3638       SmallVector<int, 16> WidenedMask;
3639       if (!canWidenShuffleElements(ScaledMask, WidenedMask))
3640         return false;
3641       ScaledMask = std::move(WidenedMask);
3642     }
3643     return true;
3644   }
3645 
3646   return false;
3647 }
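// Illustrative examples:
//   {0, 3} scaled to 4 destination elements narrows to {0, 1, 6, 7};
//   {0, 1, 6, 7} scaled to 2 destination elements widens back to {0, 3};
//   {1, 0} scaled to 1 destination element fails, since the swapped pair
//   cannot be represented by a single wide element.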
3648 
3649 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
3650 bool X86::isZeroNode(SDValue Elt) {
3651   return isNullConstant(Elt) || isNullFPConstant(Elt);
3652 }
3653 
3654 // Build a vector of constants.
3655 // Use an UNDEF node if MaskElt == -1.
3656 // Split 64-bit constants in the 32-bit mode.
3657 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
3658                               const SDLoc &dl, bool IsMask = false) {
3659 
3660   SmallVector<SDValue, 32>  Ops;
3661   bool Split = false;
3662 
3663   MVT ConstVecVT = VT;
3664   unsigned NumElts = VT.getVectorNumElements();
3665   bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
3666   if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
3667     ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
3668     Split = true;
3669   }
3670 
3671   MVT EltVT = ConstVecVT.getVectorElementType();
3672   for (unsigned i = 0; i < NumElts; ++i) {
3673     bool IsUndef = Values[i] < 0 && IsMask;
3674     SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
3675       DAG.getConstant(Values[i], dl, EltVT);
3676     Ops.push_back(OpNode);
3677     if (Split)
3678       Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
3679                     DAG.getConstant(0, dl, EltVT));
3680   }
3681   SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
3682   if (Split)
3683     ConstsNode = DAG.getBitcast(VT, ConstsNode);
3684   return ConstsNode;
3685 }
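// For illustration: on a 32-bit target (i64 not legal), a request for a v2i64
// mask with Values = {3, -1} and IsMask = true is built as the v4i32 vector
// <3, 0, undef, undef> and then bitcast back to v2i64.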
3686 
3687 static SDValue getConstVector(ArrayRef<APInt> Bits, const APInt &Undefs,
3688                               MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
3689   assert(Bits.size() == Undefs.getBitWidth() &&
3690          "Unequal constant and undef arrays");
3691   SmallVector<SDValue, 32> Ops;
3692   bool Split = false;
3693 
3694   MVT ConstVecVT = VT;
3695   unsigned NumElts = VT.getVectorNumElements();
3696   bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
3697   if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
3698     ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
3699     Split = true;
3700   }
3701 
3702   MVT EltVT = ConstVecVT.getVectorElementType();
3703   for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
3704     if (Undefs[i]) {
3705       Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
3706       continue;
3707     }
3708     const APInt &V = Bits[i];
3709     assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
3710     if (Split) {
3711       Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
3712       Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
3713     } else if (EltVT == MVT::f32) {
3714       APFloat FV(APFloat::IEEEsingle(), V);
3715       Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
3716     } else if (EltVT == MVT::f64) {
3717       APFloat FV(APFloat::IEEEdouble(), V);
3718       Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
3719     } else {
3720       Ops.push_back(DAG.getConstant(V, dl, EltVT));
3721     }
3722   }
3723 
3724   SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
3725   return DAG.getBitcast(VT, ConstsNode);
3726 }
3727 
3728 static SDValue getConstVector(ArrayRef<APInt> Bits, MVT VT,
3729                               SelectionDAG &DAG, const SDLoc &dl) {
3730   APInt Undefs = APInt::getZero(Bits.size());
3731   return getConstVector(Bits, Undefs, VT, DAG, dl);
3732 }
3733 
3734 /// Returns a vector of specified type with all zero elements.
3735 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
3736                              SelectionDAG &DAG, const SDLoc &dl) {
3737   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
3738           VT.getVectorElementType() == MVT::i1) &&
3739          "Unexpected vector type");
3740 
3741   // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
3742   // type. This ensures they get CSE'd. But if the integer type is not
3743   // available, use a floating-point +0.0 instead.
3744   SDValue Vec;
3745   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3746   if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
3747     Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
3748   } else if (VT.isFloatingPoint() &&
3749              TLI.isTypeLegal(VT.getVectorElementType())) {
3750     Vec = DAG.getConstantFP(+0.0, dl, VT);
3751   } else if (VT.getVectorElementType() == MVT::i1) {
3752     assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
3753            "Unexpected vector type");
3754     Vec = DAG.getConstant(0, dl, VT);
3755   } else {
3756     unsigned Num32BitElts = VT.getSizeInBits() / 32;
3757     Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
3758   }
3759   return DAG.getBitcast(VT, Vec);
3760 }
3761 
3762 // Helper to determine if LHS and RHS are subvectors extracted from the same
3763 // source. If commuting is allowed they don't have to be in (Lo/Hi) order.
3764 static SDValue getSplitVectorSrc(SDValue LHS, SDValue RHS, bool AllowCommute) {
3765   if (LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
3766       RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
3767       LHS.getValueType() != RHS.getValueType() ||
3768       LHS.getOperand(0) != RHS.getOperand(0))
3769     return SDValue();
3770 
3771   SDValue Src = LHS.getOperand(0);
3772   if (Src.getValueSizeInBits() != (LHS.getValueSizeInBits() * 2))
3773     return SDValue();
3774 
3775   unsigned NumElts = LHS.getValueType().getVectorNumElements();
3776   if ((LHS.getConstantOperandAPInt(1) == 0 &&
3777        RHS.getConstantOperandAPInt(1) == NumElts) ||
3778       (AllowCommute && RHS.getConstantOperandAPInt(1) == 0 &&
3779        LHS.getConstantOperandAPInt(1) == NumElts))
3780     return Src;
3781 
3782   return SDValue();
3783 }
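// Example (illustrative): if X is a v8i32 value and
//   LHS = extract_subvector(X, 0), RHS = extract_subvector(X, 4)
// are its v4i32 halves, then getSplitVectorSrc(LHS, RHS, AllowCommute)
// returns X. With AllowCommute the Lo/Hi extracts may appear swapped.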
3784 
3785 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
3786                                 const SDLoc &dl, unsigned vectorWidth) {
3787   EVT VT = Vec.getValueType();
3788   EVT ElVT = VT.getVectorElementType();
3789   unsigned Factor = VT.getSizeInBits() / vectorWidth;
3790   EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
3791                                   VT.getVectorNumElements() / Factor);
3792 
3793   // Extract the relevant vectorWidth bits.  Generate an EXTRACT_SUBVECTOR
3794   unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
3795   assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
3796 
3797   // This is the index of the first element of the vectorWidth-bit chunk
3798   // we want. Since ElemsPerChunk is a power of 2, we just need to clear bits.
3799   IdxVal &= ~(ElemsPerChunk - 1);
3800 
3801   // If the input is a buildvector just emit a smaller one.
3802   if (Vec.getOpcode() == ISD::BUILD_VECTOR)
3803     return DAG.getBuildVector(ResultVT, dl,
3804                               Vec->ops().slice(IdxVal, ElemsPerChunk));
3805 
3806   // Check if we're extracting the upper undef of a widening pattern.
3807   if (Vec.getOpcode() == ISD::INSERT_SUBVECTOR && Vec.getOperand(0).isUndef() &&
3808       Vec.getOperand(1).getValueType().getVectorNumElements() <= IdxVal &&
3809       isNullConstant(Vec.getOperand(2)))
3810     return DAG.getUNDEF(ResultVT);
3811 
3812   SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
3813   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
3814 }
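// Note (illustrative): the index is rounded down to a whole chunk. Extracting
// 128 bits from a v8f32 with IdxVal = 5 gives ElemsPerChunk = 4, so the low
// bits of IdxVal are cleared and the extract yields elements <4, 5, 6, 7>.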
3815 
3816 /// Generate a DAG to grab 128-bits from a vector > 128 bits.  This
3817 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
3818 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
3819 /// instructions or a simple subregister reference. Idx is an index in the
3820 /// 128 bits we want.  It need not be aligned to a 128-bit boundary.  That makes
3821 /// lowering EXTRACT_VECTOR_ELT operations easier.
3822 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
3823                                    SelectionDAG &DAG, const SDLoc &dl) {
3824   assert((Vec.getValueType().is256BitVector() ||
3825           Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
3826   return extractSubVector(Vec, IdxVal, DAG, dl, 128);
3827 }
3828 
3829 /// Generate a DAG to grab 256-bits from a 512-bit vector.
3830 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
3831                                    SelectionDAG &DAG, const SDLoc &dl) {
3832   assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
3833   return extractSubVector(Vec, IdxVal, DAG, dl, 256);
3834 }
3835 
3836 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
3837                                SelectionDAG &DAG, const SDLoc &dl,
3838                                unsigned vectorWidth) {
3839   assert((vectorWidth == 128 || vectorWidth == 256) &&
3840          "Unsupported vector width");
3841   // Inserting UNDEF just returns Result.
3842   if (Vec.isUndef())
3843     return Result;
3844   EVT VT = Vec.getValueType();
3845   EVT ElVT = VT.getVectorElementType();
3846   EVT ResultVT = Result.getValueType();
3847 
3848   // Insert the relevant vectorWidth bits.
3849   unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
3850   assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
3851 
3852   // This is the index of the first element of the vectorWidth-bit chunk
3853   // we want. Since ElemsPerChunk is a power of 2, we just need to clear bits.
3854   IdxVal &= ~(ElemsPerChunk - 1);
3855 
3856   SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
3857   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
3858 }
3859 
3860 /// Generate a DAG to put 128-bits into a vector > 128 bits.  This
3861 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
3862 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
3863 /// simple superregister reference.  Idx is an index in the 128 bits
3864 /// we want.  It need not be aligned to a 128-bit boundary.  That makes
3865 /// lowering INSERT_VECTOR_ELT operations easier.
3866 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
3867                                   SelectionDAG &DAG, const SDLoc &dl) {
3868   assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
3869   return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
3870 }
3871 
3872 /// Widen a vector to a larger size with the same scalar type, with the new
3873 /// elements either zero or undef.
3874 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
3875                               const X86Subtarget &Subtarget, SelectionDAG &DAG,
3876                               const SDLoc &dl) {
3877   assert(Vec.getValueSizeInBits().getFixedValue() <= VT.getFixedSizeInBits() &&
3878          Vec.getValueType().getScalarType() == VT.getScalarType() &&
3879          "Unsupported vector widening type");
3880   SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
3881                                 : DAG.getUNDEF(VT);
3882   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
3883                      DAG.getIntPtrConstant(0, dl));
3884 }
3885 
3886 /// Widen a vector to a larger size with the same scalar type, with the new
3887 /// elements either zero or undef.
3888 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
3889                               const X86Subtarget &Subtarget, SelectionDAG &DAG,
3890                               const SDLoc &dl, unsigned WideSizeInBits) {
3891   assert(Vec.getValueSizeInBits() <= WideSizeInBits &&
3892          (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
3893          "Unsupported vector widening type");
3894   unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
3895   MVT SVT = Vec.getSimpleValueType().getScalarType();
3896   MVT VT = MVT::getVectorVT(SVT, WideNumElts);
3897   return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
3898 }
3899 
3900 /// Widen a mask vector type to a minimum of v8i1/v16i1 to allow use of KSHIFT
3901 /// and bitcast with integer types.
3902 static MVT widenMaskVectorType(MVT VT, const X86Subtarget &Subtarget) {
3903   assert(VT.getVectorElementType() == MVT::i1 && "Expected bool vector");
3904   unsigned NumElts = VT.getVectorNumElements();
3905   if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
3906     return Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
3907   return VT;
3908 }
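// Examples (illustrative): v4i1 widens to v8i1 with AVX512DQ and to v16i1
// without it; v8i1 stays v8i1 with AVX512DQ but widens to v16i1 without it;
// v16i1 and wider mask types are returned unchanged.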
3909 
3910 /// Widen a mask vector to a minimum of v8i1/v16i1 to allow use of KSHIFT and
3911 /// bitcast with integer types.
3912 static SDValue widenMaskVector(SDValue Vec, bool ZeroNewElements,
3913                                const X86Subtarget &Subtarget, SelectionDAG &DAG,
3914                                const SDLoc &dl) {
3915   MVT VT = widenMaskVectorType(Vec.getSimpleValueType(), Subtarget);
3916   return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
3917 }
3918 
3919 // Helper function to collect subvector ops that are concatenated together,
3920 // either by ISD::CONCAT_VECTORS or an ISD::INSERT_SUBVECTOR series.
3921 // The subvectors in Ops are guaranteed to be the same type.
3922 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops,
3923                              SelectionDAG &DAG) {
3924   assert(Ops.empty() && "Expected an empty ops vector");
3925 
3926   if (N->getOpcode() == ISD::CONCAT_VECTORS) {
3927     Ops.append(N->op_begin(), N->op_end());
3928     return true;
3929   }
3930 
3931   if (N->getOpcode() == ISD::INSERT_SUBVECTOR) {
3932     SDValue Src = N->getOperand(0);
3933     SDValue Sub = N->getOperand(1);
3934     const APInt &Idx = N->getConstantOperandAPInt(2);
3935     EVT VT = Src.getValueType();
3936     EVT SubVT = Sub.getValueType();
3937 
3938     // TODO - Handle more general insert_subvector chains.
3939     if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2)) {
3940       // insert_subvector(undef, x, lo)
3941       if (Idx == 0 && Src.isUndef()) {
3942         Ops.push_back(Sub);
3943         Ops.push_back(DAG.getUNDEF(SubVT));
3944         return true;
3945       }
3946       if (Idx == (VT.getVectorNumElements() / 2)) {
3947         // insert_subvector(insert_subvector(undef, x, lo), y, hi)
3948         if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
3949             Src.getOperand(1).getValueType() == SubVT &&
3950             isNullConstant(Src.getOperand(2))) {
3951           Ops.push_back(Src.getOperand(1));
3952           Ops.push_back(Sub);
3953           return true;
3954         }
3955         // insert_subvector(x, extract_subvector(x, lo), hi)
3956         if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
3957             Sub.getOperand(0) == Src && isNullConstant(Sub.getOperand(1))) {
3958           Ops.append(2, Sub);
3959           return true;
3960         }
3961         // insert_subvector(undef, x, hi)
3962         if (Src.isUndef()) {
3963           Ops.push_back(DAG.getUNDEF(SubVT));
3964           Ops.push_back(Sub);
3965           return true;
3966         }
3967       }
3968     }
3969   }
3970 
3971   return false;
3972 }
3973 
3974 // Helper to check if \p V can be split into subvectors and the upper
3975 // subvectors are all undef, in which case return the lower subvector.
3976 static SDValue isUpperSubvectorUndef(SDValue V, const SDLoc &DL,
3977                                      SelectionDAG &DAG) {
3978   SmallVector<SDValue> SubOps;
3979   if (!collectConcatOps(V.getNode(), SubOps, DAG))
3980     return SDValue();
3981 
3982   unsigned NumSubOps = SubOps.size();
3983   unsigned HalfNumSubOps = NumSubOps / 2;
3984   assert((NumSubOps % 2) == 0 && "Unexpected number of subvectors");
3985 
3986   ArrayRef<SDValue> UpperOps(SubOps.begin() + HalfNumSubOps, SubOps.end());
3987   if (any_of(UpperOps, [](SDValue Op) { return !Op.isUndef(); }))
3988     return SDValue();
3989 
3990   EVT HalfVT = V.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
3991   ArrayRef<SDValue> LowerOps(SubOps.begin(), SubOps.begin() + HalfNumSubOps);
3992   return DAG.getNode(ISD::CONCAT_VECTORS, DL, HalfVT, LowerOps);
3993 }
3994 
3995 // Helper to check if we can access all the constituent subvectors without any
3996 // extract ops.
3997 static bool isFreeToSplitVector(SDNode *N, SelectionDAG &DAG) {
3998   SmallVector<SDValue> Ops;
3999   return collectConcatOps(N, Ops, DAG);
4000 }
4001 
4002 static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
4003                                                const SDLoc &dl) {
4004   EVT VT = Op.getValueType();
4005   unsigned NumElems = VT.getVectorNumElements();
4006   unsigned SizeInBits = VT.getSizeInBits();
4007   assert((NumElems % 2) == 0 && (SizeInBits % 2) == 0 &&
4008          "Can't split odd sized vector");
4009 
4010   // If this is a splat value (with no-undefs) then use the lower subvector,
4011   // which should be a free extraction.
4012   SDValue Lo = extractSubVector(Op, 0, DAG, dl, SizeInBits / 2);
4013   if (DAG.isSplatValue(Op, /*AllowUndefs*/ false))
4014     return std::make_pair(Lo, Lo);
4015 
4016   SDValue Hi = extractSubVector(Op, NumElems / 2, DAG, dl, SizeInBits / 2);
4017   return std::make_pair(Lo, Hi);
4018 }
4019 
4020 /// Break an operation into 2 half sized ops and then concatenate the results.
4021 static SDValue splitVectorOp(SDValue Op, SelectionDAG &DAG) {
4022   unsigned NumOps = Op.getNumOperands();
4023   EVT VT = Op.getValueType();
4024   SDLoc dl(Op);
4025 
4026   // Split each vector operand into Lo/Hi halves; scalars are reused as-is.
4027   SmallVector<SDValue> LoOps(NumOps, SDValue());
4028   SmallVector<SDValue> HiOps(NumOps, SDValue());
4029   for (unsigned I = 0; I != NumOps; ++I) {
4030     SDValue SrcOp = Op.getOperand(I);
4031     if (!SrcOp.getValueType().isVector()) {
4032       LoOps[I] = HiOps[I] = SrcOp;
4033       continue;
4034     }
4035     std::tie(LoOps[I], HiOps[I]) = splitVector(SrcOp, DAG, dl);
4036   }
4037 
4038   EVT LoVT, HiVT;
4039   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
4040   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
4041                      DAG.getNode(Op.getOpcode(), dl, LoVT, LoOps),
4042                      DAG.getNode(Op.getOpcode(), dl, HiVT, HiOps));
4043 }
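// For illustration, splitting a 512-bit add that should be done in 256-bit
// halves rewrites
//   v16i32 (add X, Y)
// as
//   concat_vectors (add X.lo, Y.lo), (add X.hi, Y.hi)
// where X.lo/X.hi are the extracted v8i32 halves; non-vector operands (e.g. a
// scalar shift amount) are passed unchanged to both halves.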
4044 
4045 /// Break a unary integer operation into 2 half sized ops and then
4046 /// concatenate the result back.
4047 static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
4048   // Make sure we only try to split 256/512-bit types to avoid creating
4049   // narrow vectors.
4050   EVT VT = Op.getValueType();
4051   (void)VT;
4052   assert((Op.getOperand(0).getValueType().is256BitVector() ||
4053           Op.getOperand(0).getValueType().is512BitVector()) &&
4054          (VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
4055   assert(Op.getOperand(0).getValueType().getVectorNumElements() ==
4056              VT.getVectorNumElements() &&
4057          "Unexpected VTs!");
4058   return splitVectorOp(Op, DAG);
4059 }
4060 
4061 /// Break a binary integer operation into 2 half sized ops and then
4062 /// concatenate the result back.
4063 static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG) {
4064   // Assert that all the types match.
4065   EVT VT = Op.getValueType();
4066   (void)VT;
4067   assert(Op.getOperand(0).getValueType() == VT &&
4068          Op.getOperand(1).getValueType() == VT && "Unexpected VTs!");
4069   assert((VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
4070   return splitVectorOp(Op, DAG);
4071 }
4072 
4073 // Helper for splitting operands of an operation to legal target size and
4074 // applying a function on each part.
4075 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
4076 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
4077 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
4078 // The argument Builder is a function that will be applied on each split part:
4079 // SDValue Builder(SelectionDAG&G, SDLoc, ArrayRef<SDValue>)
4080 template <typename F>
4081 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
4082                          const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
4083                          F Builder, bool CheckBWI = true) {
4084   assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
4085   unsigned NumSubs = 1;
4086   if ((CheckBWI && Subtarget.useBWIRegs()) ||
4087       (!CheckBWI && Subtarget.useAVX512Regs())) {
4088     if (VT.getSizeInBits() > 512) {
4089       NumSubs = VT.getSizeInBits() / 512;
4090       assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
4091     }
4092   } else if (Subtarget.hasAVX2()) {
4093     if (VT.getSizeInBits() > 256) {
4094       NumSubs = VT.getSizeInBits() / 256;
4095       assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
4096     }
4097   } else {
4098     if (VT.getSizeInBits() > 128) {
4099       NumSubs = VT.getSizeInBits() / 128;
4100       assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
4101     }
4102   }
4103 
4104   if (NumSubs == 1)
4105     return Builder(DAG, DL, Ops);
4106 
4107   SmallVector<SDValue, 4> Subs;
4108   for (unsigned i = 0; i != NumSubs; ++i) {
4109     SmallVector<SDValue, 2> SubOps;
4110     for (SDValue Op : Ops) {
4111       EVT OpVT = Op.getValueType();
4112       unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
4113       unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
4114       SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
4115     }
4116     Subs.push_back(Builder(DAG, DL, SubOps));
4117   }
4118   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
4119 }
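// Hypothetical usage sketch (the builder, N0/N1 and VT below are illustrative
// names only):
//   auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
//                            ArrayRef<SDValue> Ops) {
//     MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
//     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops);
//   };
//   SDValue R = SplitOpsAndApply(DAG, Subtarget, DL, VT, {N0, N1},
//                                PMADDWDBuilder);
// The helper splits N0/N1 into 128/256/512-bit pieces as the subtarget allows,
// applies the builder to each piece and concatenates the results back to VT.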
4120 
4121 // Helper function that extends a non-512-bit vector op to 512-bits on non-VLX
4122 // targets.
4123 static SDValue getAVX512Node(unsigned Opcode, const SDLoc &DL, MVT VT,
4124                              ArrayRef<SDValue> Ops, SelectionDAG &DAG,
4125                              const X86Subtarget &Subtarget) {
4126   assert(Subtarget.hasAVX512() && "AVX512 target expected");
4127   MVT SVT = VT.getScalarType();
4128 
4129   // If we have a 32/64 splatted constant, splat it to DstTy to
4130   // encourage a foldable broadcast'd operand.
4131   auto MakeBroadcastOp = [&](SDValue Op, MVT OpVT, MVT DstVT) {
4132     unsigned OpEltSizeInBits = OpVT.getScalarSizeInBits();
4133     // AVX512 broadcasts 32/64-bit operands.
4134     // TODO: Support float once getAVX512Node is used by fp-ops.
4135     if (!OpVT.isInteger() || OpEltSizeInBits < 32 ||
4136         !DAG.getTargetLoweringInfo().isTypeLegal(SVT))
4137       return SDValue();
4138     // If we're not widening, don't bother if we're not bitcasting.
4139     if (OpVT == DstVT && Op.getOpcode() != ISD::BITCAST)
4140       return SDValue();
4141     if (auto *BV = dyn_cast<BuildVectorSDNode>(peekThroughBitcasts(Op))) {
4142       APInt SplatValue, SplatUndef;
4143       unsigned SplatBitSize;
4144       bool HasAnyUndefs;
4145       if (BV->isConstantSplat(SplatValue, SplatUndef, SplatBitSize,
4146                               HasAnyUndefs, OpEltSizeInBits) &&
4147           !HasAnyUndefs && SplatValue.getBitWidth() == OpEltSizeInBits)
4148         return DAG.getConstant(SplatValue, DL, DstVT);
4149     }
4150     return SDValue();
4151   };
4152 
4153   bool Widen = !(Subtarget.hasVLX() || VT.is512BitVector());
4154 
4155   MVT DstVT = VT;
4156   if (Widen)
4157     DstVT = MVT::getVectorVT(SVT, 512 / SVT.getSizeInBits());
4158 
4159   // Canonicalize src operands.
4160   SmallVector<SDValue> SrcOps(Ops.begin(), Ops.end());
4161   for (SDValue &Op : SrcOps) {
4162     MVT OpVT = Op.getSimpleValueType();
4163     // Just pass through scalar operands.
4164     if (!OpVT.isVector())
4165       continue;
4166     assert(OpVT == VT && "Vector type mismatch");
4167 
4168     if (SDValue BroadcastOp = MakeBroadcastOp(Op, OpVT, DstVT)) {
4169       Op = BroadcastOp;
4170       continue;
4171     }
4172 
4173     // Just widen the subvector by inserting into an undef wide vector.
4174     if (Widen)
4175       Op = widenSubVector(Op, false, Subtarget, DAG, DL, 512);
4176   }
4177 
4178   SDValue Res = DAG.getNode(Opcode, DL, DstVT, SrcOps);
4179 
4180   // Perform the 512-bit op then extract the bottom subvector.
4181   if (Widen)
4182     Res = extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
4183   return Res;
4184 }
4185 
4186 /// Insert an i1 subvector into an i1 vector.
4187 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
4188                                 const X86Subtarget &Subtarget) {
4189 
4190   SDLoc dl(Op);
4191   SDValue Vec = Op.getOperand(0);
4192   SDValue SubVec = Op.getOperand(1);
4193   SDValue Idx = Op.getOperand(2);
4194   unsigned IdxVal = Op.getConstantOperandVal(2);
4195 
4196   // Inserting undef is a nop. We can just return the original vector.
4197   if (SubVec.isUndef())
4198     return Vec;
4199 
4200   if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
4201     return Op;
4202 
4203   MVT OpVT = Op.getSimpleValueType();
4204   unsigned NumElems = OpVT.getVectorNumElements();
4205   SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
4206 
4207   // Extend to natively supported kshift.
4208   MVT WideOpVT = widenMaskVectorType(OpVT, Subtarget);
4209 
4210   // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
4211   // if necessary.
4212   if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
4213     // May need to promote to a legal type.
4214     Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4215                      DAG.getConstant(0, dl, WideOpVT),
4216                      SubVec, Idx);
4217     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4218   }
4219 
4220   MVT SubVecVT = SubVec.getSimpleValueType();
4221   unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
4222   assert(IdxVal + SubVecNumElems <= NumElems &&
4223          IdxVal % SubVecVT.getSizeInBits() == 0 &&
4224          "Unexpected index value in INSERT_SUBVECTOR");
4225 
4226   SDValue Undef = DAG.getUNDEF(WideOpVT);
4227 
4228   if (IdxVal == 0) {
4229     // Zero lower bits of the Vec
4230     SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
4231     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
4232                       ZeroIdx);
4233     Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
4234     Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
4235     // Merge them together, SubVec should be zero extended.
4236     SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4237                          DAG.getConstant(0, dl, WideOpVT),
4238                          SubVec, ZeroIdx);
4239     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
4240     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4241   }
4242 
4243   SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4244                        Undef, SubVec, ZeroIdx);
4245 
4246   if (Vec.isUndef()) {
4247     assert(IdxVal != 0 && "Unexpected index");
4248     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4249                          DAG.getTargetConstant(IdxVal, dl, MVT::i8));
4250     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
4251   }
4252 
4253   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
4254     assert(IdxVal != 0 && "Unexpected index");
4255     // If upper elements of Vec are known undef, then just shift into place.
4256     if (llvm::all_of(Vec->ops().slice(IdxVal + SubVecNumElems),
4257                      [](SDValue V) { return V.isUndef(); })) {
4258       SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4259                            DAG.getTargetConstant(IdxVal, dl, MVT::i8));
4260     } else {
4261       NumElems = WideOpVT.getVectorNumElements();
4262       unsigned ShiftLeft = NumElems - SubVecNumElems;
4263       unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
4264       SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4265                            DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
4266       if (ShiftRight != 0)
4267         SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
4268                              DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
4269     }
4270     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
4271   }
4272 
4273   // Simple case when we put subvector in the upper part
4274   if (IdxVal + SubVecNumElems == NumElems) {
4275     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4276                          DAG.getTargetConstant(IdxVal, dl, MVT::i8));
4277     if (SubVecNumElems * 2 == NumElems) {
4278       // Special case, use legal zero extending insert_subvector. This allows
4279       // isel to optimize when bits are known zero.
4280       Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
4281       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4282                         DAG.getConstant(0, dl, WideOpVT),
4283                         Vec, ZeroIdx);
4284     } else {
4285       // Otherwise use explicit shifts to zero the bits.
4286       Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
4287                         Undef, Vec, ZeroIdx);
4288       NumElems = WideOpVT.getVectorNumElements();
4289       SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
4290       Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
4291       Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
4292     }
4293     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
4294     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4295   }
4296 
4297   // Inserting into the middle is more complicated.
4298 
4299   NumElems = WideOpVT.getVectorNumElements();
4300 
4301   // Widen the vector if needed.
4302   Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
4303 
4304   unsigned ShiftLeft = NumElems - SubVecNumElems;
4305   unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
4306 
4307   // Do an optimization for the most frequently used types.
4308   if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
4309     APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
4310     Mask0.flipAllBits();
4311     SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
4312     SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
4313     Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
4314     SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4315                          DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
4316     SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
4317                          DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
4318     Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
4319 
4320     // Reduce to original width if needed.
4321     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
4322   }
4323 
4324   // Clear the upper bits of the subvector and move it to its insert position.
4325   SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
4326                        DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
4327   SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
4328                        DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
4329 
4330   // Isolate the bits below the insertion point.
4331   unsigned LowShift = NumElems - IdxVal;
4332   SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
4333                             DAG.getTargetConstant(LowShift, dl, MVT::i8));
4334   Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
4335                     DAG.getTargetConstant(LowShift, dl, MVT::i8));
4336 
4337   // Isolate the bits after the last inserted bit.
4338   unsigned HighShift = IdxVal + SubVecNumElems;
4339   SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
4340                             DAG.getTargetConstant(HighShift, dl, MVT::i8));
4341   High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
4342                     DAG.getTargetConstant(HighShift, dl, MVT::i8));
4343 
4344   // Now OR all 3 pieces together.
4345   Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
4346   SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
4347 
4348   // Reduce to original width if needed.
4349   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
4350 }
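// Worked example of the mask-and-shift path above (illustrative): inserting a
// v2i1 subvector at IdxVal = 4 into a v16i1 vector gives ShiftLeft = 14 and
// ShiftRight = 10. KSHIFTL by 14 then KSHIFTR by 10 leaves the two subvector
// bits at positions [4, 6) with everything else zero, the AND with ~0x0030
// clears those same positions in Vec, and the final OR merges the two values.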
4351 
4352 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
4353                                 const SDLoc &dl) {
4354   assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
4355   EVT SubVT = V1.getValueType();
4356   EVT SubSVT = SubVT.getScalarType();
4357   unsigned SubNumElts = SubVT.getVectorNumElements();
4358   unsigned SubVectorWidth = SubVT.getSizeInBits();
4359   EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
4360   SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
4361   return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
4362 }
4363 
4364 /// Returns a vector of specified type with all bits set.
4365 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
4366 /// Then bitcast to their original type, ensuring they get CSE'd.
4367 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
4368   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
4369          "Expected a 128/256/512-bit vector type");
4370 
4371   APInt Ones = APInt::getAllOnes(32);
4372   unsigned NumElts = VT.getSizeInBits() / 32;
4373   SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
4374   return DAG.getBitcast(VT, Vec);
4375 }
4376 
4377 static SDValue getEXTEND_VECTOR_INREG(unsigned Opcode, const SDLoc &DL, EVT VT,
4378                                       SDValue In, SelectionDAG &DAG) {
4379   EVT InVT = In.getValueType();
4380   assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
4381   assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
4382           ISD::ZERO_EXTEND == Opcode) &&
4383          "Unknown extension opcode");
4384 
4385   // For 256-bit vectors, we only need the lower (128-bit) input half.
4386   // For 512-bit vectors, we only need the lower input half or quarter.
4387   if (InVT.getSizeInBits() > 128) {
4388     assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
4389            "Expected VTs to be the same size!");
4390     unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
4391     In = extractSubVector(In, 0, DAG, DL,
4392                           std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
4393     InVT = In.getValueType();
4394   }
4395 
4396   if (VT.getVectorNumElements() != InVT.getVectorNumElements())
4397     Opcode = DAG.getOpcode_EXTEND_VECTOR_INREG(Opcode);
4398 
4399   return DAG.getNode(Opcode, DL, VT, In);
4400 }
4401 
4402 // Create OR(AND(LHS,MASK),AND(RHS,~MASK)) bit select pattern
4403 static SDValue getBitSelect(const SDLoc &DL, MVT VT, SDValue LHS, SDValue RHS,
4404                             SDValue Mask, SelectionDAG &DAG) {
4405   LHS = DAG.getNode(ISD::AND, DL, VT, LHS, Mask);
4406   RHS = DAG.getNode(X86ISD::ANDNP, DL, VT, Mask, RHS);
4407   return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
4408 }
4409 
4410 void llvm::createUnpackShuffleMask(EVT VT, SmallVectorImpl<int> &Mask,
4411                                    bool Lo, bool Unary) {
4412   assert(VT.getScalarType().isSimple() && (VT.getSizeInBits() % 128) == 0 &&
4413          "Illegal vector type to unpack");
4414   assert(Mask.empty() && "Expected an empty shuffle mask vector");
4415   int NumElts = VT.getVectorNumElements();
4416   int NumEltsInLane = 128 / VT.getScalarSizeInBits();
4417   for (int i = 0; i < NumElts; ++i) {
4418     unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
4419     int Pos = (i % NumEltsInLane) / 2 + LaneStart;
4420     Pos += (Unary ? 0 : NumElts * (i % 2));
4421     Pos += (Lo ? 0 : NumEltsInLane / 2);
4422     Mask.push_back(Pos);
4423   }
4424 }
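// Examples (illustrative) for v8i16:
//   Lo, binary: <0, 8, 1, 9, 2, 10, 3, 11>    (the PUNPCKLWD pattern)
//   Hi, binary: <4, 12, 5, 13, 6, 14, 7, 15>  (the PUNPCKHWD pattern)
//   Lo, unary:  <0, 0, 1, 1, 2, 2, 3, 3>      (both inputs are the same vector)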
4425 
4426 /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
4427 /// imposed by AVX and specific to the unary pattern. Example:
4428 /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
4429 /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
4430 void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
4431                                    bool Lo) {
4432   assert(Mask.empty() && "Expected an empty shuffle mask vector");
4433   int NumElts = VT.getVectorNumElements();
4434   for (int i = 0; i < NumElts; ++i) {
4435     int Pos = i / 2;
4436     Pos += (Lo ? 0 : NumElts / 2);
4437     Mask.push_back(Pos);
4438   }
4439 }
4440 
4441 // Attempt to constant fold, else just create a VECTOR_SHUFFLE.
4442 static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
4443                                 SDValue V1, SDValue V2, ArrayRef<int> Mask) {
4444   if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) || V1.isUndef()) &&
4445       (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) || V2.isUndef())) {
4446     SmallVector<SDValue> Ops(Mask.size(), DAG.getUNDEF(VT.getScalarType()));
4447     for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
4448       int M = Mask[I];
4449       if (M < 0)
4450         continue;
4451       SDValue V = (M < NumElts) ? V1 : V2;
4452       if (V.isUndef())
4453         continue;
4454       Ops[I] = V.getOperand(M % NumElts);
4455     }
4456     return DAG.getBuildVector(VT, dl, Ops);
4457   }
4458 
4459   return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
4460 }
4461 
4462 /// Returns a vector_shuffle node for an unpackl operation.
4463 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
4464                           SDValue V1, SDValue V2) {
4465   SmallVector<int, 8> Mask;
4466   createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
4467   return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
4468 }
4469 
4470 /// Returns a vector_shuffle node for an unpackh operation.
4471 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
4472                           SDValue V1, SDValue V2) {
4473   SmallVector<int, 8> Mask;
4474   createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
4475   return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
4476 }
4477 
4478 /// Returns a node that packs the LHS + RHS nodes together at half width.
4479 /// May return X86ISD::PACKSS/PACKUS, packing the top/bottom half.
4480 /// TODO: Add subvector splitting if/when we have a need for it.
4481 static SDValue getPack(SelectionDAG &DAG, const X86Subtarget &Subtarget,
4482                        const SDLoc &dl, MVT VT, SDValue LHS, SDValue RHS,
4483                        bool PackHiHalf = false) {
4484   MVT OpVT = LHS.getSimpleValueType();
4485   unsigned EltSizeInBits = VT.getScalarSizeInBits();
4486   bool UsePackUS = Subtarget.hasSSE41() || EltSizeInBits == 8;
4487   assert(OpVT == RHS.getSimpleValueType() &&
4488          VT.getSizeInBits() == OpVT.getSizeInBits() &&
4489          (EltSizeInBits * 2) == OpVT.getScalarSizeInBits() &&
4490          "Unexpected PACK operand types");
4491   assert((EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) &&
4492          "Unexpected PACK result type");
4493 
4494   // Rely on vector shuffles for vXi64 -> vXi32 packing.
4495   if (EltSizeInBits == 32) {
4496     SmallVector<int> PackMask;
4497     int Offset = PackHiHalf ? 1 : 0;
4498     int NumElts = VT.getVectorNumElements();
4499     for (int I = 0; I != NumElts; I += 4) {
4500       PackMask.push_back(I + Offset);
4501       PackMask.push_back(I + Offset + 2);
4502       PackMask.push_back(I + Offset + NumElts);
4503       PackMask.push_back(I + Offset + NumElts + 2);
4504     }
4505     return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, LHS),
4506                                 DAG.getBitcast(VT, RHS), PackMask);
4507   }
4508 
4509   // See if we already have sufficient leading bits for PACKSS/PACKUS.
4510   if (!PackHiHalf) {
4511     if (UsePackUS &&
4512         DAG.computeKnownBits(LHS).countMaxActiveBits() <= EltSizeInBits &&
4513         DAG.computeKnownBits(RHS).countMaxActiveBits() <= EltSizeInBits)
4514       return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
4515 
4516     if (DAG.ComputeMaxSignificantBits(LHS) <= EltSizeInBits &&
4517         DAG.ComputeMaxSignificantBits(RHS) <= EltSizeInBits)
4518       return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
4519   }
4520 
4521   // Fallback to sign/zero extending the requested half and pack.
4522   SDValue Amt = DAG.getTargetConstant(EltSizeInBits, dl, MVT::i8);
4523   if (UsePackUS) {
4524     if (PackHiHalf) {
4525       LHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, LHS, Amt);
4526       RHS = DAG.getNode(X86ISD::VSRLI, dl, OpVT, RHS, Amt);
4527     } else {
4528       SDValue Mask = DAG.getConstant((1ULL << EltSizeInBits) - 1, dl, OpVT);
4529       LHS = DAG.getNode(ISD::AND, dl, OpVT, LHS, Mask);
4530       RHS = DAG.getNode(ISD::AND, dl, OpVT, RHS, Mask);
4531     }
4532     return DAG.getNode(X86ISD::PACKUS, dl, VT, LHS, RHS);
4533   }
4534 
4535   if (!PackHiHalf) {
4536     LHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, LHS, Amt);
4537     RHS = DAG.getNode(X86ISD::VSHLI, dl, OpVT, RHS, Amt);
4538   }
4539   LHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, LHS, Amt);
4540   RHS = DAG.getNode(X86ISD::VSRAI, dl, OpVT, RHS, Amt);
4541   return DAG.getNode(X86ISD::PACKSS, dl, VT, LHS, RHS);
4542 }
4543 
4544 /// Return a vector_shuffle of the specified vector and a zero or undef vector.
4545 /// This produces a shuffle where the low element of V2 is swizzled into the
4546 /// zero/undef vector, landing at element Idx.
4547 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
4548 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
4549                                            bool IsZero,
4550                                            const X86Subtarget &Subtarget,
4551                                            SelectionDAG &DAG) {
4552   MVT VT = V2.getSimpleValueType();
4553   SDValue V1 = IsZero
4554     ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
4555   int NumElems = VT.getVectorNumElements();
4556   SmallVector<int, 16> MaskVec(NumElems);
4557   for (int i = 0; i != NumElems; ++i)
4558     // If this is the insertion idx, put the low elt of V2 here.
4559     MaskVec[i] = (i == Idx) ? NumElems : i;
4560   return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
4561 }
4562 
4563 static ConstantPoolSDNode *getTargetConstantPoolFromBasePtr(SDValue Ptr) {
4564   if (Ptr.getOpcode() == X86ISD::Wrapper ||
4565       Ptr.getOpcode() == X86ISD::WrapperRIP)
4566     Ptr = Ptr.getOperand(0);
4567   return dyn_cast<ConstantPoolSDNode>(Ptr);
4568 }
4569 
4570 static const Constant *getTargetConstantFromBasePtr(SDValue Ptr) {
4571   ConstantPoolSDNode *CNode = getTargetConstantPoolFromBasePtr(Ptr);
4572   if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
4573     return nullptr;
4574   return CNode->getConstVal();
4575 }
4576 
4577 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
4578   if (!Load || !ISD::isNormalLoad(Load))
4579     return nullptr;
4580   return getTargetConstantFromBasePtr(Load->getBasePtr());
4581 }
4582 
4583 static const Constant *getTargetConstantFromNode(SDValue Op) {
4584   Op = peekThroughBitcasts(Op);
4585   return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
4586 }
4587 
4588 const Constant *
4589 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
4590   assert(LD && "Unexpected null LoadSDNode");
4591   return getTargetConstantFromNode(LD);
4592 }
4593 
4594 // Extract raw constant bits from constant pools.
4595 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
4596                                           APInt &UndefElts,
4597                                           SmallVectorImpl<APInt> &EltBits,
4598                                           bool AllowWholeUndefs = true,
4599                                           bool AllowPartialUndefs = true) {
4600   assert(EltBits.empty() && "Expected an empty EltBits vector");
4601 
4602   Op = peekThroughBitcasts(Op);
4603 
4604   EVT VT = Op.getValueType();
4605   unsigned SizeInBits = VT.getSizeInBits();
4606   assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
4607   unsigned NumElts = SizeInBits / EltSizeInBits;
4608 
4609   // Bitcast a source array of element bits to the target size.
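  // e.g. two i32 source elements {0x11111111, 0x22222222} re-sliced to
  // EltSizeInBits == 64 become the single element 0x2222222211111111, and a
  // target element is only marked undef if every source bit it covers is undef.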
4610   auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
4611     unsigned NumSrcElts = UndefSrcElts.getBitWidth();
4612     unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
4613     assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
4614            "Constant bit sizes don't match");
4615 
4616     // Don't split if we don't allow undef bits.
4617     bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
4618     if (UndefSrcElts.getBoolValue() && !AllowUndefs)
4619       return false;
4620 
4621     // If we're already the right size, don't bother bitcasting.
4622     if (NumSrcElts == NumElts) {
4623       UndefElts = UndefSrcElts;
4624       EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
4625       return true;
4626     }
4627 
4628     // Extract all the undef/constant element data and pack into single bitsets.
4629     APInt UndefBits(SizeInBits, 0);
4630     APInt MaskBits(SizeInBits, 0);
4631 
4632     for (unsigned i = 0; i != NumSrcElts; ++i) {
4633       unsigned BitOffset = i * SrcEltSizeInBits;
4634       if (UndefSrcElts[i])
4635         UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
4636       MaskBits.insertBits(SrcEltBits[i], BitOffset);
4637     }
4638 
4639     // Split the undef/constant single bitset data into the target elements.
4640     UndefElts = APInt(NumElts, 0);
4641     EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
4642 
4643     for (unsigned i = 0; i != NumElts; ++i) {
4644       unsigned BitOffset = i * EltSizeInBits;
4645       APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
4646 
4647       // Only treat an element as UNDEF if all bits are UNDEF.
4648       if (UndefEltBits.isAllOnes()) {
4649         if (!AllowWholeUndefs)
4650           return false;
4651         UndefElts.setBit(i);
4652         continue;
4653       }
4654 
4655       // If only some bits are UNDEF then treat them as zero (or bail if not
4656       // supported).
4657       if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
4658         return false;
4659 
4660       EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
4661     }
4662     return true;
4663   };
4664 
4665   // Collect constant bits and insert into mask/undef bit masks.
4666   auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
4667                                 unsigned UndefBitIndex) {
4668     if (!Cst)
4669       return false;
4670     if (isa<UndefValue>(Cst)) {
4671       Undefs.setBit(UndefBitIndex);
4672       return true;
4673     }
4674     if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
4675       Mask = CInt->getValue();
4676       return true;
4677     }
4678     if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
4679       Mask = CFP->getValueAPF().bitcastToAPInt();
4680       return true;
4681     }
4682     if (auto *CDS = dyn_cast<ConstantDataSequential>(Cst)) {
4683       Type *Ty = CDS->getType();
4684       Mask = APInt::getZero(Ty->getPrimitiveSizeInBits());
4685       Type *EltTy = CDS->getElementType();
4686       bool IsInteger = EltTy->isIntegerTy();
4687       bool IsFP =
4688           EltTy->isHalfTy() || EltTy->isFloatTy() || EltTy->isDoubleTy();
4689       if (!IsInteger && !IsFP)
4690         return false;
4691       unsigned EltBits = EltTy->getPrimitiveSizeInBits();
4692       for (unsigned I = 0, E = CDS->getNumElements(); I != E; ++I)
4693         if (IsInteger)
4694           Mask.insertBits(CDS->getElementAsAPInt(I), I * EltBits);
4695         else
4696           Mask.insertBits(CDS->getElementAsAPFloat(I).bitcastToAPInt(),
4697                           I * EltBits);
4698       return true;
4699     }
4700     return false;
4701   };
4702 
4703   // Handle UNDEFs.
4704   if (Op.isUndef()) {
4705     APInt UndefSrcElts = APInt::getAllOnes(NumElts);
4706     SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
4707     return CastBitData(UndefSrcElts, SrcEltBits);
4708   }
4709 
4710   // Extract scalar constant bits.
4711   if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
4712     APInt UndefSrcElts = APInt::getZero(1);
4713     SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
4714     return CastBitData(UndefSrcElts, SrcEltBits);
4715   }
4716   if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
4717     APInt UndefSrcElts = APInt::getZero(1);
4718     APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
4719     SmallVector<APInt, 64> SrcEltBits(1, RawBits);
4720     return CastBitData(UndefSrcElts, SrcEltBits);
4721   }
4722 
4723   // Extract constant bits from build vector.
4724   if (auto *BV = dyn_cast<BuildVectorSDNode>(Op)) {
4725     BitVector Undefs;
4726     SmallVector<APInt> SrcEltBits;
4727     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4728     if (BV->getConstantRawBits(true, SrcEltSizeInBits, SrcEltBits, Undefs)) {
4729       APInt UndefSrcElts = APInt::getZero(SrcEltBits.size());
4730       for (unsigned I = 0, E = SrcEltBits.size(); I != E; ++I)
4731         if (Undefs[I])
4732           UndefSrcElts.setBit(I);
4733       return CastBitData(UndefSrcElts, SrcEltBits);
4734     }
4735   }
4736 
4737   // Extract constant bits from constant pool vector.
4738   if (auto *Cst = getTargetConstantFromNode(Op)) {
4739     Type *CstTy = Cst->getType();
4740     unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
4741     if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
4742       return false;
4743 
4744     unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
4745     unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
4746     if ((SizeInBits % SrcEltSizeInBits) != 0)
4747       return false;
4748 
4749     APInt UndefSrcElts(NumSrcElts, 0);
4750     SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
4751     for (unsigned i = 0; i != NumSrcElts; ++i)
4752       if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
4753                                UndefSrcElts, i))
4754         return false;
4755 
4756     return CastBitData(UndefSrcElts, SrcEltBits);
4757   }
4758 
4759   // Extract constant bits from a broadcasted constant pool scalar.
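  // A VBROADCAST_LOAD repeats a single scalar, so collect that one constant
  // and splat both its bits and its undef state across all broadcast elements.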
4760   if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
4761       EltSizeInBits <= VT.getScalarSizeInBits()) {
4762     auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
4763     if (MemIntr->getMemoryVT().getStoreSizeInBits() != VT.getScalarSizeInBits())
4764       return false;
4765 
4766     SDValue Ptr = MemIntr->getBasePtr();
4767     if (const Constant *C = getTargetConstantFromBasePtr(Ptr)) {
4768       unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4769       unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
4770 
4771       APInt UndefSrcElts(NumSrcElts, 0);
4772       SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
4773       if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
4774         if (UndefSrcElts[0])
4775           UndefSrcElts.setBits(0, NumSrcElts);
4776         if (SrcEltBits[0].getBitWidth() != SrcEltSizeInBits)
4777           SrcEltBits[0] = SrcEltBits[0].trunc(SrcEltSizeInBits);
4778         SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
4779         return CastBitData(UndefSrcElts, SrcEltBits);
4780       }
4781     }
4782   }
4783 
4784   // Extract constant bits from a subvector broadcast.
4785   if (Op.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
4786     auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
4787     SDValue Ptr = MemIntr->getBasePtr();
4788     // The source constant may be larger than the subvector broadcast; ensure
4789     // we extract the correct subvector constants.
4790     if (const Constant *Cst = getTargetConstantFromBasePtr(Ptr)) {
4791       Type *CstTy = Cst->getType();
4792       unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
4793       unsigned SubVecSizeInBits = MemIntr->getMemoryVT().getStoreSizeInBits();
4794       if (!CstTy->isVectorTy() || (CstSizeInBits % SubVecSizeInBits) != 0 ||
4795           (SizeInBits % SubVecSizeInBits) != 0)
4796         return false;
4797       unsigned CstEltSizeInBits = CstTy->getScalarSizeInBits();
4798       unsigned NumSubElts = SubVecSizeInBits / CstEltSizeInBits;
4799       unsigned NumSubVecs = SizeInBits / SubVecSizeInBits;
4800       APInt UndefSubElts(NumSubElts, 0);
4801       SmallVector<APInt, 64> SubEltBits(NumSubElts * NumSubVecs,
4802                                         APInt(CstEltSizeInBits, 0));
4803       for (unsigned i = 0; i != NumSubElts; ++i) {
4804         if (!CollectConstantBits(Cst->getAggregateElement(i), SubEltBits[i],
4805                                  UndefSubElts, i))
4806           return false;
4807         for (unsigned j = 1; j != NumSubVecs; ++j)
4808           SubEltBits[i + (j * NumSubElts)] = SubEltBits[i];
4809       }
4810       UndefSubElts = APInt::getSplat(NumSubVecs * UndefSubElts.getBitWidth(),
4811                                      UndefSubElts);
4812       return CastBitData(UndefSubElts, SubEltBits);
4813     }
4814   }
4815 
4816   // Extract a rematerialized scalar constant insertion.
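  // VZEXT_MOVL(SCALAR_TO_VECTOR(C)) keeps C in element 0 and zeroes the rest,
  // so emit C followed by NumSrcElts - 1 zero elements.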
4817   if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
4818       Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
4819       isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
4820     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4821     unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
4822 
4823     APInt UndefSrcElts(NumSrcElts, 0);
4824     SmallVector<APInt, 64> SrcEltBits;
4825     const APInt &C = Op.getOperand(0).getConstantOperandAPInt(0);
4826     SrcEltBits.push_back(C.zextOrTrunc(SrcEltSizeInBits));
4827     SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
4828     return CastBitData(UndefSrcElts, SrcEltBits);
4829   }
4830 
4831   // Insert constant bits from a base and sub vector sources.
4832   if (Op.getOpcode() == ISD::INSERT_SUBVECTOR) {
4833     // If we bitcast to larger elements we might lose track of undefs - to be
4834     // safe, don't allow any.
4835     unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
4836     bool AllowUndefs = EltSizeInBits >= SrcEltSizeInBits;
4837 
4838     APInt UndefSrcElts, UndefSubElts;
4839     SmallVector<APInt, 32> EltSrcBits, EltSubBits;
4840     if (getTargetConstantBitsFromNode(Op.getOperand(1), SrcEltSizeInBits,
4841                                       UndefSubElts, EltSubBits,
4842                                       AllowWholeUndefs && AllowUndefs,
4843                                       AllowPartialUndefs && AllowUndefs) &&
4844         getTargetConstantBitsFromNode(Op.getOperand(0), SrcEltSizeInBits,
4845                                       UndefSrcElts, EltSrcBits,
4846                                       AllowWholeUndefs && AllowUndefs,
4847                                       AllowPartialUndefs && AllowUndefs)) {
4848       unsigned BaseIdx = Op.getConstantOperandVal(2);
4849       UndefSrcElts.insertBits(UndefSubElts, BaseIdx);
4850       for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
4851         EltSrcBits[BaseIdx + i] = EltSubBits[i];
4852       return CastBitData(UndefSrcElts, EltSrcBits);
4853     }
4854   }
4855 
4856   // Extract constant bits from a subvector's source.
4857   if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
4858     // TODO - support extract_subvector through bitcasts.
4859     if (EltSizeInBits != VT.getScalarSizeInBits())
4860       return false;
4861 
4862     if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
4863                                       UndefElts, EltBits, AllowWholeUndefs,
4864                                       AllowPartialUndefs)) {
4865       EVT SrcVT = Op.getOperand(0).getValueType();
4866       unsigned NumSrcElts = SrcVT.getVectorNumElements();
4867       unsigned NumSubElts = VT.getVectorNumElements();
4868       unsigned BaseIdx = Op.getConstantOperandVal(1);
4869       UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
4870       if ((BaseIdx + NumSubElts) != NumSrcElts)
4871         EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
4872       if (BaseIdx != 0)
4873         EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
4874       return true;
4875     }
4876   }
4877 
4878   // Extract constant bits from shuffle node sources.
4879   if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
4880     // TODO - support shuffle through bitcasts.
4881     if (EltSizeInBits != VT.getScalarSizeInBits())
4882       return false;
4883 
4884     ArrayRef<int> Mask = SVN->getMask();
4885     if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
4886         llvm::any_of(Mask, [](int M) { return M < 0; }))
4887       return false;
4888 
4889     APInt UndefElts0, UndefElts1;
4890     SmallVector<APInt, 32> EltBits0, EltBits1;
4891     if (isAnyInRange(Mask, 0, NumElts) &&
4892         !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
4893                                        UndefElts0, EltBits0, AllowWholeUndefs,
4894                                        AllowPartialUndefs))
4895       return false;
4896     if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
4897         !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
4898                                        UndefElts1, EltBits1, AllowWholeUndefs,
4899                                        AllowPartialUndefs))
4900       return false;
4901 
4902     UndefElts = APInt::getZero(NumElts);
4903     for (int i = 0; i != (int)NumElts; ++i) {
4904       int M = Mask[i];
4905       if (M < 0) {
4906         UndefElts.setBit(i);
4907         EltBits.push_back(APInt::getZero(EltSizeInBits));
4908       } else if (M < (int)NumElts) {
4909         if (UndefElts0[M])
4910           UndefElts.setBit(i);
4911         EltBits.push_back(EltBits0[M]);
4912       } else {
4913         if (UndefElts1[M - NumElts])
4914           UndefElts.setBit(i);
4915         EltBits.push_back(EltBits1[M - NumElts]);
4916       }
4917     }
4918     return true;
4919   }
4920 
4921   return false;
4922 }
4923 
4924 namespace llvm {
4925 namespace X86 {
4926 bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) {
4927   APInt UndefElts;
4928   SmallVector<APInt, 16> EltBits;
4929   if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
4930                                     UndefElts, EltBits, true,
4931                                     AllowPartialUndefs)) {
4932     int SplatIndex = -1;
4933     for (int i = 0, e = EltBits.size(); i != e; ++i) {
4934       if (UndefElts[i])
4935         continue;
4936       if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
4937         SplatIndex = -1;
4938         break;
4939       }
4940       SplatIndex = i;
4941     }
4942     if (0 <= SplatIndex) {
4943       SplatVal = EltBits[SplatIndex];
4944       return true;
4945     }
4946   }
4947 
4948   return false;
4949 }
4950 } // namespace X86
4951 } // namespace llvm
4952 
4953 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
4954                                         unsigned MaskEltSizeInBits,
4955                                         SmallVectorImpl<uint64_t> &RawMask,
4956                                         APInt &UndefElts) {
4957   // Extract the raw target constant bits.
4958   SmallVector<APInt, 64> EltBits;
4959   if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
4960                                      EltBits, /* AllowWholeUndefs */ true,
4961                                      /* AllowPartialUndefs */ false))
4962     return false;
4963 
4964   // Insert the extracted elements into the mask.
4965   for (const APInt &Elt : EltBits)
4966     RawMask.push_back(Elt.getZExtValue());
4967 
4968   return true;
4969 }
4970 
4971 // Match not(xor X, -1) -> X.
4972 // Match not(pcmpgt(C, X)) -> pcmpgt(X, C - 1).
4973 // Match not(extract_subvector(xor X, -1)) -> extract_subvector(X).
4974 // Match not(concat_vectors(xor X, -1, xor Y, -1)) -> concat_vectors(X, Y).
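// The PCMPGT fold relies on not(C > X) == (X >= C) == (X > C - 1) for signed
// compares, which is why constants containing the minimum signed value are
// rejected below (C - 1 would wrap).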
4975 static SDValue IsNOT(SDValue V, SelectionDAG &DAG) {
4976   V = peekThroughBitcasts(V);
4977   if (V.getOpcode() == ISD::XOR &&
4978       (ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()) ||
4979        isAllOnesConstant(V.getOperand(1))))
4980     return V.getOperand(0);
4981   if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
4982       (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
4983     if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
4984       Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
4985       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
4986                          Not, V.getOperand(1));
4987     }
4988   }
4989   if (V.getOpcode() == X86ISD::PCMPGT &&
4990       !ISD::isBuildVectorAllZeros(V.getOperand(0).getNode()) &&
4991       !ISD::isBuildVectorAllOnes(V.getOperand(0).getNode()) &&
4992       V.getOperand(0).hasOneUse()) {
4993     APInt UndefElts;
4994     SmallVector<APInt> EltBits;
4995     if (getTargetConstantBitsFromNode(V.getOperand(0),
4996                                       V.getScalarValueSizeInBits(), UndefElts,
4997                                       EltBits)) {
4998       // Don't fold min_signed_value -> (min_signed_value - 1)
4999       bool MinSigned = false;
5000       for (APInt &Elt : EltBits) {
5001         MinSigned |= Elt.isMinSignedValue();
5002         Elt -= 1;
5003       }
5004       if (!MinSigned) {
5005         SDLoc DL(V);
5006         MVT VT = V.getSimpleValueType();
5007         return DAG.getNode(X86ISD::PCMPGT, DL, VT, V.getOperand(1),
5008                            getConstVector(EltBits, UndefElts, VT, DAG, DL));
5009       }
5010     }
5011   }
5012   SmallVector<SDValue, 2> CatOps;
5013   if (collectConcatOps(V.getNode(), CatOps, DAG)) {
5014     for (SDValue &CatOp : CatOps) {
5015       SDValue NotCat = IsNOT(CatOp, DAG);
5016       if (!NotCat) return SDValue();
5017       CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
5018     }
5019     return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
5020   }
5021   return SDValue();
5022 }
5023 
5024 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
5025 /// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
5026 /// Note: This ignores saturation, so inputs must be checked first.
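/// e.g. for VT = v16i8 (a PACKSSWB/PACKUSWB result) with Unary == false and
/// NumStages == 1 this builds {0,2,4,...,14, 16,18,...,30}: the even bytes of
/// the LHS lane followed by the even bytes of the RHS lane.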
5027 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
5028                                   bool Unary, unsigned NumStages = 1) {
5029   assert(Mask.empty() && "Expected an empty shuffle mask vector");
5030   unsigned NumElts = VT.getVectorNumElements();
5031   unsigned NumLanes = VT.getSizeInBits() / 128;
5032   unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
5033   unsigned Offset = Unary ? 0 : NumElts;
5034   unsigned Repetitions = 1u << (NumStages - 1);
5035   unsigned Increment = 1u << NumStages;
5036   assert((NumEltsPerLane >> NumStages) > 0 && "Illegal packing compaction");
5037 
5038   for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
5039     for (unsigned Stage = 0; Stage != Repetitions; ++Stage) {
5040       for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
5041         Mask.push_back(Elt + (Lane * NumEltsPerLane));
5042       for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
5043         Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
5044     }
5045   }
5046 }
5047 
5048 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
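// Each 128-bit lane of the result takes its low half from the LHS lane and its
// high half from the RHS lane, so e.g. for a v16i8 pack, demanded element 3
// maps to LHS element 3 and demanded element 11 maps to RHS element 3.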
5049 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
5050                                 APInt &DemandedLHS, APInt &DemandedRHS) {
5051   int NumLanes = VT.getSizeInBits() / 128;
5052   int NumElts = DemandedElts.getBitWidth();
5053   int NumInnerElts = NumElts / 2;
5054   int NumEltsPerLane = NumElts / NumLanes;
5055   int NumInnerEltsPerLane = NumInnerElts / NumLanes;
5056 
5057   DemandedLHS = APInt::getZero(NumInnerElts);
5058   DemandedRHS = APInt::getZero(NumInnerElts);
5059 
5060   // Map DemandedElts to the packed operands.
5061   for (int Lane = 0; Lane != NumLanes; ++Lane) {
5062     for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
5063       int OuterIdx = (Lane * NumEltsPerLane) + Elt;
5064       int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
5065       if (DemandedElts[OuterIdx])
5066         DemandedLHS.setBit(InnerIdx);
5067       if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
5068         DemandedRHS.setBit(InnerIdx);
5069     }
5070   }
5071 }
5072 
5073 // Split the demanded elts of a HADD/HSUB node between its operands.
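// Within each 128-bit lane, result element i is formed from a pair of adjacent
// source elements: the low half of the lane reads the LHS, the high half reads
// the RHS. e.g. for v8i32 HADD, demanded element 1 needs LHS elements 2 and 3,
// while demanded element 6 needs RHS elements 4 and 5.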
5074 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
5075                                  APInt &DemandedLHS, APInt &DemandedRHS) {
5076   int NumLanes = VT.getSizeInBits() / 128;
5077   int NumElts = DemandedElts.getBitWidth();
5078   int NumEltsPerLane = NumElts / NumLanes;
5079   int HalfEltsPerLane = NumEltsPerLane / 2;
5080 
5081   DemandedLHS = APInt::getZero(NumElts);
5082   DemandedRHS = APInt::getZero(NumElts);
5083 
5084   // Map DemandedElts to the horizontal operands.
5085   for (int Idx = 0; Idx != NumElts; ++Idx) {
5086     if (!DemandedElts[Idx])
5087       continue;
5088     int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
5089     int LocalIdx = Idx % NumEltsPerLane;
5090     if (LocalIdx < HalfEltsPerLane) {
5091       DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
5092       DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
5093     } else {
5094       LocalIdx -= HalfEltsPerLane;
5095       DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
5096       DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
5097     }
5098   }
5099 }
5100 
5101 /// Calculates the shuffle mask corresponding to the target-specific opcode.
5102 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
5103 /// operands in \p Ops, and returns true.
5104 /// Sets \p IsUnary to true if only one source is used. Note that this will set
5105 /// IsUnary for shuffles which use a single input multiple times, and in those
5106 /// cases it will adjust the mask to only have indices within that single input.
5107 /// It is an error to call this with non-empty Mask/Ops vectors.
5108 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
5109                                  SmallVectorImpl<SDValue> &Ops,
5110                                  SmallVectorImpl<int> &Mask, bool &IsUnary) {
5111   unsigned NumElems = VT.getVectorNumElements();
5112   unsigned MaskEltSize = VT.getScalarSizeInBits();
5113   SmallVector<uint64_t, 32> RawMask;
5114   APInt RawUndefs;
5115   uint64_t ImmN;
5116 
5117   assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
5118   assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
5119 
5120   IsUnary = false;
5121   bool IsFakeUnary = false;
5122   switch (N->getOpcode()) {
5123   case X86ISD::BLENDI:
5124     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5125     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5126     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5127     DecodeBLENDMask(NumElems, ImmN, Mask);
5128     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5129     break;
5130   case X86ISD::SHUFP:
5131     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5132     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5133     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5134     DecodeSHUFPMask(NumElems, MaskEltSize, ImmN, Mask);
5135     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5136     break;
5137   case X86ISD::INSERTPS:
5138     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5139     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5140     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5141     DecodeINSERTPSMask(ImmN, Mask);
5142     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5143     break;
5144   case X86ISD::EXTRQI:
5145     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5146     if (isa<ConstantSDNode>(N->getOperand(1)) &&
5147         isa<ConstantSDNode>(N->getOperand(2))) {
5148       int BitLen = N->getConstantOperandVal(1);
5149       int BitIdx = N->getConstantOperandVal(2);
5150       DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
5151       IsUnary = true;
5152     }
5153     break;
5154   case X86ISD::INSERTQI:
5155     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5156     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5157     if (isa<ConstantSDNode>(N->getOperand(2)) &&
5158         isa<ConstantSDNode>(N->getOperand(3))) {
5159       int BitLen = N->getConstantOperandVal(2);
5160       int BitIdx = N->getConstantOperandVal(3);
5161       DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
5162       IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5163     }
5164     break;
5165   case X86ISD::UNPCKH:
5166     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5167     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5168     DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
5169     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5170     break;
5171   case X86ISD::UNPCKL:
5172     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5173     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5174     DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
5175     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5176     break;
5177   case X86ISD::MOVHLPS:
5178     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5179     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5180     DecodeMOVHLPSMask(NumElems, Mask);
5181     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5182     break;
5183   case X86ISD::MOVLHPS:
5184     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5185     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5186     DecodeMOVLHPSMask(NumElems, Mask);
5187     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5188     break;
5189   case X86ISD::VALIGN:
5190     assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
5191            "Only 32-bit and 64-bit elements are supported!");
5192     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5193     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5194     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5195     DecodeVALIGNMask(NumElems, ImmN, Mask);
5196     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5197     Ops.push_back(N->getOperand(1));
5198     Ops.push_back(N->getOperand(0));
5199     break;
5200   case X86ISD::PALIGNR:
5201     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5202     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5203     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5204     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5205     DecodePALIGNRMask(NumElems, ImmN, Mask);
5206     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5207     Ops.push_back(N->getOperand(1));
5208     Ops.push_back(N->getOperand(0));
5209     break;
5210   case X86ISD::VSHLDQ:
5211     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5212     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5213     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5214     DecodePSLLDQMask(NumElems, ImmN, Mask);
5215     IsUnary = true;
5216     break;
5217   case X86ISD::VSRLDQ:
5218     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5219     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5220     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5221     DecodePSRLDQMask(NumElems, ImmN, Mask);
5222     IsUnary = true;
5223     break;
5224   case X86ISD::PSHUFD:
5225   case X86ISD::VPERMILPI:
5226     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5227     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5228     DecodePSHUFMask(NumElems, MaskEltSize, ImmN, Mask);
5229     IsUnary = true;
5230     break;
5231   case X86ISD::PSHUFHW:
5232     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5233     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5234     DecodePSHUFHWMask(NumElems, ImmN, Mask);
5235     IsUnary = true;
5236     break;
5237   case X86ISD::PSHUFLW:
5238     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5239     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5240     DecodePSHUFLWMask(NumElems, ImmN, Mask);
5241     IsUnary = true;
5242     break;
5243   case X86ISD::VZEXT_MOVL:
5244     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5245     DecodeZeroMoveLowMask(NumElems, Mask);
5246     IsUnary = true;
5247     break;
5248   case X86ISD::VBROADCAST:
5249     // We only decode broadcasts of same-sized vectors; peeking through to
5250     // extracted subvectors is likely to cause hasOneUse issues with
5251     // SimplifyDemandedBits etc.
5252     if (N->getOperand(0).getValueType() == VT) {
5253       DecodeVectorBroadcast(NumElems, Mask);
5254       IsUnary = true;
5255       break;
5256     }
5257     return false;
5258   case X86ISD::VPERMILPV: {
5259     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5260     IsUnary = true;
5261     SDValue MaskNode = N->getOperand(1);
5262     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5263                                     RawUndefs)) {
5264       DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
5265       break;
5266     }
5267     return false;
5268   }
5269   case X86ISD::PSHUFB: {
5270     assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
5271     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5272     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5273     IsUnary = true;
5274     SDValue MaskNode = N->getOperand(1);
5275     if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
5276       DecodePSHUFBMask(RawMask, RawUndefs, Mask);
5277       break;
5278     }
5279     return false;
5280   }
5281   case X86ISD::VPERMI:
5282     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5283     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5284     DecodeVPERMMask(NumElems, ImmN, Mask);
5285     IsUnary = true;
5286     break;
5287   case X86ISD::MOVSS:
5288   case X86ISD::MOVSD:
5289   case X86ISD::MOVSH:
5290     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5291     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5292     DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
5293     break;
5294   case X86ISD::VPERM2X128:
5295     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5296     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5297     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5298     DecodeVPERM2X128Mask(NumElems, ImmN, Mask);
5299     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5300     break;
5301   case X86ISD::SHUF128:
5302     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5303     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5304     ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
5305     decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize, ImmN, Mask);
5306     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5307     break;
5308   case X86ISD::MOVSLDUP:
5309     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5310     DecodeMOVSLDUPMask(NumElems, Mask);
5311     IsUnary = true;
5312     break;
5313   case X86ISD::MOVSHDUP:
5314     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5315     DecodeMOVSHDUPMask(NumElems, Mask);
5316     IsUnary = true;
5317     break;
5318   case X86ISD::MOVDDUP:
5319     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5320     DecodeMOVDDUPMask(NumElems, Mask);
5321     IsUnary = true;
5322     break;
5323   case X86ISD::VPERMIL2: {
5324     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5325     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5326     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5327     SDValue MaskNode = N->getOperand(2);
5328     SDValue CtrlNode = N->getOperand(3);
5329     if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
5330       unsigned CtrlImm = CtrlOp->getZExtValue();
5331       if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5332                                       RawUndefs)) {
5333         DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
5334                             Mask);
5335         break;
5336       }
5337     }
5338     return false;
5339   }
5340   case X86ISD::VPPERM: {
5341     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5342     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5343     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5344     SDValue MaskNode = N->getOperand(2);
5345     if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
5346       DecodeVPPERMMask(RawMask, RawUndefs, Mask);
5347       break;
5348     }
5349     return false;
5350   }
5351   case X86ISD::VPERMV: {
5352     assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
5353     IsUnary = true;
5354     // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
5355     Ops.push_back(N->getOperand(1));
5356     SDValue MaskNode = N->getOperand(0);
5357     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5358                                     RawUndefs)) {
5359       DecodeVPERMVMask(RawMask, RawUndefs, Mask);
5360       break;
5361     }
5362     return false;
5363   }
5364   case X86ISD::VPERMV3: {
5365     assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
5366     assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
5367     IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
5368     // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
5369     Ops.push_back(N->getOperand(0));
5370     Ops.push_back(N->getOperand(2));
5371     SDValue MaskNode = N->getOperand(1);
5372     if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
5373                                     RawUndefs)) {
5374       DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
5375       break;
5376     }
5377     return false;
5378   }
5379   default: llvm_unreachable("unknown target shuffle node");
5380   }
5381 
5382   // Empty mask indicates the decode failed.
5383   if (Mask.empty())
5384     return false;
5385 
5386   // Check if we're getting a shuffle mask with zero'd elements.
5387   if (!AllowSentinelZero && isAnyZero(Mask))
5388     return false;
5389 
5390   // If we have a fake unary shuffle, the shuffle mask is spread across two
5391   // inputs that are actually the same node. Re-map the mask to always point
5392   // into the first input.
5393   if (IsFakeUnary)
5394     for (int &M : Mask)
5395       if (M >= (int)Mask.size())
5396         M -= Mask.size();
5397 
5398   // If we didn't already add operands in the opcode-specific code, default to
5399   // adding 1 or 2 operands starting at 0.
5400   if (Ops.empty()) {
5401     Ops.push_back(N->getOperand(0));
5402     if (!IsUnary || IsFakeUnary)
5403       Ops.push_back(N->getOperand(1));
5404   }
5405 
5406   return true;
5407 }
5408 
5409 // Wrapper for getTargetShuffleMask with IsUnary.
5410 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
5411                                  SmallVectorImpl<SDValue> &Ops,
5412                                  SmallVectorImpl<int> &Mask) {
5413   bool IsUnary;
5414   return getTargetShuffleMask(N, VT, AllowSentinelZero, Ops, Mask, IsUnary);
5415 }
5416 
5417 /// Compute whether each element of a shuffle is zeroable.
5418 ///
5419 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
5420 /// Either it is an undef element in the shuffle mask, the element of the input
5421 /// referenced is undef, or the element of the input referenced is known to be
5422 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
5423 /// as many lanes with this technique as possible to simplify the remaining
5424 /// shuffle.
5425 static void computeZeroableShuffleElements(ArrayRef<int> Mask,
5426                                            SDValue V1, SDValue V2,
5427                                            APInt &KnownUndef, APInt &KnownZero) {
5428   int Size = Mask.size();
5429   KnownUndef = KnownZero = APInt::getZero(Size);
5430 
5431   V1 = peekThroughBitcasts(V1);
5432   V2 = peekThroughBitcasts(V2);
5433 
5434   bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
5435   bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
5436 
5437   int VectorSizeInBits = V1.getValueSizeInBits();
5438   int ScalarSizeInBits = VectorSizeInBits / Size;
5439   assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
5440 
5441   for (int i = 0; i < Size; ++i) {
5442     int M = Mask[i];
5443     // Handle the easy cases.
5444     if (M < 0) {
5445       KnownUndef.setBit(i);
5446       continue;
5447     }
5448     if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
5449       KnownZero.setBit(i);
5450       continue;
5451     }
5452 
5453     // Determine shuffle input and normalize the mask.
5454     SDValue V = M < Size ? V1 : V2;
5455     M %= Size;
5456 
5457     // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
5458     if (V.getOpcode() != ISD::BUILD_VECTOR)
5459       continue;
5460 
5461     // If the BUILD_VECTOR has fewer elements, then the bitcasted portion of
5462     // the (larger) source element must be UNDEF/ZERO.
5463     if ((Size % V.getNumOperands()) == 0) {
5464       int Scale = Size / V->getNumOperands();
5465       SDValue Op = V.getOperand(M / Scale);
5466       if (Op.isUndef())
5467         KnownUndef.setBit(i);
5468       if (X86::isZeroNode(Op))
5469         KnownZero.setBit(i);
5470       else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
5471         APInt Val = Cst->getAPIntValue();
5472         Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
5473         if (Val == 0)
5474           KnownZero.setBit(i);
5475       } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
5476         APInt Val = Cst->getValueAPF().bitcastToAPInt();
5477         Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
5478         if (Val == 0)
5479           KnownZero.setBit(i);
5480       }
5481       continue;
5482     }
5483 
5484     // If the BUILD_VECTOR has more elements, then all the (smaller) source
5485     // elements must be UNDEF or ZERO.
5486     if ((V.getNumOperands() % Size) == 0) {
5487       int Scale = V->getNumOperands() / Size;
5488       bool AllUndef = true;
5489       bool AllZero = true;
5490       for (int j = 0; j < Scale; ++j) {
5491         SDValue Op = V.getOperand((M * Scale) + j);
5492         AllUndef &= Op.isUndef();
5493         AllZero &= X86::isZeroNode(Op);
5494       }
5495       if (AllUndef)
5496         KnownUndef.setBit(i);
5497       if (AllZero)
5498         KnownZero.setBit(i);
5499       continue;
5500     }
5501   }
5502 }
5503 
5504 /// Decode a target shuffle mask and inputs and see if any values are
5505 /// known to be undef or zero from their inputs.
5506 /// Returns true if the target shuffle mask was decoded.
5507 /// FIXME: Merge this with computeZeroableShuffleElements?
5508 static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
5509                                          SmallVectorImpl<SDValue> &Ops,
5510                                          APInt &KnownUndef, APInt &KnownZero) {
5511   bool IsUnary;
5512   if (!isTargetShuffle(N.getOpcode()))
5513     return false;
5514 
5515   MVT VT = N.getSimpleValueType();
5516   if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
5517     return false;
5518 
5519   int Size = Mask.size();
5520   SDValue V1 = Ops[0];
5521   SDValue V2 = IsUnary ? V1 : Ops[1];
5522   KnownUndef = KnownZero = APInt::getZero(Size);
5523 
5524   V1 = peekThroughBitcasts(V1);
5525   V2 = peekThroughBitcasts(V2);
5526 
5527   assert((VT.getSizeInBits() % Size) == 0 &&
5528          "Illegal split of shuffle value type");
5529   unsigned EltSizeInBits = VT.getSizeInBits() / Size;
5530 
5531   // Extract known constant input data.
5532   APInt UndefSrcElts[2];
5533   SmallVector<APInt, 32> SrcEltBits[2];
5534   bool IsSrcConstant[2] = {
5535       getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
5536                                     SrcEltBits[0], true, false),
5537       getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
5538                                     SrcEltBits[1], true, false)};
5539 
5540   for (int i = 0; i < Size; ++i) {
5541     int M = Mask[i];
5542 
5543     // Already decoded as SM_SentinelZero / SM_SentinelUndef.
5544     if (M < 0) {
5545       assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
5546       if (SM_SentinelUndef == M)
5547         KnownUndef.setBit(i);
5548       if (SM_SentinelZero == M)
5549         KnownZero.setBit(i);
5550       continue;
5551     }
5552 
5553     // Determine shuffle input and normalize the mask.
5554     unsigned SrcIdx = M / Size;
5555     SDValue V = M < Size ? V1 : V2;
5556     M %= Size;
5557 
5558     // We are referencing an UNDEF input.
5559     if (V.isUndef()) {
5560       KnownUndef.setBit(i);
5561       continue;
5562     }
5563 
5564     // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
5565     // TODO: We currently only set UNDEF for integer types - floats use the same
5566     // registers as vectors and many of the scalar folded loads rely on the
5567     // SCALAR_TO_VECTOR pattern.
5568     if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
5569         (Size % V.getValueType().getVectorNumElements()) == 0) {
5570       int Scale = Size / V.getValueType().getVectorNumElements();
5571       int Idx = M / Scale;
5572       if (Idx != 0 && !VT.isFloatingPoint())
5573         KnownUndef.setBit(i);
5574       else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
5575         KnownZero.setBit(i);
5576       continue;
5577     }
5578 
5579     // INSERT_SUBVECTOR - to widen vectors we often insert them into UNDEF
5580     // base vectors.
5581     if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
5582       SDValue Vec = V.getOperand(0);
5583       int NumVecElts = Vec.getValueType().getVectorNumElements();
5584       if (Vec.isUndef() && Size == NumVecElts) {
5585         int Idx = V.getConstantOperandVal(2);
5586         int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
5587         if (M < Idx || (Idx + NumSubElts) <= M)
5588           KnownUndef.setBit(i);
5589       }
5590       continue;
5591     }
5592 
5593     // Attempt to extract from the source's constant bits.
5594     if (IsSrcConstant[SrcIdx]) {
5595       if (UndefSrcElts[SrcIdx][M])
5596         KnownUndef.setBit(i);
5597       else if (SrcEltBits[SrcIdx][M] == 0)
5598         KnownZero.setBit(i);
5599     }
5600   }
5601 
5602   assert(VT.getVectorNumElements() == (unsigned)Size &&
5603          "Different mask size from vector size!");
5604   return true;
5605 }
5606 
5607 // Replace target shuffle mask elements with known undef/zero sentinels.
5608 static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
5609                                               const APInt &KnownUndef,
5610                                               const APInt &KnownZero,
5611                                               bool ResolveKnownZeros = true) {
5612   unsigned NumElts = Mask.size();
5613   assert(KnownUndef.getBitWidth() == NumElts &&
5614          KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
5615 
5616   for (unsigned i = 0; i != NumElts; ++i) {
5617     if (KnownUndef[i])
5618       Mask[i] = SM_SentinelUndef;
5619     else if (ResolveKnownZeros && KnownZero[i])
5620       Mask[i] = SM_SentinelZero;
5621   }
5622 }
5623 
5624 // Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
5625 static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
5626                                               APInt &KnownUndef,
5627                                               APInt &KnownZero) {
5628   unsigned NumElts = Mask.size();
5629   KnownUndef = KnownZero = APInt::getZero(NumElts);
5630 
5631   for (unsigned i = 0; i != NumElts; ++i) {
5632     int M = Mask[i];
5633     if (SM_SentinelUndef == M)
5634       KnownUndef.setBit(i);
5635     if (SM_SentinelZero == M)
5636       KnownZero.setBit(i);
5637   }
5638 }
5639 
5640 // Attempt to create a shuffle mask from a VSELECT/BLENDV condition mask.
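// e.g. a v4i32 VSELECT with constant condition {-1, 0, -1, 0} becomes the
// blend mask {0, 5, 2, 7}: all-zero (or, for BLENDV, non-negative) condition
// elements select from the second operand.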
5641 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
5642                                          SDValue Cond, bool IsBLENDV = false) {
5643   EVT CondVT = Cond.getValueType();
5644   unsigned EltSizeInBits = CondVT.getScalarSizeInBits();
5645   unsigned NumElts = CondVT.getVectorNumElements();
5646 
5647   APInt UndefElts;
5648   SmallVector<APInt, 32> EltBits;
5649   if (!getTargetConstantBitsFromNode(Cond, EltSizeInBits, UndefElts, EltBits,
5650                                      true, false))
5651     return false;
5652 
5653   Mask.resize(NumElts, SM_SentinelUndef);
5654 
5655   for (int i = 0; i != (int)NumElts; ++i) {
5656     Mask[i] = i;
5657     // Arbitrarily choose from the 2nd operand if the select condition element
5658     // is undef.
5659     // TODO: Can we do better by matching patterns such as even/odd?
5660     if (UndefElts[i] || (!IsBLENDV && EltBits[i].isZero()) ||
5661         (IsBLENDV && EltBits[i].isNonNegative()))
5662       Mask[i] += NumElts;
5663   }
5664 
5665   return true;
5666 }
5667 
5668 // Forward declaration (for getFauxShuffleMask recursive check).
5669 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
5670                                    SmallVectorImpl<SDValue> &Inputs,
5671                                    SmallVectorImpl<int> &Mask,
5672                                    const SelectionDAG &DAG, unsigned Depth,
5673                                    bool ResolveKnownElts);
5674 
5675 // Attempt to decode ops that could be represented as a shuffle mask.
5676 // The decoded shuffle mask may contain a different number of elements to the
5677 // destination value type.
5678 // TODO: Merge into getTargetShuffleInputs()
5679 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
5680                                SmallVectorImpl<int> &Mask,
5681                                SmallVectorImpl<SDValue> &Ops,
5682                                const SelectionDAG &DAG, unsigned Depth,
5683                                bool ResolveKnownElts) {
5684   Mask.clear();
5685   Ops.clear();
5686 
5687   MVT VT = N.getSimpleValueType();
5688   unsigned NumElts = VT.getVectorNumElements();
5689   unsigned NumSizeInBits = VT.getSizeInBits();
5690   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
5691   if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
5692     return false;
5693   assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
5694   unsigned NumSizeInBytes = NumSizeInBits / 8;
5695   unsigned NumBytesPerElt = NumBitsPerElt / 8;
5696 
5697   unsigned Opcode = N.getOpcode();
5698   switch (Opcode) {
5699   case ISD::VECTOR_SHUFFLE: {
5700     // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
5701     ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
5702     if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
5703       Mask.append(ShuffleMask.begin(), ShuffleMask.end());
5704       Ops.push_back(N.getOperand(0));
5705       Ops.push_back(N.getOperand(1));
5706       return true;
5707     }
5708     return false;
5709   }
5710   case ISD::AND:
5711   case X86ISD::ANDNP: {
5712     // Attempt to decode as a per-byte mask.
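    // Each constant byte must be 0x00 or 0xFF: for AND, a 0xFF byte keeps the
    // corresponding source byte and a 0x00 byte makes it known zero; ANDNP
    // inverts the roles since the constant is bitwise negated before the AND.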
5713     APInt UndefElts;
5714     SmallVector<APInt, 32> EltBits;
5715     SDValue N0 = N.getOperand(0);
5716     SDValue N1 = N.getOperand(1);
5717     bool IsAndN = (X86ISD::ANDNP == Opcode);
5718     uint64_t ZeroMask = IsAndN ? 255 : 0;
5719     if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
5720       return false;
5721     // We can't assume an undef src element gives an undef dst - the other src
5722     // might be zero.
5723     if (!UndefElts.isZero())
5724       return false;
5725     for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
5726       const APInt &ByteBits = EltBits[i];
5727       if (ByteBits != 0 && ByteBits != 255)
5728         return false;
5729       Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
5730     }
5731     Ops.push_back(IsAndN ? N1 : N0);
5732     return true;
5733   }
5734   case ISD::OR: {
5735     // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
5736     // is a valid shuffle index.
5737     SDValue N0 = peekThroughBitcasts(N.getOperand(0));
5738     SDValue N1 = peekThroughBitcasts(N.getOperand(1));
5739     if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
5740       return false;
5741 
5742     SmallVector<int, 64> SrcMask0, SrcMask1;
5743     SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
5744     APInt Demand0 = APInt::getAllOnes(N0.getValueType().getVectorNumElements());
5745     APInt Demand1 = APInt::getAllOnes(N1.getValueType().getVectorNumElements());
5746     if (!getTargetShuffleInputs(N0, Demand0, SrcInputs0, SrcMask0, DAG,
5747                                 Depth + 1, true) ||
5748         !getTargetShuffleInputs(N1, Demand1, SrcInputs1, SrcMask1, DAG,
5749                                 Depth + 1, true))
5750       return false;
5751 
5752     size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
5753     SmallVector<int, 64> Mask0, Mask1;
5754     narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
5755     narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
5756     for (int i = 0; i != (int)MaskSize; ++i) {
5757       // NOTE: Don't handle SM_SentinelUndef, as we can end up in infinite
5758       // loops converting between OR and BLEND shuffles due to
5759       // canWidenShuffleElements merging away undef elements, meaning we
5760       // fail to recognise the OR as the undef element isn't known zero.
5761       if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
5762         Mask.push_back(SM_SentinelZero);
5763       else if (Mask1[i] == SM_SentinelZero)
5764         Mask.push_back(i);
5765       else if (Mask0[i] == SM_SentinelZero)
5766         Mask.push_back(i + MaskSize);
5767       else
5768         return false;
5769     }
5770     Ops.push_back(N0);
5771     Ops.push_back(N1);
5772     return true;
5773   }
5774   case ISD::INSERT_SUBVECTOR: {
5775     SDValue Src = N.getOperand(0);
5776     SDValue Sub = N.getOperand(1);
5777     EVT SubVT = Sub.getValueType();
5778     unsigned NumSubElts = SubVT.getVectorNumElements();
5779     if (!N->isOnlyUserOf(Sub.getNode()))
5780       return false;
5781     SDValue SubBC = peekThroughBitcasts(Sub);
5782     uint64_t InsertIdx = N.getConstantOperandVal(2);
5783     // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
5784     if (SubBC.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5785         SubBC.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
5786       uint64_t ExtractIdx = SubBC.getConstantOperandVal(1);
5787       SDValue SubBCSrc = SubBC.getOperand(0);
5788       unsigned NumSubSrcBCElts = SubBCSrc.getValueType().getVectorNumElements();
5789       unsigned MaxElts = std::max(NumElts, NumSubSrcBCElts);
5790       assert((MaxElts % NumElts) == 0 && (MaxElts % NumSubSrcBCElts) == 0 &&
5791              "Subvector valuetype mismatch");
5792       InsertIdx *= (MaxElts / NumElts);
5793       ExtractIdx *= (MaxElts / NumSubSrcBCElts);
5794       NumSubElts *= (MaxElts / NumElts);
5795       bool SrcIsUndef = Src.isUndef();
5796       for (int i = 0; i != (int)MaxElts; ++i)
5797         Mask.push_back(SrcIsUndef ? SM_SentinelUndef : i);
5798       for (int i = 0; i != (int)NumSubElts; ++i)
5799         Mask[InsertIdx + i] = (SrcIsUndef ? 0 : MaxElts) + ExtractIdx + i;
5800       if (!SrcIsUndef)
5801         Ops.push_back(Src);
5802       Ops.push_back(SubBCSrc);
5803       return true;
5804     }
5805     // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
5806     SmallVector<int, 64> SubMask;
5807     SmallVector<SDValue, 2> SubInputs;
5808     SDValue SubSrc = peekThroughOneUseBitcasts(Sub);
5809     EVT SubSrcVT = SubSrc.getValueType();
5810     if (!SubSrcVT.isVector())
5811       return false;
5812 
5813     APInt SubDemand = APInt::getAllOnes(SubSrcVT.getVectorNumElements());
5814     if (!getTargetShuffleInputs(SubSrc, SubDemand, SubInputs, SubMask, DAG,
5815                                 Depth + 1, ResolveKnownElts))
5816       return false;
5817 
5818     // Subvector shuffle inputs must not be larger than the subvector.
5819     if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) {
5820           return SubVT.getFixedSizeInBits() <
5821                  SubInput.getValueSizeInBits().getFixedValue();
5822         }))
5823       return false;
5824 
5825     if (SubMask.size() != NumSubElts) {
5826       assert(((SubMask.size() % NumSubElts) == 0 ||
5827               (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
5828       if ((NumSubElts % SubMask.size()) == 0) {
5829         int Scale = NumSubElts / SubMask.size();
5830         SmallVector<int,64> ScaledSubMask;
5831         narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
5832         SubMask = ScaledSubMask;
5833       } else {
5834         int Scale = SubMask.size() / NumSubElts;
5835         NumSubElts = SubMask.size();
5836         NumElts *= Scale;
5837         InsertIdx *= Scale;
5838       }
5839     }
5840     Ops.push_back(Src);
5841     Ops.append(SubInputs.begin(), SubInputs.end());
5842     if (ISD::isBuildVectorAllZeros(Src.getNode()))
5843       Mask.append(NumElts, SM_SentinelZero);
5844     else
5845       for (int i = 0; i != (int)NumElts; ++i)
5846         Mask.push_back(i);
5847     for (int i = 0; i != (int)NumSubElts; ++i) {
5848       int M = SubMask[i];
5849       if (0 <= M) {
5850         int InputIdx = M / NumSubElts;
5851         M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
5852       }
5853       Mask[i + InsertIdx] = M;
5854     }
5855     return true;
5856   }
5857   case X86ISD::PINSRB:
5858   case X86ISD::PINSRW:
5859   case ISD::SCALAR_TO_VECTOR:
5860   case ISD::INSERT_VECTOR_ELT: {
5861     // Match against an insert_vector_elt/scalar_to_vector of an extract from a
5862     // vector, for matching src/dst vector types.
5863     SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
5864 
5865     unsigned DstIdx = 0;
5866     if (Opcode != ISD::SCALAR_TO_VECTOR) {
5867       // Check we have an in-range constant insertion index.
5868       if (!isa<ConstantSDNode>(N.getOperand(2)) ||
5869           N.getConstantOperandAPInt(2).uge(NumElts))
5870         return false;
5871       DstIdx = N.getConstantOperandVal(2);
5872 
5873       // Attempt to recognise an INSERT*(VEC, 0, DstIdx) shuffle pattern.
5874       if (X86::isZeroNode(Scl)) {
5875         Ops.push_back(N.getOperand(0));
5876         for (unsigned i = 0; i != NumElts; ++i)
5877           Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);
5878         return true;
5879       }
5880     }
5881 
5882     // Peek through trunc/aext/zext.
5883     // TODO: aext shouldn't require SM_SentinelZero padding.
5884     // TODO: handle shift of scalars.
5885     unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
5886     while (Scl.getOpcode() == ISD::TRUNCATE ||
5887            Scl.getOpcode() == ISD::ANY_EXTEND ||
5888            Scl.getOpcode() == ISD::ZERO_EXTEND) {
5889       Scl = Scl.getOperand(0);
5890       MinBitsPerElt =
5891           std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
5892     }
5893     if ((MinBitsPerElt % 8) != 0)
5894       return false;
5895 
5896     // Attempt to find the source vector the scalar was extracted from.
5897     SDValue SrcExtract;
5898     if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
5899          Scl.getOpcode() == X86ISD::PEXTRW ||
5900          Scl.getOpcode() == X86ISD::PEXTRB) &&
5901         Scl.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
5902       SrcExtract = Scl;
5903     }
5904     if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
5905       return false;
5906 
5907     SDValue SrcVec = SrcExtract.getOperand(0);
5908     EVT SrcVT = SrcVec.getValueType();
5909     if (!SrcVT.getScalarType().isByteSized())
5910       return false;
5911     unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
5912     unsigned SrcByte = SrcIdx * (SrcVT.getScalarSizeInBits() / 8);
5913     unsigned DstByte = DstIdx * NumBytesPerElt;
5914     MinBitsPerElt =
5915         std::min<unsigned>(MinBitsPerElt, SrcVT.getScalarSizeInBits());
5916 
5917     // Create 'identity' byte level shuffle mask and then add inserted bytes.
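    // Example (illustrative only): for
    //   insert_vector_elt(v4i32 X, extract_vector_elt(v4i32 Y, 1), 3)
    // the ops become {Y, X} and the byte mask is the identity over X (indices
    // 16..31) with bytes 12..15 replaced by indices 4..7, i.e. Y's bytes 4..7.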
5918     if (Opcode == ISD::SCALAR_TO_VECTOR) {
5919       Ops.push_back(SrcVec);
5920       Mask.append(NumSizeInBytes, SM_SentinelUndef);
5921     } else {
5922       Ops.push_back(SrcVec);
5923       Ops.push_back(N.getOperand(0));
5924       for (int i = 0; i != (int)NumSizeInBytes; ++i)
5925         Mask.push_back(NumSizeInBytes + i);
5926     }
5927 
5928     unsigned MinBytesPerElts = MinBitsPerElt / 8;
5929     MinBytesPerElts = std::min(MinBytesPerElts, NumBytesPerElt);
5930     for (unsigned i = 0; i != MinBytesPerElts; ++i)
5931       Mask[DstByte + i] = SrcByte + i;
5932     for (unsigned i = MinBytesPerElts; i < NumBytesPerElt; ++i)
5933       Mask[DstByte + i] = SM_SentinelZero;
5934     return true;
5935   }
5936   case X86ISD::PACKSS:
5937   case X86ISD::PACKUS: {
5938     SDValue N0 = N.getOperand(0);
5939     SDValue N1 = N.getOperand(1);
5940     assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
5941            N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
5942            "Unexpected input value type");
5943 
5944     APInt EltsLHS, EltsRHS;
5945     getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
5946 
5947     // If we know input saturation won't happen (or we don't care about
5948     // particular lanes), we can treat this as a truncation shuffle.
5949     bool Offset0 = false, Offset1 = false;
5950     if (Opcode == X86ISD::PACKSS) {
5951       if ((!(N0.isUndef() || EltsLHS.isZero()) &&
5952            DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
5953           (!(N1.isUndef() || EltsRHS.isZero()) &&
5954            DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
5955         return false;
5956       // We can't easily fold ASHR into a shuffle, but if it was feeding a
5957       // PACKSS then it was likely being used for sign-extension for a
5958       // truncation, so just peek through and adjust the mask accordingly.
5959       if (N0.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N0.getNode()) &&
5960           N0.getConstantOperandAPInt(1) == NumBitsPerElt) {
5961         Offset0 = true;
5962         N0 = N0.getOperand(0);
5963       }
5964       if (N1.getOpcode() == X86ISD::VSRAI && N->isOnlyUserOf(N1.getNode()) &&
5965           N1.getConstantOperandAPInt(1) == NumBitsPerElt) {
5966         Offset1 = true;
5967         N1 = N1.getOperand(0);
5968       }
5969     } else {
5970       APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
5971       if ((!(N0.isUndef() || EltsLHS.isZero()) &&
5972            !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
5973           (!(N1.isUndef() || EltsRHS.isZero()) &&
5974            !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
5975         return false;
5976     }
5977 
5978     bool IsUnary = (N0 == N1);
5979 
5980     Ops.push_back(N0);
5981     if (!IsUnary)
5982       Ops.push_back(N1);
5983 
5984     createPackShuffleMask(VT, Mask, IsUnary);
5985 
5986     if (Offset0 || Offset1) {
5987       for (int &M : Mask)
5988         if ((Offset0 && isInRange(M, 0, NumElts)) ||
5989             (Offset1 && isInRange(M, NumElts, 2 * NumElts)))
5990           ++M;
5991     }
5992     return true;
5993   }
5994   case ISD::VSELECT:
5995   case X86ISD::BLENDV: {
5996     SDValue Cond = N.getOperand(0);
5997     if (createShuffleMaskFromVSELECT(Mask, Cond, Opcode == X86ISD::BLENDV)) {
5998       Ops.push_back(N.getOperand(1));
5999       Ops.push_back(N.getOperand(2));
6000       return true;
6001     }
6002     return false;
6003   }
6004   case X86ISD::VTRUNC: {
6005     SDValue Src = N.getOperand(0);
6006     EVT SrcVT = Src.getValueType();
6007     // Truncated source must be a simple vector.
6008     if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
6009         (SrcVT.getScalarSizeInBits() % 8) != 0)
6010       return false;
6011     unsigned NumSrcElts = SrcVT.getVectorNumElements();
6012     unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
6013     unsigned Scale = NumBitsPerSrcElt / NumBitsPerElt;
6014     assert((NumBitsPerSrcElt % NumBitsPerElt) == 0 && "Illegal truncation");
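    // Example (illustrative only): a v16i8 VTRUNC of a v4i32 source
    // (Scale == 4) yields the mask {0, 4, 8, 12} followed by twelve
    // SM_SentinelZero entries for the zeroed upper result elements.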
6015     for (unsigned i = 0; i != NumSrcElts; ++i)
6016       Mask.push_back(i * Scale);
6017     Mask.append(NumElts - NumSrcElts, SM_SentinelZero);
6018     Ops.push_back(Src);
6019     return true;
6020   }
6021   case X86ISD::VSHLI:
6022   case X86ISD::VSRLI: {
6023     uint64_t ShiftVal = N.getConstantOperandVal(1);
6024     // Out of range bit shifts are guaranteed to be zero.
6025     if (NumBitsPerElt <= ShiftVal) {
6026       Mask.append(NumElts, SM_SentinelZero);
6027       return true;
6028     }
6029 
6030     // We can only decode 'whole byte' bit shifts as shuffles.
6031     if ((ShiftVal % 8) != 0)
6032       break;
6033 
6034     uint64_t ByteShift = ShiftVal / 8;
6035     Ops.push_back(N.getOperand(0));
6036 
6037     // Clear mask to all zeros and insert the shifted byte indices.
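    // Example (illustrative only): a v2i64 VSRLI by 16 bits (ByteShift == 2)
    // produces, per 8-byte element, the mask {2,3,4,5,6,7,Z,Z}; the matching
    // VSHLI produces {Z,Z,0,1,2,3,4,5} (Z = SM_SentinelZero).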
6038     Mask.append(NumSizeInBytes, SM_SentinelZero);
6039 
6040     if (X86ISD::VSHLI == Opcode) {
6041       for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
6042         for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
6043           Mask[i + j] = i + j - ByteShift;
6044     } else {
6045       for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
6046         for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
6047           Mask[i + j - ByteShift] = i + j;
6048     }
6049     return true;
6050   }
6051   case X86ISD::VROTLI:
6052   case X86ISD::VROTRI: {
6053     // We can only decode 'whole byte' bit rotates as shuffles.
6054     uint64_t RotateVal = N.getConstantOperandAPInt(1).urem(NumBitsPerElt);
6055     if ((RotateVal % 8) != 0)
6056       return false;
6057     Ops.push_back(N.getOperand(0));
6058     int Offset = RotateVal / 8;
6059     Offset = (X86ISD::VROTLI == Opcode ? NumBytesPerElt - Offset : Offset);
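    // A left rotate by K bytes reads, for destination byte j, source byte
    // (j + NumBytesPerElt - K) % NumBytesPerElt, hence the adjustment above.
    // Example (illustrative only): a v4i32 VROTLI by 8 bits gives the
    // per-element mask {3,0,1,2}; VROTRI by 8 bits gives {1,2,3,0}.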
6060     for (int i = 0; i != (int)NumElts; ++i) {
6061       int BaseIdx = i * NumBytesPerElt;
6062       for (int j = 0; j != (int)NumBytesPerElt; ++j) {
6063         Mask.push_back(BaseIdx + ((Offset + j) % NumBytesPerElt));
6064       }
6065     }
6066     return true;
6067   }
6068   case X86ISD::VBROADCAST: {
6069     SDValue Src = N.getOperand(0);
6070     if (!Src.getSimpleValueType().isVector()) {
6071       if (Src.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6072           !isNullConstant(Src.getOperand(1)) ||
6073           Src.getOperand(0).getValueType().getScalarType() !=
6074               VT.getScalarType())
6075         return false;
6076       Src = Src.getOperand(0);
6077     }
6078     Ops.push_back(Src);
6079     Mask.append(NumElts, 0);
6080     return true;
6081   }
6082   case ISD::SIGN_EXTEND_VECTOR_INREG: {
6083     SDValue Src = N.getOperand(0);
6084     EVT SrcVT = Src.getValueType();
6085     unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
6086 
6087     // Extended source must be a simple vector.
6088     if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
6089         (NumBitsPerSrcElt % 8) != 0)
6090       return false;
6091 
6092     // We can only handle all-signbits extensions.
6093     APInt DemandedSrcElts =
6094         DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
6095     if (DAG.ComputeNumSignBits(Src, DemandedSrcElts) != NumBitsPerSrcElt)
6096       return false;
6097 
6098     assert((NumBitsPerElt % NumBitsPerSrcElt) == 0 && "Unexpected extension");
6099     unsigned Scale = NumBitsPerElt / NumBitsPerSrcElt;
6100     for (unsigned I = 0; I != NumElts; ++I)
6101       Mask.append(Scale, I);
6102     Ops.push_back(Src);
6103     return true;
6104   }
6105   case ISD::ZERO_EXTEND:
6106   case ISD::ANY_EXTEND:
6107   case ISD::ZERO_EXTEND_VECTOR_INREG:
6108   case ISD::ANY_EXTEND_VECTOR_INREG: {
6109     SDValue Src = N.getOperand(0);
6110     EVT SrcVT = Src.getValueType();
6111 
6112     // Extended source must be a simple vector.
6113     if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
6114         (SrcVT.getScalarSizeInBits() % 8) != 0)
6115       return false;
6116 
6117     bool IsAnyExtend =
6118         (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
6119     DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
6120                          IsAnyExtend, Mask);
6121     Ops.push_back(Src);
6122     return true;
6123   }
6124   }
6125 
6126   return false;
6127 }
6128 
6129 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
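/// For example (illustrative only): with Inputs = {A, B, A} and a mask width
/// of 4, the trailing A is recognised as a repeat of input 0, so mask indices
/// in the range [8,12) are remapped into [0,4) and the inputs collapse to
/// {A, B}.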
6130 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
6131                                               SmallVectorImpl<int> &Mask) {
6132   int MaskWidth = Mask.size();
6133   SmallVector<SDValue, 16> UsedInputs;
6134   for (int i = 0, e = Inputs.size(); i < e; ++i) {
6135     int lo = UsedInputs.size() * MaskWidth;
6136     int hi = lo + MaskWidth;
6137 
6138     // Strip UNDEF input usage.
6139     if (Inputs[i].isUndef())
6140       for (int &M : Mask)
6141         if ((lo <= M) && (M < hi))
6142           M = SM_SentinelUndef;
6143 
6144     // Check for unused inputs.
6145     if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
6146       for (int &M : Mask)
6147         if (lo <= M)
6148           M -= MaskWidth;
6149       continue;
6150     }
6151 
6152     // Check for repeated inputs.
6153     bool IsRepeat = false;
6154     for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
6155       if (UsedInputs[j] != Inputs[i])
6156         continue;
6157       for (int &M : Mask)
6158         if (lo <= M)
6159           M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
6160       IsRepeat = true;
6161       break;
6162     }
6163     if (IsRepeat)
6164       continue;
6165 
6166     UsedInputs.push_back(Inputs[i]);
6167   }
6168   Inputs = UsedInputs;
6169 }
6170 
6171 /// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
6172 /// and then sets the SM_SentinelUndef and SM_SentinelZero values.
6173 /// Returns true if the target shuffle mask was decoded.
6174 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
6175                                    SmallVectorImpl<SDValue> &Inputs,
6176                                    SmallVectorImpl<int> &Mask,
6177                                    APInt &KnownUndef, APInt &KnownZero,
6178                                    const SelectionDAG &DAG, unsigned Depth,
6179                                    bool ResolveKnownElts) {
6180   if (Depth >= SelectionDAG::MaxRecursionDepth)
6181     return false; // Limit search depth.
6182 
6183   EVT VT = Op.getValueType();
6184   if (!VT.isSimple() || !VT.isVector())
6185     return false;
6186 
6187   if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
6188     if (ResolveKnownElts)
6189       resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
6190     return true;
6191   }
6192   if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
6193                          ResolveKnownElts)) {
6194     resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
6195     return true;
6196   }
6197   return false;
6198 }
6199 
6200 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
6201                                    SmallVectorImpl<SDValue> &Inputs,
6202                                    SmallVectorImpl<int> &Mask,
6203                                    const SelectionDAG &DAG, unsigned Depth,
6204                                    bool ResolveKnownElts) {
6205   APInt KnownUndef, KnownZero;
6206   return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
6207                                 KnownZero, DAG, Depth, ResolveKnownElts);
6208 }
6209 
6210 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
6211                                    SmallVectorImpl<int> &Mask,
6212                                    const SelectionDAG &DAG, unsigned Depth = 0,
6213                                    bool ResolveKnownElts = true) {
6214   EVT VT = Op.getValueType();
6215   if (!VT.isSimple() || !VT.isVector())
6216     return false;
6217 
6218   unsigned NumElts = Op.getValueType().getVectorNumElements();
6219   APInt DemandedElts = APInt::getAllOnes(NumElts);
6220   return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, DAG, Depth,
6221                                 ResolveKnownElts);
6222 }
6223 
6224 // Attempt to create a scalar/subvector broadcast from the base MemSDNode.
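// Example (illustrative only, hypothetical operands): calling
//   getBROADCAST_LOAD(X86ISD::VBROADCAST_LOAD, DL, MVT::v4f32, MVT::f32,
//                     Ld, /*Offset=*/4, DAG)
// broadcasts the f32 located 4 bytes past Ld's base pointer, reusing Ld's
// memory operand and chaining the new load after the original one.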
6225 static SDValue getBROADCAST_LOAD(unsigned Opcode, const SDLoc &DL, EVT VT,
6226                                  EVT MemVT, MemSDNode *Mem, unsigned Offset,
6227                                  SelectionDAG &DAG) {
6228   assert((Opcode == X86ISD::VBROADCAST_LOAD ||
6229           Opcode == X86ISD::SUBV_BROADCAST_LOAD) &&
6230          "Unknown broadcast load type");
6231 
6232   // Ensure this is a simple (non-atomic, non-volatile), temporal read memop.
6233   if (!Mem || !Mem->readMem() || !Mem->isSimple() || Mem->isNonTemporal())
6234     return SDValue();
6235 
6236   SDValue Ptr = DAG.getMemBasePlusOffset(Mem->getBasePtr(),
6237                                          TypeSize::getFixed(Offset), DL);
6238   SDVTList Tys = DAG.getVTList(VT, MVT::Other);
6239   SDValue Ops[] = {Mem->getChain(), Ptr};
6240   SDValue BcstLd = DAG.getMemIntrinsicNode(
6241       Opcode, DL, Tys, Ops, MemVT,
6242       DAG.getMachineFunction().getMachineMemOperand(
6243           Mem->getMemOperand(), Offset, MemVT.getStoreSize()));
6244   DAG.makeEquivalentMemoryOrdering(SDValue(Mem, 1), BcstLd.getValue(1));
6245   return BcstLd;
6246 }
6247 
6248 /// Returns the scalar element that will make up the i'th
6249 /// element of the result of the vector shuffle.
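/// For example (illustrative only): for Op = vector_shuffle<4,1,6,3>(A, B),
/// Index 2 maps to mask element 6, so this recurses into element 2 of B.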
6250 static SDValue getShuffleScalarElt(SDValue Op, unsigned Index,
6251                                    SelectionDAG &DAG, unsigned Depth) {
6252   if (Depth >= SelectionDAG::MaxRecursionDepth)
6253     return SDValue(); // Limit search depth.
6254 
6255   EVT VT = Op.getValueType();
6256   unsigned Opcode = Op.getOpcode();
6257   unsigned NumElems = VT.getVectorNumElements();
6258 
6259   // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
6260   if (auto *SV = dyn_cast<ShuffleVectorSDNode>(Op)) {
6261     int Elt = SV->getMaskElt(Index);
6262 
6263     if (Elt < 0)
6264       return DAG.getUNDEF(VT.getVectorElementType());
6265 
6266     SDValue Src = (Elt < (int)NumElems) ? SV->getOperand(0) : SV->getOperand(1);
6267     return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
6268   }
6269 
6270   // Recurse into target specific vector shuffles to find scalars.
6271   if (isTargetShuffle(Opcode)) {
6272     MVT ShufVT = VT.getSimpleVT();
6273     MVT ShufSVT = ShufVT.getVectorElementType();
6274     int NumElems = (int)ShufVT.getVectorNumElements();
6275     SmallVector<int, 16> ShuffleMask;
6276     SmallVector<SDValue, 16> ShuffleOps;
6277     if (!getTargetShuffleMask(Op.getNode(), ShufVT, true, ShuffleOps,
6278                               ShuffleMask))
6279       return SDValue();
6280 
6281     int Elt = ShuffleMask[Index];
6282     if (Elt == SM_SentinelZero)
6283       return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(Op), ShufSVT)
6284                                  : DAG.getConstantFP(+0.0, SDLoc(Op), ShufSVT);
6285     if (Elt == SM_SentinelUndef)
6286       return DAG.getUNDEF(ShufSVT);
6287 
6288     assert(0 <= Elt && Elt < (2 * NumElems) && "Shuffle index out of range");
6289     SDValue Src = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
6290     return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
6291   }
6292 
6293   // Recurse into insert_subvector base/sub vector to find scalars.
6294   if (Opcode == ISD::INSERT_SUBVECTOR) {
6295     SDValue Vec = Op.getOperand(0);
6296     SDValue Sub = Op.getOperand(1);
6297     uint64_t SubIdx = Op.getConstantOperandVal(2);
6298     unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
6299 
6300     if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
6301       return getShuffleScalarElt(Sub, Index - SubIdx, DAG, Depth + 1);
6302     return getShuffleScalarElt(Vec, Index, DAG, Depth + 1);
6303   }
6304 
6305   // Recurse into concat_vectors sub vector to find scalars.
6306   if (Opcode == ISD::CONCAT_VECTORS) {
6307     EVT SubVT = Op.getOperand(0).getValueType();
6308     unsigned NumSubElts = SubVT.getVectorNumElements();
6309     uint64_t SubIdx = Index / NumSubElts;
6310     uint64_t SubElt = Index % NumSubElts;
6311     return getShuffleScalarElt(Op.getOperand(SubIdx), SubElt, DAG, Depth + 1);
6312   }
6313 
6314   // Recurse into extract_subvector src vector to find scalars.
6315   if (Opcode == ISD::EXTRACT_SUBVECTOR) {
6316     SDValue Src = Op.getOperand(0);
6317     uint64_t SrcIdx = Op.getConstantOperandVal(1);
6318     return getShuffleScalarElt(Src, Index + SrcIdx, DAG, Depth + 1);
6319   }
6320 
6321   // We only peek through bitcasts of the same vector width.
6322   if (Opcode == ISD::BITCAST) {
6323     SDValue Src = Op.getOperand(0);
6324     EVT SrcVT = Src.getValueType();
6325     if (SrcVT.isVector() && SrcVT.getVectorNumElements() == NumElems)
6326       return getShuffleScalarElt(Src, Index, DAG, Depth + 1);
6327     return SDValue();
6328   }
6329 
6330   // Actual nodes that may contain scalar elements
6331 
6332   // For insert_vector_elt - either return the index matching scalar or recurse
6333   // into the base vector.
6334   if (Opcode == ISD::INSERT_VECTOR_ELT &&
6335       isa<ConstantSDNode>(Op.getOperand(2))) {
6336     if (Op.getConstantOperandAPInt(2) == Index)
6337       return Op.getOperand(1);
6338     return getShuffleScalarElt(Op.getOperand(0), Index, DAG, Depth + 1);
6339   }
6340 
6341   if (Opcode == ISD::SCALAR_TO_VECTOR)
6342     return (Index == 0) ? Op.getOperand(0)
6343                         : DAG.getUNDEF(VT.getVectorElementType());
6344 
6345   if (Opcode == ISD::BUILD_VECTOR)
6346     return Op.getOperand(Index);
6347 
6348   return SDValue();
6349 }
6350 
6351 // Use PINSRB/PINSRW/PINSRD to create a build vector.
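// Example (illustrative only): a v8i16 build_vector whose only non-zero
// operands are at indices 1 and 2 starts from a zero vector and emits two
// INSERT_VECTOR_ELT nodes for lanes 1 and 2, which later select to PINSRW.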
6352 static SDValue LowerBuildVectorAsInsert(SDValue Op, const APInt &NonZeroMask,
6353                                         unsigned NumNonZero, unsigned NumZero,
6354                                         SelectionDAG &DAG,
6355                                         const X86Subtarget &Subtarget) {
6356   MVT VT = Op.getSimpleValueType();
6357   unsigned NumElts = VT.getVectorNumElements();
6358   assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
6359           ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
6360          "Illegal vector insertion");
6361 
6362   SDLoc dl(Op);
6363   SDValue V;
6364   bool First = true;
6365 
6366   for (unsigned i = 0; i < NumElts; ++i) {
6367     bool IsNonZero = NonZeroMask[i];
6368     if (!IsNonZero)
6369       continue;
6370 
6371     // If the build vector contains zeros or our first insertion is not the
6372     // first index, then insert into a zero vector to break any register
6373     // dependency; otherwise use SCALAR_TO_VECTOR.
6374     if (First) {
6375       First = false;
6376       if (NumZero || 0 != i)
6377         V = getZeroVector(VT, Subtarget, DAG, dl);
6378       else {
6379         assert(0 == i && "Expected insertion into zero-index");
6380         V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6381         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
6382         V = DAG.getBitcast(VT, V);
6383         continue;
6384       }
6385     }
6386     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
6387                     DAG.getIntPtrConstant(i, dl));
6388   }
6389 
6390   return V;
6391 }
6392 
6393 /// Custom lower build_vector of v16i8.
6394 static SDValue LowerBuildVectorv16i8(SDValue Op, const APInt &NonZeroMask,
6395                                      unsigned NumNonZero, unsigned NumZero,
6396                                      SelectionDAG &DAG,
6397                                      const X86Subtarget &Subtarget) {
6398   if (NumNonZero > 8 && !Subtarget.hasSSE41())
6399     return SDValue();
6400 
6401   // SSE4.1 - use PINSRB to insert each byte directly.
6402   if (Subtarget.hasSSE41())
6403     return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
6404                                     Subtarget);
6405 
6406   SDLoc dl(Op);
6407   SDValue V;
6408 
6409   // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
6410   // If both of the lowest two 16-bit elements are non-zero, then convert to MOVD.
6411   if (!NonZeroMask.extractBits(2, 0).isZero() &&
6412       !NonZeroMask.extractBits(2, 2).isZero()) {
6413     for (unsigned I = 0; I != 4; ++I) {
6414       if (!NonZeroMask[I])
6415         continue;
6416       SDValue Elt = DAG.getZExtOrTrunc(Op.getOperand(I), dl, MVT::i32);
6417       if (I != 0)
6418         Elt = DAG.getNode(ISD::SHL, dl, MVT::i32, Elt,
6419                           DAG.getConstant(I * 8, dl, MVT::i8));
6420       V = V ? DAG.getNode(ISD::OR, dl, MVT::i32, V, Elt) : Elt;
6421     }
6422     assert(V && "Failed to fold v16i8 vector to zero");
6423     V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
6424     V = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, V);
6425     V = DAG.getBitcast(MVT::v8i16, V);
6426   }
6427   for (unsigned i = V ? 4 : 0; i < 16; i += 2) {
6428     bool ThisIsNonZero = NonZeroMask[i];
6429     bool NextIsNonZero = NonZeroMask[i + 1];
6430     if (!ThisIsNonZero && !NextIsNonZero)
6431       continue;
6432 
6433     SDValue Elt;
6434     if (ThisIsNonZero) {
6435       if (NumZero || NextIsNonZero)
6436         Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6437       else
6438         Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
6439     }
6440 
6441     if (NextIsNonZero) {
6442       SDValue NextElt = Op.getOperand(i + 1);
6443       if (i == 0 && NumZero)
6444         NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
6445       else
6446         NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
6447       NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
6448                             DAG.getConstant(8, dl, MVT::i8));
6449       if (ThisIsNonZero)
6450         Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
6451       else
6452         Elt = NextElt;
6453     }
6454 
6455     // If our first insertion is not the first index or zeros are needed, then
6456     // insert into a zero vector. Otherwise, use SCALAR_TO_VECTOR (leaves high
6457     // elements undefined).
6458     if (!V) {
6459       if (i != 0 || NumZero)
6460         V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
6461       else {
6462         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
6463         V = DAG.getBitcast(MVT::v8i16, V);
6464         continue;
6465       }
6466     }
6467     Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
6468     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
6469                     DAG.getIntPtrConstant(i / 2, dl));
6470   }
6471 
6472   return DAG.getBitcast(MVT::v16i8, V);
6473 }
6474 
6475 /// Custom lower build_vector of v8i16.
6476 static SDValue LowerBuildVectorv8i16(SDValue Op, const APInt &NonZeroMask,
6477                                      unsigned NumNonZero, unsigned NumZero,
6478                                      SelectionDAG &DAG,
6479                                      const X86Subtarget &Subtarget) {
6480   if (NumNonZero > 4 && !Subtarget.hasSSE41())
6481     return SDValue();
6482 
6483   // Use PINSRW to insert each i16 element directly.
6484   return LowerBuildVectorAsInsert(Op, NonZeroMask, NumNonZero, NumZero, DAG,
6485                                   Subtarget);
6486 }
6487 
6488 /// Custom lower build_vector of v4i32 or v4f32.
6489 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
6490                                      const X86Subtarget &Subtarget) {
6491   // If this is a splat of a pair of elements, use MOVDDUP (unless the target
6492   // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
6493   // Because we're creating a less complicated build vector here, we may enable
6494   // further folding of the MOVDDUP via shuffle transforms.
6495   if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
6496       Op.getOperand(0) == Op.getOperand(2) &&
6497       Op.getOperand(1) == Op.getOperand(3) &&
6498       Op.getOperand(0) != Op.getOperand(1)) {
6499     SDLoc DL(Op);
6500     MVT VT = Op.getSimpleValueType();
6501     MVT EltVT = VT.getVectorElementType();
6502     // Create a new build vector with the first 2 elements followed by undef
6503     // padding, bitcast to v2f64, duplicate, and bitcast back.
6504     SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
6505                        DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
6506     SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
6507     SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
6508     return DAG.getBitcast(VT, Dup);
6509   }
6510 
6511   // Find all zeroable elements.
6512   std::bitset<4> Zeroable, Undefs;
6513   for (int i = 0; i < 4; ++i) {
6514     SDValue Elt = Op.getOperand(i);
6515     Undefs[i] = Elt.isUndef();
6516     Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
6517   }
6518   assert(Zeroable.size() - Zeroable.count() > 1 &&
6519          "We expect at least two non-zero elements!");
6520 
6521   // We only know how to deal with build_vector nodes where elements are either
6522   // zeroable or extract_vector_elt with constant index.
6523   SDValue FirstNonZero;
6524   unsigned FirstNonZeroIdx;
6525   for (unsigned i = 0; i < 4; ++i) {
6526     if (Zeroable[i])
6527       continue;
6528     SDValue Elt = Op.getOperand(i);
6529     if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6530         !isa<ConstantSDNode>(Elt.getOperand(1)))
6531       return SDValue();
6532     // Make sure that this node is extracting from a 128-bit vector.
6533     MVT VT = Elt.getOperand(0).getSimpleValueType();
6534     if (!VT.is128BitVector())
6535       return SDValue();
6536     if (!FirstNonZero.getNode()) {
6537       FirstNonZero = Elt;
6538       FirstNonZeroIdx = i;
6539     }
6540   }
6541 
6542   assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
6543   SDValue V1 = FirstNonZero.getOperand(0);
6544   MVT VT = V1.getSimpleValueType();
6545 
6546   // See if this build_vector can be lowered as a blend with zero.
6547   SDValue Elt;
6548   unsigned EltMaskIdx, EltIdx;
6549   int Mask[4];
6550   for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
6551     if (Zeroable[EltIdx]) {
6552       // The zero vector will be on the right hand side.
6553       Mask[EltIdx] = EltIdx+4;
6554       continue;
6555     }
6556 
6557     Elt = Op->getOperand(EltIdx);
6558     // By construction, Elt is an EXTRACT_VECTOR_ELT with a constant index.
6559     EltMaskIdx = Elt.getConstantOperandVal(1);
6560     if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
6561       break;
6562     Mask[EltIdx] = EltIdx;
6563   }
6564 
6565   if (EltIdx == 4) {
6566     // Let the shuffle legalizer deal with blend operations.
6567     SDValue VZeroOrUndef = (Zeroable == Undefs)
6568                                ? DAG.getUNDEF(VT)
6569                                : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
6570     if (V1.getSimpleValueType() != VT)
6571       V1 = DAG.getBitcast(VT, V1);
6572     return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
6573   }
6574 
6575   // See if we can lower this build_vector to an INSERTPS.
6576   if (!Subtarget.hasSSE41())
6577     return SDValue();
6578 
6579   SDValue V2 = Elt.getOperand(0);
6580   if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
6581     V1 = SDValue();
6582 
6583   bool CanFold = true;
6584   for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
6585     if (Zeroable[i])
6586       continue;
6587 
6588     SDValue Current = Op->getOperand(i);
6589     SDValue SrcVector = Current->getOperand(0);
6590     if (!V1.getNode())
6591       V1 = SrcVector;
6592     CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
6593   }
6594 
6595   if (!CanFold)
6596     return SDValue();
6597 
6598   assert(V1.getNode() && "Expected at least two non-zero elements!");
6599   if (V1.getSimpleValueType() != MVT::v4f32)
6600     V1 = DAG.getBitcast(MVT::v4f32, V1);
6601   if (V2.getSimpleValueType() != MVT::v4f32)
6602     V2 = DAG.getBitcast(MVT::v4f32, V2);
6603 
6604   // Ok, we can emit an INSERTPS instruction.
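  // The INSERTPS immediate packs CountS in bits [7:6] (source lane), CountD in
  // bits [5:4] (destination lane) and ZMask in bits [3:0] (lanes forced to
  // zero). Example (illustrative only): copying lane 2 of V2 into lane 1 of V1
  // while zeroing lane 3 encodes as (2 << 6) | (1 << 4) | 0b1000 = 0x98.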
6605   unsigned ZMask = Zeroable.to_ulong();
6606 
6607   unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
6608   assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
6609   SDLoc DL(Op);
6610   SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
6611                                DAG.getIntPtrConstant(InsertPSMask, DL, true));
6612   return DAG.getBitcast(VT, Result);
6613 }
6614 
6615 /// Return a vector logical shift node.
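/// For example (illustrative only): getVShift(/*isLeft=*/true, MVT::v4i32, V,
/// /*NumBits=*/32, ...) bitcasts V to v16i8, emits a VSHLDQ (PSLLDQ) by 4
/// bytes and bitcasts the result back to v4i32.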
6616 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
6617                          SelectionDAG &DAG, const TargetLowering &TLI,
6618                          const SDLoc &dl) {
6619   assert(VT.is128BitVector() && "Unknown type for VShift");
6620   MVT ShVT = MVT::v16i8;
6621   unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
6622   SrcOp = DAG.getBitcast(ShVT, SrcOp);
6623   assert(NumBits % 8 == 0 && "Only support byte sized shifts");
6624   SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
6625   return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
6626 }
6627 
6628 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
6629                                       SelectionDAG &DAG) {
6630 
6631   // Check if the scalar load can be widened into a vector load, and if the
6632   // address is "base + cst", see if the cst can be "absorbed" into
6633   // the shuffle mask.
6634   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
6635     SDValue Ptr = LD->getBasePtr();
6636     if (!ISD::isNormalLoad(LD) || !LD->isSimple())
6637       return SDValue();
6638     EVT PVT = LD->getValueType(0);
6639     if (PVT != MVT::i32 && PVT != MVT::f32)
6640       return SDValue();
6641 
6642     int FI = -1;
6643     int64_t Offset = 0;
6644     if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
6645       FI = FINode->getIndex();
6646       Offset = 0;
6647     } else if (DAG.isBaseWithConstantOffset(Ptr) &&
6648                isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6649       FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6650       Offset = Ptr.getConstantOperandVal(1);
6651       Ptr = Ptr.getOperand(0);
6652     } else {
6653       return SDValue();
6654     }
6655 
6656     // FIXME: 256-bit vector instructions don't require a strict alignment,
6657     // improve this code to support it better.
6658     Align RequiredAlign(VT.getSizeInBits() / 8);
6659     SDValue Chain = LD->getChain();
6660     // Make sure the stack object alignment is at least 16 or 32.
6661     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
6662     MaybeAlign InferredAlign = DAG.InferPtrAlign(Ptr);
6663     if (!InferredAlign || *InferredAlign < RequiredAlign) {
6664       if (MFI.isFixedObjectIndex(FI)) {
6665         // Can't change the alignment. FIXME: It's possible to compute
6666         // the exact stack offset and reference FI + adjusted offset instead,
6667         // if someone *really* cares about this; that's the way to implement it.
6668         return SDValue();
6669       } else {
6670         MFI.setObjectAlignment(FI, RequiredAlign);
6671       }
6672     }
6673 
6674     // (Offset % 16 or 32) must be a multiple of 4. The address is then
6675     // Ptr + (Offset & ~15).
6676     if (Offset < 0)
6677       return SDValue();
6678     if ((Offset % RequiredAlign.value()) & 3)
6679       return SDValue();
6680     int64_t StartOffset = Offset & ~int64_t(RequiredAlign.value() - 1);
6681     if (StartOffset) {
6682       SDLoc DL(Ptr);
6683       Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
6684                         DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
6685     }
6686 
6687     int EltNo = (Offset - StartOffset) >> 2;
6688     unsigned NumElems = VT.getVectorNumElements();
6689 
6690     EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6691     SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6692                              LD->getPointerInfo().getWithOffset(StartOffset));
6693 
6694     SmallVector<int, 8> Mask(NumElems, EltNo);
6695 
6696     return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
6697   }
6698 
6699   return SDValue();
6700 }
6701 
6702 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
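// For example (illustrative only): for Elt = (trunc (srl (load i64 %p), 32))
// this returns the i64 load with ByteOffset == 4.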
6703 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
6704   if (ISD::isNON_EXTLoad(Elt.getNode())) {
6705     auto *BaseLd = cast<LoadSDNode>(Elt);
6706     if (!BaseLd->isSimple())
6707       return false;
6708     Ld = BaseLd;
6709     ByteOffset = 0;
6710     return true;
6711   }
6712 
6713   switch (Elt.getOpcode()) {
6714   case ISD::BITCAST:
6715   case ISD::TRUNCATE:
6716   case ISD::SCALAR_TO_VECTOR:
6717     return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
6718   case ISD::SRL:
6719     if (auto *AmtC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
6720       uint64_t Amt = AmtC->getZExtValue();
6721       if ((Amt % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
6722         ByteOffset += Amt / 8;
6723         return true;
6724       }
6725     }
6726     break;
6727   case ISD::EXTRACT_VECTOR_ELT:
6728     if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
6729       SDValue Src = Elt.getOperand(0);
6730       unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
6731       unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
6732       if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
6733           findEltLoadSrc(Src, Ld, ByteOffset)) {
6734         uint64_t Idx = IdxC->getZExtValue();
6735         ByteOffset += Idx * (SrcSizeInBits / 8);
6736         return true;
6737       }
6738     }
6739     break;
6740   }
6741 
6742   return false;
6743 }
6744 
6745 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6746 /// elements can be replaced by a single large load which has the same value as
6747 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6748 ///
6749 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
6750 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6751                                         const SDLoc &DL, SelectionDAG &DAG,
6752                                         const X86Subtarget &Subtarget,
6753                                         bool IsAfterLegalize) {
6754   if ((VT.getScalarSizeInBits() % 8) != 0)
6755     return SDValue();
6756 
6757   unsigned NumElems = Elts.size();
6758 
6759   int LastLoadedElt = -1;
6760   APInt LoadMask = APInt::getZero(NumElems);
6761   APInt ZeroMask = APInt::getZero(NumElems);
6762   APInt UndefMask = APInt::getZero(NumElems);
6763 
6764   SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
6765   SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
6766 
6767   // For each element in the initializer, see if we've found a load, zero or an
6768   // undef.
6769   for (unsigned i = 0; i < NumElems; ++i) {
6770     SDValue Elt = peekThroughBitcasts(Elts[i]);
6771     if (!Elt.getNode())
6772       return SDValue();
6773     if (Elt.isUndef()) {
6774       UndefMask.setBit(i);
6775       continue;
6776     }
6777     if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
6778       ZeroMask.setBit(i);
6779       continue;
6780     }
6781 
6782     // Each loaded element must be the correct fractional portion of the
6783     // requested vector load.
6784     unsigned EltSizeInBits = Elt.getValueSizeInBits();
6785     if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
6786       return SDValue();
6787 
6788     if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
6789       return SDValue();
6790     unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
6791     if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
6792       return SDValue();
6793 
6794     LoadMask.setBit(i);
6795     LastLoadedElt = i;
6796   }
6797   assert((ZeroMask.popcount() + UndefMask.popcount() + LoadMask.popcount()) ==
6798              NumElems &&
6799          "Incomplete element masks");
6800 
6801   // Handle Special Cases - all undef or undef/zero.
6802   if (UndefMask.popcount() == NumElems)
6803     return DAG.getUNDEF(VT);
6804   if ((ZeroMask.popcount() + UndefMask.popcount()) == NumElems)
6805     return VT.isInteger() ? DAG.getConstant(0, DL, VT)
6806                           : DAG.getConstantFP(0.0, DL, VT);
6807 
6808   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6809   int FirstLoadedElt = LoadMask.countr_zero();
6810   SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
6811   EVT EltBaseVT = EltBase.getValueType();
6812   assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
6813          "Register/Memory size mismatch");
6814   LoadSDNode *LDBase = Loads[FirstLoadedElt];
6815   assert(LDBase && "Did not find base load for merging consecutive loads");
6816   unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
6817   unsigned BaseSizeInBytes = BaseSizeInBits / 8;
6818   int NumLoadedElts = (1 + LastLoadedElt - FirstLoadedElt);
6819   int LoadSizeInBits = NumLoadedElts * BaseSizeInBits;
6820   assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
6821 
6822   // TODO: Support offsetting the base load.
6823   if (ByteOffsets[FirstLoadedElt] != 0)
6824     return SDValue();
6825 
6826   // Check to see if the element's load is consecutive to the base load
6827   // or offset from a previous (already checked) load.
6828   auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
6829     LoadSDNode *Ld = Loads[EltIdx];
6830     int64_t ByteOffset = ByteOffsets[EltIdx];
6831     if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
6832       int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
6833       return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
6834               Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
6835     }
6836     return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
6837                                               EltIdx - FirstLoadedElt);
6838   };
6839 
6840   // Consecutive loads can contain UNDEFs but not ZERO elements.
6841   // Consecutive loads with UNDEF and ZERO elements require an
6842   // additional shuffle stage to clear the ZERO elements.
6843   bool IsConsecutiveLoad = true;
6844   bool IsConsecutiveLoadWithZeros = true;
6845   for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
6846     if (LoadMask[i]) {
6847       if (!CheckConsecutiveLoad(LDBase, i)) {
6848         IsConsecutiveLoad = false;
6849         IsConsecutiveLoadWithZeros = false;
6850         break;
6851       }
6852     } else if (ZeroMask[i]) {
6853       IsConsecutiveLoad = false;
6854     }
6855   }
6856 
6857   auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
6858     auto MMOFlags = LDBase->getMemOperand()->getFlags();
6859     assert(LDBase->isSimple() &&
6860            "Cannot merge volatile or atomic loads.");
6861     SDValue NewLd =
6862         DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6863                     LDBase->getPointerInfo(), LDBase->getOriginalAlign(),
6864                     MMOFlags);
6865     for (auto *LD : Loads)
6866       if (LD)
6867         DAG.makeEquivalentMemoryOrdering(LD, NewLd);
6868     return NewLd;
6869   };
6870 
6871   // Check if the base load is entirely dereferenceable.
6872   bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
6873       VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
6874 
6875   // LOAD - all consecutive load/undefs (must start/end with a load or be
6876   // entirely dereferenceable). If we have found an entire vector of loads and
6877   // undefs, then return a large load of the entire vector width starting at the
6878   // base pointer. If the vector contains zeros, then attempt to shuffle those
6879   // elements.
6880   if (FirstLoadedElt == 0 &&
6881       (NumLoadedElts == (int)NumElems || IsDereferenceable) &&
6882       (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
6883     if (IsAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
6884       return SDValue();
6885 
6886     // Don't create 256-bit non-temporal aligned loads without AVX2 as these
6887     // will lower to regular temporal loads and use the cache.
6888     if (LDBase->isNonTemporal() && LDBase->getAlign() >= Align(32) &&
6889         VT.is256BitVector() && !Subtarget.hasInt256())
6890       return SDValue();
6891 
6892     if (NumElems == 1)
6893       return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
6894 
6895     if (!ZeroMask)
6896       return CreateLoad(VT, LDBase);
6897 
6898     // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
6899     // vector and a zero vector to clear out the zero elements.
6900     if (!IsAfterLegalize && VT.isVector()) {
6901       unsigned NumMaskElts = VT.getVectorNumElements();
6902       if ((NumMaskElts % NumElems) == 0) {
6903         unsigned Scale = NumMaskElts / NumElems;
6904         SmallVector<int, 4> ClearMask(NumMaskElts, -1);
6905         for (unsigned i = 0; i < NumElems; ++i) {
6906           if (UndefMask[i])
6907             continue;
6908           int Offset = ZeroMask[i] ? NumMaskElts : 0;
6909           for (unsigned j = 0; j != Scale; ++j)
6910             ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
6911         }
6912         SDValue V = CreateLoad(VT, LDBase);
6913         SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
6914                                    : DAG.getConstantFP(0.0, DL, VT);
6915         return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
6916       }
6917     }
6918   }
6919 
6920   // If the upper half of a ymm/zmm load is undef then just load the lower half.
6921   if (VT.is256BitVector() || VT.is512BitVector()) {
6922     unsigned HalfNumElems = NumElems / 2;
6923     if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnes()) {
6924       EVT HalfVT =
6925           EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
6926       SDValue HalfLD =
6927           EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
6928                                    DAG, Subtarget, IsAfterLegalize);
6929       if (HalfLD)
6930         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
6931                            HalfLD, DAG.getIntPtrConstant(0, DL));
6932     }
6933   }
6934 
6935   // VZEXT_LOAD - consecutive 16/32/64-bit load/undefs followed by zeros/undefs.
6936   if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
6937       ((LoadSizeInBits == 16 && Subtarget.hasFP16()) || LoadSizeInBits == 32 ||
6938        LoadSizeInBits == 64) &&
6939       ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
6940     MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
6941                                       : MVT::getIntegerVT(LoadSizeInBits);
6942     MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
6943     // Allow v4f32 on SSE1 only targets.
6944     // FIXME: Add more isel patterns so we can just use VT directly.
6945     if (!Subtarget.hasSSE2() && VT == MVT::v4f32)
6946       VecVT = MVT::v4f32;
6947     if (TLI.isTypeLegal(VecVT)) {
6948       SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
6949       SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6950       SDValue ResNode = DAG.getMemIntrinsicNode(
6951           X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, LDBase->getPointerInfo(),
6952           LDBase->getOriginalAlign(), MachineMemOperand::MOLoad);
6953       for (auto *LD : Loads)
6954         if (LD)
6955           DAG.makeEquivalentMemoryOrdering(LD, ResNode);
6956       return DAG.getBitcast(VT, ResNode);
6957     }
6958   }
6959 
6960   // BROADCAST - match the smallest possible repetition pattern, load that
6961   // scalar/subvector element and then broadcast to the entire vector.
6962   if (ZeroMask.isZero() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
6963       (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
6964     for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
6965       unsigned RepeatSize = SubElems * BaseSizeInBits;
6966       unsigned ScalarSize = std::min(RepeatSize, 64u);
6967       if (!Subtarget.hasAVX2() && ScalarSize < 32)
6968         continue;
6969 
6970       // Don't attempt a 1:N subvector broadcast - it should be caught by
6971       // combineConcatVectorOps, otherwise it will cause infinite loops.
6972       if (RepeatSize > ScalarSize && SubElems == 1)
6973         continue;
6974 
6975       bool Match = true;
6976       SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
6977       for (unsigned i = 0; i != NumElems && Match; ++i) {
6978         if (!LoadMask[i])
6979           continue;
6980         SDValue Elt = peekThroughBitcasts(Elts[i]);
6981         if (RepeatedLoads[i % SubElems].isUndef())
6982           RepeatedLoads[i % SubElems] = Elt;
6983         else
6984           Match &= (RepeatedLoads[i % SubElems] == Elt);
6985       }
6986 
6987       // We must have loads at both ends of the repetition.
6988       Match &= !RepeatedLoads.front().isUndef();
6989       Match &= !RepeatedLoads.back().isUndef();
6990       if (!Match)
6991         continue;
6992 
6993       EVT RepeatVT =
6994           VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
6995               ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
6996               : EVT::getFloatingPointVT(ScalarSize);
6997       if (RepeatSize > ScalarSize)
6998         RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
6999                                     RepeatSize / ScalarSize);
7000       EVT BroadcastVT =
7001           EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
7002                            VT.getSizeInBits() / ScalarSize);
7003       if (TLI.isTypeLegal(BroadcastVT)) {
7004         if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
7005                 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, IsAfterLegalize)) {
7006           SDValue Broadcast = RepeatLoad;
7007           if (RepeatSize > ScalarSize) {
7008             while (Broadcast.getValueSizeInBits() < VT.getSizeInBits())
7009               Broadcast = concatSubVectors(Broadcast, Broadcast, DAG, DL);
7010           } else {
7011             if (!Subtarget.hasAVX2() &&
7012                 !X86::mayFoldLoadIntoBroadcastFromMem(
7013                     RepeatLoad, RepeatVT.getScalarType().getSimpleVT(),
7014                     Subtarget,
7015                     /*AssumeSingleUse=*/true))
7016               return SDValue();
7017             Broadcast =
7018                 DAG.getNode(X86ISD::VBROADCAST, DL, BroadcastVT, RepeatLoad);
7019           }
7020           return DAG.getBitcast(VT, Broadcast);
7021         }
7022       }
7023     }
7024   }
7025 
7026   return SDValue();
7027 }
7028 
7029 // Combine a vector op (shuffles etc.) that is equal to build_vector load1,
7030 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
7031 // are consecutive, non-overlapping, and in the right order.
7032 static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
7033                                          SelectionDAG &DAG,
7034                                          const X86Subtarget &Subtarget,
7035                                          bool IsAfterLegalize) {
7036   SmallVector<SDValue, 64> Elts;
7037   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
7038     if (SDValue Elt = getShuffleScalarElt(Op, i, DAG, 0)) {
7039       Elts.push_back(Elt);
7040       continue;
7041     }
7042     return SDValue();
7043   }
7044   assert(Elts.size() == VT.getVectorNumElements());
7045   return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
7046                                   IsAfterLegalize);
7047 }
7048 
7049 static Constant *getConstantVector(MVT VT, ArrayRef<APInt> Bits,
7050                                    const APInt &Undefs, LLVMContext &C) {
7051   unsigned ScalarSize = VT.getScalarSizeInBits();
7052   Type *Ty = EVT(VT.getScalarType()).getTypeForEVT(C);
7053 
7054   auto getConstantScalar = [&](const APInt &Val) -> Constant * {
7055     if (VT.isFloatingPoint()) {
7056       if (ScalarSize == 16)
7057         return ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
7058       if (ScalarSize == 32)
7059         return ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
7060       assert(ScalarSize == 64 && "Unsupported floating point scalar size");
7061       return ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
7062     }
7063     return Constant::getIntegerValue(Ty, Val);
7064   };
7065 
7066   SmallVector<Constant *, 32> ConstantVec;
7067   for (unsigned I = 0, E = Bits.size(); I != E; ++I)
7068     ConstantVec.push_back(Undefs[I] ? UndefValue::get(Ty)
7069                                     : getConstantScalar(Bits[I]));
7070 
7071   return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
7072 }
7073 
7074 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
7075                                    unsigned SplatBitSize, LLVMContext &C) {
7076   unsigned ScalarSize = VT.getScalarSizeInBits();
7077 
7078   auto getConstantScalar = [&](const APInt &Val) -> Constant * {
7079     if (VT.isFloatingPoint()) {
7080       if (ScalarSize == 16)
7081         return ConstantFP::get(C, APFloat(APFloat::IEEEhalf(), Val));
7082       if (ScalarSize == 32)
7083         return ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
7084       assert(ScalarSize == 64 && "Unsupported floating point scalar size");
7085       return ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
7086     }
7087     return Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
7088   };
7089 
7090   if (ScalarSize == SplatBitSize)
7091     return getConstantScalar(SplatValue);
7092 
7093   unsigned NumElm = SplatBitSize / ScalarSize;
7094   SmallVector<Constant *, 32> ConstantVec;
7095   for (unsigned I = 0; I != NumElm; ++I) {
7096     APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * I);
7097     ConstantVec.push_back(getConstantScalar(Val));
7098   }
7099   return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
7100 }
7101 
7102 static bool isFoldableUseOfShuffle(SDNode *N) {
7103   for (auto *U : N->uses()) {
7104     unsigned Opc = U->getOpcode();
7105     // VPERMV/VPERMV3 shuffles can never fold their index operands.
7106     if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
7107       return false;
7108     if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
7109       return false;
7110     if (isTargetShuffle(Opc))
7111       return true;
7112     if (Opc == ISD::BITCAST) // Ignore bitcasts
7113       return isFoldableUseOfShuffle(U);
7114     if (N->hasOneUse()) {
7115       // TODO: There may be some general way to know if an SDNode can
7116       // be folded. Currently we only know whether an MI is foldable.
7117       if (Opc == X86ISD::VPDPBUSD && U->getOperand(2).getNode() != N)
7118         return false;
7119       return true;
7120     }
7121   }
7122   return false;
7123 }
7124 
7125 /// Attempt to use the vbroadcast instruction to generate a splat value
7126 /// from a splat BUILD_VECTOR which uses:
7127 ///  a. A single scalar load, or a constant.
7128 ///  b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
7129 ///
7130 /// The VBROADCAST node is returned when a pattern is found,
7131 /// or SDValue() otherwise.
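///
/// Illustrative sketch (subject to the subtarget and legality checks below):
///   (build_vector (load p), (load p), (load p), (load p))
///     --> (X86ISD::VBROADCAST_LOAD p)
///   (build_vector C, C, C, C) with constant C
///     --> (X86ISD::VBROADCAST_LOAD <constant pool entry for C>)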
7132 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
7133                                            const X86Subtarget &Subtarget,
7134                                            SelectionDAG &DAG) {
7135   // VBROADCAST requires AVX.
7136   // TODO: Splats could be generated for non-AVX CPUs using SSE
7137   // instructions, but there's less potential gain for only 128-bit vectors.
7138   if (!Subtarget.hasAVX())
7139     return SDValue();
7140 
7141   MVT VT = BVOp->getSimpleValueType(0);
7142   unsigned NumElts = VT.getVectorNumElements();
7143   SDLoc dl(BVOp);
7144 
7145   assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
7146          "Unsupported vector type for broadcast.");
7147 
7148   // See if the build vector is a repeating sequence of scalars (inc. splat).
7149   SDValue Ld;
7150   BitVector UndefElements;
7151   SmallVector<SDValue, 16> Sequence;
7152   if (BVOp->getRepeatedSequence(Sequence, &UndefElements)) {
7153     assert((NumElts % Sequence.size()) == 0 && "Sequence doesn't fit.");
7154     if (Sequence.size() == 1)
7155       Ld = Sequence[0];
7156   }
7157 
7158   // Attempt to use VBROADCASTM
7159   // From this pattern:
7160   // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
7161   // b. t1 = (build_vector t0 t0)
7162   //
7163   // Create (VBROADCASTM v2i1 X)
7164   if (!Sequence.empty() && Subtarget.hasCDI()) {
7165     // If not a splat, are the upper sequence values zeroable?
7166     unsigned SeqLen = Sequence.size();
7167     bool UpperZeroOrUndef =
7168         SeqLen == 1 ||
7169         llvm::all_of(ArrayRef(Sequence).drop_front(), [](SDValue V) {
7170           return !V || V.isUndef() || isNullConstant(V);
7171         });
7172     SDValue Op0 = Sequence[0];
7173     if (UpperZeroOrUndef && ((Op0.getOpcode() == ISD::BITCAST) ||
7174                              (Op0.getOpcode() == ISD::ZERO_EXTEND &&
7175                               Op0.getOperand(0).getOpcode() == ISD::BITCAST))) {
7176       SDValue BOperand = Op0.getOpcode() == ISD::BITCAST
7177                              ? Op0.getOperand(0)
7178                              : Op0.getOperand(0).getOperand(0);
7179       MVT MaskVT = BOperand.getSimpleValueType();
7180       MVT EltType = MVT::getIntegerVT(VT.getScalarSizeInBits() * SeqLen);
7181       if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) ||  // for broadcastmb2q
7182           (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
7183         MVT BcstVT = MVT::getVectorVT(EltType, NumElts / SeqLen);
7184         if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
7185           unsigned Scale = 512 / VT.getSizeInBits();
7186           BcstVT = MVT::getVectorVT(EltType, Scale * (NumElts / SeqLen));
7187         }
7188         SDValue Bcst = DAG.getNode(X86ISD::VBROADCASTM, dl, BcstVT, BOperand);
7189         if (BcstVT.getSizeInBits() != VT.getSizeInBits())
7190           Bcst = extractSubVector(Bcst, 0, DAG, dl, VT.getSizeInBits());
7191         return DAG.getBitcast(VT, Bcst);
7192       }
7193     }
7194   }
7195 
7196   unsigned NumUndefElts = UndefElements.count();
7197   if (!Ld || (NumElts - NumUndefElts) <= 1) {
7198     APInt SplatValue, Undef;
7199     unsigned SplatBitSize;
7200     bool HasUndef;
7201     // Check if this is a repeated constant pattern suitable for broadcasting.
7202     if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
7203         SplatBitSize > VT.getScalarSizeInBits() &&
7204         SplatBitSize < VT.getSizeInBits()) {
7205       // Avoid replacing with a broadcast when the build_vector is used by a
7206       // shuffle instruction, to preserve the present custom lowering of shuffles.
7207       if (isFoldableUseOfShuffle(BVOp))
7208         return SDValue();
7209       // replace BUILD_VECTOR with broadcast of the repeated constants.
7210       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7211       LLVMContext *Ctx = DAG.getContext();
7212       MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
7213       if (SplatBitSize == 32 || SplatBitSize == 64 ||
7214           (SplatBitSize < 32 && Subtarget.hasAVX2())) {
7215         // Load the constant scalar/subvector and broadcast it.
7216         MVT CVT = MVT::getIntegerVT(SplatBitSize);
7217         Constant *C = getConstantVector(VT, SplatValue, SplatBitSize, *Ctx);
7218         SDValue CP = DAG.getConstantPool(C, PVT);
7219         unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
7220 
7221         Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
7222         SDVTList Tys = DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
7223         SDValue Ops[] = {DAG.getEntryNode(), CP};
7224         MachinePointerInfo MPI =
7225             MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
7226         SDValue Brdcst =
7227             DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
7228                                     MPI, Alignment, MachineMemOperand::MOLoad);
7229         return DAG.getBitcast(VT, Brdcst);
7230       }
7231       if (SplatBitSize > 64) {
7232         // Load the vector of constants and broadcast it.
7233         Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize, *Ctx);
7234         SDValue VCP = DAG.getConstantPool(VecC, PVT);
7235         unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
7236         MVT VVT = MVT::getVectorVT(VT.getScalarType(), NumElm);
7237         Align Alignment = cast<ConstantPoolSDNode>(VCP)->getAlign();
7238         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7239         SDValue Ops[] = {DAG.getEntryNode(), VCP};
7240         MachinePointerInfo MPI =
7241             MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
7242         return DAG.getMemIntrinsicNode(X86ISD::SUBV_BROADCAST_LOAD, dl, Tys,
7243                                        Ops, VVT, MPI, Alignment,
7244                                        MachineMemOperand::MOLoad);
7245       }
7246     }
7247 
7248     // If we are moving a scalar into a vector (Ld must be set and all elements
7249     // but 1 are undef) and that operation is not obviously supported by
7250     // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
7251     // That's better than general shuffling and may eliminate a load to GPR and
7252     // move from scalar to vector register.
7253     if (!Ld || NumElts - NumUndefElts != 1)
7254       return SDValue();
7255     unsigned ScalarSize = Ld.getValueSizeInBits();
7256     if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
7257       return SDValue();
7258   }
7259 
7260   bool ConstSplatVal =
7261       (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
7262   bool IsLoad = ISD::isNormalLoad(Ld.getNode());
7263 
7264   // TODO: Handle broadcasts of non-constant sequences.
7265 
7266   // Make sure that all of the users of a non-constant load are from the
7267   // BUILD_VECTOR node.
7268   // FIXME: Is the use count needed for non-constant, non-load case?
7269   if (!ConstSplatVal && !IsLoad && !BVOp->isOnlyUserOf(Ld.getNode()))
7270     return SDValue();
7271 
7272   unsigned ScalarSize = Ld.getValueSizeInBits();
7273   bool IsGE256 = (VT.getSizeInBits() >= 256);
7274 
7275   // When optimizing for size, generate up to 5 extra bytes for a broadcast
7276   // instruction to save 8 or more bytes of constant pool data.
7277   // TODO: If multiple splats are generated to load the same constant,
7278   // it may be detrimental to overall size. There needs to be a way to detect
7279   // that condition to know if this is truly a size win.
7280   bool OptForSize = DAG.shouldOptForSize();
7281 
7282   // Handle broadcasting a single constant scalar from the constant pool
7283   // into a vector.
7284   // On Sandybridge (no AVX2), it is still better to load a constant vector
7285   // from the constant pool and not to broadcast it from a scalar.
7286   // But override that restriction when optimizing for size.
7287   // TODO: Check if splatting is recommended for other AVX-capable CPUs.
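  // Illustrative sketch: a v8f32 splat of the constant 42.0f becomes a
  // VBROADCAST_LOAD of a single f32 constant-pool entry instead of a full
  // 8 x f32 constant-pool vector.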
7288   if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
7289     EVT CVT = Ld.getValueType();
7290     assert(!CVT.isVector() && "Must not broadcast a vector type");
7291 
7292     // Splat f16, f32, i32, v4f64, v4i64 in all cases with AVX2.
7293     // For size optimization, also splat v2f64 and v2i64, and for size opt
7294     // with AVX2, also splat i8 and i16.
7295     // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
7296     if (ScalarSize == 32 ||
7297         (ScalarSize == 64 && (IsGE256 || Subtarget.hasVLX())) ||
7298         (CVT == MVT::f16 && Subtarget.hasAVX2()) ||
7299         (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
7300       const Constant *C = nullptr;
7301       if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
7302         C = CI->getConstantIntValue();
7303       else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
7304         C = CF->getConstantFPValue();
7305 
7306       assert(C && "Invalid constant type");
7307 
7308       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7309       SDValue CP =
7310           DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
7311       Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
7312 
7313       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7314       SDValue Ops[] = {DAG.getEntryNode(), CP};
7315       MachinePointerInfo MPI =
7316           MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
7317       return DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
7318                                      MPI, Alignment, MachineMemOperand::MOLoad);
7319     }
7320   }
7321 
7322   // Handle AVX2 in-register broadcasts.
7323   if (!IsLoad && Subtarget.hasInt256() &&
7324       (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
7325     return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
7326 
7327   // The scalar source must be a normal load.
7328   if (!IsLoad)
7329     return SDValue();
7330 
7331   // Make sure the non-chain result is only used by this build vector.
7332   if (!Ld->hasNUsesOfValue(NumElts - NumUndefElts, 0))
7333     return SDValue();
7334 
7335   if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
7336       (Subtarget.hasVLX() && ScalarSize == 64)) {
7337     auto *LN = cast<LoadSDNode>(Ld);
7338     SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7339     SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
7340     SDValue BCast =
7341         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
7342                                 LN->getMemoryVT(), LN->getMemOperand());
7343     DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
7344     return BCast;
7345   }
7346 
7347   // The integer check is needed for the 64-bit element into a 128-bit vector
7348   // case, so it doesn't match double, since there is no 'vbroadcastsd xmm'.
7349   if (Subtarget.hasInt256() && Ld.getValueType().isInteger() &&
7350       (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) {
7351     auto *LN = cast<LoadSDNode>(Ld);
7352     SDVTList Tys = DAG.getVTList(VT, MVT::Other);
7353     SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
7354     SDValue BCast =
7355         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
7356                                 LN->getMemoryVT(), LN->getMemOperand());
7357     DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
7358     return BCast;
7359   }
7360 
7361   if (ScalarSize == 16 && Subtarget.hasFP16() && IsGE256)
7362     return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
7363 
7364   // Unsupported broadcast.
7365   return SDValue();
7366 }
7367 
7368 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
7369 /// underlying vector and index.
7370 ///
7371 /// Modifies \p ExtractedFromVec to the real vector and returns the real
7372 /// index.
7373 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
7374                                          SDValue ExtIdx) {
7375   int Idx = ExtIdx->getAsZExtVal();
7376   if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
7377     return Idx;
7378 
7379   // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
7380   // lowered this:
7381   //   (extract_vector_elt (v8f32 %1), Constant<6>)
7382   // to:
7383   //   (extract_vector_elt (vector_shuffle<2,u,u,u>
7384   //                           (extract_subvector (v8f32 %0), Constant<4>),
7385   //                           undef)
7386   //                       Constant<0>)
7387   // In this case the vector is the extract_subvector expression and the index
7388   // is 2, as specified by the shuffle.
7389   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
7390   SDValue ShuffleVec = SVOp->getOperand(0);
7391   MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
7392   assert(ShuffleVecVT.getVectorElementType() ==
7393          ExtractedFromVec.getSimpleValueType().getVectorElementType());
7394 
7395   int ShuffleIdx = SVOp->getMaskElt(Idx);
7396   if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
7397     ExtractedFromVec = ShuffleVec;
7398     return ShuffleIdx;
7399   }
7400   return Idx;
7401 }
7402 
7403 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
7404   MVT VT = Op.getSimpleValueType();
7405 
7406   // Skip if insert_vec_elt is not supported.
7407   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7408   if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
7409     return SDValue();
7410 
7411   SDLoc DL(Op);
7412   unsigned NumElems = Op.getNumOperands();
7413 
7414   SDValue VecIn1;
7415   SDValue VecIn2;
7416   SmallVector<unsigned, 4> InsertIndices;
7417   SmallVector<int, 8> Mask(NumElems, -1);
7418 
7419   for (unsigned i = 0; i != NumElems; ++i) {
7420     unsigned Opc = Op.getOperand(i).getOpcode();
7421 
7422     if (Opc == ISD::UNDEF)
7423       continue;
7424 
7425     if (Opc != ISD::EXTRACT_VECTOR_ELT) {
7426       // Quit if more than 1 element needs inserting.
7427       if (InsertIndices.size() > 1)
7428         return SDValue();
7429 
7430       InsertIndices.push_back(i);
7431       continue;
7432     }
7433 
7434     SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
7435     SDValue ExtIdx = Op.getOperand(i).getOperand(1);
7436 
7437     // Quit if non-constant index.
7438     if (!isa<ConstantSDNode>(ExtIdx))
7439       return SDValue();
7440     int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
7441 
7442     // Quit if extracted from vector of different type.
7443     if (ExtractedFromVec.getValueType() != VT)
7444       return SDValue();
7445 
7446     if (!VecIn1.getNode())
7447       VecIn1 = ExtractedFromVec;
7448     else if (VecIn1 != ExtractedFromVec) {
7449       if (!VecIn2.getNode())
7450         VecIn2 = ExtractedFromVec;
7451       else if (VecIn2 != ExtractedFromVec)
7452         // Quit if there are more than 2 vectors to shuffle.
7453         return SDValue();
7454     }
7455 
7456     if (ExtractedFromVec == VecIn1)
7457       Mask[i] = Idx;
7458     else if (ExtractedFromVec == VecIn2)
7459       Mask[i] = Idx + NumElems;
7460   }
7461 
7462   if (!VecIn1.getNode())
7463     return SDValue();
7464 
7465   VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
7466   SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
7467 
7468   for (unsigned Idx : InsertIndices)
7469     NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
7470                      DAG.getIntPtrConstant(Idx, DL));
7471 
7472   return NV;
7473 }
7474 
7475 // Lower BUILD_VECTOR operation for v8bf16, v16bf16 and v32bf16 types.
7476 static SDValue LowerBUILD_VECTORvXbf16(SDValue Op, SelectionDAG &DAG,
7477                                        const X86Subtarget &Subtarget) {
7478   MVT VT = Op.getSimpleValueType();
7479   MVT IVT =
7480       VT.changeVectorElementType(Subtarget.hasFP16() ? MVT::f16 : MVT::i16);
7481   SmallVector<SDValue, 16> NewOps;
7482   for (unsigned I = 0, E = Op.getNumOperands(); I != E; ++I)
7483     NewOps.push_back(DAG.getBitcast(Subtarget.hasFP16() ? MVT::f16 : MVT::i16,
7484                                     Op.getOperand(I)));
7485   SDValue Res = DAG.getNode(ISD::BUILD_VECTOR, SDLoc(), IVT, NewOps);
7486   return DAG.getBitcast(VT, Res);
7487 }
7488 
7489 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
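// Illustrative sketch of the all-constant path below: a constant v8i1 such as
// <1,0,1,0,1,0,1,0> has its bits packed into an i8 immediate (0x55), which is
// then bitcast to v8i1.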
7490 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
7491                                      const X86Subtarget &Subtarget) {
7492 
7493   MVT VT = Op.getSimpleValueType();
7494   assert((VT.getVectorElementType() == MVT::i1) &&
7495          "Unexpected type in LowerBUILD_VECTORvXi1!");
7496 
7497   SDLoc dl(Op);
7498   if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
7499       ISD::isBuildVectorAllOnes(Op.getNode()))
7500     return Op;
7501 
7502   uint64_t Immediate = 0;
7503   SmallVector<unsigned, 16> NonConstIdx;
7504   bool IsSplat = true;
7505   bool HasConstElts = false;
7506   int SplatIdx = -1;
7507   for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
7508     SDValue In = Op.getOperand(idx);
7509     if (In.isUndef())
7510       continue;
7511     if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
7512       Immediate |= (InC->getZExtValue() & 0x1) << idx;
7513       HasConstElts = true;
7514     } else {
7515       NonConstIdx.push_back(idx);
7516     }
7517     if (SplatIdx < 0)
7518       SplatIdx = idx;
7519     else if (In != Op.getOperand(SplatIdx))
7520       IsSplat = false;
7521   }
7522 
7523   // For a splat, use "(select i1 splat_elt, all-ones, all-zeroes)".
7524   if (IsSplat) {
7525     // The build_vector allows the scalar element to be larger than the vector
7526     // element type. We need to mask it to use as a condition unless we know
7527     // the upper bits are zero.
7528     // FIXME: Use computeKnownBits instead of checking specific opcode?
7529     SDValue Cond = Op.getOperand(SplatIdx);
7530     assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
7531     if (Cond.getOpcode() != ISD::SETCC)
7532       Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
7533                          DAG.getConstant(1, dl, MVT::i8));
7534 
7535     // Perform the select in the scalar domain so we can use cmov.
7536     if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
7537       SDValue Select = DAG.getSelect(dl, MVT::i32, Cond,
7538                                      DAG.getAllOnesConstant(dl, MVT::i32),
7539                                      DAG.getConstant(0, dl, MVT::i32));
7540       Select = DAG.getBitcast(MVT::v32i1, Select);
7541       return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Select, Select);
7542     } else {
7543       MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
7544       SDValue Select = DAG.getSelect(dl, ImmVT, Cond,
7545                                      DAG.getAllOnesConstant(dl, ImmVT),
7546                                      DAG.getConstant(0, dl, ImmVT));
7547       MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
7548       Select = DAG.getBitcast(VecVT, Select);
7549       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Select,
7550                          DAG.getIntPtrConstant(0, dl));
7551     }
7552   }
7553 
7554   // Insert the non-constant elements one by one.
7555   SDValue DstVec;
7556   if (HasConstElts) {
7557     if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
7558       SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
7559       SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
7560       ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
7561       ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
7562       DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
7563     } else {
7564       MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
7565       SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
7566       MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
7567       DstVec = DAG.getBitcast(VecVT, Imm);
7568       DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
7569                            DAG.getIntPtrConstant(0, dl));
7570     }
7571   } else
7572     DstVec = DAG.getUNDEF(VT);
7573 
7574   for (unsigned InsertIdx : NonConstIdx) {
7575     DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
7576                          Op.getOperand(InsertIdx),
7577                          DAG.getIntPtrConstant(InsertIdx, dl));
7578   }
7579   return DstVec;
7580 }
7581 
7582 LLVM_ATTRIBUTE_UNUSED static bool isHorizOp(unsigned Opcode) {
7583   switch (Opcode) {
7584   case X86ISD::PACKSS:
7585   case X86ISD::PACKUS:
7586   case X86ISD::FHADD:
7587   case X86ISD::FHSUB:
7588   case X86ISD::HADD:
7589   case X86ISD::HSUB:
7590     return true;
7591   }
7592   return false;
7593 }
7594 
7595 /// This is a helper function of LowerToHorizontalOp().
7596 /// This function checks that the input build_vector \p N implements a
7597 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
7598 /// may not match the layout of an x86 256-bit horizontal instruction.
7599 /// In other words, if this returns true, then some extraction/insertion will
7600 /// be required to produce a valid horizontal instruction.
7601 ///
7602 /// Parameter \p Opcode defines the kind of horizontal operation to match.
7603 /// For example, if \p Opcode is equal to ISD::ADD, then this function
7604 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
7605 /// is equal to ISD::SUB, then this function checks if this is a horizontal
7606 /// arithmetic sub.
7607 ///
7608 /// This function only analyzes elements of \p N whose indices are
7609 /// in range [BaseIdx, LastIdx).
7610 ///
7611 /// TODO: This function was originally used to match both real and fake partial
7612 /// horizontal operations, but the index-matching logic is incorrect for that.
7613 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
7614 /// code because it is only used for partial h-op matching now?
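///
/// Illustrative sketch of the pattern matched per element when \p Opcode is
/// ISD::ADD (a partial 128-bit horizontal add):
///   (build_vector (add (extractelt A, 0), (extractelt A, 1)),
///                 (add (extractelt A, 2), (extractelt A, 3)), ...)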
7615 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
7616                                   SelectionDAG &DAG,
7617                                   unsigned BaseIdx, unsigned LastIdx,
7618                                   SDValue &V0, SDValue &V1) {
7619   EVT VT = N->getValueType(0);
7620   assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
7621   assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
7622   assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
7623          "Invalid Vector in input!");
7624 
7625   bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
7626   bool CanFold = true;
7627   unsigned ExpectedVExtractIdx = BaseIdx;
7628   unsigned NumElts = LastIdx - BaseIdx;
7629   V0 = DAG.getUNDEF(VT);
7630   V1 = DAG.getUNDEF(VT);
7631 
7632   // Check if N implements a horizontal binop.
7633   for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
7634     SDValue Op = N->getOperand(i + BaseIdx);
7635 
7636     // Skip UNDEFs.
7637     if (Op->isUndef()) {
7638       // Update the expected vector extract index.
7639       if (i * 2 == NumElts)
7640         ExpectedVExtractIdx = BaseIdx;
7641       ExpectedVExtractIdx += 2;
7642       continue;
7643     }
7644 
7645     CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
7646 
7647     if (!CanFold)
7648       break;
7649 
7650     SDValue Op0 = Op.getOperand(0);
7651     SDValue Op1 = Op.getOperand(1);
7652 
7653     // Try to match the following pattern:
7654     // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
7655     CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7656         Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7657         Op0.getOperand(0) == Op1.getOperand(0) &&
7658         isa<ConstantSDNode>(Op0.getOperand(1)) &&
7659         isa<ConstantSDNode>(Op1.getOperand(1)));
7660     if (!CanFold)
7661       break;
7662 
7663     unsigned I0 = Op0.getConstantOperandVal(1);
7664     unsigned I1 = Op1.getConstantOperandVal(1);
7665 
7666     if (i * 2 < NumElts) {
7667       if (V0.isUndef()) {
7668         V0 = Op0.getOperand(0);
7669         if (V0.getValueType() != VT)
7670           return false;
7671       }
7672     } else {
7673       if (V1.isUndef()) {
7674         V1 = Op0.getOperand(0);
7675         if (V1.getValueType() != VT)
7676           return false;
7677       }
7678       if (i * 2 == NumElts)
7679         ExpectedVExtractIdx = BaseIdx;
7680     }
7681 
7682     SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
7683     if (I0 == ExpectedVExtractIdx)
7684       CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
7685     else if (IsCommutable && I1 == ExpectedVExtractIdx) {
7686       // Try to match the following dag sequence:
7687       // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
7688       CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
7689     } else
7690       CanFold = false;
7691 
7692     ExpectedVExtractIdx += 2;
7693   }
7694 
7695   return CanFold;
7696 }
7697 
7698 /// Emit a sequence of two 128-bit horizontal add/sub followed by
7699 /// a concat_vector.
7700 ///
7701 /// This is a helper function of LowerToHorizontalOp().
7702 /// This function expects two 256-bit vectors called V0 and V1.
7703 /// At first, each vector is split into two separate 128-bit vectors.
7704 /// Then, the resulting 128-bit vectors are used to implement two
7705 /// horizontal binary operations.
7706 ///
7707 /// The kind of horizontal binary operation is defined by \p X86Opcode.
7708 ///
7709 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs
7710 /// to the two new horizontal binops.
7711 /// When Mode is set, the first horizontal binop DAG node takes as input the
7712 /// lower 128 bits of V0 and the upper 128 bits of V0. The second horizontal
7713 /// binop DAG node takes as input the lower 128 bits of V1 and the upper
7714 /// 128 bits of V1.
7715 ///   Example:
7716 ///     HADD V0_LO, V0_HI
7717 ///     HADD V1_LO, V1_HI
7718 ///
7719 /// Otherwise, the first horizontal binop DAG node takes as input the lower
7720 /// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal
7721 /// binop DAG node takes the upper 128 bits of V0 and the upper 128 bits of V1.
7722 ///   Example:
7723 ///     HADD V0_LO, V1_LO
7724 ///     HADD V0_HI, V1_HI
7725 ///
7726 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
7727 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
7728 /// the upper 128-bits of the result.
7729 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
7730                                      const SDLoc &DL, SelectionDAG &DAG,
7731                                      unsigned X86Opcode, bool Mode,
7732                                      bool isUndefLO, bool isUndefHI) {
7733   MVT VT = V0.getSimpleValueType();
7734   assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
7735          "Invalid nodes in input!");
7736 
7737   unsigned NumElts = VT.getVectorNumElements();
7738   SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
7739   SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
7740   SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
7741   SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
7742   MVT NewVT = V0_LO.getSimpleValueType();
7743 
7744   SDValue LO = DAG.getUNDEF(NewVT);
7745   SDValue HI = DAG.getUNDEF(NewVT);
7746 
7747   if (Mode) {
7748     // Don't emit a horizontal binop if the result is expected to be UNDEF.
7749     if (!isUndefLO && !V0->isUndef())
7750       LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
7751     if (!isUndefHI && !V1->isUndef())
7752       HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
7753   } else {
7754     // Don't emit a horizontal binop if the result is expected to be UNDEF.
7755     if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
7756       LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
7757 
7758     if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
7759       HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
7760   }
7761 
7762   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
7763 }
7764 
7765 /// Returns true iff \p BV builds a vector with the result equivalent to
7766 /// the result of an ADDSUB/SUBADD operation.
7767 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
7768 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
7769 /// \p Opnd0 and \p Opnd1.
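///
/// Illustrative sketch for a v4f32 ADDSUB match (even lanes subtract, odd
/// lanes add):
///   (build_vector (fsub (extractelt A, 0), (extractelt B, 0)),
///                 (fadd (extractelt A, 1), (extractelt B, 1)),
///                 (fsub (extractelt A, 2), (extractelt B, 2)),
///                 (fadd (extractelt A, 3), (extractelt B, 3)))
///     --> (X86ISD::ADDSUB A, B)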
7770 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
7771                              const X86Subtarget &Subtarget, SelectionDAG &DAG,
7772                              SDValue &Opnd0, SDValue &Opnd1,
7773                              unsigned &NumExtracts,
7774                              bool &IsSubAdd) {
7775 
7776   MVT VT = BV->getSimpleValueType(0);
7777   if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
7778     return false;
7779 
7780   unsigned NumElts = VT.getVectorNumElements();
7781   SDValue InVec0 = DAG.getUNDEF(VT);
7782   SDValue InVec1 = DAG.getUNDEF(VT);
7783 
7784   NumExtracts = 0;
7785 
7786   // Odd-numbered elements in the input build vector are obtained from
7787   // adding/subtracting two integer/float elements.
7788   // Even-numbered elements in the input build vector are obtained from
7789   // subtracting/adding two integer/float elements.
7790   unsigned Opc[2] = {0, 0};
7791   for (unsigned i = 0, e = NumElts; i != e; ++i) {
7792     SDValue Op = BV->getOperand(i);
7793 
7794     // Skip 'undef' values.
7795     unsigned Opcode = Op.getOpcode();
7796     if (Opcode == ISD::UNDEF)
7797       continue;
7798 
7799     // Early exit if we found an unexpected opcode.
7800     if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
7801       return false;
7802 
7803     SDValue Op0 = Op.getOperand(0);
7804     SDValue Op1 = Op.getOperand(1);
7805 
7806     // Try to match the following pattern:
7807     // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
7808     // Early exit if we cannot match that sequence.
7809     if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7810         Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7811         !isa<ConstantSDNode>(Op0.getOperand(1)) ||
7812         Op0.getOperand(1) != Op1.getOperand(1))
7813       return false;
7814 
7815     unsigned I0 = Op0.getConstantOperandVal(1);
7816     if (I0 != i)
7817       return false;
7818 
7819     // We found a valid add/sub node; make sure it's the same opcode as previous
7820     // elements for this parity.
7821     if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
7822       return false;
7823     Opc[i % 2] = Opcode;
7824 
7825     // Update InVec0 and InVec1.
7826     if (InVec0.isUndef()) {
7827       InVec0 = Op0.getOperand(0);
7828       if (InVec0.getSimpleValueType() != VT)
7829         return false;
7830     }
7831     if (InVec1.isUndef()) {
7832       InVec1 = Op1.getOperand(0);
7833       if (InVec1.getSimpleValueType() != VT)
7834         return false;
7835     }
7836 
7837     // Make sure that the operands of each add/sub node always
7838     // come from the same pair of vectors.
7839     if (InVec0 != Op0.getOperand(0)) {
7840       if (Opcode == ISD::FSUB)
7841         return false;
7842 
7843       // FADD is commutable. Try to commute the operands
7844       // and then test again.
7845       std::swap(Op0, Op1);
7846       if (InVec0 != Op0.getOperand(0))
7847         return false;
7848     }
7849 
7850     if (InVec1 != Op1.getOperand(0))
7851       return false;
7852 
7853     // Increment the number of extractions done.
7854     ++NumExtracts;
7855   }
7856 
7857   // Ensure we have found an opcode for both parities and that they are
7858   // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
7859   // inputs are undef.
7860   if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
7861       InVec0.isUndef() || InVec1.isUndef())
7862     return false;
7863 
7864   IsSubAdd = Opc[0] == ISD::FADD;
7865 
7866   Opnd0 = InVec0;
7867   Opnd1 = InVec1;
7868   return true;
7869 }
7870 
7871 /// Returns true if it is possible to fold MUL and an idiom that has already been
7872 /// recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
7873 /// FMADDSUB/FMSUBADD(x, y, \p Opnd1). If (and only if) true is returned, the
7874 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1, \p Opnd2.
7875 ///
7876 /// Prior to calling this function it should be known that there is some
7877 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
7878 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
7879 /// before replacement of such SDNode with ADDSUB operation. Thus the number
7880 /// of \p Opnd0 uses is expected to be equal to 2.
7881 /// For example, this function may be called for the following IR:
7882 ///    %AB = fmul fast <2 x double> %A, %B
7883 ///    %Sub = fsub fast <2 x double> %AB, %C
7884 ///    %Add = fadd fast <2 x double> %AB, %C
7885 ///    %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
7886 ///                            <2 x i32> <i32 0, i32 3>
7887 /// There is a def for %Addsub here, which potentially can be replaced by
7888 /// X86ISD::ADDSUB operation:
7889 ///    %Addsub = X86ISD::ADDSUB %AB, %C
7890 /// and such ADDSUB can further be replaced with FMADDSUB:
7891 ///    %Addsub = FMADDSUB %A, %B, %C.
7892 ///
7893 /// The main reason why this method is called before the replacement of the
7894 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
7895 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
7896 /// FMADDSUB is.
7897 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
7898                                  SelectionDAG &DAG,
7899                                  SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
7900                                  unsigned ExpectedUses) {
7901   if (Opnd0.getOpcode() != ISD::FMUL ||
7902       !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
7903     return false;
7904 
7905   // FIXME: These checks must match the similar ones in
7906   // DAGCombiner::visitFADDForFMACombine. It would be good to have one
7907   // function that would answer if it is Ok to fuse MUL + ADD to FMADD
7908   // or MUL + ADDSUB to FMADDSUB.
7909   const TargetOptions &Options = DAG.getTarget().Options;
7910   bool AllowFusion =
7911       (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
7912   if (!AllowFusion)
7913     return false;
7914 
7915   Opnd2 = Opnd1;
7916   Opnd1 = Opnd0.getOperand(1);
7917   Opnd0 = Opnd0.getOperand(0);
7918 
7919   return true;
7920 }
7921 
7922 /// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or
7923 /// 'fmsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or
7924 /// X86ISD::FMSUBADD node accordingly.
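///
/// There is no 512-bit ADDSUB instruction, so for 512-bit types the idiom is
/// instead re-expanded below as an explicit blend (illustrative sketch):
///   ADDSUB(X, Y) --> shuffle (fsub X, Y), (fadd X, Y), <0, N+1, 2, N+3, ...>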
7925 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
7926                                        const X86Subtarget &Subtarget,
7927                                        SelectionDAG &DAG) {
7928   SDValue Opnd0, Opnd1;
7929   unsigned NumExtracts;
7930   bool IsSubAdd;
7931   if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
7932                         IsSubAdd))
7933     return SDValue();
7934 
7935   MVT VT = BV->getSimpleValueType(0);
7936   SDLoc DL(BV);
7937 
7938   // Try to generate X86ISD::FMADDSUB node here.
7939   SDValue Opnd2;
7940   if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
7941     unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
7942     return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
7943   }
7944 
7945   // We only support ADDSUB.
7946   if (IsSubAdd)
7947     return SDValue();
7948 
7949   // There are no known X86 targets with 512-bit ADDSUB instructions!
7950   // Convert to blend(fsub,fadd).
7951   if (VT.is512BitVector()) {
7952     SmallVector<int> Mask;
7953     for (int I = 0, E = VT.getVectorNumElements(); I != E; I += 2) {
7954       Mask.push_back(I);
7955       Mask.push_back(I + E + 1);
7956     }
7957     SDValue Sub = DAG.getNode(ISD::FSUB, DL, VT, Opnd0, Opnd1);
7958     SDValue Add = DAG.getNode(ISD::FADD, DL, VT, Opnd0, Opnd1);
7959     return DAG.getVectorShuffle(VT, DL, Sub, Add, Mask);
7960   }
7961 
7962   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
7963 }
7964 
7965 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
7966                              unsigned &HOpcode, SDValue &V0, SDValue &V1) {
7967   // Initialize outputs to known values.
7968   MVT VT = BV->getSimpleValueType(0);
7969   HOpcode = ISD::DELETED_NODE;
7970   V0 = DAG.getUNDEF(VT);
7971   V1 = DAG.getUNDEF(VT);
7972 
7973   // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
7974   // half of the result is calculated independently from the 128-bit halves of
7975   // the inputs, so that makes the index-checking logic below more complicated.
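  // Illustrative sketch for a v8i32 HADD of sources A and B:
  //   result[0..3] = A[0]+A[1], A[2]+A[3], B[0]+B[1], B[2]+B[3]
  //   result[4..7] = A[4]+A[5], A[6]+A[7], B[4]+B[5], B[6]+B[7]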
7976   unsigned NumElts = VT.getVectorNumElements();
7977   unsigned GenericOpcode = ISD::DELETED_NODE;
7978   unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
7979   unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
7980   unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
7981   for (unsigned i = 0; i != Num128BitChunks; ++i) {
7982     for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
7983       // Ignore undef elements.
7984       SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
7985       if (Op.isUndef())
7986         continue;
7987 
7988       // If there's an opcode mismatch, we're done.
7989       if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
7990         return false;
7991 
7992       // Initialize horizontal opcode.
7993       if (HOpcode == ISD::DELETED_NODE) {
7994         GenericOpcode = Op.getOpcode();
7995         switch (GenericOpcode) {
7996         case ISD::ADD: HOpcode = X86ISD::HADD; break;
7997         case ISD::SUB: HOpcode = X86ISD::HSUB; break;
7998         case ISD::FADD: HOpcode = X86ISD::FHADD; break;
7999         case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
8000         default: return false;
8001         }
8002       }
8003 
8004       SDValue Op0 = Op.getOperand(0);
8005       SDValue Op1 = Op.getOperand(1);
8006       if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8007           Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8008           Op0.getOperand(0) != Op1.getOperand(0) ||
8009           !isa<ConstantSDNode>(Op0.getOperand(1)) ||
8010           !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
8011         return false;
8012 
8013       // The source vector is chosen based on which 64-bit half of the
8014       // destination vector is being calculated.
8015       if (j < NumEltsIn64Bits) {
8016         if (V0.isUndef())
8017           V0 = Op0.getOperand(0);
8018       } else {
8019         if (V1.isUndef())
8020           V1 = Op0.getOperand(0);
8021       }
8022 
8023       SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
8024       if (SourceVec != Op0.getOperand(0))
8025         return false;
8026 
8027       // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
8028       unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
8029       unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
8030       unsigned ExpectedIndex = i * NumEltsIn128Bits +
8031                                (j % NumEltsIn64Bits) * 2;
8032       if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
8033         continue;
8034 
8035       // If this is not a commutative op, this does not match.
8036       if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
8037         return false;
8038 
8039       // Addition is commutative, so try swapping the extract indexes.
8040       // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
8041       if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
8042         continue;
8043 
8044       // Extract indexes do not match horizontal requirement.
8045       return false;
8046     }
8047   }
8048   // We matched. Opcode and operands are returned by reference as arguments.
8049   return true;
8050 }
8051 
8052 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
8053                                     SelectionDAG &DAG, unsigned HOpcode,
8054                                     SDValue V0, SDValue V1) {
8055   // If either input vector is not the same size as the build vector,
8056   // extract/insert the low bits to the correct size.
8057   // This is free (examples: zmm --> xmm, xmm --> ymm).
8058   MVT VT = BV->getSimpleValueType(0);
8059   unsigned Width = VT.getSizeInBits();
8060   if (V0.getValueSizeInBits() > Width)
8061     V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
8062   else if (V0.getValueSizeInBits() < Width)
8063     V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
8064 
8065   if (V1.getValueSizeInBits() > Width)
8066     V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
8067   else if (V1.getValueSizeInBits() < Width)
8068     V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
8069 
8070   unsigned NumElts = VT.getVectorNumElements();
8071   APInt DemandedElts = APInt::getAllOnes(NumElts);
8072   for (unsigned i = 0; i != NumElts; ++i)
8073     if (BV->getOperand(i).isUndef())
8074       DemandedElts.clearBit(i);
8075 
8076   // If we don't need the upper xmm, then perform as a xmm hop.
8077   unsigned HalfNumElts = NumElts / 2;
8078   if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
8079     MVT HalfVT = VT.getHalfNumVectorElementsVT();
8080     V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
8081     V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
8082     SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
8083     return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
8084   }
8085 
8086   return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
8087 }
8088 
8089 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
8090 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
8091                                    const X86Subtarget &Subtarget,
8092                                    SelectionDAG &DAG) {
8093   // We need at least 2 non-undef elements to make this worthwhile by default.
8094   unsigned NumNonUndefs =
8095       count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
8096   if (NumNonUndefs < 2)
8097     return SDValue();
8098 
8099   // There are 4 sets of horizontal math operations distinguished by type:
8100   // int/FP at 128-bit/256-bit. Each type was introduced with a different
8101   // subtarget feature. Try to match those "native" patterns first.
8102   MVT VT = BV->getSimpleValueType(0);
8103   if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
8104       ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
8105       ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
8106       ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
8107     unsigned HOpcode;
8108     SDValue V0, V1;
8109     if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
8110       return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
8111   }
8112 
8113   // Try harder to match 256-bit ops by using extract/concat.
8114   if (!Subtarget.hasAVX() || !VT.is256BitVector())
8115     return SDValue();
8116 
8117   // Count the number of UNDEF operands in the input build_vector.
8118   unsigned NumElts = VT.getVectorNumElements();
8119   unsigned Half = NumElts / 2;
8120   unsigned NumUndefsLO = 0;
8121   unsigned NumUndefsHI = 0;
8122   for (unsigned i = 0, e = Half; i != e; ++i)
8123     if (BV->getOperand(i)->isUndef())
8124       NumUndefsLO++;
8125 
8126   for (unsigned i = Half, e = NumElts; i != e; ++i)
8127     if (BV->getOperand(i)->isUndef())
8128       NumUndefsHI++;
8129 
8130   SDLoc DL(BV);
8131   SDValue InVec0, InVec1;
8132   if (VT == MVT::v8i32 || VT == MVT::v16i16) {
8133     SDValue InVec2, InVec3;
8134     unsigned X86Opcode;
8135     bool CanFold = true;
8136 
8137     if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
8138         isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
8139                               InVec3) &&
8140         ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
8141         ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
8142       X86Opcode = X86ISD::HADD;
8143     else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
8144                                    InVec1) &&
8145              isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
8146                                    InVec3) &&
8147              ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
8148              ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
8149       X86Opcode = X86ISD::HSUB;
8150     else
8151       CanFold = false;
8152 
8153     if (CanFold) {
8154       // Do not try to expand this build_vector into a pair of horizontal
8155       // add/sub if we can emit a pair of scalar add/sub.
8156       if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
8157         return SDValue();
8158 
8159       // Convert this build_vector into a pair of horizontal binops followed by
8160       // a concat vector. We must adjust the outputs from the partial horizontal
8161       // matching calls above to account for undefined vector halves.
8162       SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
8163       SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
8164       assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
8165       bool isUndefLO = NumUndefsLO == Half;
8166       bool isUndefHI = NumUndefsHI == Half;
8167       return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
8168                                    isUndefHI);
8169     }
8170   }
8171 
8172   if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
8173       VT == MVT::v16i16) {
8174     unsigned X86Opcode;
8175     if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
8176       X86Opcode = X86ISD::HADD;
8177     else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
8178                                    InVec1))
8179       X86Opcode = X86ISD::HSUB;
8180     else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
8181                                    InVec1))
8182       X86Opcode = X86ISD::FHADD;
8183     else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
8184                                    InVec1))
8185       X86Opcode = X86ISD::FHSUB;
8186     else
8187       return SDValue();
8188 
8189     // Don't try to expand this build_vector into a pair of horizontal add/sub
8190     // if we can simply emit a pair of scalar add/sub.
8191     if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
8192       return SDValue();
8193 
8194     // Convert this build_vector into two horizontal add/sub followed by
8195     // a concat vector.
8196     bool isUndefLO = NumUndefsLO == Half;
8197     bool isUndefHI = NumUndefsHI == Half;
8198     return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
8199                                  isUndefLO, isUndefHI);
8200   }
8201 
8202   return SDValue();
8203 }
8204 
8205 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
8206                           SelectionDAG &DAG);
8207 
8208 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
8209 /// one of their operands is constant, lower to a pair of BUILD_VECTOR and
8210 /// just apply the bit to the vectors.
8211 /// NOTE: It's not in our interest to start making a general-purpose vectorizer
8212 /// from this, but enough scalar bit operations are created by the later
8213 /// legalization + scalarization stages to need basic support.
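///
/// Illustrative sketch for a v4i32 build_vector of ANDs with constants:
///   (build_vector (and x0, 1), (and x1, 2), (and x2, 4), (and x3, 8))
///     --> (and (build_vector x0, x1, x2, x3), (build_vector 1, 2, 4, 8))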
8214 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
8215                                        const X86Subtarget &Subtarget,
8216                                        SelectionDAG &DAG) {
8217   SDLoc DL(Op);
8218   MVT VT = Op->getSimpleValueType(0);
8219   unsigned NumElems = VT.getVectorNumElements();
8220   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8221 
8222   // Check that all elements have the same opcode.
8223   // TODO: Should we allow UNDEFS and if so how many?
8224   unsigned Opcode = Op->getOperand(0).getOpcode();
8225   for (unsigned i = 1; i < NumElems; ++i)
8226     if (Opcode != Op->getOperand(i).getOpcode())
8227       return SDValue();
8228 
8229   // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
8230   bool IsShift = false;
8231   switch (Opcode) {
8232   default:
8233     return SDValue();
8234   case ISD::SHL:
8235   case ISD::SRL:
8236   case ISD::SRA:
8237     IsShift = true;
8238     break;
8239   case ISD::AND:
8240   case ISD::XOR:
8241   case ISD::OR:
8242     // Don't do this if the buildvector is a splat - we'd replace one
8243     // constant with an entire vector.
8244     if (Op->getSplatValue())
8245       return SDValue();
8246     if (!TLI.isOperationLegalOrPromote(Opcode, VT))
8247       return SDValue();
8248     break;
8249   }
8250 
8251   SmallVector<SDValue, 4> LHSElts, RHSElts;
8252   for (SDValue Elt : Op->ops()) {
8253     SDValue LHS = Elt.getOperand(0);
8254     SDValue RHS = Elt.getOperand(1);
8255 
8256     // We expect the canonicalized RHS operand to be the constant.
8257     if (!isa<ConstantSDNode>(RHS))
8258       return SDValue();
8259 
8260     // Extend shift amounts.
8261     if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
8262       if (!IsShift)
8263         return SDValue();
8264       RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
8265     }
8266 
8267     LHSElts.push_back(LHS);
8268     RHSElts.push_back(RHS);
8269   }
8270 
8271   // Limit to shifts by uniform immediates.
8272   // TODO: Only accept vXi8/vXi64 special cases?
8273   // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
8274   if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
8275     return SDValue();
8276 
8277   SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
8278   SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
8279   SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
8280 
8281   if (!IsShift)
8282     return Res;
8283 
8284   // Immediately lower the shift to ensure the constant build vector doesn't
8285   // get converted to a constant pool before the shift is lowered.
8286   return LowerShift(Res, Subtarget, DAG);
8287 }
8288 
8289 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
8290 /// functionality to do this, so it's all zeros, all ones, or some derivation
8291 /// that is cheap to calculate.
8292 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
8293                                          const X86Subtarget &Subtarget) {
8294   SDLoc DL(Op);
8295   MVT VT = Op.getSimpleValueType();
8296 
8297   // Vectors containing all zeros can be matched by pxor and xorps.
8298   if (ISD::isBuildVectorAllZeros(Op.getNode()))
8299     return Op;
8300 
8301   // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
8302   // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
8303   // vpcmpeqd on 256-bit vectors.
8304   if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
8305     if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
8306       return Op;
8307 
8308     return getOnesVector(VT, DAG, DL);
8309   }
8310 
8311   return SDValue();
8312 }
8313 
8314 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
8315 /// from a vector of source values and a vector of extraction indices.
8316 /// The vectors might be manipulated to match the type of the permute op.
8317 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
8318                                      SDLoc &DL, SelectionDAG &DAG,
8319                                      const X86Subtarget &Subtarget) {
8320   MVT ShuffleVT = VT;
8321   EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
8322   unsigned NumElts = VT.getVectorNumElements();
8323   unsigned SizeInBits = VT.getSizeInBits();
8324 
8325   // Adjust IndicesVec to match VT size.
8326   assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
8327          "Illegal variable permute mask size");
8328   if (IndicesVec.getValueType().getVectorNumElements() > NumElts) {
8329     // Narrow/widen the indices vector to the correct size.
8330     if (IndicesVec.getValueSizeInBits() > SizeInBits)
8331       IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
8332                                     NumElts * VT.getScalarSizeInBits());
8333     else if (IndicesVec.getValueSizeInBits() < SizeInBits)
8334       IndicesVec = widenSubVector(IndicesVec, false, Subtarget, DAG,
8335                                   SDLoc(IndicesVec), SizeInBits);
8336     // Zero-extend the index elements within the vector.
8337     if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
8338       IndicesVec = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(IndicesVec),
8339                                IndicesVT, IndicesVec);
8340   }
8341   IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
8342 
8343   // Handle a SrcVec that doesn't match the VT type.
8344   if (SrcVec.getValueSizeInBits() != SizeInBits) {
8345     if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
8346       // Handle larger SrcVec by treating it as a larger permute.
8347       unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
8348       VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
8349       IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
8350       IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
8351                                   Subtarget, DAG, SDLoc(IndicesVec));
8352       SDValue NewSrcVec =
8353           createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
8354       if (NewSrcVec)
8355         return extractSubVector(NewSrcVec, 0, DAG, DL, SizeInBits);
8356       return SDValue();
8357     } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
8358       // Widen smaller SrcVec to match VT.
8359       SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
8360     } else
8361       return SDValue();
8362   }
8363 
8364   auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
8365     assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
8366     EVT SrcVT = Idx.getValueType();
8367     unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
8368     uint64_t IndexScale = 0;
8369     uint64_t IndexOffset = 0;
8370 
8371     // If we're scaling a smaller permute op, then we need to repeat the
8372     // indices, scaling and offsetting them as well.
8373     // e.g. v4i32 -> v16i8 (Scale = 4)
8374     // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
8375     // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
8376     for (uint64_t i = 0; i != Scale; ++i) {
8377       IndexScale |= Scale << (i * NumDstBits);
8378       IndexOffset |= i << (i * NumDstBits);
8379     }
8380 
8381     Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
8382                       DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
8383     Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
8384                       DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
8385     return Idx;
8386   };
8387 
8388   unsigned Opcode = 0;
8389   switch (VT.SimpleTy) {
8390   default:
8391     break;
8392   case MVT::v16i8:
8393     if (Subtarget.hasSSSE3())
8394       Opcode = X86ISD::PSHUFB;
8395     break;
8396   case MVT::v8i16:
8397     if (Subtarget.hasVLX() && Subtarget.hasBWI())
8398       Opcode = X86ISD::VPERMV;
8399     else if (Subtarget.hasSSSE3()) {
8400       Opcode = X86ISD::PSHUFB;
8401       ShuffleVT = MVT::v16i8;
8402     }
8403     break;
8404   case MVT::v4f32:
8405   case MVT::v4i32:
8406     if (Subtarget.hasAVX()) {
8407       Opcode = X86ISD::VPERMILPV;
8408       ShuffleVT = MVT::v4f32;
8409     } else if (Subtarget.hasSSSE3()) {
8410       Opcode = X86ISD::PSHUFB;
8411       ShuffleVT = MVT::v16i8;
8412     }
8413     break;
8414   case MVT::v2f64:
8415   case MVT::v2i64:
8416     if (Subtarget.hasAVX()) {
8417       // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
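      // e.g. an index of 1 is doubled to 2 so that bit#1 selects the upper element.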
8418       IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
8419       Opcode = X86ISD::VPERMILPV;
8420       ShuffleVT = MVT::v2f64;
8421     } else if (Subtarget.hasSSE41()) {
8422       // SSE41 can compare v2i64 - select between indices 0 and 1.
8423       return DAG.getSelectCC(
8424           DL, IndicesVec,
8425           getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
8426           DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
8427           DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
8428           ISD::CondCode::SETEQ);
8429     }
8430     break;
8431   case MVT::v32i8:
8432     if (Subtarget.hasVLX() && Subtarget.hasVBMI())
8433       Opcode = X86ISD::VPERMV;
8434     else if (Subtarget.hasXOP()) {
8435       SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
8436       SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
8437       SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
8438       SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
8439       return DAG.getNode(
8440           ISD::CONCAT_VECTORS, DL, VT,
8441           DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
8442           DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
8443     } else if (Subtarget.hasAVX()) {
8444       SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
8445       SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
8446       SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
8447       SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
8448       auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
8449                               ArrayRef<SDValue> Ops) {
8450         // Permute Lo and Hi and then select based on index range.
8451         // This works as PSHUFB uses bits[3:0] to permute elements and we don't
8452         // care about bit[7] as it's just an index vector.
8453         SDValue Idx = Ops[2];
8454         EVT VT = Idx.getValueType();
8455         return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
8456                                DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
8457                                DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
8458                                ISD::CondCode::SETGT);
8459       };
8460       SDValue Ops[] = {LoLo, HiHi, IndicesVec};
8461       return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
8462                               PSHUFBBuilder);
8463     }
8464     break;
8465   case MVT::v16i16:
8466     if (Subtarget.hasVLX() && Subtarget.hasBWI())
8467       Opcode = X86ISD::VPERMV;
8468     else if (Subtarget.hasAVX()) {
8469       // Scale to v32i8 and perform as v32i8.
8470       IndicesVec = ScaleIndices(IndicesVec, 2);
8471       return DAG.getBitcast(
8472           VT, createVariablePermute(
8473                   MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
8474                   DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
8475     }
8476     break;
8477   case MVT::v8f32:
8478   case MVT::v8i32:
8479     if (Subtarget.hasAVX2())
8480       Opcode = X86ISD::VPERMV;
8481     else if (Subtarget.hasAVX()) {
8482       SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
8483       SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
8484                                           {0, 1, 2, 3, 0, 1, 2, 3});
8485       SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
8486                                           {4, 5, 6, 7, 4, 5, 6, 7});
8487       if (Subtarget.hasXOP())
8488         return DAG.getBitcast(
8489             VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
8490                             IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
8491       // Permute Lo and Hi and then select based on index range.
8492       // This works as VPERMILPS only uses index bits[0:1] to permute elements.
8493       SDValue Res = DAG.getSelectCC(
8494           DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
8495           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
8496           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
8497           ISD::CondCode::SETGT);
8498       return DAG.getBitcast(VT, Res);
8499     }
8500     break;
8501   case MVT::v4i64:
8502   case MVT::v4f64:
8503     if (Subtarget.hasAVX512()) {
8504       if (!Subtarget.hasVLX()) {
8505         MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
8506         SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
8507                                 SDLoc(SrcVec));
8508         IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
8509                                     DAG, SDLoc(IndicesVec));
8510         SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
8511                                             DAG, Subtarget);
8512         return extract256BitVector(Res, 0, DAG, DL);
8513       }
8514       Opcode = X86ISD::VPERMV;
8515     } else if (Subtarget.hasAVX()) {
8516       SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
8517       SDValue LoLo =
8518           DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
8519       SDValue HiHi =
8520           DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
8521       // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
8522       IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
8523       if (Subtarget.hasXOP())
8524         return DAG.getBitcast(
8525             VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
8526                             IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
8527       // Permute Lo and Hi and then select based on index range.
8528       // This works as VPERMILPD only uses index bit[1] to permute elements.
8529       SDValue Res = DAG.getSelectCC(
8530           DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
8531           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
8532           DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
8533           ISD::CondCode::SETGT);
8534       return DAG.getBitcast(VT, Res);
8535     }
8536     break;
8537   case MVT::v64i8:
8538     if (Subtarget.hasVBMI())
8539       Opcode = X86ISD::VPERMV;
8540     break;
8541   case MVT::v32i16:
8542     if (Subtarget.hasBWI())
8543       Opcode = X86ISD::VPERMV;
8544     break;
8545   case MVT::v16f32:
8546   case MVT::v16i32:
8547   case MVT::v8f64:
8548   case MVT::v8i64:
8549     if (Subtarget.hasAVX512())
8550       Opcode = X86ISD::VPERMV;
8551     break;
8552   }
8553   if (!Opcode)
8554     return SDValue();
8555 
8556   assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
8557          (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
8558          "Illegal variable permute shuffle type");
8559 
8560   uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
8561   if (Scale > 1)
8562     IndicesVec = ScaleIndices(IndicesVec, Scale);
8563 
8564   EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
8565   IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
8566 
8567   SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
8568   SDValue Res = Opcode == X86ISD::VPERMV
8569                     ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
8570                     : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
8571   return DAG.getBitcast(VT, Res);
8572 }
8573 
8574 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
8575 // reasoned to be a permutation of a vector by indices in a non-constant vector.
8576 // (build_vector (extract_elt V, (extract_elt I, 0)),
8577 //               (extract_elt V, (extract_elt I, 1)),
8578 //                    ...
8579 // ->
8580 // (vpermv I, V)
8581 //
8582 // TODO: Handle undefs
8583 // TODO: Utilize pshufb and zero mask blending to support more efficient
8584 // construction of vectors with constant-0 elements.
8585 static SDValue
8586 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
8587                                    const X86Subtarget &Subtarget) {
8588   SDValue SrcVec, IndicesVec;
8589   // Check for a match of the permute source vector and permute index elements.
8590   // This is done by checking that the i-th build_vector operand is of the form:
8591   // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
8592   for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
8593     SDValue Op = V.getOperand(Idx);
8594     if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8595       return SDValue();
8596 
8597     // If this is the first extract encountered in V, set the source vector,
8598     // otherwise verify the extract is from the previously defined source
8599     // vector.
8600     if (!SrcVec)
8601       SrcVec = Op.getOperand(0);
8602     else if (SrcVec != Op.getOperand(0))
8603       return SDValue();
8604     SDValue ExtractedIndex = Op->getOperand(1);
8605     // Peek through extends.
8606     if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
8607         ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
8608       ExtractedIndex = ExtractedIndex.getOperand(0);
8609     if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
8610       return SDValue();
8611 
8612     // If this is the first extract from the index vector candidate, set the
8613     // indices vector, otherwise verify the extract is from the previously
8614     // defined indices vector.
8615     if (!IndicesVec)
8616       IndicesVec = ExtractedIndex.getOperand(0);
8617     else if (IndicesVec != ExtractedIndex.getOperand(0))
8618       return SDValue();
8619 
8620     auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
8621     if (!PermIdx || PermIdx->getAPIntValue() != Idx)
8622       return SDValue();
8623   }
8624 
8625   SDLoc DL(V);
8626   MVT VT = V.getSimpleValueType();
8627   return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
8628 }
8629 
8630 SDValue
8631 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
8632   SDLoc dl(Op);
8633 
8634   MVT VT = Op.getSimpleValueType();
8635   MVT EltVT = VT.getVectorElementType();
8636   MVT OpEltVT = Op.getOperand(0).getSimpleValueType();
8637   unsigned NumElems = Op.getNumOperands();
8638 
8639   // Lower vXi1 predicate vectors with dedicated handling.
8640   if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
8641     return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
8642 
8643   if (VT.getVectorElementType() == MVT::bf16 &&
8644       (Subtarget.hasAVXNECONVERT() || Subtarget.hasBF16()))
8645     return LowerBUILD_VECTORvXbf16(Op, DAG, Subtarget);
8646 
8647   if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
8648     return VectorConstant;
8649 
8650   unsigned EVTBits = EltVT.getSizeInBits();
8651   APInt UndefMask = APInt::getZero(NumElems);
8652   APInt FrozenUndefMask = APInt::getZero(NumElems);
8653   APInt ZeroMask = APInt::getZero(NumElems);
8654   APInt NonZeroMask = APInt::getZero(NumElems);
8655   bool IsAllConstants = true;
8656   bool OneUseFrozenUndefs = true;
8657   SmallSet<SDValue, 8> Values;
8658   unsigned NumConstants = NumElems;
8659   for (unsigned i = 0; i < NumElems; ++i) {
8660     SDValue Elt = Op.getOperand(i);
8661     if (Elt.isUndef()) {
8662       UndefMask.setBit(i);
8663       continue;
8664     }
8665     if (ISD::isFreezeUndef(Elt.getNode())) {
8666       OneUseFrozenUndefs = OneUseFrozenUndefs && Elt->hasOneUse();
8667       FrozenUndefMask.setBit(i);
8668       continue;
8669     }
8670     Values.insert(Elt);
8671     if (!isIntOrFPConstant(Elt)) {
8672       IsAllConstants = false;
8673       NumConstants--;
8674     }
8675     if (X86::isZeroNode(Elt)) {
8676       ZeroMask.setBit(i);
8677     } else {
8678       NonZeroMask.setBit(i);
8679     }
8680   }
8681 
8682   // All undef vector. Return an UNDEF.
8683   if (UndefMask.isAllOnes())
8684     return DAG.getUNDEF(VT);
8685 
8686   // All undef/freeze(undef) vector. Return a FREEZE UNDEF.
8687   if (OneUseFrozenUndefs && (UndefMask | FrozenUndefMask).isAllOnes())
8688     return DAG.getFreeze(DAG.getUNDEF(VT));
8689 
8690   // All undef/freeze(undef)/zero vector. Return a zero vector.
8691   if ((UndefMask | FrozenUndefMask | ZeroMask).isAllOnes())
8692     return getZeroVector(VT, Subtarget, DAG, dl);
8693 
8694   // If we have multiple FREEZE-UNDEF operands, we are likely going to end up
8695   // lowering into a suboptimal insertion sequence. Instead, thaw the UNDEF in
8696   // our source BUILD_VECTOR, create another FREEZE-UNDEF splat BUILD_VECTOR,
8697   // and blend the FREEZE-UNDEF operands back in.
8698   // FIXME: is this worthwhile even for a single FREEZE-UNDEF operand?
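  // e.g. (build_vector x, freeze(undef), y, freeze(undef)) becomes, roughly,
  //   shuffle (build_vector x, undef, y, undef),
  //           (splat build_vector of freeze(undef)), <0, 5, 2, 7>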
8699   if (unsigned NumFrozenUndefElts = FrozenUndefMask.popcount();
8700       NumFrozenUndefElts >= 2 && NumFrozenUndefElts < NumElems) {
8701     SmallVector<int, 16> BlendMask(NumElems, -1);
8702     SmallVector<SDValue, 16> Elts(NumElems, DAG.getUNDEF(OpEltVT));
8703     for (unsigned i = 0; i < NumElems; ++i) {
8704       if (UndefMask[i]) {
8705         BlendMask[i] = -1;
8706         continue;
8707       }
8708       BlendMask[i] = i;
8709       if (!FrozenUndefMask[i])
8710         Elts[i] = Op.getOperand(i);
8711       else
8712         BlendMask[i] += NumElems;
8713     }
8714     SDValue EltsBV = DAG.getBuildVector(VT, dl, Elts);
8715     SDValue FrozenUndefElt = DAG.getFreeze(DAG.getUNDEF(OpEltVT));
8716     SDValue FrozenUndefBV = DAG.getSplatBuildVector(VT, dl, FrozenUndefElt);
8717     return DAG.getVectorShuffle(VT, dl, EltsBV, FrozenUndefBV, BlendMask);
8718   }
8719 
8720   BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
8721 
8722   // If the upper elts of a ymm/zmm are undef/freeze(undef)/zero then we might
8723   // be better off lowering to a smaller build vector and padding with
8724   // undef/zero.
8725   if ((VT.is256BitVector() || VT.is512BitVector()) &&
8726       !isFoldableUseOfShuffle(BV)) {
8727     unsigned UpperElems = NumElems / 2;
8728     APInt UndefOrZeroMask = FrozenUndefMask | UndefMask | ZeroMask;
8729     unsigned NumUpperUndefsOrZeros = UndefOrZeroMask.countl_one();
8730     if (NumUpperUndefsOrZeros >= UpperElems) {
8731       if (VT.is512BitVector() &&
8732           NumUpperUndefsOrZeros >= (NumElems - (NumElems / 4)))
8733         UpperElems = NumElems - (NumElems / 4);
8734       // If freeze(undef) is in any upper elements, force to zero.
8735       bool UndefUpper = UndefMask.countl_one() >= UpperElems;
8736       MVT LowerVT = MVT::getVectorVT(EltVT, NumElems - UpperElems);
8737       SDValue NewBV =
8738           DAG.getBuildVector(LowerVT, dl, Op->ops().drop_back(UpperElems));
8739       return widenSubVector(VT, NewBV, !UndefUpper, Subtarget, DAG, dl);
8740     }
8741   }
8742 
8743   if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
8744     return AddSub;
8745   if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
8746     return HorizontalOp;
8747   if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
8748     return Broadcast;
8749   if (SDValue BitOp = lowerBuildVectorToBitOp(BV, Subtarget, DAG))
8750     return BitOp;
8751 
8752   unsigned NumZero = ZeroMask.popcount();
8753   unsigned NumNonZero = NonZeroMask.popcount();
8754 
8755   // If we are inserting one variable into a vector of non-zero constants, try
8756   // to avoid loading each constant element as a scalar. Load the constants as a
8757   // vector and then insert the variable scalar element. If insertion is not
8758   // supported, fall back to a shuffle to get the scalar blended with the
8759   // constants. Insertion into a zero vector is handled as a special-case
8760   // somewhere below here.
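  // e.g. (v4i32 build_vector 1, 2, x, 4) is lowered as a constant-pool load of
  // <1, 2, undef, 4> followed by inserting x at index 2.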
8761   if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
8762       FrozenUndefMask.isZero() &&
8763       (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
8764        isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
8765     // Create an all-constant vector. The variable element in the old
8766     // build vector is replaced by undef in the constant vector. Save the
8767     // variable scalar element and its index for use in the insertelement.
8768     LLVMContext &Context = *DAG.getContext();
8769     Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
8770     SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
8771     SDValue VarElt;
8772     SDValue InsIndex;
8773     for (unsigned i = 0; i != NumElems; ++i) {
8774       SDValue Elt = Op.getOperand(i);
8775       if (auto *C = dyn_cast<ConstantSDNode>(Elt))
8776         ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
8777       else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
8778         ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
8779       else if (!Elt.isUndef()) {
8780         assert(!VarElt.getNode() && !InsIndex.getNode() &&
8781                "Expected one variable element in this vector");
8782         VarElt = Elt;
8783         InsIndex = DAG.getVectorIdxConstant(i, dl);
8784       }
8785     }
8786     Constant *CV = ConstantVector::get(ConstVecOps);
8787     SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
8788 
8789     // The constants we just created may not be legal (e.g., floating point). We
8790     // must lower the vector right here because we cannot guarantee that we'll
8791     // legalize it before loading it. This is also why we could not just create
8792     // a new build vector here. If the build vector contains illegal constants,
8793     // it could get split back up into a series of insert elements.
8794     // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
8795     SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
8796     MachineFunction &MF = DAG.getMachineFunction();
8797     MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
8798     SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
8799     unsigned InsertC = InsIndex->getAsZExtVal();
8800     unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
8801     if (InsertC < NumEltsInLow128Bits)
8802       return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
8803 
8804     // There's no good way to insert into the high elements of a >128-bit
8805     // vector, so use shuffles to avoid an extract/insert sequence.
8806     assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
8807     assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
8808     SmallVector<int, 8> ShuffleMask;
8809     unsigned NumElts = VT.getVectorNumElements();
8810     for (unsigned i = 0; i != NumElts; ++i)
8811       ShuffleMask.push_back(i == InsertC ? NumElts : i);
8812     SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
8813     return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
8814   }
8815 
8816   // Special case for a single non-zero, non-undef element.
8817   if (NumNonZero == 1) {
8818     unsigned Idx = NonZeroMask.countr_zero();
8819     SDValue Item = Op.getOperand(Idx);
8820 
8821     // If we have a constant or non-constant insertion into the low element of
8822     // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
8823     // the rest of the elements.  This will be matched as movd/movq/movss/movsd
8824     // depending on what the source datatype is.
8825     if (Idx == 0) {
8826       if (NumZero == 0)
8827         return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
8828 
8829       if (EltVT == MVT::i32 || EltVT == MVT::f16 || EltVT == MVT::f32 ||
8830           EltVT == MVT::f64 || (EltVT == MVT::i64 && Subtarget.is64Bit()) ||
8831           (EltVT == MVT::i16 && Subtarget.hasFP16())) {
8832         assert((VT.is128BitVector() || VT.is256BitVector() ||
8833                 VT.is512BitVector()) &&
8834                "Expected an SSE value type!");
8835         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
8836         // Turn it into a MOVL (i.e. movsh, movss, movsd, movw or movd) to a
8837         // zero vector.
8838         return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
8839       }
8840 
8841       // We can't directly insert an i8 or i16 into a vector, so zero extend
8842       // it to i32 first.
8843       if (EltVT == MVT::i16 || EltVT == MVT::i8) {
8844         Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
8845         MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
8846         Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
8847         Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
8848         return DAG.getBitcast(VT, Item);
8849       }
8850     }
8851 
8852     // Is it a vector logical left shift?
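    // e.g. v2i64 (build_vector 0, x) becomes (scalar_to_vector x) shifted left
    // by half the vector width, leaving x in element 1 and zeros below it.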
8853     if (NumElems == 2 && Idx == 1 &&
8854         X86::isZeroNode(Op.getOperand(0)) &&
8855         !X86::isZeroNode(Op.getOperand(1))) {
8856       unsigned NumBits = VT.getSizeInBits();
8857       return getVShift(true, VT,
8858                        DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
8859                                    VT, Op.getOperand(1)),
8860                        NumBits/2, DAG, *this, dl);
8861     }
8862 
8863     if (IsAllConstants) // Otherwise, it's better to do a constpool load.
8864       return SDValue();
8865 
8866     // Otherwise, if this is a vector with i32 or f32 elements, and the element
8867     // is a non-constant being inserted into an element other than the low one,
8868     // we can't use a constant pool load.  Instead, use SCALAR_TO_VECTOR (aka
8869     // movd/movss) to move this into the low element, then shuffle it into
8870     // place.
8871     if (EVTBits == 32) {
8872       Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
8873       return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
8874     }
8875   }
8876 
8877   // Splat is obviously ok. Let legalizer expand it to a shuffle.
8878   if (Values.size() == 1) {
8879     if (EVTBits == 32) {
8880       // Instead of a shuffle like this:
8881       // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
8882       // Check if it's possible to issue this instead.
8883       // shuffle (vload ptr), undef, <1, 1, 1, 1>
8884       unsigned Idx = NonZeroMask.countr_zero();
8885       SDValue Item = Op.getOperand(Idx);
8886       if (Op.getNode()->isOnlyUserOf(Item.getNode()))
8887         return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
8888     }
8889     return SDValue();
8890   }
8891 
8892   // A vector full of immediates; various special cases are already
8893   // handled, so this is best done with a single constant-pool load.
8894   if (IsAllConstants)
8895     return SDValue();
8896 
8897   if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
8898     return V;
8899 
8900   // See if we can use a vector load to get all of the elements.
8901   {
8902     SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
8903     if (SDValue LD =
8904             EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
8905       return LD;
8906   }
8907 
8908   // If this is a splat of pairs of 32-bit elements, we can use a narrower
8909   // build_vector and broadcast it.
8910   // TODO: We could probably generalize this more.
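  // e.g. v8i32 <a, b, a, b, a, b, a, b> is built as a v4i32 build_vector
  // <a, b, undef, undef>, bitcast to v2i64 and broadcast to the full width.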
8911   if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
8912     SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
8913                        DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
8914     auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
8915       // Make sure all the even/odd operands match.
8916       for (unsigned i = 2; i != NumElems; ++i)
8917         if (Ops[i % 2] != Op.getOperand(i))
8918           return false;
8919       return true;
8920     };
8921     if (CanSplat(Op, NumElems, Ops)) {
8922       MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
8923       MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
8924       // Create a new build vector and cast to v2i64/v2f64.
8925       SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
8926                                      DAG.getBuildVector(NarrowVT, dl, Ops));
8927       // Broadcast from v2i64/v2f64 and cast to final VT.
8928       MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems / 2);
8929       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
8930                                             NewBV));
8931     }
8932   }
8933 
8934   // For AVX-length vectors, build the individual 128-bit pieces and use
8935   // shuffles to put them in place.
8936   if (VT.getSizeInBits() > 128) {
8937     MVT HVT = MVT::getVectorVT(EltVT, NumElems / 2);
8938 
8939     // Build both the lower and upper subvector.
8940     SDValue Lower =
8941         DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
8942     SDValue Upper = DAG.getBuildVector(
8943         HVT, dl, Op->ops().slice(NumElems / 2, NumElems / 2));
8944 
8945     // Recreate the wider vector with the lower and upper part.
8946     return concatSubVectors(Lower, Upper, DAG, dl);
8947   }
8948 
8949   // Let legalizer expand 2-wide build_vectors.
8950   if (EVTBits == 64) {
8951     if (NumNonZero == 1) {
8952       // One half is zero or undef.
8953       unsigned Idx = NonZeroMask.countr_zero();
8954       SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
8955                                Op.getOperand(Idx));
8956       return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
8957     }
8958     return SDValue();
8959   }
8960 
8961   // If element VT is < 32 bits, convert it to inserts into a zero vector.
8962   if (EVTBits == 8 && NumElems == 16)
8963     if (SDValue V = LowerBuildVectorv16i8(Op, NonZeroMask, NumNonZero, NumZero,
8964                                           DAG, Subtarget))
8965       return V;
8966 
8967   if (EltVT == MVT::i16 && NumElems == 8)
8968     if (SDValue V = LowerBuildVectorv8i16(Op, NonZeroMask, NumNonZero, NumZero,
8969                                           DAG, Subtarget))
8970       return V;
8971 
8972   // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
8973   if (EVTBits == 32 && NumElems == 4)
8974     if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
8975       return V;
8976 
8977   // If element VT is == 32 bits, turn it into a number of shuffles.
8978   if (NumElems == 4 && NumZero > 0) {
8979     SmallVector<SDValue, 8> Ops(NumElems);
8980     for (unsigned i = 0; i < 4; ++i) {
8981       bool isZero = !NonZeroMask[i];
8982       if (isZero)
8983         Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
8984       else
8985         Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
8986     }
8987 
8988     for (unsigned i = 0; i < 2; ++i) {
8989       switch (NonZeroMask.extractBitsAsZExtValue(2, i * 2)) {
8990         default: llvm_unreachable("Unexpected NonZero count");
8991         case 0:
8992           Ops[i] = Ops[i*2];  // Must be a zero vector.
8993           break;
8994         case 1:
8995           Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
8996           break;
8997         case 2:
8998           Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
8999           break;
9000         case 3:
9001           Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
9002           break;
9003       }
9004     }
9005 
9006     bool Reverse1 = NonZeroMask.extractBitsAsZExtValue(2, 0) == 2;
9007     bool Reverse2 = NonZeroMask.extractBitsAsZExtValue(2, 2) == 2;
9008     int MaskVec[] = {
9009       Reverse1 ? 1 : 0,
9010       Reverse1 ? 0 : 1,
9011       static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
9012       static_cast<int>(Reverse2 ? NumElems   : NumElems+1)
9013     };
9014     return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
9015   }
9016 
9017   assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
9018 
9019   // Check for a build vector that is mostly a shuffle plus a few insertions.
9020   if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
9021     return Sh;
9022 
9023   // For SSE 4.1, insert the non-undef elements one at a time (e.g. insertps).
9024   if (Subtarget.hasSSE41() && EltVT != MVT::f16) {
9025     SDValue Result;
9026     if (!Op.getOperand(0).isUndef())
9027       Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
9028     else
9029       Result = DAG.getUNDEF(VT);
9030 
9031     for (unsigned i = 1; i < NumElems; ++i) {
9032       if (Op.getOperand(i).isUndef()) continue;
9033       Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
9034                            Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
9035     }
9036     return Result;
9037   }
9038 
9039   // Otherwise, expand into a number of unpckl*. Start by extending each of
9040   // our (non-undef) elements to the full vector width with the element in the
9041   // bottom slot of the vector (which generates no code for SSE).
9042   SmallVector<SDValue, 8> Ops(NumElems);
9043   for (unsigned i = 0; i < NumElems; ++i) {
9044     if (!Op.getOperand(i).isUndef())
9045       Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
9046     else
9047       Ops[i] = DAG.getUNDEF(VT);
9048   }
9049 
9050   // Next, we iteratively mix elements, e.g. for v4f32:
9051   //   Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
9052   //         : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
9053   //   Step 2: unpcklpd X, Y ==>    <3, 2, 1, 0>
9054   for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
9055     // Generate scaled UNPCKL shuffle mask.
9056     SmallVector<int, 16> Mask;
9057     for (unsigned i = 0; i != Scale; ++i)
9058       Mask.push_back(i);
9059     for (unsigned i = 0; i != Scale; ++i)
9060       Mask.push_back(NumElems+i);
9061     Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
9062 
9063     for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
9064       Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
9065   }
9066   return Ops[0];
9067 }
9068 
9069 // 256-bit AVX can use the vinsertf128 instruction
9070 // to create 256-bit vectors from two other 128-bit ones.
9071 // TODO: Detect subvector broadcast here instead of DAG combine?
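// e.g. (v8i32 concat_vectors A, B) with two non-zero operands is built as
//   insert_subvector (insert_subvector undef, A, 0), B, 4
// which is later matched to vinsertf128/vinserti128.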
9072 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
9073                                       const X86Subtarget &Subtarget) {
9074   SDLoc dl(Op);
9075   MVT ResVT = Op.getSimpleValueType();
9076 
9077   assert((ResVT.is256BitVector() ||
9078           ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
9079 
9080   unsigned NumOperands = Op.getNumOperands();
9081   unsigned NumFreezeUndef = 0;
9082   unsigned NumZero = 0;
9083   unsigned NumNonZero = 0;
9084   unsigned NonZeros = 0;
9085   for (unsigned i = 0; i != NumOperands; ++i) {
9086     SDValue SubVec = Op.getOperand(i);
9087     if (SubVec.isUndef())
9088       continue;
9089     if (ISD::isFreezeUndef(SubVec.getNode())) {
9090       // If the freeze(undef) has multiple uses then we must fold to zero.
9091       if (SubVec.hasOneUse())
9092         ++NumFreezeUndef;
9093       else
9094         ++NumZero;
9095     }
9096     else if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9097       ++NumZero;
9098     else {
9099       assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9100       NonZeros |= 1 << i;
9101       ++NumNonZero;
9102     }
9103   }
9104 
9105   // If we have more than 2 non-zeros, build each half separately.
9106   if (NumNonZero > 2) {
9107     MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
9108     ArrayRef<SDUse> Ops = Op->ops();
9109     SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9110                              Ops.slice(0, NumOperands/2));
9111     SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9112                              Ops.slice(NumOperands/2));
9113     return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9114   }
9115 
9116   // Otherwise, build it up through insert_subvectors.
9117   SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
9118                         : (NumFreezeUndef ? DAG.getFreeze(DAG.getUNDEF(ResVT))
9119                                           : DAG.getUNDEF(ResVT));
9120 
9121   MVT SubVT = Op.getOperand(0).getSimpleValueType();
9122   unsigned NumSubElems = SubVT.getVectorNumElements();
9123   for (unsigned i = 0; i != NumOperands; ++i) {
9124     if ((NonZeros & (1 << i)) == 0)
9125       continue;
9126 
9127     Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
9128                       Op.getOperand(i),
9129                       DAG.getIntPtrConstant(i * NumSubElems, dl));
9130   }
9131 
9132   return Vec;
9133 }
9134 
9135 // Lower a CONCAT_VECTORS of vXi1 (k-register) operands, taking advantage of
9136 // zero and undef operands to avoid emitting more KSHIFT/insert_subvector
9137 // sequences than necessary.
9138 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
9139 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
9140                                        const X86Subtarget &Subtarget,
9141                                        SelectionDAG &DAG) {
9142   SDLoc dl(Op);
9143   MVT ResVT = Op.getSimpleValueType();
9144   unsigned NumOperands = Op.getNumOperands();
9145 
9146   assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
9147          "Unexpected number of operands in CONCAT_VECTORS");
9148 
9149   uint64_t Zeros = 0;
9150   uint64_t NonZeros = 0;
9151   for (unsigned i = 0; i != NumOperands; ++i) {
9152     SDValue SubVec = Op.getOperand(i);
9153     if (SubVec.isUndef())
9154       continue;
9155     assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
9156     if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
9157       Zeros |= (uint64_t)1 << i;
9158     else
9159       NonZeros |= (uint64_t)1 << i;
9160   }
9161 
9162   unsigned NumElems = ResVT.getVectorNumElements();
9163 
9164   // If we are inserting a non-zero vector and there are zeros in the LSBs and
9165   // undef in the MSBs, we need to emit a KSHIFTL. The generic lowering to
9166   // insert_subvector will give us two kshifts.
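  // e.g. concat(zero, V, undef, undef) widens V, performs a single KSHIFTL by
  // the subvector width and extracts the result type back out.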
9167   if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
9168       Log2_64(NonZeros) != NumOperands - 1) {
9169     unsigned Idx = Log2_64(NonZeros);
9170     SDValue SubVec = Op.getOperand(Idx);
9171     unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
9172     MVT ShiftVT = widenMaskVectorType(ResVT, Subtarget);
9173     Op = widenSubVector(ShiftVT, SubVec, false, Subtarget, DAG, dl);
9174     Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, Op,
9175                      DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
9176     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
9177                        DAG.getIntPtrConstant(0, dl));
9178   }
9179 
9180   // If there are zero or one non-zeros we can handle this very simply.
9181   if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
9182     SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
9183     if (!NonZeros)
9184       return Vec;
9185     unsigned Idx = Log2_64(NonZeros);
9186     SDValue SubVec = Op.getOperand(Idx);
9187     unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
9188     return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
9189                        DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
9190   }
9191 
9192   if (NumOperands > 2) {
9193     MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
9194     ArrayRef<SDUse> Ops = Op->ops();
9195     SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9196                              Ops.slice(0, NumOperands/2));
9197     SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
9198                              Ops.slice(NumOperands/2));
9199     return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
9200   }
9201 
9202   assert(llvm::popcount(NonZeros) == 2 && "Simple cases not handled?");
9203 
9204   if (ResVT.getVectorNumElements() >= 16)
9205     return Op; // The operation is legal with KUNPCK
9206 
9207   SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
9208                             DAG.getUNDEF(ResVT), Op.getOperand(0),
9209                             DAG.getIntPtrConstant(0, dl));
9210   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
9211                      DAG.getIntPtrConstant(NumElems/2, dl));
9212 }
9213 
9214 static SDValue LowerCONCAT_VECTORS(SDValue Op,
9215                                    const X86Subtarget &Subtarget,
9216                                    SelectionDAG &DAG) {
9217   MVT VT = Op.getSimpleValueType();
9218   if (VT.getVectorElementType() == MVT::i1)
9219     return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
9220 
9221   assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
9222          (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
9223           Op.getNumOperands() == 4)));
9224 
9225   // AVX can use the vinsertf128 instruction to create 256-bit vectors
9226   // from two other 128-bit ones.
9227 
9228   // A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors.
9229   return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
9230 }
9231 
9232 //===----------------------------------------------------------------------===//
9233 // Vector shuffle lowering
9234 //
9235 // This is an experimental code path for lowering vector shuffles on x86. It is
9236 // designed to handle arbitrary vector shuffles and blends, gracefully
9237 // degrading performance as necessary. It works hard to recognize idiomatic
9238 // shuffles and lower them to optimal instruction patterns while staying within
9239 // a framework that allows reasonably efficient handling of all vector shuffle
9240 // patterns.
9241 //===----------------------------------------------------------------------===//
9242 
9243 /// Tiny helper function to identify a no-op mask.
9244 ///
9245 /// This is a somewhat boring predicate function. It checks whether the mask
9246 /// array input, which is assumed to be a single-input shuffle mask of the kind
9247 /// used by the X86 shuffle instructions (not a fully general
9248 /// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and an
9249 /// in-place shuffle are no-ops.
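/// e.g. <-1, 1, 2, -1> is a no-op mask, while <1, 0, 2, 3> is not.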
9250 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
9251   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9252     assert(Mask[i] >= -1 && "Out of bound mask element!");
9253     if (Mask[i] >= 0 && Mask[i] != i)
9254       return false;
9255   }
9256   return true;
9257 }
9258 
9259 /// Test whether there are elements crossing LaneSizeInBits lanes in this
9260 /// shuffle mask.
9261 ///
9262 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
9263 /// and we routinely test for these.
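/// e.g. for v8f32 with 128-bit lanes, <4, 5, 6, 7, 0, 1, 2, 3> crosses lanes,
/// while <1, 0, 3, 2, 5, 4, 7, 6> does not.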
9264 static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
9265                                       unsigned ScalarSizeInBits,
9266                                       ArrayRef<int> Mask) {
9267   assert(LaneSizeInBits && ScalarSizeInBits &&
9268          (LaneSizeInBits % ScalarSizeInBits) == 0 &&
9269          "Illegal shuffle lane size");
9270   int LaneSize = LaneSizeInBits / ScalarSizeInBits;
9271   int Size = Mask.size();
9272   for (int i = 0; i < Size; ++i)
9273     if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
9274       return true;
9275   return false;
9276 }
9277 
9278 /// Test whether there are elements crossing 128-bit lanes in this
9279 /// shuffle mask.
9280 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
9281   return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
9282 }
9283 
9284 /// Test whether elements in each LaneSizeInBits lane in this shuffle mask come
9285 /// from multiple lanes - this is different from isLaneCrossingShuffleMask to
9286 /// better support 'repeated mask + lane permute' style shuffles.
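/// e.g. for v8f32 with 128-bit lanes, <4, 5, 6, 7, 0, 1, 2, 3> is not multi-lane
/// (each lane reads a single source lane), but <0, 4, 1, 5, 2, 6, 3, 7> is.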
9287 static bool isMultiLaneShuffleMask(unsigned LaneSizeInBits,
9288                                    unsigned ScalarSizeInBits,
9289                                    ArrayRef<int> Mask) {
9290   assert(LaneSizeInBits && ScalarSizeInBits &&
9291          (LaneSizeInBits % ScalarSizeInBits) == 0 &&
9292          "Illegal shuffle lane size");
9293   int NumElts = Mask.size();
9294   int NumEltsPerLane = LaneSizeInBits / ScalarSizeInBits;
9295   int NumLanes = NumElts / NumEltsPerLane;
9296   if (NumLanes > 1) {
9297     for (int i = 0; i != NumLanes; ++i) {
9298       int SrcLane = -1;
9299       for (int j = 0; j != NumEltsPerLane; ++j) {
9300         int M = Mask[(i * NumEltsPerLane) + j];
9301         if (M < 0)
9302           continue;
9303         int Lane = (M % NumElts) / NumEltsPerLane;
9304         if (SrcLane >= 0 && SrcLane != Lane)
9305           return true;
9306         SrcLane = Lane;
9307       }
9308     }
9309   }
9310   return false;
9311 }
9312 
9313 /// Test whether a shuffle mask is equivalent within each sub-lane.
9314 ///
9315 /// This checks a shuffle mask to see if it is performing the same
9316 /// lane-relative shuffle in each sub-lane. This trivially implies
9317 /// that it is also not lane-crossing. It may however involve a blend from the
9318 /// same lane of a second vector.
9319 ///
9320 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
9321 /// non-trivial to compute in the face of undef lanes. The representation is
9322 /// suitable for use with existing 128-bit shuffles as entries from the second
9323 /// vector have been remapped to [LaneSize, 2*LaneSize).
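/// e.g. for v8i32 with 128-bit lanes, <0, 9, 2, 11, 4, 13, 6, 15> is repeated,
/// with \p RepeatedMask set to <0, 5, 2, 7>.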
9324 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
9325                                   ArrayRef<int> Mask,
9326                                   SmallVectorImpl<int> &RepeatedMask) {
9327   auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
9328   RepeatedMask.assign(LaneSize, -1);
9329   int Size = Mask.size();
9330   for (int i = 0; i < Size; ++i) {
9331     assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
9332     if (Mask[i] < 0)
9333       continue;
9334     if ((Mask[i] % Size) / LaneSize != i / LaneSize)
9335       // This entry crosses lanes, so there is no way to model this shuffle.
9336       return false;
9337 
9338     // Ok, handle the in-lane shuffles by detecting if and when they repeat.
9339     // Adjust second vector indices to start at LaneSize instead of Size.
9340     int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
9341                                 : Mask[i] % LaneSize + LaneSize;
9342     if (RepeatedMask[i % LaneSize] < 0)
9343       // This is the first non-undef entry in this slot of a 128-bit lane.
9344       RepeatedMask[i % LaneSize] = LocalM;
9345     else if (RepeatedMask[i % LaneSize] != LocalM)
9346       // Found a mismatch with the repeated mask.
9347       return false;
9348   }
9349   return true;
9350 }
9351 
9352 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
9353 static bool
9354 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
9355                                 SmallVectorImpl<int> &RepeatedMask) {
9356   return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
9357 }
9358 
9359 static bool
9360 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
9361   SmallVector<int, 32> RepeatedMask;
9362   return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
9363 }
9364 
9365 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
9366 static bool
9367 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
9368                                 SmallVectorImpl<int> &RepeatedMask) {
9369   return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
9370 }
9371 
9372 /// Test whether a target shuffle mask is equivalent within each sub-lane.
9373 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
9374 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits,
9375                                         unsigned EltSizeInBits,
9376                                         ArrayRef<int> Mask,
9377                                         SmallVectorImpl<int> &RepeatedMask) {
9378   int LaneSize = LaneSizeInBits / EltSizeInBits;
9379   RepeatedMask.assign(LaneSize, SM_SentinelUndef);
9380   int Size = Mask.size();
9381   for (int i = 0; i < Size; ++i) {
9382     assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
9383     if (Mask[i] == SM_SentinelUndef)
9384       continue;
9385     if (Mask[i] == SM_SentinelZero) {
9386       if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
9387         return false;
9388       RepeatedMask[i % LaneSize] = SM_SentinelZero;
9389       continue;
9390     }
9391     if ((Mask[i] % Size) / LaneSize != i / LaneSize)
9392       // This entry crosses lanes, so there is no way to model this shuffle.
9393       return false;
9394 
9395     // Handle the in-lane shuffles by detecting if and when they repeat. Adjust
9396     // later vector indices to start at multiples of LaneSize instead of Size.
9397     int LaneM = Mask[i] / Size;
9398     int LocalM = (Mask[i] % LaneSize) + (LaneM * LaneSize);
9399     if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
9400       // This is the first non-undef entry in this slot of a 128-bit lane.
9401       RepeatedMask[i % LaneSize] = LocalM;
9402     else if (RepeatedMask[i % LaneSize] != LocalM)
9403       // Found a mismatch with the repeated mask.
9404       return false;
9405   }
9406   return true;
9407 }
9408 
9409 /// Test whether a target shuffle mask is equivalent within each sub-lane.
9410 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
9411 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
9412                                         ArrayRef<int> Mask,
9413                                         SmallVectorImpl<int> &RepeatedMask) {
9414   return isRepeatedTargetShuffleMask(LaneSizeInBits, VT.getScalarSizeInBits(),
9415                                      Mask, RepeatedMask);
9416 }
9417 
9418 /// Checks whether the vector elements referenced by two shuffle masks are
9419 /// equivalent.
9420 static bool IsElementEquivalent(int MaskSize, SDValue Op, SDValue ExpectedOp,
9421                                 int Idx, int ExpectedIdx) {
9422   assert(0 <= Idx && Idx < MaskSize && 0 <= ExpectedIdx &&
9423          ExpectedIdx < MaskSize && "Out of range element index");
9424   if (!Op || !ExpectedOp || Op.getOpcode() != ExpectedOp.getOpcode())
9425     return false;
9426 
9427   switch (Op.getOpcode()) {
9428   case ISD::BUILD_VECTOR:
9429     // If the values are build vectors, we can look through them to find
9430     // equivalent inputs that make the shuffles equivalent.
9431     // TODO: Handle MaskSize != Op.getNumOperands()?
9432     if (MaskSize == (int)Op.getNumOperands() &&
9433         MaskSize == (int)ExpectedOp.getNumOperands())
9434       return Op.getOperand(Idx) == ExpectedOp.getOperand(ExpectedIdx);
9435     break;
9436   case X86ISD::VBROADCAST:
9437   case X86ISD::VBROADCAST_LOAD:
9438     // TODO: Handle MaskSize != Op.getValueType().getVectorNumElements()?
9439     return (Op == ExpectedOp &&
9440             (int)Op.getValueType().getVectorNumElements() == MaskSize);
9441   case X86ISD::HADD:
9442   case X86ISD::HSUB:
9443   case X86ISD::FHADD:
9444   case X86ISD::FHSUB:
9445   case X86ISD::PACKSS:
9446   case X86ISD::PACKUS:
9447     // HOP(X,X) can refer to the elt from the lower/upper half of a lane.
9448     // TODO: Handle MaskSize != NumElts?
9449     // TODO: Handle HOP(X,Y) vs HOP(Y,X) equivalence cases.
9450     if (Op == ExpectedOp && Op.getOperand(0) == Op.getOperand(1)) {
9451       MVT VT = Op.getSimpleValueType();
9452       int NumElts = VT.getVectorNumElements();
9453       if (MaskSize == NumElts) {
9454         int NumLanes = VT.getSizeInBits() / 128;
9455         int NumEltsPerLane = NumElts / NumLanes;
9456         int NumHalfEltsPerLane = NumEltsPerLane / 2;
9457         bool SameLane =
9458             (Idx / NumEltsPerLane) == (ExpectedIdx / NumEltsPerLane);
9459         bool SameElt =
9460             (Idx % NumHalfEltsPerLane) == (ExpectedIdx % NumHalfEltsPerLane);
9461         return SameLane && SameElt;
9462       }
9463     }
9464     break;
9465   }
9466 
9467   return false;
9468 }
9469 
9470 /// Checks whether a shuffle mask is equivalent to an explicit list of
9471 /// arguments.
9472 ///
9473 /// This is a fast way to test a shuffle mask against a fixed pattern:
9474 ///
9475 ///   if (isShuffleEquivalent(Mask, {3, 2, 1, 0})) { ... }
9476 ///
9477 /// It returns true if the mask is exactly as wide as the argument list, and
9478 /// each element of the mask is either -1 (signifying undef) or the value given
9479 /// in the argument.
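///
/// Illustrative example (not from the original source): a mask of
/// {-1, 2, -1, 0} is treated as equivalent to {3, 2, 1, 0} because the undef
/// (-1) entries may take any value, whereas {3, 2, 1, 1} is not, unless the
/// referenced elements of V1 can be proven identical via IsElementEquivalent.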
9480 static bool isShuffleEquivalent(ArrayRef<int> Mask, ArrayRef<int> ExpectedMask,
9481                                 SDValue V1 = SDValue(),
9482                                 SDValue V2 = SDValue()) {
9483   int Size = Mask.size();
9484   if (Size != (int)ExpectedMask.size())
9485     return false;
9486 
9487   for (int i = 0; i < Size; ++i) {
9488     assert(Mask[i] >= -1 && "Out of bound mask element!");
9489     int MaskIdx = Mask[i];
9490     int ExpectedIdx = ExpectedMask[i];
9491     if (0 <= MaskIdx && MaskIdx != ExpectedIdx) {
9492       SDValue MaskV = MaskIdx < Size ? V1 : V2;
9493       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
9494       MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
9495       ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
9496       if (!IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
9497         return false;
9498     }
9499   }
9500   return true;
9501 }
9502 
9503 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
9504 ///
9505 /// The masks must be exactly the same width.
9506 ///
9507 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
9508 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
9509 ///
9510 /// SM_SentinelZero is accepted as a valid negative index but must match in
9511 /// both, or via a known bits test.
9512 static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
9513                                       ArrayRef<int> ExpectedMask,
9514                                       const SelectionDAG &DAG,
9515                                       SDValue V1 = SDValue(),
9516                                       SDValue V2 = SDValue()) {
9517   int Size = Mask.size();
9518   if (Size != (int)ExpectedMask.size())
9519     return false;
9520   assert(llvm::all_of(ExpectedMask,
9521                       [Size](int M) { return isInRange(M, 0, 2 * Size); }) &&
9522          "Illegal target shuffle mask");
9523 
9524   // Check for out-of-range target shuffle mask indices.
9525   if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
9526     return false;
9527 
9528   // Don't use V1/V2 if they're not the same size as the shuffle mask type.
9529   if (V1 && (V1.getValueSizeInBits() != VT.getSizeInBits() ||
9530              !V1.getValueType().isVector()))
9531     V1 = SDValue();
9532   if (V2 && (V2.getValueSizeInBits() != VT.getSizeInBits() ||
9533              !V2.getValueType().isVector()))
9534     V2 = SDValue();
9535 
9536   APInt ZeroV1 = APInt::getZero(Size);
9537   APInt ZeroV2 = APInt::getZero(Size);
9538 
9539   for (int i = 0; i < Size; ++i) {
9540     int MaskIdx = Mask[i];
9541     int ExpectedIdx = ExpectedMask[i];
9542     if (MaskIdx == SM_SentinelUndef || MaskIdx == ExpectedIdx)
9543       continue;
9544     if (MaskIdx == SM_SentinelZero) {
9545       // If we need this expected index to be a zero element, then update the
9546       // relevant zero mask and perform the known-bits test at the end to
9547       // minimize repeated computations.
9548       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
9549       if (ExpectedV &&
9550           Size == (int)ExpectedV.getValueType().getVectorNumElements()) {
9551         int BitIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
9552         APInt &ZeroMask = ExpectedIdx < Size ? ZeroV1 : ZeroV2;
9553         ZeroMask.setBit(BitIdx);
9554         continue;
9555       }
9556     }
9557     if (MaskIdx >= 0) {
9558       SDValue MaskV = MaskIdx < Size ? V1 : V2;
9559       SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
9560       MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
9561       ExpectedIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
9562       if (IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
9563         continue;
9564     }
9565     return false;
9566   }
9567   return (ZeroV1.isZero() || DAG.MaskedVectorIsZero(V1, ZeroV1)) &&
9568          (ZeroV2.isZero() || DAG.MaskedVectorIsZero(V2, ZeroV2));
9569 }
9570 
9571 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
9572 // instructions.
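// Illustrative note (not from the original source): for MVT::v8i16,
// createUnpackShuffleMask produces <0,8,1,9,2,10,3,11> for vpunpcklwd and
// <4,12,5,13,6,14,7,15> for vpunpckhwd, so a v8i32/v8f32 mask is accepted
// here only if it is equivalent to one of those two patterns.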
9573 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT,
9574                                   const SelectionDAG &DAG) {
9575   if (VT != MVT::v8i32 && VT != MVT::v8f32)
9576     return false;
9577 
9578   SmallVector<int, 8> Unpcklwd;
9579   createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
9580                           /* Unary = */ false);
9581   SmallVector<int, 8> Unpckhwd;
9582   createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
9583                           /* Unary = */ false);
9584   bool IsUnpackwdMask = (isTargetShuffleEquivalent(VT, Mask, Unpcklwd, DAG) ||
9585                          isTargetShuffleEquivalent(VT, Mask, Unpckhwd, DAG));
9586   return IsUnpackwdMask;
9587 }
9588 
9589 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask,
9590                                       const SelectionDAG &DAG) {
9591   // Create 128-bit vector type based on mask size.
9592   MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
9593   MVT VT = MVT::getVectorVT(EltVT, Mask.size());
9594 
9595   // We can't assume a canonical shuffle mask, so try the commuted version too.
9596   SmallVector<int, 4> CommutedMask(Mask);
9597   ShuffleVectorSDNode::commuteMask(CommutedMask);
9598 
9599   // Match any of unary/binary or low/high.
9600   for (unsigned i = 0; i != 4; ++i) {
9601     SmallVector<int, 16> UnpackMask;
9602     createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
9603     if (isTargetShuffleEquivalent(VT, Mask, UnpackMask, DAG) ||
9604         isTargetShuffleEquivalent(VT, CommutedMask, UnpackMask, DAG))
9605       return true;
9606   }
9607   return false;
9608 }
9609 
9610 /// Return true if a shuffle mask chooses elements identically in its top and
9611 /// bottom halves. For example, any splat mask has the same top and bottom
9612 /// halves. If an element is undefined in only one half of the mask, the halves
9613 /// are not considered identical.
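///
/// Illustrative example (not from the original source): <0,1,2,3,0,1,2,3>
/// has identical halves, while <0,1,-1,3,0,1,2,3> does not, because element 2
/// is undef in only one half.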
9614 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
9615   assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
9616   unsigned HalfSize = Mask.size() / 2;
9617   for (unsigned i = 0; i != HalfSize; ++i) {
9618     if (Mask[i] != Mask[i + HalfSize])
9619       return false;
9620   }
9621   return true;
9622 }
9623 
9624 /// Get a 4-lane 8-bit shuffle immediate for a mask.
9625 ///
9626 /// This helper function produces an 8-bit shuffle immediate corresponding to
9627 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
9628 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
9629 /// example.
9630 ///
9631 /// NB: We rely heavily on "undef" masks preserving the input lane.
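///
/// Worked example (illustrative, not from the original source): Mask
/// <1,0,3,2> encodes as (2 << 6) | (3 << 4) | (0 << 2) | 1 = 0xB1, while a
/// mask with a single non-undef element such as <-1,2,-1,-1> is splatted to
/// 0xAA (i.e. <2,2,2,2>) to help later broadcast matching.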
9632 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
9633   assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
9634   assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
9635   assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
9636   assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
9637   assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
9638 
9639   // If the mask only uses one non-undef element, then fully 'splat' it to
9640   // improve later broadcast matching.
9641   int FirstIndex = find_if(Mask, [](int M) { return M >= 0; }) - Mask.begin();
9642   assert(0 <= FirstIndex && FirstIndex < 4 && "All undef shuffle mask");
9643 
9644   int FirstElt = Mask[FirstIndex];
9645   if (all_of(Mask, [FirstElt](int M) { return M < 0 || M == FirstElt; }))
9646     return (FirstElt << 6) | (FirstElt << 4) | (FirstElt << 2) | FirstElt;
9647 
9648   unsigned Imm = 0;
9649   Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
9650   Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
9651   Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
9652   Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
9653   return Imm;
9654 }
9655 
9656 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
9657                                           SelectionDAG &DAG) {
9658   return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
9659 }
9660 
9661 // The shuffle result has the form:
9662 // 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[] elements appear in
9663 // ascending order. Each element of Zeroable corresponds to a particular
9664 // element of Mask, as described in computeZeroableShuffleElements.
9665 //
9666 // The function looks for a sub-mask whose nonzero elements are in
9667 // increasing order; if such a sub-mask exists, it returns true.
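//
// Illustrative example (not from the original source): with elements 0 and 1
// zeroable and Mask = <0,1,4,5> on a 4-element type, the first non-zeroable
// mask value is 4 (== NumElts), the remaining non-zero values count up from
// there, and the function returns true with IsZeroSideLeft = true.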
9668 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
9669                                      ArrayRef<int> Mask, const EVT &VectorType,
9670                                      bool &IsZeroSideLeft) {
9671   int NextElement = -1;
9672   // Check if the Mask's nonzero elements are in increasing order.
9673   for (int i = 0, e = Mask.size(); i < e; i++) {
9674     // Checks that the mask's zero elements are built from only zeros.
9675     assert(Mask[i] >= -1 && "Out of bound mask element!");
9676     if (Mask[i] < 0)
9677       return false;
9678     if (Zeroable[i])
9679       continue;
9680     // Find the lowest non-zero element.
9681     if (NextElement < 0) {
9682       NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
9683       IsZeroSideLeft = NextElement != 0;
9684     }
9685     // Exit if the mask's non-zero elements are not in increasing order.
9686     if (NextElement != Mask[i])
9687       return false;
9688     NextElement++;
9689   }
9690   return true;
9691 }
9692 
9693 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
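///
/// Illustrative example (not from the original source): a v4i32 shuffle of V1
/// with mask <1,0,3,2> and no zeroable elements produces the PSHUFB byte mask
/// <4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11>; a zeroable element would
/// instead contribute four 0x80 bytes.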
9694 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
9695                                       ArrayRef<int> Mask, SDValue V1,
9696                                       SDValue V2, const APInt &Zeroable,
9697                                       const X86Subtarget &Subtarget,
9698                                       SelectionDAG &DAG) {
9699   int Size = Mask.size();
9700   int LaneSize = 128 / VT.getScalarSizeInBits();
9701   const int NumBytes = VT.getSizeInBits() / 8;
9702   const int NumEltBytes = VT.getScalarSizeInBits() / 8;
9703 
9704   assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
9705          (Subtarget.hasAVX2() && VT.is256BitVector()) ||
9706          (Subtarget.hasBWI() && VT.is512BitVector()));
9707 
9708   SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
9709   // Sign bit set in i8 mask means zero element.
9710   SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
9711 
9712   SDValue V;
9713   for (int i = 0; i < NumBytes; ++i) {
9714     int M = Mask[i / NumEltBytes];
9715     if (M < 0) {
9716       PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
9717       continue;
9718     }
9719     if (Zeroable[i / NumEltBytes]) {
9720       PSHUFBMask[i] = ZeroMask;
9721       continue;
9722     }
9723 
9724     // We can only use a single input of V1 or V2.
9725     SDValue SrcV = (M >= Size ? V2 : V1);
9726     if (V && V != SrcV)
9727       return SDValue();
9728     V = SrcV;
9729     M %= Size;
9730 
9731     // PSHUFB can't cross lanes, ensure this doesn't happen.
9732     if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
9733       return SDValue();
9734 
9735     M = M % LaneSize;
9736     M = M * NumEltBytes + (i % NumEltBytes);
9737     PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
9738   }
9739   assert(V && "Failed to find a source input");
9740 
9741   MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
9742   return DAG.getBitcast(
9743       VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
9744                       DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
9745 }
9746 
9747 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
9748                            const X86Subtarget &Subtarget, SelectionDAG &DAG,
9749                            const SDLoc &dl);
9750 
9751 // X86 has a dedicated shuffle pattern that can be lowered to VEXPAND.
9752 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
9753                                     const APInt &Zeroable,
9754                                     ArrayRef<int> Mask, SDValue &V1,
9755                                     SDValue &V2, SelectionDAG &DAG,
9756                                     const X86Subtarget &Subtarget) {
9757   bool IsLeftZeroSide = true;
9758   if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
9759                                 IsLeftZeroSide))
9760     return SDValue();
9761   unsigned VEXPANDMask = (~Zeroable).getZExtValue();
9762   MVT IntegerType =
9763       MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
9764   SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
9765   unsigned NumElts = VT.getVectorNumElements();
9766   assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
9767          "Unexpected number of vector elements");
9768   SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
9769                               Subtarget, DAG, DL);
9770   SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
9771   SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
9772   return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
9773 }
9774 
9775 static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
9776                                   unsigned &UnpackOpcode, bool IsUnary,
9777                                   ArrayRef<int> TargetMask, const SDLoc &DL,
9778                                   SelectionDAG &DAG,
9779                                   const X86Subtarget &Subtarget) {
9780   int NumElts = VT.getVectorNumElements();
9781 
9782   bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
9783   for (int i = 0; i != NumElts; i += 2) {
9784     int M1 = TargetMask[i + 0];
9785     int M2 = TargetMask[i + 1];
9786     Undef1 &= (SM_SentinelUndef == M1);
9787     Undef2 &= (SM_SentinelUndef == M2);
9788     Zero1 &= isUndefOrZero(M1);
9789     Zero2 &= isUndefOrZero(M2);
9790   }
9791   assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
9792          "Zeroable shuffle detected");
9793 
9794   // Attempt to match the target mask against the unpack lo/hi mask patterns.
9795   SmallVector<int, 64> Unpckl, Unpckh;
9796   createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
9797   if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG, V1,
9798                                 (IsUnary ? V1 : V2))) {
9799     UnpackOpcode = X86ISD::UNPCKL;
9800     V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
9801     V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
9802     return true;
9803   }
9804 
9805   createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
9806   if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG, V1,
9807                                 (IsUnary ? V1 : V2))) {
9808     UnpackOpcode = X86ISD::UNPCKH;
9809     V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
9810     V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
9811     return true;
9812   }
9813 
9814   // If a unary shuffle, attempt to match as an unpack lo/hi with zero.
9815   if (IsUnary && (Zero1 || Zero2)) {
9816     // Don't bother if we can blend instead.
9817     if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
9818         isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
9819       return false;
9820 
9821     bool MatchLo = true, MatchHi = true;
9822     for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
9823       int M = TargetMask[i];
9824 
9825       // Ignore if the input is known to be zero or the index is undef.
9826       if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
9827           (M == SM_SentinelUndef))
9828         continue;
9829 
9830       MatchLo &= (M == Unpckl[i]);
9831       MatchHi &= (M == Unpckh[i]);
9832     }
9833 
9834     if (MatchLo || MatchHi) {
9835       UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
9836       V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
9837       V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
9838       return true;
9839     }
9840   }
9841 
9842   // If a binary shuffle, commute and try again.
9843   if (!IsUnary) {
9844     ShuffleVectorSDNode::commuteMask(Unpckl);
9845     if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG)) {
9846       UnpackOpcode = X86ISD::UNPCKL;
9847       std::swap(V1, V2);
9848       return true;
9849     }
9850 
9851     ShuffleVectorSDNode::commuteMask(Unpckh);
9852     if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG)) {
9853       UnpackOpcode = X86ISD::UNPCKH;
9854       std::swap(V1, V2);
9855       return true;
9856     }
9857   }
9858 
9859   return false;
9860 }
9861 
9862 // X86 has dedicated unpack instructions that can handle specific blend
9863 // operations: UNPCKH and UNPCKL.
9864 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
9865                                      ArrayRef<int> Mask, SDValue V1, SDValue V2,
9866                                      SelectionDAG &DAG) {
9867   SmallVector<int, 8> Unpckl;
9868   createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
9869   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
9870     return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
9871 
9872   SmallVector<int, 8> Unpckh;
9873   createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
9874   if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
9875     return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
9876 
9877   // Commute and try again.
9878   ShuffleVectorSDNode::commuteMask(Unpckl);
9879   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
9880     return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
9881 
9882   ShuffleVectorSDNode::commuteMask(Unpckh);
9883   if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
9884     return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
9885 
9886   return SDValue();
9887 }
9888 
9889 /// Check if the mask can be mapped to a preliminary shuffle (vperm 64-bit)
9890 /// followed by unpack 256-bit.
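///
/// Illustrative example (not from the original source): for v4i64 the splat2
/// masks are <0,0,1,1> (lo) and <2,2,3,3> (hi). A mask of <0,0,1,1> is
/// handled by permuting the 64-bit chunks as <0,2,1,3> and then issuing
/// UNPCKL(V1, V1), which within each 128-bit lane yields <a0,a0,a1,a1>.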
9891 static SDValue lowerShuffleWithUNPCK256(const SDLoc &DL, MVT VT,
9892                                         ArrayRef<int> Mask, SDValue V1,
9893                                         SDValue V2, SelectionDAG &DAG) {
9894   SmallVector<int, 32> Unpckl, Unpckh;
9895   createSplat2ShuffleMask(VT, Unpckl, /* Lo */ true);
9896   createSplat2ShuffleMask(VT, Unpckh, /* Lo */ false);
9897 
9898   unsigned UnpackOpcode;
9899   if (isShuffleEquivalent(Mask, Unpckl, V1, V2))
9900     UnpackOpcode = X86ISD::UNPCKL;
9901   else if (isShuffleEquivalent(Mask, Unpckh, V1, V2))
9902     UnpackOpcode = X86ISD::UNPCKH;
9903   else
9904     return SDValue();
9905 
9906   // This is a "natural" unpack operation (rather than the 128-bit sectored
9907   // operation implemented by AVX). We need to rearrange 64-bit chunks of the
9908   // input in order to use the x86 instruction.
9909   V1 = DAG.getVectorShuffle(MVT::v4f64, DL, DAG.getBitcast(MVT::v4f64, V1),
9910                             DAG.getUNDEF(MVT::v4f64), {0, 2, 1, 3});
9911   V1 = DAG.getBitcast(VT, V1);
9912   return DAG.getNode(UnpackOpcode, DL, VT, V1, V1);
9913 }
9914 
9915 // Check if the mask can be mapped to a TRUNCATE or VTRUNC, truncating the
9916 // source into the lower elements and zeroing the upper elements.
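// Illustrative example (not from the original source, assuming the required
// AVX-512 BW/VLX features): for VT = v16i8 with a mask taking elements
// <0,2,4,...,14> and the upper eight elements zeroable, Scale = 2 gives
// SrcVT = v8i16 and, since 8 x 8 bits < 128, DstVT = v16i8 for an
// X86ISD::VTRUNC that zeroes the upper elements.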
9917 static bool matchShuffleAsVTRUNC(MVT &SrcVT, MVT &DstVT, MVT VT,
9918                                  ArrayRef<int> Mask, const APInt &Zeroable,
9919                                  const X86Subtarget &Subtarget) {
9920   if (!VT.is512BitVector() && !Subtarget.hasVLX())
9921     return false;
9922 
9923   unsigned NumElts = Mask.size();
9924   unsigned EltSizeInBits = VT.getScalarSizeInBits();
9925   unsigned MaxScale = 64 / EltSizeInBits;
9926 
9927   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
9928     unsigned SrcEltBits = EltSizeInBits * Scale;
9929     if (SrcEltBits < 32 && !Subtarget.hasBWI())
9930       continue;
9931     unsigned NumSrcElts = NumElts / Scale;
9932     if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale))
9933       continue;
9934     unsigned UpperElts = NumElts - NumSrcElts;
9935     if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
9936       continue;
9937     SrcVT = MVT::getIntegerVT(EltSizeInBits * Scale);
9938     SrcVT = MVT::getVectorVT(SrcVT, NumSrcElts);
9939     DstVT = MVT::getIntegerVT(EltSizeInBits);
9940     if ((NumSrcElts * EltSizeInBits) >= 128) {
9941       // ISD::TRUNCATE
9942       DstVT = MVT::getVectorVT(DstVT, NumSrcElts);
9943     } else {
9944       // X86ISD::VTRUNC
9945       DstVT = MVT::getVectorVT(DstVT, 128 / EltSizeInBits);
9946     }
9947     return true;
9948   }
9949 
9950   return false;
9951 }
9952 
9953 // Helper to create TRUNCATE/VTRUNC nodes, optionally with zero/undef upper
9954 // element padding to the final DstVT.
9955 static SDValue getAVX512TruncNode(const SDLoc &DL, MVT DstVT, SDValue Src,
9956                                   const X86Subtarget &Subtarget,
9957                                   SelectionDAG &DAG, bool ZeroUppers) {
9958   MVT SrcVT = Src.getSimpleValueType();
9959   MVT DstSVT = DstVT.getScalarType();
9960   unsigned NumDstElts = DstVT.getVectorNumElements();
9961   unsigned NumSrcElts = SrcVT.getVectorNumElements();
9962   unsigned DstEltSizeInBits = DstVT.getScalarSizeInBits();
9963 
9964   if (!DAG.getTargetLoweringInfo().isTypeLegal(SrcVT))
9965     return SDValue();
9966 
9967   // Perform a direct ISD::TRUNCATE if possible.
9968   if (NumSrcElts == NumDstElts)
9969     return DAG.getNode(ISD::TRUNCATE, DL, DstVT, Src);
9970 
9971   if (NumSrcElts > NumDstElts) {
9972     MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
9973     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
9974     return extractSubVector(Trunc, 0, DAG, DL, DstVT.getSizeInBits());
9975   }
9976 
9977   if ((NumSrcElts * DstEltSizeInBits) >= 128) {
9978     MVT TruncVT = MVT::getVectorVT(DstSVT, NumSrcElts);
9979     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Src);
9980     return widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
9981                           DstVT.getSizeInBits());
9982   }
9983 
9984   // Non-VLX targets must truncate from a 512-bit type, so we need to
9985   // widen, truncate and then possibly extract the original subvector.
9986   if (!Subtarget.hasVLX() && !SrcVT.is512BitVector()) {
9987     SDValue NewSrc = widenSubVector(Src, ZeroUppers, Subtarget, DAG, DL, 512);
9988     return getAVX512TruncNode(DL, DstVT, NewSrc, Subtarget, DAG, ZeroUppers);
9989   }
9990 
9991   // Fallback to a X86ISD::VTRUNC, padding if necessary.
9992   MVT TruncVT = MVT::getVectorVT(DstSVT, 128 / DstEltSizeInBits);
9993   SDValue Trunc = DAG.getNode(X86ISD::VTRUNC, DL, TruncVT, Src);
9994   if (DstVT != TruncVT)
9995     Trunc = widenSubVector(Trunc, ZeroUppers, Subtarget, DAG, DL,
9996                            DstVT.getSizeInBits());
9997   return Trunc;
9998 }
9999 
10000 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
10001 //
10002 // An example is the following:
10003 //
10004 // t0: ch = EntryToken
10005 //           t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
10006 //         t25: v4i32 = truncate t2
10007 //       t41: v8i16 = bitcast t25
10008 //       t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
10009 //       Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
10010 //     t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
10011 //   t18: v2i64 = bitcast t51
10012 //
10013 // One can just use a single vpmovdw instruction; without avx512vl we need to
10014 // use the zmm variant and extract the lower subvector, padding with zeroes.
10015 // TODO: Merge with lowerShuffleAsVTRUNC.
10016 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, MVT VT, SDValue V1,
10017                                      SDValue V2, ArrayRef<int> Mask,
10018                                      const APInt &Zeroable,
10019                                      const X86Subtarget &Subtarget,
10020                                      SelectionDAG &DAG) {
10021   assert((VT == MVT::v16i8 || VT == MVT::v8i16) && "Unexpected VTRUNC type");
10022   if (!Subtarget.hasAVX512())
10023     return SDValue();
10024 
10025   unsigned NumElts = VT.getVectorNumElements();
10026   unsigned EltSizeInBits = VT.getScalarSizeInBits();
10027   unsigned MaxScale = 64 / EltSizeInBits;
10028   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
10029     unsigned SrcEltBits = EltSizeInBits * Scale;
10030     unsigned NumSrcElts = NumElts / Scale;
10031     unsigned UpperElts = NumElts - NumSrcElts;
10032     if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale) ||
10033         !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
10034       continue;
10035 
10036     // Attempt to find a matching source truncation, but as a fallback VLX
10037     // cases can use the VPMOV directly.
10038     SDValue Src = peekThroughBitcasts(V1);
10039     if (Src.getOpcode() == ISD::TRUNCATE &&
10040         Src.getScalarValueSizeInBits() == SrcEltBits) {
10041       Src = Src.getOperand(0);
10042     } else if (Subtarget.hasVLX()) {
10043       MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
10044       MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
10045       Src = DAG.getBitcast(SrcVT, Src);
10046       // Don't do this if PACKSS/PACKUS could perform it more cheaply.
10047       if (Scale == 2 &&
10048           ((DAG.ComputeNumSignBits(Src) > EltSizeInBits) ||
10049            (DAG.computeKnownBits(Src).countMinLeadingZeros() >= EltSizeInBits)))
10050         return SDValue();
10051     } else
10052       return SDValue();
10053 
10054     // VPMOVWB is only available with avx512bw.
10055     if (!Subtarget.hasBWI() && Src.getScalarValueSizeInBits() < 32)
10056       return SDValue();
10057 
10058     bool UndefUppers = isUndefInRange(Mask, NumSrcElts, UpperElts);
10059     return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
10060   }
10061 
10062   return SDValue();
10063 }
10064 
10065 // Attempt to match binary shuffle patterns as a truncate.
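// Illustrative example (not from the original source, assuming AVX-512 BW):
// a v16i8 shuffle of two v16i8 inputs with mask <0,2,4,...,30> concatenates
// V1 and V2 into a v32i8, bitcasts to v16i16 and truncates back to v16i8;
// with an odd offset such as <1,3,5,...,31>, and provided the concat of V1
// and V2 is cheap, the v16i16 value is first shifted right by 8 bits
// (X86ISD::VSRLI) so the wanted bytes reach the low half of each element.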
10066 static SDValue lowerShuffleAsVTRUNC(const SDLoc &DL, MVT VT, SDValue V1,
10067                                     SDValue V2, ArrayRef<int> Mask,
10068                                     const APInt &Zeroable,
10069                                     const X86Subtarget &Subtarget,
10070                                     SelectionDAG &DAG) {
10071   assert((VT.is128BitVector() || VT.is256BitVector()) &&
10072          "Unexpected VTRUNC type");
10073   if (!Subtarget.hasAVX512())
10074     return SDValue();
10075 
10076   unsigned NumElts = VT.getVectorNumElements();
10077   unsigned EltSizeInBits = VT.getScalarSizeInBits();
10078   unsigned MaxScale = 64 / EltSizeInBits;
10079   for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
10080     // TODO: Support non-BWI VPMOVWB truncations?
10081     unsigned SrcEltBits = EltSizeInBits * Scale;
10082     if (SrcEltBits < 32 && !Subtarget.hasBWI())
10083       continue;
10084 
10085     // Match shuffle <Ofs,Ofs+Scale,Ofs+2*Scale,..,undef_or_zero,undef_or_zero>
10086     // Bail if the V2 elements are undef.
10087     unsigned NumHalfSrcElts = NumElts / Scale;
10088     unsigned NumSrcElts = 2 * NumHalfSrcElts;
10089     for (unsigned Offset = 0; Offset != Scale; ++Offset) {
10090       if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, Offset, Scale) ||
10091           isUndefInRange(Mask, NumHalfSrcElts, NumHalfSrcElts))
10092         continue;
10093 
10094       // The elements beyond the truncation must be undef/zero.
10095       unsigned UpperElts = NumElts - NumSrcElts;
10096       if (UpperElts > 0 &&
10097           !Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnes())
10098         continue;
10099       bool UndefUppers =
10100           UpperElts > 0 && isUndefInRange(Mask, NumSrcElts, UpperElts);
10101 
10102       // For offset truncations, ensure that the concat is cheap.
10103       if (Offset) {
10104         auto IsCheapConcat = [&](SDValue Lo, SDValue Hi) {
10105           if (Lo.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
10106               Hi.getOpcode() == ISD::EXTRACT_SUBVECTOR)
10107             return Lo.getOperand(0) == Hi.getOperand(0);
10108           if (ISD::isNormalLoad(Lo.getNode()) &&
10109               ISD::isNormalLoad(Hi.getNode())) {
10110             auto *LDLo = cast<LoadSDNode>(Lo);
10111             auto *LDHi = cast<LoadSDNode>(Hi);
10112             return DAG.areNonVolatileConsecutiveLoads(
10113                 LDHi, LDLo, Lo.getValueType().getStoreSize(), 1);
10114           }
10115           return false;
10116         };
10117         if (!IsCheapConcat(V1, V2))
10118           continue;
10119       }
10120 
10121       // As we're using both sources, we need to concatenate them together
10122       // and truncate from the double-sized source.
10123       MVT ConcatVT = MVT::getVectorVT(VT.getScalarType(), NumElts * 2);
10124       SDValue Src = DAG.getNode(ISD::CONCAT_VECTORS, DL, ConcatVT, V1, V2);
10125 
10126       MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
10127       MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
10128       Src = DAG.getBitcast(SrcVT, Src);
10129 
10130       // Shift the offset'd elements into place for the truncation.
10131       // TODO: Use getTargetVShiftByConstNode.
10132       if (Offset)
10133         Src = DAG.getNode(
10134             X86ISD::VSRLI, DL, SrcVT, Src,
10135             DAG.getTargetConstant(Offset * EltSizeInBits, DL, MVT::i8));
10136 
10137       return getAVX512TruncNode(DL, VT, Src, Subtarget, DAG, !UndefUppers);
10138     }
10139   }
10140 
10141   return SDValue();
10142 }
10143 
10144 /// Check whether a compaction lowering can be done by dropping even/odd
10145 /// elements and compute how many times even/odd elements must be dropped.
10146 ///
10147 /// This handles shuffles which take every Nth element where N is a power of
10148 /// two. Example shuffle masks:
10149 ///
10150 /// (even)
10151 ///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
10152 ///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
10153 ///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
10154 ///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
10155 ///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
10156 ///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
10157 ///
10158 /// (odd)
10159 ///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15,  0,  2,  4,  6,  8, 10, 12, 14
10160 ///  N = 1:  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31
10161 ///
10162 /// Any of these lanes can of course be undef.
10163 ///
10164 /// This routine only supports N <= 3.
10165 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
10166 /// for larger N.
10167 ///
10168 /// \returns N above, or the number of times even/odd elements must be dropped
10169 /// if there is such a number. Otherwise returns zero.
10170 static int canLowerByDroppingElements(ArrayRef<int> Mask, bool MatchEven,
10171                                       bool IsSingleInput) {
10172   // The modulus for the shuffle vector entries is based on whether this is
10173   // a single input or not.
10174   int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
10175   assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
10176          "We should only be called with masks with a power-of-2 size!");
10177 
10178   uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
10179   int Offset = MatchEven ? 0 : 1;
10180 
10181   // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
10182   // and 2^3 simultaneously. This is because we may have ambiguity with
10183   // partially undef inputs.
10184   bool ViableForN[3] = {true, true, true};
10185 
10186   for (int i = 0, e = Mask.size(); i < e; ++i) {
10187     // Ignore undef lanes, we'll optimistically collapse them to the pattern we
10188     // want.
10189     if (Mask[i] < 0)
10190       continue;
10191 
10192     bool IsAnyViable = false;
10193     for (unsigned j = 0; j != std::size(ViableForN); ++j)
10194       if (ViableForN[j]) {
10195         uint64_t N = j + 1;
10196 
10197         // The shuffle mask must be equal to (i * 2^N) % M.
10198         if ((uint64_t)(Mask[i] - Offset) == (((uint64_t)i << N) & ModMask))
10199           IsAnyViable = true;
10200         else
10201           ViableForN[j] = false;
10202       }
10203     // Early exit if we exhaust the possible powers of two.
10204     if (!IsAnyViable)
10205       break;
10206   }
10207 
10208   for (unsigned j = 0; j != std::size(ViableForN); ++j)
10209     if (ViableForN[j])
10210       return j + 1;
10211 
10212   // Return 0 as there is no viable power of two.
10213   return 0;
10214 }
10215 
10216 // X86 has dedicated pack instructions that can handle specific truncation
10217 // operations: PACKSS and PACKUS.
10218 // Checks for compaction shuffle masks if MaxStages > 1.
10219 // TODO: Add support for matching multiple PACKSS/PACKUS stages.
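// Illustrative example (not from the original source): a v8i16 mask of
// <0,2,4,6,8,10,12,14> is the single-stage binary compaction pattern, so it
// can match PACKUS of two v4i32 inputs when their upper 16 bits are known
// zero (PACKUSDW needs SSE4.1), or PACKSS when each 32-bit element has more
// than 16 sign bits.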
10220 static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
10221                                  unsigned &PackOpcode, ArrayRef<int> TargetMask,
10222                                  const SelectionDAG &DAG,
10223                                  const X86Subtarget &Subtarget,
10224                                  unsigned MaxStages = 1) {
10225   unsigned NumElts = VT.getVectorNumElements();
10226   unsigned BitSize = VT.getScalarSizeInBits();
10227   assert(0 < MaxStages && MaxStages <= 3 && (BitSize << MaxStages) <= 64 &&
10228          "Illegal maximum compaction");
10229 
10230   auto MatchPACK = [&](SDValue N1, SDValue N2, MVT PackVT) {
10231     unsigned NumSrcBits = PackVT.getScalarSizeInBits();
10232     unsigned NumPackedBits = NumSrcBits - BitSize;
10233     N1 = peekThroughBitcasts(N1);
10234     N2 = peekThroughBitcasts(N2);
10235     unsigned NumBits1 = N1.getScalarValueSizeInBits();
10236     unsigned NumBits2 = N2.getScalarValueSizeInBits();
10237     bool IsZero1 = llvm::isNullOrNullSplat(N1, /*AllowUndefs*/ false);
10238     bool IsZero2 = llvm::isNullOrNullSplat(N2, /*AllowUndefs*/ false);
10239     if ((!N1.isUndef() && !IsZero1 && NumBits1 != NumSrcBits) ||
10240         (!N2.isUndef() && !IsZero2 && NumBits2 != NumSrcBits))
10241       return false;
10242     if (Subtarget.hasSSE41() || BitSize == 8) {
10243       APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
10244       if ((N1.isUndef() || IsZero1 || DAG.MaskedValueIsZero(N1, ZeroMask)) &&
10245           (N2.isUndef() || IsZero2 || DAG.MaskedValueIsZero(N2, ZeroMask))) {
10246         V1 = N1;
10247         V2 = N2;
10248         SrcVT = PackVT;
10249         PackOpcode = X86ISD::PACKUS;
10250         return true;
10251       }
10252     }
10253     bool IsAllOnes1 = llvm::isAllOnesOrAllOnesSplat(N1, /*AllowUndefs*/ false);
10254     bool IsAllOnes2 = llvm::isAllOnesOrAllOnesSplat(N2, /*AllowUndefs*/ false);
10255     if ((N1.isUndef() || IsZero1 || IsAllOnes1 ||
10256          DAG.ComputeNumSignBits(N1) > NumPackedBits) &&
10257         (N2.isUndef() || IsZero2 || IsAllOnes2 ||
10258          DAG.ComputeNumSignBits(N2) > NumPackedBits)) {
10259       V1 = N1;
10260       V2 = N2;
10261       SrcVT = PackVT;
10262       PackOpcode = X86ISD::PACKSS;
10263       return true;
10264     }
10265     return false;
10266   };
10267 
10268   // Attempt to match against wider and wider compaction patterns.
10269   for (unsigned NumStages = 1; NumStages <= MaxStages; ++NumStages) {
10270     MVT PackSVT = MVT::getIntegerVT(BitSize << NumStages);
10271     MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);
10272 
10273     // Try binary shuffle.
10274     SmallVector<int, 32> BinaryMask;
10275     createPackShuffleMask(VT, BinaryMask, false, NumStages);
10276     if (isTargetShuffleEquivalent(VT, TargetMask, BinaryMask, DAG, V1, V2))
10277       if (MatchPACK(V1, V2, PackVT))
10278         return true;
10279 
10280     // Try unary shuffle.
10281     SmallVector<int, 32> UnaryMask;
10282     createPackShuffleMask(VT, UnaryMask, true, NumStages);
10283     if (isTargetShuffleEquivalent(VT, TargetMask, UnaryMask, DAG, V1))
10284       if (MatchPACK(V1, V1, PackVT))
10285         return true;
10286   }
10287 
10288   return false;
10289 }
10290 
10291 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
10292                                     SDValue V1, SDValue V2, SelectionDAG &DAG,
10293                                     const X86Subtarget &Subtarget) {
10294   MVT PackVT;
10295   unsigned PackOpcode;
10296   unsigned SizeBits = VT.getSizeInBits();
10297   unsigned EltBits = VT.getScalarSizeInBits();
10298   unsigned MaxStages = Log2_32(64 / EltBits);
10299   if (!matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
10300                             Subtarget, MaxStages))
10301     return SDValue();
10302 
10303   unsigned CurrentEltBits = PackVT.getScalarSizeInBits();
10304   unsigned NumStages = Log2_32(CurrentEltBits / EltBits);
10305 
10306   // Don't lower multi-stage packs on AVX512; truncation is better.
10307   if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX())
10308     return SDValue();
10309 
10310   // Pack to the largest type possible:
10311   // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
10312   unsigned MaxPackBits = 16;
10313   if (CurrentEltBits > 16 &&
10314       (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41()))
10315     MaxPackBits = 32;
10316 
10317   // Repeatedly pack down to the target size.
10318   SDValue Res;
10319   for (unsigned i = 0; i != NumStages; ++i) {
10320     unsigned SrcEltBits = std::min(MaxPackBits, CurrentEltBits);
10321     unsigned NumSrcElts = SizeBits / SrcEltBits;
10322     MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
10323     MVT DstSVT = MVT::getIntegerVT(SrcEltBits / 2);
10324     MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
10325     MVT DstVT = MVT::getVectorVT(DstSVT, NumSrcElts * 2);
10326     Res = DAG.getNode(PackOpcode, DL, DstVT, DAG.getBitcast(SrcVT, V1),
10327                       DAG.getBitcast(SrcVT, V2));
10328     V1 = V2 = Res;
10329     CurrentEltBits /= 2;
10330   }
10331   assert(Res && Res.getValueType() == VT &&
10332          "Failed to lower compaction shuffle");
10333   return Res;
10334 }
10335 
10336 /// Try to emit a bitmask instruction for a shuffle.
10337 ///
10338 /// This handles cases where we can model a blend exactly as a bitmask due to
10339 /// one of the inputs being zeroable.
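///
/// Illustrative example (not from the original source): for a v4i32 shuffle
/// whose mask keeps elements 0 and 1 of V1 while elements 2 and 3 are known
/// zeroable, the blend is modelled as V1 & <-1,-1,0,0>, a single AND with a
/// build-vector mask.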
10340 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
10341                                      SDValue V2, ArrayRef<int> Mask,
10342                                      const APInt &Zeroable,
10343                                      const X86Subtarget &Subtarget,
10344                                      SelectionDAG &DAG) {
10345   MVT MaskVT = VT;
10346   MVT EltVT = VT.getVectorElementType();
10347   SDValue Zero, AllOnes;
10348   // Use f64 if i64 isn't legal.
10349   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
10350     EltVT = MVT::f64;
10351     MaskVT = MVT::getVectorVT(EltVT, Mask.size());
10352   }
10353 
10354   MVT LogicVT = VT;
10355   if (EltVT == MVT::f32 || EltVT == MVT::f64) {
10356     Zero = DAG.getConstantFP(0.0, DL, EltVT);
10357     APFloat AllOnesValue =
10358         APFloat::getAllOnesValue(SelectionDAG::EVTToAPFloatSemantics(EltVT));
10359     AllOnes = DAG.getConstantFP(AllOnesValue, DL, EltVT);
10360     LogicVT =
10361         MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
10362   } else {
10363     Zero = DAG.getConstant(0, DL, EltVT);
10364     AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10365   }
10366 
10367   SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
10368   SDValue V;
10369   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10370     if (Zeroable[i])
10371       continue;
10372     if (Mask[i] % Size != i)
10373       return SDValue(); // Not a blend.
10374     if (!V)
10375       V = Mask[i] < Size ? V1 : V2;
10376     else if (V != (Mask[i] < Size ? V1 : V2))
10377       return SDValue(); // Can only let one input through the mask.
10378 
10379     VMaskOps[i] = AllOnes;
10380   }
10381   if (!V)
10382     return SDValue(); // No non-zeroable elements!
10383 
10384   SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
10385   VMask = DAG.getBitcast(LogicVT, VMask);
10386   V = DAG.getBitcast(LogicVT, V);
10387   SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
10388   return DAG.getBitcast(VT, And);
10389 }
10390 
10391 /// Try to emit a blend instruction for a shuffle using bit math.
10392 ///
10393 /// This is used as a fallback approach when first class blend instructions are
10394 /// unavailable. Currently it is only suitable for integer vectors, but could
10395 /// be generalized for floating point vectors if desirable.
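///
/// Illustrative example (not from the original source): a v4i32 mask of
/// <0,5,2,7> builds V1Mask = <-1,0,-1,0> and selects bits as
/// (V1 & V1Mask) | (V2 & ~V1Mask); the v16i8/v32i8 blend path below uses this
/// as a bit blend when VPTERNLOG is available.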
10396 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
10397                                       SDValue V2, ArrayRef<int> Mask,
10398                                       SelectionDAG &DAG) {
10399   assert(VT.isInteger() && "Only supports integer vector types!");
10400   MVT EltVT = VT.getVectorElementType();
10401   SDValue Zero = DAG.getConstant(0, DL, EltVT);
10402   SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
10403   SmallVector<SDValue, 16> MaskOps;
10404   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10405     if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
10406       return SDValue(); // Shuffled input!
10407     MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
10408   }
10409 
10410   SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
10411   return getBitSelect(DL, VT, V1, V2, V1Mask, DAG);
10412 }
10413 
10414 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
10415                                     SDValue PreservedSrc,
10416                                     const X86Subtarget &Subtarget,
10417                                     SelectionDAG &DAG);
10418 
10419 static bool matchShuffleAsBlend(MVT VT, SDValue V1, SDValue V2,
10420                                 MutableArrayRef<int> Mask,
10421                                 const APInt &Zeroable, bool &ForceV1Zero,
10422                                 bool &ForceV2Zero, uint64_t &BlendMask) {
10423   bool V1IsZeroOrUndef =
10424       V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
10425   bool V2IsZeroOrUndef =
10426       V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
10427 
10428   BlendMask = 0;
10429   ForceV1Zero = false, ForceV2Zero = false;
10430   assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
10431 
10432   int NumElts = Mask.size();
10433   int NumLanes = VT.getSizeInBits() / 128;
10434   int NumEltsPerLane = NumElts / NumLanes;
10435   assert((NumLanes * NumEltsPerLane) == NumElts && "Value type mismatch");
10436 
10437   // For 32/64-bit elements, if we only reference one input (plus any undefs),
10438   // then ensure the blend mask part for that lane just references that input.
10439   bool ForceWholeLaneMasks =
10440       VT.is256BitVector() && VT.getScalarSizeInBits() >= 32;
10441 
10442   // Attempt to generate the binary blend mask. If an input is zero then
10443   // we can use any lane.
10444   for (int Lane = 0; Lane != NumLanes; ++Lane) {
10445     // Keep track of the inputs used per lane.
10446     bool LaneV1InUse = false;
10447     bool LaneV2InUse = false;
10448     uint64_t LaneBlendMask = 0;
10449     for (int LaneElt = 0; LaneElt != NumEltsPerLane; ++LaneElt) {
10450       int Elt = (Lane * NumEltsPerLane) + LaneElt;
10451       int M = Mask[Elt];
10452       if (M == SM_SentinelUndef)
10453         continue;
10454       if (M == Elt || (0 <= M && M < NumElts &&
10455                      IsElementEquivalent(NumElts, V1, V1, M, Elt))) {
10456         Mask[Elt] = Elt;
10457         LaneV1InUse = true;
10458         continue;
10459       }
10460       if (M == (Elt + NumElts) ||
10461           (NumElts <= M &&
10462            IsElementEquivalent(NumElts, V2, V2, M - NumElts, Elt))) {
10463         LaneBlendMask |= 1ull << LaneElt;
10464         Mask[Elt] = Elt + NumElts;
10465         LaneV2InUse = true;
10466         continue;
10467       }
10468       if (Zeroable[Elt]) {
10469         if (V1IsZeroOrUndef) {
10470           ForceV1Zero = true;
10471           Mask[Elt] = Elt;
10472           LaneV1InUse = true;
10473           continue;
10474         }
10475         if (V2IsZeroOrUndef) {
10476           ForceV2Zero = true;
10477           LaneBlendMask |= 1ull << LaneElt;
10478           Mask[Elt] = Elt + NumElts;
10479           LaneV2InUse = true;
10480           continue;
10481         }
10482       }
10483       return false;
10484     }
10485 
10486     // If we only used V2 then splat the lane blend mask to avoid any demanded
10487     // elts from V1 in this lane (the V1 equivalent is implicit with a zero
10488     // blend mask bit).
10489     if (ForceWholeLaneMasks && LaneV2InUse && !LaneV1InUse)
10490       LaneBlendMask = (1ull << NumEltsPerLane) - 1;
10491 
10492     BlendMask |= LaneBlendMask << (Lane * NumEltsPerLane);
10493   }
10494   return true;
10495 }
10496 
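// Illustrative example (not from the original source): scaling BlendMask
// 0b0101 with Size = 4 and Scale = 2 yields 0b00110011, i.e. each selected
// element contributes Scale consecutive set bits in the widened mask.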
10497 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
10498                                             int Scale) {
10499   uint64_t ScaledMask = 0;
10500   for (int i = 0; i != Size; ++i)
10501     if (BlendMask & (1ull << i))
10502       ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
10503   return ScaledMask;
10504 }
10505 
10506 /// Try to emit a blend instruction for a shuffle.
10507 ///
10508 /// This doesn't do any checks for the availability of instructions for blending
10509 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
10510 /// be matched in the backend with the type given. What it does check for is
10511 /// that the shuffle mask is a blend, or convertible into a blend with zero.
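///
/// Illustrative example (not from the original source): a v8i16 shuffle with
/// mask <0,9,2,11,4,13,6,15> takes its odd elements from V2, so
/// matchShuffleAsBlend produces BlendMask = 0xAA and, with SSE4.1, this
/// lowers to a single X86ISD::BLENDI (PBLENDW) node.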
10512 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
10513                                    SDValue V2, ArrayRef<int> Original,
10514                                    const APInt &Zeroable,
10515                                    const X86Subtarget &Subtarget,
10516                                    SelectionDAG &DAG) {
10517   uint64_t BlendMask = 0;
10518   bool ForceV1Zero = false, ForceV2Zero = false;
10519   SmallVector<int, 64> Mask(Original);
10520   if (!matchShuffleAsBlend(VT, V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
10521                            BlendMask))
10522     return SDValue();
10523 
10524   // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
10525   if (ForceV1Zero)
10526     V1 = getZeroVector(VT, Subtarget, DAG, DL);
10527   if (ForceV2Zero)
10528     V2 = getZeroVector(VT, Subtarget, DAG, DL);
10529 
10530   unsigned NumElts = VT.getVectorNumElements();
10531 
10532   switch (VT.SimpleTy) {
10533   case MVT::v4i64:
10534   case MVT::v8i32:
10535     assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
10536     [[fallthrough]];
10537   case MVT::v4f64:
10538   case MVT::v8f32:
10539     assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
10540     [[fallthrough]];
10541   case MVT::v2f64:
10542   case MVT::v2i64:
10543   case MVT::v4f32:
10544   case MVT::v4i32:
10545   case MVT::v8i16:
10546     assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
10547     return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
10548                        DAG.getTargetConstant(BlendMask, DL, MVT::i8));
10549   case MVT::v16i16: {
10550     assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
10551     SmallVector<int, 8> RepeatedMask;
10552     if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
10553       // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
10554       assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
10555       BlendMask = 0;
10556       for (int i = 0; i < 8; ++i)
10557         if (RepeatedMask[i] >= 8)
10558           BlendMask |= 1ull << i;
10559       return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10560                          DAG.getTargetConstant(BlendMask, DL, MVT::i8));
10561     }
10562     // Use PBLENDW for lower/upper lanes and then blend lanes.
10563     // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
10564     // merge to VSELECT where useful.
10565     uint64_t LoMask = BlendMask & 0xFF;
10566     uint64_t HiMask = (BlendMask >> 8) & 0xFF;
10567     if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
10568       SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10569                                DAG.getTargetConstant(LoMask, DL, MVT::i8));
10570       SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
10571                                DAG.getTargetConstant(HiMask, DL, MVT::i8));
10572       return DAG.getVectorShuffle(
10573           MVT::v16i16, DL, Lo, Hi,
10574           {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
10575     }
10576     [[fallthrough]];
10577   }
10578   case MVT::v32i8:
10579     assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
10580     [[fallthrough]];
10581   case MVT::v16i8: {
10582     assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
10583 
10584     // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
10585     if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
10586                                                Subtarget, DAG))
10587       return Masked;
10588 
10589     if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
10590       MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
10591       SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
10592       return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
10593     }
10594 
10595     // If we have VPTERNLOG, we can use that as a bit blend.
10596     if (Subtarget.hasVLX())
10597       if (SDValue BitBlend =
10598               lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
10599         return BitBlend;
10600 
10601     // Scale the blend by the number of bytes per element.
10602     int Scale = VT.getScalarSizeInBits() / 8;
10603 
10604     // This form of blend is always done on bytes. Compute the byte vector
10605     // type.
10606     MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
10607 
10608     // x86 allows load folding with blendvb from the 2nd source operand. But
10609     // we are still using LLVM select here (see comment below), so that's V1.
10610     // If V2 can be load-folded and V1 cannot be load-folded, then commute to
10611     // allow that load-folding possibility.
10612     if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
10613       ShuffleVectorSDNode::commuteMask(Mask);
10614       std::swap(V1, V2);
10615     }
10616 
10617     // Compute the VSELECT mask. Note that VSELECT is really confusing in the
10618     // mix of LLVM's code generator and the x86 backend. We tell the code
10619     // generator that boolean values in the elements of an x86 vector register
10620     // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
10621     // mapping a select to operand #1, and 'false' mapping to operand #2. The
10622     // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
10623     // of the element (the remaining are ignored) and 0 in that high bit would
10624     // mean operand #1 while 1 in the high bit would mean operand #2. So while
10625     // the LLVM model for boolean values in vector elements gets the relevant
10626     // bit set, it is set backwards and over constrained relative to x86's
10627     // actual model.
10628     SmallVector<SDValue, 32> VSELECTMask;
10629     for (int i = 0, Size = Mask.size(); i < Size; ++i)
10630       for (int j = 0; j < Scale; ++j)
10631         VSELECTMask.push_back(
10632             Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
10633                         : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
10634                                           MVT::i8));
10635 
10636     V1 = DAG.getBitcast(BlendVT, V1);
10637     V2 = DAG.getBitcast(BlendVT, V2);
10638     return DAG.getBitcast(
10639         VT,
10640         DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
10641                       V1, V2));
10642   }
10643   case MVT::v16f32:
10644   case MVT::v8f64:
10645   case MVT::v8i64:
10646   case MVT::v16i32:
10647   case MVT::v32i16:
10648   case MVT::v64i8: {
10649     // Attempt to lower to a bitmask if we can. Only if not optimizing for size.
10650     bool OptForSize = DAG.shouldOptForSize();
10651     if (!OptForSize) {
10652       if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
10653                                                  Subtarget, DAG))
10654         return Masked;
10655     }
10656 
10657     // Otherwise load an immediate into a GPR, cast to k-register, and use a
10658     // masked move.
10659     MVT IntegerType = MVT::getIntegerVT(std::max<unsigned>(NumElts, 8));
10660     SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
10661     return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
10662   }
10663   default:
10664     llvm_unreachable("Not a supported integer vector type!");
10665   }
10666 }
10667 
10668 /// Try to lower as a blend of elements from two inputs followed by
10669 /// a single-input permutation.
10670 ///
10671 /// This matches the pattern where we can blend elements from two inputs and
10672 /// then reduce the shuffle to a single-input permutation.
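///
/// For example, a v4i32 mask <6,0,5,3> can be handled by first blending with
/// the mask <0,5,6,3> (lanes 0 and 3 from V1, lanes 1 and 2 from V2) and then
/// applying the single-input permutation <2,0,1,3> to the blended result.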
10673 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
10674                                              SDValue V1, SDValue V2,
10675                                              ArrayRef<int> Mask,
10676                                              SelectionDAG &DAG,
10677                                              bool ImmBlends = false) {
10678   // We build up the blend mask while checking whether a blend is a viable way
10679   // to reduce the shuffle.
10680   SmallVector<int, 32> BlendMask(Mask.size(), -1);
10681   SmallVector<int, 32> PermuteMask(Mask.size(), -1);
10682 
10683   for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10684     if (Mask[i] < 0)
10685       continue;
10686 
10687     assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
10688 
10689     if (BlendMask[Mask[i] % Size] < 0)
10690       BlendMask[Mask[i] % Size] = Mask[i];
10691     else if (BlendMask[Mask[i] % Size] != Mask[i])
10692       return SDValue(); // Can't blend in the needed input!
10693 
10694     PermuteMask[i] = Mask[i] % Size;
10695   }
10696 
10697   // If only immediate blends, then bail if the blend mask can't be widened to
10698   // i16.
10699   unsigned EltSize = VT.getScalarSizeInBits();
10700   if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
10701     return SDValue();
10702 
10703   SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
10704   return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
10705 }
10706 
10707 /// Try to lower as an unpack of elements from two inputs followed by
10708 /// a single-input permutation.
10709 ///
10710 /// This matches the pattern where we can unpack elements from two inputs and
10711 /// then reduce the shuffle to a single-input (wider) permutation.
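///
/// For example, a v8i16 mask <1,9,0,8,3,11,2,10> can be produced by an
/// UNPCKLWD of V1 and V2 followed by the single-input in-lane permutation
/// <2,3,0,1,6,7,4,5> of the unpacked result.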
10712 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
10713                                              SDValue V1, SDValue V2,
10714                                              ArrayRef<int> Mask,
10715                                              SelectionDAG &DAG) {
10716   int NumElts = Mask.size();
10717   int NumLanes = VT.getSizeInBits() / 128;
10718   int NumLaneElts = NumElts / NumLanes;
10719   int NumHalfLaneElts = NumLaneElts / 2;
10720 
10721   bool MatchLo = true, MatchHi = true;
10722   SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
10723 
10724   // Determine UNPCKL/UNPCKH type and operand order.
10725   for (int Elt = 0; Elt != NumElts; ++Elt) {
10726     int M = Mask[Elt];
10727     if (M < 0)
10728       continue;
10729 
10730     // Normalize the mask value depending on whether it's V1 or V2.
10731     int NormM = M;
10732     SDValue &Op = Ops[Elt & 1];
10733     if (M < NumElts && (Op.isUndef() || Op == V1))
10734       Op = V1;
10735     else if (NumElts <= M && (Op.isUndef() || Op == V2)) {
10736       Op = V2;
10737       NormM -= NumElts;
10738     } else
10739       return SDValue();
10740 
10741     bool MatchLoAnyLane = false, MatchHiAnyLane = false;
10742     for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
10743       int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
10744       MatchLoAnyLane |= isUndefOrInRange(NormM, Lo, Mid);
10745       MatchHiAnyLane |= isUndefOrInRange(NormM, Mid, Hi);
10746       if (MatchLoAnyLane || MatchHiAnyLane) {
10747         assert((MatchLoAnyLane ^ MatchHiAnyLane) &&
10748                "Failed to match UNPCKLO/UNPCKHI");
10749         break;
10750       }
10751     }
10752     MatchLo &= MatchLoAnyLane;
10753     MatchHi &= MatchHiAnyLane;
10754     if (!MatchLo && !MatchHi)
10755       return SDValue();
10756   }
10757   assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
10758 
10759   // Element indices have changed after unpacking. Calculate permute mask
10760   // so that they will be put back to the positions dictated by the
10761   // original shuffle mask indices.
10762   SmallVector<int, 32> PermuteMask(NumElts, -1);
10763   for (int Elt = 0; Elt != NumElts; ++Elt) {
10764     int M = Mask[Elt];
10765     if (M < 0)
10766       continue;
10767     int NormM = M;
10768     if (NumElts <= M)
10769       NormM -= NumElts;
10770     bool IsFirstOp = M < NumElts;
10771     int BaseMaskElt =
10772         NumLaneElts * (NormM / NumLaneElts) + (2 * (NormM % NumHalfLaneElts));
10773     if ((IsFirstOp && V1 == Ops[0]) || (!IsFirstOp && V2 == Ops[0]))
10774       PermuteMask[Elt] = BaseMaskElt;
10775     else if ((IsFirstOp && V1 == Ops[1]) || (!IsFirstOp && V2 == Ops[1]))
10776       PermuteMask[Elt] = BaseMaskElt + 1;
10777     assert(PermuteMask[Elt] != -1 &&
10778            "Input mask element is defined but failed to assign permute mask");
10779   }
10780 
10781   unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
10782   SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
10783   return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
10784 }
10785 
10786 /// Try to lower a shuffle as a permute of the inputs followed by an
10787 /// UNPCK instruction.
10788 ///
10789 /// This specifically targets cases where we end up with alternating between
10790 /// the two inputs, and so can permute them into something that feeds a single
10791 /// UNPCK instruction. Note that this routine only targets integer vectors
10792 /// because for floating point vectors we have a generalized SHUFPS lowering
10793 /// strategy that handles everything that doesn't *exactly* match an unpack,
10794 /// making this clever lowering unnecessary.
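///
/// For example, a v4i32 mask <1,4,3,6> can be handled by shuffling V1 to
/// <1,3,u,u> and V2 to <0,2,u,u> and then emitting a single UNPCKLDQ of the
/// two permuted inputs.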
10795 static SDValue lowerShuffleAsPermuteAndUnpack(const SDLoc &DL, MVT VT,
10796                                               SDValue V1, SDValue V2,
10797                                               ArrayRef<int> Mask,
10798                                               const X86Subtarget &Subtarget,
10799                                               SelectionDAG &DAG) {
10800   int Size = Mask.size();
10801   assert(Mask.size() >= 2 && "Single element masks are invalid.");
10802 
10803   // This routine only supports 128-bit integer dual input vectors.
10804   if (VT.isFloatingPoint() || !VT.is128BitVector() || V2.isUndef())
10805     return SDValue();
10806 
10807   int NumLoInputs =
10808       count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
10809   int NumHiInputs =
10810       count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });
10811 
10812   bool UnpackLo = NumLoInputs >= NumHiInputs;
10813 
10814   auto TryUnpack = [&](int ScalarSize, int Scale) {
10815     SmallVector<int, 16> V1Mask((unsigned)Size, -1);
10816     SmallVector<int, 16> V2Mask((unsigned)Size, -1);
10817 
10818     for (int i = 0; i < Size; ++i) {
10819       if (Mask[i] < 0)
10820         continue;
10821 
10822       // Each element of the unpack contains Scale elements from this mask.
10823       int UnpackIdx = i / Scale;
10824 
10825       // We only handle the case where V1 feeds the first slots of the unpack.
10826       // We rely on canonicalization to ensure this is the case.
10827       if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
10828         return SDValue();
10829 
10830       // Setup the mask for this input. The indexing is tricky as we have to
10831       // handle the unpack stride.
10832       SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
10833       VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
10834           Mask[i] % Size;
10835     }
10836 
10837     // If we will have to shuffle both inputs to use the unpack, check whether
10838     // we can just unpack first and shuffle the result. If so, skip this unpack.
10839     if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
10840         !isNoopShuffleMask(V2Mask))
10841       return SDValue();
10842 
10843     // Shuffle the inputs into place.
10844     V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
10845     V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
10846 
10847     // Cast the inputs to the type we will use to unpack them.
10848     MVT UnpackVT =
10849         MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
10850     V1 = DAG.getBitcast(UnpackVT, V1);
10851     V2 = DAG.getBitcast(UnpackVT, V2);
10852 
10853     // Unpack the inputs and cast the result back to the desired type.
10854     return DAG.getBitcast(
10855         VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
10856                         UnpackVT, V1, V2));
10857   };
10858 
10859   // We try each unpack from the largest to the smallest to try and find one
10860   // that fits this mask.
10861   int OrigScalarSize = VT.getScalarSizeInBits();
10862   for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
10863     if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
10864       return Unpack;
10865 
10866   // If we're shuffling with a zero vector then we're better off not doing
10867   // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
10868   if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
10869       ISD::isBuildVectorAllZeros(V2.getNode()))
10870     return SDValue();
10871 
10872   // If none of the unpack-rooted lowerings worked (or were profitable) try an
10873   // initial unpack.
10874   if (NumLoInputs == 0 || NumHiInputs == 0) {
10875     assert((NumLoInputs > 0 || NumHiInputs > 0) &&
10876            "We have to have *some* inputs!");
10877     int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;
10878 
10879     // FIXME: We could consider the total complexity of the permute of each
10880     // possible unpacking. Or at the least we should consider how many
10881     // half-crossings are created.
10882     // FIXME: We could consider commuting the unpacks.
10883 
10884     SmallVector<int, 32> PermMask((unsigned)Size, -1);
10885     for (int i = 0; i < Size; ++i) {
10886       if (Mask[i] < 0)
10887         continue;
10888 
10889       assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");
10890 
10891       PermMask[i] =
10892           2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
10893     }
10894     return DAG.getVectorShuffle(
10895         VT, DL,
10896         DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL, DL, VT,
10897                     V1, V2),
10898         DAG.getUNDEF(VT), PermMask);
10899   }
10900 
10901   return SDValue();
10902 }
10903 
10904 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
10905 /// permuting the elements of the result in place.
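///
/// For example, if within each 128-bit lane the elements taken from V1 and
/// the elements taken from V2 fall into disjoint sub-ranges, a single PALIGNR
/// can gather both ranges into one register and a single in-lane permute then
/// places every element.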
10906 static SDValue lowerShuffleAsByteRotateAndPermute(
10907     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10908     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
10909   if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
10910       (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
10911       (VT.is512BitVector() && !Subtarget.hasBWI()))
10912     return SDValue();
10913 
10914   // We don't currently support lane crossing permutes.
10915   if (is128BitLaneCrossingShuffleMask(VT, Mask))
10916     return SDValue();
10917 
10918   int Scale = VT.getScalarSizeInBits() / 8;
10919   int NumLanes = VT.getSizeInBits() / 128;
10920   int NumElts = VT.getVectorNumElements();
10921   int NumEltsPerLane = NumElts / NumLanes;
10922 
10923   // Determine range of mask elts.
10924   bool Blend1 = true;
10925   bool Blend2 = true;
10926   std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
10927   std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
10928   for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
10929     for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
10930       int M = Mask[Lane + Elt];
10931       if (M < 0)
10932         continue;
10933       if (M < NumElts) {
10934         Blend1 &= (M == (Lane + Elt));
10935         assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
10936         M = M % NumEltsPerLane;
10937         Range1.first = std::min(Range1.first, M);
10938         Range1.second = std::max(Range1.second, M);
10939       } else {
10940         M -= NumElts;
10941         Blend2 &= (M == (Lane + Elt));
10942         assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
10943         M = M % NumEltsPerLane;
10944         Range2.first = std::min(Range2.first, M);
10945         Range2.second = std::max(Range2.second, M);
10946       }
10947     }
10948   }
10949 
10950   // Bail if we don't need both elements.
10951   // TODO - it might be worth doing this for unary shuffles if the permute
10952   // can be widened.
10953   if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
10954       !(0 <= Range2.first && Range2.second < NumEltsPerLane))
10955     return SDValue();
10956 
10957   if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
10958     return SDValue();
10959 
10960   // Rotate the 2 ops so we can access both ranges, then permute the result.
10961   auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
10962     MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
10963     SDValue Rotate = DAG.getBitcast(
10964         VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
10965                         DAG.getBitcast(ByteVT, Lo),
10966                         DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
10967     SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
10968     for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
10969       for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
10970         int M = Mask[Lane + Elt];
10971         if (M < 0)
10972           continue;
10973         if (M < NumElts)
10974           PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
10975         else
10976           PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
10977       }
10978     }
10979     return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
10980   };
10981 
10982   // Check if the ranges are small enough to rotate from either direction.
10983   if (Range2.second < Range1.first)
10984     return RotateAndPermute(V1, V2, Range1.first, 0);
10985   if (Range1.second < Range2.first)
10986     return RotateAndPermute(V2, V1, Range2.first, NumElts);
10987   return SDValue();
10988 }
10989 
10990 static bool isBroadcastShuffleMask(ArrayRef<int> Mask) {
10991   return isUndefOrEqual(Mask, 0);
10992 }
10993 
10994 static bool isNoopOrBroadcastShuffleMask(ArrayRef<int> Mask) {
10995   return isNoopShuffleMask(Mask) || isBroadcastShuffleMask(Mask);
10996 }
10997 
10998 /// Check if the Mask consists of the same element repeated multiple times.
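/// For example, <5,5,-1,5,5,-1,5,5> qualifies, but a mask where more than
/// half of the elements are undef (e.g. <5,-1,-1,-1,-1,-1,-1,5>) does not.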
10999 static bool isSingleElementRepeatedMask(ArrayRef<int> Mask) {
11000   size_t NumUndefs = 0;
11001   std::optional<int> UniqueElt;
11002   for (int Elt : Mask) {
11003     if (Elt == SM_SentinelUndef) {
11004       NumUndefs++;
11005       continue;
11006     }
11007     if (UniqueElt.has_value() && UniqueElt.value() != Elt)
11008       return false;
11009     UniqueElt = Elt;
11010   }
11011   // Make sure the element is repeated enough times by checking the number of
11012   // undefs is small.
11013   return NumUndefs <= Mask.size() / 2 && UniqueElt.has_value();
11014 }
11015 
11016 /// Generic routine to decompose a shuffle and blend into independent
11017 /// blends and permutes.
11018 ///
11019 /// This matches the extremely common pattern for handling combined
11020 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
11021 /// operations. It will try to pick the best arrangement of shuffles and
11022 /// blends. For vXi8/vXi16 shuffles we may use unpack instead of blend.
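///
/// For example, a v4i32 mask <1,7,0,6> decomposes into the single-input
/// shuffles <1,-1,0,-1> of V1 and <-1,3,-1,2> of V2, whose results are then
/// recombined with the final blend mask <0,5,2,7>.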
11023 static SDValue lowerShuffleAsDecomposedShuffleMerge(
11024     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11025     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11026   int NumElts = Mask.size();
11027   int NumLanes = VT.getSizeInBits() / 128;
11028   int NumEltsPerLane = NumElts / NumLanes;
11029 
11030   // Shuffle the input elements into the desired positions in V1 and V2 and
11031   // unpack/blend them together.
11032   bool IsAlternating = true;
11033   SmallVector<int, 32> V1Mask(NumElts, -1);
11034   SmallVector<int, 32> V2Mask(NumElts, -1);
11035   SmallVector<int, 32> FinalMask(NumElts, -1);
11036   for (int i = 0; i < NumElts; ++i) {
11037     int M = Mask[i];
11038     if (M >= 0 && M < NumElts) {
11039       V1Mask[i] = M;
11040       FinalMask[i] = i;
11041       IsAlternating &= (i & 1) == 0;
11042     } else if (M >= NumElts) {
11043       V2Mask[i] = M - NumElts;
11044       FinalMask[i] = i + NumElts;
11045       IsAlternating &= (i & 1) == 1;
11046     }
11047   }
11048 
11049   // If we effectively only demand the 0'th element of \p Input, but not only
11050   // in the 0'th position, then broadcast said input,
11051   // and change \p InputMask to be a no-op (identity) mask.
11052   auto canonicalizeBroadcastableInput = [DL, VT, &Subtarget,
11053                                          &DAG](SDValue &Input,
11054                                                MutableArrayRef<int> InputMask) {
11055     unsigned EltSizeInBits = Input.getScalarValueSizeInBits();
11056     if (!Subtarget.hasAVX2() && (!Subtarget.hasAVX() || EltSizeInBits < 32 ||
11057                                  !X86::mayFoldLoad(Input, Subtarget)))
11058       return;
11059     if (isNoopShuffleMask(InputMask))
11060       return;
11061     assert(isBroadcastShuffleMask(InputMask) &&
11062            "Expected to demand only the 0'th element.");
11063     Input = DAG.getNode(X86ISD::VBROADCAST, DL, VT, Input);
11064     for (auto I : enumerate(InputMask)) {
11065       int &InputMaskElt = I.value();
11066       if (InputMaskElt >= 0)
11067         InputMaskElt = I.index();
11068     }
11069   };
11070 
11071   // Currently, we may need to produce one shuffle per input, and blend results.
11072   // It is possible that the shuffle for one of the inputs is already a no-op.
11073   // See if we can simplify non-no-op shuffles into broadcasts,
11074   // which we consider to be strictly better than an arbitrary shuffle.
11075   if (isNoopOrBroadcastShuffleMask(V1Mask) &&
11076       isNoopOrBroadcastShuffleMask(V2Mask)) {
11077     canonicalizeBroadcastableInput(V1, V1Mask);
11078     canonicalizeBroadcastableInput(V2, V2Mask);
11079   }
11080 
11081   // Try to lower with the simpler initial blend/unpack/rotate strategies unless
11082   // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
11083   // the shuffle may be able to fold with a load or other benefit. However, when
11084   // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
11085   // pre-shuffle first is a better strategy.
11086   if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
11087     // Only prefer immediate blends to unpack/rotate.
11088     if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11089                                                           DAG, true))
11090       return BlendPerm;
11091     // If either input vector provides only a single element which is repeated
11092     // multiple times, unpacking from both input vectors would generate worse
11093     // code. e.g. for
11094     // t5: v16i8 = vector_shuffle<16,0,16,1,16,2,16,3,16,4,16,5,16,6,16,7> t2, t4
11095     // it is better to process t4 first to create a vector of t4[0], then unpack
11096     // that vector with t2.
11097     if (!isSingleElementRepeatedMask(V1Mask) &&
11098         !isSingleElementRepeatedMask(V2Mask))
11099       if (SDValue UnpackPerm =
11100               lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask, DAG))
11101         return UnpackPerm;
11102     if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
11103             DL, VT, V1, V2, Mask, Subtarget, DAG))
11104       return RotatePerm;
11105     // Unpack/rotate failed - try again with variable blends.
11106     if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
11107                                                           DAG))
11108       return BlendPerm;
11109     if (VT.getScalarSizeInBits() >= 32)
11110       if (SDValue PermUnpack = lowerShuffleAsPermuteAndUnpack(
11111               DL, VT, V1, V2, Mask, Subtarget, DAG))
11112         return PermUnpack;
11113   }
11114 
11115   // If the final mask is an alternating blend of vXi8/vXi16, convert to an
11116   // UNPCKL(SHUFFLE, SHUFFLE) pattern.
11117   // TODO: It doesn't have to be alternating - but each lane mustn't have more
11118   // than half the elements coming from each source.
11119   if (IsAlternating && VT.getScalarSizeInBits() < 32) {
11120     V1Mask.assign(NumElts, -1);
11121     V2Mask.assign(NumElts, -1);
11122     FinalMask.assign(NumElts, -1);
11123     for (int i = 0; i != NumElts; i += NumEltsPerLane)
11124       for (int j = 0; j != NumEltsPerLane; ++j) {
11125         int M = Mask[i + j];
11126         if (M >= 0 && M < NumElts) {
11127           V1Mask[i + (j / 2)] = M;
11128           FinalMask[i + j] = i + (j / 2);
11129         } else if (M >= NumElts) {
11130           V2Mask[i + (j / 2)] = M - NumElts;
11131           FinalMask[i + j] = i + (j / 2) + NumElts;
11132         }
11133       }
11134   }
11135 
11136   V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
11137   V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
11138   return DAG.getVectorShuffle(VT, DL, V1, V2, FinalMask);
11139 }
11140 
11141 static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
11142                                    const X86Subtarget &Subtarget,
11143                                    ArrayRef<int> Mask) {
11144   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11145   assert(EltSizeInBits < 64 && "Can't rotate 64-bit integers");
11146 
11147   // AVX512 only has vXi32/vXi64 rotates, so limit the rotation sub group size.
11148   int MinSubElts = Subtarget.hasAVX512() ? std::max(32 / EltSizeInBits, 2) : 2;
11149   int MaxSubElts = 64 / EltSizeInBits;
11150   unsigned RotateAmt, NumSubElts;
11151   if (!ShuffleVectorInst::isBitRotateMask(Mask, EltSizeInBits, MinSubElts,
11152                                           MaxSubElts, NumSubElts, RotateAmt))
11153     return -1;
11154   unsigned NumElts = Mask.size();
11155   MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
11156   RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
11157   return RotateAmt;
11158 }
11159 
11160 /// Lower shuffle using X86ISD::VROTLI rotations.
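/// For example, on AVX512 targets the v16i8 mask
/// <3,0,1,2, 7,4,5,6, 11,8,9,10, 15,12,13,14> rotates each 32-bit element
/// left by 8 bits and can be lowered to a single VPROLD.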
11161 static SDValue lowerShuffleAsBitRotate(const SDLoc &DL, MVT VT, SDValue V1,
11162                                        ArrayRef<int> Mask,
11163                                        const X86Subtarget &Subtarget,
11164                                        SelectionDAG &DAG) {
11165   // Only XOP + AVX512 targets have bit rotation instructions.
11166   // If we at least have SSSE3 (PSHUFB) then we shouldn't attempt to use this.
11167   bool IsLegal =
11168       (VT.is128BitVector() && Subtarget.hasXOP()) || Subtarget.hasAVX512();
11169   if (!IsLegal && Subtarget.hasSSE3())
11170     return SDValue();
11171 
11172   MVT RotateVT;
11173   int RotateAmt = matchShuffleAsBitRotate(RotateVT, VT.getScalarSizeInBits(),
11174                                           Subtarget, Mask);
11175   if (RotateAmt < 0)
11176     return SDValue();
11177 
11178   // For pre-SSSE3 targets, if we are shuffling vXi8 elts then ISD::ROTL,
11179   // expanded to OR(SRL,SHL), will be more efficient, but if they can
11180     // widen to vXi16 or more then the existing lowering will be better.
11181   if (!IsLegal) {
11182     if ((RotateAmt % 16) == 0)
11183       return SDValue();
11184     // TODO: Use getTargetVShiftByConstNode.
11185     unsigned ShlAmt = RotateAmt;
11186     unsigned SrlAmt = RotateVT.getScalarSizeInBits() - RotateAmt;
11187     V1 = DAG.getBitcast(RotateVT, V1);
11188     SDValue SHL = DAG.getNode(X86ISD::VSHLI, DL, RotateVT, V1,
11189                               DAG.getTargetConstant(ShlAmt, DL, MVT::i8));
11190     SDValue SRL = DAG.getNode(X86ISD::VSRLI, DL, RotateVT, V1,
11191                               DAG.getTargetConstant(SrlAmt, DL, MVT::i8));
11192     SDValue Rot = DAG.getNode(ISD::OR, DL, RotateVT, SHL, SRL);
11193     return DAG.getBitcast(VT, Rot);
11194   }
11195 
11196   SDValue Rot =
11197       DAG.getNode(X86ISD::VROTLI, DL, RotateVT, DAG.getBitcast(RotateVT, V1),
11198                   DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
11199   return DAG.getBitcast(VT, Rot);
11200 }
11201 
11202 /// Try to match a vector shuffle as an element rotation.
11203 ///
11204 /// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
11205 static int matchShuffleAsElementRotate(SDValue &V1, SDValue &V2,
11206                                        ArrayRef<int> Mask) {
11207   int NumElts = Mask.size();
11208 
11209   // We need to detect various ways of spelling a rotation:
11210   //   [11, 12, 13, 14, 15,  0,  1,  2]
11211   //   [-1, 12, 13, 14, -1, -1,  1, -1]
11212   //   [-1, -1, -1, -1, -1, -1,  1,  2]
11213   //   [ 3,  4,  5,  6,  7,  8,  9, 10]
11214   //   [-1,  4,  5,  6, -1, -1,  9, -1]
11215   //   [-1,  4,  5,  6, -1, -1, -1, -1]
11216   int Rotation = 0;
11217   SDValue Lo, Hi;
11218   for (int i = 0; i < NumElts; ++i) {
11219     int M = Mask[i];
11220     assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
11221            "Unexpected mask index.");
11222     if (M < 0)
11223       continue;
11224 
11225     // Determine where a rotated vector would have started.
11226     int StartIdx = i - (M % NumElts);
11227     if (StartIdx == 0)
11228       // The identity rotation isn't interesting, stop.
11229       return -1;
11230 
11231     // If we found the tail of a vector the rotation must be the missing
11232     // front. If we found the head of a vector, it must be how much of the
11233     // head.
11234     int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
11235 
11236     if (Rotation == 0)
11237       Rotation = CandidateRotation;
11238     else if (Rotation != CandidateRotation)
11239       // The rotations don't match, so we can't match this mask.
11240       return -1;
11241 
11242     // Compute which value this mask is pointing at.
11243     SDValue MaskV = M < NumElts ? V1 : V2;
11244 
11245     // Compute which of the two target values this index should be assigned
11246     // to. This reflects whether the high elements are remaining or the low
11247     // elements are remaining.
11248     SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
11249 
11250     // Either set up this value if we've not encountered it before, or check
11251     // that it remains consistent.
11252     if (!TargetV)
11253       TargetV = MaskV;
11254     else if (TargetV != MaskV)
11255       // This may be a rotation, but it pulls from the inputs in some
11256       // unsupported interleaving.
11257       return -1;
11258   }
11259 
11260   // Check that we successfully analyzed the mask, and normalize the results.
11261   assert(Rotation != 0 && "Failed to locate a viable rotation!");
11262   assert((Lo || Hi) && "Failed to find a rotated input vector!");
11263   if (!Lo)
11264     Lo = Hi;
11265   else if (!Hi)
11266     Hi = Lo;
11267 
11268   V1 = Lo;
11269   V2 = Hi;
11270 
11271   return Rotation;
11272 }
11273 
11274 /// Try to lower a vector shuffle as a byte rotation.
11275 ///
11276 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
11277 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
11278 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
11279 /// try to generically lower a vector shuffle through such a pattern. It
11280 /// does not check for the profitability of lowering either as PALIGNR or
11281 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
11282 /// This matches shuffle vectors that look like:
11283 ///
11284 ///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
11285 ///
11286 /// Essentially it concatenates V1 and V2, shifts right by some number of
11287 /// elements, and takes the low elements as the result. Note that while this is
11288 /// specified as a *right shift* because x86 is little-endian, it is a *left
11289 /// rotate* of the vector lanes.
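///
/// For the v8i16 example above the element rotation is 3, so the byte
/// rotation returned (and used as the PALIGNR immediate) is 3 * 2 == 6.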
11290 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
11291                                     ArrayRef<int> Mask) {
11292   // Don't accept any shuffles with zero elements.
11293   if (isAnyZero(Mask))
11294     return -1;
11295 
11296   // PALIGNR works on 128-bit lanes.
11297   SmallVector<int, 16> RepeatedMask;
11298   if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
11299     return -1;
11300 
11301   int Rotation = matchShuffleAsElementRotate(V1, V2, RepeatedMask);
11302   if (Rotation <= 0)
11303     return -1;
11304 
11305   // PALIGNR rotates bytes, so we need to scale the
11306   // rotation based on how many bytes are in the vector lane.
11307   int NumElts = RepeatedMask.size();
11308   int Scale = 16 / NumElts;
11309   return Rotation * Scale;
11310 }
11311 
11312 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
11313                                         SDValue V2, ArrayRef<int> Mask,
11314                                         const X86Subtarget &Subtarget,
11315                                         SelectionDAG &DAG) {
11316   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11317 
11318   SDValue Lo = V1, Hi = V2;
11319   int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
11320   if (ByteRotation <= 0)
11321     return SDValue();
11322 
11323   // Cast the inputs to i8 vector of correct length to match PALIGNR or
11324   // PSLLDQ/PSRLDQ.
11325   MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11326   Lo = DAG.getBitcast(ByteVT, Lo);
11327   Hi = DAG.getBitcast(ByteVT, Hi);
11328 
11329   // SSSE3 targets can use the palignr instruction.
11330   if (Subtarget.hasSSSE3()) {
11331     assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
11332            "512-bit PALIGNR requires BWI instructions");
11333     return DAG.getBitcast(
11334         VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
11335                         DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
11336   }
11337 
11338   assert(VT.is128BitVector() &&
11339          "Rotate-based lowering only supports 128-bit lowering!");
11340   assert(Mask.size() <= 16 &&
11341          "Can shuffle at most 16 bytes in a 128-bit vector!");
11342   assert(ByteVT == MVT::v16i8 &&
11343          "SSE2 rotate lowering only needed for v16i8!");
11344 
11345   // Default SSE2 implementation
11346   int LoByteShift = 16 - ByteRotation;
11347   int HiByteShift = ByteRotation;
11348 
11349   SDValue LoShift =
11350       DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
11351                   DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
11352   SDValue HiShift =
11353       DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
11354                   DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
11355   return DAG.getBitcast(VT,
11356                         DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
11357 }
11358 
11359 /// Try to lower a vector shuffle as a dword/qword rotation.
11360 ///
11361 /// AVX512 has a VALIGND/VALIGNQ instructions that will do an arbitrary
11362 /// rotation of the concatenation of two vectors; This routine will
11363 /// try to generically lower a vector shuffle through such a pattern.
11364 ///
11365 /// Essentially it concatenates V1 and V2, shifts right by some number of
11366 /// elements, and takes the low elements as the result. Note that while this is
11367 /// specified as a *right shift* because x86 is little-endian, it is a *left
11368 /// rotate* of the vector lanes.
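///
/// For example, a v8i32 mask <3,4,5,6,7,8,9,10> matches an element rotation
/// of 3 and can be lowered to a single VALIGND with that immediate.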
11369 static SDValue lowerShuffleAsVALIGN(const SDLoc &DL, MVT VT, SDValue V1,
11370                                     SDValue V2, ArrayRef<int> Mask,
11371                                     const APInt &Zeroable,
11372                                     const X86Subtarget &Subtarget,
11373                                     SelectionDAG &DAG) {
11374   assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
11375          "Only 32-bit and 64-bit elements are supported!");
11376 
11377   // 128/256-bit vectors are only supported with VLX.
11378   assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
11379          && "VLX required for 128/256-bit vectors");
11380 
11381   SDValue Lo = V1, Hi = V2;
11382   int Rotation = matchShuffleAsElementRotate(Lo, Hi, Mask);
11383   if (0 < Rotation)
11384     return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
11385                        DAG.getTargetConstant(Rotation, DL, MVT::i8));
11386 
11387   // See if we can use VALIGN as a cross-lane version of VSHLDQ/VSRLDQ.
11388   // TODO: Pull this out as a matchShuffleAsElementShift helper?
11389   // TODO: We can probably make this more aggressive and use shift-pairs like
11390   // lowerShuffleAsByteShiftMask.
11391   unsigned NumElts = Mask.size();
11392   unsigned ZeroLo = Zeroable.countr_one();
11393   unsigned ZeroHi = Zeroable.countl_one();
11394   assert((ZeroLo + ZeroHi) < NumElts && "Zeroable shuffle detected");
11395   if (!ZeroLo && !ZeroHi)
11396     return SDValue();
11397 
11398   if (ZeroLo) {
11399     SDValue Src = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11400     int Low = Mask[ZeroLo] < (int)NumElts ? 0 : NumElts;
11401     if (isSequentialOrUndefInRange(Mask, ZeroLo, NumElts - ZeroLo, Low))
11402       return DAG.getNode(X86ISD::VALIGN, DL, VT, Src,
11403                          getZeroVector(VT, Subtarget, DAG, DL),
11404                          DAG.getTargetConstant(NumElts - ZeroLo, DL, MVT::i8));
11405   }
11406 
11407   if (ZeroHi) {
11408     SDValue Src = Mask[0] < (int)NumElts ? V1 : V2;
11409     int Low = Mask[0] < (int)NumElts ? 0 : NumElts;
11410     if (isSequentialOrUndefInRange(Mask, 0, NumElts - ZeroHi, Low + ZeroHi))
11411       return DAG.getNode(X86ISD::VALIGN, DL, VT,
11412                          getZeroVector(VT, Subtarget, DAG, DL), Src,
11413                          DAG.getTargetConstant(ZeroHi, DL, MVT::i8));
11414   }
11415 
11416   return SDValue();
11417 }
11418 
11419 /// Try to lower a vector shuffle as a byte shift sequence.
11420 static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
11421                                            SDValue V2, ArrayRef<int> Mask,
11422                                            const APInt &Zeroable,
11423                                            const X86Subtarget &Subtarget,
11424                                            SelectionDAG &DAG) {
11425   assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
11426   assert(VT.is128BitVector() && "Only 128-bit vectors supported");
11427 
11428   // We need a shuffle that has zeros at one/both ends and a sequential
11429   // shuffle from one source within.
11430   unsigned ZeroLo = Zeroable.countr_one();
11431   unsigned ZeroHi = Zeroable.countl_one();
11432   if (!ZeroLo && !ZeroHi)
11433     return SDValue();
11434 
11435   unsigned NumElts = Mask.size();
11436   unsigned Len = NumElts - (ZeroLo + ZeroHi);
11437   if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
11438     return SDValue();
11439 
11440   unsigned Scale = VT.getScalarSizeInBits() / 8;
11441   ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
11442   if (!isUndefOrInRange(StubMask, 0, NumElts) &&
11443       !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
11444     return SDValue();
11445 
11446   SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
11447   Res = DAG.getBitcast(MVT::v16i8, Res);
11448 
11449   // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
11450   // inner sequential set of elements, possibly offset:
11451   // 01234567 --> zzzzzz01 --> 1zzzzzzz
11452   // 01234567 --> 4567zzzz --> zzzzz456
11453   // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
11454   if (ZeroLo == 0) {
11455     unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11456     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11457                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11458     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11459                       DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
11460   } else if (ZeroHi == 0) {
11461     unsigned Shift = Mask[ZeroLo] % NumElts;
11462     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11463                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11464     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11465                       DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11466   } else if (!Subtarget.hasSSSE3()) {
11467     // If we don't have PSHUFB then it's worth avoiding an AND constant mask
11468     // by performing 3 byte shifts. Shuffle combining can kick in above that.
11469     // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
11470     unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
11471     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11472                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11473     Shift += Mask[ZeroLo] % NumElts;
11474     Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
11475                       DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
11476     Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
11477                       DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
11478   } else
11479     return SDValue();
11480 
11481   return DAG.getBitcast(VT, Res);
11482 }
11483 
11484 /// Try to lower a vector shuffle as a bit shift (shifts in zeros).
11485 ///
11486 /// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
11487 /// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
11488 /// matches elements from one of the input vectors shuffled to the left or
11489 /// right with zeroable elements 'shifted in'. It handles both the strictly
11490 /// bit-wise element shifts and the byte shift across an entire 128-bit double
11491 /// quad word lane.
11492 ///
11493 /// PSHL : (little-endian) left bit shift.
11494 /// [ zz, 0, zz,  2 ]
11495 /// [ -1, 4, zz, -1 ]
11496 /// PSRL : (little-endian) right bit shift.
11497 /// [  1, zz,  3, zz]
11498 /// [ -1, -1,  7, zz]
11499 /// PSLLDQ : (little-endian) left byte shift
11500 /// [ zz,  0,  1,  2,  3,  4,  5,  6]
11501 /// [ zz, zz, -1, -1,  2,  3,  4, -1]
11502 /// [ zz, zz, zz, zz, zz, zz, -1,  1]
11503 /// PSRLDQ : (little-endian) right byte shift
11504 /// [  5, 6,  7, zz, zz, zz, zz, zz]
11505 /// [ -1, 5,  6,  7, zz, zz, zz, zz]
11506 /// [  1, 2, -1, -1, -1, -1, zz, zz]
11507 static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
11508                                unsigned ScalarSizeInBits, ArrayRef<int> Mask,
11509                                int MaskOffset, const APInt &Zeroable,
11510                                const X86Subtarget &Subtarget) {
11511   int Size = Mask.size();
11512   unsigned SizeInBits = Size * ScalarSizeInBits;
11513 
11514   auto CheckZeros = [&](int Shift, int Scale, bool Left) {
11515     for (int i = 0; i < Size; i += Scale)
11516       for (int j = 0; j < Shift; ++j)
11517         if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
11518           return false;
11519 
11520     return true;
11521   };
11522 
11523   auto MatchShift = [&](int Shift, int Scale, bool Left) {
11524     for (int i = 0; i != Size; i += Scale) {
11525       unsigned Pos = Left ? i + Shift : i;
11526       unsigned Low = Left ? i : i + Shift;
11527       unsigned Len = Scale - Shift;
11528       if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
11529         return -1;
11530     }
11531 
11532     int ShiftEltBits = ScalarSizeInBits * Scale;
11533     bool ByteShift = ShiftEltBits > 64;
11534     Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
11535                   : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
11536     int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);
11537 
11538     // Normalize the scale for byte shifts to still produce an i64 element
11539     // type.
11540     Scale = ByteShift ? Scale / 2 : Scale;
11541 
11542     // We need to round trip through the appropriate type for the shift.
11543     MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
11544     ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
11545                         : MVT::getVectorVT(ShiftSVT, Size / Scale);
11546     return (int)ShiftAmt;
11547   };
11548 
11549   // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
11550   // keep doubling the size of the integer elements up to that. We can
11551   // then shift the elements of the integer vector by whole multiples of
11552   // their width within the elements of the larger integer vector. Test each
11553   // multiple to see if we can find a match with the moved element indices
11554   // and that the shifted in elements are all zeroable.
11555   unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
11556   for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
11557     for (int Shift = 1; Shift != Scale; ++Shift)
11558       for (bool Left : {true, false})
11559         if (CheckZeros(Shift, Scale, Left)) {
11560           int ShiftAmt = MatchShift(Shift, Scale, Left);
11561           if (0 < ShiftAmt)
11562             return ShiftAmt;
11563         }
11564 
11565   // no match
11566   return -1;
11567 }
11568 
11569 static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
11570                                    SDValue V2, ArrayRef<int> Mask,
11571                                    const APInt &Zeroable,
11572                                    const X86Subtarget &Subtarget,
11573                                    SelectionDAG &DAG, bool BitwiseOnly) {
11574   int Size = Mask.size();
11575   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11576 
11577   MVT ShiftVT;
11578   SDValue V = V1;
11579   unsigned Opcode;
11580 
11581   // Try to match shuffle against V1 shift.
11582   int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11583                                      Mask, 0, Zeroable, Subtarget);
11584 
11585   // If V1 failed, try to match shuffle against V2 shift.
11586   if (ShiftAmt < 0) {
11587     ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
11588                                    Mask, Size, Zeroable, Subtarget);
11589     V = V2;
11590   }
11591 
11592   if (ShiftAmt < 0)
11593     return SDValue();
11594 
11595   if (BitwiseOnly && (Opcode == X86ISD::VSHLDQ || Opcode == X86ISD::VSRLDQ))
11596     return SDValue();
11597 
11598   assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
11599          "Illegal integer vector type");
11600   V = DAG.getBitcast(ShiftVT, V);
11601   V = DAG.getNode(Opcode, DL, ShiftVT, V,
11602                   DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
11603   return DAG.getBitcast(VT, V);
11604 }
11605 
11606 // EXTRQ: Extract Len elements from lower half of source, starting at Idx.
11607 // Remainder of lower half result is zero and upper half is all undef.
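// For example, a v8i16 mask <2,3,Z,Z,U,U,U,U> (Z = zeroable, U = undef)
// matches with BitLen == 32 and BitIdx == 32.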
11608 static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
11609                                 ArrayRef<int> Mask, uint64_t &BitLen,
11610                                 uint64_t &BitIdx, const APInt &Zeroable) {
11611   int Size = Mask.size();
11612   int HalfSize = Size / 2;
11613   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11614   assert(!Zeroable.isAllOnes() && "Fully zeroable shuffle mask");
11615 
11616   // Upper half must be undefined.
11617   if (!isUndefUpperHalf(Mask))
11618     return false;
11619 
11620   // Determine the extraction length from the part of the
11621   // lower half that isn't zeroable.
11622   int Len = HalfSize;
11623   for (; Len > 0; --Len)
11624     if (!Zeroable[Len - 1])
11625       break;
11626   assert(Len > 0 && "Zeroable shuffle mask");
11627 
11628   // Attempt to match first Len sequential elements from the lower half.
11629   SDValue Src;
11630   int Idx = -1;
11631   for (int i = 0; i != Len; ++i) {
11632     int M = Mask[i];
11633     if (M == SM_SentinelUndef)
11634       continue;
11635     SDValue &V = (M < Size ? V1 : V2);
11636     M = M % Size;
11637 
11638     // The extracted elements must start at a valid index and all mask
11639     // elements must be in the lower half.
11640     if (i > M || M >= HalfSize)
11641       return false;
11642 
11643     if (Idx < 0 || (Src == V && Idx == (M - i))) {
11644       Src = V;
11645       Idx = M - i;
11646       continue;
11647     }
11648     return false;
11649   }
11650 
11651   if (!Src || Idx < 0)
11652     return false;
11653 
11654   assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
11655   BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11656   BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11657   V1 = Src;
11658   return true;
11659 }
11660 
11661 // INSERTQ: Extract lowest Len elements from lower half of second source and
11662 // insert over first source, starting at Idx.
11663 // { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
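// For example, a v8i16 mask <0,8,9,3,U,U,U,U> matches with V1 as the base,
// V2 as the insertion, BitLen == 32 and BitIdx == 16.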
11664 static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
11665                                   ArrayRef<int> Mask, uint64_t &BitLen,
11666                                   uint64_t &BitIdx) {
11667   int Size = Mask.size();
11668   int HalfSize = Size / 2;
11669   assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
11670 
11671   // Upper half must be undefined.
11672   if (!isUndefUpperHalf(Mask))
11673     return false;
11674 
11675   for (int Idx = 0; Idx != HalfSize; ++Idx) {
11676     SDValue Base;
11677 
11678     // Attempt to match first source from mask before insertion point.
11679     if (isUndefInRange(Mask, 0, Idx)) {
11680       /* EMPTY */
11681     } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
11682       Base = V1;
11683     } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
11684       Base = V2;
11685     } else {
11686       continue;
11687     }
11688 
11689     // Extend the extraction length looking to match both the insertion of
11690     // the second source and the remaining elements of the first.
11691     for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
11692       SDValue Insert;
11693       int Len = Hi - Idx;
11694 
11695       // Match insertion.
11696       if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
11697         Insert = V1;
11698       } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
11699         Insert = V2;
11700       } else {
11701         continue;
11702       }
11703 
11704       // Match the remaining elements of the lower half.
11705       if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
11706         /* EMPTY */
11707       } else if ((!Base || (Base == V1)) &&
11708                  isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
11709         Base = V1;
11710       } else if ((!Base || (Base == V2)) &&
11711                  isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
11712                                             Size + Hi)) {
11713         Base = V2;
11714       } else {
11715         continue;
11716       }
11717 
11718       BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
11719       BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
11720       V1 = Base;
11721       V2 = Insert;
11722       return true;
11723     }
11724   }
11725 
11726   return false;
11727 }
11728 
11729 /// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
11730 static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
11731                                      SDValue V2, ArrayRef<int> Mask,
11732                                      const APInt &Zeroable, SelectionDAG &DAG) {
11733   uint64_t BitLen, BitIdx;
11734   if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
11735     return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
11736                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
11737                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
11738 
11739   if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
11740     return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
11741                        V2 ? V2 : DAG.getUNDEF(VT),
11742                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
11743                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
11744 
11745   return SDValue();
11746 }
11747 
11748 /// Lower a vector shuffle as a zero or any extension.
11749 ///
11750 /// Given a specific number of elements, element bit width, and extension
11751 /// stride, produce either a zero or any extension based on the available
11752 /// features of the subtarget. The extended elements are consecutive and
11753 /// can start from an offsetted element index in the input; to
11754 /// avoid excess shuffling the offset must either be in the bottom lane
11755 /// or at the start of a higher lane. All extended elements must be from
11756 /// the same lane.
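///
/// For example, with Offset == 0 and Scale == 4, a zero-extending v16i8
/// lowering spreads the low four input bytes across the four 32-bit elements
/// of the result and fills the remaining bytes with zeroes; on SSE4.1 targets
/// this becomes a single PMOVZXBD.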
11757 static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
11758     const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
11759     ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11760   assert(Scale > 1 && "Need a scale to extend.");
11761   int EltBits = VT.getScalarSizeInBits();
11762   int NumElements = VT.getVectorNumElements();
11763   int NumEltsPerLane = 128 / EltBits;
11764   int OffsetLane = Offset / NumEltsPerLane;
11765   assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
11766          "Only 8, 16, and 32 bit elements can be extended.");
11767   assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
11768   assert(0 <= Offset && "Extension offset must be non-negative.");
11769   assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
11770          "Extension offset must be in the first lane or start an upper lane.");
11771 
11772   // Check that an index is in same lane as the base offset.
11773   auto SafeOffset = [&](int Idx) {
11774     return OffsetLane == (Idx / NumEltsPerLane);
11775   };
11776 
11777   // Shift along an input so that the offset base moves to the first element.
11778   auto ShuffleOffset = [&](SDValue V) {
11779     if (!Offset)
11780       return V;
11781 
11782     SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11783     for (int i = 0; i * Scale < NumElements; ++i) {
11784       int SrcIdx = i + Offset;
11785       ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
11786     }
11787     return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
11788   };
11789 
11790   // Found a valid a/zext mask! Try various lowering strategies based on the
11791   // input type and available ISA extensions.
11792   if (Subtarget.hasSSE41()) {
11793     // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
11794     // PUNPCK will catch this in a later shuffle match.
11795     if (Offset && Scale == 2 && VT.is128BitVector())
11796       return SDValue();
11797     MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
11798                                  NumElements / Scale);
11799     InputV = DAG.getBitcast(VT, InputV);
11800     InputV = ShuffleOffset(InputV);
11801     InputV = getEXTEND_VECTOR_INREG(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND,
11802                                     DL, ExtVT, InputV, DAG);
11803     return DAG.getBitcast(VT, InputV);
11804   }
11805 
11806   assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");
11807   InputV = DAG.getBitcast(VT, InputV);
11808 
11809   // For any-extends we can cheat for larger element sizes and use shuffle
11810   // instructions that can fold with a load and/or copy.
11811   if (AnyExt && EltBits == 32) {
11812     int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
11813                          -1};
11814     return DAG.getBitcast(
11815         VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11816                         DAG.getBitcast(MVT::v4i32, InputV),
11817                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
11818   }
11819   if (AnyExt && EltBits == 16 && Scale > 2) {
11820     int PSHUFDMask[4] = {Offset / 2, -1,
11821                          SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
11822     InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
11823                          DAG.getBitcast(MVT::v4i32, InputV),
11824                          getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
11825     int PSHUFWMask[4] = {1, -1, -1, -1};
11826     unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
11827     return DAG.getBitcast(
11828         VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
11829                         DAG.getBitcast(MVT::v8i16, InputV),
11830                         getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
11831   }
11832 
11833   // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
11834   // to 64 bits.
11835   if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
11836     assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
11837     assert(VT.is128BitVector() && "Unexpected vector width!");
11838 
11839     int LoIdx = Offset * EltBits;
11840     SDValue Lo = DAG.getBitcast(
11841         MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
11842                                 DAG.getTargetConstant(EltBits, DL, MVT::i8),
11843                                 DAG.getTargetConstant(LoIdx, DL, MVT::i8)));
11844 
11845     if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
11846       return DAG.getBitcast(VT, Lo);
11847 
11848     int HiIdx = (Offset + 1) * EltBits;
11849     SDValue Hi = DAG.getBitcast(
11850         MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
11851                                 DAG.getTargetConstant(EltBits, DL, MVT::i8),
11852                                 DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
11853     return DAG.getBitcast(VT,
11854                           DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
11855   }
11856 
11857   // If this would require more than 2 unpack instructions to expand, use
11858   // pshufb when available. We can only use more than 2 unpack instructions
11859   // when zero-extending i8 elements, which also makes pshufb easier to use.
11860   if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
11861     assert(NumElements == 16 && "Unexpected byte vector width!");
11862     SDValue PSHUFBMask[16];
11863     for (int i = 0; i < 16; ++i) {
11864       int Idx = Offset + (i / Scale);
11865       if ((i % Scale == 0 && SafeOffset(Idx))) {
11866         PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
11867         continue;
11868       }
11869       PSHUFBMask[i] =
11870           AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
11871     }
11872     InputV = DAG.getBitcast(MVT::v16i8, InputV);
11873     return DAG.getBitcast(
11874         VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
11875                         DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
11876   }
11877 
11878   // If we are extending from an offset, ensure we start on a boundary that
11879   // we can unpack from.
11880   int AlignToUnpack = Offset % (NumElements / Scale);
11881   if (AlignToUnpack) {
11882     SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
11883     for (int i = AlignToUnpack; i < NumElements; ++i)
11884       ShMask[i - AlignToUnpack] = i;
11885     InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
11886     Offset -= AlignToUnpack;
11887   }
11888 
11889   // Otherwise emit a sequence of unpacks.
11890   do {
11891     unsigned UnpackLoHi = X86ISD::UNPCKL;
11892     if (Offset >= (NumElements / 2)) {
11893       UnpackLoHi = X86ISD::UNPCKH;
11894       Offset -= (NumElements / 2);
11895     }
11896 
11897     MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
11898     SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
11899                          : getZeroVector(InputVT, Subtarget, DAG, DL);
11900     InputV = DAG.getBitcast(InputVT, InputV);
11901     InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
11902     Scale /= 2;
11903     EltBits *= 2;
11904     NumElements /= 2;
11905   } while (Scale > 1);
11906   return DAG.getBitcast(VT, InputV);
11907 }
11908 
11909 /// Try to lower a vector shuffle as a zero extension on any microarch.
11910 ///
11911 /// This routine will try to do everything in its power to cleverly lower
11912 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
11913 /// check for the profitability of this lowering; instead, it aggressively
11914 /// matches this pattern. It will use all of the micro-architectural details
11915 /// it can to emit an efficient lowering. It handles both blends with all-zero
11916 /// inputs (to explicitly zero-extend) and undef lanes (which are sometimes
11917 /// undef due to being masked out later).
11918 ///
11919 /// The reason we have dedicated lowering for zext-style shuffles is that they
11920 /// are both incredibly common and often quite performance sensitive.
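/// Illustrative example (editor's sketch; mask chosen for illustration): for a
/// v4i32 shuffle with mask <0, Z, 1, Z> (Z = zeroable), the Scale == 2 attempt
/// matches with InputV = V1, Offset = 0 and AnyExt = false, and the shuffle is
/// lowered as a zero extension of the two low i32 elements to i64 (PMOVZXDQ on
/// SSE4.1). If only the upper half is zeroable and the lower half copies one
/// input in order, the MOVQ-style VZEXT_MOVL fallback at the end fires instead.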
11921 static SDValue lowerShuffleAsZeroOrAnyExtend(
11922     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11923     const APInt &Zeroable, const X86Subtarget &Subtarget,
11924     SelectionDAG &DAG) {
11925   int Bits = VT.getSizeInBits();
11926   int NumLanes = Bits / 128;
11927   int NumElements = VT.getVectorNumElements();
11928   int NumEltsPerLane = NumElements / NumLanes;
11929   assert(VT.getScalarSizeInBits() <= 32 &&
11930          "Exceeds 32-bit integer zero extension limit");
11931   assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
11932 
11933   // Define a helper function to check a particular ext-scale and lower to it if
11934   // valid.
11935   auto Lower = [&](int Scale) -> SDValue {
11936     SDValue InputV;
11937     bool AnyExt = true;
11938     int Offset = 0;
11939     int Matches = 0;
11940     for (int i = 0; i < NumElements; ++i) {
11941       int M = Mask[i];
11942       if (M < 0)
11943         continue; // Valid anywhere but doesn't tell us anything.
11944       if (i % Scale != 0) {
11945         // Each of the extended elements needs to be zeroable.
11946         if (!Zeroable[i])
11947           return SDValue();
11948 
11949         // We are no longer in the any-extend case.
11950         AnyExt = false;
11951         continue;
11952       }
11953 
11954       // The base elements need to be consecutive indices into the same
11955       // input vector.
11956       SDValue V = M < NumElements ? V1 : V2;
11957       M = M % NumElements;
11958       if (!InputV) {
11959         InputV = V;
11960         Offset = M - (i / Scale);
11961       } else if (InputV != V)
11962         return SDValue(); // Flip-flopping inputs.
11963 
11964       // Offset must start in the lowest 128-bit lane or at the start of an
11965       // upper lane.
11966       // FIXME: Is it ever worth allowing a negative base offset?
11967       if (!((0 <= Offset && Offset < NumEltsPerLane) ||
11968             (Offset % NumEltsPerLane) == 0))
11969         return SDValue();
11970 
11971       // If we are offsetting, all referenced entries must come from the same
11972       // lane.
11973       if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
11974         return SDValue();
11975 
11976       if ((M % NumElements) != (Offset + (i / Scale)))
11977         return SDValue(); // Non-consecutive strided elements.
11978       Matches++;
11979     }
11980 
11981     // If we fail to find an input, we have a zero-shuffle which should always
11982     // have already been handled.
11983     // FIXME: Maybe handle this here in case during blending we end up with one?
11984     if (!InputV)
11985       return SDValue();
11986 
11987     // If we are offsetting, don't extend if we only match a single input; we
11988     // can always do better by using a basic PSHUF or PUNPCK.
11989     if (Offset != 0 && Matches < 2)
11990       return SDValue();
11991 
11992     return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
11993                                                  InputV, Mask, Subtarget, DAG);
11994   };
11995 
11996   // The widest scale possible for extending is to a 64-bit integer.
11997   assert(Bits % 64 == 0 &&
11998          "The number of bits in a vector must be divisible by 64 on x86!");
11999   int NumExtElements = Bits / 64;
12000 
12001   // Each iteration, try extending the elements half as much, but into twice as
12002   // many elements.
12003   for (; NumExtElements < NumElements; NumExtElements *= 2) {
12004     assert(NumElements % NumExtElements == 0 &&
12005            "The input vector size must be divisible by the extended size.");
12006     if (SDValue V = Lower(NumElements / NumExtElements))
12007       return V;
12008   }
12009 
12010   // General extends failed, but 128-bit vectors may be able to use MOVQ.
12011   if (Bits != 128)
12012     return SDValue();
12013 
12014   // Returns one of the source operands if the shuffle can be reduced to a
12015   // MOVQ, copying the lower 64 bits and zero-filling the upper 64 bits.
12016   auto CanZExtLowHalf = [&]() {
12017     for (int i = NumElements / 2; i != NumElements; ++i)
12018       if (!Zeroable[i])
12019         return SDValue();
12020     if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
12021       return V1;
12022     if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
12023       return V2;
12024     return SDValue();
12025   };
12026 
12027   if (SDValue V = CanZExtLowHalf()) {
12028     V = DAG.getBitcast(MVT::v2i64, V);
12029     V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
12030     return DAG.getBitcast(VT, V);
12031   }
12032 
12033   // No viable ext lowering found.
12034   return SDValue();
12035 }
12036 
12037 /// Try to get a scalar value for a specific element of a vector.
12038 ///
12039 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
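/// For example (editor's note): if V is BUILD_VECTOR(a, b, c, d), asking for
/// Idx = 2 returns c (bitcast to the element type) as long as the scalar size
/// matches the destination element size; for SCALAR_TO_VECTOR only Idx = 0 can
/// be answered, and a size-changing bitcast makes this routine give up.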
12040 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
12041                                               SelectionDAG &DAG) {
12042   MVT VT = V.getSimpleValueType();
12043   MVT EltVT = VT.getVectorElementType();
12044   V = peekThroughBitcasts(V);
12045 
12046   // If the bitcasts change the element size, we can't extract an equivalent
12047   // element from it.
12048   MVT NewVT = V.getSimpleValueType();
12049   if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
12050     return SDValue();
12051 
12052   if (V.getOpcode() == ISD::BUILD_VECTOR ||
12053       (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
12054     // Ensure the scalar operand is the same size as the destination.
12055     // FIXME: Add support for scalar truncation where possible.
12056     SDValue S = V.getOperand(Idx);
12057     if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
12058       return DAG.getBitcast(EltVT, S);
12059   }
12060 
12061   return SDValue();
12062 }
12063 
12064 /// Helper to test for a load that can be folded with x86 shuffles.
12065 ///
12066 /// This is particularly important because the set of instructions varies
12067 /// significantly based on whether the operand is a load or not.
12068 static bool isShuffleFoldableLoad(SDValue V) {
12069   return V->hasOneUse() &&
12070          ISD::isNON_EXTLoad(peekThroughOneUseBitcasts(V).getNode());
12071 }
12072 
12073 template<typename T>
12074 static bool isSoftF16(T VT, const X86Subtarget &Subtarget) {
12075   T EltVT = VT.getScalarType();
12076   return EltVT == MVT::bf16 || (EltVT == MVT::f16 && !Subtarget.hasFP16());
12077 }
12078 
12079 /// Try to lower insertion of a single element into a zero vector.
12080 ///
12081 /// This is a common pattern that we have especially efficient patterns to lower
12082 /// across all subtarget feature sets.
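/// Illustrative example (editor's sketch; mask chosen for illustration): for
/// the v4i32 mask <4, Z, Z, Z> (Z = zeroable) with V2 built from a scalar, the
/// scalar is re-inserted with SCALAR_TO_VECTOR and the upper lanes are cleared
/// with X86ISD::VZEXT_MOVL (a MOVD-style zeroing move). If the inserted element
/// must land in a nonzero lane instead, the zeroed result is then shuffled or
/// byte-shifted (VSHLDQ) into position.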
12083 static SDValue lowerShuffleAsElementInsertion(
12084     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12085     const APInt &Zeroable, const X86Subtarget &Subtarget,
12086     SelectionDAG &DAG) {
12087   MVT ExtVT = VT;
12088   MVT EltVT = VT.getVectorElementType();
12089   unsigned NumElts = VT.getVectorNumElements();
12090   unsigned EltBits = VT.getScalarSizeInBits();
12091 
12092   if (isSoftF16(EltVT, Subtarget))
12093     return SDValue();
12094 
12095   int V2Index =
12096       find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
12097       Mask.begin();
12098   bool IsV1Constant = getTargetConstantFromNode(V1) != nullptr;
12099   bool IsV1Zeroable = true;
12100   for (int i = 0, Size = Mask.size(); i < Size; ++i)
12101     if (i != V2Index && !Zeroable[i]) {
12102       IsV1Zeroable = false;
12103       break;
12104     }
12105 
12106   // Bail if a non-zero V1 isn't used in place.
12107   if (!IsV1Zeroable) {
12108     SmallVector<int, 8> V1Mask(Mask);
12109     V1Mask[V2Index] = -1;
12110     if (!isNoopShuffleMask(V1Mask))
12111       return SDValue();
12112   }
12113 
12114   // Check for a single input from a SCALAR_TO_VECTOR node.
12115   // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
12116   // all the smarts here sunk into that routine. However, the current
12117   // lowering of BUILD_VECTOR makes that nearly impossible until the old
12118   // vector shuffle lowering is dead.
12119   SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
12120                                                DAG);
12121   if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
12122     // We need to zext the scalar if it is smaller than an i32.
12123     V2S = DAG.getBitcast(EltVT, V2S);
12124     if (EltVT == MVT::i8 || (EltVT == MVT::i16 && !Subtarget.hasFP16())) {
12125       // Using zext to expand a narrow element won't work for non-zero
12126       // insertions. But we can use a masked constant vector if we're
12127       // inserting V2 into the bottom of V1.
12128       if (!IsV1Zeroable && !(IsV1Constant && V2Index == 0))
12129         return SDValue();
12130 
12131       // Zero-extend directly to i32.
12132       ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
12133       V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
12134 
12135       // If we're inserting into a constant, mask off the inserted index
12136       // and OR with the zero-extended scalar.
12137       if (!IsV1Zeroable) {
12138         SmallVector<APInt> Bits(NumElts, APInt::getAllOnes(EltBits));
12139         Bits[V2Index] = APInt::getZero(EltBits);
12140         SDValue BitMask = getConstVector(Bits, VT, DAG, DL);
12141         V1 = DAG.getNode(ISD::AND, DL, VT, V1, BitMask);
12142         V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12143         V2 = DAG.getBitcast(VT, DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2));
12144         return DAG.getNode(ISD::OR, DL, VT, V1, V2);
12145       }
12146     }
12147     V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
12148   } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
12149              EltVT == MVT::i16) {
12150     // Either not inserting from the low element of the input or the input
12151     // element size is too small to use VZEXT_MOVL to clear the high bits.
12152     return SDValue();
12153   }
12154 
12155   if (!IsV1Zeroable) {
12156     // If V1 can't be treated as a zero vector we have fewer options to lower
12157     // this. We can't support integer vectors or non-zero targets cheaply.
12158     assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
12159     if (!VT.isFloatingPoint() || V2Index != 0)
12160       return SDValue();
12161     if (!VT.is128BitVector())
12162       return SDValue();
12163 
12164     // Otherwise, use MOVSD, MOVSS or MOVSH.
12165     unsigned MovOpc = 0;
12166     if (EltVT == MVT::f16)
12167       MovOpc = X86ISD::MOVSH;
12168     else if (EltVT == MVT::f32)
12169       MovOpc = X86ISD::MOVSS;
12170     else if (EltVT == MVT::f64)
12171       MovOpc = X86ISD::MOVSD;
12172     else
12173       llvm_unreachable("Unsupported floating point element type to handle!");
12174     return DAG.getNode(MovOpc, DL, ExtVT, V1, V2);
12175   }
12176 
12177   // This lowering only works for the low element with floating point vectors.
12178   if (VT.isFloatingPoint() && V2Index != 0)
12179     return SDValue();
12180 
12181   V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
12182   if (ExtVT != VT)
12183     V2 = DAG.getBitcast(VT, V2);
12184 
12185   if (V2Index != 0) {
12186     // If we have 4 or fewer lanes we can cheaply shuffle the element into
12187     // the desired position. Otherwise it is more efficient to do a vector
12188     // shift left. We know that we can do a vector shift left because all
12189     // the inputs are zero.
12190     if (VT.isFloatingPoint() || NumElts <= 4) {
12191       SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
12192       V2Shuffle[V2Index] = 0;
12193       V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
12194     } else {
12195       V2 = DAG.getBitcast(MVT::v16i8, V2);
12196       V2 = DAG.getNode(
12197           X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
12198           DAG.getTargetConstant(V2Index * EltBits / 8, DL, MVT::i8));
12199       V2 = DAG.getBitcast(VT, V2);
12200     }
12201   }
12202   return V2;
12203 }
12204 
12205 /// Try to lower a broadcast of a single, truncated integer element coming
12206 /// from a scalar_to_vector/build_vector node \p V0 with larger elements.
12207 ///
12208 /// This assumes we have AVX2.
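/// Illustrative example (editor's sketch; types chosen for illustration): for
/// VT = v8i16 and V0 = (v4i32 build_vector a, b, c, d) with BroadcastIdx = 1,
/// Scale = 2 selects scalar a (V0BroadcastIdx = 0); since OffsetIdx = 1, a is
/// shifted right by 16 bits, truncated to i16, and broadcast via VBROADCAST.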
12209 static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
12210                                             int BroadcastIdx,
12211                                             const X86Subtarget &Subtarget,
12212                                             SelectionDAG &DAG) {
12213   assert(Subtarget.hasAVX2() &&
12214          "We can only lower integer broadcasts with AVX2!");
12215 
12216   MVT EltVT = VT.getVectorElementType();
12217   MVT V0VT = V0.getSimpleValueType();
12218 
12219   assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
12220   assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");
12221 
12222   MVT V0EltVT = V0VT.getVectorElementType();
12223   if (!V0EltVT.isInteger())
12224     return SDValue();
12225 
12226   const unsigned EltSize = EltVT.getSizeInBits();
12227   const unsigned V0EltSize = V0EltVT.getSizeInBits();
12228 
12229   // This is only a truncation if the original element type is larger.
12230   if (V0EltSize <= EltSize)
12231     return SDValue();
12232 
12233   assert(((V0EltSize % EltSize) == 0) &&
12234          "Scalar type sizes must all be powers of 2 on x86!");
12235 
12236   const unsigned V0Opc = V0.getOpcode();
12237   const unsigned Scale = V0EltSize / EltSize;
12238   const unsigned V0BroadcastIdx = BroadcastIdx / Scale;
12239 
12240   if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
12241       V0Opc != ISD::BUILD_VECTOR)
12242     return SDValue();
12243 
12244   SDValue Scalar = V0.getOperand(V0BroadcastIdx);
12245 
12246   // If we're extracting non-least-significant bits, shift so we can truncate.
12247   // Hopefully, we can fold away the trunc/srl/load into the broadcast.
12248   // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
12249   // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
12250   if (const int OffsetIdx = BroadcastIdx % Scale)
12251     Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
12252                          DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));
12253 
12254   return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
12255                      DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
12256 }
12257 
12258 /// Test whether this can be lowered with a single SHUFPS instruction.
12259 ///
12260 /// This is used to disable more specialized lowerings when the shufps lowering
12261 /// will happen to be efficient.
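/// For example (editor's note): <0, 3, 4, 7> needs only one SHUFPS (the low
/// half reads V1 and the high half reads V2), while <0, 4, 1, 5> does not,
/// because its low half mixes elements from both inputs.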
12262 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
12263   // This routine only handles 128-bit shufps.
12264   assert(Mask.size() == 4 && "Unsupported mask size!");
12265   assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
12266   assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
12267   assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
12268   assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");
12269 
12270   // To lower with a single SHUFPS we need to have the low half and high half
12271   // each requiring a single input.
12272   if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
12273     return false;
12274   if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
12275     return false;
12276 
12277   return true;
12278 }
12279 
12280 /// Test whether the specified input (0 or 1) is in-place blended by the
12281 /// given mask.
12282 ///
12283 /// This returns true if the elements from a particular input are already in the
12284 /// slot required by the given mask and require no permutation.
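/// For example (editor's note): with Mask = <0, 5, 2, 7> both inputs are
/// already in place; with Mask = <2, 5, 0, 7> input 1 still is, but input 0 is
/// not, since its elements 0 and 2 are swapped.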
12285 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
12286   assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
12287   int Size = Mask.size();
12288   for (int i = 0; i < Size; ++i)
12289     if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
12290       return false;
12291 
12292   return true;
12293 }
12294 
12295 /// If we are extracting two 128-bit halves of a vector and shuffling the
12296 /// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
12297 /// multi-shuffle lowering.
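/// Illustrative example (editor's sketch; mask chosen for illustration and
/// assuming one-use extracts): with N0 = extract_subvector(X:v8f32, 0),
/// N1 = extract_subvector(X, 4) and Mask = <0, 4, 1, 6>, the mask is neither a
/// single-SHUFPS nor an unpack pattern, so this becomes
/// extract_subvector(shuffle(X, undef, <0, 4, 1, 6, u, u, u, u>), 0), i.e. one
/// VPERMPS plus a free ymm->xmm extract.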
12298 static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
12299                                              SDValue N1, ArrayRef<int> Mask,
12300                                              SelectionDAG &DAG) {
12301   MVT VT = N0.getSimpleValueType();
12302   assert((VT.is128BitVector() &&
12303           (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
12304          "VPERM* family of shuffles requires 32-bit or 64-bit elements");
12305 
12306   // Check that both sources are extracts of the same source vector.
12307   if (N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12308       N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
12309       N0.getOperand(0) != N1.getOperand(0) ||
12310       !N0.hasOneUse() || !N1.hasOneUse())
12311     return SDValue();
12312 
12313   SDValue WideVec = N0.getOperand(0);
12314   MVT WideVT = WideVec.getSimpleValueType();
12315   if (!WideVT.is256BitVector())
12316     return SDValue();
12317 
12318   // Match extracts of each half of the wide source vector. Commute the shuffle
12319   // if the extract of the low half is N1.
12320   unsigned NumElts = VT.getVectorNumElements();
12321   SmallVector<int, 4> NewMask(Mask);
12322   const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
12323   const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
12324   if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
12325     ShuffleVectorSDNode::commuteMask(NewMask);
12326   else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
12327     return SDValue();
12328 
12329   // Final bailout: if the mask is simple, we are better off using an extract
12330   // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
12331   // because that avoids a constant load from memory.
12332   if (NumElts == 4 &&
12333       (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask, DAG)))
12334     return SDValue();
12335 
12336   // Extend the shuffle mask with undef elements.
12337   NewMask.append(NumElts, -1);
12338 
12339   // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
12340   SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
12341                                       NewMask);
12342   // This is free: ymm -> xmm.
12343   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
12344                      DAG.getIntPtrConstant(0, DL));
12345 }
12346 
12347 /// Try to lower broadcast of a single element.
12348 ///
12349 /// For convenience, this code also bundles all of the subtarget feature set
12350 /// filtering. While a little annoying to re-dispatch on type here, there isn't
12351 /// a convenient way to factor it out.
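/// Illustrative example (editor's sketch; mask chosen for illustration): for
/// the v4f32 splat mask <1, 1, 1, 1> on AVX, where V1 is a plain (simple,
/// non-extending) vector load, the load is narrowed to a broadcast of the f32
/// at byte offset 4 (X86ISD::VBROADCAST_LOAD). Without AVX2, non-MOVDDUP
/// broadcasts can only be formed from such a load; with AVX2 a register source
/// works as well.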
12352 static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
12353                                        SDValue V2, ArrayRef<int> Mask,
12354                                        const X86Subtarget &Subtarget,
12355                                        SelectionDAG &DAG) {
12356   MVT EltVT = VT.getVectorElementType();
12357   if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
12358         (Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
12359         (Subtarget.hasAVX2() && (VT.isInteger() || EltVT == MVT::f16))))
12360     return SDValue();
12361 
12362   // With MOVDDUP (v2f64) we can broadcast from a register or a load; otherwise
12363   // we can only broadcast from a register when AVX2 is available.
12364   unsigned NumEltBits = VT.getScalarSizeInBits();
12365   unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
12366                         ? X86ISD::MOVDDUP
12367                         : X86ISD::VBROADCAST;
12368   bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();
12369 
12370   // Check that the mask is a broadcast.
12371   int BroadcastIdx = getSplatIndex(Mask);
12372   if (BroadcastIdx < 0)
12373     return SDValue();
12374   assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
12375                                             "a sorted mask where the broadcast "
12376                                             "comes from V1.");
12377 
12378   // Go up the chain of (vector) values to find a scalar load that we can
12379   // combine with the broadcast.
12380   // TODO: Combine this logic with findEltLoadSrc() used by
12381   //       EltsFromConsecutiveLoads().
12382   int BitOffset = BroadcastIdx * NumEltBits;
12383   SDValue V = V1;
12384   for (;;) {
12385     switch (V.getOpcode()) {
12386     case ISD::BITCAST: {
12387       V = V.getOperand(0);
12388       continue;
12389     }
12390     case ISD::CONCAT_VECTORS: {
12391       int OpBitWidth = V.getOperand(0).getValueSizeInBits();
12392       int OpIdx = BitOffset / OpBitWidth;
12393       V = V.getOperand(OpIdx);
12394       BitOffset %= OpBitWidth;
12395       continue;
12396     }
12397     case ISD::EXTRACT_SUBVECTOR: {
12398       // The extraction index adds to the existing offset.
12399       unsigned EltBitWidth = V.getScalarValueSizeInBits();
12400       unsigned Idx = V.getConstantOperandVal(1);
12401       unsigned BeginOffset = Idx * EltBitWidth;
12402       BitOffset += BeginOffset;
12403       V = V.getOperand(0);
12404       continue;
12405     }
12406     case ISD::INSERT_SUBVECTOR: {
12407       SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
12408       int EltBitWidth = VOuter.getScalarValueSizeInBits();
12409       int Idx = (int)V.getConstantOperandVal(2);
12410       int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
12411       int BeginOffset = Idx * EltBitWidth;
12412       int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
12413       if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
12414         BitOffset -= BeginOffset;
12415         V = VInner;
12416       } else {
12417         V = VOuter;
12418       }
12419       continue;
12420     }
12421     }
12422     break;
12423   }
12424   assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
12425   BroadcastIdx = BitOffset / NumEltBits;
12426 
12427   // Do we need to bitcast the source to retrieve the original broadcast index?
12428   bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;
12429 
12430   // Check if this is a broadcast of a scalar. We special case lowering
12431   // for scalars so that we can more effectively fold with loads.
12432   // If the original value has a larger element type than the shuffle, the
12433   // broadcast element is in essence truncated. Make that explicit to ease
12434   // folding.
12435   if (BitCastSrc && VT.isInteger())
12436     if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
12437             DL, VT, V, BroadcastIdx, Subtarget, DAG))
12438       return TruncBroadcast;
12439 
12440   // Also check the simpler case, where we can directly reuse the scalar.
12441   if (!BitCastSrc &&
12442       ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
12443        (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
12444     V = V.getOperand(BroadcastIdx);
12445 
12446     // If we can't broadcast from a register, check that the input is a load.
12447     if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
12448       return SDValue();
12449   } else if (ISD::isNormalLoad(V.getNode()) &&
12450              cast<LoadSDNode>(V)->isSimple()) {
12451     // We do not check for one-use of the vector load because a broadcast load
12452     // is expected to be a win for code size, register pressure, and possibly
12453     // uops even if the original vector load is not eliminated.
12454 
12455     // Reduce the vector load and shuffle to a broadcasted scalar load.
12456     LoadSDNode *Ld = cast<LoadSDNode>(V);
12457     SDValue BaseAddr = Ld->getOperand(1);
12458     MVT SVT = VT.getScalarType();
12459     unsigned Offset = BroadcastIdx * SVT.getStoreSize();
12460     assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
12461     SDValue NewAddr =
12462         DAG.getMemBasePlusOffset(BaseAddr, TypeSize::getFixed(Offset), DL);
12463 
12464     // Directly form VBROADCAST_LOAD if we're using VBROADCAST opcode rather
12465     // than MOVDDUP.
12466     // FIXME: Should we add VBROADCAST_LOAD isel patterns for pre-AVX?
12467     if (Opcode == X86ISD::VBROADCAST) {
12468       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
12469       SDValue Ops[] = {Ld->getChain(), NewAddr};
12470       V = DAG.getMemIntrinsicNode(
12471           X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SVT,
12472           DAG.getMachineFunction().getMachineMemOperand(
12473               Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12474       DAG.makeEquivalentMemoryOrdering(Ld, V);
12475       return DAG.getBitcast(VT, V);
12476     }
12477     assert(SVT == MVT::f64 && "Unexpected VT!");
12478     V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
12479                     DAG.getMachineFunction().getMachineMemOperand(
12480                         Ld->getMemOperand(), Offset, SVT.getStoreSize()));
12481     DAG.makeEquivalentMemoryOrdering(Ld, V);
12482   } else if (!BroadcastFromReg) {
12483     // We can't broadcast from a vector register.
12484     return SDValue();
12485   } else if (BitOffset != 0) {
12486     // We can only broadcast from the zero-element of a vector register,
12487     // but it can be advantageous to broadcast from the zero-element of a
12488     // subvector.
12489     if (!VT.is256BitVector() && !VT.is512BitVector())
12490       return SDValue();
12491 
12492     // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
12493     if (VT == MVT::v4f64 || VT == MVT::v4i64)
12494       return SDValue();
12495 
12496     // Only broadcast the zero-element of a 128-bit subvector.
12497     if ((BitOffset % 128) != 0)
12498       return SDValue();
12499 
12500     assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
12501            "Unexpected bit-offset");
12502     assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
12503            "Unexpected vector size");
12504     unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
12505     V = extract128BitVector(V, ExtractIdx, DAG, DL);
12506   }
12507 
12508   // On AVX we can use VBROADCAST directly for scalar sources.
12509   if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector()) {
12510     V = DAG.getBitcast(MVT::f64, V);
12511     if (Subtarget.hasAVX()) {
12512       V = DAG.getNode(X86ISD::VBROADCAST, DL, MVT::v2f64, V);
12513       return DAG.getBitcast(VT, V);
12514     }
12515     V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V);
12516   }
12517 
12518   // If this is a scalar, do the broadcast on this type and bitcast.
12519   if (!V.getValueType().isVector()) {
12520     assert(V.getScalarValueSizeInBits() == NumEltBits &&
12521            "Unexpected scalar size");
12522     MVT BroadcastVT = MVT::getVectorVT(V.getSimpleValueType(),
12523                                        VT.getVectorNumElements());
12524     return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
12525   }
12526 
12527   // We only support broadcasting from 128-bit vectors to minimize the
12528   // number of patterns we need to deal with in isel. So extract down to
12529   // 128-bits, removing as many bitcasts as possible.
12530   if (V.getValueSizeInBits() > 128)
12531     V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);
12532 
12533   // Otherwise cast V to a vector with the same element type as VT, but
12534   // possibly narrower than VT. Then perform the broadcast.
12535   unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
12536   MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(), NumSrcElts);
12537   return DAG.getNode(Opcode, DL, VT, DAG.getBitcast(CastVT, V));
12538 }
12539 
12540 // Check for whether we can use INSERTPS to perform the shuffle. We only use
12541 // INSERTPS when the V1 elements are already in the correct locations
12542 // because otherwise we can just always use two SHUFPS instructions which
12543 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
12544 // perform INSERTPS if a single V1 element is out of place and all V2
12545 // elements are zeroable.
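// Illustrative example (editor's note; mask chosen for illustration): for the
// v4f32 mask <0, 1, 2, 5> with nothing zeroable, V2's element 1 is inserted
// into lane 3 of V1, so VBSrcIndex = 1, VBDstIndex = 3, ZMask = 0 and the
// resulting immediate is (1 << 6) | (3 << 4) | 0 = 0x70.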
12546 static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
12547                                    unsigned &InsertPSMask,
12548                                    const APInt &Zeroable,
12549                                    ArrayRef<int> Mask, SelectionDAG &DAG) {
12550   assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
12551   assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
12552   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12553 
12554   // Attempt to match INSERTPS with one element from VA or VB being
12555   // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
12556   // are updated.
12557   auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
12558                              ArrayRef<int> CandidateMask) {
12559     unsigned ZMask = 0;
12560     int VADstIndex = -1;
12561     int VBDstIndex = -1;
12562     bool VAUsedInPlace = false;
12563 
12564     for (int i = 0; i < 4; ++i) {
12565       // Synthesize a zero mask from the zeroable elements (includes undefs).
12566       if (Zeroable[i]) {
12567         ZMask |= 1 << i;
12568         continue;
12569       }
12570 
12571       // Flag if we use any VA inputs in place.
12572       if (i == CandidateMask[i]) {
12573         VAUsedInPlace = true;
12574         continue;
12575       }
12576 
12577       // We can only insert a single non-zeroable element.
12578       if (VADstIndex >= 0 || VBDstIndex >= 0)
12579         return false;
12580 
12581       if (CandidateMask[i] < 4) {
12582         // VA input out of place for insertion.
12583         VADstIndex = i;
12584       } else {
12585         // VB input for insertion.
12586         VBDstIndex = i;
12587       }
12588     }
12589 
12590     // Don't bother if we have no (non-zeroable) element for insertion.
12591     if (VADstIndex < 0 && VBDstIndex < 0)
12592       return false;
12593 
12594     // Determine element insertion src/dst indices. The src index is from the
12595     // start of the inserted vector, not the start of the concatenated vector.
12596     unsigned VBSrcIndex = 0;
12597     if (VADstIndex >= 0) {
12598       // If we have a VA input out of place, we use VA as the V2 element
12599       // insertion and don't use the original V2 at all.
12600       VBSrcIndex = CandidateMask[VADstIndex];
12601       VBDstIndex = VADstIndex;
12602       VB = VA;
12603     } else {
12604       VBSrcIndex = CandidateMask[VBDstIndex] - 4;
12605     }
12606 
12607     // If no V1 inputs are used in place, then the result is created only from
12608     // the zero mask and the V2 insertion - so remove V1 dependency.
12609     if (!VAUsedInPlace)
12610       VA = DAG.getUNDEF(MVT::v4f32);
12611 
12612     // Update V1, V2 and InsertPSMask accordingly.
12613     V1 = VA;
12614     V2 = VB;
12615 
12616     // Insert the V2 element into the desired position.
12617     InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
12618     assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
12619     return true;
12620   };
12621 
12622   if (matchAsInsertPS(V1, V2, Mask))
12623     return true;
12624 
12625   // Commute and try again.
12626   SmallVector<int, 4> CommutedMask(Mask);
12627   ShuffleVectorSDNode::commuteMask(CommutedMask);
12628   if (matchAsInsertPS(V2, V1, CommutedMask))
12629     return true;
12630 
12631   return false;
12632 }
12633 
12634 static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
12635                                       ArrayRef<int> Mask, const APInt &Zeroable,
12636                                       SelectionDAG &DAG) {
12637   assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12638   assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12639 
12640   // Attempt to match the insertps pattern.
12641   unsigned InsertPSMask = 0;
12642   if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
12643     return SDValue();
12644 
12645   // Insert the V2 element into the desired position.
12646   return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
12647                      DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
12648 }
12649 
12650 /// Handle lowering of 2-lane 64-bit floating point shuffles.
12651 ///
12652 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
12653 /// support for floating point shuffles but not integer shuffles. These
12654 /// instructions will incur a domain crossing penalty on some chips though so
12655 /// it is better to avoid lowering through this for integer vectors where
12656 /// possible.
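/// Illustrative example (editor's note; masks chosen for illustration): the
/// unary mask <1, 1>, when the broadcast path does not apply (e.g. V1 is not a
/// foldable load), becomes VPERMILPD with immediate 0x3 on AVX, or SHUFPD
/// V1, V1 with the same immediate otherwise. A two-input mask such as <1, 2>,
/// assuming no earlier pattern (insertion, blend, unpack) matches, reaches the
/// final SHUFPD V1, V2 with immediate 0x1.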
12657 static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12658                                  const APInt &Zeroable, SDValue V1, SDValue V2,
12659                                  const X86Subtarget &Subtarget,
12660                                  SelectionDAG &DAG) {
12661   assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12662   assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
12663   assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12664 
12665   if (V2.isUndef()) {
12666     // Check for being able to broadcast a single element.
12667     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
12668                                                     Mask, Subtarget, DAG))
12669       return Broadcast;
12670 
12671     // Straight shuffle of a single input vector. Simulate this by using the
12672     // single input as both of the "inputs" to this instruction.
12673     unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
12674 
12675     if (Subtarget.hasAVX()) {
12676       // If we have AVX, we can use VPERMILPS which will allow folding a load
12677       // into the shuffle.
12678       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
12679                          DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12680     }
12681 
12682     return DAG.getNode(
12683         X86ISD::SHUFP, DL, MVT::v2f64,
12684         Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12685         Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
12686         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12687   }
12688   assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12689   assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
12690   assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12691   assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12692 
12693   if (Subtarget.hasAVX2())
12694     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12695       return Extract;
12696 
12697   // When loading a scalar and then shuffling it into a vector we can often do
12698   // the insertion cheaply.
12699   if (SDValue Insertion = lowerShuffleAsElementInsertion(
12700           DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12701     return Insertion;
12702   // Try inverting the insertion since for v2 masks it is easy to do and we
12703   // can't reliably sort the mask one way or the other.
12704   int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
12705                         Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
12706   if (SDValue Insertion = lowerShuffleAsElementInsertion(
12707           DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12708     return Insertion;
12709 
12710   // Try to use one of the special instruction patterns to handle two common
12711   // blend patterns if a zero-blend above didn't work.
12712   if (isShuffleEquivalent(Mask, {0, 3}, V1, V2) ||
12713       isShuffleEquivalent(Mask, {1, 3}, V1, V2))
12714     if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
12715       // We can either use a special instruction to load over the low double or
12716       // to move just the low double.
12717       return DAG.getNode(
12718           X86ISD::MOVSD, DL, MVT::v2f64, V2,
12719           DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
12720 
12721   if (Subtarget.hasSSE41())
12722     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
12723                                             Zeroable, Subtarget, DAG))
12724       return Blend;
12725 
12726   // Use dedicated unpack instructions for masks that match their pattern.
12727   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
12728     return V;
12729 
12730   unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
12731   return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
12732                      DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
12733 }
12734 
12735 /// Handle lowering of 2-lane 64-bit integer shuffles.
12736 ///
12737 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
12738 /// the integer unit to minimize domain crossing penalties. However, for blends
12739 /// it falls back to the floating point shuffle operation with appropriate bit
12740 /// casting.
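/// Illustrative example (editor's note): on a pre-AVX2 target the unary mask
/// <1, 1> is rewritten as the v4i32 PSHUFD mask <2, 3, 2, 3> (immediate 0xEE),
/// keeping the shuffle in the integer domain; two-input cases try shifts,
/// insertions, blends, unpacks and rotates before the final
/// SHUFPD-through-v2f64 fallback.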
12741 static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12742                                  const APInt &Zeroable, SDValue V1, SDValue V2,
12743                                  const X86Subtarget &Subtarget,
12744                                  SelectionDAG &DAG) {
12745   assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12746   assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
12747   assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
12748 
12749   if (V2.isUndef()) {
12750     // Check for being able to broadcast a single element.
12751     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
12752                                                     Mask, Subtarget, DAG))
12753       return Broadcast;
12754 
12755     // Straight shuffle of a single input vector. For everything from SSE2
12756     // onward this has a single fast instruction with no scary immediates.
12757     // We have to map the mask as it is actually a v4i32 shuffle instruction.
12758     V1 = DAG.getBitcast(MVT::v4i32, V1);
12759     int WidenedMask[4] = {Mask[0] < 0 ? -1 : (Mask[0] * 2),
12760                           Mask[0] < 0 ? -1 : ((Mask[0] * 2) + 1),
12761                           Mask[1] < 0 ? -1 : (Mask[1] * 2),
12762                           Mask[1] < 0 ? -1 : ((Mask[1] * 2) + 1)};
12763     return DAG.getBitcast(
12764         MVT::v2i64,
12765         DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
12766                     getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
12767   }
12768   assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
12769   assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
12770   assert(Mask[0] < 2 && "We sort V1 to be the first input.");
12771   assert(Mask[1] >= 2 && "We sort V2 to be the second input.");
12772 
12773   if (Subtarget.hasAVX2())
12774     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12775       return Extract;
12776 
12777   // Try to use shift instructions.
12778   if (SDValue Shift =
12779           lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget,
12780                               DAG, /*BitwiseOnly*/ false))
12781     return Shift;
12782 
12783   // When loading a scalar and then shuffling it into a vector we can often do
12784   // the insertion cheaply.
12785   if (SDValue Insertion = lowerShuffleAsElementInsertion(
12786           DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
12787     return Insertion;
12788   // Try inverting the insertion since for v2 masks it is easy to do and we
12789   // can't reliably sort the mask one way or the other.
12790   int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
12791   if (SDValue Insertion = lowerShuffleAsElementInsertion(
12792           DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
12793     return Insertion;
12794 
12795   // We have different paths for blend lowering, but they all must use the
12796   // *exact* same predicate.
12797   bool IsBlendSupported = Subtarget.hasSSE41();
12798   if (IsBlendSupported)
12799     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
12800                                             Zeroable, Subtarget, DAG))
12801       return Blend;
12802 
12803   // Use dedicated unpack instructions for masks that match their pattern.
12804   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
12805     return V;
12806 
12807   // Try to use byte rotation instructions.
12808   // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
12809   if (Subtarget.hasSSSE3()) {
12810     if (Subtarget.hasVLX())
12811       if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v2i64, V1, V2, Mask,
12812                                                 Zeroable, Subtarget, DAG))
12813         return Rotate;
12814 
12815     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
12816                                                   Subtarget, DAG))
12817       return Rotate;
12818   }
12819 
12820   // If we have direct support for blends, we should lower by decomposing into
12821   // a permute. That will be faster than the domain cross.
12822   if (IsBlendSupported)
12823     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v2i64, V1, V2, Mask,
12824                                                 Subtarget, DAG);
12825 
12826   // We implement this with SHUFPD which is pretty lame because it will likely
12827   // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
12828   // However, all the alternatives are still more cycles and newer chips don't
12829   // have this problem. It would be really nice if x86 had better shuffles here.
12830   V1 = DAG.getBitcast(MVT::v2f64, V1);
12831   V2 = DAG.getBitcast(MVT::v2f64, V2);
12832   return DAG.getBitcast(MVT::v2i64,
12833                         DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
12834 }
12835 
12836 /// Lower a vector shuffle using the SHUFPS instruction.
12837 ///
12838 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
12839 /// It makes no assumptions about whether this is the *best* lowering, it simply
12840 /// uses it.
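/// Illustrative example (editor's note; mask chosen for illustration): for
/// Mask = <0, 1, 4, 5> both V2 elements sit in the high half, so NewMask
/// becomes <0, 1, 0, 1> with LowV = V1 and HighV = V2, giving SHUFPS V1, V2
/// with immediate 0x44.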
12841 static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
12842                                       ArrayRef<int> Mask, SDValue V1,
12843                                       SDValue V2, SelectionDAG &DAG) {
12844   SDValue LowV = V1, HighV = V2;
12845   SmallVector<int, 4> NewMask(Mask);
12846   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12847 
12848   if (NumV2Elements == 1) {
12849     int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
12850 
12851     // Compute the index adjacent to V2Index and in the same half by toggling
12852     // the low bit.
12853     int V2AdjIndex = V2Index ^ 1;
12854 
12855     if (Mask[V2AdjIndex] < 0) {
12856       // Handles all the cases where we have a single V2 element and an undef.
12857       // This will only ever happen in the high lanes because we commute the
12858       // vector otherwise.
12859       if (V2Index < 2)
12860         std::swap(LowV, HighV);
12861       NewMask[V2Index] -= 4;
12862     } else {
12863       // Handle the case where the V2 element ends up adjacent to a V1 element.
12864       // To make this work, blend them together as the first step.
12865       int V1Index = V2AdjIndex;
12866       int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
12867       V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
12868                        getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
12869 
12870       // Now proceed to reconstruct the final blend as we have the necessary
12871       // high or low half formed.
12872       if (V2Index < 2) {
12873         LowV = V2;
12874         HighV = V1;
12875       } else {
12876         HighV = V2;
12877       }
12878       NewMask[V1Index] = 2; // We put the V1 element in V2[2].
12879       NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
12880     }
12881   } else if (NumV2Elements == 2) {
12882     if (Mask[0] < 4 && Mask[1] < 4) {
12883       // Handle the easy case where we have V1 in the low lanes and V2 in the
12884       // high lanes.
12885       NewMask[2] -= 4;
12886       NewMask[3] -= 4;
12887     } else if (Mask[2] < 4 && Mask[3] < 4) {
12888       // We also handle the reversed case because this utility may get called
12889       // when we detect a SHUFPS pattern but can't easily commute the shuffle to
12890       // arrange things in the right direction.
12891       NewMask[0] -= 4;
12892       NewMask[1] -= 4;
12893       HighV = V1;
12894       LowV = V2;
12895     } else {
12896       // We have a mixture of V1 and V2 in both low and high lanes. Rather than
12897       // trying to place elements directly, just blend them and set up the final
12898       // shuffle to place them.
12899 
12900       // The first two blend mask elements are for V1, the second two are for
12901       // V2.
12902       int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
12903                           Mask[2] < 4 ? Mask[2] : Mask[3],
12904                           (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
12905                           (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
12906       V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
12907                        getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
12908 
12909       // Now we do a normal shuffle of V1 by giving V1 as both operands to
12910       // a blend.
12911       LowV = HighV = V1;
12912       NewMask[0] = Mask[0] < 4 ? 0 : 2;
12913       NewMask[1] = Mask[0] < 4 ? 2 : 0;
12914       NewMask[2] = Mask[2] < 4 ? 1 : 3;
12915       NewMask[3] = Mask[2] < 4 ? 3 : 1;
12916     }
12917   } else if (NumV2Elements == 3) {
12918     // Ideally canonicalizeShuffleMaskWithCommute should have caught this, but
12919     // we can get here due to other paths (e.g. repeated mask matching) where
12920     // we don't want to do another round of lowerVECTOR_SHUFFLE.
12921     ShuffleVectorSDNode::commuteMask(NewMask);
12922     return lowerShuffleWithSHUFPS(DL, VT, NewMask, V2, V1, DAG);
12923   }
12924   return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
12925                      getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
12926 }
12927 
12928 /// Lower 4-lane 32-bit floating point shuffles.
12929 ///
12930 /// Uses instructions exclusively from the floating point unit to minimize
12931 /// domain crossing penalties, as these are sufficient to implement all v4f32
12932 /// shuffles.
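/// Illustrative examples (editor's note; masks chosen for illustration): with
/// SSE3, the unary masks <0, 0, 2, 2> and <1, 1, 3, 3> map directly to
/// MOVSLDUP and MOVSHDUP; on SSE4.1 a blend-shaped mask such as <4, 1, 2, 3>
/// is handled by the BLENDPS path before any SHUFPS fallback is considered.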
12933 static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
12934                                  const APInt &Zeroable, SDValue V1, SDValue V2,
12935                                  const X86Subtarget &Subtarget,
12936                                  SelectionDAG &DAG) {
12937   assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12938   assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
12939   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
12940 
12941   if (Subtarget.hasSSE41())
12942     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
12943                                             Zeroable, Subtarget, DAG))
12944       return Blend;
12945 
12946   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
12947 
12948   if (NumV2Elements == 0) {
12949     // Check for being able to broadcast a single element.
12950     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
12951                                                     Mask, Subtarget, DAG))
12952       return Broadcast;
12953 
12954     // Use even/odd duplicate instructions for masks that match their pattern.
12955     if (Subtarget.hasSSE3()) {
12956       if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
12957         return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
12958       if (isShuffleEquivalent(Mask, {1, 1, 3, 3}, V1, V2))
12959         return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
12960     }
12961 
12962     if (Subtarget.hasAVX()) {
12963       // If we have AVX, we can use VPERMILPS which will allow folding a load
12964       // into the shuffle.
12965       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
12966                          getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12967     }
12968 
12969     // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
12970     // in SSE1 because otherwise they are widened to v2f64 and never get here.
12971     if (!Subtarget.hasSSE2()) {
12972       if (isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2))
12973         return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
12974       if (isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1, V2))
12975         return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
12976     }
12977 
12978     // Otherwise, use a straight shuffle of a single input vector. We pass the
12979     // input vector to both operands to simulate this with a SHUFPS.
12980     return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
12981                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
12982   }
12983 
12984   if (Subtarget.hasSSE2())
12985     if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
12986             DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG)) {
12987       ZExt = DAG.getBitcast(MVT::v4f32, ZExt);
12988       return ZExt;
12989     }
12990 
12991   if (Subtarget.hasAVX2())
12992     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
12993       return Extract;
12994 
12995   // There are special ways we can lower some single-element blends. However,
12996   // we have more complex single-element blend lowerings below that we defer
12997   // to if both this and BLENDPS fail to match, so restrict this to the case
12998   // where the V2 input targets element 0 of the mask -- that is the fast
12999   // case here.
13000   if (NumV2Elements == 1 && Mask[0] >= 4)
13001     if (SDValue V = lowerShuffleAsElementInsertion(
13002             DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13003       return V;
13004 
13005   if (Subtarget.hasSSE41()) {
13006     // Use INSERTPS if we can complete the shuffle efficiently.
13007     if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
13008       return V;
13009 
13010     if (!isSingleSHUFPSMask(Mask))
13011       if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
13012                                                             V2, Mask, DAG))
13013         return BlendPerm;
13014   }
13015 
13016   // Use low/high mov instructions. These are only valid in SSE1 because
13017   // otherwise they are widened to v2f64 and never get here.
13018   if (!Subtarget.hasSSE2()) {
13019     if (isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2))
13020       return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
13021     if (isShuffleEquivalent(Mask, {2, 3, 6, 7}, V1, V2))
13022       return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
13023   }
13024 
13025   // Use dedicated unpack instructions for masks that match their pattern.
13026   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
13027     return V;
13028 
13029   // Otherwise fall back to a SHUFPS lowering strategy.
13030   return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
13031 }
13032 
13033 /// Lower 4-lane i32 vector shuffles.
13034 ///
13035 /// We try to handle these with integer-domain shuffles where we can, but for
13036 /// blends we use the floating point domain blend instructions.
13037 static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13038                                  const APInt &Zeroable, SDValue V1, SDValue V2,
13039                                  const X86Subtarget &Subtarget,
13040                                  SelectionDAG &DAG) {
13041   assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13042   assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
13043   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13044 
13045   // Whenever we can lower this as a zext, that instruction is strictly faster
13046   // than any alternative. It also allows us to fold memory operands into the
13047   // shuffle in many cases.
13048   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
13049                                                    Zeroable, Subtarget, DAG))
13050     return ZExt;
13051 
13052   int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13053 
13054   // Try to use shift instructions if fast.
13055   if (Subtarget.preferLowerShuffleAsShift()) {
13056     if (SDValue Shift =
13057             lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, Zeroable,
13058                                 Subtarget, DAG, /*BitwiseOnly*/ true))
13059       return Shift;
13060     if (NumV2Elements == 0)
13061       if (SDValue Rotate =
13062               lowerShuffleAsBitRotate(DL, MVT::v4i32, V1, Mask, Subtarget, DAG))
13063         return Rotate;
13064   }
13065 
13066   if (NumV2Elements == 0) {
13067     // Try to use broadcast unless the mask only has one non-undef element.
13068     if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
13069       if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
13070                                                       Mask, Subtarget, DAG))
13071         return Broadcast;
13072     }
13073 
13074     // Straight shuffle of a single input vector. For everything from SSE2
13075     // onward this has a single fast instruction with no scary immediates.
13076     // We coerce the shuffle pattern to be compatible with UNPCK instructions
13077     // but we aren't actually going to use the UNPCK instruction because doing
13078     // so prevents folding a load into this instruction or making a copy.
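          // For example, the mask <0,0,1,1> becomes PSHUFD with immediate 0x50, which
          // produces the same result as UNPCKLDQ of the vector with itself but keeps
          // the load-folding friendly single-source form.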
13079     const int UnpackLoMask[] = {0, 0, 1, 1};
13080     const int UnpackHiMask[] = {2, 2, 3, 3};
13081     if (isShuffleEquivalent(Mask, {0, 0, 1, 1}, V1, V2))
13082       Mask = UnpackLoMask;
13083     else if (isShuffleEquivalent(Mask, {2, 2, 3, 3}, V1, V2))
13084       Mask = UnpackHiMask;
13085 
13086     return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
13087                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13088   }
13089 
13090   if (Subtarget.hasAVX2())
13091     if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13092       return Extract;
13093 
13094   // Try to use shift instructions.
13095   if (SDValue Shift =
13096           lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget,
13097                               DAG, /*BitwiseOnly*/ false))
13098     return Shift;
13099 
13100   // There are special ways we can lower some single-element blends.
13101   if (NumV2Elements == 1)
13102     if (SDValue V = lowerShuffleAsElementInsertion(
13103             DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13104       return V;
13105 
13106   // We have different paths for blend lowering, but they all must use the
13107   // *exact* same predicate.
13108   bool IsBlendSupported = Subtarget.hasSSE41();
13109   if (IsBlendSupported)
13110     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
13111                                             Zeroable, Subtarget, DAG))
13112       return Blend;
13113 
13114   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
13115                                              Zeroable, Subtarget, DAG))
13116     return Masked;
13117 
13118   // Use dedicated unpack instructions for masks that match their pattern.
13119   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
13120     return V;
13121 
13122   // Try to use byte rotation instructions.
13123   // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
13124   if (Subtarget.hasSSSE3()) {
13125     if (Subtarget.hasVLX())
13126       if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i32, V1, V2, Mask,
13127                                                 Zeroable, Subtarget, DAG))
13128         return Rotate;
13129 
13130     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
13131                                                   Subtarget, DAG))
13132       return Rotate;
13133   }
13134 
13135   // Assume that a single SHUFPS is faster than an alternative sequence of
13136   // multiple instructions (even if the CPU has a domain penalty).
13137   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
13138   if (!isSingleSHUFPSMask(Mask)) {
13139     // If we have direct support for blends, we should lower by decomposing into
13140     // a permute. That will be faster than the domain cross.
13141     if (IsBlendSupported)
13142       return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i32, V1, V2, Mask,
13143                                                   Subtarget, DAG);
13144 
13145     // Try to lower by permuting the inputs into an unpack instruction.
13146     if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
13147                                                         Mask, Subtarget, DAG))
13148       return Unpack;
13149   }
13150 
13151   // We implement this with SHUFPS because it can blend from two vectors.
13152   // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
13153   // up the inputs, bypassing domain shift penalties that we would incur if we
13154   // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
13155   // relevant.
13156   SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
13157   SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
13158   SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
13159   return DAG.getBitcast(MVT::v4i32, ShufPS);
13160 }
13161 
13162 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
13163 /// shuffle lowering, and the most complex part.
13164 ///
13165 /// The lowering strategy is to try to form pairs of input lanes which are
13166 /// targeted at the same half of the final vector, and then use a dword shuffle
13167 /// to place them onto the right half, and finally unpack the paired lanes into
13168 /// their final position.
13169 ///
13170 /// The exact breakdown of how to form these dword pairs and align them on the
13171 /// correct sides is really tricky. See the comments within the function for
13172 /// more of the details.
13173 ///
13174 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
13175 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
13176 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
13177 /// vector, form the analogous 128-bit 8-element Mask.
13178 static SDValue lowerV8I16GeneralSingleInputShuffle(
13179     const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
13180     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
13181   assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
13182   MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
13183 
13184   assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
13185   MutableArrayRef<int> LoMask = Mask.slice(0, 4);
13186   MutableArrayRef<int> HiMask = Mask.slice(4, 4);
13187 
13188   // Attempt to directly match PSHUFLW or PSHUFHW.
13189   if (isUndefOrInRange(LoMask, 0, 4) &&
13190       isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
13191     return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13192                        getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13193   }
13194   if (isUndefOrInRange(HiMask, 4, 8) &&
13195       isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
13196     for (int i = 0; i != 4; ++i)
13197       HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
13198     return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13199                        getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13200   }
13201 
13202   SmallVector<int, 4> LoInputs;
13203   copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
13204   array_pod_sort(LoInputs.begin(), LoInputs.end());
13205   LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
13206   SmallVector<int, 4> HiInputs;
13207   copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
13208   array_pod_sort(HiInputs.begin(), HiInputs.end());
13209   HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
13210   int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
13211   int NumHToL = LoInputs.size() - NumLToL;
13212   int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
13213   int NumHToH = HiInputs.size() - NumLToH;
13214   MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
13215   MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
13216   MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
13217   MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
13218 
13219   // If we are only shuffling values from one half, check how many different DWORD
13220   // pairs we need to create. If only 1 or 2 then we can perform this as a
13221   // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
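        // Hypothetical example: for the single-input mask <2,3,0,1,2,3,0,1> the only
        // DWORD pairs are (2,3) and (0,1), so a PSHUFLW with <2,3,0,1> followed by a
        // PSHUFD with <0,1,0,1> reproduces the whole mask in two instructions.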
13222   auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
13223                                ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
13224     V = DAG.getNode(ShufWOp, DL, VT, V,
13225                     getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13226     V = DAG.getBitcast(PSHUFDVT, V);
13227     V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
13228                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
13229     return DAG.getBitcast(VT, V);
13230   };
13231 
13232   if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
13233     int PSHUFDMask[4] = { -1, -1, -1, -1 };
13234     SmallVector<std::pair<int, int>, 4> DWordPairs;
13235     int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
13236 
13237     // Collect the different DWORD pairs.
13238     for (int DWord = 0; DWord != 4; ++DWord) {
13239       int M0 = Mask[2 * DWord + 0];
13240       int M1 = Mask[2 * DWord + 1];
13241       M0 = (M0 >= 0 ? M0 % 4 : M0);
13242       M1 = (M1 >= 0 ? M1 % 4 : M1);
13243       if (M0 < 0 && M1 < 0)
13244         continue;
13245 
13246       bool Match = false;
13247       for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
13248         auto &DWordPair = DWordPairs[j];
13249         if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
13250             (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
13251           DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
13252           DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
13253           PSHUFDMask[DWord] = DOffset + j;
13254           Match = true;
13255           break;
13256         }
13257       }
13258       if (!Match) {
13259         PSHUFDMask[DWord] = DOffset + DWordPairs.size();
13260         DWordPairs.push_back(std::make_pair(M0, M1));
13261       }
13262     }
13263 
13264     if (DWordPairs.size() <= 2) {
13265       DWordPairs.resize(2, std::make_pair(-1, -1));
13266       int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
13267                               DWordPairs[1].first, DWordPairs[1].second};
13268       if ((NumHToL + NumHToH) == 0)
13269         return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
13270       if ((NumLToL + NumLToH) == 0)
13271         return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
13272     }
13273   }
13274 
13275   // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
13276   // such inputs we can swap two of the dwords across the half mark and end up
13277   // with <=2 inputs to each half from each half. Once there, we can fall through
13278   // to the generic code below. For example:
13279   //
13280   // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13281   // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
13282   //
13283   // However, in some very rare cases we have a 1-into-3 or 3-into-1 on one half
13284   // and an existing 2-into-2 on the other half. In this case we may have to
13285   // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
13286   // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
13287   // Fortunately, we don't have to handle anything but a 2-into-2 pattern
13288   // because any other situation (including a 3-into-1 or 1-into-3 in the other
13289   // half than the one we target for fixing) will be fixed when we re-enter this
13290   // path. We will also combine any resulting sequence of PSHUFD instructions
13291   // into a single instruction. Here is an example of the tricky case:
13292   //
13293   // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
13294   // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
13295   //
13296   // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
13297   //
13298   // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
13299   // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
13300   //
13301   // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
13302   // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
13303   //
13304   // The result is fine to be handled by the generic logic.
13305   auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
13306                           ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
13307                           int AOffset, int BOffset) {
13308     assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
13309            "Must call this with A having 3 or 1 inputs from the A half.");
13310     assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
13311            "Must call this with B having 1 or 3 inputs from the B half.");
13312     assert(AToAInputs.size() + BToAInputs.size() == 4 &&
13313            "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
13314 
13315     bool ThreeAInputs = AToAInputs.size() == 3;
13316 
13317     // Compute the index of the dword with only one word among the three inputs in
13318     // a half by taking the sum of the half with three inputs and subtracting
13319     // the sum of the actual three inputs. The difference is the remaining
13320     // slot.
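          // Hypothetical example: if the three inputs landing in the A half are
          // words {0, 1, 3} (AOffset == 0), TripleInputSum is 6, the inputs sum to 4,
          // so the free word is 2 and TripleDWord is 2 / 2 = 1.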
13321     int ADWord = 0, BDWord = 0;
13322     int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
13323     int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
13324     int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
13325     ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
13326     int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
13327     int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
13328     int TripleNonInputIdx =
13329         TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
13330     TripleDWord = TripleNonInputIdx / 2;
13331 
13332     // We use xor with one to compute the adjacent DWord to whichever one the
13333     // OneInput is in.
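          // E.g. if OneInput is word 5 it lives in dword 2, so the adjacent dword
          // (and thus OneInputDWord) is 2 ^ 1 == 3.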
13334     OneInputDWord = (OneInput / 2) ^ 1;
13335 
13336     // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
13337     // and BToA inputs. If there is also such a problem with the BToB and AToB
13338     // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
13339     // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
13340     // is essential that we don't *create* a 3<-1 as then we might oscillate.
13341     if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
13342       // Compute how many inputs will be flipped by swapping these DWords. We
13343       // need to balance this to ensure we don't form a 3-1 shuffle in the
13344       // other half.
13346       int NumFlippedAToBInputs = llvm::count(AToBInputs, 2 * ADWord) +
13347                                  llvm::count(AToBInputs, 2 * ADWord + 1);
13348       int NumFlippedBToBInputs = llvm::count(BToBInputs, 2 * BDWord) +
13349                                  llvm::count(BToBInputs, 2 * BDWord + 1);
13350       if ((NumFlippedAToBInputs == 1 &&
13351            (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
13352           (NumFlippedBToBInputs == 1 &&
13353            (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
13354         // We choose whether to fix the A half or B half based on whether that
13355         // half has zero flipped inputs. At zero, we may not be able to fix it
13356         // with that half. We also bias towards fixing the B half because that
13357         // will more commonly be the high half, and we have to bias one way.
13358         auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
13359                                                        ArrayRef<int> Inputs) {
13360           int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
13361           bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
13362           // Determine whether the free index is in the flipped dword or the
13363           // unflipped dword based on where the pinned index is. We use this bit
13364           // in an xor to conditionally select the adjacent dword.
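                // E.g. with DWord == 2 and PinnedIdx == 5 (word 5 lives in dword 2),
                // FixFreeIdx starts at 2 * (2 ^ 1) == 6, the first word of the
                // adjacent dword 3.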
13365           int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
13366           bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13367           if (IsFixIdxInput == IsFixFreeIdxInput)
13368             FixFreeIdx += 1;
13369           IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
13370           assert(IsFixIdxInput != IsFixFreeIdxInput &&
13371                  "We need to be changing the number of flipped inputs!");
13372           int PSHUFHalfMask[] = {0, 1, 2, 3};
13373           std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
13374           V = DAG.getNode(
13375               FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
13376               MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
13377               getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
13378 
13379           for (int &M : Mask)
13380             if (M >= 0 && M == FixIdx)
13381               M = FixFreeIdx;
13382             else if (M >= 0 && M == FixFreeIdx)
13383               M = FixIdx;
13384         };
13385         if (NumFlippedBToBInputs != 0) {
13386           int BPinnedIdx =
13387               BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
13388           FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
13389         } else {
13390           assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
13391           int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
13392           FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
13393         }
13394       }
13395     }
13396 
13397     int PSHUFDMask[] = {0, 1, 2, 3};
13398     PSHUFDMask[ADWord] = BDWord;
13399     PSHUFDMask[BDWord] = ADWord;
13400     V = DAG.getBitcast(
13401         VT,
13402         DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13403                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13404 
13405     // Adjust the mask to match the new locations of A and B.
13406     for (int &M : Mask)
13407       if (M >= 0 && M/2 == ADWord)
13408         M = 2 * BDWord + M % 2;
13409       else if (M >= 0 && M/2 == BDWord)
13410         M = 2 * ADWord + M % 2;
13411 
13412     // Recurse back into this routine to re-compute state now that this isn't
13413     // a 3 and 1 problem.
13414     return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
13415   };
13416   if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
13417     return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
13418   if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
13419     return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
13420 
13421   // At this point there are at most two inputs to the low and high halves from
13422   // each half. That means the inputs can always be grouped into dwords and
13423   // those dwords can then be moved to the correct half with a dword shuffle.
13424   // We use at most one low and one high word shuffle to collect these paired
13425   // inputs into dwords, and finally a dword shuffle to place them.
13426   int PSHUFLMask[4] = {-1, -1, -1, -1};
13427   int PSHUFHMask[4] = {-1, -1, -1, -1};
13428   int PSHUFDMask[4] = {-1, -1, -1, -1};
13429 
13430   // First fix the masks for all the inputs that are staying in their
13431   // original halves. This will then dictate the targets of the cross-half
13432   // shuffles.
13433   auto fixInPlaceInputs =
13434       [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
13435                     MutableArrayRef<int> SourceHalfMask,
13436                     MutableArrayRef<int> HalfMask, int HalfOffset) {
13437     if (InPlaceInputs.empty())
13438       return;
13439     if (InPlaceInputs.size() == 1) {
13440       SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13441           InPlaceInputs[0] - HalfOffset;
13442       PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
13443       return;
13444     }
13445     if (IncomingInputs.empty()) {
13446       // Just fix all of the in place inputs.
13447       for (int Input : InPlaceInputs) {
13448         SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
13449         PSHUFDMask[Input / 2] = Input / 2;
13450       }
13451       return;
13452     }
13453 
13454     assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
13455     SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
13456         InPlaceInputs[0] - HalfOffset;
13457     // Put the second input next to the first so that they are packed into
13458     // a dword. We find the adjacent index by toggling the low bit.
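          // Hypothetical example: with in-place inputs {1, 2}, AdjIndex = 1 ^ 1 = 0,
          // so word 2 is moved into slot 0 and pairs with word 1 inside dword 0,
          // which PSHUFD can then keep in place.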
13459     int AdjIndex = InPlaceInputs[0] ^ 1;
13460     SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
13461     std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
13462     PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
13463   };
13464   fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
13465   fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
13466 
13467   // Now gather the cross-half inputs and place them into a free dword of
13468   // their target half.
13469   // FIXME: This operation could almost certainly be simplified dramatically to
13470   // look more like the 3-1 fixing operation.
13471   auto moveInputsToRightHalf = [&PSHUFDMask](
13472       MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
13473       MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
13474       MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
13475       int DestOffset) {
13476     auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
13477       return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
13478     };
13479     auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
13480                                                int Word) {
13481       int LowWord = Word & ~1;
13482       int HighWord = Word | 1;
13483       return isWordClobbered(SourceHalfMask, LowWord) ||
13484              isWordClobbered(SourceHalfMask, HighWord);
13485     };
13486 
13487     if (IncomingInputs.empty())
13488       return;
13489 
13490     if (ExistingInputs.empty()) {
13491       // Map any dwords with inputs from them into the right half.
13492       for (int Input : IncomingInputs) {
13493         // If the source half mask maps over the inputs, turn those into
13494         // swaps and use the swapped lane.
13495         if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
13496           if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
13497             SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
13498                 Input - SourceOffset;
13499             // We have to swap the uses in our half mask in one sweep.
13500             for (int &M : HalfMask)
13501               if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
13502                 M = Input;
13503               else if (M == Input)
13504                 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13505           } else {
13506             assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
13507                        Input - SourceOffset &&
13508                    "Previous placement doesn't match!");
13509           }
13510           // Note that this correctly re-maps both when we do a swap and when
13511           // we observe the other side of the swap above. We rely on that to
13512           // avoid swapping the members of the input list directly.
13513           Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
13514         }
13515 
13516         // Map the input's dword into the correct half.
13517         if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
13518           PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
13519         else
13520           assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
13521                      Input / 2 &&
13522                  "Previous placement doesn't match!");
13523       }
13524 
13525       // And just directly shift any other-half mask elements to be same-half
13526       // as we will have mirrored the dword containing the element into the
13527       // same position within that half.
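            // E.g. when moving inputs from the high half to the low half
            // (SourceOffset == 4, DestOffset == 0), a leftover mask element 5 simply
            // becomes 1.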
13528       for (int &M : HalfMask)
13529         if (M >= SourceOffset && M < SourceOffset + 4) {
13530           M = M - SourceOffset + DestOffset;
13531           assert(M >= 0 && "This should never wrap below zero!");
13532         }
13533       return;
13534     }
13535 
13536     // Ensure we have the input in a viable dword of its current half. This
13537     // is particularly tricky because the original position may be clobbered
13538     // by inputs being moved and *staying* in that half.
13539     if (IncomingInputs.size() == 1) {
13540       if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13541         int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
13542                          SourceOffset;
13543         SourceHalfMask[InputFixed - SourceOffset] =
13544             IncomingInputs[0] - SourceOffset;
13545         std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
13546                      InputFixed);
13547         IncomingInputs[0] = InputFixed;
13548       }
13549     } else if (IncomingInputs.size() == 2) {
13550       if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
13551           isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
13552         // We have two non-adjacent or clobbered inputs we need to extract from
13553         // the source half. To do this, we need to map them into some adjacent
13554         // dword slot in the source mask.
13555         int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
13556                               IncomingInputs[1] - SourceOffset};
13557 
13558         // If there is a free slot in the source half mask adjacent to one of
13559         // the inputs, place the other input in it. We use (Index XOR 1) to
13560         // compute an adjacent index.
13561         if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
13562             SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
13563           SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
13564           SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13565           InputsFixed[1] = InputsFixed[0] ^ 1;
13566         } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
13567                    SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
13568           SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
13569           SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
13570           InputsFixed[0] = InputsFixed[1] ^ 1;
13571         } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
13572                    SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
13573           // The two inputs are in the same DWord but it is clobbered and the
13574           // adjacent DWord isn't used at all. Move both inputs to the free
13575           // slot.
13576           SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
13577           SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
13578           InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
13579           InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
13580         } else {
13581           // The only way we hit this point is if there is no clobbering
13582           // (because there are no off-half inputs to this half) and there is no
13583           // free slot adjacent to one of the inputs. In this case, we have to
13584           // swap an input with a non-input.
13585           for (int i = 0; i < 4; ++i)
13586             assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
13587                    "We can't handle any clobbers here!");
13588           assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
13589                  "Cannot have adjacent inputs here!");
13590 
13591           SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
13592           SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
13593 
13594           // We also have to update the final source mask in this case because
13595           // it may need to undo the above swap.
13596           for (int &M : FinalSourceHalfMask)
13597             if (M == (InputsFixed[0] ^ 1) + SourceOffset)
13598               M = InputsFixed[1] + SourceOffset;
13599             else if (M == InputsFixed[1] + SourceOffset)
13600               M = (InputsFixed[0] ^ 1) + SourceOffset;
13601 
13602           InputsFixed[1] = InputsFixed[0] ^ 1;
13603         }
13604 
13605         // Point everything at the fixed inputs.
13606         for (int &M : HalfMask)
13607           if (M == IncomingInputs[0])
13608             M = InputsFixed[0] + SourceOffset;
13609           else if (M == IncomingInputs[1])
13610             M = InputsFixed[1] + SourceOffset;
13611 
13612         IncomingInputs[0] = InputsFixed[0] + SourceOffset;
13613         IncomingInputs[1] = InputsFixed[1] + SourceOffset;
13614       }
13615     } else {
13616       llvm_unreachable("Unhandled input size!");
13617     }
13618 
13619     // Now hoist the DWord down to the right half.
13620     int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
13621     assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
13622     PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
13623     for (int &M : HalfMask)
13624       for (int Input : IncomingInputs)
13625         if (M == Input)
13626           M = FreeDWord * 2 + Input % 2;
13627   };
13628   moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
13629                         /*SourceOffset*/ 4, /*DestOffset*/ 0);
13630   moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
13631                         /*SourceOffset*/ 0, /*DestOffset*/ 4);
13632 
13633   // Now enact all the shuffles we've computed to move the inputs into their
13634   // target half.
13635   if (!isNoopShuffleMask(PSHUFLMask))
13636     V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13637                     getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
13638   if (!isNoopShuffleMask(PSHUFHMask))
13639     V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13640                     getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
13641   if (!isNoopShuffleMask(PSHUFDMask))
13642     V = DAG.getBitcast(
13643         VT,
13644         DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
13645                     getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
13646 
13647   // At this point, each half should contain all its inputs, and we can then
13648   // just shuffle them into their final position.
13649   assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
13650          "Failed to lift all the high half inputs to the low mask!");
13651   assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
13652          "Failed to lift all the low half inputs to the high mask!");
13653 
13654   // Do a half shuffle for the low mask.
13655   if (!isNoopShuffleMask(LoMask))
13656     V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
13657                     getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
13658 
13659   // Do a half shuffle with the high mask after shifting its values down.
13660   for (int &M : HiMask)
13661     if (M >= 0)
13662       M -= 4;
13663   if (!isNoopShuffleMask(HiMask))
13664     V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
13665                     getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
13666 
13667   return V;
13668 }
13669 
13670 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
13671 /// blend if only one input is used.
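      ///
      /// A hypothetical sketch of the byte-mask construction for MVT::v8i16 (so each
      /// mask element covers Scale = 2 bytes): a mask element of 9 (word 1 of V2)
      /// expands to the zero byte 0x80 in the V1 PSHUFB mask and to bytes 2 and 3 in
      /// the V2 PSHUFB mask; the two PSHUFB results are then ORed together.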
13672 static SDValue lowerShuffleAsBlendOfPSHUFBs(
13673     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
13674     const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
13675   assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
13676          "Lane crossing shuffle masks not supported");
13677 
13678   int NumBytes = VT.getSizeInBits() / 8;
13679   int Size = Mask.size();
13680   int Scale = NumBytes / Size;
13681 
13682   SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13683   SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
13684   V1InUse = false;
13685   V2InUse = false;
13686 
13687   for (int i = 0; i < NumBytes; ++i) {
13688     int M = Mask[i / Scale];
13689     if (M < 0)
13690       continue;
13691 
13692     const int ZeroMask = 0x80;
13693     int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
13694     int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
13695     if (Zeroable[i / Scale])
13696       V1Idx = V2Idx = ZeroMask;
13697 
13698     V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
13699     V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
13700     V1InUse |= (ZeroMask != V1Idx);
13701     V2InUse |= (ZeroMask != V2Idx);
13702   }
13703 
13704   MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
13705   if (V1InUse)
13706     V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
13707                      DAG.getBuildVector(ShufVT, DL, V1Mask));
13708   if (V2InUse)
13709     V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
13710                      DAG.getBuildVector(ShufVT, DL, V2Mask));
13711 
13712   // If we need shuffled inputs from both, blend the two.
13713   SDValue V;
13714   if (V1InUse && V2InUse)
13715     V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
13716   else
13717     V = V1InUse ? V1 : V2;
13718 
13719   // Cast the result back to the correct type.
13720   return DAG.getBitcast(VT, V);
13721 }
13722 
13723 /// Generic lowering of 8-lane i16 shuffles.
13724 ///
13725 /// This handles both single-input shuffles and combined shuffle/blends with
13726 /// two inputs. The single input shuffles are immediately delegated to
13727 /// a dedicated lowering routine.
13728 ///
13729 /// The blends are lowered in one of three fundamental ways. If there are few
13730 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
13731 /// of the input is significantly cheaper when lowered as an interleaving of
13732 /// the two inputs, try to interleave them. Otherwise, blend the low and high
13733 /// halves of the inputs separately (making them have relatively few inputs)
13734 /// and then concatenate them.
13735 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13736                                  const APInt &Zeroable, SDValue V1, SDValue V2,
13737                                  const X86Subtarget &Subtarget,
13738                                  SelectionDAG &DAG) {
13739   assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13740   assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
13741   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13742 
13743   // Whenever we can lower this as a zext, that instruction is strictly faster
13744   // than any alternative.
13745   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
13746                                                    Zeroable, Subtarget, DAG))
13747     return ZExt;
13748 
13749   // Try to lower using a truncation.
13750   if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
13751                                         Subtarget, DAG))
13752     return V;
13753 
13754   int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
13755 
13756   if (NumV2Inputs == 0) {
13757     // Try to use shift instructions.
13758     if (SDValue Shift =
13759             lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask, Zeroable,
13760                                 Subtarget, DAG, /*BitwiseOnly*/ false))
13761       return Shift;
13762 
13763     // Check for being able to broadcast a single element.
13764     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
13765                                                     Mask, Subtarget, DAG))
13766       return Broadcast;
13767 
13768     // Try to use bit rotation instructions.
13769     if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v8i16, V1, Mask,
13770                                                  Subtarget, DAG))
13771       return Rotate;
13772 
13773     // Use dedicated unpack instructions for masks that match their pattern.
13774     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13775       return V;
13776 
13777     // Use dedicated pack instructions for masks that match their pattern.
13778     if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13779                                          Subtarget))
13780       return V;
13781 
13782     // Try to use byte rotation instructions.
13783     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
13784                                                   Subtarget, DAG))
13785       return Rotate;
13786 
13787     // Make a copy of the mask so it can be modified.
13788     SmallVector<int, 8> MutableMask(Mask);
13789     return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
13790                                                Subtarget, DAG);
13791   }
13792 
13793   assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
13794          "All single-input shuffles should be canonicalized to be V1-input "
13795          "shuffles.");
13796 
13797   // Try to use shift instructions.
13798   if (SDValue Shift =
13799           lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget,
13800                               DAG, /*BitwiseOnly*/ false))
13801     return Shift;
13802 
13803   // See if we can use SSE4A Extraction / Insertion.
13804   if (Subtarget.hasSSE4A())
13805     if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
13806                                           Zeroable, DAG))
13807       return V;
13808 
13809   // There are special ways we can lower some single-element blends.
13810   if (NumV2Inputs == 1)
13811     if (SDValue V = lowerShuffleAsElementInsertion(
13812             DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13813       return V;
13814 
13815   // We have different paths for blend lowering, but they all must use the
13816   // *exact* same predicate.
13817   bool IsBlendSupported = Subtarget.hasSSE41();
13818   if (IsBlendSupported)
13819     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
13820                                             Zeroable, Subtarget, DAG))
13821       return Blend;
13822 
13823   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
13824                                              Zeroable, Subtarget, DAG))
13825     return Masked;
13826 
13827   // Use dedicated unpack instructions for masks that match their pattern.
13828   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
13829     return V;
13830 
13831   // Use dedicated pack instructions for masks that match their pattern.
13832   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
13833                                        Subtarget))
13834     return V;
13835 
13836   // Try to lower using a truncation.
13837   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v8i16, V1, V2, Mask, Zeroable,
13838                                        Subtarget, DAG))
13839     return V;
13840 
13841   // Try to use byte rotation instructions.
13842   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
13843                                                 Subtarget, DAG))
13844     return Rotate;
13845 
13846   if (SDValue BitBlend =
13847           lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
13848     return BitBlend;
13849 
13850   // Try to use byte shift instructions to mask.
13851   if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
13852                                               Zeroable, Subtarget, DAG))
13853     return V;
13854 
13855   // Attempt to lower using compaction; SSE41 is necessary for PACKUSDW.
13856   int NumEvenDrops = canLowerByDroppingElements(Mask, true, false);
13857   if ((NumEvenDrops == 1 || (NumEvenDrops == 2 && Subtarget.hasSSE41())) &&
13858       !Subtarget.hasVLX()) {
13859     // Check if this is part of a 256-bit vector truncation.
13860     unsigned PackOpc = 0;
13861     if (NumEvenDrops == 2 && Subtarget.hasAVX2() &&
13862         peekThroughBitcasts(V1).getOpcode() == ISD::EXTRACT_SUBVECTOR &&
13863         peekThroughBitcasts(V2).getOpcode() == ISD::EXTRACT_SUBVECTOR) {
13864       SDValue V1V2 = concatSubVectors(V1, V2, DAG, DL);
13865       V1V2 = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1V2,
13866                          getZeroVector(MVT::v16i16, Subtarget, DAG, DL),
13867                          DAG.getTargetConstant(0xEE, DL, MVT::i8));
13868       V1V2 = DAG.getBitcast(MVT::v8i32, V1V2);
13869       V1 = extract128BitVector(V1V2, 0, DAG, DL);
13870       V2 = extract128BitVector(V1V2, 4, DAG, DL);
13871       PackOpc = X86ISD::PACKUS;
13872     } else if (Subtarget.hasSSE41()) {
13873       SmallVector<SDValue, 4> DWordClearOps(4,
13874                                             DAG.getConstant(0, DL, MVT::i32));
13875       for (unsigned i = 0; i != 4; i += 1 << (NumEvenDrops - 1))
13876         DWordClearOps[i] = DAG.getConstant(0xFFFF, DL, MVT::i32);
13877       SDValue DWordClearMask =
13878           DAG.getBuildVector(MVT::v4i32, DL, DWordClearOps);
13879       V1 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V1),
13880                        DWordClearMask);
13881       V2 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V2),
13882                        DWordClearMask);
13883       PackOpc = X86ISD::PACKUS;
13884     } else if (!Subtarget.hasSSSE3()) {
13885       SDValue ShAmt = DAG.getTargetConstant(16, DL, MVT::i8);
13886       V1 = DAG.getBitcast(MVT::v4i32, V1);
13887       V2 = DAG.getBitcast(MVT::v4i32, V2);
13888       V1 = DAG.getNode(X86ISD::VSHLI, DL, MVT::v4i32, V1, ShAmt);
13889       V2 = DAG.getNode(X86ISD::VSHLI, DL, MVT::v4i32, V2, ShAmt);
13890       V1 = DAG.getNode(X86ISD::VSRAI, DL, MVT::v4i32, V1, ShAmt);
13891       V2 = DAG.getNode(X86ISD::VSRAI, DL, MVT::v4i32, V2, ShAmt);
13892       PackOpc = X86ISD::PACKSS;
13893     }
13894     if (PackOpc) {
13895       // Now pack things back together.
13896       SDValue Result = DAG.getNode(PackOpc, DL, MVT::v8i16, V1, V2);
13897       if (NumEvenDrops == 2) {
13898         Result = DAG.getBitcast(MVT::v4i32, Result);
13899         Result = DAG.getNode(PackOpc, DL, MVT::v8i16, Result, Result);
13900       }
13901       return Result;
13902     }
13903   }
13904 
13905   // When compacting odd (upper) elements, use PACKSS pre-SSE41.
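        // Sketch of the idea (assuming NumOddDrops == 1 corresponds to keeping the
        // upper word of every dword, e.g. a mask like <1,3,5,7,9,11,13,15>): shifting
        // each dword right by 16 moves that word down, and PACKUSDW/PACKSSDW then
        // narrows the dwords back to words. The logical shift leaves zero upper bits
        // so unsigned saturation is exact; the arithmetic shift keeps sign bits so
        // signed saturation is exact.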
13906   int NumOddDrops = canLowerByDroppingElements(Mask, false, false);
13907   if (NumOddDrops == 1) {
13908     bool HasSSE41 = Subtarget.hasSSE41();
13909     V1 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
13910                      DAG.getBitcast(MVT::v4i32, V1),
13911                      DAG.getTargetConstant(16, DL, MVT::i8));
13912     V2 = DAG.getNode(HasSSE41 ? X86ISD::VSRLI : X86ISD::VSRAI, DL, MVT::v4i32,
13913                      DAG.getBitcast(MVT::v4i32, V2),
13914                      DAG.getTargetConstant(16, DL, MVT::i8));
13915     return DAG.getNode(HasSSE41 ? X86ISD::PACKUS : X86ISD::PACKSS, DL,
13916                        MVT::v8i16, V1, V2);
13917   }
13918 
13919   // Try to lower by permuting the inputs into an unpack instruction.
13920   if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
13921                                                       Mask, Subtarget, DAG))
13922     return Unpack;
13923 
13924   // If we can't directly blend but can use PSHUFB, that will be better as it
13925   // can both shuffle and set up the inefficient blend.
13926   if (!IsBlendSupported && Subtarget.hasSSSE3()) {
13927     bool V1InUse, V2InUse;
13928     return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
13929                                         Zeroable, DAG, V1InUse, V2InUse);
13930   }
13931 
13932   // We can always bit-blend if we have to, so the fallback strategy is to
13933   // decompose into single-input permutes and blends/unpacks.
13934   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i16, V1, V2,
13935                                               Mask, Subtarget, DAG);
13936 }
13937 
13938 /// Lower 8-lane 16-bit floating point shuffles.
13939 static SDValue lowerV8F16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13940                                  const APInt &Zeroable, SDValue V1, SDValue V2,
13941                                  const X86Subtarget &Subtarget,
13942                                  SelectionDAG &DAG) {
13943   assert(V1.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
13944   assert(V2.getSimpleValueType() == MVT::v8f16 && "Bad operand type!");
13945   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
13946   int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });
13947 
13948   if (Subtarget.hasFP16()) {
13949     if (NumV2Elements == 0) {
13950       // Check for being able to broadcast a single element.
13951       if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f16, V1, V2,
13952                                                       Mask, Subtarget, DAG))
13953         return Broadcast;
13954     }
13955     if (NumV2Elements == 1 && Mask[0] >= 8)
13956       if (SDValue V = lowerShuffleAsElementInsertion(
13957               DL, MVT::v8f16, V1, V2, Mask, Zeroable, Subtarget, DAG))
13958         return V;
13959   }
13960 
13961   V1 = DAG.getBitcast(MVT::v8i16, V1);
13962   V2 = DAG.getBitcast(MVT::v8i16, V2);
13963   return DAG.getBitcast(MVT::v8f16,
13964                         DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, Mask));
13965 }
13966 
13967 // Lowers a unary/binary shuffle as VPERMV/VPERMV3. For non-VLX targets,
13968 // sub-512-bit shuffles are padded to 512 bits for the shuffle and then
13969 // the active subvector is extracted.
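      // Illustrative (hypothetical) example: a two-input v8i32 shuffle on an AVX-512
      // target without VLX is widened to v16i32; a mask element of 8 (element 0 of
      // V2) is rebased to 8 + (Scale - 1) * NumElts = 16 so it still addresses V2's
      // first element in the widened VPERMV3 pair.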
13970 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
13971                                      ArrayRef<int> Mask, SDValue V1, SDValue V2,
13972                                      const X86Subtarget &Subtarget,
13973                                      SelectionDAG &DAG) {
13974   MVT MaskVT = VT.changeTypeToInteger();
13975   SDValue MaskNode;
13976   MVT ShuffleVT = VT;
13977   if (!VT.is512BitVector() && !Subtarget.hasVLX()) {
13978     V1 = widenSubVector(V1, false, Subtarget, DAG, DL, 512);
13979     V2 = widenSubVector(V2, false, Subtarget, DAG, DL, 512);
13980     ShuffleVT = V1.getSimpleValueType();
13981 
13982     // Adjust mask to correct indices for the second input.
13983     int NumElts = VT.getVectorNumElements();
13984     unsigned Scale = 512 / VT.getSizeInBits();
13985     SmallVector<int, 32> AdjustedMask(Mask);
13986     for (int &M : AdjustedMask)
13987       if (NumElts <= M)
13988         M += (Scale - 1) * NumElts;
13989     MaskNode = getConstVector(AdjustedMask, MaskVT, DAG, DL, true);
13990     MaskNode = widenSubVector(MaskNode, false, Subtarget, DAG, DL, 512);
13991   } else {
13992     MaskNode = getConstVector(Mask, MaskVT, DAG, DL, true);
13993   }
13994 
13995   SDValue Result;
13996   if (V2.isUndef())
13997     Result = DAG.getNode(X86ISD::VPERMV, DL, ShuffleVT, MaskNode, V1);
13998   else
13999     Result = DAG.getNode(X86ISD::VPERMV3, DL, ShuffleVT, V1, MaskNode, V2);
14000 
14001   if (VT != ShuffleVT)
14002     Result = extractSubVector(Result, 0, DAG, DL, VT.getSizeInBits());
14003 
14004   return Result;
14005 }
14006 
14007 /// Generic lowering of v16i8 shuffles.
14008 ///
14009 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
14010 /// detect any complexity-reducing interleaving. If that doesn't help, it uses
14011 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
14012 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
14013 /// back together.
14014 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14015                                  const APInt &Zeroable, SDValue V1, SDValue V2,
14016                                  const X86Subtarget &Subtarget,
14017                                  SelectionDAG &DAG) {
14018   assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14019   assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14020   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
14021 
14022   // Try to use shift instructions.
14023   if (SDValue Shift =
14024           lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget,
14025                               DAG, /*BitwiseOnly*/ false))
14026     return Shift;
14027 
14028   // Try to use byte rotation instructions.
14029   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
14030                                                 Subtarget, DAG))
14031     return Rotate;
14032 
14033   // Use dedicated pack instructions for masks that match their pattern.
14034   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
14035                                        Subtarget))
14036     return V;
14037 
14038   // Try to use a zext lowering.
14039   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
14040                                                    Zeroable, Subtarget, DAG))
14041     return ZExt;
14042 
14043   // Try to lower using a truncation.
14044   if (SDValue V = lowerShuffleWithVPMOV(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
14045                                         Subtarget, DAG))
14046     return V;
14047 
14048   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i8, V1, V2, Mask, Zeroable,
14049                                        Subtarget, DAG))
14050     return V;
14051 
14052   // See if we can use SSE4A Extraction / Insertion.
14053   if (Subtarget.hasSSE4A())
14054     if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
14055                                           Zeroable, DAG))
14056       return V;
14057 
14058   int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
14059 
14060   // For single-input shuffles, there are some nicer lowering tricks we can use.
14061   if (NumV2Elements == 0) {
14062     // Check for being able to broadcast a single element.
14063     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
14064                                                     Mask, Subtarget, DAG))
14065       return Broadcast;
14066 
14067     // Try to use bit rotation instructions.
14068     if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i8, V1, Mask,
14069                                                  Subtarget, DAG))
14070       return Rotate;
14071 
14072     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14073       return V;
14074 
14075     // Check whether we can widen this to an i16 shuffle by duplicating bytes.
14076     // Notably, this handles splat and partial-splat shuffles more efficiently.
14077     // However, it only makes sense if the pre-duplication shuffle simplifies
14078     // things significantly. Currently, this means we need to be able to
14079     // express the pre-duplication shuffle as an i16 shuffle.
14080     //
14081     // FIXME: We should check for other patterns which can be widened into an
14082     // i16 shuffle as well.
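    // Illustrative example: a partial-splat mask such as
    //   <5,5, 5,5, -1,-1, 7,7, 5,5, 5,5, 7,7, 7,7>
    // qualifies because every adjacent even/odd byte pair is either equal or
    // has an undef slot, so the pre-duplication shuffle is expressible in i16s.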
14083     auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
14084       for (int i = 0; i < 16; i += 2)
14085         if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
14086           return false;
14087 
14088       return true;
14089     };
14090     auto tryToWidenViaDuplication = [&]() -> SDValue {
14091       if (!canWidenViaDuplication(Mask))
14092         return SDValue();
14093       SmallVector<int, 4> LoInputs;
14094       copy_if(Mask, std::back_inserter(LoInputs),
14095               [](int M) { return M >= 0 && M < 8; });
14096       array_pod_sort(LoInputs.begin(), LoInputs.end());
14097       LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
14098                      LoInputs.end());
14099       SmallVector<int, 4> HiInputs;
14100       copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
14101       array_pod_sort(HiInputs.begin(), HiInputs.end());
14102       HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
14103                      HiInputs.end());
14104 
14105       bool TargetLo = LoInputs.size() >= HiInputs.size();
14106       ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
14107       ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
14108 
14109       int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
14110       SmallDenseMap<int, int, 8> LaneMap;
14111       for (int I : InPlaceInputs) {
14112         PreDupI16Shuffle[I/2] = I/2;
14113         LaneMap[I] = I;
14114       }
14115       int j = TargetLo ? 0 : 4, je = j + 4;
14116       for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
14117         // Check if j is already a shuffle of this input. This happens when
14118         // there are two adjacent bytes after we move the low one.
14119         if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
14120           // If we haven't yet mapped the input, search for a slot into which
14121           // we can map it.
14122           while (j < je && PreDupI16Shuffle[j] >= 0)
14123             ++j;
14124 
14125           if (j == je)
14126             // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
14127             return SDValue();
14128 
14129           // Map this input with the i16 shuffle.
14130           PreDupI16Shuffle[j] = MovingInputs[i] / 2;
14131         }
14132 
14133         // Update the lane map based on the mapping we ended up with.
14134         LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14135       }
14136       V1 = DAG.getBitcast(
14137           MVT::v16i8,
14138           DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14139                                DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14140 
14141       // Unpack the bytes to form the i16s that will be shuffled into place.
14142       bool EvenInUse = false, OddInUse = false;
14143       for (int i = 0; i < 16; i += 2) {
14144         EvenInUse |= (Mask[i + 0] >= 0);
14145         OddInUse |= (Mask[i + 1] >= 0);
14146         if (EvenInUse && OddInUse)
14147           break;
14148       }
14149       V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14150                        MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
14151                        OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
14152 
14153       int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14154       for (int i = 0; i < 16; ++i)
14155         if (Mask[i] >= 0) {
14156           int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14157           assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14158           if (PostDupI16Shuffle[i / 2] < 0)
14159             PostDupI16Shuffle[i / 2] = MappedMask;
14160           else
14161             assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14162                    "Conflicting entries in the original shuffle!");
14163         }
14164       return DAG.getBitcast(
14165           MVT::v16i8,
14166           DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14167                                DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14168     };
14169     if (SDValue V = tryToWidenViaDuplication())
14170       return V;
14171   }
14172 
14173   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
14174                                              Zeroable, Subtarget, DAG))
14175     return Masked;
14176 
14177   // Use dedicated unpack instructions for masks that match their pattern.
14178   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14179     return V;
14180 
14181   // Try to use byte shift instructions to mask.
14182   if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
14183                                               Zeroable, Subtarget, DAG))
14184     return V;
14185 
14186   // Check for compaction patterns.
14187   bool IsSingleInput = V2.isUndef();
14188   int NumEvenDrops = canLowerByDroppingElements(Mask, true, IsSingleInput);
14189 
14190   // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
14191   // with PSHUFB. It is important to do this before we attempt to generate any
14192   // blends but after all of the single-input lowerings. If the single input
14193   // lowerings can find an instruction sequence that is faster than a PSHUFB, we
14194   // want to preserve that and we can DAG combine any longer sequences into
14195   // a PSHUFB in the end. But once we start blending from multiple inputs,
14196   // the complexity of DAG combining bad patterns back into PSHUFB is too high,
14197   // and there are *very* few patterns that would actually be faster than the
14198   // PSHUFB approach because of its ability to zero lanes.
14199   //
14200   // If the mask is a binary compaction, we can more efficiently perform this
14201   // as a PACKUS(AND(),AND()) - which is quicker than UNPACK(PSHUFB(),PSHUFB()).
14202   //
14203   // FIXME: The only exceptions to the above are blends which are exact
14204   // interleavings with direct instructions supporting them. We currently don't
14205   // handle those well here.
14206   if (Subtarget.hasSSSE3() && (IsSingleInput || NumEvenDrops != 1)) {
14207     bool V1InUse = false;
14208     bool V2InUse = false;
14209 
14210     SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
14211         DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
14212 
14213     // If both V1 and V2 are in use and we can use a direct blend or an unpack,
14214     // do so. This avoids using them to handle blends-with-zero which is
14215     // important as a single pshufb is significantly faster for that.
14216     if (V1InUse && V2InUse) {
14217       if (Subtarget.hasSSE41())
14218         if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
14219                                                 Zeroable, Subtarget, DAG))
14220           return Blend;
14221 
14222       // We can use an unpack to do the blending rather than an or in some
14223       // cases. Even though the or may be (very slightly) more efficient, we
14224       // prefer this lowering because there are common cases where part of
14225       // the complexity of the shuffles goes away when we do the final blend as
14226       // an unpack.
14227       // FIXME: It might be worth trying to detect if the unpack-feeding
14228       // shuffles will both be pshufb, in which case we shouldn't bother with
14229       // this.
14230       if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
14231               DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14232         return Unpack;
14233 
14234       // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
14235       if (Subtarget.hasVBMI())
14236         return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, Subtarget,
14237                                      DAG);
14238 
14239       // If we have XOP we can use one VPPERM instead of multiple PSHUFBs.
14240       if (Subtarget.hasXOP()) {
14241         SDValue MaskNode = getConstVector(Mask, MVT::v16i8, DAG, DL, true);
14242         return DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, V1, V2, MaskNode);
14243       }
14244 
14245       // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
14246       // PALIGNR will be cheaper than the second PSHUFB+OR.
14247       if (SDValue V = lowerShuffleAsByteRotateAndPermute(
14248               DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
14249         return V;
14250     }
14251 
14252     return PSHUFB;
14253   }
14254 
14255   // There are special ways we can lower some single-element blends.
14256   if (NumV2Elements == 1)
14257     if (SDValue V = lowerShuffleAsElementInsertion(
14258             DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
14259       return V;
14260 
14261   if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
14262     return Blend;
14263 
14264   // Check whether a compaction lowering can be done. This handles shuffles
14265   // which take every Nth element for some even N. See the helper function for
14266   // details.
14267   //
14268   // We special case these as they can be particularly efficiently handled with
14269   // the PACKUSWB instruction on x86 and they show up in common patterns of
14270   // rearranging bytes to truncate wide elements.
14271   if (NumEvenDrops) {
14272     // NumEvenDrops is the power of two stride of the elements. Another way of
14273     // thinking about it is that we need to drop the even elements this many
14274     // times to get the original input.
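    // Illustrative example: NumEvenDrops == 1 corresponds to a mask that takes
    // every second byte, e.g. <0, 2, 4, ..., 30> for a two-input shuffle,
    // which is the usual i16 -> i8 truncation pattern.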
14275 
14276     // First we need to zero all the dropped bytes.
14277     assert(NumEvenDrops <= 3 &&
14278            "No support for dropping even elements more than 3 times.");
14279     SmallVector<SDValue, 8> WordClearOps(8, DAG.getConstant(0, DL, MVT::i16));
14280     for (unsigned i = 0; i != 8; i += 1 << (NumEvenDrops - 1))
14281       WordClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i16);
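    // E.g. (illustrative): NumEvenDrops == 1 sets 0xFF in all eight words
    // (keep the low byte of every i16), while NumEvenDrops == 2 sets 0xFF only
    // in words 0, 2, 4 and 6.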
14282     SDValue WordClearMask = DAG.getBuildVector(MVT::v8i16, DL, WordClearOps);
14283     V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V1),
14284                      WordClearMask);
14285     if (!IsSingleInput)
14286       V2 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V2),
14287                        WordClearMask);
14288 
14289     // Now pack things back together.
14290     SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
14291                                  IsSingleInput ? V1 : V2);
14292     for (int i = 1; i < NumEvenDrops; ++i) {
14293       Result = DAG.getBitcast(MVT::v8i16, Result);
14294       Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
14295     }
14296     return Result;
14297   }
14298 
14299   int NumOddDrops = canLowerByDroppingElements(Mask, false, IsSingleInput);
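  // Illustrative note: NumOddDrops == 1 means the mask takes every second byte
  // starting at 1 (<1, 3, 5, ..., 31>); shifting each i16 right by 8 moves
  // those bytes into the low byte of each word so PACKUS can gather them.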
14300   if (NumOddDrops == 1) {
14301     V1 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
14302                      DAG.getBitcast(MVT::v8i16, V1),
14303                      DAG.getTargetConstant(8, DL, MVT::i8));
14304     if (!IsSingleInput)
14305       V2 = DAG.getNode(X86ISD::VSRLI, DL, MVT::v8i16,
14306                        DAG.getBitcast(MVT::v8i16, V2),
14307                        DAG.getTargetConstant(8, DL, MVT::i8));
14308     return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
14309                        IsSingleInput ? V1 : V2);
14310   }
14311 
14312   // Handle multi-input cases by blending/unpacking single-input shuffles.
14313   if (NumV2Elements > 0)
14314     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v16i8, V1, V2, Mask,
14315                                                 Subtarget, DAG);
14316 
14317   // The fallback path for single-input shuffles widens this into two v8i16
14318   // vectors with unpacks, shuffles those, and then pulls them back together
14319   // with a pack.
14320   SDValue V = V1;
14321 
14322   std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14323   std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
14324   for (int i = 0; i < 16; ++i)
14325     if (Mask[i] >= 0)
14326       (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
14327 
14328   SDValue VLoHalf, VHiHalf;
14329   // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
14330   // them out and avoid using UNPCK{L,H} to extract the elements of V as
14331   // i16s.
14332   if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
14333       none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
14334     // Use a mask to drop the high bytes.
14335     VLoHalf = DAG.getBitcast(MVT::v8i16, V);
14336     VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
14337                           DAG.getConstant(0x00FF, DL, MVT::v8i16));
14338 
14339     // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
14340     VHiHalf = DAG.getUNDEF(MVT::v8i16);
14341 
14342     // Squash the masks to point directly into VLoHalf.
14343     for (int &M : LoBlendMask)
14344       if (M >= 0)
14345         M /= 2;
14346     for (int &M : HiBlendMask)
14347       if (M >= 0)
14348         M /= 2;
14349   } else {
14350     // Otherwise just unpack the low half of V into VLoHalf and the high half into
14351     // VHiHalf so that we can blend them as i16s.
14352     SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
14353 
14354     VLoHalf = DAG.getBitcast(
14355         MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
14356     VHiHalf = DAG.getBitcast(
14357         MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
14358   }
14359 
14360   SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
14361   SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
14362 
14363   return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
14364 }
14365 
14366 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
14367 ///
14368 /// This routine breaks down the specific type of 128-bit shuffle and
14369 /// dispatches to the lowering routines accordingly.
14370 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
14371                                   MVT VT, SDValue V1, SDValue V2,
14372                                   const APInt &Zeroable,
14373                                   const X86Subtarget &Subtarget,
14374                                   SelectionDAG &DAG) {
14375   if (VT == MVT::v8bf16) {
14376     V1 = DAG.getBitcast(MVT::v8i16, V1);
14377     V2 = DAG.getBitcast(MVT::v8i16, V2);
14378     return DAG.getBitcast(VT,
14379                           DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, Mask));
14380   }
14381 
14382   switch (VT.SimpleTy) {
14383   case MVT::v2i64:
14384     return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14385   case MVT::v2f64:
14386     return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14387   case MVT::v4i32:
14388     return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14389   case MVT::v4f32:
14390     return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14391   case MVT::v8i16:
14392     return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14393   case MVT::v8f16:
14394     return lowerV8F16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14395   case MVT::v16i8:
14396     return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
14397 
14398   default:
14399     llvm_unreachable("Unimplemented!");
14400   }
14401 }
14402 
14403 /// Generic routine to split vector shuffle into half-sized shuffles.
14404 ///
14405 /// This routine just extracts two subvectors, shuffles them independently, and
14406 /// then concatenates them back together. This should work effectively with all
14407 /// AVX vector shuffle types.
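/// (Illustrative: a v8i32 shuffle <0, 8, 1, 9, 6, 14, 7, 15> becomes a v4i32
/// shuffle of the two low input halves for the low result half, a v4i32
/// shuffle of the two high input halves for the high result half, and a
/// CONCAT_VECTORS to rejoin them.)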
14408 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
14409                                     SDValue V2, ArrayRef<int> Mask,
14410                                     SelectionDAG &DAG, bool SimpleOnly) {
14411   assert(VT.getSizeInBits() >= 256 &&
14412          "Only for 256-bit or wider vector shuffles!");
14413   assert(V1.getSimpleValueType() == VT && "Bad operand type!");
14414   assert(V2.getSimpleValueType() == VT && "Bad operand type!");
14415 
14416   ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
14417   ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
14418 
14419   int NumElements = VT.getVectorNumElements();
14420   int SplitNumElements = NumElements / 2;
14421   MVT ScalarVT = VT.getVectorElementType();
14422   MVT SplitVT = MVT::getVectorVT(ScalarVT, SplitNumElements);
14423 
14424   // Use splitVector/extractSubVector so that split build-vectors just build two
14425   // narrower build vectors. This helps shuffling with splats and zeros.
14426   auto SplitVector = [&](SDValue V) {
14427     SDValue LoV, HiV;
14428     std::tie(LoV, HiV) = splitVector(peekThroughBitcasts(V), DAG, DL);
14429     return std::make_pair(DAG.getBitcast(SplitVT, LoV),
14430                           DAG.getBitcast(SplitVT, HiV));
14431   };
14432 
14433   SDValue LoV1, HiV1, LoV2, HiV2;
14434   std::tie(LoV1, HiV1) = SplitVector(V1);
14435   std::tie(LoV2, HiV2) = SplitVector(V2);
14436 
14437   // Now create two 4-way blends of these half-width vectors.
14438   auto GetHalfBlendPiecesReq = [&](const ArrayRef<int> &HalfMask, bool &UseLoV1,
14439                                    bool &UseHiV1, bool &UseLoV2,
14440                                    bool &UseHiV2) {
14441     UseLoV1 = UseHiV1 = UseLoV2 = UseHiV2 = false;
14442     for (int i = 0; i < SplitNumElements; ++i) {
14443       int M = HalfMask[i];
14444       if (M >= NumElements) {
14445         if (M >= NumElements + SplitNumElements)
14446           UseHiV2 = true;
14447         else
14448           UseLoV2 = true;
14449       } else if (M >= 0) {
14450         if (M >= SplitNumElements)
14451           UseHiV1 = true;
14452         else
14453           UseLoV1 = true;
14454       }
14455     }
14456   };
14457 
14458   auto CheckHalfBlendUsable = [&](const ArrayRef<int> &HalfMask) -> bool {
14459     if (!SimpleOnly)
14460       return true;
14461 
14462     bool UseLoV1, UseHiV1, UseLoV2, UseHiV2;
14463     GetHalfBlendPiecesReq(HalfMask, UseLoV1, UseHiV1, UseLoV2, UseHiV2);
14464 
14465     return !(UseHiV1 || UseHiV2);
14466   };
14467 
14468   auto HalfBlend = [&](ArrayRef<int> HalfMask) {
14469     SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
14470     SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
14471     SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
14472     for (int i = 0; i < SplitNumElements; ++i) {
14473       int M = HalfMask[i];
14474       if (M >= NumElements) {
14475         V2BlendMask[i] = M - NumElements;
14476         BlendMask[i] = SplitNumElements + i;
14477       } else if (M >= 0) {
14478         V1BlendMask[i] = M;
14479         BlendMask[i] = i;
14480       }
14481     }
14482 
14483     bool UseLoV1, UseHiV1, UseLoV2, UseHiV2;
14484     GetHalfBlendPiecesReq(HalfMask, UseLoV1, UseHiV1, UseLoV2, UseHiV2);
14485 
14486     // Because the lowering happens after all combining takes place, we need to
14487     // manually combine these blend masks as much as possible so that we create
14488     // a minimal number of high-level vector shuffle nodes.
14489     assert((!SimpleOnly || (!UseHiV1 && !UseHiV2)) && "Shuffle isn't simple");
14490 
14491     // First try just blending the halves of V1 or V2.
14492     if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
14493       return DAG.getUNDEF(SplitVT);
14494     if (!UseLoV2 && !UseHiV2)
14495       return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14496     if (!UseLoV1 && !UseHiV1)
14497       return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14498 
14499     SDValue V1Blend, V2Blend;
14500     if (UseLoV1 && UseHiV1) {
14501       V1Blend = DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
14502     } else {
14503       // We only use half of V1 so map the usage down into the final blend mask.
14504       V1Blend = UseLoV1 ? LoV1 : HiV1;
14505       for (int i = 0; i < SplitNumElements; ++i)
14506         if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
14507           BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
14508     }
14509     if (UseLoV2 && UseHiV2) {
14510       V2Blend = DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
14511     } else {
14512       // We only use half of V2 so map the usage down into the final blend mask.
14513       V2Blend = UseLoV2 ? LoV2 : HiV2;
14514       for (int i = 0; i < SplitNumElements; ++i)
14515         if (BlendMask[i] >= SplitNumElements)
14516           BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
14517     }
14518     return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
14519   };
14520 
14521   if (!CheckHalfBlendUsable(LoMask) || !CheckHalfBlendUsable(HiMask))
14522     return SDValue();
14523 
14524   SDValue Lo = HalfBlend(LoMask);
14525   SDValue Hi = HalfBlend(HiMask);
14526   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
14527 }
14528 
14529 /// Either split a vector in halves or decompose the shuffles and the
14530 /// blend/unpack.
14531 ///
14532 /// This is provided as a good fallback for many lowerings of non-single-input
14533 /// shuffles with more than one 128-bit lane. In those cases, we want to select
14534 /// between splitting the shuffle into 128-bit components and stitching those
14535 /// back together vs. extracting the single-input shuffles and blending those
14536 /// results.
14537 static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
14538                                           SDValue V2, ArrayRef<int> Mask,
14539                                           const X86Subtarget &Subtarget,
14540                                           SelectionDAG &DAG) {
14541   assert(!V2.isUndef() && "This routine must not be used to lower single-input "
14542          "shuffles as it could then recurse on itself.");
14543   int Size = Mask.size();
14544 
14545   // If this can be modeled as a broadcast of two elements followed by a blend,
14546   // prefer that lowering. This is especially important because broadcasts can
14547   // often fold with memory operands.
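  // (Illustrative: a v8f32 mask such as <0, 8, 0, 8, 0, 8, 0, 8> broadcasts
  // element 0 of each input and then blends the results, which maps naturally
  // onto two broadcasts plus a blend.)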
14548   auto DoBothBroadcast = [&] {
14549     int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
14550     for (int M : Mask)
14551       if (M >= Size) {
14552         if (V2BroadcastIdx < 0)
14553           V2BroadcastIdx = M - Size;
14554         else if (M - Size != V2BroadcastIdx)
14555           return false;
14556       } else if (M >= 0) {
14557         if (V1BroadcastIdx < 0)
14558           V1BroadcastIdx = M;
14559         else if (M != V1BroadcastIdx)
14560           return false;
14561       }
14562     return true;
14563   };
14564   if (DoBothBroadcast())
14565     return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
14566                                                 DAG);
14567 
14568   // If the inputs all stem from a single 128-bit lane of each input, then we
14569   // split them rather than blending because the split will decompose to
14570   // unusually few instructions.
14571   int LaneCount = VT.getSizeInBits() / 128;
14572   int LaneSize = Size / LaneCount;
14573   SmallBitVector LaneInputs[2];
14574   LaneInputs[0].resize(LaneCount, false);
14575   LaneInputs[1].resize(LaneCount, false);
14576   for (int i = 0; i < Size; ++i)
14577     if (Mask[i] >= 0)
14578       LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
14579   if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
14580     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
14581                                 /*SimpleOnly*/ false);
14582 
14583   // Otherwise, just fall back to decomposed shuffles and a blend/unpack. This
14584   // requires that the decomposed single-input shuffles don't end up here.
14585   return lowerShuffleAsDecomposedShuffleMerge(DL, VT, V1, V2, Mask, Subtarget,
14586                                               DAG);
14587 }
14588 
14589 // Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
14590 // TODO: Extend to support v8f32 (+ 512-bit shuffles).
14591 static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
14592                                                  SDValue V1, SDValue V2,
14593                                                  ArrayRef<int> Mask,
14594                                                  SelectionDAG &DAG) {
14595   assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");
14596 
14597   int LHSMask[4] = {-1, -1, -1, -1};
14598   int RHSMask[4] = {-1, -1, -1, -1};
14599   unsigned SHUFPMask = 0;
14600 
14601   // As SHUFPD uses a single LHS/RHS element per lane, we can always
14602   // perform the shuffle once the lanes have been shuffled in place.
14603   for (int i = 0; i != 4; ++i) {
14604     int M = Mask[i];
14605     if (M < 0)
14606       continue;
14607     int LaneBase = i & ~1;
14608     auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
14609     LaneMask[LaneBase + (M & 1)] = M;
14610     SHUFPMask |= (M & 1) << i;
14611   }
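  // Worked example (illustrative): Mask == <2, 4, 1, 7> builds
  // LHSMask == <2, -1, -1, 1>, RHSMask == <4, -1, -1, 7> and SHUFPMask == 0xC,
  // so the final SHUFPD picks V1[2], V2[0], V1[1] and V2[3] from the two
  // pre-shuffled operands.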
14612 
14613   SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
14614   SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
14615   return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
14616                      DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
14617 }
14618 
14619 /// Lower a vector shuffle crossing multiple 128-bit lanes as
14620 /// a lane permutation followed by a per-lane permutation.
14621 ///
14622 /// This is mainly for cases where we can have non-repeating permutes
14623 /// in each lane.
14624 ///
14625 /// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
14626 /// we should investigate merging them.
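/// (Illustrative: a v8f32 mask <7, 6, 5, 4, 3, 2, 1, 0> becomes a cross-lane
/// permute that swaps the two 128-bit lanes followed by an in-lane permute
/// that reverses the elements within each lane.)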
14627 static SDValue lowerShuffleAsLanePermuteAndPermute(
14628     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14629     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14630   int NumElts = VT.getVectorNumElements();
14631   int NumLanes = VT.getSizeInBits() / 128;
14632   int NumEltsPerLane = NumElts / NumLanes;
14633   bool CanUseSublanes = Subtarget.hasAVX2() && V2.isUndef();
14634 
14635   /// Attempts to find a sublane permute with the given size
14636   /// that gets all elements into their target lanes.
14637   ///
14638   /// If successful, fills CrossLaneMask and InLaneMask and returns the lowered
14639   /// shuffle. If unsuccessful, returns SDValue() and may overwrite InLaneMask.
14640   auto getSublanePermute = [&](int NumSublanes) -> SDValue {
14641     int NumSublanesPerLane = NumSublanes / NumLanes;
14642     int NumEltsPerSublane = NumElts / NumSublanes;
14643 
14644     SmallVector<int, 16> CrossLaneMask;
14645     SmallVector<int, 16> InLaneMask(NumElts, SM_SentinelUndef);
14646     // CrossLaneMask but one entry == one sublane.
14647     SmallVector<int, 16> CrossLaneMaskLarge(NumSublanes, SM_SentinelUndef);
14648 
14649     for (int i = 0; i != NumElts; ++i) {
14650       int M = Mask[i];
14651       if (M < 0)
14652         continue;
14653 
14654       int SrcSublane = M / NumEltsPerSublane;
14655       int DstLane = i / NumEltsPerLane;
14656 
14657       // We only need to get the elements into the right lane, not sublane.
14658       // So search all sublanes that make up the destination lane.
14659       bool Found = false;
14660       int DstSubStart = DstLane * NumSublanesPerLane;
14661       int DstSubEnd = DstSubStart + NumSublanesPerLane;
14662       for (int DstSublane = DstSubStart; DstSublane < DstSubEnd; ++DstSublane) {
14663         if (!isUndefOrEqual(CrossLaneMaskLarge[DstSublane], SrcSublane))
14664           continue;
14665 
14666         Found = true;
14667         CrossLaneMaskLarge[DstSublane] = SrcSublane;
14668         int DstSublaneOffset = DstSublane * NumEltsPerSublane;
14669         InLaneMask[i] = DstSublaneOffset + M % NumEltsPerSublane;
14670         break;
14671       }
14672       if (!Found)
14673         return SDValue();
14674     }
14675 
14676     // Fill CrossLaneMask using CrossLaneMaskLarge.
14677     narrowShuffleMaskElts(NumEltsPerSublane, CrossLaneMaskLarge, CrossLaneMask);
14678 
14679     if (!CanUseSublanes) {
14680       // If we're only shuffling a single lowest lane and the rest are identity
14681       // then don't bother.
14682       // TODO - isShuffleMaskInputInPlace could be extended to something like
14683       // this.
14684       int NumIdentityLanes = 0;
14685       bool OnlyShuffleLowestLane = true;
14686       for (int i = 0; i != NumLanes; ++i) {
14687         int LaneOffset = i * NumEltsPerLane;
14688         if (isSequentialOrUndefInRange(InLaneMask, LaneOffset, NumEltsPerLane,
14689                                        i * NumEltsPerLane))
14690           NumIdentityLanes++;
14691         else if (CrossLaneMask[LaneOffset] != 0)
14692           OnlyShuffleLowestLane = false;
14693       }
14694       if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
14695         return SDValue();
14696     }
14697 
14698     // Avoid returning the same shuffle operation. For example,
14699     // t7: v16i16 = vector_shuffle<8,9,10,11,4,5,6,7,0,1,2,3,12,13,14,15> t5,
14700     //                             undef:v16i16
14701     if (CrossLaneMask == Mask || InLaneMask == Mask)
14702       return SDValue();
14703 
14704     SDValue CrossLane = DAG.getVectorShuffle(VT, DL, V1, V2, CrossLaneMask);
14705     return DAG.getVectorShuffle(VT, DL, CrossLane, DAG.getUNDEF(VT),
14706                                 InLaneMask);
14707   };
14708 
14709   // First attempt a solution with full lanes.
14710   if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes))
14711     return V;
14712 
14713   // The rest of the solutions use sublanes.
14714   if (!CanUseSublanes)
14715     return SDValue();
14716 
14717   // Then attempt a solution with 64-bit sublanes (vpermq).
14718   if (SDValue V = getSublanePermute(/*NumSublanes=*/NumLanes * 2))
14719     return V;
14720 
14721   // If that doesn't work and we have fast variable cross-lane shuffle,
14722   // attempt 32-bit sublanes (vpermd).
14723   if (!Subtarget.hasFastVariableCrossLaneShuffle())
14724     return SDValue();
14725 
14726   return getSublanePermute(/*NumSublanes=*/NumLanes * 4);
14727 }
14728 
14729 /// Helper to compute the in-lane shuffle mask for a complete shuffle mask.
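/// (Illustrative: with LaneSize == 4 and Mask == <4, 1, 2, 3, 0, 5, 6, 7>, the
/// two cross-lane entries are remapped in-lane and offset by Size, giving
/// InLaneMask == <8, 1, 2, 3, 12, 5, 6, 7>, so that they later select from the
/// caller's lane-permuted copy of the input.)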
14730 static void computeInLaneShuffleMask(const ArrayRef<int> &Mask, int LaneSize,
14731                                      SmallVector<int> &InLaneMask) {
14732   int Size = Mask.size();
14733   InLaneMask.assign(Mask.begin(), Mask.end());
14734   for (int i = 0; i < Size; ++i) {
14735     int &M = InLaneMask[i];
14736     if (M < 0)
14737       continue;
14738     if (((M % Size) / LaneSize) != (i / LaneSize))
14739       M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
14740   }
14741 }
14742 
14743 /// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
14744 /// source with a lane permutation.
14745 ///
14746 /// This lowering strategy results in four instructions in the worst case for a
14747 /// single-input cross lane shuffle which is lower than any other fully general
14748 /// cross-lane shuffle strategy I'm aware of. Special cases for each particular
14749 /// shuffle pattern should be handled prior to trying this lowering.
14750 static SDValue lowerShuffleAsLanePermuteAndShuffle(
14751     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14752     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
14753   // FIXME: This should probably be generalized for 512-bit vectors as well.
14754   assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
14755   int Size = Mask.size();
14756   int LaneSize = Size / 2;
14757 
14758   // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
14759   // Only do this if the elements aren't all from the lower lane,
14760   // otherwise we're (probably) better off doing a split.
14761   if (VT == MVT::v4f64 &&
14762       !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
14763     return lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG);
14764 
14765   // If there are only inputs from one 128-bit lane, splitting will in fact be
14766   // less expensive. The flags track whether the given lane contains an element
14767   // that crosses to another lane.
14768   bool AllLanes;
14769   if (!Subtarget.hasAVX2()) {
14770     bool LaneCrossing[2] = {false, false};
14771     for (int i = 0; i < Size; ++i)
14772       if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
14773         LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
14774     AllLanes = LaneCrossing[0] && LaneCrossing[1];
14775   } else {
14776     bool LaneUsed[2] = {false, false};
14777     for (int i = 0; i < Size; ++i)
14778       if (Mask[i] >= 0)
14779         LaneUsed[(Mask[i] % Size) / LaneSize] = true;
14780     AllLanes = LaneUsed[0] && LaneUsed[1];
14781   }
14782 
14783   // TODO - we could support shuffling V2 in the Flipped input.
14784   assert(V2.isUndef() &&
14785          "The last part of this routine only works on single-input shuffles");
14786 
14787   SmallVector<int> InLaneMask;
14788   computeInLaneShuffleMask(Mask, Mask.size() / 2, InLaneMask);
14789 
14790   assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
14791          "In-lane shuffle mask expected");
14792 
14793   // If we're not using both lanes in each lane and the inlane mask is not
14794   // repeating, then we're better off splitting.
14795   if (!AllLanes && !is128BitLaneRepeatedShuffleMask(VT, InLaneMask))
14796     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
14797                                 /*SimpleOnly*/ false);
14798 
14799   // Flip the lanes, and shuffle the results which should now be in-lane.
14800   MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
14801   SDValue Flipped = DAG.getBitcast(PVT, V1);
14802   Flipped =
14803       DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
14804   Flipped = DAG.getBitcast(VT, Flipped);
14805   return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
14806 }
14807 
14808 /// Handle lowering 2-lane 128-bit shuffles.
14809 static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
14810                                   SDValue V2, ArrayRef<int> Mask,
14811                                   const APInt &Zeroable,
14812                                   const X86Subtarget &Subtarget,
14813                                   SelectionDAG &DAG) {
14814   if (V2.isUndef()) {
14815     // Attempt to match VBROADCAST*128 subvector broadcast load.
14816     bool SplatLo = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1);
14817     bool SplatHi = isShuffleEquivalent(Mask, {2, 3, 2, 3}, V1);
14818     if ((SplatLo || SplatHi) && !Subtarget.hasAVX512() && V1.hasOneUse() &&
14819         X86::mayFoldLoad(peekThroughOneUseBitcasts(V1), Subtarget)) {
14820       MVT MemVT = VT.getHalfNumVectorElementsVT();
14821       unsigned Ofs = SplatLo ? 0 : MemVT.getStoreSize();
14822       auto *Ld = cast<LoadSDNode>(peekThroughOneUseBitcasts(V1));
14823       if (SDValue BcstLd = getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, DL,
14824                                              VT, MemVT, Ld, Ofs, DAG))
14825         return BcstLd;
14826     }
14827 
14828     // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
14829     if (Subtarget.hasAVX2())
14830       return SDValue();
14831   }
14832 
14833   bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());
14834 
14835   SmallVector<int, 4> WidenedMask;
14836   if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
14837     return SDValue();
14838 
14839   bool IsLowZero = (Zeroable & 0x3) == 0x3;
14840   bool IsHighZero = (Zeroable & 0xc) == 0xc;
14841 
14842   // Try to use an insert into a zero vector.
14843   if (WidenedMask[0] == 0 && IsHighZero) {
14844     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14845     SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
14846                               DAG.getIntPtrConstant(0, DL));
14847     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
14848                        getZeroVector(VT, Subtarget, DAG, DL), LoV,
14849                        DAG.getIntPtrConstant(0, DL));
14850   }
14851 
14852   // TODO: If minimizing size and one of the inputs is a zero vector and the
14853   // zero vector has only one use, we could use a VPERM2X128 to save the
14854   // instruction bytes needed to explicitly generate the zero vector.
14855 
14856   // Blends are faster and handle all the non-lane-crossing cases.
14857   if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
14858                                           Subtarget, DAG))
14859     return Blend;
14860 
14861   // If either input operand is a zero vector, use VPERM2X128 because its mask
14862   // allows us to replace the zero input with an implicit zero.
14863   if (!IsLowZero && !IsHighZero) {
14864     // Check for patterns which can be matched with a single insert of a 128-bit
14865     // subvector.
14866     bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 0, 1}, V1, V2);
14867     if (OnlyUsesV1 || isShuffleEquivalent(Mask, {0, 1, 4, 5}, V1, V2)) {
14868 
14869       // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
14870       // this will likely become vinsertf128 which can't fold a 256-bit memop.
14871       if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
14872         MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
14873         SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
14874                                      OnlyUsesV1 ? V1 : V2,
14875                                      DAG.getIntPtrConstant(0, DL));
14876         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
14877                            DAG.getIntPtrConstant(2, DL));
14878       }
14879     }
14880 
14881     // Try to use SHUF128 if possible.
14882     if (Subtarget.hasVLX()) {
14883       if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
14884         unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
14885                             ((WidenedMask[1] % 2) << 1);
14886         return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
14887                            DAG.getTargetConstant(PermMask, DL, MVT::i8));
14888       }
14889     }
14890   }
14891 
14892   // Otherwise form a 128-bit permutation. After accounting for undefs,
14893   // convert the 64-bit shuffle mask selection values into 128-bit
14894   // selection bits by dividing the indexes by 2 and shifting into positions
14895   // defined by a vperm2*128 instruction's immediate control byte.
14896 
14897   // The immediate permute control byte looks like this:
14898   //    [1:0] - select 128 bits from sources for low half of destination
14899   //    [2]   - ignore
14900   //    [3]   - zero low half of destination
14901   //    [5:4] - select 128 bits from sources for high half of destination
14902   //    [6]   - ignore
14903   //    [7]   - zero high half of destination
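  // Worked example (illustrative): a v4f64 mask <2, 3, 6, 7> widens to <1, 3>,
  // selecting the high half of V1 for the low destination half and the high
  // half of V2 for the high destination half, i.e. an immediate of
  // (1 << 0) | (3 << 4) == 0x31.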
14904 
14905   assert((WidenedMask[0] >= 0 || IsLowZero) &&
14906          (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");
14907 
14908   unsigned PermMask = 0;
14909   PermMask |= IsLowZero  ? 0x08 : (WidenedMask[0] << 0);
14910   PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);
14911 
14912   // Check the immediate mask and replace unused sources with undef.
14913   if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
14914     V1 = DAG.getUNDEF(VT);
14915   if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
14916     V2 = DAG.getUNDEF(VT);
14917 
14918   return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
14919                      DAG.getTargetConstant(PermMask, DL, MVT::i8));
14920 }
14921 
14922 /// Lower a vector shuffle by first fixing the 128-bit lanes and then
14923 /// shuffling each lane.
14924 ///
14925 /// This attempts to create a repeated lane shuffle where each lane uses one
14926 /// or two of the lanes of the inputs. The lanes of the input vectors are
14927 /// shuffled in one or two independent shuffles to get the lanes into the
14928 /// position needed by the final shuffle.
14929 static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
14930     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14931     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14932   assert(!V2.isUndef() && "This is only useful with multiple inputs.");
14933 
14934   if (is128BitLaneRepeatedShuffleMask(VT, Mask))
14935     return SDValue();
14936 
14937   int NumElts = Mask.size();
14938   int NumLanes = VT.getSizeInBits() / 128;
14939   int NumLaneElts = 128 / VT.getScalarSizeInBits();
14940   SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
14941   SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});
14942 
14943   // First pass will try to fill in the RepeatMask from lanes that need two
14944   // sources.
14945   for (int Lane = 0; Lane != NumLanes; ++Lane) {
14946     int Srcs[2] = {-1, -1};
14947     SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
14948     for (int i = 0; i != NumLaneElts; ++i) {
14949       int M = Mask[(Lane * NumLaneElts) + i];
14950       if (M < 0)
14951         continue;
14952       // Determine which of the possible input lanes (NumLanes from each source)
14953       // this element comes from. Assign that as one of the sources for this
14954       // lane. We can assign up to 2 sources for this lane. If we run out of
14955       // sources we can't do anything.
14956       int LaneSrc = M / NumLaneElts;
14957       int Src;
14958       if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
14959         Src = 0;
14960       else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
14961         Src = 1;
14962       else
14963         return SDValue();
14964 
14965       Srcs[Src] = LaneSrc;
14966       InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
14967     }
14968 
14969     // If this lane has two sources, see if it fits with the repeat mask so far.
14970     if (Srcs[1] < 0)
14971       continue;
14972 
14973     LaneSrcs[Lane][0] = Srcs[0];
14974     LaneSrcs[Lane][1] = Srcs[1];
14975 
14976     auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
14977       assert(M1.size() == M2.size() && "Unexpected mask size");
14978       for (int i = 0, e = M1.size(); i != e; ++i)
14979         if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
14980           return false;
14981       return true;
14982     };
14983 
14984     auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
14985       assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
14986       for (int i = 0, e = MergedMask.size(); i != e; ++i) {
14987         int M = Mask[i];
14988         if (M < 0)
14989           continue;
14990         assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
14991                "Unexpected mask element");
14992         MergedMask[i] = M;
14993       }
14994     };
14995 
14996     if (MatchMasks(InLaneMask, RepeatMask)) {
14997       // Merge this lane mask into the final repeat mask.
14998       MergeMasks(InLaneMask, RepeatMask);
14999       continue;
15000     }
15001 
15002     // Didn't find a match. Swap the operands and try again.
15003     std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
15004     ShuffleVectorSDNode::commuteMask(InLaneMask);
15005 
15006     if (MatchMasks(InLaneMask, RepeatMask)) {
15007       // Merge this lane mask into the final repeat mask.
15008       MergeMasks(InLaneMask, RepeatMask);
15009       continue;
15010     }
15011 
15012     // Couldn't find a match with the operands in either order.
15013     return SDValue();
15014   }
15015 
15016   // Now handle any lanes with only one source.
15017   for (int Lane = 0; Lane != NumLanes; ++Lane) {
15018     // If this lane has already been processed, skip it.
15019     if (LaneSrcs[Lane][0] >= 0)
15020       continue;
15021 
15022     for (int i = 0; i != NumLaneElts; ++i) {
15023       int M = Mask[(Lane * NumLaneElts) + i];
15024       if (M < 0)
15025         continue;
15026 
15027       // If RepeatMask isn't defined yet we can define it ourselves.
15028       if (RepeatMask[i] < 0)
15029         RepeatMask[i] = M % NumLaneElts;
15030 
15031       if (RepeatMask[i] < NumElts) {
15032         if (RepeatMask[i] != M % NumLaneElts)
15033           return SDValue();
15034         LaneSrcs[Lane][0] = M / NumLaneElts;
15035       } else {
15036         if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
15037           return SDValue();
15038         LaneSrcs[Lane][1] = M / NumLaneElts;
15039       }
15040     }
15041 
15042     if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
15043       return SDValue();
15044   }
15045 
15046   SmallVector<int, 16> NewMask(NumElts, -1);
15047   for (int Lane = 0; Lane != NumLanes; ++Lane) {
15048     int Src = LaneSrcs[Lane][0];
15049     for (int i = 0; i != NumLaneElts; ++i) {
15050       int M = -1;
15051       if (Src >= 0)
15052         M = Src * NumLaneElts + i;
15053       NewMask[Lane * NumLaneElts + i] = M;
15054     }
15055   }
15056   SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15057   // Ensure we didn't get back the shuffle we started with.
15058   // FIXME: This is a hack to make up for some splat handling code in
15059   // getVectorShuffle.
15060   if (isa<ShuffleVectorSDNode>(NewV1) &&
15061       cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
15062     return SDValue();
15063 
15064   for (int Lane = 0; Lane != NumLanes; ++Lane) {
15065     int Src = LaneSrcs[Lane][1];
15066     for (int i = 0; i != NumLaneElts; ++i) {
15067       int M = -1;
15068       if (Src >= 0)
15069         M = Src * NumLaneElts + i;
15070       NewMask[Lane * NumLaneElts + i] = M;
15071     }
15072   }
15073   SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
15074   // Ensure we didn't get back the shuffle we started with.
15075   // FIXME: This is a hack to make up for some splat handling code in
15076   // getVectorShuffle.
15077   if (isa<ShuffleVectorSDNode>(NewV2) &&
15078       cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
15079     return SDValue();
15080 
15081   for (int i = 0; i != NumElts; ++i) {
15082     if (Mask[i] < 0) {
15083       NewMask[i] = -1;
15084       continue;
15085     }
15086     NewMask[i] = RepeatMask[i % NumLaneElts];
15087     if (NewMask[i] < 0)
15088       continue;
15089 
15090     NewMask[i] += (i / NumLaneElts) * NumLaneElts;
15091   }
15092   return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
15093 }
15094 
15095 /// If the input shuffle mask results in a vector that is undefined in all upper
15096 /// or lower half elements and that mask accesses only 2 halves of the
15097 /// shuffle's operands, return true. A mask of half the width with mask indexes
15098 /// adjusted to access the extracted halves of the original shuffle operands is
15099 /// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
15100 /// lower half of each input operand is accessed.
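/// (Illustrative: for a v8f32 mask <0, 12, 1, 13, u, u, u, u> this returns
/// true with HalfIdx1 == 0 (lower half of V1), HalfIdx2 == 3 (upper half of
/// V2) and HalfMask == <0, 4, 1, 5>.)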
15101 static bool
15102 getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
15103                    int &HalfIdx1, int &HalfIdx2) {
15104   assert((Mask.size() == HalfMask.size() * 2) &&
15105          "Expected input mask to be twice as long as output");
15106 
15107   // Exactly one half of the result must be undef to allow narrowing.
15108   bool UndefLower = isUndefLowerHalf(Mask);
15109   bool UndefUpper = isUndefUpperHalf(Mask);
15110   if (UndefLower == UndefUpper)
15111     return false;
15112 
15113   unsigned HalfNumElts = HalfMask.size();
15114   unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
15115   HalfIdx1 = -1;
15116   HalfIdx2 = -1;
15117   for (unsigned i = 0; i != HalfNumElts; ++i) {
15118     int M = Mask[i + MaskIndexOffset];
15119     if (M < 0) {
15120       HalfMask[i] = M;
15121       continue;
15122     }
15123 
15124     // Determine which of the 4 half vectors this element is from.
15125     // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
15126     int HalfIdx = M / HalfNumElts;
15127 
15128     // Determine the element index into its half vector source.
15129     int HalfElt = M % HalfNumElts;
15130 
15131     // We can shuffle with up to 2 half vectors, set the new 'half'
15132     // shuffle mask accordingly.
15133     if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
15134       HalfMask[i] = HalfElt;
15135       HalfIdx1 = HalfIdx;
15136       continue;
15137     }
15138     if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
15139       HalfMask[i] = HalfElt + HalfNumElts;
15140       HalfIdx2 = HalfIdx;
15141       continue;
15142     }
15143 
15144     // Too many half vectors referenced.
15145     return false;
15146   }
15147 
15148   return true;
15149 }
15150 
15151 /// Given the output values from getHalfShuffleMask(), create a half width
15152 /// shuffle of extracted vectors followed by an insert back to full width.
15153 static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
15154                                      ArrayRef<int> HalfMask, int HalfIdx1,
15155                                      int HalfIdx2, bool UndefLower,
15156                                      SelectionDAG &DAG, bool UseConcat = false) {
15157   assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
15158   assert(V1.getValueType().isSimple() && "Expecting only simple types");
15159 
15160   MVT VT = V1.getSimpleValueType();
15161   MVT HalfVT = VT.getHalfNumVectorElementsVT();
15162   unsigned HalfNumElts = HalfVT.getVectorNumElements();
15163 
15164   auto getHalfVector = [&](int HalfIdx) {
15165     if (HalfIdx < 0)
15166       return DAG.getUNDEF(HalfVT);
15167     SDValue V = (HalfIdx < 2 ? V1 : V2);
15168     HalfIdx = (HalfIdx % 2) * HalfNumElts;
15169     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
15170                        DAG.getIntPtrConstant(HalfIdx, DL));
15171   };
15172 
15173   // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
15174   SDValue Half1 = getHalfVector(HalfIdx1);
15175   SDValue Half2 = getHalfVector(HalfIdx2);
15176   SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
15177   if (UseConcat) {
15178     SDValue Op0 = V;
15179     SDValue Op1 = DAG.getUNDEF(HalfVT);
15180     if (UndefLower)
15181       std::swap(Op0, Op1);
15182     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
15183   }
15184 
15185   unsigned Offset = UndefLower ? HalfNumElts : 0;
15186   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
15187                      DAG.getIntPtrConstant(Offset, DL));
15188 }
15189 
15190 /// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
15191 /// This allows for fast cases such as subvector extraction/insertion
15192 /// or shuffling smaller vector types which can lower more efficiently.
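/// (Illustrative: a v8f32 mask <4, 5, 6, 7, u, u, u, u> is lowered as an
/// EXTRACT_SUBVECTOR of the upper half of V1 inserted into the lower half of
/// an undef v8f32.)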
15193 static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
15194                                          SDValue V2, ArrayRef<int> Mask,
15195                                          const X86Subtarget &Subtarget,
15196                                          SelectionDAG &DAG) {
15197   assert((VT.is256BitVector() || VT.is512BitVector()) &&
15198          "Expected 256-bit or 512-bit vector");
15199 
15200   bool UndefLower = isUndefLowerHalf(Mask);
15201   if (!UndefLower && !isUndefUpperHalf(Mask))
15202     return SDValue();
15203 
15204   assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
15205          "Completely undef shuffle mask should have been simplified already");
15206 
15207   // Upper half is undef and lower half is whole upper subvector.
15208   // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
15209   MVT HalfVT = VT.getHalfNumVectorElementsVT();
15210   unsigned HalfNumElts = HalfVT.getVectorNumElements();
15211   if (!UndefLower &&
15212       isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
15213     SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15214                              DAG.getIntPtrConstant(HalfNumElts, DL));
15215     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15216                        DAG.getIntPtrConstant(0, DL));
15217   }
15218 
15219   // Lower half is undef and upper half is whole lower subvector.
15220   // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
15221   if (UndefLower &&
15222       isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
15223     SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
15224                              DAG.getIntPtrConstant(0, DL));
15225     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
15226                        DAG.getIntPtrConstant(HalfNumElts, DL));
15227   }
15228 
15229   int HalfIdx1, HalfIdx2;
15230   SmallVector<int, 8> HalfMask(HalfNumElts);
15231   if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
15232     return SDValue();
15233 
15234   assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");
15235 
15236   // Only shuffle the halves of the inputs when useful.
15237   unsigned NumLowerHalves =
15238       (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
15239   unsigned NumUpperHalves =
15240       (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
15241   assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");
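  // Half indices 0 and 2 name the low halves of V1/V2 and 1 and 3 the high
  // halves (see getShuffleHalfVectors), so these count how many of the
  // referenced halves are low vs. high.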
15242 
15243   // Determine the larger pattern of undef/halves, then decide if it's worth
15244   // splitting the shuffle based on subtarget capabilities and types.
15245   unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
15246   if (!UndefLower) {
15247     // XXXXuuuu: no insert is needed.
15248     // Always extract lowers when setting lower - these are all free subreg ops.
15249     if (NumUpperHalves == 0)
15250       return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15251                                    UndefLower, DAG);
15252 
15253     if (NumUpperHalves == 1) {
15254       // AVX2 has efficient 32/64-bit element cross-lane shuffles.
15255       if (Subtarget.hasAVX2()) {
15256         // extract128 + vunpckhps/vshufps is better than vblend + vpermps.
15257         if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
15258             !is128BitUnpackShuffleMask(HalfMask, DAG) &&
15259             (!isSingleSHUFPSMask(HalfMask) ||
15260              Subtarget.hasFastVariableCrossLaneShuffle()))
15261           return SDValue();
15262         // If this is a unary shuffle (assume that the 2nd operand is
15263         // canonicalized to undef), then we can use vpermpd. Otherwise, we
15264         // are better off extracting the upper half of 1 operand and using a
15265         // narrow shuffle.
15266         if (EltWidth == 64 && V2.isUndef())
15267           return SDValue();
15268       }
15269       // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15270       if (Subtarget.hasAVX512() && VT.is512BitVector())
15271         return SDValue();
15272       // Extract + narrow shuffle is better than the wide alternative.
15273       return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15274                                    UndefLower, DAG);
15275     }
15276 
15277     // Don't extract both uppers, instead shuffle and then extract.
15278     assert(NumUpperHalves == 2 && "Half vector count went wrong");
15279     return SDValue();
15280   }
15281 
15282   // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
15283   if (NumUpperHalves == 0) {
15284     // AVX2 has efficient 64-bit element cross-lane shuffles.
15285     // TODO: Refine to account for unary shuffle, splat, and other masks?
15286     if (Subtarget.hasAVX2() && EltWidth == 64)
15287       return SDValue();
15288     // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
15289     if (Subtarget.hasAVX512() && VT.is512BitVector())
15290       return SDValue();
15291     // Narrow shuffle + insert is better than the wide alternative.
15292     return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
15293                                  UndefLower, DAG);
15294   }
15295 
15296   // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
15297   return SDValue();
15298 }
15299 
15300 /// Handle case where shuffle sources are coming from the same 128-bit lane and
15301 /// every lane can be represented as the same repeating mask - allowing us to
15302 /// shuffle the sources with the repeating shuffle and then permute the result
15303 /// to the destination lanes.
15304 static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
15305     const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
15306     const X86Subtarget &Subtarget, SelectionDAG &DAG) {
15307   int NumElts = VT.getVectorNumElements();
15308   int NumLanes = VT.getSizeInBits() / 128;
15309   int NumLaneElts = NumElts / NumLanes;
15310 
15311   // On AVX2 we may be able to just shuffle the lowest elements and then
15312   // broadcast the result.
15313   if (Subtarget.hasAVX2()) {
15314     for (unsigned BroadcastSize : {16, 32, 64}) {
15315       if (BroadcastSize <= VT.getScalarSizeInBits())
15316         continue;
15317       int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();
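      // e.g. for v8i32 only BroadcastSize == 64 survives the check above,
      // giving NumBroadcastElts == 2, i.e. a repeating <A,B,A,B,...> pattern.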
15318 
15319       // Attempt to match a repeating pattern every NumBroadcastElts,
15320       // accounting for UNDEFs, but only referencing the lowest 128-bit
15321       // lane of the inputs.
15322       auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
15323         for (int i = 0; i != NumElts; i += NumBroadcastElts)
15324           for (int j = 0; j != NumBroadcastElts; ++j) {
15325             int M = Mask[i + j];
15326             if (M < 0)
15327               continue;
15328             int &R = RepeatMask[j];
15329             if (0 != ((M % NumElts) / NumLaneElts))
15330               return false;
15331             if (0 <= R && R != M)
15332               return false;
15333             R = M;
15334           }
15335         return true;
15336       };
15337 
15338       SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
15339       if (!FindRepeatingBroadcastMask(RepeatMask))
15340         continue;
15341 
15342       // Shuffle the (lowest) repeated elements in place for broadcast.
15343       SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);
15344 
15345       // Shuffle the actual broadcast.
15346       SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
15347       for (int i = 0; i != NumElts; i += NumBroadcastElts)
15348         for (int j = 0; j != NumBroadcastElts; ++j)
15349           BroadcastMask[i + j] = j;
15350 
15351       // Avoid returning the same shuffle operation. For example,
15352       // v8i32 = vector_shuffle<0,1,0,1,0,1,0,1> t5, undef:v8i32
15353       if (BroadcastMask == Mask)
15354         return SDValue();
15355 
15356       return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
15357                                   BroadcastMask);
15358     }
15359   }
15360 
15361   // Bail if the shuffle mask doesn't cross 128-bit lanes.
15362   if (!is128BitLaneCrossingShuffleMask(VT, Mask))
15363     return SDValue();
15364 
15365   // Bail if we already have a repeated lane shuffle mask.
15366   if (is128BitLaneRepeatedShuffleMask(VT, Mask))
15367     return SDValue();
15368 
15369   // Helper to look for a repeated mask in each split sublane, and to check
15370   // that those sublanes can then be permuted into place.
15371   auto ShuffleSubLanes = [&](int SubLaneScale) {
15372     int NumSubLanes = NumLanes * SubLaneScale;
15373     int NumSubLaneElts = NumLaneElts / SubLaneScale;
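    // e.g. for v8f32 (NumLanes == 2, NumLaneElts == 4), SubLaneScale == 2
    // gives NumSubLanes == 4 and NumSubLaneElts == 2.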
15374 
15375     // Check that all the sources are coming from the same lane and see if we
15376     // can form a repeating shuffle mask (local to each sub-lane). At the same
15377     // time, determine the source sub-lane for each destination sub-lane.
15378     int TopSrcSubLane = -1;
15379     SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
15380     SmallVector<SmallVector<int, 8>> RepeatedSubLaneMasks(
15381         SubLaneScale,
15382         SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef));
15383 
15384     for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
15385       // Extract the sub-lane mask, check that it all comes from the same lane
15386       // and normalize the mask entries to come from the first lane.
15387       int SrcLane = -1;
15388       SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
15389       for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15390         int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
15391         if (M < 0)
15392           continue;
15393         int Lane = (M % NumElts) / NumLaneElts;
15394         if ((0 <= SrcLane) && (SrcLane != Lane))
15395           return SDValue();
15396         SrcLane = Lane;
15397         int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
15398         SubLaneMask[Elt] = LocalM;
15399       }
15400 
15401       // Whole sub-lane is UNDEF.
15402       if (SrcLane < 0)
15403         continue;
15404 
15405       // Attempt to match against the candidate repeated sub-lane masks.
15406       for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
15407         auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
15408           for (int i = 0; i != NumSubLaneElts; ++i) {
15409             if (M1[i] < 0 || M2[i] < 0)
15410               continue;
15411             if (M1[i] != M2[i])
15412               return false;
15413           }
15414           return true;
15415         };
15416 
15417         auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
15418         if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
15419           continue;
15420 
15421         // Merge the sub-lane mask into the matching repeated sub-lane mask.
15422         for (int i = 0; i != NumSubLaneElts; ++i) {
15423           int M = SubLaneMask[i];
15424           if (M < 0)
15425             continue;
15426           assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
15427                  "Unexpected mask element");
15428           RepeatedSubLaneMask[i] = M;
15429         }
15430 
15431         // Track the top most source sub-lane - by setting the remaining to
15432         // UNDEF we can greatly simplify shuffle matching.
15433         int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
15434         TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
15435         Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
15436         break;
15437       }
15438 
15439       // Bail if we failed to find a matching repeated sub-lane mask.
15440       if (Dst2SrcSubLanes[DstSubLane] < 0)
15441         return SDValue();
15442     }
15443     assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
15444            "Unexpected source lane");
15445 
15446     // Create a repeating shuffle mask for the entire vector.
15447     SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
15448     for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
15449       int Lane = SubLane / SubLaneScale;
15450       auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
15451       for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
15452         int M = RepeatedSubLaneMask[Elt];
15453         if (M < 0)
15454           continue;
15455         int Idx = (SubLane * NumSubLaneElts) + Elt;
15456         RepeatedMask[Idx] = M + (Lane * NumLaneElts);
15457       }
15458     }
15459 
15460     // Shuffle each source sub-lane to its destination.
15461     SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
15462     for (int i = 0; i != NumElts; i += NumSubLaneElts) {
15463       int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
15464       if (SrcSubLane < 0)
15465         continue;
15466       for (int j = 0; j != NumSubLaneElts; ++j)
15467         SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
15468     }
15469 
15470     // Avoid returning the same shuffle operation.
15471     // v8i32 = vector_shuffle<0,1,4,5,2,3,6,7> t5, undef:v8i32
15472     if (RepeatedMask == Mask || SubLaneMask == Mask)
15473       return SDValue();
15474 
15475     SDValue RepeatedShuffle =
15476         DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);
15477 
15478     return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
15479                                 SubLaneMask);
15480   };
15481 
15482   // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
15483   // (with PERMQ/PERMPD). On AVX2/AVX512BW targets, permuting 32-bit sub-lanes,
15484   // even with a variable shuffle, can be worth it for v32i8/v64i8 vectors.
15485   // Otherwise we can only permute whole 128-bit lanes.
15486   int MinSubLaneScale = 1, MaxSubLaneScale = 1;
15487   if (Subtarget.hasAVX2() && VT.is256BitVector()) {
15488     bool OnlyLowestElts = isUndefOrInRange(Mask, 0, NumLaneElts);
15489     MinSubLaneScale = 2;
15490     MaxSubLaneScale =
15491         (!OnlyLowestElts && V2.isUndef() && VT == MVT::v32i8) ? 4 : 2;
15492   }
15493   if (Subtarget.hasBWI() && VT == MVT::v64i8)
15494     MinSubLaneScale = MaxSubLaneScale = 4;
15495 
15496   for (int Scale = MinSubLaneScale; Scale <= MaxSubLaneScale; Scale *= 2)
15497     if (SDValue Shuffle = ShuffleSubLanes(Scale))
15498       return Shuffle;
15499 
15500   return SDValue();
15501 }
15502 
15503 static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
15504                                    bool &ForceV1Zero, bool &ForceV2Zero,
15505                                    unsigned &ShuffleImm, ArrayRef<int> Mask,
15506                                    const APInt &Zeroable) {
15507   int NumElts = VT.getVectorNumElements();
15508   assert(VT.getScalarSizeInBits() == 64 &&
15509          (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
15510          "Unexpected data type for VSHUFPD");
15511   assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
15512          "Illegal shuffle mask");
15513 
15514   bool ZeroLane[2] = { true, true };
15515   for (int i = 0; i < NumElts; ++i)
15516     ZeroLane[i & 1] &= Zeroable[i];
15517 
15518   // Mask for V8F64: 0/1,  8/9,  2/3,  10/11, 4/5, ..
15519   // Mask for V4F64: 0/1,  4/5,  2/3,  6/7, ..
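  // The immediate gets one bit per element (Mask[i] % 2): e.g. a v4f64 mask
  // <1,5,2,7> produces ShuffleImm 0b1011.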
15520   ShuffleImm = 0;
15521   bool ShufpdMask = true;
15522   bool CommutableMask = true;
15523   for (int i = 0; i < NumElts; ++i) {
15524     if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
15525       continue;
15526     if (Mask[i] < 0)
15527       return false;
15528     int Val = (i & 6) + NumElts * (i & 1);
15529     int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
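    // For v4f64, Val restricts Mask[i] to {0/1, 4/5, 2/3, 6/7} across
    // i = 0..3, while CommutVal accepts the operand-swapped pattern
    // {4/5, 0/1, 6/7, 2/3}.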
15530     if (Mask[i] < Val || Mask[i] > Val + 1)
15531       ShufpdMask = false;
15532     if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
15533       CommutableMask = false;
15534     ShuffleImm |= (Mask[i] % 2) << i;
15535   }
15536 
15537   if (!ShufpdMask && !CommutableMask)
15538     return false;
15539 
15540   if (!ShufpdMask && CommutableMask)
15541     std::swap(V1, V2);
15542 
15543   ForceV1Zero = ZeroLane[0];
15544   ForceV2Zero = ZeroLane[1];
15545   return true;
15546 }
15547 
15548 static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
15549                                       SDValue V2, ArrayRef<int> Mask,
15550                                       const APInt &Zeroable,
15551                                       const X86Subtarget &Subtarget,
15552                                       SelectionDAG &DAG) {
15553   assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
15554          "Unexpected data type for VSHUFPD");
15555 
15556   unsigned Immediate = 0;
15557   bool ForceV1Zero = false, ForceV2Zero = false;
15558   if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
15559                               Mask, Zeroable))
15560     return SDValue();
15561 
15562   // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
15563   if (ForceV1Zero)
15564     V1 = getZeroVector(VT, Subtarget, DAG, DL);
15565   if (ForceV2Zero)
15566     V2 = getZeroVector(VT, Subtarget, DAG, DL);
15567 
15568   return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
15569                      DAG.getTargetConstant(Immediate, DL, MVT::i8));
15570 }
15571 
15572 // Look for {0, 8, 16, 24, 32, 40, 48, 56 } in the first 8 elements, followed
15573 // by zeroable elements in the remaining 24 elements. Turn this into two
15574 // vmovqb instructions shuffled together.
15575 static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
15576                                              SDValue V1, SDValue V2,
15577                                              ArrayRef<int> Mask,
15578                                              const APInt &Zeroable,
15579                                              SelectionDAG &DAG) {
15580   assert(VT == MVT::v32i8 && "Unexpected type!");
15581 
15582   // The first 8 indices should be every 8th element.
15583   if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
15584     return SDValue();
15585 
15586   // Remaining elements need to be zeroable.
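  // (Zeroable bit i corresponds to mask element i, so countl_one() >= 24
  // means elements 8..31 must all be zeroable.)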
15587   if (Zeroable.countl_one() < (Mask.size() - 8))
15588     return SDValue();
15589 
15590   V1 = DAG.getBitcast(MVT::v4i64, V1);
15591   V2 = DAG.getBitcast(MVT::v4i64, V2);
15592 
15593   V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
15594   V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);
15595 
15596   // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
15597   // the upper bits of the result using an unpckldq.
15598   SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
15599                                         { 0, 1, 2, 3, 16, 17, 18, 19,
15600                                           4, 5, 6, 7, 20, 21, 22, 23 });
15601   // Insert the unpckldq into a zero vector to widen to v32i8.
15602   return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
15603                      DAG.getConstant(0, DL, MVT::v32i8), Unpack,
15604                      DAG.getIntPtrConstant(0, DL));
15605 }
15606 
15607 // a = shuffle v1, v2, mask1    ; interleaving lower lanes of v1 and v2
15608 // b = shuffle v1, v2, mask2    ; interleaving higher lanes of v1 and v2
15609 //     =>
15610 // ul = unpckl v1, v2
15611 // uh = unpckh v1, v2
15612 // a = vperm ul, uh
15613 // b = vperm ul, uh
15614 //
15615 // Pattern-match interleave(256b v1, 256b v2) -> 512b v3 and lower it into unpck
15616 // and permute. We cannot directly match v3 because it is split into two
15617 // 256-bit vectors in earlier isel stages. Therefore, this function matches a
15618 // pair of 256-bit shuffles and makes sure the masks are consecutive.
15619 //
15620 // Once unpck and permute nodes are created, the permute corresponding to this
15621 // shuffle is returned, while the other permute replaces the other half of the
15622 // shuffle in the selection dag.
15623 static SDValue lowerShufflePairAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
15624                                                  SDValue V1, SDValue V2,
15625                                                  ArrayRef<int> Mask,
15626                                                  SelectionDAG &DAG) {
15627   if (VT != MVT::v8f32 && VT != MVT::v8i32 && VT != MVT::v16i16 &&
15628       VT != MVT::v32i8)
15629     return SDValue();
15630   // <B0, B1, B0+1, B1+1, ..., >
15631   auto IsInterleavingPattern = [&](ArrayRef<int> Mask, unsigned Begin0,
15632                                    unsigned Begin1) {
15633     size_t Size = Mask.size();
15634     assert(Size % 2 == 0 && "Expected even mask size");
15635     for (unsigned I = 0; I < Size; I += 2) {
15636       if (Mask[I] != (int)(Begin0 + I / 2) ||
15637           Mask[I + 1] != (int)(Begin1 + I / 2))
15638         return false;
15639     }
15640     return true;
15641   };
15642   // Check which half this shuffle node is.
15643   int NumElts = VT.getVectorNumElements();
15644   size_t FirstQtr = NumElts / 2;
15645   size_t ThirdQtr = NumElts + NumElts / 2;
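  // e.g. for v8i32 the first-half interleave mask is <0,8,1,9,2,10,3,11> and
  // the second-half mask is <4,12,5,13,6,14,7,15>.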
15646   bool IsFirstHalf = IsInterleavingPattern(Mask, 0, NumElts);
15647   bool IsSecondHalf = IsInterleavingPattern(Mask, FirstQtr, ThirdQtr);
15648   if (!IsFirstHalf && !IsSecondHalf)
15649     return SDValue();
15650 
15651   // Find the intersection between shuffle users of V1 and V2.
15652   SmallVector<SDNode *, 2> Shuffles;
15653   for (SDNode *User : V1->uses())
15654     if (User->getOpcode() == ISD::VECTOR_SHUFFLE && User->getOperand(0) == V1 &&
15655         User->getOperand(1) == V2)
15656       Shuffles.push_back(User);
15657   // Limit user size to two for now.
15658   if (Shuffles.size() != 2)
15659     return SDValue();
15660   // Find out which half of the 512-bit shuffle each smaller shuffle is.
15661   auto *SVN1 = cast<ShuffleVectorSDNode>(Shuffles[0]);
15662   auto *SVN2 = cast<ShuffleVectorSDNode>(Shuffles[1]);
15663   SDNode *FirstHalf;
15664   SDNode *SecondHalf;
15665   if (IsInterleavingPattern(SVN1->getMask(), 0, NumElts) &&
15666       IsInterleavingPattern(SVN2->getMask(), FirstQtr, ThirdQtr)) {
15667     FirstHalf = Shuffles[0];
15668     SecondHalf = Shuffles[1];
15669   } else if (IsInterleavingPattern(SVN1->getMask(), FirstQtr, ThirdQtr) &&
15670              IsInterleavingPattern(SVN2->getMask(), 0, NumElts)) {
15671     FirstHalf = Shuffles[1];
15672     SecondHalf = Shuffles[0];
15673   } else {
15674     return SDValue();
15675   }
15676   // Lower into unpck and perm. Return the perm of this shuffle and replace
15677   // the other.
15678   SDValue Unpckl = DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
15679   SDValue Unpckh = DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
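  // VPERM2X128 imm 0x20 concatenates the low 128-bit lanes of Unpckl and
  // Unpckh; 0x31 concatenates their high lanes.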
15680   SDValue Perm1 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
15681                               DAG.getTargetConstant(0x20, DL, MVT::i8));
15682   SDValue Perm2 = DAG.getNode(X86ISD::VPERM2X128, DL, VT, Unpckl, Unpckh,
15683                               DAG.getTargetConstant(0x31, DL, MVT::i8));
15684   if (IsFirstHalf) {
15685     DAG.ReplaceAllUsesWith(SecondHalf, &Perm2);
15686     return Perm1;
15687   }
15688   DAG.ReplaceAllUsesWith(FirstHalf, &Perm1);
15689   return Perm2;
15690 }
15691 
15692 /// Handle lowering of 4-lane 64-bit floating point shuffles.
15693 ///
15694 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
15695 /// isn't available.
15696 static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15697                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15698                                  const X86Subtarget &Subtarget,
15699                                  SelectionDAG &DAG) {
15700   assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15701   assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
15702   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15703 
15704   if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
15705                                      Subtarget, DAG))
15706     return V;
15707 
15708   if (V2.isUndef()) {
15709     // Check for being able to broadcast a single element.
15710     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
15711                                                     Mask, Subtarget, DAG))
15712       return Broadcast;
15713 
15714     // Use low duplicate instructions for masks that match their pattern.
15715     if (isShuffleEquivalent(Mask, {0, 0, 2, 2}, V1, V2))
15716       return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
15717 
15718     if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
15719       // Non-half-crossing single input shuffles can be lowered with an
15720       // interleaved permutation.
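      // e.g. Mask <1,0,3,2> (swap within each 128-bit lane) gives the
      // VPERMILPD immediate 0b0101.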
15721       unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
15722                               ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
15723       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
15724                          DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
15725     }
15726 
15727     // With AVX2 we have direct support for this permutation.
15728     if (Subtarget.hasAVX2())
15729       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
15730                          getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15731 
15732     // Try to create an in-lane repeating shuffle mask and then shuffle the
15733     // results into the target lanes.
15734     if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15735             DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15736       return V;
15737 
15738     // Try to permute the lanes and then use a per-lane permute.
15739     if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
15740                                                         Mask, DAG, Subtarget))
15741       return V;
15742 
15743     // Otherwise, fall back.
15744     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
15745                                                DAG, Subtarget);
15746   }
15747 
15748   // Use dedicated unpack instructions for masks that match their pattern.
15749   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
15750     return V;
15751 
15752   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
15753                                           Zeroable, Subtarget, DAG))
15754     return Blend;
15755 
15756   // Check if the blend happens to exactly fit that of SHUFPD.
15757   if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
15758                                           Zeroable, Subtarget, DAG))
15759     return Op;
15760 
15761   bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
15762   bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
15763 
15764   // If we have lane crossing shuffles AND they don't all come from the lower
15765   // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
15766   // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
15767   // canonicalize to a blend of splat which isn't necessary for this combine.
15768   if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
15769       !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
15770       (V1.getOpcode() != ISD::BUILD_VECTOR) &&
15771       (V2.getOpcode() != ISD::BUILD_VECTOR))
15772     return lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2, Mask, DAG);
15773 
15774   // If we have one input in place, then we can permute the other input and
15775   // blend the result.
15776   if (V1IsInPlace || V2IsInPlace)
15777     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
15778                                                 Subtarget, DAG);
15779 
15780   // Try to create an in-lane repeating shuffle mask and then shuffle the
15781   // results into the target lanes.
15782   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15783           DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15784     return V;
15785 
15786   // Try to simplify this by merging 128-bit lanes to enable a lane-based
15787   // shuffle. However, if we have AVX2 and either input is already in place,
15788   // we will be able to shuffle the other input even across lanes in a single
15789   // instruction, so skip this pattern.
15790   if (!(Subtarget.hasAVX2() && (V1IsInPlace || V2IsInPlace)))
15791     if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
15792             DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
15793       return V;
15794 
15795   // If we have VLX support, we can use VEXPAND.
15796   if (Subtarget.hasVLX())
15797     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
15798                                          DAG, Subtarget))
15799       return V;
15800 
15801   // If we have AVX2 then we always want to lower with a blend because at v4
15802   // we can fully permute the elements.
15803   if (Subtarget.hasAVX2())
15804     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4f64, V1, V2, Mask,
15805                                                 Subtarget, DAG);
15806 
15807   // Otherwise fall back on generic lowering.
15808   return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
15809                                     Subtarget, DAG);
15810 }
15811 
15812 /// Handle lowering of 4-lane 64-bit integer shuffles.
15813 ///
15814 /// This routine is only called when we have AVX2 and thus a reasonable
15815 /// instruction set for v4i64 shuffling.
15816 static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15817                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15818                                  const X86Subtarget &Subtarget,
15819                                  SelectionDAG &DAG) {
15820   assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15821   assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
15822   assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
15823   assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");
15824 
15825   if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15826                                      Subtarget, DAG))
15827     return V;
15828 
15829   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
15830                                           Zeroable, Subtarget, DAG))
15831     return Blend;
15832 
15833   // Check for being able to broadcast a single element.
15834   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
15835                                                   Subtarget, DAG))
15836     return Broadcast;
15837 
15838   // Try to use shift instructions if fast.
15839   if (Subtarget.preferLowerShuffleAsShift())
15840     if (SDValue Shift =
15841             lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
15842                                 Subtarget, DAG, /*BitwiseOnly*/ true))
15843       return Shift;
15844 
15845   if (V2.isUndef()) {
15846     // When the shuffle is mirrored between the 128-bit lanes of the unit, we
15847     // can use lower latency instructions that will operate on both lanes.
15848     SmallVector<int, 2> RepeatedMask;
15849     if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
15850       SmallVector<int, 4> PSHUFDMask;
15851       narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
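      // e.g. a repeated 64-bit mask <1,0> becomes the 32-bit PSHUFD mask
      // <2,3,0,1>.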
15852       return DAG.getBitcast(
15853           MVT::v4i64,
15854           DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
15855                       DAG.getBitcast(MVT::v8i32, V1),
15856                       getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
15857     }
15858 
15859     // AVX2 provides a direct instruction for permuting a single input across
15860     // lanes.
15861     return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
15862                        getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
15863   }
15864 
15865   // Try to use shift instructions.
15866   if (SDValue Shift =
15867           lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, Zeroable, Subtarget,
15868                               DAG, /*BitwiseOnly*/ false))
15869     return Shift;
15870 
15871   // If we have VLX support, we can use VALIGN or VEXPAND.
15872   if (Subtarget.hasVLX()) {
15873     if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i64, V1, V2, Mask,
15874                                               Zeroable, Subtarget, DAG))
15875       return Rotate;
15876 
15877     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
15878                                          DAG, Subtarget))
15879       return V;
15880   }
15881 
15882   // Try to use PALIGNR.
15883   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
15884                                                 Subtarget, DAG))
15885     return Rotate;
15886 
15887   // Use dedicated unpack instructions for masks that match their pattern.
15888   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
15889     return V;
15890 
15891   bool V1IsInPlace = isShuffleMaskInputInPlace(0, Mask);
15892   bool V2IsInPlace = isShuffleMaskInputInPlace(1, Mask);
15893 
15894   // If we have one input in place, then we can permute the other input and
15895   // blend the result.
15896   if (V1IsInPlace || V2IsInPlace)
15897     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
15898                                                 Subtarget, DAG);
15899 
15900   // Try to create an in-lane repeating shuffle mask and then shuffle the
15901   // results into the target lanes.
15902   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15903           DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15904     return V;
15905 
15906   // Try to lower to PERMQ(BLENDD(V1,V2)).
15907   if (SDValue V =
15908           lowerShuffleAsBlendAndPermute(DL, MVT::v4i64, V1, V2, Mask, DAG))
15909     return V;
15910 
15911   // Try to simplify this by merging 128-bit lanes to enable a lane-based
15912   // shuffle. However, if we have AVX2 and either input is already in place,
15913   // we will be able to shuffle the other input even across lanes in a single
15914   // instruction, so skip this pattern.
15915   if (!V1IsInPlace && !V2IsInPlace)
15916     if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
15917             DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
15918       return Result;
15919 
15920   // Otherwise fall back on generic blend lowering.
15921   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v4i64, V1, V2, Mask,
15922                                               Subtarget, DAG);
15923 }
15924 
15925 /// Handle lowering of 8-lane 32-bit floating point shuffles.
15926 ///
15927 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
15928 /// isn't available.
15929 static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
15930                                  const APInt &Zeroable, SDValue V1, SDValue V2,
15931                                  const X86Subtarget &Subtarget,
15932                                  SelectionDAG &DAG) {
15933   assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15934   assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
15935   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
15936 
15937   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
15938                                           Zeroable, Subtarget, DAG))
15939     return Blend;
15940 
15941   // Check for being able to broadcast a single element.
15942   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
15943                                                   Subtarget, DAG))
15944     return Broadcast;
15945 
15946   if (!Subtarget.hasAVX2()) {
15947     SmallVector<int> InLaneMask;
15948     computeInLaneShuffleMask(Mask, Mask.size() / 2, InLaneMask);
15949 
15950     if (!is128BitLaneRepeatedShuffleMask(MVT::v8f32, InLaneMask))
15951       if (SDValue R = splitAndLowerShuffle(DL, MVT::v8f32, V1, V2, Mask, DAG,
15952                                            /*SimpleOnly*/ true))
15953         return R;
15954   }
15955   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
15956                                                    Zeroable, Subtarget, DAG))
15957     return DAG.getBitcast(MVT::v8f32, ZExt);
15958 
15959   // If the shuffle mask is repeated in each 128-bit lane, we have many more
15960   // options to efficiently lower the shuffle.
15961   SmallVector<int, 4> RepeatedMask;
15962   if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
15963     assert(RepeatedMask.size() == 4 &&
15964            "Repeated masks must be half the mask width!");
15965 
15966     // Use even/odd duplicate instructions for masks that match their pattern.
15967     if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
15968       return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
15969     if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
15970       return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
15971 
15972     if (V2.isUndef())
15973       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
15974                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
15975 
15976     // Use dedicated unpack instructions for masks that match their pattern.
15977     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
15978       return V;
15979 
15980     // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
15981     // have already handled any direct blends.
15982     return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
15983   }
15984 
15985   // Try to create an in-lane repeating shuffle mask and then shuffle the
15986   // results into the target lanes.
15987   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
15988           DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
15989     return V;
15990 
15991   // If we have a single input shuffle with different shuffle patterns in the
15992   // two 128-bit lanes use the variable mask to VPERMILPS.
15993   if (V2.isUndef()) {
15994     if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
15995       SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
15996       return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
15997     }
15998     if (Subtarget.hasAVX2()) {
15999       SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16000       return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
16001     }
16002     // Otherwise, fall back.
16003     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
16004                                                DAG, Subtarget);
16005   }
16006 
16007   // Try to simplify this by merging 128-bit lanes to enable a lane-based
16008   // shuffle.
16009   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16010           DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
16011     return Result;
16012 
16013   // If we have VLX support, we can use VEXPAND.
16014   if (Subtarget.hasVLX())
16015     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
16016                                          DAG, Subtarget))
16017       return V;
16018 
16019   // Try to match an interleave of two v8f32s and lower them as unpck and
16020   // permutes using ymms. This needs to go before we try to split the vectors.
16021   //
16022   // TODO: Expand this to AVX1. Currently v8i32 is cast to v8f32 and hits
16023   // this path inadvertently.
16024   if (Subtarget.hasAVX2() && !Subtarget.hasAVX512())
16025     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8f32, V1, V2,
16026                                                       Mask, DAG))
16027       return V;
16028 
16029   // For non-AVX512, if the mask matches an in-lane 16-bit unpack pattern, try
16030   // to split, since after the split we get more efficient code using
16031   // vpunpcklwd and vpunpckhwd than with vblend.
16032   if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32, DAG))
16033     return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, Subtarget,
16034                                       DAG);
16035 
16036   // If we have AVX2 then we always want to lower with a blend because at v8 we
16037   // can fully permute the elements.
16038   if (Subtarget.hasAVX2())
16039     return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8f32, V1, V2, Mask,
16040                                                 Subtarget, DAG);
16041 
16042   // Otherwise fall back on generic lowering.
16043   return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
16044                                     Subtarget, DAG);
16045 }
16046 
16047 /// Handle lowering of 8-lane 32-bit integer shuffles.
16048 ///
16049 /// This routine is only called when we have AVX2 and thus a reasonable
16050 /// instruction set for v8i32 shuffling.
16051 static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16052                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16053                                  const X86Subtarget &Subtarget,
16054                                  SelectionDAG &DAG) {
16055   assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16056   assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
16057   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16058   assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");
16059 
16060   int NumV2Elements = count_if(Mask, [](int M) { return M >= 8; });
16061 
16062   // Whenever we can lower this as a zext, that instruction is strictly faster
16063   // than any alternative. It also allows us to fold memory operands into the
16064   // shuffle in many cases.
16065   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
16066                                                    Zeroable, Subtarget, DAG))
16067     return ZExt;
16068 
16069   // Try to match an interleave of two v8i32s and lower them as unpck and
16070   // permutes using ymms. This needs to go before we try to split the vectors.
16071   if (!Subtarget.hasAVX512())
16072     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v8i32, V1, V2,
16073                                                       Mask, DAG))
16074       return V;
16075 
16076   // For non-AVX512, if the mask matches an in-lane 16-bit unpack pattern, try
16077   // to split, since after the split we get more efficient code than vblend by
16078   // using vpunpcklwd and vpunpckhwd.
16079   if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndef() &&
16080       !Subtarget.hasAVX512())
16081     return lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, Subtarget,
16082                                       DAG);
16083 
16084   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
16085                                           Zeroable, Subtarget, DAG))
16086     return Blend;
16087 
16088   // Check for being able to broadcast a single element.
16089   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
16090                                                   Subtarget, DAG))
16091     return Broadcast;
16092 
16093   // Try to use shift instructions if fast.
16094   if (Subtarget.preferLowerShuffleAsShift()) {
16095     if (SDValue Shift =
16096             lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, Zeroable,
16097                                 Subtarget, DAG, /*BitwiseOnly*/ true))
16098       return Shift;
16099     if (NumV2Elements == 0)
16100       if (SDValue Rotate =
16101               lowerShuffleAsBitRotate(DL, MVT::v8i32, V1, Mask, Subtarget, DAG))
16102         return Rotate;
16103   }
16104 
16105   // If the shuffle mask is repeated in each 128-bit lane we can use more
16106   // efficient instructions that mirror the shuffles across the two 128-bit
16107   // lanes.
16108   SmallVector<int, 4> RepeatedMask;
16109   bool Is128BitLaneRepeatedShuffle =
16110       is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
16111   if (Is128BitLaneRepeatedShuffle) {
16112     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16113     if (V2.isUndef())
16114       return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
16115                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16116 
16117     // Use dedicated unpack instructions for masks that match their pattern.
16118     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
16119       return V;
16120   }
16121 
16122   // Try to use shift instructions.
16123   if (SDValue Shift =
16124           lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, Zeroable, Subtarget,
16125                               DAG, /*BitwiseOnly*/ false))
16126     return Shift;
16127 
16128   if (!Subtarget.preferLowerShuffleAsShift() && NumV2Elements == 0)
16129     if (SDValue Rotate =
16130             lowerShuffleAsBitRotate(DL, MVT::v8i32, V1, Mask, Subtarget, DAG))
16131       return Rotate;
16132 
16133   // If we have VLX support, we can use VALIGN or EXPAND.
16134   if (Subtarget.hasVLX()) {
16135     if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i32, V1, V2, Mask,
16136                                               Zeroable, Subtarget, DAG))
16137       return Rotate;
16138 
16139     if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
16140                                          DAG, Subtarget))
16141       return V;
16142   }
16143 
16144   // Try to use byte rotation instructions.
16145   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
16146                                                 Subtarget, DAG))
16147     return Rotate;
16148 
16149   // Try to create an in-lane repeating shuffle mask and then shuffle the
16150   // results into the target lanes.
16151   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16152           DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16153     return V;
16154 
16155   if (V2.isUndef()) {
16156     // Try to produce a fixed cross-128-bit lane permute followed by unpack
16157     // because that should be faster than the variable permute alternatives.
16158     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, Mask, V1, V2, DAG))
16159       return V;
16160 
16161     // If the shuffle patterns aren't repeated but it's a single input, directly
16162     // generate a cross-lane VPERMD instruction.
16163     SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
16164     return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
16165   }
16166 
16167   // Assume that a single SHUFPS is faster than an alternative sequence of
16168   // multiple instructions (even if the CPU has a domain penalty).
16169   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16170   if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16171     SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
16172     SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
16173     SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
16174                                             CastV1, CastV2, DAG);
16175     return DAG.getBitcast(MVT::v8i32, ShufPS);
16176   }
16177 
16178   // Try to simplify this by merging 128-bit lanes to enable a lane-based
16179   // shuffle.
16180   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16181           DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
16182     return Result;
16183 
16184   // Otherwise fall back on generic blend lowering.
16185   return lowerShuffleAsDecomposedShuffleMerge(DL, MVT::v8i32, V1, V2, Mask,
16186                                               Subtarget, DAG);
16187 }
16188 
16189 /// Handle lowering of 16-lane 16-bit integer shuffles.
16190 ///
16191 /// This routine is only called when we have AVX2 and thus a reasonable
16192 /// instruction set for v16i16 shuffling.
16193 static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16194                                   const APInt &Zeroable, SDValue V1, SDValue V2,
16195                                   const X86Subtarget &Subtarget,
16196                                   SelectionDAG &DAG) {
16197   assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16198   assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
16199   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16200   assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");
16201 
16202   // Whenever we can lower this as a zext, that instruction is strictly faster
16203   // than any alternative. It also allows us to fold memory operands into the
16204   // shuffle in many cases.
16205   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16206           DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16207     return ZExt;
16208 
16209   // Check for being able to broadcast a single element.
16210   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
16211                                                   Subtarget, DAG))
16212     return Broadcast;
16213 
16214   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
16215                                           Zeroable, Subtarget, DAG))
16216     return Blend;
16217 
16218   // Use dedicated unpack instructions for masks that match their pattern.
16219   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
16220     return V;
16221 
16222   // Use dedicated pack instructions for masks that match their pattern.
16223   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
16224                                        Subtarget))
16225     return V;
16226 
16227   // Try to lower using a truncation.
16228   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
16229                                        Subtarget, DAG))
16230     return V;
16231 
16232   // Try to use shift instructions.
16233   if (SDValue Shift =
16234           lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, Zeroable,
16235                               Subtarget, DAG, /*BitwiseOnly*/ false))
16236     return Shift;
16237 
16238   // Try to use byte rotation instructions.
16239   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
16240                                                 Subtarget, DAG))
16241     return Rotate;
16242 
16243   // Try to create an in-lane repeating shuffle mask and then shuffle the
16244   // results into the target lanes.
16245   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16246           DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16247     return V;
16248 
16249   if (V2.isUndef()) {
16250     // Try to use bit rotation instructions.
16251     if (SDValue Rotate =
16252             lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
16253       return Rotate;
16254 
16255     // Try to produce a fixed cross-128-bit lane permute followed by unpack
16256     // because that should be faster than the variable permute alternatives.
16257     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v16i16, Mask, V1, V2, DAG))
16258       return V;
16259 
16260     // There are no generalized cross-lane shuffle operations available on i16
16261     // element types.
16262     if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
16263       if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16264               DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16265         return V;
16266 
16267       return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
16268                                                  DAG, Subtarget);
16269     }
16270 
16271     SmallVector<int, 8> RepeatedMask;
16272     if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
16273       // As this is a single-input shuffle, the repeated mask should be
16274       // a strictly valid v8i16 mask that we can pass through to the v8i16
16275       // lowering to handle even the v16 case.
16276       return lowerV8I16GeneralSingleInputShuffle(
16277           DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
16278     }
16279   }
16280 
16281   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
16282                                               Zeroable, Subtarget, DAG))
16283     return PSHUFB;
16284 
16285   // AVX512BW can lower to VPERMW (non-VLX will pad to v32i16).
16286   if (Subtarget.hasBWI())
16287     return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, Subtarget, DAG);
16288 
16289   // Try to simplify this by merging 128-bit lanes to enable a lane-based
16290   // shuffle.
16291   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16292           DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
16293     return Result;
16294 
16295   // Try to permute the lanes and then use a per-lane permute.
16296   if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16297           DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
16298     return V;
16299 
16300   // Try to match an interleave of two v16i16s and lower them as unpck and
16301   // permutes using ymms.
16302   if (!Subtarget.hasAVX512())
16303     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v16i16, V1, V2,
16304                                                       Mask, DAG))
16305       return V;
16306 
16307   // Otherwise fall back on generic lowering.
16308   return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
16309                                     Subtarget, DAG);
16310 }
16311 
16312 /// Handle lowering of 32-lane 8-bit integer shuffles.
16313 ///
16314 /// This routine is only called when we have AVX2 and thus a reasonable
16315 /// instruction set for v32i8 shuffling.
16316 static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16317                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16318                                  const X86Subtarget &Subtarget,
16319                                  SelectionDAG &DAG) {
16320   assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16321   assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
16322   assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16323   assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");
16324 
16325   // Whenever we can lower this as a zext, that instruction is strictly faster
16326   // than any alternative. It also allows us to fold memory operands into the
16327   // shuffle in many cases.
16328   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
16329                                                    Zeroable, Subtarget, DAG))
16330     return ZExt;
16331 
16332   // Check for being able to broadcast a single element.
16333   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
16334                                                   Subtarget, DAG))
16335     return Broadcast;
16336 
16337   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
16338                                           Zeroable, Subtarget, DAG))
16339     return Blend;
16340 
16341   // Use dedicated unpack instructions for masks that match their pattern.
16342   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
16343     return V;
16344 
16345   // Use dedicated pack instructions for masks that match their pattern.
16346   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
16347                                        Subtarget))
16348     return V;
16349 
16350   // Try to lower using a truncation.
16351   if (SDValue V = lowerShuffleAsVTRUNC(DL, MVT::v32i8, V1, V2, Mask, Zeroable,
16352                                        Subtarget, DAG))
16353     return V;
16354 
16355   // Try to use shift instructions.
16356   if (SDValue Shift =
16357           lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, Zeroable, Subtarget,
16358                               DAG, /*BitwiseOnly*/ false))
16359     return Shift;
16360 
16361   // Try to use byte rotation instructions.
16362   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
16363                                                 Subtarget, DAG))
16364     return Rotate;
16365 
16366   // Try to use bit rotation instructions.
16367   if (V2.isUndef())
16368     if (SDValue Rotate =
16369             lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
16370       return Rotate;
16371 
16372   // Try to create an in-lane repeating shuffle mask and then shuffle the
16373   // results into the target lanes.
16374   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16375           DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16376     return V;
16377 
16378   // There are no generalized cross-lane shuffle operations available on i8
16379   // element types.
16380   if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
16381     // Try to produce a fixed cross-128-bit lane permute followed by unpack
16382     // because that should be faster than the variable permute alternatives.
16383     if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, Mask, V1, V2, DAG))
16384       return V;
16385 
16386     if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16387             DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16388       return V;
16389 
16390     return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
16391                                                DAG, Subtarget);
16392   }
16393 
16394   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
16395                                               Zeroable, Subtarget, DAG))
16396     return PSHUFB;
16397 
16398   // AVX512VBMI can lower to VPERMB (non-VLX will pad to v64i8).
16399   if (Subtarget.hasVBMI())
16400     return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, Subtarget, DAG);
16401 
16402   // Try to simplify this by merging 128-bit lanes to enable a lane-based
16403   // shuffle.
16404   if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
16405           DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
16406     return Result;
16407 
16408   // Try to permute the lanes and then use a per-lane permute.
16409   if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
16410           DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
16411     return V;
16412 
16413   // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
16414   // by zeroable elements in the remaining 24 elements. Turn this into two
16415   // vmovqb instructions shuffled together.
16416   if (Subtarget.hasVLX())
16417     if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
16418                                                   Mask, Zeroable, DAG))
16419       return V;
16420 
16421   // Try to match an interleave of two v32i8s and lower them as unpck and
16422   // permutes using ymms.
16423   if (!Subtarget.hasAVX512())
16424     if (SDValue V = lowerShufflePairAsUNPCKAndPermute(DL, MVT::v32i8, V1, V2,
16425                                                       Mask, DAG))
16426       return V;
16427 
16428   // Otherwise fall back on generic lowering.
16429   return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
16430                                     Subtarget, DAG);
16431 }
16432 
16433 /// High-level routine to lower various 256-bit x86 vector shuffles.
16434 ///
16435 /// This routine either breaks down the specific type of a 256-bit x86 vector
16436 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
16437 /// together based on the available instructions.
16438 static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
16439                                   SDValue V1, SDValue V2, const APInt &Zeroable,
16440                                   const X86Subtarget &Subtarget,
16441                                   SelectionDAG &DAG) {
16442   // If we have a single input to the zero element, insert that into V1 if we
16443   // can do so cheaply.
16444   int NumElts = VT.getVectorNumElements();
16445   int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
16446 
16447   if (NumV2Elements == 1 && Mask[0] >= NumElts)
16448     if (SDValue Insertion = lowerShuffleAsElementInsertion(
16449             DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
16450       return Insertion;
16451 
16452   // Handle special cases where the lower or upper half is UNDEF.
16453   if (SDValue V =
16454           lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
16455     return V;
16456 
16457   // There is a really nice hard cut-over between AVX1 and AVX2 that means we
16458   // can check for those subtargets here and avoid much of the subtarget
16459   // querying in the per-vector-type lowering routines. With AVX1 we have
16460   // essentially *zero* ability to manipulate a 256-bit vector with integer
16461   // types. Since we'll use floating point types there eventually, just
16462   // immediately cast everything to a float and operate entirely in that domain.
16463   if (VT.isInteger() && !Subtarget.hasAVX2()) {
16464     int ElementBits = VT.getScalarSizeInBits();
16465     if (ElementBits < 32) {
16466       // No floating point type available; if we can't use the bit operations
16467       // for masking/blending then decompose into 128-bit vectors.
16468       if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
16469                                             Subtarget, DAG))
16470         return V;
16471       if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
16472         return V;
16473       return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG, /*SimpleOnly*/ false);
16474     }
16475 
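    // For example, a v8i32 shuffle is retried here as v8f32 and a v4i64
    // shuffle as v4f64, so the AVX1 floating-point shuffle lowerings apply.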
16476     MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
16477                                 VT.getVectorNumElements());
16478     V1 = DAG.getBitcast(FpVT, V1);
16479     V2 = DAG.getBitcast(FpVT, V2);
16480     return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
16481   }
16482 
16483   if (VT == MVT::v16f16 || VT == MVT::v16bf16) {
16484     V1 = DAG.getBitcast(MVT::v16i16, V1);
16485     V2 = DAG.getBitcast(MVT::v16i16, V2);
16486     return DAG.getBitcast(VT,
16487                           DAG.getVectorShuffle(MVT::v16i16, DL, V1, V2, Mask));
16488   }
16489 
16490   switch (VT.SimpleTy) {
16491   case MVT::v4f64:
16492     return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16493   case MVT::v4i64:
16494     return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16495   case MVT::v8f32:
16496     return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16497   case MVT::v8i32:
16498     return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16499   case MVT::v16i16:
16500     return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16501   case MVT::v32i8:
16502     return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
16503 
16504   default:
16505     llvm_unreachable("Not a valid 256-bit x86 vector type!");
16506   }
16507 }
16508 
16509 /// Try to lower a vector shuffle as a shuffle of 128-bit subvectors.
16510 static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
16511                                   const APInt &Zeroable, SDValue V1, SDValue V2,
16512                                   const X86Subtarget &Subtarget,
16513                                   SelectionDAG &DAG) {
16514   assert(VT.getScalarSizeInBits() == 64 &&
16515          "Unexpected element type size for 128bit shuffle.");
16516 
16517   // Handling a 256-bit vector would require VLX, and most probably
16518   // lowerV2X128VectorShuffle() is a better solution for that case anyway.
16519   assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");
16520 
16521   // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
16522   SmallVector<int, 4> Widened128Mask;
16523   if (!canWidenShuffleElements(Mask, Widened128Mask))
16524     return SDValue();
16525   assert(Widened128Mask.size() == 4 && "Shuffle widening mismatch");
16526 
16527   // Try to use an insert into a zero vector.
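  // Here Zeroable has one bit per 64-bit element: 0xf0 means the upper four
  // elements (the high 256 bits) are zeroable, and 0x0c means elements 2-3
  // (the second 128-bit lane) are zeroable, in which case only the low 128
  // bits of V1 need to be inserted.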
16528   if (Widened128Mask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
16529       (Widened128Mask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
16530     unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
16531     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
16532     SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
16533                               DAG.getIntPtrConstant(0, DL));
16534     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
16535                        getZeroVector(VT, Subtarget, DAG, DL), LoV,
16536                        DAG.getIntPtrConstant(0, DL));
16537   }
16538 
16539   // Check for patterns which can be matched with a single insert of a 256-bit
16540   // subvector.
16541   bool OnlyUsesV1 = isShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3}, V1, V2);
16542   if (OnlyUsesV1 ||
16543       isShuffleEquivalent(Mask, {0, 1, 2, 3, 8, 9, 10, 11}, V1, V2)) {
16544     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
16545     SDValue SubVec =
16546         DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, OnlyUsesV1 ? V1 : V2,
16547                     DAG.getIntPtrConstant(0, DL));
16548     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
16549                        DAG.getIntPtrConstant(4, DL));
16550   }
16551 
16552   // See if this is an insertion of the lower 128-bits of V2 into V1.
16553   bool IsInsert = true;
16554   int V2Index = -1;
16555   for (int i = 0; i < 4; ++i) {
16556     assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
16557     if (Widened128Mask[i] < 0)
16558       continue;
16559 
16560     // Make sure all V1 subvectors are in place.
16561     if (Widened128Mask[i] < 4) {
16562       if (Widened128Mask[i] != i) {
16563         IsInsert = false;
16564         break;
16565       }
16566     } else {
16567       // Make sure we only have a single V2 index and it's the lowest 128 bits.
16568       if (V2Index >= 0 || Widened128Mask[i] != 4) {
16569         IsInsert = false;
16570         break;
16571       }
16572       V2Index = i;
16573     }
16574   }
16575   if (IsInsert && V2Index >= 0) {
16576     MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
16577     SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
16578                                  DAG.getIntPtrConstant(0, DL));
16579     return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
16580   }
16581 
16582   // See if we can widen to a 256-bit lane shuffle; we're going to lose 128-bit
16583   // lane UNDEF info by lowering to X86ISD::SHUF128 anyway, so by widening where
16584   // possible we at least ensure the lanes stay sequential to help later
16585   // combines.
16586   SmallVector<int, 2> Widened256Mask;
16587   if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
16588     Widened128Mask.clear();
16589     narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
16590   }
16591 
16592   // Try to lower to vshuf64x2/vshuf32x4.
16593   SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
16594   int PermMask[4] = {-1, -1, -1, -1};
16595   // Ensure elements came from the same Op.
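  // Illustrative example: a 128-bit-unit mask of {0, 1, 4, 5} selects
  // Ops = {V1, V2} with PermMask = {0, 1, 0, 1}, i.e. the low 256 bits of
  // each operand, encoded as immediate 0x44.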
16596   for (int i = 0; i < 4; ++i) {
16597     assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
16598     if (Widened128Mask[i] < 0)
16599       continue;
16600 
16601     SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
16602     unsigned OpIndex = i / 2;
16603     if (Ops[OpIndex].isUndef())
16604       Ops[OpIndex] = Op;
16605     else if (Ops[OpIndex] != Op)
16606       return SDValue();
16607 
16608     PermMask[i] = Widened128Mask[i] % 4;
16609   }
16610 
16611   return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
16612                      getV4X86ShuffleImm8ForMask(PermMask, DL, DAG));
16613 }
16614 
16615 /// Handle lowering of 8-lane 64-bit floating point shuffles.
16616 static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16617                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16618                                  const X86Subtarget &Subtarget,
16619                                  SelectionDAG &DAG) {
16620   assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16621   assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
16622   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16623 
16624   if (V2.isUndef()) {
16625     // Use low duplicate instructions for masks that match their pattern.
16626     if (isShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1, V2))
16627       return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);
16628 
16629     if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
16630       // Non-half-crossing single input shuffles can be lowered with an
16631       // interleaved permutation.
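      // For example, the in-lane pair swap Mask = {1, 0, 3, 2, 5, 4, 7, 6}
      // sets only the even-numbered bits below, giving VPERMILPMask = 0x55.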
16632       unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
16633                               ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
16634                               ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
16635                               ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
16636       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
16637                          DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
16638     }
16639 
16640     SmallVector<int, 4> RepeatedMask;
16641     if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
16642       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
16643                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16644   }
16645 
16646   if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
16647                                            V2, Subtarget, DAG))
16648     return Shuf128;
16649 
16650   if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
16651     return Unpck;
16652 
16653   // Check if the blend happens to exactly fit that of SHUFPD.
16654   if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
16655                                           Zeroable, Subtarget, DAG))
16656     return Op;
16657 
16658   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
16659                                        DAG, Subtarget))
16660     return V;
16661 
16662   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
16663                                           Zeroable, Subtarget, DAG))
16664     return Blend;
16665 
16666   return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, Subtarget, DAG);
16667 }
16668 
16669 /// Handle lowering of 16-lane 32-bit floating point shuffles.
16670 static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16671                                   const APInt &Zeroable, SDValue V1, SDValue V2,
16672                                   const X86Subtarget &Subtarget,
16673                                   SelectionDAG &DAG) {
16674   assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16675   assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
16676   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16677 
16678   // If the shuffle mask is repeated in each 128-bit lane, we have many more
16679   // options to efficiently lower the shuffle.
16680   SmallVector<int, 4> RepeatedMask;
16681   if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
16682     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16683 
16684     // Use even/odd duplicate instructions for masks that match their pattern.
16685     if (isShuffleEquivalent(RepeatedMask, {0, 0, 2, 2}, V1, V2))
16686       return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
16687     if (isShuffleEquivalent(RepeatedMask, {1, 1, 3, 3}, V1, V2))
16688       return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);
16689 
16690     if (V2.isUndef())
16691       return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
16692                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16693 
16694     // Use dedicated unpack instructions for masks that match their pattern.
16695     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
16696       return V;
16697 
16698     if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16699                                             Zeroable, Subtarget, DAG))
16700       return Blend;
16701 
16702     // Otherwise, fall back to a SHUFPS sequence.
16703     return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
16704   }
16705 
16706   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
16707                                           Zeroable, Subtarget, DAG))
16708     return Blend;
16709 
16710   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16711           DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16712     return DAG.getBitcast(MVT::v16f32, ZExt);
16713 
16714   // Try to create an in-lane repeating shuffle mask and then shuffle the
16715   // results into the target lanes.
16716   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16717           DL, MVT::v16f32, V1, V2, Mask, Subtarget, DAG))
16718     return V;
16719 
16720   // If we have a single-input shuffle with different shuffle patterns in the
16721   // 128-bit lanes but no lane crossing, use the variable mask VPERMILPS.
16722   if (V2.isUndef() &&
16723       !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
16724     SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
16725     return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
16726   }
16727 
16728   // If we have AVX512F support, we can use VEXPAND.
16729   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
16730                                              V1, V2, DAG, Subtarget))
16731     return V;
16732 
16733   return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, Subtarget, DAG);
16734 }
16735 
16736 /// Handle lowering of 8-lane 64-bit integer shuffles.
16737 static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16738                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16739                                  const X86Subtarget &Subtarget,
16740                                  SelectionDAG &DAG) {
16741   assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16742   assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
16743   assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
16744 
16745   // Try to use shift instructions if fast.
16746   if (Subtarget.preferLowerShuffleAsShift())
16747     if (SDValue Shift =
16748             lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask, Zeroable,
16749                                 Subtarget, DAG, /*BitwiseOnly*/ true))
16750       return Shift;
16751 
16752   if (V2.isUndef()) {
16753     // When the shuffle is mirrored between the 128-bit lanes of the vector, we
16754     // can use lower-latency instructions that will operate on all four
16755     // 128-bit lanes.
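    // For instance, a repeated per-lane mask of {1, 0} (swap the two i64s in
    // each 128-bit lane) becomes the v16i32 PSHUFD mask {2, 3, 0, 1}.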
16756     SmallVector<int, 2> Repeated128Mask;
16757     if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
16758       SmallVector<int, 4> PSHUFDMask;
16759       narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
16760       return DAG.getBitcast(
16761           MVT::v8i64,
16762           DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
16763                       DAG.getBitcast(MVT::v16i32, V1),
16764                       getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
16765     }
16766 
16767     SmallVector<int, 4> Repeated256Mask;
16768     if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
16769       return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
16770                          getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
16771   }
16772 
16773   if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
16774                                            V2, Subtarget, DAG))
16775     return Shuf128;
16776 
16777   // Try to use shift instructions.
16778   if (SDValue Shift =
16779           lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask, Zeroable, Subtarget,
16780                               DAG, /*BitwiseOnly*/ false))
16781     return Shift;
16782 
16783   // Try to use VALIGN.
16784   if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i64, V1, V2, Mask,
16785                                             Zeroable, Subtarget, DAG))
16786     return Rotate;
16787 
16788   // Try to use PALIGNR.
16789   if (Subtarget.hasBWI())
16790     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
16791                                                   Subtarget, DAG))
16792       return Rotate;
16793 
16794   if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
16795     return Unpck;
16796 
16797   // If we have AVX512F support, we can use VEXPAND.
16798   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
16799                                        DAG, Subtarget))
16800     return V;
16801 
16802   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
16803                                           Zeroable, Subtarget, DAG))
16804     return Blend;
16805 
16806   return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, Subtarget, DAG);
16807 }
16808 
16809 /// Handle lowering of 16-lane 32-bit integer shuffles.
16810 static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16811                                   const APInt &Zeroable, SDValue V1, SDValue V2,
16812                                   const X86Subtarget &Subtarget,
16813                                   SelectionDAG &DAG) {
16814   assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16815   assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
16816   assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
16817 
16818   int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
16819 
16820   // Whenever we can lower this as a zext, that instruction is strictly faster
16821   // than any alternative. It also allows us to fold memory operands into the
16822   // shuffle in many cases.
16823   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16824           DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
16825     return ZExt;
16826 
16827   // Try to use shift instructions if fast.
16828   if (Subtarget.preferLowerShuffleAsShift()) {
16829     if (SDValue Shift =
16830             lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask, Zeroable,
16831                                 Subtarget, DAG, /*BitwiseOnly*/ true))
16832       return Shift;
16833     if (NumV2Elements == 0)
16834       if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i32, V1, Mask,
16835                                                    Subtarget, DAG))
16836         return Rotate;
16837   }
16838 
16839   // If the shuffle mask is repeated in each 128-bit lane we can use more
16840   // efficient instructions that mirror the shuffles across the four 128-bit
16841   // lanes.
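  // For example, if every 128-bit lane uses the repeated mask {1, 0, 3, 2}
  // and V2 is undef, this is a single PSHUFD with immediate 0xB1.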
16842   SmallVector<int, 4> RepeatedMask;
16843   bool Is128BitLaneRepeatedShuffle =
16844       is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
16845   if (Is128BitLaneRepeatedShuffle) {
16846     assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
16847     if (V2.isUndef())
16848       return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
16849                          getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
16850 
16851     // Use dedicated unpack instructions for masks that match their pattern.
16852     if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
16853       return V;
16854   }
16855 
16856   // Try to use shift instructions.
16857   if (SDValue Shift =
16858           lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask, Zeroable,
16859                               Subtarget, DAG, /*BitwiseOnly*/ false))
16860     return Shift;
16861 
16862   if (!Subtarget.preferLowerShuffleAsShift() && NumV2Elements != 0)
16863     if (SDValue Rotate =
16864             lowerShuffleAsBitRotate(DL, MVT::v16i32, V1, Mask, Subtarget, DAG))
16865       return Rotate;
16866 
16867   // Try to use VALIGN.
16868   if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v16i32, V1, V2, Mask,
16869                                             Zeroable, Subtarget, DAG))
16870     return Rotate;
16871 
16872   // Try to use byte rotation instructions.
16873   if (Subtarget.hasBWI())
16874     if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
16875                                                   Subtarget, DAG))
16876       return Rotate;
16877 
16878   // Assume that a single SHUFPS is faster than using a permv shuffle.
16879   // If some CPU is harmed by the domain switch, we can fix it in a later pass.
16880   if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
16881     SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
16882     SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
16883     SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
16884                                             CastV1, CastV2, DAG);
16885     return DAG.getBitcast(MVT::v16i32, ShufPS);
16886   }
16887 
16888   // Try to create an in-lane repeating shuffle mask and then shuffle the
16889   // results into the target lanes.
16890   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
16891           DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
16892     return V;
16893 
16894   // If we have AVX512F support, we can use VEXPAND.
16895   if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
16896                                        DAG, Subtarget))
16897     return V;
16898 
16899   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
16900                                           Zeroable, Subtarget, DAG))
16901     return Blend;
16902 
16903   return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, Subtarget, DAG);
16904 }
16905 
16906 /// Handle lowering of 32-lane 16-bit integer shuffles.
16907 static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16908                                   const APInt &Zeroable, SDValue V1, SDValue V2,
16909                                   const X86Subtarget &Subtarget,
16910                                   SelectionDAG &DAG) {
16911   assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16912   assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
16913   assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
16914   assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
16915 
16916   // Whenever we can lower this as a zext, that instruction is strictly faster
16917   // than any alternative. It also allows us to fold memory operands into the
16918   // shuffle in many cases.
16919   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16920           DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
16921     return ZExt;
16922 
16923   // Use dedicated unpack instructions for masks that match their pattern.
16924   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
16925     return V;
16926 
16927   // Use dedicated pack instructions for masks that match their pattern.
16928   if (SDValue V =
16929           lowerShuffleWithPACK(DL, MVT::v32i16, Mask, V1, V2, DAG, Subtarget))
16930     return V;
16931 
16932   // Try to use shift instructions.
16933   if (SDValue Shift =
16934           lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask, Zeroable,
16935                               Subtarget, DAG, /*BitwiseOnly*/ false))
16936     return Shift;
16937 
16938   // Try to use byte rotation instructions.
16939   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
16940                                                 Subtarget, DAG))
16941     return Rotate;
16942 
16943   if (V2.isUndef()) {
16944     // Try to use bit rotation instructions.
16945     if (SDValue Rotate =
16946             lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
16947       return Rotate;
16948 
16949     SmallVector<int, 8> RepeatedMask;
16950     if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
16951       // As this is a single-input shuffle, the repeated mask should be
16952       // a strictly valid v8i16 mask that we can pass through to the v8i16
16953       // lowering to handle even the v32 case.
16954       return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v32i16, V1,
16955                                                  RepeatedMask, Subtarget, DAG);
16956     }
16957   }
16958 
16959   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
16960                                           Zeroable, Subtarget, DAG))
16961     return Blend;
16962 
16963   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
16964                                               Zeroable, Subtarget, DAG))
16965     return PSHUFB;
16966 
16967   return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, Subtarget, DAG);
16968 }
16969 
16970 /// Handle lowering of 64-lane 8-bit integer shuffles.
16971 static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
16972                                  const APInt &Zeroable, SDValue V1, SDValue V2,
16973                                  const X86Subtarget &Subtarget,
16974                                  SelectionDAG &DAG) {
16975   assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16976   assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
16977   assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
16978   assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
16979 
16980   // Whenever we can lower this as a zext, that instruction is strictly faster
16981   // than any alternative. It also allows us to fold memory operands into the
16982   // shuffle in many cases.
16983   if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
16984           DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
16985     return ZExt;
16986 
16987   // Use dedicated unpack instructions for masks that match their pattern.
16988   if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
16989     return V;
16990 
16991   // Use dedicated pack instructions for masks that match their pattern.
16992   if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
16993                                        Subtarget))
16994     return V;
16995 
16996   // Try to use shift instructions.
16997   if (SDValue Shift =
16998           lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget,
16999                               DAG, /*BitwiseOnly*/ false))
17000     return Shift;
17001 
17002   // Try to use byte rotation instructions.
17003   if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
17004                                                 Subtarget, DAG))
17005     return Rotate;
17006 
17007   // Try to use bit rotation instructions.
17008   if (V2.isUndef())
17009     if (SDValue Rotate =
17010             lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
17011       return Rotate;
17012 
17013   // Lower as AND if possible.
17014   if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v64i8, V1, V2, Mask,
17015                                              Zeroable, Subtarget, DAG))
17016     return Masked;
17017 
17018   if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
17019                                               Zeroable, Subtarget, DAG))
17020     return PSHUFB;
17021 
17022   // Try to create an in-lane repeating shuffle mask and then shuffle the
17023   // results into the target lanes.
17024   if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
17025           DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
17026     return V;
17027 
17028   if (SDValue Result = lowerShuffleAsLanePermuteAndPermute(
17029           DL, MVT::v64i8, V1, V2, Mask, DAG, Subtarget))
17030     return Result;
17031 
17032   if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
17033                                           Zeroable, Subtarget, DAG))
17034     return Blend;
17035 
17036   if (!is128BitLaneCrossingShuffleMask(MVT::v64i8, Mask)) {
17037     // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
17038     // PALIGNR will be cheaper than the second PSHUFB+OR.
17039     if (SDValue V = lowerShuffleAsByteRotateAndPermute(DL, MVT::v64i8, V1, V2,
17040                                                        Mask, Subtarget, DAG))
17041       return V;
17042 
17043     // If we can't directly blend but can use PSHUFB, that will be better as it
17044     // can both shuffle and set up the inefficient blend.
17045     bool V1InUse, V2InUse;
17046     return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v64i8, V1, V2, Mask, Zeroable,
17047                                         DAG, V1InUse, V2InUse);
17048   }
17049 
17050   // Try to simplify this by merging 128-bit lanes to enable a lane-based
17051   // shuffle.
17052   if (!V2.isUndef())
17053     if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
17054             DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
17055       return Result;
17056 
17057   // VBMI can use VPERMV/VPERMV3 byte shuffles.
17058   if (Subtarget.hasVBMI())
17059     return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, Subtarget, DAG);
17060 
17061   return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG, /*SimpleOnly*/ false);
17062 }
17063 
17064 /// High-level routine to lower various 512-bit x86 vector shuffles.
17065 ///
17066 /// This routine either breaks down the specific type of a 512-bit x86 vector
17067 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
17068 /// together based on the available instructions.
17069 static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17070                                   MVT VT, SDValue V1, SDValue V2,
17071                                   const APInt &Zeroable,
17072                                   const X86Subtarget &Subtarget,
17073                                   SelectionDAG &DAG) {
17074   assert(Subtarget.hasAVX512() &&
17075          "Cannot lower 512-bit vectors w/ basic ISA!");
17076 
17077   // If we have a single input to the zero element, insert that into V1 if we
17078   // can do so cheaply.
17079   int NumElts = Mask.size();
17080   int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
17081 
17082   if (NumV2Elements == 1 && Mask[0] >= NumElts)
17083     if (SDValue Insertion = lowerShuffleAsElementInsertion(
17084             DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
17085       return Insertion;
17086 
17087   // Handle special cases where the lower or upper half is UNDEF.
17088   if (SDValue V =
17089           lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
17090     return V;
17091 
17092   // Check for being able to broadcast a single element.
17093   if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
17094                                                   Subtarget, DAG))
17095     return Broadcast;
17096 
17097   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI()) {
17098     // Try using bit ops for masking and blending before falling back to
17099     // splitting.
17100     if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
17101                                           Subtarget, DAG))
17102       return V;
17103     if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
17104       return V;
17105 
17106     return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG, /*SimpleOnly*/ false);
17107   }
17108 
17109   if (VT == MVT::v32f16 || VT == MVT::v32bf16) {
17110     if (!Subtarget.hasBWI())
17111       return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG,
17112                                   /*SimpleOnly*/ false);
17113 
17114     V1 = DAG.getBitcast(MVT::v32i16, V1);
17115     V2 = DAG.getBitcast(MVT::v32i16, V2);
17116     return DAG.getBitcast(VT,
17117                           DAG.getVectorShuffle(MVT::v32i16, DL, V1, V2, Mask));
17118   }
17119 
17120   // Dispatch to each element type for lowering. If we don't have support for
17121   // specific element type shuffles at 512 bits, immediately split them and
17122   // lower them. Each lowering routine of a given type is allowed to assume that
17123   // the requisite ISA extensions for that element type are available.
17124   switch (VT.SimpleTy) {
17125   case MVT::v8f64:
17126     return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17127   case MVT::v16f32:
17128     return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17129   case MVT::v8i64:
17130     return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17131   case MVT::v16i32:
17132     return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17133   case MVT::v32i16:
17134     return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17135   case MVT::v64i8:
17136     return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
17137 
17138   default:
17139     llvm_unreachable("Not a valid 512-bit x86 vector type!");
17140   }
17141 }
17142 
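// Try to lower a unary vXi1 shuffle as a plain right shift of the (widened)
// mask register. For example (illustrative), the v8i1 mask
// {3, 4, 5, 6, 7, U, U, U} is a KSHIFTR of the mask by 3.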
17143 static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
17144                                          MVT VT, SDValue V1, SDValue V2,
17145                                          const X86Subtarget &Subtarget,
17146                                          SelectionDAG &DAG) {
17147   // Shuffle should be unary.
17148   if (!V2.isUndef())
17149     return SDValue();
17150 
17151   int ShiftAmt = -1;
17152   int NumElts = Mask.size();
17153   for (int i = 0; i != NumElts; ++i) {
17154     int M = Mask[i];
17155     assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
17156            "Unexpected mask index.");
17157     if (M < 0)
17158       continue;
17159 
17160     // The first non-undef element determines our shift amount.
17161     if (ShiftAmt < 0) {
17162       ShiftAmt = M - i;
17163       // Need to be shifting right.
17164       if (ShiftAmt <= 0)
17165         return SDValue();
17166     }
17167     // All non-undef elements must shift by the same amount.
17168     if (ShiftAmt != M - i)
17169       return SDValue();
17170   }
17171   assert(ShiftAmt >= 0 && "All undef?");
17172 
17173   // Great, we found a shift right.
17174   SDValue Res = widenMaskVector(V1, false, Subtarget, DAG, DL);
17175   Res = DAG.getNode(X86ISD::KSHIFTR, DL, Res.getValueType(), Res,
17176                     DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17177   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17178                      DAG.getIntPtrConstant(0, DL));
17179 }
17180 
17181 // Determine if this shuffle can be implemented with a KSHIFT instruction.
17182 // Returns the shift amount if possible or -1 if not. This is a simplified
17183 // version of matchShuffleAsShift.
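// For example (illustrative), with MaskOffset 0 a mask of
// {2, 3, 4, 5, 6, 7, Z, Z}, where the trailing elements are zeroable, matches
// KSHIFTR with a shift amount of 2.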
17184 static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
17185                                     int MaskOffset, const APInt &Zeroable) {
17186   int Size = Mask.size();
17187 
17188   auto CheckZeros = [&](int Shift, bool Left) {
17189     for (int j = 0; j < Shift; ++j)
17190       if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
17191         return false;
17192 
17193     return true;
17194   };
17195 
17196   auto MatchShift = [&](int Shift, bool Left) {
17197     unsigned Pos = Left ? Shift : 0;
17198     unsigned Low = Left ? 0 : Shift;
17199     unsigned Len = Size - Shift;
17200     return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
17201   };
17202 
17203   for (int Shift = 1; Shift != Size; ++Shift)
17204     for (bool Left : {true, false})
17205       if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
17206         Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
17207         return Shift;
17208       }
17209 
17210   return -1;
17211 }
17212 
17213 
17214 // Lower vXi1 vector shuffles.
17215 // There is no dedicated instruction on AVX-512 that shuffles the masks.
17216 // The only way to shuffle bits is to sign-extend the mask vector to a SIMD
17217 // vector, shuffle it, and then truncate it back.
17218 static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
17219                                 MVT VT, SDValue V1, SDValue V2,
17220                                 const APInt &Zeroable,
17221                                 const X86Subtarget &Subtarget,
17222                                 SelectionDAG &DAG) {
17223   assert(Subtarget.hasAVX512() &&
17224          "Cannot lower 512-bit vectors w/o basic ISA!");
17225 
17226   int NumElts = Mask.size();
17227   int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });
17228 
17229   // Try to recognize shuffles that are just padding a subvector with zeros.
17230   int SubvecElts = 0;
17231   int Src = -1;
17232   for (int i = 0; i != NumElts; ++i) {
17233     if (Mask[i] >= 0) {
17234       // Grab the source from the first valid mask element. All subsequent
17235       // elements need to use this same source.
17236       if (Src < 0)
17237         Src = Mask[i] / NumElts;
17238       if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
17239         break;
17240     }
17241 
17242     ++SubvecElts;
17243   }
17244   assert(SubvecElts != NumElts && "Identity shuffle?");
17245 
17246   // Clip to a power of 2.
17247   SubvecElts = llvm::bit_floor<uint32_t>(SubvecElts);
17248 
17249   // Make sure the number of zeroable bits in the top at least covers the bits
17250   // not covered by the subvector.
17251   if ((int)Zeroable.countl_one() >= (NumElts - SubvecElts)) {
17252     assert(Src >= 0 && "Expected a source!");
17253     MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
17254     SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
17255                                   Src == 0 ? V1 : V2,
17256                                   DAG.getIntPtrConstant(0, DL));
17257     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
17258                        DAG.getConstant(0, DL, VT),
17259                        Extract, DAG.getIntPtrConstant(0, DL));
17260   }
17261 
17262   // Try a simple shift right with undef elements. Later we'll try with zeros.
17263   if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
17264                                                 DAG))
17265     return Shift;
17266 
17267   // Try to match KSHIFTs.
17268   unsigned Offset = 0;
17269   for (SDValue V : { V1, V2 }) {
17270     unsigned Opcode;
17271     int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
17272     if (ShiftAmt >= 0) {
17273       SDValue Res = widenMaskVector(V, false, Subtarget, DAG, DL);
17274       MVT WideVT = Res.getSimpleValueType();
17275       // Widened right shifts need two shifts to ensure we shift in zeroes.
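      // For instance (illustrative), a v4i1 right shift by 1 that was widened
      // to v16i1 becomes KSHIFTL by 12 followed by KSHIFTR by 13, so zeroes
      // (not stale widened bits) fill the top of the original 4 lanes.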
17276       if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
17277         int WideElts = WideVT.getVectorNumElements();
17278         // Shift left to put the original vector in the MSBs of the new size.
17279         Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
17280                           DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
17281         // Increase the shift amount to account for the left shift.
17282         ShiftAmt += WideElts - NumElts;
17283       }
17284 
17285       Res = DAG.getNode(Opcode, DL, WideVT, Res,
17286                         DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
17287       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
17288                          DAG.getIntPtrConstant(0, DL));
17289     }
17290     Offset += NumElts; // Increment for next iteration.
17291   }
17292 
17293   // If we're performing a unary shuffle on a SETCC result, try to shuffle the
17294   // ops instead.
17295   // TODO: What other unary shuffles would benefit from this?
17296   if (NumV2Elements == 0 && V1.getOpcode() == ISD::SETCC && V1->hasOneUse()) {
17297     SDValue Op0 = V1.getOperand(0);
17298     SDValue Op1 = V1.getOperand(1);
17299     ISD::CondCode CC = cast<CondCodeSDNode>(V1.getOperand(2))->get();
17300     EVT OpVT = Op0.getValueType();
17301     if (OpVT.getScalarSizeInBits() >= 32 || isBroadcastShuffleMask(Mask))
17302       return DAG.getSetCC(
17303           DL, VT, DAG.getVectorShuffle(OpVT, DL, Op0, DAG.getUNDEF(OpVT), Mask),
17304           DAG.getVectorShuffle(OpVT, DL, Op1, DAG.getUNDEF(OpVT), Mask), CC);
17305   }
17306 
17307   MVT ExtVT;
17308   switch (VT.SimpleTy) {
17309   default:
17310     llvm_unreachable("Expected a vector of i1 elements");
17311   case MVT::v2i1:
17312     ExtVT = MVT::v2i64;
17313     break;
17314   case MVT::v4i1:
17315     ExtVT = MVT::v4i32;
17316     break;
17317   case MVT::v8i1:
17318     // Take a 512-bit type since there are more shuffles available on KNL. If we
17319     // have VLX, use a 256-bit shuffle instead.
17320     ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
17321     break;
17322   case MVT::v16i1:
17323     // Take 512-bit type, unless we are avoiding 512-bit types and have the
17324     // 256-bit operation available.
17325     ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
17326     break;
17327   case MVT::v32i1:
17328     // Take 512-bit type, unless we are avoiding 512-bit types and have the
17329     // 256-bit operation available.
17330     assert(Subtarget.hasBWI() && "Expected AVX512BW support");
17331     ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
17332     break;
17333   case MVT::v64i1:
17334     // Fall back to scalarization. FIXME: We can do better if the shuffle
17335     // can be partitioned cleanly.
17336     if (!Subtarget.useBWIRegs())
17337       return SDValue();
17338     ExtVT = MVT::v64i8;
17339     break;
17340   }
17341 
17342   V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
17343   V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);
17344 
17345   SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
17346   // Since i1 was sign-extended, we can use X86ISD::CVT2MASK.
17347   int NumElems = VT.getVectorNumElements();
17348   if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
17349       (Subtarget.hasDQI() && (NumElems < 32)))
17350     return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
17351                        Shuffle, ISD::SETGT);
17352 
17353   return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
17354 }
17355 
17356 /// Helper function that returns true if the shuffle mask should be
17357 /// commuted to improve canonicalization.
17358 static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
17359   int NumElements = Mask.size();
17360 
17361   int NumV1Elements = 0, NumV2Elements = 0;
17362   for (int M : Mask)
17363     if (M < 0)
17364       continue;
17365     else if (M < NumElements)
17366       ++NumV1Elements;
17367     else
17368       ++NumV2Elements;
17369 
17370   // Commute the shuffle as needed such that more elements come from V1 than
17371   // V2. This allows us to match the shuffle pattern strictly on how many
17372   // elements come from V1 without handling the symmetric cases.
17373   if (NumV2Elements > NumV1Elements)
17374     return true;
17375 
17376   assert(NumV1Elements > 0 && "No V1 indices");
17377 
17378   if (NumV2Elements == 0)
17379     return false;
17380 
17381   // When the number of V1 and V2 elements is the same, try to minimize the
17382   // number of uses of V2 in the low half of the vector. When that is tied,
17383   // ensure that the sum of indices for V1 is equal to or lower than the sum of
17384   // indices for V2. When those are equal, try to ensure that the number of odd
17385   // indices for V1 is lower than the number of odd indices for V2.
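  // For example (illustrative), with 4 elements the mask {4, 1, 6, 3} has two
  // elements from each operand and a tied low half, but the V2 index sum
  // (0 + 2) is lower than the V1 index sum (1 + 3), so we commute.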
17386   if (NumV1Elements == NumV2Elements) {
17387     int LowV1Elements = 0, LowV2Elements = 0;
17388     for (int M : Mask.slice(0, NumElements / 2))
17389       if (M >= NumElements)
17390         ++LowV2Elements;
17391       else if (M >= 0)
17392         ++LowV1Elements;
17393     if (LowV2Elements > LowV1Elements)
17394       return true;
17395     if (LowV2Elements == LowV1Elements) {
17396       int SumV1Indices = 0, SumV2Indices = 0;
17397       for (int i = 0, Size = Mask.size(); i < Size; ++i)
17398         if (Mask[i] >= NumElements)
17399           SumV2Indices += i;
17400         else if (Mask[i] >= 0)
17401           SumV1Indices += i;
17402       if (SumV2Indices < SumV1Indices)
17403         return true;
17404       if (SumV2Indices == SumV1Indices) {
17405         int NumV1OddIndices = 0, NumV2OddIndices = 0;
17406         for (int i = 0, Size = Mask.size(); i < Size; ++i)
17407           if (Mask[i] >= NumElements)
17408             NumV2OddIndices += i % 2;
17409           else if (Mask[i] >= 0)
17410             NumV1OddIndices += i % 2;
17411         if (NumV2OddIndices < NumV1OddIndices)
17412           return true;
17413       }
17414     }
17415   }
17416 
17417   return false;
17418 }
17419 
17420 static bool canCombineAsMaskOperation(SDValue V,
17421                                       const X86Subtarget &Subtarget) {
17422   if (!Subtarget.hasAVX512())
17423     return false;
17424 
17425   if (!V.getValueType().isSimple())
17426     return false;
17427 
17428   MVT VT = V.getSimpleValueType().getScalarType();
17429   if ((VT == MVT::i16 || VT == MVT::i8) && !Subtarget.hasBWI())
17430     return false;
17431 
17432   // If the vector width is < 512, widen i8/i16 elements even with BWI, as
17433   // blendd/blendps/blendpd are preferable to blendw/blendvb/masked-mov.
17434   if ((VT == MVT::i16 || VT == MVT::i8) &&
17435       V.getSimpleValueType().getSizeInBits() < 512)
17436     return false;
17437 
17438   auto HasMaskOperation = [&](SDValue V) {
17439     // TODO: Currently we only check a limited set of opcodes. We could probably
17440     // extend this to all binary operations by checking TLI.isBinOp().
17441     switch (V->getOpcode()) {
17442     default:
17443       return false;
17444     case ISD::ADD:
17445     case ISD::SUB:
17446     case ISD::AND:
17447     case ISD::XOR:
17448     case ISD::OR:
17449     case ISD::SMAX:
17450     case ISD::SMIN:
17451     case ISD::UMAX:
17452     case ISD::UMIN:
17453     case ISD::ABS:
17454     case ISD::SHL:
17455     case ISD::SRL:
17456     case ISD::SRA:
17457     case ISD::MUL:
17458       break;
17459     }
17460     if (!V->hasOneUse())
17461       return false;
17462 
17463     return true;
17464   };
17465 
17466   if (HasMaskOperation(V))
17467     return true;
17468 
17469   return false;
17470 }
17471 
17472 // Forward declaration.
17473 static SDValue canonicalizeShuffleMaskWithHorizOp(
17474     MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
17475     unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
17476     const X86Subtarget &Subtarget);
17477 
17478 /// Top-level lowering for x86 vector shuffles.
17479 ///
17480 /// This handles decomposition, canonicalization, and lowering of all x86
17481 /// vector shuffles. Most of the specific lowering strategies are encapsulated
17482 /// above in helper routines. The canonicalization attempts to widen shuffles
17483 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
17484 /// s.t. only one of the two inputs needs to be tested, etc.
17485 static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
17486                                    SelectionDAG &DAG) {
17487   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
17488   ArrayRef<int> OrigMask = SVOp->getMask();
17489   SDValue V1 = Op.getOperand(0);
17490   SDValue V2 = Op.getOperand(1);
17491   MVT VT = Op.getSimpleValueType();
17492   int NumElements = VT.getVectorNumElements();
17493   SDLoc DL(Op);
17494   bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);
17495 
17496   assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
17497          "Can't lower MMX shuffles");
17498 
17499   bool V1IsUndef = V1.isUndef();
17500   bool V2IsUndef = V2.isUndef();
17501   if (V1IsUndef && V2IsUndef)
17502     return DAG.getUNDEF(VT);
17503 
17504   // When we create a shuffle node we put the UNDEF node as the second operand,
17505   // but in some cases the first operand may be transformed to UNDEF.
17506   // In this case we should just commute the node.
17507   if (V1IsUndef)
17508     return DAG.getCommutedVectorShuffle(*SVOp);
17509 
17510   // Check for non-undef masks pointing at an undef vector and make the masks
17511   // undef as well. This makes it easier to match the shuffle based solely on
17512   // the mask.
17513   if (V2IsUndef &&
17514       any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
17515     SmallVector<int, 8> NewMask(OrigMask);
17516     for (int &M : NewMask)
17517       if (M >= NumElements)
17518         M = -1;
17519     return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
17520   }
17521 
17522   // Check for illegal shuffle mask element index values.
17523   int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
17524   (void)MaskUpperLimit;
17525   assert(llvm::all_of(OrigMask,
17526                       [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
17527          "Out of bounds shuffle index");
17528 
17529   // We actually see shuffles that are entirely re-arrangements of a set of
17530   // zero inputs. This mostly happens while decomposing complex shuffles into
17531   // simple ones. Directly lower these as a buildvector of zeros.
17532   APInt KnownUndef, KnownZero;
17533   computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);
17534 
17535   APInt Zeroable = KnownUndef | KnownZero;
17536   if (Zeroable.isAllOnes())
17537     return getZeroVector(VT, Subtarget, DAG, DL);
17538 
17539   bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());
17540 
17541   // Try to collapse shuffles into using a vector type with fewer elements but
17542   // wider element types. We cap this to not form integers or floating point
17543   // elements wider than 64 bits. It does not seem beneficial to form i128
17544   // integers to handle flipping the low and high halves of AVX 256-bit vectors.
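  // For example, a v8i16 shuffle with mask <0,1,4,5,2,3,6,7> can be widened to
  // a v4i32 shuffle with mask <0,2,1,3>, since each pair of adjacent elements
  // moves together.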
17545   SmallVector<int, 16> WidenedMask;
17546   if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
17547       !canCombineAsMaskOperation(V1, Subtarget) &&
17548       !canCombineAsMaskOperation(V2, Subtarget) &&
17549       canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
17550     // Shuffle mask widening should not interfere with a broadcast opportunity
17551     // by obfuscating the operands with bitcasts.
17552     // TODO: Avoid lowering directly from this top-level function: make this
17553     // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
17554     if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
17555                                                     Subtarget, DAG))
17556       return Broadcast;
17557 
17558     MVT NewEltVT = VT.isFloatingPoint()
17559                        ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
17560                        : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
17561     int NewNumElts = NumElements / 2;
17562     MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
17563     // Make sure that the new vector type is legal. For example, v2f64 isn't
17564     // legal on SSE1.
17565     if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
17566       if (V2IsZero) {
17567         // Modify the new Mask to take all zeros from the all-zero vector.
17568         // Choose indices that are blend-friendly.
17569         bool UsedZeroVector = false;
17570         assert(is_contained(WidenedMask, SM_SentinelZero) &&
17571                "V2's non-undef elements are used?!");
17572         for (int i = 0; i != NewNumElts; ++i)
17573           if (WidenedMask[i] == SM_SentinelZero) {
17574             WidenedMask[i] = i + NewNumElts;
17575             UsedZeroVector = true;
17576           }
17577         // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
17578         // some elements to be undef.
17579         if (UsedZeroVector)
17580           V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
17581       }
17582       V1 = DAG.getBitcast(NewVT, V1);
17583       V2 = DAG.getBitcast(NewVT, V2);
17584       return DAG.getBitcast(
17585           VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
17586     }
17587   }
17588 
17589   SmallVector<SDValue> Ops = {V1, V2};
17590   SmallVector<int> Mask(OrigMask);
17591 
17592   // Canonicalize the shuffle with any horizontal ops inputs.
17593   // NOTE: This may update Ops and Mask.
17594   if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
17595           Ops, Mask, VT.getSizeInBits(), DL, DAG, Subtarget))
17596     return DAG.getBitcast(VT, HOp);
17597 
17598   V1 = DAG.getBitcast(VT, Ops[0]);
17599   V2 = DAG.getBitcast(VT, Ops[1]);
17600   assert(NumElements == (int)Mask.size() &&
17601          "canonicalizeShuffleMaskWithHorizOp "
17602          "shouldn't alter the shuffle mask size");
17603 
17604   // Commute the shuffle if it will improve canonicalization.
17605   if (canonicalizeShuffleMaskWithCommute(Mask)) {
17606     ShuffleVectorSDNode::commuteMask(Mask);
17607     std::swap(V1, V2);
17608   }
17609 
17610   // For each vector width, delegate to a specialized lowering routine.
17611   if (VT.is128BitVector())
17612     return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17613 
17614   if (VT.is256BitVector())
17615     return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17616 
17617   if (VT.is512BitVector())
17618     return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17619 
17620   if (Is1BitVector)
17621     return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);
17622 
17623   llvm_unreachable("Unimplemented!");
17624 }
17625 
17626 /// Try to lower a VSELECT instruction to a vector shuffle.
17627 static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
17628                                            const X86Subtarget &Subtarget,
17629                                            SelectionDAG &DAG) {
17630   SDValue Cond = Op.getOperand(0);
17631   SDValue LHS = Op.getOperand(1);
17632   SDValue RHS = Op.getOperand(2);
17633   MVT VT = Op.getSimpleValueType();
17634 
17635   // Only non-legal VSELECTs reach this lowering; convert those into generic
17636   // shuffles and reuse the shuffle lowering path for blends.
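  // For example, a v4i32 vselect whose condition is the constant vector
  // <true, false, true, false> is equivalent to a shuffle of LHS/RHS with
  // mask <0, 5, 2, 7>.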
17637   if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
17638     SmallVector<int, 32> Mask;
17639     if (createShuffleMaskFromVSELECT(Mask, Cond))
17640       return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);
17641   }
17642 
17643   return SDValue();
17644 }
17645 
17646 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
17647   SDValue Cond = Op.getOperand(0);
17648   SDValue LHS = Op.getOperand(1);
17649   SDValue RHS = Op.getOperand(2);
17650 
17651   SDLoc dl(Op);
17652   MVT VT = Op.getSimpleValueType();
17653   if (isSoftF16(VT, Subtarget)) {
17654     MVT NVT = VT.changeVectorElementTypeToInteger();
17655     return DAG.getBitcast(VT, DAG.getNode(ISD::VSELECT, dl, NVT, Cond,
17656                                           DAG.getBitcast(NVT, LHS),
17657                                           DAG.getBitcast(NVT, RHS)));
17658   }
17659 
17660   // A vselect where all conditions and data are constants can be optimized into
17661   // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
17662   if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
17663       ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
17664       ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
17665     return SDValue();
17666 
17667   // Try to lower this to a blend-style vector shuffle. This can handle all
17668   // constant condition cases.
17669   if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
17670     return BlendOp;
17671 
17672   // If this VSELECT has a vector of i1 as its mask, it will be matched
17673   // directly against patterns on the mask registers on AVX-512.
17674   MVT CondVT = Cond.getSimpleValueType();
17675   unsigned CondEltSize = Cond.getScalarValueSizeInBits();
17676   if (CondEltSize == 1)
17677     return Op;
17678 
17679   // Variable blends are only legal from SSE4.1 onward.
17680   if (!Subtarget.hasSSE41())
17681     return SDValue();
17682 
17683   unsigned EltSize = VT.getScalarSizeInBits();
17684   unsigned NumElts = VT.getVectorNumElements();
17685 
17686   // Expand v32i16/v64i8 without BWI.
17687   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
17688     return SDValue();
17689 
17690   // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
17691   // into an i1 condition so that we can use the mask-based 512-bit blend
17692   // instructions.
17693   if (VT.getSizeInBits() == 512) {
17694     // Build a mask by testing the condition against zero.
17695     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
17696     SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
17697                                 DAG.getConstant(0, dl, CondVT),
17698                                 ISD::SETNE);
17699     // Now return a new VSELECT using the mask.
17700     return DAG.getSelect(dl, VT, Mask, LHS, RHS);
17701   }
17702 
17703   // SEXT/TRUNC cases where the mask doesn't match the destination size.
17704   if (CondEltSize != EltSize) {
17705     // If we don't have a sign splat, rely on the expansion.
17706     if (CondEltSize != DAG.ComputeNumSignBits(Cond))
17707       return SDValue();
17708 
17709     MVT NewCondSVT = MVT::getIntegerVT(EltSize);
17710     MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
17711     Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
17712     return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
17713   }
17714 
17715   // Only some types will be legal on some subtargets. If we can emit a legal
17716   // VSELECT-matching blend, return Op; if we need to expand, return a null
17717   // value.
17718   switch (VT.SimpleTy) {
17719   default:
17720     // Most of the vector types have blends past SSE4.1.
17721     return Op;
17722 
17723   case MVT::v32i8:
17724     // The byte blends for AVX vectors were introduced only in AVX2.
17725     if (Subtarget.hasAVX2())
17726       return Op;
17727 
17728     return SDValue();
17729 
17730   case MVT::v8i16:
17731   case MVT::v16i16: {
17732     // Bitcast everything to the vXi8 type and use a vXi8 vselect.
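    // This relies on x86 using all-zeros/all-ones vector booleans
    // (ZeroOrNegativeOneBooleanContent): every byte of each i16 condition
    // element has the same value, so a byte-wise select produces the same
    // result as the original word-wise select.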
17733     MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
17734     Cond = DAG.getBitcast(CastVT, Cond);
17735     LHS = DAG.getBitcast(CastVT, LHS);
17736     RHS = DAG.getBitcast(CastVT, RHS);
17737     SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
17738     return DAG.getBitcast(VT, Select);
17739   }
17740   }
17741 }
17742 
17743 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
17744   MVT VT = Op.getSimpleValueType();
17745   SDValue Vec = Op.getOperand(0);
17746   SDValue Idx = Op.getOperand(1);
17747   assert(isa<ConstantSDNode>(Idx) && "Constant index expected");
17748   SDLoc dl(Op);
17749 
17750   if (!Vec.getSimpleValueType().is128BitVector())
17751     return SDValue();
17752 
17753   if (VT.getSizeInBits() == 8) {
17754     // If IdxVal is 0, it's cheaper to do a move instead of a pextrb, unless
17755     // we're going to zero extend the register or fold the store.
17756     if (llvm::isNullConstant(Idx) && !X86::mayFoldIntoZeroExtend(Op) &&
17757         !X86::mayFoldIntoStore(Op))
17758       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
17759                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17760                                      DAG.getBitcast(MVT::v4i32, Vec), Idx));
17761 
17762     unsigned IdxVal = Idx->getAsZExtVal();
17763     SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec,
17764                                   DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17765     return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17766   }
17767 
17768   if (VT == MVT::f32) {
17769     // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
17770     // the result back to FR32 register. It's only worth matching if the
17771     // result has a single use which is a store or a bitcast to i32.  And in
17772     // the case of a store, it's not worth it if the index is a constant 0,
17773     // because a MOVSSmr can be used instead, which is smaller and faster.
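    // For example, (store (f32 (extract_elt v4f32:$x, 2))) can typically be
    // selected to a single 'extractps $2, %xmm0, (%rdi)' with a memory
    // destination (registers shown here are only illustrative).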
17774     if (!Op.hasOneUse())
17775       return SDValue();
17776     SDNode *User = *Op.getNode()->use_begin();
17777     if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) &&
17778         (User->getOpcode() != ISD::BITCAST ||
17779          User->getValueType(0) != MVT::i32))
17780       return SDValue();
17781     SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17782                                   DAG.getBitcast(MVT::v4i32, Vec), Idx);
17783     return DAG.getBitcast(MVT::f32, Extract);
17784   }
17785 
17786   if (VT == MVT::i32 || VT == MVT::i64)
17787       return Op;
17788 
17789   return SDValue();
17790 }
17791 
17792 /// Extract one bit from a mask vector, like v16i1 or v8i1.
17793 /// This is an AVX-512 feature.
17794 static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
17795                                         const X86Subtarget &Subtarget) {
17796   SDValue Vec = Op.getOperand(0);
17797   SDLoc dl(Vec);
17798   MVT VecVT = Vec.getSimpleValueType();
17799   SDValue Idx = Op.getOperand(1);
17800   auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
17801   MVT EltVT = Op.getSimpleValueType();
17802 
17803   assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
17804          "Unexpected vector type in ExtractBitFromMaskVector");
17805 
17806   // A variable index can't be handled in mask registers;
17807   // extend the vector to VR512/VR128.
17808   if (!IdxC) {
17809     unsigned NumElts = VecVT.getVectorNumElements();
17810     // Extending v8i1/v16i1 to 512 bits gets better performance on KNL
17811     // than extending to 128/256 bits.
17812     if (NumElts == 1) {
17813       Vec = widenMaskVector(Vec, false, Subtarget, DAG, dl);
17814       MVT IntVT = MVT::getIntegerVT(Vec.getValueType().getVectorNumElements());
17815       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, DAG.getBitcast(IntVT, Vec));
17816     }
17817     MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
17818     MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
17819     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
17820     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
17821     return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
17822   }
17823 
17824   unsigned IdxVal = IdxC->getZExtValue();
17825   if (IdxVal == 0) // the operation is legal
17826     return Op;
17827 
17828   // Extend to natively supported kshift.
17829   Vec = widenMaskVector(Vec, false, Subtarget, DAG, dl);
17830 
17831   // Use kshiftr instruction to move to the lower element.
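  // For example, extracting element 5 of a v16i1 mask would become something
  // like 'kshiftrw $5, %k1, %k1' followed by an extract of element 0
  // (the k-register shown is only illustrative).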
17832   Vec = DAG.getNode(X86ISD::KSHIFTR, dl, Vec.getSimpleValueType(), Vec,
17833                     DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17834 
17835   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17836                      DAG.getIntPtrConstant(0, dl));
17837 }
17838 
17839 // Helper to find all the elements extracted from a vector by its users.
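// For example, if the only users of a v16i8 node are extracts of elements 0
// and 3, the returned APInt has just bits 0 and 3 set; any user we cannot
// reason about conservatively sets all bits.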
17840 static APInt getExtractedDemandedElts(SDNode *N) {
17841   MVT VT = N->getSimpleValueType(0);
17842   unsigned NumElts = VT.getVectorNumElements();
17843   APInt DemandedElts = APInt::getZero(NumElts);
17844   for (SDNode *User : N->uses()) {
17845     switch (User->getOpcode()) {
17846     case X86ISD::PEXTRB:
17847     case X86ISD::PEXTRW:
17848     case ISD::EXTRACT_VECTOR_ELT:
17849       if (!isa<ConstantSDNode>(User->getOperand(1))) {
17850         DemandedElts.setAllBits();
17851         return DemandedElts;
17852       }
17853       DemandedElts.setBit(User->getConstantOperandVal(1));
17854       break;
17855     case ISD::BITCAST: {
17856       if (!User->getValueType(0).isSimple() ||
17857           !User->getValueType(0).isVector()) {
17858         DemandedElts.setAllBits();
17859         return DemandedElts;
17860       }
17861       APInt DemandedSrcElts = getExtractedDemandedElts(User);
17862       DemandedElts |= APIntOps::ScaleBitMask(DemandedSrcElts, NumElts);
17863       break;
17864     }
17865     default:
17866       DemandedElts.setAllBits();
17867       return DemandedElts;
17868     }
17869   }
17870   return DemandedElts;
17871 }
17872 
17873 SDValue
17874 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
17875                                            SelectionDAG &DAG) const {
17876   SDLoc dl(Op);
17877   SDValue Vec = Op.getOperand(0);
17878   MVT VecVT = Vec.getSimpleValueType();
17879   SDValue Idx = Op.getOperand(1);
17880   auto* IdxC = dyn_cast<ConstantSDNode>(Idx);
17881 
17882   if (VecVT.getVectorElementType() == MVT::i1)
17883     return ExtractBitFromMaskVector(Op, DAG, Subtarget);
17884 
17885   if (!IdxC) {
17886     // It's more profitable to go through memory (1 cycle throughput)
17887     // than to use a VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
17888     // The IACA tool was used to get the performance estimate
17889     // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
17890     //
17891     // example : extractelement <16 x i8> %a, i32 %i
17892     //
17893     // Block Throughput: 3.00 Cycles
17894     // Throughput Bottleneck: Port5
17895     //
17896     // | Num Of |   Ports pressure in cycles  |    |
17897     // |  Uops  |  0  - DV  |  5  |  6  |  7  |    |
17898     // ---------------------------------------------
17899     // |   1    |           | 1.0 |     |     | CP | vmovd xmm1, edi
17900     // |   1    |           | 1.0 |     |     | CP | vpshufb xmm0, xmm0, xmm1
17901     // |   2    | 1.0       | 1.0 |     |     | CP | vpextrb eax, xmm0, 0x0
17902     // Total Num Of Uops: 4
17903     //
17904     //
17905     // Block Throughput: 1.00 Cycles
17906     // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
17907     //
17908     // |    |  Ports pressure in cycles   |  |
17909     // |Uops| 1 | 2 - D  |3 -  D  | 4 | 5 |  |
17910     // ---------------------------------------------------------
17911     // |2^  |   | 0.5    | 0.5    |1.0|   |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
17912     // |1   |0.5|        |        |   |0.5|  | lea rax, ptr [rsp-0x18]
17913     // |1   |   |0.5, 0.5|0.5, 0.5|   |   |CP| mov al, byte ptr [rdi+rax*1]
17914     // Total Num Of Uops: 4
17915 
17916     return SDValue();
17917   }
17918 
17919   unsigned IdxVal = IdxC->getZExtValue();
17920 
17921   // If this is a 256-bit vector result, first extract the 128-bit vector and
17922   // then extract the element from the 128-bit vector.
17923   if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
17924     // Get the 128-bit vector.
17925     Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
17926     MVT EltVT = VecVT.getVectorElementType();
17927 
17928     unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
17929     assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
17930 
17931     // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
17932     // this can be done with a mask.
17933     IdxVal &= ElemsPerChunk - 1;
17934     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
17935                        DAG.getIntPtrConstant(IdxVal, dl));
17936   }
17937 
17938   assert(VecVT.is128BitVector() && "Unexpected vector length");
17939 
17940   MVT VT = Op.getSimpleValueType();
17941 
17942   if (VT == MVT::i16) {
17943     // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
17944     // we're going to zero extend the register or fold the store (SSE41 only).
17945     if (IdxVal == 0 && !X86::mayFoldIntoZeroExtend(Op) &&
17946         !(Subtarget.hasSSE41() && X86::mayFoldIntoStore(Op))) {
17947       if (Subtarget.hasFP16())
17948         return Op;
17949 
17950       return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
17951                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17952                                      DAG.getBitcast(MVT::v4i32, Vec), Idx));
17953     }
17954 
17955     SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, Vec,
17956                                   DAG.getTargetConstant(IdxVal, dl, MVT::i8));
17957     return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
17958   }
17959 
17960   if (Subtarget.hasSSE41())
17961     if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
17962       return Res;
17963 
17964   // Only extract a single element from a v16i8 source - determine the common
17965   // DWORD/WORD that all extractions share, and extract the sub-byte.
17966   // TODO: Add QWORD MOVQ extraction?
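  // For example, if elements 0..3 are the only bytes ever extracted from a
  // v16i8, we extract the low i32 once and recover each byte with a shift and
  // truncate.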
17967   if (VT == MVT::i8) {
17968     APInt DemandedElts = getExtractedDemandedElts(Vec.getNode());
17969     assert(DemandedElts.getBitWidth() == 16 && "Vector width mismatch");
17970 
17971     // Extract either the lowest i32 or any i16, and extract the sub-byte.
17972     int DWordIdx = IdxVal / 4;
17973     if (DWordIdx == 0 && DemandedElts == (DemandedElts & 15)) {
17974       SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
17975                                 DAG.getBitcast(MVT::v4i32, Vec),
17976                                 DAG.getIntPtrConstant(DWordIdx, dl));
17977       int ShiftVal = (IdxVal % 4) * 8;
17978       if (ShiftVal != 0)
17979         Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
17980                           DAG.getConstant(ShiftVal, dl, MVT::i8));
17981       return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17982     }
17983 
17984     int WordIdx = IdxVal / 2;
17985     if (DemandedElts == (DemandedElts & (3 << (WordIdx * 2)))) {
17986       SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
17987                                 DAG.getBitcast(MVT::v8i16, Vec),
17988                                 DAG.getIntPtrConstant(WordIdx, dl));
17989       int ShiftVal = (IdxVal % 2) * 8;
17990       if (ShiftVal != 0)
17991         Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
17992                           DAG.getConstant(ShiftVal, dl, MVT::i8));
17993       return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
17994     }
17995   }
17996 
17997   if (VT == MVT::f16 || VT.getSizeInBits() == 32) {
17998     if (IdxVal == 0)
17999       return Op;
18000 
18001     // Shuffle the element to the lowest element, then movss or movsh.
18002     SmallVector<int, 8> Mask(VecVT.getVectorNumElements(), -1);
18003     Mask[0] = static_cast<int>(IdxVal);
18004     Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
18005     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
18006                        DAG.getIntPtrConstant(0, dl));
18007   }
18008 
18009   if (VT.getSizeInBits() == 64) {
18010     // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
18011     // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
18012     //        to match extract_elt for f64.
18013     if (IdxVal == 0)
18014       return Op;
18015 
18016     // UNPCKHPD the element to the lowest double word, then movsd.
18017     // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
18018     // to a f64mem, the whole operation is folded into a single MOVHPDmr.
18019     int Mask[2] = { 1, -1 };
18020     Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
18021     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
18022                        DAG.getIntPtrConstant(0, dl));
18023   }
18024 
18025   return SDValue();
18026 }
18027 
18028 /// Insert one bit into a mask vector, like v16i1 or v8i1.
18029 /// This is an AVX-512 feature.
18030 static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
18031                                      const X86Subtarget &Subtarget) {
18032   SDLoc dl(Op);
18033   SDValue Vec = Op.getOperand(0);
18034   SDValue Elt = Op.getOperand(1);
18035   SDValue Idx = Op.getOperand(2);
18036   MVT VecVT = Vec.getSimpleValueType();
18037 
18038   if (!isa<ConstantSDNode>(Idx)) {
18039     // Non-constant index. Extend the source and destination,
18040     // insert the element and then truncate the result.
18041     unsigned NumElts = VecVT.getVectorNumElements();
18042     MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
18043     MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
18044     SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
18045       DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
18046       DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
18047     return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
18048   }
18049 
18050   // Copy into a k-register, extract to v1i1 and insert_subvector.
18051   SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
18052   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec, Idx);
18053 }
18054 
18055 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
18056                                                   SelectionDAG &DAG) const {
18057   MVT VT = Op.getSimpleValueType();
18058   MVT EltVT = VT.getVectorElementType();
18059   unsigned NumElts = VT.getVectorNumElements();
18060   unsigned EltSizeInBits = EltVT.getScalarSizeInBits();
18061 
18062   if (EltVT == MVT::i1)
18063     return InsertBitToMaskVector(Op, DAG, Subtarget);
18064 
18065   SDLoc dl(Op);
18066   SDValue N0 = Op.getOperand(0);
18067   SDValue N1 = Op.getOperand(1);
18068   SDValue N2 = Op.getOperand(2);
18069   auto *N2C = dyn_cast<ConstantSDNode>(N2);
18070 
18071   if (EltVT == MVT::bf16) {
18072     MVT IVT = VT.changeVectorElementTypeToInteger();
18073     SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVT,
18074                               DAG.getBitcast(IVT, N0),
18075                               DAG.getBitcast(MVT::i16, N1), N2);
18076     return DAG.getBitcast(VT, Res);
18077   }
18078 
18079   if (!N2C) {
18080     // For variable insertion indices we're usually better off spilling to the
18081     // stack, but AVX512 can use a variable compare+select by comparing against
18082     // all possible vector indices, and FP insertion has less gpr->simd traffic.
18083     if (!(Subtarget.hasBWI() ||
18084           (Subtarget.hasAVX512() && EltSizeInBits >= 32) ||
18085           (Subtarget.hasSSE41() && (EltVT == MVT::f32 || EltVT == MVT::f64))))
18086       return SDValue();
18087 
18088     MVT IdxSVT = MVT::getIntegerVT(EltSizeInBits);
18089     MVT IdxVT = MVT::getVectorVT(IdxSVT, NumElts);
18090     if (!isTypeLegal(IdxSVT) || !isTypeLegal(IdxVT))
18091       return SDValue();
18092 
18093     SDValue IdxExt = DAG.getZExtOrTrunc(N2, dl, IdxSVT);
18094     SDValue IdxSplat = DAG.getSplatBuildVector(IdxVT, dl, IdxExt);
18095     SDValue EltSplat = DAG.getSplatBuildVector(VT, dl, N1);
18096 
18097     SmallVector<SDValue, 16> RawIndices;
18098     for (unsigned I = 0; I != NumElts; ++I)
18099       RawIndices.push_back(DAG.getConstant(I, dl, IdxSVT));
18100     SDValue Indices = DAG.getBuildVector(IdxVT, dl, RawIndices);
18101 
18102     // inselt N0, N1, N2 --> select (SplatN2 == {0,1,2...}) ? SplatN1 : N0.
18103     return DAG.getSelectCC(dl, IdxSplat, Indices, EltSplat, N0,
18104                            ISD::CondCode::SETEQ);
18105   }
18106 
18107   if (N2C->getAPIntValue().uge(NumElts))
18108     return SDValue();
18109   uint64_t IdxVal = N2C->getZExtValue();
18110 
18111   bool IsZeroElt = X86::isZeroNode(N1);
18112   bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);
18113 
18114   if (IsZeroElt || IsAllOnesElt) {
18115     // Lower insertion of v16i8/v32i8/v16i16 -1 elts as an 'OR' blend.
18116     // We don't deal with i8 0 since it appears to be handled elsewhere.
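    // For example, inserting -1 into element 2 of a v16i8 becomes
    // (or N0, <0,0,-1,0,...,0>), which needs no pinsrb.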
18117     if (IsAllOnesElt &&
18118         ((VT == MVT::v16i8 && !Subtarget.hasSSE41()) ||
18119          ((VT == MVT::v32i8 || VT == MVT::v16i16) && !Subtarget.hasInt256()))) {
18120       SDValue ZeroCst = DAG.getConstant(0, dl, VT.getScalarType());
18121       SDValue OnesCst = DAG.getAllOnesConstant(dl, VT.getScalarType());
18122       SmallVector<SDValue, 8> CstVectorElts(NumElts, ZeroCst);
18123       CstVectorElts[IdxVal] = OnesCst;
18124       SDValue CstVector = DAG.getBuildVector(VT, dl, CstVectorElts);
18125       return DAG.getNode(ISD::OR, dl, VT, N0, CstVector);
18126     }
18127     // See if we can do this more efficiently with a blend shuffle with a
18128     // rematerializable vector.
18129     if (Subtarget.hasSSE41() &&
18130         (EltSizeInBits >= 16 || (IsZeroElt && !VT.is128BitVector()))) {
18131       SmallVector<int, 8> BlendMask;
18132       for (unsigned i = 0; i != NumElts; ++i)
18133         BlendMask.push_back(i == IdxVal ? i + NumElts : i);
18134       SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
18135                                     : getOnesVector(VT, DAG, dl);
18136       return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
18137     }
18138   }
18139 
18140   // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
18141   // into that, and then insert the subvector back into the result.
18142   if (VT.is256BitVector() || VT.is512BitVector()) {
18143     // With a 256-bit vector, we can insert into the zero element efficiently
18144     // using a blend if we have AVX or AVX2 and the right data type.
18145     if (VT.is256BitVector() && IdxVal == 0) {
18146       // TODO: It is worthwhile to cast integer to floating point and back
18147       // and incur a domain crossing penalty if that's what we'll end up
18148       // doing anyway after extracting to a 128-bit vector.
18149       if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
18150           (Subtarget.hasAVX2() && (EltVT == MVT::i32 || EltVT == MVT::i64))) {
18151         SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
18152         return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
18153                            DAG.getTargetConstant(1, dl, MVT::i8));
18154       }
18155     }
18156 
18157     unsigned NumEltsIn128 = 128 / EltSizeInBits;
18158     assert(isPowerOf2_32(NumEltsIn128) &&
18159            "Vectors will always have power-of-two number of elements.");
18160 
18161     // If we are not inserting into the low 128-bit vector chunk,
18162     // then prefer the broadcast+blend sequence.
18163     // FIXME: relax the profitability check iff all N1 uses are insertions.
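    // For example, with AVX2, inserting a scalar into element 9 of a v16i16
    // can be done as a vpbroadcastw of the scalar followed by a blend of lane
    // 9, avoiding an extract/insert of the upper 128-bit half.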
18164     if (IdxVal >= NumEltsIn128 &&
18165         ((Subtarget.hasAVX2() && EltSizeInBits != 8) ||
18166          (Subtarget.hasAVX() && (EltSizeInBits >= 32) &&
18167           X86::mayFoldLoad(N1, Subtarget)))) {
18168       SDValue N1SplatVec = DAG.getSplatBuildVector(VT, dl, N1);
18169       SmallVector<int, 8> BlendMask;
18170       for (unsigned i = 0; i != NumElts; ++i)
18171         BlendMask.push_back(i == IdxVal ? i + NumElts : i);
18172       return DAG.getVectorShuffle(VT, dl, N0, N1SplatVec, BlendMask);
18173     }
18174 
18175     // Get the desired 128-bit vector chunk.
18176     SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);
18177 
18178     // Insert the element into the desired chunk.
18179     // Since NumEltsIn128 is a power of 2 we can use mask instead of modulo.
18180     unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);
18181 
18182     V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
18183                     DAG.getIntPtrConstant(IdxIn128, dl));
18184 
18185     // Insert the changed part back into the bigger vector
18186     return insert128BitVector(N0, V, IdxVal, DAG, dl);
18187   }
18188   assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
18189 
18190   // This will be just movw/movd/movq/movsh/movss/movsd.
18191   if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode())) {
18192     if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
18193         EltVT == MVT::f16 || EltVT == MVT::i64) {
18194       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
18195       return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
18196     }
18197 
18198     // We can't directly insert an i8 or i16 into a vector, so zero extend
18199     // it to i32 first.
18200     if (EltVT == MVT::i16 || EltVT == MVT::i8) {
18201       N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, N1);
18202       MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
18203       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, N1);
18204       N1 = getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
18205       return DAG.getBitcast(VT, N1);
18206     }
18207   }
18208 
18209   // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
18210   // argument. SSE41 is required for pinsrb.
18211   if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
18212     unsigned Opc;
18213     if (VT == MVT::v8i16) {
18214       assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
18215       Opc = X86ISD::PINSRW;
18216     } else {
18217       assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
18218       assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
18219       Opc = X86ISD::PINSRB;
18220     }
18221 
18222     assert(N1.getValueType() != MVT::i32 && "Unexpected VT");
18223     N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
18224     N2 = DAG.getTargetConstant(IdxVal, dl, MVT::i8);
18225     return DAG.getNode(Opc, dl, VT, N0, N1, N2);
18226   }
18227 
18228   if (Subtarget.hasSSE41()) {
18229     if (EltVT == MVT::f32) {
18230       // Bits [7:6] of the constant are the source select. This will always be
18231       //   zero here. The DAG Combiner may combine an extract_elt index into
18232       //   these bits. For example (insert (extract, 3), 2) could be matched by
18233       //   putting the '3' into bits [7:6] of X86ISD::INSERTPS.
18234       // Bits [5:4] of the constant are the destination select. This is the
18235       //   value of the incoming immediate.
18236       // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
18237       //   combine either bitwise AND or insert of float 0.0 to set these bits.
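      // For example, inserting into element 2 uses IdxVal << 4 == 0x20, i.e.
      // destination select '2' in bits [5:4] with no zeroed lanes.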
18238 
18239       bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
18240       if (IdxVal == 0 && (!MinSize || !X86::mayFoldLoad(N1, Subtarget))) {
18241         // If this is an insertion of 32-bits into the low 32-bits of
18242         // a vector, we prefer to generate a blend with immediate rather
18243         // than an insertps. Blends are simpler operations in hardware and so
18244         // will always have equal or better performance than insertps.
18245         // But if optimizing for size and there's a load folding opportunity,
18246         // generate insertps because blendps does not have a 32-bit memory
18247         // operand form.
18248         N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
18249         return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
18250                            DAG.getTargetConstant(1, dl, MVT::i8));
18251       }
18252       // Create this as a scalar-to-vector.
18253       N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
18254       return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
18255                          DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
18256     }
18257 
18258     // PINSR* works with constant index.
18259     if (EltVT == MVT::i32 || EltVT == MVT::i64)
18260       return Op;
18261   }
18262 
18263   return SDValue();
18264 }
18265 
18266 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
18267                                      SelectionDAG &DAG) {
18268   SDLoc dl(Op);
18269   MVT OpVT = Op.getSimpleValueType();
18270 
18271   // It's always cheaper to replace an xor+movd with xorps, and it simplifies
18272   // further combines.
18273   if (X86::isZeroNode(Op.getOperand(0)))
18274     return getZeroVector(OpVT, Subtarget, DAG, dl);
18275 
18276   // If this is a 256-bit vector result, first insert into a 128-bit
18277   // vector and then insert into the 256-bit vector.
18278   if (!OpVT.is128BitVector()) {
18279     // Insert into a 128-bit vector.
18280     unsigned SizeFactor = OpVT.getSizeInBits() / 128;
18281     MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
18282                                  OpVT.getVectorNumElements() / SizeFactor);
18283 
18284     Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
18285 
18286     // Insert the 128-bit vector.
18287     return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
18288   }
18289   assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
18290          "Expected an SSE type!");
18291 
18292   // Pass through a v4i32 or v8i16 SCALAR_TO_VECTOR as that's what we use in
18293   // tblgen.
18294   if (OpVT == MVT::v4i32 || (OpVT == MVT::v8i16 && Subtarget.hasFP16()))
18295     return Op;
18296 
18297   SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
18298   return DAG.getBitcast(
18299       OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
18300 }
18301 
18302 // Lower a node with an INSERT_SUBVECTOR opcode.  This may result in a
18303 // simple superregister reference or explicit instructions to insert
18304 // the upper bits of a vector.
18305 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
18306                                      SelectionDAG &DAG) {
18307   assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);
18308 
18309   return insert1BitVector(Op, DAG, Subtarget);
18310 }
18311 
18312 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
18313                                       SelectionDAG &DAG) {
18314   assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
18315          "Only vXi1 extract_subvectors need custom lowering");
18316 
18317   SDLoc dl(Op);
18318   SDValue Vec = Op.getOperand(0);
18319   uint64_t IdxVal = Op.getConstantOperandVal(1);
18320 
18321   if (IdxVal == 0) // the operation is legal
18322     return Op;
18323 
18324   // Extend to natively supported kshift.
18325   Vec = widenMaskVector(Vec, false, Subtarget, DAG, dl);
18326 
18327   // Shift to the LSB.
18328   Vec = DAG.getNode(X86ISD::KSHIFTR, dl, Vec.getSimpleValueType(), Vec,
18329                     DAG.getTargetConstant(IdxVal, dl, MVT::i8));
18330 
18331   return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
18332                      DAG.getIntPtrConstant(0, dl));
18333 }
18334 
18335 // Returns the appropriate wrapper opcode for a global reference.
18336 unsigned X86TargetLowering::getGlobalWrapperKind(
18337     const GlobalValue *GV, const unsigned char OpFlags) const {
18338   // References to absolute symbols are never PC-relative.
18339   if (GV && GV->isAbsoluteSymbolRef())
18340     return X86ISD::Wrapper;
18341 
18342   // The following OpFlags under RIP-rel PIC use RIP.
18343   if (Subtarget.isPICStyleRIPRel() &&
18344       (OpFlags == X86II::MO_NO_FLAG || OpFlags == X86II::MO_COFFSTUB ||
18345        OpFlags == X86II::MO_DLLIMPORT))
18346     return X86ISD::WrapperRIP;
18347 
18348   // GOTPCREL references must always use RIP.
18349   if (OpFlags == X86II::MO_GOTPCREL || OpFlags == X86II::MO_GOTPCREL_NORELAX)
18350     return X86ISD::WrapperRIP;
18351 
18352   return X86ISD::Wrapper;
18353 }
18354 
18355 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
18356 // their target counterparts wrapped in the X86ISD::Wrapper node. Suppose N is
18357 // one of the above-mentioned nodes. It has to be wrapped because otherwise
18358 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
18359 // be used to form an addressing mode. These wrapped nodes will be selected
18360 // into MOV32ri.
18361 SDValue
18362 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
18363   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
18364 
18365   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18366   // global base reg.
18367   unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18368 
18369   auto PtrVT = getPointerTy(DAG.getDataLayout());
18370   SDValue Result = DAG.getTargetConstantPool(
18371       CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
18372   SDLoc DL(CP);
18373   Result =
18374       DAG.getNode(getGlobalWrapperKind(nullptr, OpFlag), DL, PtrVT, Result);
18375   // With PIC, the address is actually $g + Offset.
18376   if (OpFlag) {
18377     Result =
18378         DAG.getNode(ISD::ADD, DL, PtrVT,
18379                     DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18380   }
18381 
18382   return Result;
18383 }
18384 
18385 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
18386   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
18387 
18388   // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18389   // global base reg.
18390   unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);
18391 
18392   auto PtrVT = getPointerTy(DAG.getDataLayout());
18393   SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
18394   SDLoc DL(JT);
18395   Result =
18396       DAG.getNode(getGlobalWrapperKind(nullptr, OpFlag), DL, PtrVT, Result);
18397 
18398   // With PIC, the address is actually $g + Offset.
18399   if (OpFlag)
18400     Result =
18401         DAG.getNode(ISD::ADD, DL, PtrVT,
18402                     DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
18403 
18404   return Result;
18405 }
18406 
18407 SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
18408                                                SelectionDAG &DAG) const {
18409   return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18410 }
18411 
18412 SDValue
18413 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
18414   // Create the TargetBlockAddressAddress node.
18415   unsigned char OpFlags =
18416     Subtarget.classifyBlockAddressReference();
18417   const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
18418   int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
18419   SDLoc dl(Op);
18420   auto PtrVT = getPointerTy(DAG.getDataLayout());
18421   SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
18422   Result =
18423       DAG.getNode(getGlobalWrapperKind(nullptr, OpFlags), dl, PtrVT, Result);
18424 
18425   // With PIC, the address is actually $g + Offset.
18426   if (isGlobalRelativeToPICBase(OpFlags)) {
18427     Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18428                          DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18429   }
18430 
18431   return Result;
18432 }
18433 
18434 /// Creates target global address or external symbol nodes for calls or
18435 /// other uses.
18436 SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
18437                                                  bool ForCall) const {
18438   // Unpack the global address or external symbol.
18439   const SDLoc &dl = SDLoc(Op);
18440   const GlobalValue *GV = nullptr;
18441   int64_t Offset = 0;
18442   const char *ExternalSym = nullptr;
18443   if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
18444     GV = G->getGlobal();
18445     Offset = G->getOffset();
18446   } else {
18447     const auto *ES = cast<ExternalSymbolSDNode>(Op);
18448     ExternalSym = ES->getSymbol();
18449   }
18450 
18451   // Calculate some flags for address lowering.
18452   const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
18453   unsigned char OpFlags;
18454   if (ForCall)
18455     OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
18456   else
18457     OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
18458   bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
18459   bool NeedsLoad = isGlobalStubReference(OpFlags);
18460 
18461   CodeModel::Model M = DAG.getTarget().getCodeModel();
18462   auto PtrVT = getPointerTy(DAG.getDataLayout());
18463   SDValue Result;
18464 
18465   if (GV) {
18466     // Create a target global address if this is a global. If possible, fold the
18467     // offset into the global address reference. Otherwise, ADD it on later.
18468     // Suppress the folding if Offset is negative: movl foo-1, %eax is not
18469     // allowed because if the address of foo is 0, the ELF R_X86_64_32
18470     // relocation will compute to a negative value, which is invalid.
18471     int64_t GlobalOffset = 0;
18472     if (OpFlags == X86II::MO_NO_FLAG && Offset >= 0 &&
18473         X86::isOffsetSuitableForCodeModel(Offset, M, true)) {
18474       std::swap(GlobalOffset, Offset);
18475     }
18476     Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
18477   } else {
18478     // If this is not a global address, this must be an external symbol.
18479     Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
18480   }
18481 
18482   // If this is a direct call, avoid the wrapper if we don't need to do any
18483   // loads or adds. This allows SDAG ISel to match direct calls.
18484   if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
18485     return Result;
18486 
18487   Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);
18488 
18489   // With PIC, the address is actually $g + Offset.
18490   if (HasPICReg) {
18491     Result = DAG.getNode(ISD::ADD, dl, PtrVT,
18492                          DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
18493   }
18494 
18495   // For globals that require a load from a stub to get the address, emit the
18496   // load.
18497   if (NeedsLoad)
18498     Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
18499                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18500 
18501   // If there was a non-zero offset that we didn't fold, create an explicit
18502   // addition for it.
18503   if (Offset != 0)
18504     Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
18505                          DAG.getConstant(Offset, dl, PtrVT));
18506 
18507   return Result;
18508 }
18509 
18510 SDValue
18511 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
18512   return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
18513 }
18514 
18515 static SDValue
18516 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
18517            SDValue *InGlue, const EVT PtrVT, unsigned ReturnReg,
18518            unsigned char OperandFlags, bool LocalDynamic = false) {
18519   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18520   SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18521   SDLoc dl(GA);
18522   SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18523                                            GA->getValueType(0),
18524                                            GA->getOffset(),
18525                                            OperandFlags);
18526 
18527   X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
18528                                            : X86ISD::TLSADDR;
18529 
18530   if (InGlue) {
18531     SDValue Ops[] = { Chain,  TGA, *InGlue };
18532     Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18533   } else {
18534     SDValue Ops[]  = { Chain, TGA };
18535     Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
18536   }
18537 
18538   // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
18539   MFI.setAdjustsStack(true);
18540   MFI.setHasCalls(true);
18541 
18542   SDValue Glue = Chain.getValue(1);
18543   return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Glue);
18544 }
18545 
18546 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
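// On 32-bit ELF this typically ends up as a sequence along the lines of
//   leal x@tlsgd(,%ebx,1), %eax
//   call ___tls_get_addr@PLT
// with the result returned in %eax (sketch only; the exact code is produced
// later when the TLSADDR pseudo is expanded).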
18547 static SDValue
18548 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18549                                 const EVT PtrVT) {
18550   SDValue InGlue;
18551   SDLoc dl(GA);  // ? function entry point might be better
18552   SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18553                                    DAG.getNode(X86ISD::GlobalBaseReg,
18554                                                SDLoc(), PtrVT), InGlue);
18555   InGlue = Chain.getValue(1);
18556 
18557   return GetTLSADDR(DAG, Chain, GA, &InGlue, PtrVT, X86::EAX, X86II::MO_TLSGD);
18558 }
18559 
18560 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit LP64
18561 static SDValue
18562 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18563                                 const EVT PtrVT) {
18564   return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
18565                     X86::RAX, X86II::MO_TLSGD);
18566 }
18567 
18568 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit ILP32
18569 static SDValue
18570 LowerToTLSGeneralDynamicModelX32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18571                                  const EVT PtrVT) {
18572   return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
18573                     X86::EAX, X86II::MO_TLSGD);
18574 }
18575 
18576 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
18577                                            SelectionDAG &DAG, const EVT PtrVT,
18578                                            bool Is64Bit, bool Is64BitLP64) {
18579   SDLoc dl(GA);
18580 
18581   // Get the start address of the TLS block for this module.
18582   X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
18583       .getInfo<X86MachineFunctionInfo>();
18584   MFI->incNumLocalDynamicTLSAccesses();
18585 
18586   SDValue Base;
18587   if (Is64Bit) {
18588     unsigned ReturnReg = Is64BitLP64 ? X86::RAX : X86::EAX;
18589     Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, ReturnReg,
18590                       X86II::MO_TLSLD, /*LocalDynamic=*/true);
18591   } else {
18592     SDValue InGlue;
18593     SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
18594         DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InGlue);
18595     InGlue = Chain.getValue(1);
18596     Base = GetTLSADDR(DAG, Chain, GA, &InGlue, PtrVT, X86::EAX,
18597                       X86II::MO_TLSLDM, /*LocalDynamic=*/true);
18598   }
18599 
18600   // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
18601   // of Base.
18602 
18603   // Build x@dtpoff.
18604   unsigned char OperandFlags = X86II::MO_DTPOFF;
18605   unsigned WrapperKind = X86ISD::Wrapper;
18606   SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18607                                            GA->getValueType(0),
18608                                            GA->getOffset(), OperandFlags);
18609   SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18610 
18611   // Add x@dtpoff with the base.
18612   return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
18613 }
18614 
18615 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
18616 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
18617                                    const EVT PtrVT, TLSModel::Model model,
18618                                    bool is64Bit, bool isPIC) {
18619   SDLoc dl(GA);
18620 
18621   // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
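  // In the x86 backend, address space 256 refers to the %gs segment and 257 to
  // the %fs segment, so a null pointer in the chosen address space denotes
  // %gs:0 or %fs:0.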
18622   Value *Ptr = Constant::getNullValue(
18623       PointerType::get(*DAG.getContext(), is64Bit ? 257 : 256));
18624 
18625   SDValue ThreadPointer =
18626       DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
18627                   MachinePointerInfo(Ptr));
18628 
18629   unsigned char OperandFlags = 0;
18630   // Most TLS accesses are not RIP relative, even on x86-64.  One exception is
18631   // initialexec.
18632   unsigned WrapperKind = X86ISD::Wrapper;
18633   if (model == TLSModel::LocalExec) {
18634     OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
18635   } else if (model == TLSModel::InitialExec) {
18636     if (is64Bit) {
18637       OperandFlags = X86II::MO_GOTTPOFF;
18638       WrapperKind = X86ISD::WrapperRIP;
18639     } else {
18640       OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
18641     }
18642   } else {
18643     llvm_unreachable("Unexpected model");
18644   }
18645 
18646   // emit "addl x@ntpoff,%eax" (local exec)
18647   // or "addl x@indntpoff,%eax" (initial exec)
18648   // or "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
18649   SDValue TGA =
18650       DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
18651                                  GA->getOffset(), OperandFlags);
18652   SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
18653 
18654   if (model == TLSModel::InitialExec) {
18655     if (isPIC && !is64Bit) {
18656       Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
18657                            DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18658                            Offset);
18659     }
18660 
18661     Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
18662                          MachinePointerInfo::getGOT(DAG.getMachineFunction()));
18663   }
18664 
18665   // The address of the thread-local variable is the sum of the thread
18666   // pointer and the offset of the variable.
18667   return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
18668 }
18669 
18670 SDValue
18671 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
18672 
18673   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
18674 
18675   if (DAG.getTarget().useEmulatedTLS())
18676     return LowerToTLSEmulatedModel(GA, DAG);
18677 
18678   const GlobalValue *GV = GA->getGlobal();
18679   auto PtrVT = getPointerTy(DAG.getDataLayout());
18680   bool PositionIndependent = isPositionIndependent();
18681 
18682   if (Subtarget.isTargetELF()) {
18683     TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
18684     switch (model) {
18685       case TLSModel::GeneralDynamic:
18686         if (Subtarget.is64Bit()) {
18687           if (Subtarget.isTarget64BitLP64())
18688             return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
18689           return LowerToTLSGeneralDynamicModelX32(GA, DAG, PtrVT);
18690         }
18691         return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
18692       case TLSModel::LocalDynamic:
18693         return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT, Subtarget.is64Bit(),
18694                                            Subtarget.isTarget64BitLP64());
18695       case TLSModel::InitialExec:
18696       case TLSModel::LocalExec:
18697         return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
18698                                    PositionIndependent);
18699     }
18700     llvm_unreachable("Unknown TLS model.");
18701   }
18702 
18703   if (Subtarget.isTargetDarwin()) {
18704     // Darwin only has one model of TLS.  Lower to that.
18705     unsigned char OpFlag = 0;
18706     unsigned WrapperKind = 0;
18707 
18708     // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
18709     // global base reg.
18710     bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
18711     if (PIC32) {
18712       OpFlag = X86II::MO_TLVP_PIC_BASE;
18713       WrapperKind = X86ISD::Wrapper;
18714     } else {
18715       OpFlag = X86II::MO_TLVP;
18716       WrapperKind = X86ISD::WrapperRIP;
18717     }
18718     SDLoc DL(Op);
18719     SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
18720                                                 GA->getValueType(0),
18721                                                 GA->getOffset(), OpFlag);
18722     SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);
18723 
18724     // With PIC32, the address is actually $g + Offset.
18725     if (PIC32)
18726       Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
18727                            DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
18728                            Offset);
18729 
18730     // Lowering the machine isd will make sure everything is in the right
18731     // location.
18732     SDValue Chain = DAG.getEntryNode();
18733     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
18734     Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
18735     SDValue Args[] = { Chain, Offset };
18736     Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
18737     Chain = DAG.getCALLSEQ_END(Chain, 0, 0, Chain.getValue(1), DL);
18738 
18739     // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
18740     MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
18741     MFI.setAdjustsStack(true);
18742 
18743     // And our return value (tls address) is in the standard call return value
18744     // location.
18745     unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
18746     return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
18747   }
18748 
18749   if (Subtarget.isOSWindows()) {
18750     // Just use the implicit TLS architecture.
18751     // We need to generate something similar to:
18752     //   mov     rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
18753     //                                  ; from TEB
18754     //   mov     ecx, dword [rel _tls_index]; Load index (from C runtime)
18755     //   mov     rcx, qword [rdx+rcx*8]
18756     //   mov     eax, .tls$:tlsvar
18757     //   [rax+rcx] contains the address
18758     // Windows 64bit: gs:0x58
18759     // Windows 32bit: fs:__tls_array
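    //
    // Roughly, this lowers to the C-level equivalent of
    //   char *TlsBase = TEB->ThreadLocalStoragePointer[_tls_index];
    //   return TlsBase + <SECREL offset of the variable within .tls>;
    // where ThreadLocalStoragePointer lives at gs:[0x58] in the 64-bit TEB and
    // fs:[0x2C] in the 32-bit TEB (the local-exec case skips the index load
    // and implicitly uses slot 0). This is an illustrative sketch, not a
    // literal transcription of the DAG built below.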
18760 
18761     SDLoc dl(GA);
18762     SDValue Chain = DAG.getEntryNode();
18763 
18764     // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
18765     // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
18766     // use its literal value of 0x2C.
18767     Value *Ptr = Constant::getNullValue(
18768         Subtarget.is64Bit() ? PointerType::get(*DAG.getContext(), 256)
18769                             : PointerType::get(*DAG.getContext(), 257));
18770 
18771     SDValue TlsArray = Subtarget.is64Bit()
18772                            ? DAG.getIntPtrConstant(0x58, dl)
18773                            : (Subtarget.isTargetWindowsGNU()
18774                                   ? DAG.getIntPtrConstant(0x2C, dl)
18775                                   : DAG.getExternalSymbol("_tls_array", PtrVT));
18776 
18777     SDValue ThreadPointer =
18778         DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));
18779 
18780     SDValue res;
18781     if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
18782       res = ThreadPointer;
18783     } else {
18784       // Load the _tls_index variable
18785       SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
18786       if (Subtarget.is64Bit())
18787         IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
18788                              MachinePointerInfo(), MVT::i32);
18789       else
18790         IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());
18791 
18792       const DataLayout &DL = DAG.getDataLayout();
18793       SDValue Scale =
18794           DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
18795       IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);
18796 
18797       res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
18798     }
18799 
18800     res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());
18801 
18802     // Get the offset of start of .tls section
18803     SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
18804                                              GA->getValueType(0),
18805                                              GA->getOffset(), X86II::MO_SECREL);
18806     SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);
18807 
18808     // The address of the thread local variable is the add of the thread
18809     // pointer with the offset of the variable.
18810     return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
18811   }
18812 
18813   llvm_unreachable("TLS not implemented for this target.");
18814 }
18815 
18816 /// Lower SRA_PARTS and friends, which return two i32 values
18817 /// and take a 2 x i32 value to shift plus a shift amount.
18818 /// TODO: Can this be moved to general expansion code?
18819 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
18820   SDValue Lo, Hi;
18821   DAG.getTargetLoweringInfo().expandShiftParts(Op.getNode(), Lo, Hi, DAG);
18822   return DAG.getMergeValues({Lo, Hi}, SDLoc(Op));
18823 }
18824 
18825 // Try to use a packed vector operation to handle i64 on 32-bit targets when
18826 // AVX512DQ is enabled.
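//
// Roughly, the transform is (NumElts is 4 with VLX, 8 otherwise):
//   sint_to_fp i64 %x
//     --> extract_vector_elt (v8f64 sint_to_fp (v8i64 scalar_to_vector %x)), 0
// which lets the conversion use VCVTQQ2PD/VCVTQQ2PS instead of going through
// the scalar i64 path a 32-bit target would otherwise need. (Sketch only; the
// f32 case uses the corresponding f32 vector types.)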
18827 static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
18828                                         const X86Subtarget &Subtarget) {
18829   assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18830           Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
18831           Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
18832           Op.getOpcode() == ISD::UINT_TO_FP) &&
18833          "Unexpected opcode!");
18834   bool IsStrict = Op->isStrictFPOpcode();
18835   unsigned OpNo = IsStrict ? 1 : 0;
18836   SDValue Src = Op.getOperand(OpNo);
18837   MVT SrcVT = Src.getSimpleValueType();
18838   MVT VT = Op.getSimpleValueType();
18839 
18840   if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
18841       (VT != MVT::f32 && VT != MVT::f64))
18842     return SDValue();
18843 
18844   // Pack the i64 into a vector, do the operation and extract.
18845 
18846   // Use a 256-bit vector to ensure the result is 128 bits for the f32 case.
18847   unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
18848   MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
18849   MVT VecVT = MVT::getVectorVT(VT, NumElts);
18850 
18851   SDLoc dl(Op);
18852   SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
18853   if (IsStrict) {
18854     SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
18855                                  {Op.getOperand(0), InVec});
18856     SDValue Chain = CvtVec.getValue(1);
18857     SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18858                                 DAG.getIntPtrConstant(0, dl));
18859     return DAG.getMergeValues({Value, Chain}, dl);
18860   }
18861 
18862   SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);
18863 
18864   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18865                      DAG.getIntPtrConstant(0, dl));
18866 }
18867 
18868 // Try to use a packed vector operation to handle i64 on 32-bit targets.
18869 static SDValue LowerI64IntToFP16(SDValue Op, SelectionDAG &DAG,
18870                                  const X86Subtarget &Subtarget) {
18871   assert((Op.getOpcode() == ISD::SINT_TO_FP ||
18872           Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
18873           Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
18874           Op.getOpcode() == ISD::UINT_TO_FP) &&
18875          "Unexpected opcode!");
18876   bool IsStrict = Op->isStrictFPOpcode();
18877   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
18878   MVT SrcVT = Src.getSimpleValueType();
18879   MVT VT = Op.getSimpleValueType();
18880 
18881   if (SrcVT != MVT::i64 || Subtarget.is64Bit() || VT != MVT::f16)
18882     return SDValue();
18883 
18884   // Pack the i64 into a vector, do the operation and extract.
18885 
18886   assert(Subtarget.hasFP16() && "Expected FP16");
18887 
18888   SDLoc dl(Op);
18889   SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
18890   if (IsStrict) {
18891     SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {MVT::v2f16, MVT::Other},
18892                                  {Op.getOperand(0), InVec});
18893     SDValue Chain = CvtVec.getValue(1);
18894     SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18895                                 DAG.getIntPtrConstant(0, dl));
18896     return DAG.getMergeValues({Value, Chain}, dl);
18897   }
18898 
18899   SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, MVT::v2f16, InVec);
18900 
18901   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
18902                      DAG.getIntPtrConstant(0, dl));
18903 }
18904 
18905 static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
18906                           const X86Subtarget &Subtarget) {
18907   switch (Opcode) {
18908     case ISD::SINT_TO_FP:
18909       // TODO: Handle wider types with AVX/AVX512.
18910       if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
18911         return false;
18912       // CVTDQ2PS or (V)CVTDQ2PD
18913       return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);
18914 
18915     case ISD::UINT_TO_FP:
18916       // TODO: Handle wider types and i64 elements.
18917       if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
18918         return false;
18919       // VCVTUDQ2PS or VCVTUDQ2PD
18920       return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;
18921 
18922     default:
18923       return false;
18924   }
18925 }
18926 
18927 /// Given a scalar cast operation that is extracted from a vector, try to
18928 /// vectorize the cast op followed by extraction. This will avoid an expensive
18929 /// round-trip between XMM and GPR.
18930 static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
18931                                       const X86Subtarget &Subtarget) {
18932   // TODO: This could be enhanced to handle smaller integer types by peeking
18933   // through an extend.
18934   SDValue Extract = Cast.getOperand(0);
18935   MVT DestVT = Cast.getSimpleValueType();
18936   if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
18937       !isa<ConstantSDNode>(Extract.getOperand(1)))
18938     return SDValue();
18939 
18940   // See if we have a 128-bit vector cast op for this type of cast.
18941   SDValue VecOp = Extract.getOperand(0);
18942   MVT FromVT = VecOp.getSimpleValueType();
18943   unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
18944   MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
18945   MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
18946   if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
18947     return SDValue();
18948 
18949   // If we are extracting from a non-zero element, first shuffle the source
18950   // vector to allow extracting from element zero.
18951   SDLoc DL(Cast);
18952   if (!isNullConstant(Extract.getOperand(1))) {
18953     SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
18954     Mask[0] = Extract.getConstantOperandVal(1);
18955     VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
18956   }
18957   // If the source vector is wider than 128-bits, extract the low part. Do not
18958   // create an unnecessarily wide vector cast op.
18959   if (FromVT != Vec128VT)
18960     VecOp = extract128BitVector(VecOp, 0, DAG, DL);
18961 
18962   // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
18963   // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
18964   SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
18965   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
18966                      DAG.getIntPtrConstant(0, DL));
18967 }
18968 
18969 /// Given a scalar cast to FP with a cast to integer operand (almost an ftrunc),
18970 /// try to vectorize the cast ops. This will avoid an expensive round-trip
18971 /// between XMM and GPR.
18972 static SDValue lowerFPToIntToFP(SDValue CastToFP, SelectionDAG &DAG,
18973                                 const X86Subtarget &Subtarget) {
18974   // TODO: Allow FP_TO_UINT.
18975   SDValue CastToInt = CastToFP.getOperand(0);
18976   MVT VT = CastToFP.getSimpleValueType();
18977   if (CastToInt.getOpcode() != ISD::FP_TO_SINT || VT.isVector())
18978     return SDValue();
18979 
18980   MVT IntVT = CastToInt.getSimpleValueType();
18981   SDValue X = CastToInt.getOperand(0);
18982   MVT SrcVT = X.getSimpleValueType();
18983   if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
18984     return SDValue();
18985 
18986   // See if we have 128-bit vector cast instructions for this type of cast.
18987   // We need cvttps2dq/cvttpd2dq and cvtdq2ps/cvtdq2pd.
18988   if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) ||
18989       IntVT != MVT::i32)
18990     return SDValue();
18991 
18992   unsigned SrcSize = SrcVT.getSizeInBits();
18993   unsigned IntSize = IntVT.getSizeInBits();
18994   unsigned VTSize = VT.getSizeInBits();
18995   MVT VecSrcVT = MVT::getVectorVT(SrcVT, 128 / SrcSize);
18996   MVT VecIntVT = MVT::getVectorVT(IntVT, 128 / IntSize);
18997   MVT VecVT = MVT::getVectorVT(VT, 128 / VTSize);
18998 
18999   // We need target-specific opcodes if this is v2f64 -> v4i32 -> v2f64.
19000   unsigned ToIntOpcode =
19001       SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
19002   unsigned ToFPOpcode =
19003       IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;
19004 
19005   // sint_to_fp (fp_to_sint X) --> extelt (sint_to_fp (fp_to_sint (s2v X))), 0
19006   //
19007   // We deliberately leave the high elements undefined (rather than, say,
19008   // zeroing them) because defining them could nullify any performance
19009   // advantage we hoped to gain from this vector-op hack. We do not expect
19010   // any adverse effects (like denormal penalties) with cast ops.
19011   SDLoc DL(CastToFP);
19012   SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
19013   SDValue VecX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, X);
19014   SDValue VCastToInt = DAG.getNode(ToIntOpcode, DL, VecIntVT, VecX);
19015   SDValue VCastToFP = DAG.getNode(ToFPOpcode, DL, VecVT, VCastToInt);
19016   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VCastToFP, ZeroIdx);
19017 }
19018 
19019 static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
19020                                     const X86Subtarget &Subtarget) {
19021   SDLoc DL(Op);
19022   bool IsStrict = Op->isStrictFPOpcode();
19023   MVT VT = Op->getSimpleValueType(0);
19024   SDValue Src = Op->getOperand(IsStrict ? 1 : 0);
19025 
19026   if (Subtarget.hasDQI()) {
19027     assert(!Subtarget.hasVLX() && "Unexpected features");
19028 
19029     assert((Src.getSimpleValueType() == MVT::v2i64 ||
19030             Src.getSimpleValueType() == MVT::v4i64) &&
19031            "Unsupported custom type");
19032 
19033     // With AVX512DQ, but not VLX we need to widen to get a 512-bit result type.
19034     assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
19035            "Unexpected VT!");
19036     MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
19037 
19038     // Need to concat with zero vector for strict fp to avoid spurious
19039     // exceptions.
19040     SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
19041                            : DAG.getUNDEF(MVT::v8i64);
19042     Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
19043                       DAG.getIntPtrConstant(0, DL));
19044     SDValue Res, Chain;
19045     if (IsStrict) {
19046       Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
19047                         {Op->getOperand(0), Src});
19048       Chain = Res.getValue(1);
19049     } else {
19050       Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
19051     }
19052 
19053     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19054                       DAG.getIntPtrConstant(0, DL));
19055 
19056     if (IsStrict)
19057       return DAG.getMergeValues({Res, Chain}, DL);
19058     return Res;
19059   }
19060 
19061   bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
19062                   Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
19063   if (VT != MVT::v4f32 || IsSigned)
19064     return SDValue();
19065 
19066   SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
19067   SDValue One  = DAG.getConstant(1, DL, MVT::v4i64);
19068   SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
19069                              DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
19070                              DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
19071   SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
19072   SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
19073   SmallVector<SDValue, 4> SignCvts(4);
19074   SmallVector<SDValue, 4> Chains(4);
19075   for (int i = 0; i != 4; ++i) {
19076     SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
19077                               DAG.getIntPtrConstant(i, DL));
19078     if (IsStrict) {
19079       SignCvts[i] =
19080           DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
19081                       {Op.getOperand(0), Elt});
19082       Chains[i] = SignCvts[i].getValue(1);
19083     } else {
19084       SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Elt);
19085     }
19086   }
19087   SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);
19088 
19089   SDValue Slow, Chain;
19090   if (IsStrict) {
19091     Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
19092     Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
19093                        {Chain, SignCvt, SignCvt});
19094     Chain = Slow.getValue(1);
19095   } else {
19096     Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
19097   }
19098 
19099   IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
19100   SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);
19101 
19102   if (IsStrict)
19103     return DAG.getMergeValues({Cvt, Chain}, DL);
19104 
19105   return Cvt;
19106 }
19107 
19108 static SDValue promoteXINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
19109   bool IsStrict = Op->isStrictFPOpcode();
19110   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
19111   SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
19112   MVT VT = Op.getSimpleValueType();
19113   MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
19114   SDLoc dl(Op);
19115 
19116   SDValue Rnd = DAG.getIntPtrConstant(0, dl);
19117   if (IsStrict)
19118     return DAG.getNode(
19119         ISD::STRICT_FP_ROUND, dl, {VT, MVT::Other},
19120         {Chain,
19121          DAG.getNode(Op.getOpcode(), dl, {NVT, MVT::Other}, {Chain, Src}),
19122          Rnd});
19123   return DAG.getNode(ISD::FP_ROUND, dl, VT,
19124                      DAG.getNode(Op.getOpcode(), dl, NVT, Src), Rnd);
19125 }
19126 
19127 static bool isLegalConversion(MVT VT, bool IsSigned,
19128                               const X86Subtarget &Subtarget) {
19129   if (VT == MVT::v4i32 && Subtarget.hasSSE2() && IsSigned)
19130     return true;
19131   if (VT == MVT::v8i32 && Subtarget.hasAVX() && IsSigned)
19132     return true;
19133   if (Subtarget.hasVLX() && (VT == MVT::v4i32 || VT == MVT::v8i32))
19134     return true;
19135   if (Subtarget.useAVX512Regs()) {
19136     if (VT == MVT::v16i32)
19137       return true;
19138     if (VT == MVT::v8i64 && Subtarget.hasDQI())
19139       return true;
19140   }
19141   if (Subtarget.hasDQI() && Subtarget.hasVLX() &&
19142       (VT == MVT::v2i64 || VT == MVT::v4i64))
19143     return true;
19144   return false;
19145 }
19146 
19147 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
19148                                            SelectionDAG &DAG) const {
19149   bool IsStrict = Op->isStrictFPOpcode();
19150   unsigned OpNo = IsStrict ? 1 : 0;
19151   SDValue Src = Op.getOperand(OpNo);
19152   SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
19153   MVT SrcVT = Src.getSimpleValueType();
19154   MVT VT = Op.getSimpleValueType();
19155   SDLoc dl(Op);
19156 
19157   if (isSoftF16(VT, Subtarget))
19158     return promoteXINT_TO_FP(Op, DAG);
19159   else if (isLegalConversion(SrcVT, true, Subtarget))
19160     return Op;
19161 
19162   if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
19163     return LowerWin64_INT128_TO_FP(Op, DAG);
19164 
19165   if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19166     return Extract;
19167 
19168   if (SDValue R = lowerFPToIntToFP(Op, DAG, Subtarget))
19169     return R;
19170 
19171   if (SrcVT.isVector()) {
19172     if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
19173       // Note: Since v2f64 is a legal type, we don't need to zero-extend the
19174       // source for strict FP.
19175       if (IsStrict)
19176         return DAG.getNode(
19177             X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
19178             {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
19179                                 DAG.getUNDEF(SrcVT))});
19180       return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
19181                          DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
19182                                      DAG.getUNDEF(SrcVT)));
19183     }
19184     if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
19185       return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19186 
19187     return SDValue();
19188   }
19189 
19190   assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
19191          "Unknown SINT_TO_FP to lower!");
19192 
19193   bool UseSSEReg = isScalarFPTypeInSSEReg(VT);
19194 
19195   // These are really Legal; return the operand so the caller accepts it as
19196   // Legal.
19197   if (SrcVT == MVT::i32 && UseSSEReg)
19198     return Op;
19199   if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
19200     return Op;
19201 
19202   if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19203     return V;
19204   if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
19205     return V;
19206 
19207   // SSE doesn't have an i16 conversion so we need to promote.
19208   if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
19209     SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
19210     if (IsStrict)
19211       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
19212                          {Chain, Ext});
19213 
19214     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
19215   }
19216 
19217   if (VT == MVT::f128 || !Subtarget.hasX87())
19218     return SDValue();
19219 
19220   SDValue ValueToStore = Src;
19221   if (SrcVT == MVT::i64 && Subtarget.hasSSE2() && !Subtarget.is64Bit())
19222     // Bitcasting to f64 here allows us to do a single 64-bit store from
19223     // an SSE register, avoiding the store forwarding penalty that would come
19224     // with two 32-bit stores.
19225     ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19226 
19227   unsigned Size = SrcVT.getStoreSize();
19228   Align Alignment(Size);
19229   MachineFunction &MF = DAG.getMachineFunction();
19230   auto PtrVT = getPointerTy(MF.getDataLayout());
19231   int SSFI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false);
19232   MachinePointerInfo MPI =
19233       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
19234   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19235   Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Alignment);
19236   std::pair<SDValue, SDValue> Tmp =
19237       BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Alignment, DAG);
19238 
19239   if (IsStrict)
19240     return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19241 
19242   return Tmp.first;
19243 }
19244 
19245 std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
19246     EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
19247     MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {
19248   // Build the FILD
19249   SDVTList Tys;
19250   bool useSSE = isScalarFPTypeInSSEReg(DstVT);
19251   if (useSSE)
19252     Tys = DAG.getVTList(MVT::f80, MVT::Other);
19253   else
19254     Tys = DAG.getVTList(DstVT, MVT::Other);
19255 
19256   SDValue FILDOps[] = {Chain, Pointer};
19257   SDValue Result =
19258       DAG.getMemIntrinsicNode(X86ISD::FILD, DL, Tys, FILDOps, SrcVT, PtrInfo,
19259                               Alignment, MachineMemOperand::MOLoad);
19260   Chain = Result.getValue(1);
19261 
19262   if (useSSE) {
19263     MachineFunction &MF = DAG.getMachineFunction();
19264     unsigned SSFISize = DstVT.getStoreSize();
19265     int SSFI =
19266         MF.getFrameInfo().CreateStackObject(SSFISize, Align(SSFISize), false);
19267     auto PtrVT = getPointerTy(MF.getDataLayout());
19268     SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19269     Tys = DAG.getVTList(MVT::Other);
19270     SDValue FSTOps[] = {Chain, Result, StackSlot};
19271     MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
19272         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
19273         MachineMemOperand::MOStore, SSFISize, Align(SSFISize));
19274 
19275     Chain =
19276         DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps, DstVT, StoreMMO);
19277     Result = DAG.getLoad(
19278         DstVT, DL, Chain, StackSlot,
19279         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
19280     Chain = Result.getValue(1);
19281   }
19282 
19283   return { Result, Chain };
19284 }
19285 
19286 /// Horizontal vector math instructions may be slower than normal math with
19287 /// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
19288 /// implementation, and likely shuffle complexity of the alternate sequence.
19289 static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
19290                                   const X86Subtarget &Subtarget) {
19291   bool IsOptimizingSize = DAG.shouldOptForSize();
19292   bool HasFastHOps = Subtarget.hasFastHorizontalOps();
19293   return !IsSingleSource || IsOptimizingSize || HasFastHOps;
19294 }
19295 
19296 /// 64-bit unsigned integer to double expansion.
19297 static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
19298                                    const X86Subtarget &Subtarget) {
19299   // We can't use this algorithm for strict FP: it produces -0.0 instead of
19300   // +0.0 when converting 0 while rounding toward negative infinity. The caller
19301   // will fall back to Expand when i64 is legal, or use FILD in 32-bit mode.
19302   assert(!Op->isStrictFPOpcode() && "Expected non-strict uint_to_fp!");
19303   // This algorithm is not obvious. Here is what we're trying to output:
19304   /*
19305      movq       %rax,  %xmm0
19306      punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
19307      subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
19308      #ifdef __SSE3__
19309        haddpd   %xmm0, %xmm0
19310      #else
19311        pshufd   $0x4e, %xmm0, %xmm1
19312        addpd    %xmm1, %xmm0
19313      #endif
19314   */
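  //
  // A rough sketch of why the constants work: writing the u64 input as
  // x = Hi * 2^32 + Lo (two u32 halves), the punpckldq produces the exact bit
  // patterns of the doubles (2^52 + Lo) and (2^84 + Hi * 2^32), the subpd
  // removes the 2^52 / 2^84 biases exactly, and the final add computes
  //   (double)Lo + (double)Hi * 2^32 == (double)x
  // with a single rounding in that last add.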
19315 
19316   SDLoc dl(Op);
19317   LLVMContext *Context = DAG.getContext();
19318 
19319   // Build some magic constants.
19320   static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
19321   Constant *C0 = ConstantDataVector::get(*Context, CV0);
19322   auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19323   SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, Align(16));
19324 
19325   SmallVector<Constant*,2> CV1;
19326   CV1.push_back(
19327     ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
19328                                       APInt(64, 0x4330000000000000ULL))));
19329   CV1.push_back(
19330     ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
19331                                       APInt(64, 0x4530000000000000ULL))));
19332   Constant *C1 = ConstantVector::get(CV1);
19333   SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, Align(16));
19334 
19335   // Load the 64-bit value into an XMM register.
19336   SDValue XR1 =
19337       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(0));
19338   SDValue CLod0 = DAG.getLoad(
19339       MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
19340       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
19341   SDValue Unpck1 =
19342       getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);
19343 
19344   SDValue CLod1 = DAG.getLoad(
19345       MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
19346       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(16));
19347   SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
19348   // TODO: Are there any fast-math-flags to propagate here?
19349   SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
19350   SDValue Result;
19351 
19352   if (Subtarget.hasSSE3() &&
19353       shouldUseHorizontalOp(true, DAG, Subtarget)) {
19354     Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
19355   } else {
19356     SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
19357     Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
19358   }
19359   Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
19360                        DAG.getIntPtrConstant(0, dl));
19361   return Result;
19362 }
19363 
19364 /// 32-bit unsigned integer to float expansion.
19365 static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
19366                                    const X86Subtarget &Subtarget) {
19367   unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19368   SDLoc dl(Op);
19369   // FP constant to bias correct the final result.
19370   SDValue Bias = DAG.getConstantFP(
19371       llvm::bit_cast<double>(0x4330000000000000ULL), dl, MVT::f64);
19372 
19373   // Load the 32-bit value into an XMM register.
19374   SDValue Load =
19375       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));
19376 
19377   // Zero out the upper parts of the register.
19378   Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
19379 
19380   // Or the load with the bias.
19381   SDValue Or = DAG.getNode(
19382       ISD::OR, dl, MVT::v2i64,
19383       DAG.getBitcast(MVT::v2i64, Load),
19384       DAG.getBitcast(MVT::v2i64,
19385                      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
19386   Or =
19387       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
19388                   DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));
19389 
19390   if (Op.getNode()->isStrictFPOpcode()) {
19391     // Subtract the bias.
19392     // TODO: Are there any fast-math-flags to propagate here?
19393     SDValue Chain = Op.getOperand(0);
19394     SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
19395                               {Chain, Or, Bias});
19396 
19397     if (Op.getValueType() == Sub.getValueType())
19398       return Sub;
19399 
19400     // Handle final rounding.
19401     std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
19402         Sub, Sub.getValue(1), dl, Op.getSimpleValueType());
19403 
19404     return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
19405   }
19406 
19407   // Subtract the bias.
19408   // TODO: Are there any fast-math-flags to propagate here?
19409   SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
19410 
19411   // Handle final rounding.
19412   return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
19413 }
19414 
19415 static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
19416                                      const X86Subtarget &Subtarget,
19417                                      const SDLoc &DL) {
19418   if (Op.getSimpleValueType() != MVT::v2f64)
19419     return SDValue();
19420 
19421   bool IsStrict = Op->isStrictFPOpcode();
19422 
19423   SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
19424   assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");
19425 
19426   if (Subtarget.hasAVX512()) {
19427     if (!Subtarget.hasVLX()) {
19428       // Let generic type legalization widen this.
19429       if (!IsStrict)
19430         return SDValue();
19431       // Otherwise pad the integer input with 0s and widen the operation.
19432       N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19433                        DAG.getConstant(0, DL, MVT::v2i32));
19434       SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
19435                                 {Op.getOperand(0), N0});
19436       SDValue Chain = Res.getValue(1);
19437       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
19438                         DAG.getIntPtrConstant(0, DL));
19439       return DAG.getMergeValues({Res, Chain}, DL);
19440     }
19441 
19442     // Legalize to v4i32 type.
19443     N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
19444                      DAG.getUNDEF(MVT::v2i32));
19445     if (IsStrict)
19446       return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
19447                          {Op.getOperand(0), N0});
19448     return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
19449   }
19450 
19451   // Zero extend to 2i64, OR with the floating point representation of 2^52.
19452   // This gives us the floating point equivalent of 2^52 + the i32 integer
19453   // since double has 52-bits of mantissa. Then subtract 2^52 in floating
19454   // point leaving just our i32 integers in double format.
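  // A minimal scalar sketch of the same trick (illustrative only; the helper
  // name is made up, and the SSE sequence does this on both lanes at once):
  //   static double U32ToF64ViaBias(uint32_t X) {
  //     uint64_t Bits = 0x4330000000000000ULL | X; // bits of 2^52 + X, exact
  //     double D;
  //     memcpy(&D, &Bits, sizeof(D));              // the zext/OR/bitcast step
  //     return D - 0x1.0p52;                       // the FSUB removes the bias
  //   }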
19455   SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
19456   SDValue VBias = DAG.getConstantFP(
19457       llvm::bit_cast<double>(0x4330000000000000ULL), DL, MVT::v2f64);
19458   SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
19459                            DAG.getBitcast(MVT::v2i64, VBias));
19460   Or = DAG.getBitcast(MVT::v2f64, Or);
19461 
19462   if (IsStrict)
19463     return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
19464                        {Op.getOperand(0), Or, VBias});
19465   return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
19466 }
19467 
19468 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
19469                                      const X86Subtarget &Subtarget) {
19470   SDLoc DL(Op);
19471   bool IsStrict = Op->isStrictFPOpcode();
19472   SDValue V = Op->getOperand(IsStrict ? 1 : 0);
19473   MVT VecIntVT = V.getSimpleValueType();
19474   assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
19475          "Unsupported custom type");
19476 
19477   if (Subtarget.hasAVX512()) {
19478     // With AVX512, but not VLX we need to widen to get a 512-bit result type.
19479     assert(!Subtarget.hasVLX() && "Unexpected features");
19480     MVT VT = Op->getSimpleValueType(0);
19481 
19482     // v8i32->v8f64 is legal with AVX512 so just return it.
19483     if (VT == MVT::v8f64)
19484       return Op;
19485 
19486     assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
19487            "Unexpected VT!");
19488     MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
19489     MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
19490     // Need to concat with zero vector for strict fp to avoid spurious
19491     // exceptions.
19492     SDValue Tmp =
19493         IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
19494     V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
19495                     DAG.getIntPtrConstant(0, DL));
19496     SDValue Res, Chain;
19497     if (IsStrict) {
19498       Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
19499                         {Op->getOperand(0), V});
19500       Chain = Res.getValue(1);
19501     } else {
19502       Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
19503     }
19504 
19505     Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
19506                       DAG.getIntPtrConstant(0, DL));
19507 
19508     if (IsStrict)
19509       return DAG.getMergeValues({Res, Chain}, DL);
19510     return Res;
19511   }
19512 
19513   if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
19514       Op->getSimpleValueType(0) == MVT::v4f64) {
19515     SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
19516     Constant *Bias = ConstantFP::get(
19517         *DAG.getContext(),
19518         APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
19519     auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
19520     SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, Align(8));
19521     SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
19522     SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
19523     SDValue VBias = DAG.getMemIntrinsicNode(
19524         X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
19525         MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(8),
19526         MachineMemOperand::MOLoad);
19527 
19528     SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
19529                              DAG.getBitcast(MVT::v4i64, VBias));
19530     Or = DAG.getBitcast(MVT::v4f64, Or);
19531 
19532     if (IsStrict)
19533       return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
19534                          {Op.getOperand(0), Or, VBias});
19535     return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
19536   }
19537 
19538   // The algorithm is the following:
19539   // #ifdef __SSE4_1__
19540   //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19541   //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19542   //                                 (uint4) 0x53000000, 0xaa);
19543   // #else
19544   //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19545   //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
19546   // #endif
19547   //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19548   //     return (float4) lo + fhi;
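  //
  //     Why this works, roughly: reinterpreting (lo16 | 0x4b000000) as a float
  //     gives 2^23 + lo16, and ((v >> 16) | 0x53000000) gives
  //     2^39 + hi16 * 2^16, so lo + (hi - (2^39 + 2^23)) == lo16 + hi16 * 2^16
  //     == v, with the subtraction exact and a single rounding in the final add.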
19549 
19550   bool Is128 = VecIntVT == MVT::v4i32;
19551   MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
19552   // If we convert to something other than the supported type, e.g., to v4f64,
19553   // abort early.
19554   if (VecFloatVT != Op->getSimpleValueType(0))
19555     return SDValue();
19556 
19557   // In the #ifdef/#else code above, we have in common:
19558   // - The vector of constants:
19559   // -- 0x4b000000
19560   // -- 0x53000000
19561   // - A shift:
19562   // -- v >> 16
19563 
19564   // Create the splat vector for 0x4b000000.
19565   SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
19566   // Create the splat vector for 0x53000000.
19567   SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
19568 
19569   // Create the right shift.
19570   SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
19571   SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
19572 
19573   SDValue Low, High;
19574   if (Subtarget.hasSSE41()) {
19575     MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
19576     //     uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19577     SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
19578     SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
19579     // Low will be bitcasted right away, so do not bother bitcasting back to its
19580     // original type.
19581     Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
19582                       VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19583     //     uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19584     //                                 (uint4) 0x53000000, 0xaa);
19585     SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
19586     SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
19587     // High will be bitcasted right away, so do not bother bitcasting back to
19588     // its original type.
19589     High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
19590                        VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19591   } else {
19592     SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
19593     //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19594     SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
19595     Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
19596 
19597     //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
19598     High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
19599   }
19600 
19601   // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
19602   SDValue VecCstFSub = DAG.getConstantFP(
19603       APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
19604 
19605   //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19606   // NOTE: By using fsub of a positive constant instead of fadd of a negative
19607   // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
19608   // enabled. See PR24512.
19609   SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
19610   // TODO: Are there any fast-math-flags to propagate here?
19611   //     (float4) lo;
19612   SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
19613   //     return (float4) lo + fhi;
19614   if (IsStrict) {
19615     SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
19616                                 {Op.getOperand(0), HighBitcast, VecCstFSub});
19617     return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
19618                        {FHigh.getValue(1), LowBitcast, FHigh});
19619   }
19620 
19621   SDValue FHigh =
19622       DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
19623   return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
19624 }
19625 
19626 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
19627                                    const X86Subtarget &Subtarget) {
19628   unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19629   SDValue N0 = Op.getOperand(OpNo);
19630   MVT SrcVT = N0.getSimpleValueType();
19631   SDLoc dl(Op);
19632 
19633   switch (SrcVT.SimpleTy) {
19634   default:
19635     llvm_unreachable("Custom UINT_TO_FP is not supported!");
19636   case MVT::v2i32:
19637     return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
19638   case MVT::v4i32:
19639   case MVT::v8i32:
19640     return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
19641   case MVT::v2i64:
19642   case MVT::v4i64:
19643     return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19644   }
19645 }
19646 
19647 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
19648                                            SelectionDAG &DAG) const {
19649   bool IsStrict = Op->isStrictFPOpcode();
19650   unsigned OpNo = IsStrict ? 1 : 0;
19651   SDValue Src = Op.getOperand(OpNo);
19652   SDLoc dl(Op);
19653   auto PtrVT = getPointerTy(DAG.getDataLayout());
19654   MVT SrcVT = Src.getSimpleValueType();
19655   MVT DstVT = Op->getSimpleValueType(0);
19656   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19657 
19658   // Bail out when we don't have native conversion instructions.
19659   if (DstVT == MVT::f128)
19660     return SDValue();
19661 
19662   if (isSoftF16(DstVT, Subtarget))
19663     return promoteXINT_TO_FP(Op, DAG);
19664   else if (isLegalConversion(SrcVT, false, Subtarget))
19665     return Op;
19666 
19667   if (DstVT.isVector())
19668     return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
19669 
19670   if (Subtarget.isTargetWin64() && SrcVT == MVT::i128)
19671     return LowerWin64_INT128_TO_FP(Op, DAG);
19672 
19673   if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19674     return Extract;
19675 
19676   if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
19677       (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
19678     // Conversions from unsigned i32 to f32/f64 are legal,
19679     // using VCVTUSI2SS/SD.  Same for i64 in 64-bit mode.
19680     return Op;
19681   }
19682 
19683   // Promote i32 to i64 and use a signed conversion on 64-bit targets.
19684   if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
19685     Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
19686     if (IsStrict)
19687       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
19688                          {Chain, Src});
19689     return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
19690   }
19691 
19692   if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19693     return V;
19694   if (SDValue V = LowerI64IntToFP16(Op, DAG, Subtarget))
19695     return V;
19696 
19697   // The transform for i64->f64 isn't correct for 0 when rounding to negative
19698   // infinity. It produces -0.0, so disable under strictfp.
19699   if (SrcVT == MVT::i64 && DstVT == MVT::f64 && Subtarget.hasSSE2() &&
19700       !IsStrict)
19701     return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
19702   // The transform for i32->f64/f32 isn't correct for 0 when rounding to
19703   // negative infinity, so disable it under strictfp and use FILD instead.
19704   if (SrcVT == MVT::i32 && Subtarget.hasSSE2() && DstVT != MVT::f80 &&
19705       !IsStrict)
19706     return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
19707   if (Subtarget.is64Bit() && SrcVT == MVT::i64 &&
19708       (DstVT == MVT::f32 || DstVT == MVT::f64))
19709     return SDValue();
19710 
19711   // Make a 64-bit buffer, and use it to build an FILD.
19712   SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64, 8);
19713   int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
19714   Align SlotAlign(8);
19715   MachinePointerInfo MPI =
19716     MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
19717   if (SrcVT == MVT::i32) {
19718     SDValue OffsetSlot =
19719         DAG.getMemBasePlusOffset(StackSlot, TypeSize::getFixed(4), dl);
19720     SDValue Store1 = DAG.getStore(Chain, dl, Src, StackSlot, MPI, SlotAlign);
19721     SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
19722                                   OffsetSlot, MPI.getWithOffset(4), SlotAlign);
19723     std::pair<SDValue, SDValue> Tmp =
19724         BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, SlotAlign, DAG);
19725     if (IsStrict)
19726       return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19727 
19728     return Tmp.first;
19729   }
19730 
19731   assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
19732   SDValue ValueToStore = Src;
19733   if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
19734     // Bitcasting to f64 here allows us to do a single 64-bit store from
19735     // an SSE register, avoiding the store forwarding penalty that would come
19736     // with two 32-bit stores.
19737     ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19738   }
19739   SDValue Store =
19740       DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, SlotAlign);
19741   // For i64 source, we need to add the appropriate power of 2 if the input
19742   // was negative. We must be careful to do the computation in x87 extended
19743   // precision, not in SSE.
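  // Concretely (a sketch of the fixup, not new functionality): FILD interprets
  // the stored bit pattern as a signed i64, so an input x >= 2^63 is read as
  // x - 2^64. The constant-pool pair built below is {0.0f, (float)2^64} in
  // little-endian memory order (0x5F800000 is the f32 bit pattern of 2^64), and
  // the select picks the 2^64 entry exactly when the sign bit was set, so the
  // x87 add restores x before the final round to the destination type.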
19744   SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
19745   SDValue Ops[] = { Store, StackSlot };
19746   SDValue Fild =
19747       DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, MVT::i64, MPI,
19748                               SlotAlign, MachineMemOperand::MOLoad);
19749   Chain = Fild.getValue(1);
19750
19752   // Check whether the sign bit is set.
19753   SDValue SignSet = DAG.getSetCC(
19754       dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
19755       Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
19756 
19757   // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
19758   APInt FF(64, 0x5F80000000000000ULL);
19759   SDValue FudgePtr = DAG.getConstantPool(
19760       ConstantInt::get(*DAG.getContext(), FF), PtrVT);
19761   Align CPAlignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlign();
19762 
19763   // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
19764   SDValue Zero = DAG.getIntPtrConstant(0, dl);
19765   SDValue Four = DAG.getIntPtrConstant(4, dl);
19766   SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
19767   FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
19768 
19769   // Load the value out, extending it from f32 to f80.
19770   SDValue Fudge = DAG.getExtLoad(
19771       ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
19772       MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
19773       CPAlignment);
19774   Chain = Fudge.getValue(1);
19775   // Extend everything to 80 bits to force it to be done on x87.
19776   // TODO: Are there any fast-math-flags to propagate here?
19777   if (IsStrict) {
19778     unsigned Opc = ISD::STRICT_FADD;
19779     // Windows needs the precision control changed to 80bits around this add.
19780     if (Subtarget.isOSWindows() && DstVT == MVT::f32)
19781       Opc = X86ISD::STRICT_FP80_ADD;
19782 
19783     SDValue Add =
19784         DAG.getNode(Opc, dl, {MVT::f80, MVT::Other}, {Chain, Fild, Fudge});
19785     // STRICT_FP_ROUND can't handle equal types.
19786     if (DstVT == MVT::f80)
19787       return Add;
19788     return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
19789                        {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
19790   }
19791   unsigned Opc = ISD::FADD;
19792   // Windows needs the precision control changed to 80bits around this add.
19793   if (Subtarget.isOSWindows() && DstVT == MVT::f32)
19794     Opc = X86ISD::FP80_ADD;
19795 
19796   SDValue Add = DAG.getNode(Opc, dl, MVT::f80, Fild, Fudge);
19797   return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
19798                      DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
19799 }
19800 
19801 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
19802 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
19803 // just return an SDValue().
19804 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
19805 // to i16, i32 or i64, and we lower it to a legal sequence and return the
19806 // result.
19807 SDValue
19808 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
19809                                    bool IsSigned, SDValue &Chain) const {
19810   bool IsStrict = Op->isStrictFPOpcode();
19811   SDLoc DL(Op);
19812 
19813   EVT DstTy = Op.getValueType();
19814   SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
19815   EVT TheVT = Value.getValueType();
19816   auto PtrVT = getPointerTy(DAG.getDataLayout());
19817 
19818   if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
19819     // f16 must be promoted before using the lowering in this routine.
19820     // fp128 does not use this lowering.
19821     return SDValue();
19822   }
19823 
19824   // If using FIST to compute an unsigned i64, we'll need some fixup
19825   // to handle values above the maximum signed i64.  A FIST is always
19826   // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
19827   bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
19828 
19829   // FIXME: This does not generate an invalid exception if the input does not
19830   // fit in i32. PR44019
19831   if (!IsSigned && DstTy != MVT::i64) {
19832     // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
19833     // The low 32 bits of the fist result will have the correct uint32 result.
19834     assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
19835     DstTy = MVT::i64;
19836   }
19837 
19838   assert(DstTy.getSimpleVT() <= MVT::i64 &&
19839          DstTy.getSimpleVT() >= MVT::i16 &&
19840          "Unknown FP_TO_INT to lower!");
19841 
19842   // We lower FP->int64 into FISTP64 followed by a load from a temporary
19843   // stack slot.
19844   MachineFunction &MF = DAG.getMachineFunction();
19845   unsigned MemSize = DstTy.getStoreSize();
19846   int SSFI =
19847       MF.getFrameInfo().CreateStackObject(MemSize, Align(MemSize), false);
19848   SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
19849 
19850   Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19851 
19852   SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
19853 
19854   if (UnsignedFixup) {
19855     //
19856     // Conversion to unsigned i64 is implemented with a select,
19857     // depending on whether the source value fits in the range
19858     // of a signed i64.  Let Thresh be the FP equivalent of
19859     // 0x8000000000000000ULL.
19860     //
19861     //  Adjust = (Value >= Thresh) ? 0x8000000000000000 : 0;
19862     //  FltOfs = (Value >= Thresh) ? Thresh : 0;
19863     //  FistSrc = (Value - FltOfs);
19864     //  Fist-to-mem64 FistSrc
19865     //  Add 0 or 0x8000000000000000ULL to the 64-bit result, which is
19866     //  equivalent to XOR'ing the result with Adjust.
19867     //
19868     // Being a power of 2, Thresh is exactly representable in all FP formats.
19869     // For X87 we'd like to use the smallest FP type for this constant, but
19870     // for DAG type consistency we have to match the FP operand type.
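    //
    // Worked example (illustrative, exact in x87 extended precision): for
    // Value == 2^63 + 5:
    //   Cmp    == true (Value >= Thresh == 2^63)
    //   FltOfs == 2^63, FistSrc == Value - FltOfs == 5
    //   the FIST stores 5, and XOR'ing with Adjust == 0x8000000000000000
    //   yields 0x8000000000000005, i.e. 2^63 + 5 as an unsigned i64.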
19871 
19872     APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
19873     LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
19874     bool LosesInfo = false;
19875     if (TheVT == MVT::f64)
19876       // The rounding mode is irrelevant as the conversion should be exact.
19877       Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
19878                               &LosesInfo);
19879     else if (TheVT == MVT::f80)
19880       Status = Thresh.convert(APFloat::x87DoubleExtended(),
19881                               APFloat::rmNearestTiesToEven, &LosesInfo);
19882 
19883     assert(Status == APFloat::opOK && !LosesInfo &&
19884            "FP conversion should have been exact");
19885 
19886     SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
19887 
19888     EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
19889                                    *DAG.getContext(), TheVT);
19890     SDValue Cmp;
19891     if (IsStrict) {
19892       Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE, Chain,
19893                          /*IsSignaling*/ true);
19894       Chain = Cmp.getValue(1);
19895     } else {
19896       Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETGE);
19897     }
19898 
19899     // Our preferred lowering of
19900     //
19901     // (Value >= Thresh) ? 0x8000000000000000ULL : 0
19902     //
19903     // is
19904     //
19905     // (Value >= Thresh) << 63
19906     //
19907     // but since we can get here after LegalOperations, DAGCombine might do the
19908     // wrong thing if we create a select. So, directly create the preferred
19909     // version.
19910     SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Cmp);
19911     SDValue Const63 = DAG.getConstant(63, DL, MVT::i8);
19912     Adjust = DAG.getNode(ISD::SHL, DL, MVT::i64, Zext, Const63);
19913 
19914     SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp, ThreshVal,
19915                                    DAG.getConstantFP(0.0, DL, TheVT));
19916 
19917     if (IsStrict) {
19918       Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
19919                           { Chain, Value, FltOfs });
19920       Chain = Value.getValue(1);
19921     } else
19922       Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
19923   }
19924 
19925   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
19926 
19927   // FIXME This causes a redundant load/store if the SSE-class value is already
19928   // in memory, such as if it is on the callstack.
19929   if (isScalarFPTypeInSSEReg(TheVT)) {
19930     assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
19931     Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
19932     SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
19933     SDValue Ops[] = { Chain, StackSlot };
19934 
19935     unsigned FLDSize = TheVT.getStoreSize();
19936     assert(FLDSize <= MemSize && "Stack slot not big enough");
19937     MachineMemOperand *MMO = MF.getMachineMemOperand(
19938         MPI, MachineMemOperand::MOLoad, FLDSize, Align(FLDSize));
19939     Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
19940     Chain = Value.getValue(1);
19941   }
19942 
19943   // Build the FP_TO_INT*_IN_MEM
19944   MachineMemOperand *MMO = MF.getMachineMemOperand(
19945       MPI, MachineMemOperand::MOStore, MemSize, Align(MemSize));
19946   SDValue Ops[] = { Chain, Value, StackSlot };
19947   SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
19948                                          DAG.getVTList(MVT::Other),
19949                                          Ops, DstTy, MMO);
19950 
19951   SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
19952   Chain = Res.getValue(1);
19953 
19954   // If we need an unsigned fixup, XOR the result with adjust.
19955   if (UnsignedFixup)
19956     Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
19957 
19958   return Res;
19959 }
19960 
19961 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
19962                               const X86Subtarget &Subtarget) {
19963   MVT VT = Op.getSimpleValueType();
19964   SDValue In = Op.getOperand(0);
19965   MVT InVT = In.getSimpleValueType();
19966   SDLoc dl(Op);
19967   unsigned Opc = Op.getOpcode();
19968 
19969   assert(VT.isVector() && InVT.isVector() && "Expected vector type");
19970   assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
19971          "Unexpected extension opcode");
19972   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
19973          "Expected same number of elements");
19974   assert((VT.getVectorElementType() == MVT::i16 ||
19975           VT.getVectorElementType() == MVT::i32 ||
19976           VT.getVectorElementType() == MVT::i64) &&
19977          "Unexpected element type");
19978   assert((InVT.getVectorElementType() == MVT::i8 ||
19979           InVT.getVectorElementType() == MVT::i16 ||
19980           InVT.getVectorElementType() == MVT::i32) &&
19981          "Unexpected element type");
19982 
19983   unsigned ExtendInVecOpc = DAG.getOpcode_EXTEND_VECTOR_INREG(Opc);
19984 
19985   if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
19986     assert(InVT == MVT::v32i8 && "Unexpected VT!");
19987     return splitVectorIntUnary(Op, DAG);
19988   }
19989 
19990   if (Subtarget.hasInt256())
19991     return Op;
19992 
19993   // Optimize vectors in AVX mode:
19994   //
19995   //   v8i16 -> v8i32
19996   //   Use vpmovzxwd for 4 lower elements  v8i16 -> v4i32.
19997   //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
19998   //   Concat upper and lower parts.
19999   //
20000   //   v4i32 -> v4i64
20001   //   Use vpmovzxdq for 4 lower elements  v4i32 -> v2i64.
20002   //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
20003   //   Concat upper and lower parts.
20004   //
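  // For example (zero_extend v8i16 %x to v8i32), with %x = <a,b,c,d,e,f,g,h>:
  //   OpLo = zero_extend_vector_inreg %x          -> v4i32 <a,b,c,d>
  //   OpHi = vpunpckhwd %x, zero (bitcast v4i32)  -> v4i32 <e,f,g,h>
  //   result = concat_vectors OpLo, OpHi          -> v8i32 <a,b,c,d,e,f,g,h>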
20005   MVT HalfVT = VT.getHalfNumVectorElementsVT();
20006   SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
20007 
20008   // Short-circuit if we can determine that each 128-bit half is the same value.
20009   // Otherwise, this is difficult to match and optimize.
20010   if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
20011     if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
20012       return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
20013 
20014   SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
20015   SDValue Undef = DAG.getUNDEF(InVT);
20016   bool NeedZero = Opc == ISD::ZERO_EXTEND;
20017   SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
20018   OpHi = DAG.getBitcast(HalfVT, OpHi);
20019 
20020   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
20021 }
20022 
20023 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
20024 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
20025                                    const SDLoc &dl, SelectionDAG &DAG) {
20026   assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
20027   SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
20028                            DAG.getIntPtrConstant(0, dl));
20029   SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
20030                            DAG.getIntPtrConstant(8, dl));
20031   Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
20032   Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
20033   SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
20034   return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20035 }
20036 
20037 static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
20038                                       const X86Subtarget &Subtarget,
20039                                       SelectionDAG &DAG) {
20040   MVT VT = Op->getSimpleValueType(0);
20041   SDValue In = Op->getOperand(0);
20042   MVT InVT = In.getSimpleValueType();
20043   assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
20044   SDLoc DL(Op);
20045   unsigned NumElts = VT.getVectorNumElements();
20046 
20047   // For all vectors but vXi8, we can just emit a sign_extend and a shift. This
20048   // avoids a constant pool load.
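  // E.g. for vXi32, sign_extend turns each i1 into 0 or -1, and a logical
  // shift right by 31 then leaves the required 0 or 1 in each lane.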
20049   if (VT.getVectorElementType() != MVT::i8) {
20050     SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
20051     return DAG.getNode(ISD::SRL, DL, VT, Extend,
20052                        DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
20053   }
20054 
20055   // Extend VT if BWI is not supported.
20056   MVT ExtVT = VT;
20057   if (!Subtarget.hasBWI()) {
20058     // If v16i32 is to be avoided, we'll need to split and concatenate.
20059     if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
20060       return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
20061 
20062     ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
20063   }
20064 
20065   // Widen to 512-bits if VLX is not supported.
20066   MVT WideVT = ExtVT;
20067   if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
20068     NumElts *= 512 / ExtVT.getSizeInBits();
20069     InVT = MVT::getVectorVT(MVT::i1, NumElts);
20070     In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
20071                      In, DAG.getIntPtrConstant(0, DL));
20072     WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
20073                               NumElts);
20074   }
20075 
20076   SDValue One = DAG.getConstant(1, DL, WideVT);
20077   SDValue Zero = DAG.getConstant(0, DL, WideVT);
20078 
20079   SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
20080 
20081   // Truncate if we had to extend above.
20082   if (VT != ExtVT) {
20083     WideVT = MVT::getVectorVT(MVT::i8, NumElts);
20084     SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
20085   }
20086 
20087   // Extract back to 128/256-bit if we widened.
20088   if (WideVT != VT)
20089     SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
20090                               DAG.getIntPtrConstant(0, DL));
20091 
20092   return SelectedVal;
20093 }
20094 
20095 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
20096                                 SelectionDAG &DAG) {
20097   SDValue In = Op.getOperand(0);
20098   MVT SVT = In.getSimpleValueType();
20099 
20100   if (SVT.getVectorElementType() == MVT::i1)
20101     return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
20102 
20103   assert(Subtarget.hasAVX() && "Expected AVX support");
20104   return LowerAVXExtend(Op, DAG, Subtarget);
20105 }
20106 
20107 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
20108 /// It makes use of the fact that vectors with enough leading sign/zero bits
20109 /// prevent the PACKSS/PACKUS from saturating the results.
20110 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
20111 /// within each 128-bit lane.
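/// For example, PACKSSDW saturates each i32 to the signed i16 range, so if
/// every input element already sign-extends from 16 bits the pack is an exact
/// truncation; PACKUSWB likewise needs the upper bits to be known zero.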
20112 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
20113                                       const SDLoc &DL, SelectionDAG &DAG,
20114                                       const X86Subtarget &Subtarget) {
20115   assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
20116          "Unexpected PACK opcode");
20117   assert(DstVT.isVector() && "VT not a vector?");
20118 
20119   // Requires SSE2 for PACKSS (SSE41 PACKUSDW is handled below).
20120   if (!Subtarget.hasSSE2())
20121     return SDValue();
20122 
20123   EVT SrcVT = In.getValueType();
20124 
20125   // No truncation required, we might get here due to recursive calls.
20126   if (SrcVT == DstVT)
20127     return In;
20128 
20129   unsigned NumElems = SrcVT.getVectorNumElements();
20130   if (NumElems < 2 || !isPowerOf2_32(NumElems))
20131     return SDValue();
20132 
20133   unsigned DstSizeInBits = DstVT.getSizeInBits();
20134   unsigned SrcSizeInBits = SrcVT.getSizeInBits();
20135   assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
20136   assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
20137 
20138   LLVMContext &Ctx = *DAG.getContext();
20139   EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
20140   EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
20141 
20142   // Pack to the largest type possible:
20143   // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
20144   EVT InVT = MVT::i16, OutVT = MVT::i8;
20145   if (SrcVT.getScalarSizeInBits() > 16 &&
20146       (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
20147     InVT = MVT::i32;
20148     OutVT = MVT::i16;
20149   }
20150 
20151   // Sub-128-bit truncation - widen to 128-bit src and pack in the lower half.
20152   // On pre-AVX512, pack the src in both halves to help value tracking.
20153   if (SrcSizeInBits <= 128) {
20154     InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
20155     OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
20156     In = widenSubVector(In, false, Subtarget, DAG, DL, 128);
20157     SDValue LHS = DAG.getBitcast(InVT, In);
20158     SDValue RHS = Subtarget.hasAVX512() ? DAG.getUNDEF(InVT) : LHS;
20159     SDValue Res = DAG.getNode(Opcode, DL, OutVT, LHS, RHS);
20160     Res = extractSubVector(Res, 0, DAG, DL, SrcSizeInBits / 2);
20161     Res = DAG.getBitcast(PackedVT, Res);
20162     return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20163   }
20164 
20165   // Split lower/upper subvectors.
20166   SDValue Lo, Hi;
20167   std::tie(Lo, Hi) = splitVector(In, DAG, DL);
20168 
20169   // If Hi is undef, then don't bother packing it and widen the result instead.
20170   if (Hi.isUndef()) {
20171     EVT DstHalfVT = DstVT.getHalfNumVectorElementsVT(Ctx);
20172     if (SDValue Res =
20173             truncateVectorWithPACK(Opcode, DstHalfVT, Lo, DL, DAG, Subtarget))
20174       return widenSubVector(Res, false, Subtarget, DAG, DL, DstSizeInBits);
20175   }
20176 
20177   unsigned SubSizeInBits = SrcSizeInBits / 2;
20178   InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
20179   OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
20180 
20181   // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
20182   if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
20183     Lo = DAG.getBitcast(InVT, Lo);
20184     Hi = DAG.getBitcast(InVT, Hi);
20185     SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
20186     return DAG.getBitcast(DstVT, Res);
20187   }
20188 
20189   // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
20190   // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
20191   if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
20192     Lo = DAG.getBitcast(InVT, Lo);
20193     Hi = DAG.getBitcast(InVT, Hi);
20194     SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
20195 
20196     // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
20197     // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
20198     // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
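    // E.g. for a v16i32 -> v16i16 truncation, OutVT is v16i16, Scale is 4 and
    // the scaled mask is <0,1,2,3, 8,9,10,11, 4,5,6,7, 12,13,14,15>, i.e. a
    // <0,2,1,3> permute of 64-bit blocks.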
20199     SmallVector<int, 64> Mask;
20200     int Scale = 64 / OutVT.getScalarSizeInBits();
20201     narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
20202     Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
20203 
20204     if (DstVT.is256BitVector())
20205       return DAG.getBitcast(DstVT, Res);
20206 
20207     // If 512bit -> 128bit truncate another stage.
20208     Res = DAG.getBitcast(PackedVT, Res);
20209     return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20210   }
20211 
20212   // Recursively pack lower/upper subvectors, concat result and pack again.
20213   assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
20214 
20215   if (PackedVT.is128BitVector()) {
20216     // Avoid CONCAT_VECTORS on sub-128bit nodes as these can fail after
20217     // type legalization.
20218     SDValue Res =
20219         truncateVectorWithPACK(Opcode, PackedVT, In, DL, DAG, Subtarget);
20220     return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20221   }
20222 
20223   EVT HalfPackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
20224   Lo = truncateVectorWithPACK(Opcode, HalfPackedVT, Lo, DL, DAG, Subtarget);
20225   Hi = truncateVectorWithPACK(Opcode, HalfPackedVT, Hi, DL, DAG, Subtarget);
20226   SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
20227   return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20228 }
20229 
20230 /// Truncate using inreg zero extension (AND mask) and X86ISD::PACKUS.
20231 /// e.g. trunc <8 x i32> X to <8 x i16> -->
20232 /// MaskX = X & 0xffff (clear high bits to prevent saturation)
20233 /// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
20234 static SDValue truncateVectorWithPACKUS(EVT DstVT, SDValue In, const SDLoc &DL,
20235                                         const X86Subtarget &Subtarget,
20236                                         SelectionDAG &DAG) {
20237   In = DAG.getZeroExtendInReg(In, DL, DstVT);
20238   return truncateVectorWithPACK(X86ISD::PACKUS, DstVT, In, DL, DAG, Subtarget);
20239 }
20240 
20241 /// Truncate using inreg sign extension and X86ISD::PACKSS.
20242 static SDValue truncateVectorWithPACKSS(EVT DstVT, SDValue In, const SDLoc &DL,
20243                                         const X86Subtarget &Subtarget,
20244                                         SelectionDAG &DAG) {
20245   EVT SrcVT = In.getValueType();
20246   In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, SrcVT, In,
20247                    DAG.getValueType(DstVT));
20248   return truncateVectorWithPACK(X86ISD::PACKSS, DstVT, In, DL, DAG, Subtarget);
20249 }
20250 
20251 /// Helper to determine if \p In truncated to \p DstVT has the necessary
20252 /// signbits / leading zero bits to be truncated with PACKSS / PACKUS,
20253 /// possibly by converting a SRL node to SRA for sign extension.
20254 static SDValue matchTruncateWithPACK(unsigned &PackOpcode, EVT DstVT,
20255                                      SDValue In, const SDLoc &DL,
20256                                      SelectionDAG &DAG,
20257                                      const X86Subtarget &Subtarget) {
20258   // Requires SSE2.
20259   if (!Subtarget.hasSSE2())
20260     return SDValue();
20261 
20262   EVT SrcVT = In.getValueType();
20263   EVT DstSVT = DstVT.getVectorElementType();
20264   EVT SrcSVT = SrcVT.getVectorElementType();
20265 
20266   // Check we have a truncation suited for PACKSS/PACKUS.
20267   if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
20268         (DstSVT == MVT::i8 || DstSVT == MVT::i16 || DstSVT == MVT::i32)))
20269     return SDValue();
20270 
20271   assert(SrcSVT.getSizeInBits() > DstSVT.getSizeInBits() && "Bad truncation");
20272   unsigned NumStages = Log2_32(SrcSVT.getSizeInBits() / DstSVT.getSizeInBits());
20273 
20274   // Truncation from 128-bit to vXi32 can be better handled with PSHUFD.
20275   // Truncation to sub-64-bit vXi16 can be better handled with PSHUFD/PSHUFLW.
20276   // Truncation from v2i64 to v2i8 can be better handled with PSHUFB.
20277   if ((DstSVT == MVT::i32 && SrcVT.getSizeInBits() <= 128) ||
20278       (DstSVT == MVT::i16 && SrcVT.getSizeInBits() <= (64 * NumStages)) ||
20279       (DstVT == MVT::v2i8 && SrcVT == MVT::v2i64 && Subtarget.hasSSSE3()))
20280     return SDValue();
20281 
20282   // Prefer to lower v4i64 -> v4i32 as a shuffle unless we can cheaply
20283   // split this for packing.
20284   if (SrcVT == MVT::v4i64 && DstVT == MVT::v4i32 &&
20285       !isFreeToSplitVector(In.getNode(), DAG) &&
20286       (!Subtarget.hasAVX() || DAG.ComputeNumSignBits(In) != 64))
20287     return SDValue();
20288 
20289   // Don't truncate on AVX512 targets via multiple stages of PACK nodes.
20290   if (Subtarget.hasAVX512() && NumStages > 1)
20291     return SDValue();
20292 
20293   unsigned NumSrcEltBits = SrcVT.getScalarSizeInBits();
20294   unsigned NumPackedSignBits = std::min<unsigned>(DstSVT.getSizeInBits(), 16);
20295   unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
20296 
20297   // Truncate with PACKUS if we are truncating a vector with leading zero
20298   // bits that extend all the way to the packed/truncated value.
20299   // e.g. Masks, zext_in_reg, etc.
20300   // Pre-SSE41 we can only use PACKUSWB.
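  // E.g. for a v4i32 -> v4i8 truncation, NumPackedZeroBits is 8, so each i32
  // element must have at least 32 - 8 = 24 known leading zeros for the PACKUS
  // chain to be exact.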
20301   KnownBits Known = DAG.computeKnownBits(In);
20302   if ((NumSrcEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros()) {
20303     PackOpcode = X86ISD::PACKUS;
20304     return In;
20305   }
20306 
20307   // Truncate with PACKSS if we are truncating a vector with sign-bits
20308   // that extend all the way to the packed/truncated value.
20309   // e.g. Comparison result, sext_in_reg, etc.
20310   unsigned NumSignBits = DAG.ComputeNumSignBits(In);
20311 
20312   // Don't use PACKSS for vXi64 -> vXi32 truncations unless we're dealing with
20313   // a sign splat (or AVX512 VPSRAQ support). ComputeNumSignBits struggles to
20314   // see through BITCASTs later on and combines/simplifications can't then use
20315   // it.
20316   if (DstSVT == MVT::i32 && NumSignBits != SrcSVT.getSizeInBits() &&
20317       !Subtarget.hasAVX512())
20318     return SDValue();
20319 
20320   unsigned MinSignBits = NumSrcEltBits - NumPackedSignBits;
20321   if (MinSignBits < NumSignBits) {
20322     PackOpcode = X86ISD::PACKSS;
20323     return In;
20324   }
20325 
20326   // If we have a srl that only generates signbits that we will discard in
20327   // the truncation then we can use PACKSS by converting the srl to a sra.
20328   // SimplifyDemandedBits often relaxes sra to srl so we need to reverse it.
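  // E.g. (trunc (srl X, 16)) to vXi16 keeps only bits [31:16] of each element;
  // an sra by 16 produces the same low 16 bits but with the upper bits sign
  // extended, which is exactly the form PACKSSDW can truncate losslessly.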
20329   if (In.getOpcode() == ISD::SRL && In->hasOneUse())
20330     if (const APInt *ShAmt = DAG.getValidShiftAmountConstant(
20331             In, APInt::getAllOnes(SrcVT.getVectorNumElements()))) {
20332       if (*ShAmt == MinSignBits) {
20333         PackOpcode = X86ISD::PACKSS;
20334         return DAG.getNode(ISD::SRA, DL, SrcVT, In->ops());
20335       }
20336     }
20337 
20338   return SDValue();
20339 }
20340 
20341 /// This function lowers a vector truncation of 'extended sign-bits' or
20342 /// 'extended zero-bits' values.
20343 /// vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 into X86ISD::PACKSS/PACKUS operations.
20344 static SDValue LowerTruncateVecPackWithSignBits(MVT DstVT, SDValue In,
20345                                                 const SDLoc &DL,
20346                                                 const X86Subtarget &Subtarget,
20347                                                 SelectionDAG &DAG) {
20348   MVT SrcVT = In.getSimpleValueType();
20349   MVT DstSVT = DstVT.getVectorElementType();
20350   MVT SrcSVT = SrcVT.getVectorElementType();
20351   if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
20352         (DstSVT == MVT::i8 || DstSVT == MVT::i16 || DstSVT == MVT::i32)))
20353     return SDValue();
20354 
20355   // If the upper half of the source is undef, then attempt to split and
20356   // only truncate the lower half.
20357   if (DstVT.getSizeInBits() >= 128) {
20358     SmallVector<SDValue> LowerOps;
20359     if (SDValue Lo = isUpperSubvectorUndef(In, DL, DAG)) {
20360       MVT DstHalfVT = DstVT.getHalfNumVectorElementsVT();
20361       if (SDValue Res = LowerTruncateVecPackWithSignBits(DstHalfVT, Lo, DL,
20362                                                          Subtarget, DAG))
20363         return widenSubVector(Res, false, Subtarget, DAG, DL,
20364                               DstVT.getSizeInBits());
20365     }
20366   }
20367 
20368   unsigned PackOpcode;
20369   if (SDValue Src =
20370           matchTruncateWithPACK(PackOpcode, DstVT, In, DL, DAG, Subtarget))
20371     return truncateVectorWithPACK(PackOpcode, DstVT, Src, DL, DAG, Subtarget);
20372 
20373   return SDValue();
20374 }
20375 
20376 /// This function lowers a vector truncation from vXi32/vXi64 to vXi8/vXi16 into
20377 /// X86ISD::PACKUS/X86ISD::PACKSS operations.
20378 static SDValue LowerTruncateVecPack(MVT DstVT, SDValue In, const SDLoc &DL,
20379                                     const X86Subtarget &Subtarget,
20380                                     SelectionDAG &DAG) {
20381   MVT SrcVT = In.getSimpleValueType();
20382   MVT DstSVT = DstVT.getVectorElementType();
20383   MVT SrcSVT = SrcVT.getVectorElementType();
20384   unsigned NumElems = DstVT.getVectorNumElements();
20385   if (!((SrcSVT == MVT::i16 || SrcSVT == MVT::i32 || SrcSVT == MVT::i64) &&
20386         (DstSVT == MVT::i8 || DstSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
20387         NumElems >= 8))
20388     return SDValue();
20389 
20390   // SSSE3's pshufb results in fewer instructions in the cases below.
20391   if (Subtarget.hasSSSE3() && NumElems == 8) {
20392     if (SrcSVT == MVT::i16)
20393       return SDValue();
20394     if (SrcSVT == MVT::i32 && (DstSVT == MVT::i8 || !Subtarget.hasSSE41()))
20395       return SDValue();
20396   }
20397 
20398   // If the upper half of the source is undef, then attempt to split and
20399   // only truncate the lower half.
20400   if (DstVT.getSizeInBits() >= 128) {
20401     SmallVector<SDValue> LowerOps;
20402     if (SDValue Lo = isUpperSubvectorUndef(In, DL, DAG)) {
20403       MVT DstHalfVT = DstVT.getHalfNumVectorElementsVT();
20404       if (SDValue Res = LowerTruncateVecPack(DstHalfVT, Lo, DL, Subtarget, DAG))
20405         return widenSubVector(Res, false, Subtarget, DAG, DL,
20406                               DstVT.getSizeInBits());
20407     }
20408   }
20409 
20410   // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
20411   // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
20412   // truncate 2 x v4i32 to v8i16.
20413   if (Subtarget.hasSSE41() || DstSVT == MVT::i8)
20414     return truncateVectorWithPACKUS(DstVT, In, DL, Subtarget, DAG);
20415 
20416   if (SrcSVT == MVT::i16 || SrcSVT == MVT::i32)
20417     return truncateVectorWithPACKSS(DstVT, In, DL, Subtarget, DAG);
20418 
20419   // Special case vXi64 -> vXi16, shuffle to vXi32 and then use PACKSS.
20420   if (DstSVT == MVT::i16 && SrcSVT == MVT::i64) {
20421     MVT TruncVT = MVT::getVectorVT(MVT::i32, NumElems);
20422     SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, In);
20423     return truncateVectorWithPACKSS(DstVT, Trunc, DL, Subtarget, DAG);
20424   }
20425 
20426   return SDValue();
20427 }
20428 
20429 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
20430                                   const X86Subtarget &Subtarget) {
20431 
20432   SDLoc DL(Op);
20433   MVT VT = Op.getSimpleValueType();
20434   SDValue In = Op.getOperand(0);
20435   MVT InVT = In.getSimpleValueType();
20436 
20437   assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
20438 
20439   // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
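  // E.g. for a v16i8 source, only bit 0 of each byte is relevant; shifting it
  // left by 7 moves it into the sign bit so a signed compare against zero (or
  // VPMOVB2M) can extract the mask.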
20440   unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
20441   if (InVT.getScalarSizeInBits() <= 16) {
20442     if (Subtarget.hasBWI()) {
20443       // Legal; this will be selected to VPMOVB2M / VPMOVW2M.
20444       if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
20445         // We need to shift to get the lsb into sign position.
20446         // Shift packed bytes not supported natively, bitcast to word
20447         MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
20448         In = DAG.getNode(ISD::SHL, DL, ExtVT,
20449                          DAG.getBitcast(ExtVT, In),
20450                          DAG.getConstant(ShiftInx, DL, ExtVT));
20451         In = DAG.getBitcast(InVT, In);
20452       }
20453       return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
20454                           In, ISD::SETGT);
20455     }
20456     // Use TESTD/Q after extending the vector to packed dword/qword.
20457     assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
20458            "Unexpected vector type.");
20459     unsigned NumElts = InVT.getVectorNumElements();
20460     assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
20461     // We need to change to a wider element type that we have support for.
20462     // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
20463     // For 16 element vectors we extend to v16i32 unless we are explicitly
20464     // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
20465     // we need to split into two 8 element vectors which we can extend to v8i32,
20466     // truncate and concat the results. There's an additional complication if
20467     // the original type is v16i8. In that case we can't split the v16i8
20468     // directly, so we need to shuffle high elements to low and use
20469     // sign_extend_vector_inreg.
20470     if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
20471       SDValue Lo, Hi;
20472       if (InVT == MVT::v16i8) {
20473         Lo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, In);
20474         Hi = DAG.getVectorShuffle(
20475             InVT, DL, In, In,
20476             {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
20477         Hi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, Hi);
20478       } else {
20479         assert(InVT == MVT::v16i16 && "Unexpected VT!");
20480         Lo = extract128BitVector(In, 0, DAG, DL);
20481         Hi = extract128BitVector(In, 8, DAG, DL);
20482       }
20483       // We're split now, just emit two truncates and a concat. The two
20484       // truncates will trigger legalization to come back to this function.
20485       Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
20486       Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
20487       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20488     }
20489     // We either have 8 elements or we're allowed to use 512-bit vectors.
20490     // If we have VLX, we want to use the narrowest vector that can get the
20491     // job done so we use vXi32.
20492     MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
20493     MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
20494     In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
20495     InVT = ExtVT;
20496     ShiftInx = InVT.getScalarSizeInBits() - 1;
20497   }
20498 
20499   if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
20500     // We need to shift to get the lsb into sign position.
20501     In = DAG.getNode(ISD::SHL, DL, InVT, In,
20502                      DAG.getConstant(ShiftInx, DL, InVT));
20503   }
20504   // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
20505   if (Subtarget.hasDQI())
20506     return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
20507   return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
20508 }
20509 
20510 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
20511   SDLoc DL(Op);
20512   MVT VT = Op.getSimpleValueType();
20513   SDValue In = Op.getOperand(0);
20514   MVT InVT = In.getSimpleValueType();
20515   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
20516          "Invalid TRUNCATE operation");
20517 
20518   // If we're called by the type legalizer, handle a few cases.
20519   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20520   if (!TLI.isTypeLegal(VT) || !TLI.isTypeLegal(InVT)) {
20521     if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
20522         VT.is128BitVector() && Subtarget.hasAVX512()) {
20523       assert((InVT == MVT::v16i64 || Subtarget.hasVLX()) &&
20524              "Unexpected subtarget!");
20525       // The default behavior is to truncate one step, concatenate, and then
20526       // truncate the remainder. We'd rather produce two 64-bit results and
20527       // concatenate those.
20528       SDValue Lo, Hi;
20529       std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
20530 
20531       EVT LoVT, HiVT;
20532       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
20533 
20534       Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
20535       Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
20536       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20537     }
20538 
20539     // Pre-AVX512 (or prefer-256bit) see if we can make use of PACKSS/PACKUS.
20540     if (!Subtarget.hasAVX512() ||
20541         (InVT.is512BitVector() && VT.is256BitVector()))
20542       if (SDValue SignPack =
20543               LowerTruncateVecPackWithSignBits(VT, In, DL, Subtarget, DAG))
20544         return SignPack;
20545 
20546     // Pre-AVX512 see if we can make use of PACKSS/PACKUS.
20547     if (!Subtarget.hasAVX512())
20548       return LowerTruncateVecPack(VT, In, DL, Subtarget, DAG);
20549 
20550     // Otherwise let default legalization handle it.
20551     return SDValue();
20552   }
20553 
20554   if (VT.getVectorElementType() == MVT::i1)
20555     return LowerTruncateVecI1(Op, DAG, Subtarget);
20556 
20557   // Attempt to truncate with PACKUS/PACKSS even on AVX512 if we'd have to
20558   // concat from subvectors to use VPTRUNC etc.
20559   if (!Subtarget.hasAVX512() || isFreeToSplitVector(In.getNode(), DAG))
20560     if (SDValue SignPack =
20561             LowerTruncateVecPackWithSignBits(VT, In, DL, Subtarget, DAG))
20562       return SignPack;
20563 
20564   // vpmovqb/w/d, vpmovdb/w, vpmovwb
20565   if (Subtarget.hasAVX512()) {
20566     if (InVT == MVT::v32i16 && !Subtarget.hasBWI()) {
20567       assert(VT == MVT::v32i8 && "Unexpected VT!");
20568       return splitVectorIntUnary(Op, DAG);
20569     }
20570 
20571     // Word to byte is only legal with BWI. Otherwise we have to promote to
20572     // v16i32 and then truncate that. But we should only do that if we haven't
20573     // been asked to avoid 512-bit vectors. The actual promotion to v16i32 will
20574     // be handled by isel patterns.
20575     if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
20576         Subtarget.canExtendTo512DQ())
20577       return Op;
20578   }
20579 
20580   // Handle truncation of V256 to V128 using shuffles.
20581   assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
20582 
20583   if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
20584     // On AVX2, v4i64 -> v4i32 becomes VPERMD.
20585     if (Subtarget.hasInt256()) {
20586       static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
20587       In = DAG.getBitcast(MVT::v8i32, In);
20588       In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
20589       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
20590                          DAG.getIntPtrConstant(0, DL));
20591     }
20592 
20593     SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20594                                DAG.getIntPtrConstant(0, DL));
20595     SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20596                                DAG.getIntPtrConstant(2, DL));
20597     static const int ShufMask[] = {0, 2, 4, 6};
20598     return DAG.getVectorShuffle(VT, DL, DAG.getBitcast(MVT::v4i32, OpLo),
20599                                 DAG.getBitcast(MVT::v4i32, OpHi), ShufMask);
20600   }
20601 
20602   if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
20603     // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
20604     if (Subtarget.hasInt256()) {
20605       // The PSHUFB mask:
20606       static const int ShufMask1[] = { 0,  1,  4,  5,  8,  9, 12, 13,
20607                                       -1, -1, -1, -1, -1, -1, -1, -1,
20608                                       16, 17, 20, 21, 24, 25, 28, 29,
20609                                       -1, -1, -1, -1, -1, -1, -1, -1 };
20610       In = DAG.getBitcast(MVT::v32i8, In);
20611       In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
20612       In = DAG.getBitcast(MVT::v4i64, In);
20613 
20614       static const int ShufMask2[] = {0, 2, -1, -1};
20615       In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
20616       In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20617                        DAG.getIntPtrConstant(0, DL));
20618       return DAG.getBitcast(MVT::v8i16, In);
20619     }
20620 
20621     return Subtarget.hasSSE41()
20622                ? truncateVectorWithPACKUS(VT, In, DL, Subtarget, DAG)
20623                : truncateVectorWithPACKSS(VT, In, DL, Subtarget, DAG);
20624   }
20625 
20626   if (VT == MVT::v16i8 && InVT == MVT::v16i16)
20627     return truncateVectorWithPACKUS(VT, In, DL, Subtarget, DAG);
20628 
20629   llvm_unreachable("All 256->128 cases should have been handled above!");
20630 }
20631 
20632 // We can leverage the specific way the "cvttps2dq/cvttpd2dq" instruction
20633 // behaves on out of range inputs to generate optimized conversions.
20634 static SDValue expandFP_TO_UINT_SSE(MVT VT, SDValue Src, const SDLoc &dl,
20635                                     SelectionDAG &DAG,
20636                                     const X86Subtarget &Subtarget) {
20637   MVT SrcVT = Src.getSimpleValueType();
20638   unsigned DstBits = VT.getScalarSizeInBits();
20639   assert(DstBits == 32 && "expandFP_TO_UINT_SSE - only vXi32 supported");
20640 
20641   // Calculate the converted result for values in the range 0 to
20642   // 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
20643   SDValue Small = DAG.getNode(X86ISD::CVTTP2SI, dl, VT, Src);
20644   SDValue Big =
20645       DAG.getNode(X86ISD::CVTTP2SI, dl, VT,
20646                   DAG.getNode(ISD::FSUB, dl, SrcVT, Src,
20647                               DAG.getConstantFP(2147483648.0f, dl, SrcVT)));
20648 
20649   // The "CVTTP2SI" instruction conveniently sets the sign bit if
20650   // and only if the value was out of range. So we can use that
20651   // as our indicator that we'd rather use "Big" instead of "Small".
20652   //
20653   // Use "Small" if "IsOverflown" has all bits cleared
20654   // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
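  // Illustrative example with Src = 3e9 (exactly representable in f32):
  //   Small = cvttps2dq(3e9)        = 0x80000000 (out of range)
  //   Big   = cvttps2dq(3e9 - 2^31) = 0x32D05E00
  //   0x80000000 | 0x32D05E00       = 0xB2D05E00 == 3e9 as required.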
20655 
20656   // AVX1 can't use the signsplat masking for 256-bit vectors - we have to
20657   // use the slightly slower blendv select instead.
20658   if (VT == MVT::v8i32 && !Subtarget.hasAVX2()) {
20659     SDValue Overflow = DAG.getNode(ISD::OR, dl, VT, Small, Big);
20660     return DAG.getNode(X86ISD::BLENDV, dl, VT, Small, Overflow, Small);
20661   }
20662 
20663   SDValue IsOverflown =
20664       DAG.getNode(X86ISD::VSRAI, dl, VT, Small,
20665                   DAG.getTargetConstant(DstBits - 1, dl, MVT::i8));
20666   return DAG.getNode(ISD::OR, dl, VT, Small,
20667                      DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
20668 }
20669 
20670 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
20671   bool IsStrict = Op->isStrictFPOpcode();
20672   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
20673                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
20674   MVT VT = Op->getSimpleValueType(0);
20675   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
20676   SDValue Chain = IsStrict ? Op->getOperand(0) : SDValue();
20677   MVT SrcVT = Src.getSimpleValueType();
20678   SDLoc dl(Op);
20679 
20680   SDValue Res;
20681   if (isSoftF16(SrcVT, Subtarget)) {
20682     MVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
20683     if (IsStrict)
20684       return DAG.getNode(Op.getOpcode(), dl, {VT, MVT::Other},
20685                          {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
20686                                              {NVT, MVT::Other}, {Chain, Src})});
20687     return DAG.getNode(Op.getOpcode(), dl, VT,
20688                        DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
20689   } else if (isTypeLegal(SrcVT) && isLegalConversion(VT, IsSigned, Subtarget)) {
20690     return Op;
20691   }
20692 
20693   if (VT.isVector()) {
20694     if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
20695       MVT ResVT = MVT::v4i32;
20696       MVT TruncVT = MVT::v4i1;
20697       unsigned Opc;
20698       if (IsStrict)
20699         Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
20700       else
20701         Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20702 
20703       if (!IsSigned && !Subtarget.hasVLX()) {
20704         assert(Subtarget.useAVX512Regs() && "Unexpected features!");
20705         // Widen to 512-bits.
20706         ResVT = MVT::v8i32;
20707         TruncVT = MVT::v8i1;
20708         Opc = Op.getOpcode();
20709         // Need to concat with zero vector for strict fp to avoid spurious
20710         // exceptions.
20711         // TODO: Should we just do this for non-strict as well?
20712         SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
20713                                : DAG.getUNDEF(MVT::v8f64);
20714         Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
20715                           DAG.getIntPtrConstant(0, dl));
20716       }
20717       if (IsStrict) {
20718         Res = DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Chain, Src});
20719         Chain = Res.getValue(1);
20720       } else {
20721         Res = DAG.getNode(Opc, dl, ResVT, Src);
20722       }
20723 
20724       Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
20725       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
20726                         DAG.getIntPtrConstant(0, dl));
20727       if (IsStrict)
20728         return DAG.getMergeValues({Res, Chain}, dl);
20729       return Res;
20730     }
20731 
20732     if (Subtarget.hasFP16() && SrcVT.getVectorElementType() == MVT::f16) {
20733       if (VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16)
20734         return Op;
20735 
20736       MVT ResVT = VT;
20737       MVT EleVT = VT.getVectorElementType();
20738       if (EleVT != MVT::i64)
20739         ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
20740 
20741       if (SrcVT != MVT::v8f16) {
20742         SDValue Tmp =
20743             IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
20744         SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
20745         Ops[0] = Src;
20746         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
20747       }
20748 
20749       if (IsStrict) {
20750         Res = DAG.getNode(IsSigned ? X86ISD::STRICT_CVTTP2SI
20751                                    : X86ISD::STRICT_CVTTP2UI,
20752                           dl, {ResVT, MVT::Other}, {Chain, Src});
20753         Chain = Res.getValue(1);
20754       } else {
20755         Res = DAG.getNode(IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI, dl,
20756                           ResVT, Src);
20757       }
20758 
20759       // TODO: Need to add exception check code for strict FP.
20760       if (EleVT.getSizeInBits() < 16) {
20761         ResVT = MVT::getVectorVT(EleVT, 8);
20762         Res = DAG.getNode(ISD::TRUNCATE, dl, ResVT, Res);
20763       }
20764 
20765       if (ResVT != VT)
20766         Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20767                           DAG.getIntPtrConstant(0, dl));
20768 
20769       if (IsStrict)
20770         return DAG.getMergeValues({Res, Chain}, dl);
20771       return Res;
20772     }
20773 
20774     // v8f32/v16f32/v8f64->v8i16/v16i16 need to widen first.
20775     if (VT.getVectorElementType() == MVT::i16) {
20776       assert((SrcVT.getVectorElementType() == MVT::f32 ||
20777               SrcVT.getVectorElementType() == MVT::f64) &&
20778              "Expected f32/f64 vector!");
20779       MVT NVT = VT.changeVectorElementType(MVT::i32);
20780       if (IsStrict) {
20781         Res = DAG.getNode(IsSigned ? ISD::STRICT_FP_TO_SINT
20782                                    : ISD::STRICT_FP_TO_UINT,
20783                           dl, {NVT, MVT::Other}, {Chain, Src});
20784         Chain = Res.getValue(1);
20785       } else {
20786         Res = DAG.getNode(IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, dl,
20787                           NVT, Src);
20788       }
20789 
20790       // TODO: Need to add exception check code for strict FP.
20791       Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20792 
20793       if (IsStrict)
20794         return DAG.getMergeValues({Res, Chain}, dl);
20795       return Res;
20796     }
20797 
20798     // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
20799     if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
20800       assert(!IsSigned && "Expected unsigned conversion!");
20801       assert(Subtarget.useAVX512Regs() && "Requires avx512f");
20802       return Op;
20803     }
20804 
20805     // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
20806     if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
20807         (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32) &&
20808         Subtarget.useAVX512Regs()) {
20809       assert(!IsSigned && "Expected unsigned conversion!");
20810       assert(!Subtarget.hasVLX() && "Unexpected features!");
20811       MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
20812       MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
20813       // Need to concat with zero vector for strict fp to avoid spurious
20814       // exceptions.
20815       // TODO: Should we just do this for non-strict as well?
20816       SDValue Tmp =
20817           IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20818       Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20819                         DAG.getIntPtrConstant(0, dl));
20820 
20821       if (IsStrict) {
20822         Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
20823                           {Chain, Src});
20824         Chain = Res.getValue(1);
20825       } else {
20826         Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
20827       }
20828 
20829       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20830                         DAG.getIntPtrConstant(0, dl));
20831 
20832       if (IsStrict)
20833         return DAG.getMergeValues({Res, Chain}, dl);
20834       return Res;
20835     }
20836 
20837     // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
20838     if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
20839         (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32) &&
20840         Subtarget.useAVX512Regs() && Subtarget.hasDQI()) {
20841       assert(!Subtarget.hasVLX() && "Unexpected features!");
20842       MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
20843       // Need to concat with zero vector for strict fp to avoid spurious
20844       // exceptions.
20845       // TODO: Should we just do this for non-strict as well?
20846       SDValue Tmp =
20847           IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20848       Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20849                         DAG.getIntPtrConstant(0, dl));
20850 
20851       if (IsStrict) {
20852         Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20853                           {Chain, Src});
20854         Chain = Res.getValue(1);
20855       } else {
20856         Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
20857       }
20858 
20859       Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20860                         DAG.getIntPtrConstant(0, dl));
20861 
20862       if (IsStrict)
20863         return DAG.getMergeValues({Res, Chain}, dl);
20864       return Res;
20865     }
20866 
20867     if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
20868       if (!Subtarget.hasVLX()) {
20869         // Non-strict nodes without VLX can be widened to v4f32->v4i64 by the
20870         // type legalizer and then widened again by vector op legalization.
20871         if (!IsStrict)
20872           return SDValue();
20873 
20874         SDValue Zero = DAG.getConstantFP(0.0, dl, MVT::v2f32);
20875         SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f32,
20876                                   {Src, Zero, Zero, Zero});
20877         Tmp = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20878                           {Chain, Tmp});
20879         SDValue Chain = Tmp.getValue(1);
20880         Tmp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Tmp,
20881                           DAG.getIntPtrConstant(0, dl));
20882         return DAG.getMergeValues({Tmp, Chain}, dl);
20883       }
20884 
20885       assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
20886       SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
20887                                 DAG.getUNDEF(MVT::v2f32));
20888       if (IsStrict) {
20889         unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
20890                                 : X86ISD::STRICT_CVTTP2UI;
20891         return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
20892       }
20893       unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20894       return DAG.getNode(Opc, dl, VT, Tmp);
20895     }
20896 
20897     // Generate optimized instructions for pre AVX512 unsigned conversions from
20898     // vXf32 to vXi32.
20899     if ((VT == MVT::v4i32 && SrcVT == MVT::v4f32) ||
20900         (VT == MVT::v4i32 && SrcVT == MVT::v4f64) ||
20901         (VT == MVT::v8i32 && SrcVT == MVT::v8f32)) {
20902       assert(!IsSigned && "Expected unsigned conversion!");
20903       return expandFP_TO_UINT_SSE(VT, Src, dl, DAG, Subtarget);
20904     }
20905 
20906     return SDValue();
20907   }
20908 
20909   assert(!VT.isVector());
20910 
20911   bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
20912 
20913   if (!IsSigned && UseSSEReg) {
20914     // Conversions from f32/f64 with AVX512 should be legal.
20915     if (Subtarget.hasAVX512())
20916       return Op;
20917 
20918     // We can leverage the specific way the "cvttss2si/cvttsd2si" instruction
20919     // behaves on out of range inputs to generate optimized conversions.
20920     if (!IsStrict && ((VT == MVT::i32 && !Subtarget.is64Bit()) ||
20921                       (VT == MVT::i64 && Subtarget.is64Bit()))) {
20922       unsigned DstBits = VT.getScalarSizeInBits();
20923       APInt UIntLimit = APInt::getSignMask(DstBits);
20924       SDValue FloatOffset = DAG.getNode(ISD::UINT_TO_FP, dl, SrcVT,
20925                                         DAG.getConstant(UIntLimit, dl, VT));
20926       MVT SrcVecVT = MVT::getVectorVT(SrcVT, 128 / SrcVT.getScalarSizeInBits());
20927 
20928       // Calculate the converted result for values in the range:
20929       // (i32) 0 to 2^31-1 ("Small") and from 2^31 to 2^32-1 ("Big").
20930       // (i64) 0 to 2^63-1 ("Small") and from 2^63 to 2^64-1 ("Big").
20931       SDValue Small =
20932           DAG.getNode(X86ISD::CVTTS2SI, dl, VT,
20933                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT, Src));
20934       SDValue Big = DAG.getNode(
20935           X86ISD::CVTTS2SI, dl, VT,
20936           DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, SrcVecVT,
20937                       DAG.getNode(ISD::FSUB, dl, SrcVT, Src, FloatOffset)));
20938 
20939       // The "CVTTS2SI" instruction conveniently sets the sign bit if
20940       // and only if the value was out of range. So we can use that
20941       // as our indicator that we'd rather use "Big" instead of "Small".
20942       //
20943       // Use "Small" if "IsOverflown" has all bits cleared
20944       // and "0x80000000 | Big" if all bits in "IsOverflown" are set.
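      // E.g. for VT == i64, FloatOffset is 2^63: a source of 2^63 + 2048 (in
      // f64) gives Small = 0x8000000000000000 and Big = 2048, and OR'ing them
      // produces the expected 0x8000000000000800.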
20945       SDValue IsOverflown = DAG.getNode(
20946           ISD::SRA, dl, VT, Small, DAG.getConstant(DstBits - 1, dl, MVT::i8));
20947       return DAG.getNode(ISD::OR, dl, VT, Small,
20948                          DAG.getNode(ISD::AND, dl, VT, Big, IsOverflown));
20949     }
20950 
20951     // Use default expansion for i64.
20952     if (VT == MVT::i64)
20953       return SDValue();
20954 
20955     assert(VT == MVT::i32 && "Unexpected VT!");
20956 
20957     // Promote i32 to i64 and use a signed operation on 64-bit targets.
20958     // FIXME: This does not generate an invalid exception if the input does not
20959     // fit in i32. PR44019
20960     if (Subtarget.is64Bit()) {
20961       if (IsStrict) {
20962         Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i64, MVT::Other},
20963                           {Chain, Src});
20964         Chain = Res.getValue(1);
20965       } else
20966         Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
20967 
20968       Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20969       if (IsStrict)
20970         return DAG.getMergeValues({Res, Chain}, dl);
20971       return Res;
20972     }
20973 
20974     // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
20975     // use fisttp which will be handled later.
20976     if (!Subtarget.hasSSE3())
20977       return SDValue();
20978   }
20979 
20980   // Promote i16 to i32 if we can use a SSE operation or the type is f128.
20981   // FIXME: This does not generate an invalid exception if the input does not
20982   // fit in i16. PR44019
20983   if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
20984     assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
20985     if (IsStrict) {
20986       Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {MVT::i32, MVT::Other},
20987                         {Chain, Src});
20988       Chain = Res.getValue(1);
20989     } else
20990       Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
20991 
20992     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20993     if (IsStrict)
20994       return DAG.getMergeValues({Res, Chain}, dl);
20995     return Res;
20996   }
20997 
20998   // If this is a FP_TO_SINT using SSEReg we're done.
20999   if (UseSSEReg && IsSigned)
21000     return Op;
21001 
21002   // fp128 needs to use a libcall.
21003   if (SrcVT == MVT::f128) {
21004     RTLIB::Libcall LC;
21005     if (IsSigned)
21006       LC = RTLIB::getFPTOSINT(SrcVT, VT);
21007     else
21008       LC = RTLIB::getFPTOUINT(SrcVT, VT);
21009 
21010     MakeLibCallOptions CallOptions;
21011     std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
21012                                                   SDLoc(Op), Chain);
21013 
21014     if (IsStrict)
21015       return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
21016 
21017     return Tmp.first;
21018   }
21019 
21020   // Fall back to X87.
21021   if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
21022     if (IsStrict)
21023       return DAG.getMergeValues({V, Chain}, dl);
21024     return V;
21025   }
21026 
21027   llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
21028 }
21029 
21030 SDValue X86TargetLowering::LowerLRINT_LLRINT(SDValue Op,
21031                                              SelectionDAG &DAG) const {
21032   SDValue Src = Op.getOperand(0);
21033   MVT SrcVT = Src.getSimpleValueType();
21034 
21035   if (SrcVT == MVT::f16)
21036     return SDValue();
21037 
21038   // If the source is in an SSE register, the node is Legal.
21039   if (isScalarFPTypeInSSEReg(SrcVT))
21040     return Op;
21041 
21042   return LRINT_LLRINTHelper(Op.getNode(), DAG);
21043 }
21044 
21045 SDValue X86TargetLowering::LRINT_LLRINTHelper(SDNode *N,
21046                                               SelectionDAG &DAG) const {
21047   EVT DstVT = N->getValueType(0);
21048   SDValue Src = N->getOperand(0);
21049   EVT SrcVT = Src.getValueType();
21050 
21051   if (SrcVT != MVT::f32 && SrcVT != MVT::f64 && SrcVT != MVT::f80) {
21052     // f16 must be promoted before using the lowering in this routine.
21053     // fp128 does not use this lowering.
21054     return SDValue();
21055   }
21056 
21057   SDLoc DL(N);
21058   SDValue Chain = DAG.getEntryNode();
21059 
21060   bool UseSSE = isScalarFPTypeInSSEReg(SrcVT);
21061 
21062   // If we're converting from SSE, the stack slot needs to hold both types.
21063   // Otherwise it only needs to hold the DstVT.
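  // Illustrative x87 path for an SSE source: the f32/f64 value is spilled to
  // the slot, reloaded onto the x87 stack with FLD, converted and stored as an
  // i64 with FIST (using the current rounding mode, as lrint/llrint require),
  // and the integer result is then loaded back from the same slot.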
21064   EVT OtherVT = UseSSE ? SrcVT : DstVT;
21065   SDValue StackPtr = DAG.CreateStackTemporary(DstVT, OtherVT);
21066   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
21067   MachinePointerInfo MPI =
21068       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
21069 
21070   if (UseSSE) {
21071     assert(DstVT == MVT::i64 && "Invalid LRINT/LLRINT to lower!");
21072     Chain = DAG.getStore(Chain, DL, Src, StackPtr, MPI);
21073     SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
21074     SDValue Ops[] = { Chain, StackPtr };
21075 
21076     Src = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, SrcVT, MPI,
21077                                   /*Align*/ std::nullopt,
21078                                   MachineMemOperand::MOLoad);
21079     Chain = Src.getValue(1);
21080   }
21081 
21082   SDValue StoreOps[] = { Chain, Src, StackPtr };
21083   Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL, DAG.getVTList(MVT::Other),
21084                                   StoreOps, DstVT, MPI, /*Align*/ std::nullopt,
21085                                   MachineMemOperand::MOStore);
21086 
21087   return DAG.getLoad(DstVT, DL, Chain, StackPtr, MPI);
21088 }
21089 
21090 SDValue
21091 X86TargetLowering::LowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) const {
21092   // This is based on the TargetLowering::expandFP_TO_INT_SAT implementation,
21093   // but making use of X86 specifics to produce better instruction sequences.
21094   SDNode *Node = Op.getNode();
21095   bool IsSigned = Node->getOpcode() == ISD::FP_TO_SINT_SAT;
21096   unsigned FpToIntOpcode = IsSigned ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;
21097   SDLoc dl(SDValue(Node, 0));
21098   SDValue Src = Node->getOperand(0);
21099 
21100   // There are three types involved here: SrcVT is the source floating point
21101   // type, DstVT is the type of the result, and TmpVT is the result of the
21102   // intermediate FP_TO_*INT operation we'll use (which may be a promotion of
21103   // DstVT).
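  // For example (illustrative): an unsigned 32-bit saturating conversion from
  // f32 on x86-64 has SrcVT = f32, DstVT = i32, and TmpVT is promoted to i64
  // below so that a native signed conversion can be used.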
21104   EVT SrcVT = Src.getValueType();
21105   EVT DstVT = Node->getValueType(0);
21106   EVT TmpVT = DstVT;
21107 
21108   // This code is only for floats and doubles. Fall back to generic code for
21109   // anything else.
21110   if (!isScalarFPTypeInSSEReg(SrcVT) || isSoftF16(SrcVT, Subtarget))
21111     return SDValue();
21112 
21113   EVT SatVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
21114   unsigned SatWidth = SatVT.getScalarSizeInBits();
21115   unsigned DstWidth = DstVT.getScalarSizeInBits();
21116   unsigned TmpWidth = TmpVT.getScalarSizeInBits();
21117   assert(SatWidth <= DstWidth && SatWidth <= TmpWidth &&
21118          "Expected saturation width no wider than result width");
21119 
21120   // Promote result of FP_TO_*INT to at least 32 bits.
21121   if (TmpWidth < 32) {
21122     TmpVT = MVT::i32;
21123     TmpWidth = 32;
21124   }
21125 
21126   // Promote unsigned 32-bit conversions to 64-bit, because that allows us to
21127   // use a native signed conversion instead.
21128   if (SatWidth == 32 && !IsSigned && Subtarget.is64Bit()) {
21129     TmpVT = MVT::i64;
21130     TmpWidth = 64;
21131   }
21132 
21133   // If the saturation width is smaller than the size of the temporary result,
21134   // we can always use signed conversion, which is native.
21135   if (SatWidth < TmpWidth)
21136     FpToIntOpcode = ISD::FP_TO_SINT;
21137 
21138   // Determine minimum and maximum integer values and their corresponding
21139   // floating-point values.
21140   APInt MinInt, MaxInt;
21141   if (IsSigned) {
21142     MinInt = APInt::getSignedMinValue(SatWidth).sext(DstWidth);
21143     MaxInt = APInt::getSignedMaxValue(SatWidth).sext(DstWidth);
21144   } else {
21145     MinInt = APInt::getMinValue(SatWidth).zext(DstWidth);
21146     MaxInt = APInt::getMaxValue(SatWidth).zext(DstWidth);
21147   }
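  // E.g. for a signed saturation width of 32: MinInt = -2^31 and
  // MaxInt = 2^31 - 1, extended to DstWidth bits.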
21148 
21149   APFloat MinFloat(DAG.EVTToAPFloatSemantics(SrcVT));
21150   APFloat MaxFloat(DAG.EVTToAPFloatSemantics(SrcVT));
21151 
21152   APFloat::opStatus MinStatus = MinFloat.convertFromAPInt(
21153     MinInt, IsSigned, APFloat::rmTowardZero);
21154   APFloat::opStatus MaxStatus = MaxFloat.convertFromAPInt(
21155     MaxInt, IsSigned, APFloat::rmTowardZero);
21156   bool AreExactFloatBounds = !(MinStatus & APFloat::opStatus::opInexact)
21157                           && !(MaxStatus & APFloat::opStatus::opInexact);
21158 
21159   SDValue MinFloatNode = DAG.getConstantFP(MinFloat, dl, SrcVT);
21160   SDValue MaxFloatNode = DAG.getConstantFP(MaxFloat, dl, SrcVT);
21161 
21162   // If the integer bounds are exactly representable as floats, emit a
21163   // min+max+fptoi sequence. Otherwise use comparisons and selects.
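  // On a typical SSE2 target this becomes a MAXSS/MINSS (or MAXSD/MINSD) clamp
  // followed by a truncating CVTTSS2SI/CVTTSD2SI, plus a NaN select for the
  // signed case.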
21164   if (AreExactFloatBounds) {
21165     if (DstVT != TmpVT) {
21166       // Clamp by MinFloat from below. If Src is NaN, propagate NaN.
21167       SDValue MinClamped = DAG.getNode(
21168         X86ISD::FMAX, dl, SrcVT, MinFloatNode, Src);
21169       // Clamp by MaxFloat from above. If Src is NaN, propagate NaN.
21170       SDValue BothClamped = DAG.getNode(
21171         X86ISD::FMIN, dl, SrcVT, MaxFloatNode, MinClamped);
21172       // Convert clamped value to integer.
21173       SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, BothClamped);
21174 
21175       // NaN will become INDVAL, with the top bit set and the rest zero.
21176       // Truncation will discard the top bit, resulting in zero.
21177       return DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
21178     }
21179 
21180     // Clamp by MinFloat from below. If Src is NaN, the result is MinFloat.
21181     SDValue MinClamped = DAG.getNode(
21182       X86ISD::FMAX, dl, SrcVT, Src, MinFloatNode);
21183     // Clamp by MaxFloat from above. NaN cannot occur.
21184     SDValue BothClamped = DAG.getNode(
21185       X86ISD::FMINC, dl, SrcVT, MinClamped, MaxFloatNode);
21186     // Convert clamped value to integer.
21187     SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, DstVT, BothClamped);
21188 
21189     if (!IsSigned) {
21190       // In the unsigned case we're done, because we mapped NaN to MinFloat,
21191       // which is zero.
21192       return FpToInt;
21193     }
21194 
21195     // Otherwise, select zero if Src is NaN.
21196     SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
21197     return DAG.getSelectCC(
21198       dl, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
21199   }
21200 
21201   SDValue MinIntNode = DAG.getConstant(MinInt, dl, DstVT);
21202   SDValue MaxIntNode = DAG.getConstant(MaxInt, dl, DstVT);
21203 
21204   // Result of direct conversion, which may be selected away.
21205   SDValue FpToInt = DAG.getNode(FpToIntOpcode, dl, TmpVT, Src);
21206 
21207   if (DstVT != TmpVT) {
21208     // NaN will become INDVAL, with the top bit set and the rest zero.
21209     // Truncation will discard the top bit, resulting in zero.
21210     FpToInt = DAG.getNode(ISD::TRUNCATE, dl, DstVT, FpToInt);
21211   }
21212 
21213   SDValue Select = FpToInt;
21214   // For signed conversions where we saturate to the same size as the
21215   // result type of the fptoi instructions, INDVAL coincides with integer
21216   // minimum, so we don't need to explicitly check it.
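  // (E.g. a 32-bit conversion already yields 0x80000000 == INT32_MIN for NaN
  // and out-of-range inputs, so the explicit MinInt select below can be
  // skipped.)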
21217   if (!IsSigned || SatWidth != TmpVT.getScalarSizeInBits()) {
21218     // If Src ULT MinFloat, select MinInt. In particular, this also selects
21219     // MinInt if Src is NaN.
21220     Select = DAG.getSelectCC(
21221       dl, Src, MinFloatNode, MinIntNode, Select, ISD::CondCode::SETULT);
21222   }
21223 
21224   // If Src OGT MaxFloat, select MaxInt.
21225   Select = DAG.getSelectCC(
21226     dl, Src, MaxFloatNode, MaxIntNode, Select, ISD::CondCode::SETOGT);
21227 
21228   // In the unsigned case we are done, because we mapped NaN to MinInt, which
21229   // is already zero. The promoted case was already handled above.
21230   if (!IsSigned || DstVT != TmpVT) {
21231     return Select;
21232   }
21233 
21234   // Otherwise, select 0 if Src is NaN.
21235   SDValue ZeroInt = DAG.getConstant(0, dl, DstVT);
21236   return DAG.getSelectCC(
21237     dl, Src, Src, ZeroInt, Select, ISD::CondCode::SETUO);
21238 }
21239 
21240 SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
21241   bool IsStrict = Op->isStrictFPOpcode();
21242 
21243   SDLoc DL(Op);
21244   MVT VT = Op.getSimpleValueType();
21245   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21246   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
21247   MVT SVT = In.getSimpleValueType();
21248 
21249   // Let f16->f80 get lowered to a libcall, except for Darwin, where we should
21250   // lower it to an fp_extend via f32 (as only f16<>f32 libcalls are available).
21251   if (VT == MVT::f128 || (SVT == MVT::f16 && VT == MVT::f80 &&
21252                           !Subtarget.getTargetTriple().isOSDarwin()))
21253     return SDValue();
21254 
21255   if ((SVT == MVT::v8f16 && Subtarget.hasF16C()) ||
21256       (SVT == MVT::v16f16 && Subtarget.useAVX512Regs()))
21257     return Op;
21258 
21259   if (SVT == MVT::f16) {
21260     if (Subtarget.hasFP16())
21261       return Op;
21262 
21263     if (VT != MVT::f32) {
21264       if (IsStrict)
21265         return DAG.getNode(
21266             ISD::STRICT_FP_EXTEND, DL, {VT, MVT::Other},
21267             {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, DL,
21268                                 {MVT::f32, MVT::Other}, {Chain, In})});
21269 
21270       return DAG.getNode(ISD::FP_EXTEND, DL, VT,
21271                          DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, In));
21272     }
21273 
21274     if (!Subtarget.hasF16C()) {
21275       if (!Subtarget.getTargetTriple().isOSDarwin())
21276         return SDValue();
21277 
21278       assert(VT == MVT::f32 && SVT == MVT::f16 && "unexpected extend libcall");
21279 
21280       // Need a libcall, but the ABI for f16 is soft-float on macOS.
21281       TargetLowering::CallLoweringInfo CLI(DAG);
21282       Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
21283 
21284       In = DAG.getBitcast(MVT::i16, In);
21285       TargetLowering::ArgListTy Args;
21286       TargetLowering::ArgListEntry Entry;
21287       Entry.Node = In;
21288       Entry.Ty = EVT(MVT::i16).getTypeForEVT(*DAG.getContext());
21289       Entry.IsSExt = false;
21290       Entry.IsZExt = true;
21291       Args.push_back(Entry);
21292 
21293       SDValue Callee = DAG.getExternalSymbol(
21294           getLibcallName(RTLIB::FPEXT_F16_F32),
21295           getPointerTy(DAG.getDataLayout()));
21296       CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
21297           CallingConv::C, EVT(VT).getTypeForEVT(*DAG.getContext()), Callee,
21298           std::move(Args));
21299 
21300       SDValue Res;
21301       std::tie(Res,Chain) = LowerCallTo(CLI);
21302       if (IsStrict)
21303         Res = DAG.getMergeValues({Res, Chain}, DL);
21304 
21305       return Res;
21306     }
21307 
21308     In = DAG.getBitcast(MVT::i16, In);
21309     In = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v8i16,
21310                      getZeroVector(MVT::v8i16, Subtarget, DAG, DL), In,
21311                      DAG.getIntPtrConstant(0, DL));
21312     SDValue Res;
21313     if (IsStrict) {
21314       Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, DL, {MVT::v4f32, MVT::Other},
21315                         {Chain, In});
21316       Chain = Res.getValue(1);
21317     } else {
21318       Res = DAG.getNode(X86ISD::CVTPH2PS, DL, MVT::v4f32, In,
21319                         DAG.getTargetConstant(4, DL, MVT::i32));
21320     }
21321     Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, Res,
21322                       DAG.getIntPtrConstant(0, DL));
21323     if (IsStrict)
21324       return DAG.getMergeValues({Res, Chain}, DL);
21325     return Res;
21326   }
21327 
21328   if (!SVT.isVector())
21329     return Op;
21330 
21331   if (SVT.getVectorElementType() == MVT::bf16) {
21332     // FIXME: Do we need to support strict FP?
21333     assert(!IsStrict && "Strict FP doesn't support BF16");
21334     if (VT.getVectorElementType() == MVT::f64) {
21335       MVT TmpVT = VT.changeVectorElementType(MVT::f32);
21336       return DAG.getNode(ISD::FP_EXTEND, DL, VT,
21337                          DAG.getNode(ISD::FP_EXTEND, DL, TmpVT, In));
21338     }
21339     assert(VT.getVectorElementType() == MVT::f32 && "Unexpected fpext");
21340     MVT NVT = SVT.changeVectorElementType(MVT::i32);
21341     In = DAG.getBitcast(SVT.changeTypeToInteger(), In);
21342     In = DAG.getNode(ISD::ZERO_EXTEND, DL, NVT, In);
21343     In = DAG.getNode(ISD::SHL, DL, NVT, In, DAG.getConstant(16, DL, NVT));
21344     return DAG.getBitcast(VT, In);
21345   }
21346 
21347   if (SVT.getVectorElementType() == MVT::f16) {
21348     if (Subtarget.hasFP16() && isTypeLegal(SVT))
21349       return Op;
21350     assert(Subtarget.hasF16C() && "Unexpected features!");
21351     if (SVT == MVT::v2f16)
21352       In = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f16, In,
21353                        DAG.getUNDEF(MVT::v2f16));
21354     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f16, In,
21355                               DAG.getUNDEF(MVT::v4f16));
21356     if (IsStrict)
21357       return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
21358                          {Op->getOperand(0), Res});
21359     return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
21360   } else if (VT == MVT::v4f64 || VT == MVT::v8f64) {
21361     return Op;
21362   }
21363 
21364   assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
21365 
21366   SDValue Res =
21367       DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
21368   if (IsStrict)
21369     return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
21370                        {Op->getOperand(0), Res});
21371   return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
21372 }
21373 
21374 SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
21375   bool IsStrict = Op->isStrictFPOpcode();
21376 
21377   SDLoc DL(Op);
21378   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21379   SDValue In = Op.getOperand(IsStrict ? 1 : 0);
21380   MVT VT = Op.getSimpleValueType();
21381   MVT SVT = In.getSimpleValueType();
21382 
21383   if (SVT == MVT::f128 || (VT == MVT::f16 && SVT == MVT::f80))
21384     return SDValue();
21385 
21386   if (VT == MVT::f16 && (SVT == MVT::f64 || SVT == MVT::f32) &&
21387       !Subtarget.hasFP16() && (SVT == MVT::f64 || !Subtarget.hasF16C())) {
21388     if (!Subtarget.getTargetTriple().isOSDarwin())
21389       return SDValue();
21390 
21391     // We need a libcall, but the ABI for f16 libcalls on macOS is soft-float.
21392     TargetLowering::CallLoweringInfo CLI(DAG);
21393     Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
21394 
21395     TargetLowering::ArgListTy Args;
21396     TargetLowering::ArgListEntry Entry;
21397     Entry.Node = In;
21398     Entry.Ty = EVT(SVT).getTypeForEVT(*DAG.getContext());
21399     Entry.IsSExt = false;
21400     Entry.IsZExt = true;
21401     Args.push_back(Entry);
21402 
21403     SDValue Callee = DAG.getExternalSymbol(
21404         getLibcallName(SVT == MVT::f64 ? RTLIB::FPROUND_F64_F16
21405                                        : RTLIB::FPROUND_F32_F16),
21406         getPointerTy(DAG.getDataLayout()));
21407     CLI.setDebugLoc(DL).setChain(Chain).setLibCallee(
21408         CallingConv::C, EVT(MVT::i16).getTypeForEVT(*DAG.getContext()), Callee,
21409         std::move(Args));
21410 
21411     SDValue Res;
21412     std::tie(Res, Chain) = LowerCallTo(CLI);
21413 
21414     Res = DAG.getBitcast(MVT::f16, Res);
21415 
21416     if (IsStrict)
21417       Res = DAG.getMergeValues({Res, Chain}, DL);
21418 
21419     return Res;
21420   }
21421 
21422   if (VT.getScalarType() == MVT::bf16) {
21423     if (SVT.getScalarType() == MVT::f32 &&
21424         ((Subtarget.hasBF16() && Subtarget.hasVLX()) ||
21425          Subtarget.hasAVXNECONVERT()))
21426       return Op;
21427     return SDValue();
21428   }
21429 
21430   if (VT.getScalarType() == MVT::f16 && !Subtarget.hasFP16()) {
21431     if (!Subtarget.hasF16C() || SVT.getScalarType() != MVT::f32)
21432       return SDValue();
21433 
21434     if (VT.isVector())
21435       return Op;
21436 
21437     SDValue Res;
21438     SDValue Rnd = DAG.getTargetConstant(X86::STATIC_ROUNDING::CUR_DIRECTION, DL,
21439                                         MVT::i32);
21440     if (IsStrict) {
21441       Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4f32,
21442                         DAG.getConstantFP(0, DL, MVT::v4f32), In,
21443                         DAG.getIntPtrConstant(0, DL));
21444       Res = DAG.getNode(X86ISD::STRICT_CVTPS2PH, DL, {MVT::v8i16, MVT::Other},
21445                         {Chain, Res, Rnd});
21446       Chain = Res.getValue(1);
21447     } else {
21448       // FIXME: Should we use zeros for upper elements for non-strict?
21449       Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, In);
21450       Res = DAG.getNode(X86ISD::CVTPS2PH, DL, MVT::v8i16, Res, Rnd);
21451     }
21452 
21453     Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i16, Res,
21454                       DAG.getIntPtrConstant(0, DL));
21455     Res = DAG.getBitcast(MVT::f16, Res);
21456 
21457     if (IsStrict)
21458       return DAG.getMergeValues({Res, Chain}, DL);
21459 
21460     return Res;
21461   }
21462 
21463   return Op;
21464 }
21465 
21466 static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {
21467   bool IsStrict = Op->isStrictFPOpcode();
21468   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21469   assert(Src.getValueType() == MVT::i16 && Op.getValueType() == MVT::f32 &&
21470          "Unexpected VT!");
21471 
21472   SDLoc dl(Op);
21473   SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16,
21474                             DAG.getConstant(0, dl, MVT::v8i16), Src,
21475                             DAG.getIntPtrConstant(0, dl));
21476 
21477   SDValue Chain;
21478   if (IsStrict) {
21479     Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {MVT::v4f32, MVT::Other},
21480                       {Op.getOperand(0), Res});
21481     Chain = Res.getValue(1);
21482   } else {
21483     Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
21484   }
21485 
21486   Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
21487                     DAG.getIntPtrConstant(0, dl));
21488 
21489   if (IsStrict)
21490     return DAG.getMergeValues({Res, Chain}, dl);
21491 
21492   return Res;
21493 }
21494 
21495 static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {
21496   bool IsStrict = Op->isStrictFPOpcode();
21497   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21498   assert(Src.getValueType() == MVT::f32 && Op.getValueType() == MVT::i16 &&
21499          "Unexpected VT!");
21500 
21501   SDLoc dl(Op);
21502   SDValue Res, Chain;
21503   if (IsStrict) {
21504     Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4f32,
21505                       DAG.getConstantFP(0, dl, MVT::v4f32), Src,
21506                       DAG.getIntPtrConstant(0, dl));
21507     Res = DAG.getNode(
21508         X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
21509         {Op.getOperand(0), Res, DAG.getTargetConstant(4, dl, MVT::i32)});
21510     Chain = Res.getValue(1);
21511   } else {
21512     // FIXME: Should we use zeros for upper elements for non-strict?
21513     Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, Src);
21514     Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
21515                       DAG.getTargetConstant(4, dl, MVT::i32));
21516   }
21517 
21518   Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Res,
21519                     DAG.getIntPtrConstant(0, dl));
21520 
21521   if (IsStrict)
21522     return DAG.getMergeValues({Res, Chain}, dl);
21523 
21524   return Res;
21525 }
21526 
21527 SDValue X86TargetLowering::LowerFP_TO_BF16(SDValue Op,
21528                                            SelectionDAG &DAG) const {
21529   SDLoc DL(Op);
21530 
21531   MVT SVT = Op.getOperand(0).getSimpleValueType();
21532   if (SVT == MVT::f32 && ((Subtarget.hasBF16() && Subtarget.hasVLX()) ||
21533                           Subtarget.hasAVXNECONVERT())) {
21534     SDValue Res;
21535     Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, Op.getOperand(0));
21536     Res = DAG.getNode(X86ISD::CVTNEPS2BF16, DL, MVT::v8bf16, Res);
21537     Res = DAG.getBitcast(MVT::v8i16, Res);
21538     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i16, Res,
21539                        DAG.getIntPtrConstant(0, DL));
21540   }
21541 
21542   MakeLibCallOptions CallOptions;
21543   RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, MVT::bf16);
21544   SDValue Res =
21545       makeLibCall(DAG, LC, MVT::f16, Op.getOperand(0), CallOptions, DL).first;
21546   return DAG.getBitcast(MVT::i16, Res);
21547 }
21548 
21549 /// Depending on uarch and/or optimizing for size, we might prefer to use a
21550 /// vector operation in place of the typical scalar operation.
21551 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
21552                                          const X86Subtarget &Subtarget) {
21553   // If both operands have other uses, this is probably not profitable.
21554   SDValue LHS = Op.getOperand(0);
21555   SDValue RHS = Op.getOperand(1);
21556   if (!LHS.hasOneUse() && !RHS.hasOneUse())
21557     return Op;
21558 
21559   // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
21560   bool IsFP = Op.getSimpleValueType().isFloatingPoint();
21561   if (IsFP && !Subtarget.hasSSE3())
21562     return Op;
21563   if (!IsFP && !Subtarget.hasSSSE3())
21564     return Op;
21565 
21566   // Extract from a common vector.
21567   if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21568       RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21569       LHS.getOperand(0) != RHS.getOperand(0) ||
21570       !isa<ConstantSDNode>(LHS.getOperand(1)) ||
21571       !isa<ConstantSDNode>(RHS.getOperand(1)) ||
21572       !shouldUseHorizontalOp(true, DAG, Subtarget))
21573     return Op;
21574 
21575   // Allow commuted 'hadd' ops.
21576   // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
21577   unsigned HOpcode;
21578   switch (Op.getOpcode()) {
21579     case ISD::ADD: HOpcode = X86ISD::HADD; break;
21580     case ISD::SUB: HOpcode = X86ISD::HSUB; break;
21581     case ISD::FADD: HOpcode = X86ISD::FHADD; break;
21582     case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
21583     default:
21584       llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
21585   }
21586   unsigned LExtIndex = LHS.getConstantOperandVal(1);
21587   unsigned RExtIndex = RHS.getConstantOperandVal(1);
21588   if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
21589       (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
21590     std::swap(LExtIndex, RExtIndex);
21591 
21592   if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
21593     return Op;
21594 
21595   SDValue X = LHS.getOperand(0);
21596   EVT VecVT = X.getValueType();
21597   unsigned BitWidth = VecVT.getSizeInBits();
21598   unsigned NumLanes = BitWidth / 128;
21599   unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
21600   assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
21601          "Not expecting illegal vector widths here");
21602 
21603   // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
21604   // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
21605   SDLoc DL(Op);
21606   if (BitWidth == 256 || BitWidth == 512) {
21607     unsigned LaneIdx = LExtIndex / NumEltsPerLane;
21608     X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
21609     LExtIndex %= NumEltsPerLane;
21610   }
21611 
21612   // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
21613   // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
21614   // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
21615   // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
21616   SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
21617   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
21618                      DAG.getIntPtrConstant(LExtIndex / 2, DL));
21619 }
21620 
21621 /// Depending on uarch and/or optimizing for size, we might prefer to use a
21622 /// vector operation in place of the typical scalar operation.
21623 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
21624   assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
21625          "Only expecting float/double");
21626   return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
21627 }
21628 
21629 /// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
21630 /// This mode isn't supported in hardware on X86. But as long as we aren't
21631 /// compiling with trapping math, we can emulate this with
21632 /// trunc(X + copysign(nextafter(0.5, 0.0), X)).
21633 static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {
21634   SDValue N0 = Op.getOperand(0);
21635   SDLoc dl(Op);
21636   MVT VT = Op.getSimpleValueType();
21637 
21638   // N0 += copysign(nextafter(0.5, 0.0), N0)
21639   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21640   bool Ignored;
21641   APFloat Point5Pred = APFloat(0.5f);
21642   Point5Pred.convert(Sem, APFloat::rmNearestTiesToEven, &Ignored);
21643   Point5Pred.next(/*nextDown*/true);
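  // Illustrative check for f32, where Point5Pred is ~0.49999997: round(2.5)
  // must be 3.0, and indeed 2.5 + 0.49999997 rounds up to 3.0, which FTRUNC
  // then leaves unchanged.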
21644 
21645   SDValue Adder = DAG.getNode(ISD::FCOPYSIGN, dl, VT,
21646                               DAG.getConstantFP(Point5Pred, dl, VT), N0);
21647   N0 = DAG.getNode(ISD::FADD, dl, VT, N0, Adder);
21648 
21649   // Truncate the result to remove fraction.
21650   return DAG.getNode(ISD::FTRUNC, dl, VT, N0);
21651 }
21652 
21653 /// The only differences between FABS and FNEG are the mask and the logic op.
21654 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
21655 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
21656   assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
21657          "Wrong opcode for lowering FABS or FNEG.");
21658 
21659   bool IsFABS = (Op.getOpcode() == ISD::FABS);
21660 
21661   // If this is a FABS and it has an FNEG user, bail out to fold the combination
21662   // into an FNABS. We'll lower the FABS after that if it is still in use.
21663   if (IsFABS)
21664     for (SDNode *User : Op->uses())
21665       if (User->getOpcode() == ISD::FNEG)
21666         return Op;
21667 
21668   SDLoc dl(Op);
21669   MVT VT = Op.getSimpleValueType();
21670 
21671   bool IsF128 = (VT == MVT::f128);
21672   assert(VT.isFloatingPoint() && VT != MVT::f80 &&
21673          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
21674          "Unexpected type in LowerFABSorFNEG");
21675 
21676   // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOptLevel to
21677   // decide if we should generate a 16-byte constant mask when we only need 4 or
21678   // 8 bytes for the scalar case.
21679 
21680   // There are no scalar bitwise logical SSE/AVX instructions, so we
21681   // generate a 16-byte vector constant and logic op even for the scalar case.
21682   // Using a 16-byte mask allows folding the load of the mask with
21683   // the logic op, so it can save (~4 bytes) on code size.
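  // E.g. a scalar f32 FABS typically becomes an ANDPS with a splatted
  // 0x7fffffff mask, with the value passed through an XMM register via the
  // SCALAR_TO_VECTOR / EXTRACT_VECTOR_ELT nodes built below.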
21684   bool IsFakeVector = !VT.isVector() && !IsF128;
21685   MVT LogicVT = VT;
21686   if (IsFakeVector)
21687     LogicVT = (VT == MVT::f64)   ? MVT::v2f64
21688               : (VT == MVT::f32) ? MVT::v4f32
21689                                  : MVT::v8f16;
21690 
21691   unsigned EltBits = VT.getScalarSizeInBits();
21692   // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
21693   APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
21694                            APInt::getSignMask(EltBits);
21695   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21696   SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
21697 
21698   SDValue Op0 = Op.getOperand(0);
21699   bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
21700   unsigned LogicOp = IsFABS  ? X86ISD::FAND :
21701                      IsFNABS ? X86ISD::FOR  :
21702                                X86ISD::FXOR;
21703   SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
21704 
21705   if (VT.isVector() || IsF128)
21706     return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
21707 
21708   // For the scalar case extend to a 128-bit vector, perform the logic op,
21709   // and extract the scalar result back out.
21710   Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
21711   SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
21712   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
21713                      DAG.getIntPtrConstant(0, dl));
21714 }
21715 
21716 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
21717   SDValue Mag = Op.getOperand(0);
21718   SDValue Sign = Op.getOperand(1);
21719   SDLoc dl(Op);
21720 
21721   // If the sign operand is smaller, extend it first.
21722   MVT VT = Op.getSimpleValueType();
21723   if (Sign.getSimpleValueType().bitsLT(VT))
21724     Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
21725 
21726   // And if it is bigger, shrink it first.
21727   if (Sign.getSimpleValueType().bitsGT(VT))
21728     Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign,
21729                        DAG.getIntPtrConstant(0, dl, /*isTarget=*/true));
21730 
21731   // At this point the operands and the result should have the same
21732   // type, and that won't be f80 since that is not custom lowered.
21733   bool IsF128 = (VT == MVT::f128);
21734   assert(VT.isFloatingPoint() && VT != MVT::f80 &&
21735          DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
21736          "Unexpected type in LowerFCOPYSIGN");
21737 
21738   const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21739 
21740   // Perform all scalar logic operations as 16-byte vectors because there are no
21741   // scalar FP logic instructions in SSE.
21742   // TODO: This isn't necessary. If we used scalar types, we might avoid some
21743   // unnecessary splats, but we might miss load folding opportunities. Should
21744   // this decision be based on OptimizeForSize?
21745   bool IsFakeVector = !VT.isVector() && !IsF128;
21746   MVT LogicVT = VT;
21747   if (IsFakeVector)
21748     LogicVT = (VT == MVT::f64)   ? MVT::v2f64
21749               : (VT == MVT::f32) ? MVT::v4f32
21750                                  : MVT::v8f16;
21751 
21752   // The mask constants are automatically splatted for vector types.
21753   unsigned EltSizeInBits = VT.getScalarSizeInBits();
21754   SDValue SignMask = DAG.getConstantFP(
21755       APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
21756   SDValue MagMask = DAG.getConstantFP(
21757       APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
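  // This is the usual bit trick: copysign(Mag, Sign) == (Mag & ~SignMask) |
  // (Sign & SignMask), implemented with FAND/FOR on the (possibly widened)
  // vector type.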
21758 
21759   // First, clear all bits but the sign bit from the second operand (sign).
21760   if (IsFakeVector)
21761     Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
21762   SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
21763 
21764   // Next, clear the sign bit from the first operand (magnitude).
21765   // TODO: If we had general constant folding for FP logic ops, this check
21766   // wouldn't be necessary.
21767   SDValue MagBits;
21768   if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
21769     APFloat APF = Op0CN->getValueAPF();
21770     APF.clearSign();
21771     MagBits = DAG.getConstantFP(APF, dl, LogicVT);
21772   } else {
21773     // If the magnitude operand wasn't a constant, we need to AND out the sign.
21774     if (IsFakeVector)
21775       Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
21776     MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
21777   }
21778 
21779   // OR the magnitude value with the sign bit.
21780   SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
21781   return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
21782                                           DAG.getIntPtrConstant(0, dl));
21783 }
21784 
21785 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
21786   SDValue N0 = Op.getOperand(0);
21787   SDLoc dl(Op);
21788   MVT VT = Op.getSimpleValueType();
21789 
21790   MVT OpVT = N0.getSimpleValueType();
21791   assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
21792          "Unexpected type for FGETSIGN");
21793 
21794   // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
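  // MOVMSK copies the sign bit of every vector lane into the low bits of a
  // GPR, so masking with 1 keeps just the sign of lane 0, i.e. of the scalar.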
21795   MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
21796   SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
21797   Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
21798   Res = DAG.getZExtOrTrunc(Res, dl, VT);
21799   Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
21800   return Res;
21801 }
21802 
21803 /// Helper for attempting to create a X86ISD::BT node.
21804 static SDValue getBT(SDValue Src, SDValue BitNo, const SDLoc &DL, SelectionDAG &DAG) {
21805   // If Src is i8, promote it to i32 with any_extend.  There is no i8 BT
21806   // instruction.  Since the shift amount is in-range-or-undefined, we know
21807   // that doing a bittest on the i32 value is ok.  We extend to i32 because
21808   // the encoding for the i16 version is larger than the i32 version.
21809   // Also promote i16 to i32 for performance / code size reasons.
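  // E.g. a bit test of an i16 value with a variable index is emitted as BT on
  // the any_extended i32 value; the widened high bits can never be selected
  // because the index is at most 15 (or the result is undefined anyway).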
21810   if (Src.getValueType().getScalarSizeInBits() < 32)
21811     Src = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Src);
21812 
21813   // No legal type found, give up.
21814   if (!DAG.getTargetLoweringInfo().isTypeLegal(Src.getValueType()))
21815     return SDValue();
21816 
21817   // See if we can use the 32-bit instruction instead of the 64-bit one for a
21818   // shorter encoding. Since the former takes the modulo 32 of BitNo and the
21819   // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
21820   // known to be zero.
21821   if (Src.getValueType() == MVT::i64 &&
21822       DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
21823     Src = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Src);
21824 
21825   // If the operand types disagree, extend the shift amount to match.  Since
21826   // BT ignores high bits (like shifts) we can use anyextend.
21827   if (Src.getValueType() != BitNo.getValueType()) {
21828     // Peek through a mask/modulo operation.
21829     // TODO: DAGCombine fails to do this as it just checks isTruncateFree, but
21830     // we probably need a better IsDesirableToPromoteOp to handle this as well.
21831     if (BitNo.getOpcode() == ISD::AND && BitNo->hasOneUse())
21832       BitNo = DAG.getNode(ISD::AND, DL, Src.getValueType(),
21833                           DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
21834                                       BitNo.getOperand(0)),
21835                           DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(),
21836                                       BitNo.getOperand(1)));
21837     else
21838       BitNo = DAG.getNode(ISD::ANY_EXTEND, DL, Src.getValueType(), BitNo);
21839   }
21840 
21841   return DAG.getNode(X86ISD::BT, DL, MVT::i32, Src, BitNo);
21842 }
21843 
21844 /// Helper for creating a X86ISD::SETCC node.
21845 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
21846                         SelectionDAG &DAG) {
21847   return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
21848                      DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
21849 }
21850 
21851 /// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
21852 /// recognizable memcmp expansion.
21853 static bool isOrXorXorTree(SDValue X, bool Root = true) {
21854   if (X.getOpcode() == ISD::OR)
21855     return isOrXorXorTree(X.getOperand(0), false) &&
21856            isOrXorXorTree(X.getOperand(1), false);
21857   if (Root)
21858     return false;
21859   return X.getOpcode() == ISD::XOR;
21860 }
21861 
21862 /// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
21863 /// expansion.
21864 template <typename F>
21865 static SDValue emitOrXorXorTree(SDValue X, const SDLoc &DL, SelectionDAG &DAG,
21866                                 EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
21867   SDValue Op0 = X.getOperand(0);
21868   SDValue Op1 = X.getOperand(1);
21869   if (X.getOpcode() == ISD::OR) {
21870     SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
21871     SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
21872     if (VecVT != CmpVT)
21873       return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
21874     if (HasPT)
21875       return DAG.getNode(ISD::OR, DL, VecVT, A, B);
21876     return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
21877   }
21878   if (X.getOpcode() == ISD::XOR) {
21879     SDValue A = SToV(Op0);
21880     SDValue B = SToV(Op1);
21881     if (VecVT != CmpVT)
21882       return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
21883     if (HasPT)
21884       return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
21885     return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
21886   }
21887   llvm_unreachable("Impossible");
21888 }
21889 
21890 /// Try to map a 128-bit or larger integer comparison to vector instructions
21891 /// before type legalization splits it up into chunks.
21892 static SDValue combineVectorSizedSetCCEquality(EVT VT, SDValue X, SDValue Y,
21893                                                ISD::CondCode CC,
21894                                                const SDLoc &DL,
21895                                                SelectionDAG &DAG,
21896                                                const X86Subtarget &Subtarget) {
21897   assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
21898 
21899   // We're looking for an oversized integer equality comparison.
21900   EVT OpVT = X.getValueType();
21901   unsigned OpSize = OpVT.getSizeInBits();
21902   if (!OpVT.isScalarInteger() || OpSize < 128)
21903     return SDValue();
21904 
21905   // Ignore a comparison with zero because that gets special treatment in
21906   // EmitTest(). But make an exception for the special case of a pair of
21907   // logically-combined vector-sized operands compared to zero. This pattern may
21908   // be generated by the memcmp expansion pass with oversized integer compares
21909   // (see PR33325).
21910   bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
21911   if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
21912     return SDValue();
21913 
21914   // Don't perform this combine if constructing the vector will be expensive.
21915   auto IsVectorBitCastCheap = [](SDValue X) {
21916     X = peekThroughBitcasts(X);
21917     return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
21918            X.getOpcode() == ISD::LOAD;
21919   };
21920   if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
21921       !IsOrXorXorTreeCCZero)
21922     return SDValue();
21923 
21924   // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
21925   // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
21926   // Otherwise use PCMPEQ (plus AND) and mask testing.
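  // For instance (illustrative), an i128 equality compare of two loaded values
  // on an SSE4.1 target typically becomes PXOR + PTEST + SETE/SETNE, with the
  // loads folded into the vector ops where possible.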
21927   bool NoImplicitFloatOps =
21928       DAG.getMachineFunction().getFunction().hasFnAttribute(
21929           Attribute::NoImplicitFloat);
21930   if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
21931       ((OpSize == 128 && Subtarget.hasSSE2()) ||
21932        (OpSize == 256 && Subtarget.hasAVX()) ||
21933        (OpSize == 512 && Subtarget.useAVX512Regs()))) {
21934     bool HasPT = Subtarget.hasSSE41();
21935 
21936     // PTEST and MOVMSK are slow on Knights Landing and Knights Mill and widened
21937     // vector registers are essentially free. (Technically, widening registers
21938     // prevents load folding, but the tradeoff is worth it.)
21939     bool PreferKOT = Subtarget.preferMaskRegisters();
21940     bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
21941 
21942     EVT VecVT = MVT::v16i8;
21943     EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
21944     if (OpSize == 256) {
21945       VecVT = MVT::v32i8;
21946       CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
21947     }
21948     EVT CastVT = VecVT;
21949     bool NeedsAVX512FCast = false;
21950     if (OpSize == 512 || NeedZExt) {
21951       if (Subtarget.hasBWI()) {
21952         VecVT = MVT::v64i8;
21953         CmpVT = MVT::v64i1;
21954         if (OpSize == 512)
21955           CastVT = VecVT;
21956       } else {
21957         VecVT = MVT::v16i32;
21958         CmpVT = MVT::v16i1;
21959         CastVT = OpSize == 512   ? VecVT
21960                  : OpSize == 256 ? MVT::v8i32
21961                                  : MVT::v4i32;
21962         NeedsAVX512FCast = true;
21963       }
21964     }
21965 
21966     auto ScalarToVector = [&](SDValue X) -> SDValue {
21967       bool TmpZext = false;
21968       EVT TmpCastVT = CastVT;
21969       if (X.getOpcode() == ISD::ZERO_EXTEND) {
21970         SDValue OrigX = X.getOperand(0);
21971         unsigned OrigSize = OrigX.getScalarValueSizeInBits();
21972         if (OrigSize < OpSize) {
21973           if (OrigSize == 128) {
21974             TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
21975             X = OrigX;
21976             TmpZext = true;
21977           } else if (OrigSize == 256) {
21978             TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
21979             X = OrigX;
21980             TmpZext = true;
21981           }
21982         }
21983       }
21984       X = DAG.getBitcast(TmpCastVT, X);
21985       if (!NeedZExt && !TmpZext)
21986         return X;
21987       return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
21988                          DAG.getConstant(0, DL, VecVT), X,
21989                          DAG.getVectorIdxConstant(0, DL));
21990     };
21991 
21992     SDValue Cmp;
21993     if (IsOrXorXorTreeCCZero) {
21994       // This is a bitwise-combined equality comparison of 2 pairs of vectors:
21995       // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
21996       // Use 2 vector equality compares and 'and' the results before doing a
21997       // MOVMSK.
21998       Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
21999     } else {
22000       SDValue VecX = ScalarToVector(X);
22001       SDValue VecY = ScalarToVector(Y);
22002       if (VecVT != CmpVT) {
22003         Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
22004       } else if (HasPT) {
22005         Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
22006       } else {
22007         Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
22008       }
22009     }
22010     // AVX512 should emit a setcc that will lower to kortest.
22011     if (VecVT != CmpVT) {
22012       EVT KRegVT = CmpVT == MVT::v64i1   ? MVT::i64
22013                    : CmpVT == MVT::v32i1 ? MVT::i32
22014                                          : MVT::i16;
22015       return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
22016                           DAG.getConstant(0, DL, KRegVT), CC);
22017     }
22018     if (HasPT) {
22019       SDValue BCCmp =
22020           DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64, Cmp);
22021       SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
22022       X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
22023       SDValue X86SetCC = getSETCC(X86CC, PT, DL, DAG);
22024       return DAG.getNode(ISD::TRUNCATE, DL, VT, X86SetCC.getValue(0));
22025     }
22026     // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
22027     // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
22028     // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
22029     assert(Cmp.getValueType() == MVT::v16i8 &&
22030            "Non 128-bit vector on pre-SSE41 target");
22031     SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
22032     SDValue FFFFs = DAG.getConstant(0xFFFF, DL, MVT::i32);
22033     return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
22034   }
22035 
22036   return SDValue();
22037 }
22038 
22039 /// Helper for matching BINOP(EXTRACTELT(X,0),BINOP(EXTRACTELT(X,1),...))
22040 /// style scalarized (associative) reduction patterns. Partial reductions
22041 /// are supported when the pointer SrcMask is non-null.
22042 /// TODO - move this to SelectionDAG?
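/// For example (illustrative), a fully-used v4i32 OR-reduction looks like
///   or(or(extractelt(X,0), extractelt(X,1)),
///      or(extractelt(X,2), extractelt(X,3)))
/// and the source vector X is collected into SrcOps.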
22043 static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
22044                                  SmallVectorImpl<SDValue> &SrcOps,
22045                                  SmallVectorImpl<APInt> *SrcMask = nullptr) {
22046   SmallVector<SDValue, 8> Opnds;
22047   DenseMap<SDValue, APInt> SrcOpMap;
22048   EVT VT = MVT::Other;
22049 
22050   // Recognize a special case where a vector is cast into a wide integer to
22051   // test all 0s.
22052   assert(Op.getOpcode() == unsigned(BinOp) &&
22053          "Unexpected bit reduction opcode");
22054   Opnds.push_back(Op.getOperand(0));
22055   Opnds.push_back(Op.getOperand(1));
22056 
22057   for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
22058     SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
22059     // BFS traverse all BinOp operands.
22060     if (I->getOpcode() == unsigned(BinOp)) {
22061       Opnds.push_back(I->getOperand(0));
22062       Opnds.push_back(I->getOperand(1));
22063       // Re-evaluate the number of nodes to be traversed.
22064       e += 2; // 2 more nodes (LHS and RHS) are pushed.
22065       continue;
22066     }
22067 
22068     // Quit if this is not an EXTRACT_VECTOR_ELT.
22069     if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
22070       return false;
22071 
22072     // Quit if the index is not a constant.
22073     auto *Idx = dyn_cast<ConstantSDNode>(I->getOperand(1));
22074     if (!Idx)
22075       return false;
22076 
22077     SDValue Src = I->getOperand(0);
22078     DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
22079     if (M == SrcOpMap.end()) {
22080       VT = Src.getValueType();
22081       // Quit if not the same type.
22082       if (!SrcOpMap.empty() && VT != SrcOpMap.begin()->first.getValueType())
22083         return false;
22084       unsigned NumElts = VT.getVectorNumElements();
22085       APInt EltCount = APInt::getZero(NumElts);
22086       M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
22087       SrcOps.push_back(Src);
22088     }
22089 
22090     // Quit if element already used.
22091     unsigned CIdx = Idx->getZExtValue();
22092     if (M->second[CIdx])
22093       return false;
22094     M->second.setBit(CIdx);
22095   }
22096 
22097   if (SrcMask) {
22098     // Collect the source partial masks.
22099     for (SDValue &SrcOp : SrcOps)
22100       SrcMask->push_back(SrcOpMap[SrcOp]);
22101   } else {
22102     // Quit if not all elements are used.
22103     for (const auto &I : SrcOpMap)
22104       if (!I.second.isAllOnes())
22105         return false;
22106   }
22107 
22108   return true;
22109 }
22110 
22111 // Helper function for comparing all bits of two vectors.
22112 static SDValue LowerVectorAllEqual(const SDLoc &DL, SDValue LHS, SDValue RHS,
22113                                    ISD::CondCode CC, const APInt &OriginalMask,
22114                                    const X86Subtarget &Subtarget,
22115                                    SelectionDAG &DAG, X86::CondCode &X86CC) {
22116   EVT VT = LHS.getValueType();
22117   unsigned ScalarSize = VT.getScalarSizeInBits();
22118   if (OriginalMask.getBitWidth() != ScalarSize) {
22119     assert(ScalarSize == 1 && "Element Mask vs Vector bitwidth mismatch");
22120     return SDValue();
22121   }
22122 
22123   // Quit if not convertible to a legal scalar or 128/256/512-bit vector.
22124   if (!llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
22125     return SDValue();
22126 
22127   // FCMP may use ISD::SETNE when nnan - early out if we manage to get here.
22128   if (VT.isFloatingPoint())
22129     return SDValue();
22130 
22131   assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
22132   X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);
22133 
22134   APInt Mask = OriginalMask;
22135 
22136   auto MaskBits = [&](SDValue Src) {
22137     if (Mask.isAllOnes())
22138       return Src;
22139     EVT SrcVT = Src.getValueType();
22140     SDValue MaskValue = DAG.getConstant(Mask, DL, SrcVT);
22141     return DAG.getNode(ISD::AND, DL, SrcVT, Src, MaskValue);
22142   };
22143 
22144   // For sub-128-bit vectors, cast both sides to a (legal) integer and compare.
22145   if (VT.getSizeInBits() < 128) {
22146     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
22147     if (!DAG.getTargetLoweringInfo().isTypeLegal(IntVT)) {
22148       if (IntVT != MVT::i64)
22149         return SDValue();
22150       auto SplitLHS = DAG.SplitScalar(DAG.getBitcast(IntVT, MaskBits(LHS)), DL,
22151                                       MVT::i32, MVT::i32);
22152       auto SplitRHS = DAG.SplitScalar(DAG.getBitcast(IntVT, MaskBits(RHS)), DL,
22153                                       MVT::i32, MVT::i32);
22154       SDValue Lo =
22155           DAG.getNode(ISD::XOR, DL, MVT::i32, SplitLHS.first, SplitRHS.first);
22156       SDValue Hi =
22157           DAG.getNode(ISD::XOR, DL, MVT::i32, SplitLHS.second, SplitRHS.second);
22158       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
22159                          DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi),
22160                          DAG.getConstant(0, DL, MVT::i32));
22161     }
22162     return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
22163                        DAG.getBitcast(IntVT, MaskBits(LHS)),
22164                        DAG.getBitcast(IntVT, MaskBits(RHS)));
22165   }
22166 
22167   // Without PTEST, a masked v2i64 or-reduction is not faster than
22168   // scalarization.
22169   bool UseKORTEST = Subtarget.useAVX512Regs();
22170   bool UsePTEST = Subtarget.hasSSE41();
22171   if (!UsePTEST && !Mask.isAllOnes() && ScalarSize > 32)
22172     return SDValue();
22173 
22174   // Split down to 128/256/512-bit vector.
22175   unsigned TestSize = UseKORTEST ? 512 : (Subtarget.hasAVX() ? 256 : 128);
22176 
22177   // If the input vector has vector elements wider than the target test size,
22178   // then cast to <X x i64> so it will safely split.
22179   if (ScalarSize > TestSize) {
22180     if (!Mask.isAllOnes())
22181       return SDValue();
22182     VT = EVT::getVectorVT(*DAG.getContext(), MVT::i64, VT.getSizeInBits() / 64);
22183     LHS = DAG.getBitcast(VT, LHS);
22184     RHS = DAG.getBitcast(VT, RHS);
22185     Mask = APInt::getAllOnes(64);
22186   }
22187 
22188   if (VT.getSizeInBits() > TestSize) {
22189     KnownBits KnownRHS = DAG.computeKnownBits(RHS);
22190     if (KnownRHS.isConstant() && KnownRHS.getConstant() == Mask) {
22191       // If ICMP(AND(LHS,MASK),MASK) - reduce using AND splits.
22192       while (VT.getSizeInBits() > TestSize) {
22193         auto Split = DAG.SplitVector(LHS, DL);
22194         VT = Split.first.getValueType();
22195         LHS = DAG.getNode(ISD::AND, DL, VT, Split.first, Split.second);
22196       }
22197       RHS = DAG.getAllOnesConstant(DL, VT);
22198     } else if (!UsePTEST && !KnownRHS.isZero()) {
22199       // MOVMSK Special Case:
22200       // ALLOF(CMPEQ(X,Y)) -> AND(CMPEQ(X[0],Y[0]),CMPEQ(X[1],Y[1]),....)
22201       MVT SVT = ScalarSize >= 32 ? MVT::i32 : MVT::i8;
22202       VT = MVT::getVectorVT(SVT, VT.getSizeInBits() / SVT.getSizeInBits());
22203       LHS = DAG.getBitcast(VT, MaskBits(LHS));
22204       RHS = DAG.getBitcast(VT, MaskBits(RHS));
22205       EVT BoolVT = VT.changeVectorElementType(MVT::i1);
22206       SDValue V = DAG.getSetCC(DL, BoolVT, LHS, RHS, ISD::SETEQ);
22207       V = DAG.getSExtOrTrunc(V, DL, VT);
22208       while (VT.getSizeInBits() > TestSize) {
22209         auto Split = DAG.SplitVector(V, DL);
22210         VT = Split.first.getValueType();
22211         V = DAG.getNode(ISD::AND, DL, VT, Split.first, Split.second);
22212       }
22213       V = DAG.getNOT(DL, V, VT);
22214       V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
22215       return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
22216                          DAG.getConstant(0, DL, MVT::i32));
22217     } else {
22218       // Convert to a ICMP_EQ(XOR(LHS,RHS),0) pattern.
22219       SDValue V = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);
22220       while (VT.getSizeInBits() > TestSize) {
22221         auto Split = DAG.SplitVector(V, DL);
22222         VT = Split.first.getValueType();
22223         V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
22224       }
22225       LHS = V;
22226       RHS = DAG.getConstant(0, DL, VT);
22227     }
22228   }
22229 
22230   if (UseKORTEST && VT.is512BitVector()) {
22231     MVT TestVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
22232     MVT BoolVT = TestVT.changeVectorElementType(MVT::i1);
22233     LHS = DAG.getBitcast(TestVT, MaskBits(LHS));
22234     RHS = DAG.getBitcast(TestVT, MaskBits(RHS));
22235     SDValue V = DAG.getSetCC(DL, BoolVT, LHS, RHS, ISD::SETNE);
22236     return DAG.getNode(X86ISD::KORTEST, DL, MVT::i32, V, V);
22237   }
22238 
22239   if (UsePTEST) {
22240     MVT TestVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
22241     LHS = DAG.getBitcast(TestVT, MaskBits(LHS));
22242     RHS = DAG.getBitcast(TestVT, MaskBits(RHS));
22243     SDValue V = DAG.getNode(ISD::XOR, DL, TestVT, LHS, RHS);
22244     return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, V, V);
22245   }
22246 
22247   assert(VT.getSizeInBits() == 128 && "Failure to split to 128-bits");
22248   MVT MaskVT = ScalarSize >= 32 ? MVT::v4i32 : MVT::v16i8;
22249   LHS = DAG.getBitcast(MaskVT, MaskBits(LHS));
22250   RHS = DAG.getBitcast(MaskVT, MaskBits(RHS));
22251   SDValue V = DAG.getNode(X86ISD::PCMPEQ, DL, MaskVT, LHS, RHS);
22252   V = DAG.getNOT(DL, V, MaskVT);
22253   V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
22254   return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
22255                      DAG.getConstant(0, DL, MVT::i32));
22256 }
22257 
22258 // Check whether an AND/OR'd reduction tree is PTEST-able, or if we can fall back
22259 // to CMP(MOVMSK(PCMPEQB(X,Y))).
22260 static SDValue MatchVectorAllEqualTest(SDValue LHS, SDValue RHS,
22261                                        ISD::CondCode CC, const SDLoc &DL,
22262                                        const X86Subtarget &Subtarget,
22263                                        SelectionDAG &DAG,
22264                                        X86::CondCode &X86CC) {
22265   assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
22266 
22267   bool CmpNull = isNullConstant(RHS);
22268   bool CmpAllOnes = isAllOnesConstant(RHS);
22269   if (!CmpNull && !CmpAllOnes)
22270     return SDValue();
22271 
22272   SDValue Op = LHS;
22273   if (!Subtarget.hasSSE2() || !Op->hasOneUse())
22274     return SDValue();
22275 
22276   // Check whether we're masking/truncating an OR-reduction result, in which
22277   // case track the masked bits.
22278   // TODO: Add CmpAllOnes support.
22279   APInt Mask = APInt::getAllOnes(Op.getScalarValueSizeInBits());
22280   if (CmpNull) {
22281     switch (Op.getOpcode()) {
22282     case ISD::TRUNCATE: {
22283       SDValue Src = Op.getOperand(0);
22284       Mask = APInt::getLowBitsSet(Src.getScalarValueSizeInBits(),
22285                                   Op.getScalarValueSizeInBits());
22286       Op = Src;
22287       break;
22288     }
22289     case ISD::AND: {
22290       if (auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
22291         Mask = Cst->getAPIntValue();
22292         Op = Op.getOperand(0);
22293       }
22294       break;
22295     }
22296     }
22297   }
22298 
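  // A compare against zero reduces with OR (any set bit breaks equality with 0);
  // a compare against all-ones reduces with AND (any clear bit breaks equality with -1).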
22299   ISD::NodeType LogicOp = CmpNull ? ISD::OR : ISD::AND;
22300 
22301   // Match icmp(or(extract(X,0),extract(X,1)),0) anyof reduction patterns.
22302   // Match icmp(and(extract(X,0),extract(X,1)),-1) allof reduction patterns.
22303   SmallVector<SDValue, 8> VecIns;
22304   if (Op.getOpcode() == LogicOp && matchScalarReduction(Op, LogicOp, VecIns)) {
22305     EVT VT = VecIns[0].getValueType();
22306     assert(llvm::all_of(VecIns,
22307                         [VT](SDValue V) { return VT == V.getValueType(); }) &&
22308            "Reduction source vector mismatch");
22309 
22310     // Quit if not splittable to scalar/128/256/512-bit vector.
22311     if (!llvm::has_single_bit<uint32_t>(VT.getSizeInBits()))
22312       return SDValue();
22313 
22314     // If more than one full vector is evaluated, AND/OR them first before
22315     // PTEST.
22316     for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1;
22317          Slot += 2, e += 1) {
22318       // Each iteration will AND/OR 2 nodes and append the result until there is
22319       // only 1 node left, i.e. the final value of all vectors.
22320       SDValue LHS = VecIns[Slot];
22321       SDValue RHS = VecIns[Slot + 1];
22322       VecIns.push_back(DAG.getNode(LogicOp, DL, VT, LHS, RHS));
22323     }
22324 
22325     return LowerVectorAllEqual(DL, VecIns.back(),
22326                                CmpNull ? DAG.getConstant(0, DL, VT)
22327                                        : DAG.getAllOnesConstant(DL, VT),
22328                                CC, Mask, Subtarget, DAG, X86CC);
22329   }
22330 
22331   // Match icmp(reduce_or(X),0) anyof reduction patterns.
22332   // Match icmp(reduce_and(X),-1) allof reduction patterns.
22333   if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
22334     ISD::NodeType BinOp;
22335     if (SDValue Match =
22336             DAG.matchBinOpReduction(Op.getNode(), BinOp, {LogicOp})) {
22337       EVT MatchVT = Match.getValueType();
22338       return LowerVectorAllEqual(DL, Match,
22339                                  CmpNull ? DAG.getConstant(0, DL, MatchVT)
22340                                          : DAG.getAllOnesConstant(DL, MatchVT),
22341                                  CC, Mask, Subtarget, DAG, X86CC);
22342     }
22343   }
22344 
22345   if (Mask.isAllOnes()) {
22346     assert(!Op.getValueType().isVector() &&
22347            "Illegal vector type for reduction pattern");
22348     SDValue Src = peekThroughBitcasts(Op);
22349     if (Src.getValueType().isFixedLengthVector() &&
22350         Src.getValueType().getScalarType() == MVT::i1) {
22351       // Match icmp(bitcast(icmp_ne(X,Y)),0) reduction patterns.
22352       // Match icmp(bitcast(icmp_eq(X,Y)),-1) reduction patterns.
22353       if (Src.getOpcode() == ISD::SETCC) {
22354         SDValue LHS = Src.getOperand(0);
22355         SDValue RHS = Src.getOperand(1);
22356         EVT LHSVT = LHS.getValueType();
22357         ISD::CondCode SrcCC = cast<CondCodeSDNode>(Src.getOperand(2))->get();
22358         if (SrcCC == (CmpNull ? ISD::SETNE : ISD::SETEQ) &&
22359             llvm::has_single_bit<uint32_t>(LHSVT.getSizeInBits())) {
22360           APInt SrcMask = APInt::getAllOnes(LHSVT.getScalarSizeInBits());
22361           return LowerVectorAllEqual(DL, LHS, RHS, CC, SrcMask, Subtarget, DAG,
22362                                      X86CC);
22363         }
22364       }
22365       // Match icmp(bitcast(vXi1 trunc(Y)),0) reduction patterns.
22366       // Match icmp(bitcast(vXi1 trunc(Y)),-1) reduction patterns.
22367       // Peek through truncation, mask the LSB and compare against zero/LSB.
22368       if (Src.getOpcode() == ISD::TRUNCATE) {
22369         SDValue Inner = Src.getOperand(0);
22370         EVT InnerVT = Inner.getValueType();
22371         if (llvm::has_single_bit<uint32_t>(InnerVT.getSizeInBits())) {
22372           unsigned BW = InnerVT.getScalarSizeInBits();
22373           APInt SrcMask = APInt(BW, 1);
22374           APInt Cmp = CmpNull ? APInt::getZero(BW) : SrcMask;
22375           return LowerVectorAllEqual(DL, Inner,
22376                                      DAG.getConstant(Cmp, DL, InnerVT), CC,
22377                                      SrcMask, Subtarget, DAG, X86CC);
22378         }
22379       }
22380     }
22381   }
22382 
22383   return SDValue();
22384 }
22385 
22386 /// Return true if \c Op has a use that doesn't just read flags.
22387 static bool hasNonFlagsUse(SDValue Op) {
22388   for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
22389        ++UI) {
22390     SDNode *User = *UI;
22391     unsigned UOpNo = UI.getOperandNo();
22392     if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
22393       // Look past the truncate.
22394       UOpNo = User->use_begin().getOperandNo();
22395       User = *User->use_begin();
22396     }
22397 
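    // Uses as a branch, setcc, or select condition only read the boolean
    // result; any other user needs the actual value, so report a non-flags use.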
22398     if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
22399         !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
22400       return true;
22401   }
22402   return false;
22403 }
22404 
22405 // Transform to an x86-specific ALU node with flags if there is a chance of
22406 // using an RMW op or only the flags are used. Otherwise, leave
22407 // the node alone and emit a 'cmp' or 'test' instruction.
22408 static bool isProfitableToUseFlagOp(SDValue Op) {
22409   for (SDNode *U : Op->uses())
22410     if (U->getOpcode() != ISD::CopyToReg &&
22411         U->getOpcode() != ISD::SETCC &&
22412         U->getOpcode() != ISD::STORE)
22413       return false;
22414 
22415   return true;
22416 }
22417 
22418 /// Emit nodes that will be selected as "test Op0,Op0", or something
22419 /// equivalent.
22420 static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
22421                         SelectionDAG &DAG, const X86Subtarget &Subtarget) {
22422   // CF and OF aren't always set the way we want. Determine which
22423   // of these we need.
22424   bool NeedCF = false;
22425   bool NeedOF = false;
22426   switch (X86CC) {
22427   default: break;
22428   case X86::COND_A: case X86::COND_AE:
22429   case X86::COND_B: case X86::COND_BE:
22430     NeedCF = true;
22431     break;
22432   case X86::COND_G: case X86::COND_GE:
22433   case X86::COND_L: case X86::COND_LE:
22434   case X86::COND_O: case X86::COND_NO: {
22435     // Check if we really need to set the Overflow flag.
22436     // If NoSignedWrap is present, signed overflow cannot happen,
22437     // so OF is not actually needed.
22438     switch (Op->getOpcode()) {
22439     case ISD::ADD:
22440     case ISD::SUB:
22441     case ISD::MUL:
22442     case ISD::SHL:
22443       if (Op.getNode()->getFlags().hasNoSignedWrap())
22444         break;
22445       [[fallthrough]];
22446     default:
22447       NeedOF = true;
22448       break;
22449     }
22450     break;
22451   }
22452   }
22453   // See if we can use the EFLAGS value from the operand instead of
22454   // doing a separate TEST. TEST always sets OF and CF to 0, so unless
22455   // we prove that the arithmetic won't overflow, we can't use OF or CF.
22456   if (Op.getResNo() != 0 || NeedOF || NeedCF) {
22457     // Emit a CMP with 0, which is the TEST pattern.
22458     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
22459                        DAG.getConstant(0, dl, Op.getValueType()));
22460   }
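  // Otherwise, see whether the operand can itself be emitted as an
  // EFLAGS-producing X86 node so a separate TEST is not needed.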
22461   unsigned Opcode = 0;
22462   unsigned NumOperands = 0;
22463 
22464   SDValue ArithOp = Op;
22465 
22466   // NOTICE: In the code below we use ArithOp to hold the arithmetic operation,
22467   // which may be the result of a CAST.  When checking for possible users we use
22468   // the variable 'Op', the original non-casted value.
22469   switch (ArithOp.getOpcode()) {
22470   case ISD::AND:
22471     // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
22472     // because a TEST instruction will be better.
22473     if (!hasNonFlagsUse(Op))
22474       break;
22475 
22476     [[fallthrough]];
22477   case ISD::ADD:
22478   case ISD::SUB:
22479   case ISD::OR:
22480   case ISD::XOR:
22481     if (!isProfitableToUseFlagOp(Op))
22482       break;
22483 
22484     // Otherwise use a regular EFLAGS-setting instruction.
22485     switch (ArithOp.getOpcode()) {
22486     default: llvm_unreachable("unexpected operator!");
22487     case ISD::ADD: Opcode = X86ISD::ADD; break;
22488     case ISD::SUB: Opcode = X86ISD::SUB; break;
22489     case ISD::XOR: Opcode = X86ISD::XOR; break;
22490     case ISD::AND: Opcode = X86ISD::AND; break;
22491     case ISD::OR:  Opcode = X86ISD::OR;  break;
22492     }
22493 
22494     NumOperands = 2;
22495     break;
22496   case X86ISD::ADD:
22497   case X86ISD::SUB:
22498   case X86ISD::OR:
22499   case X86ISD::XOR:
22500   case X86ISD::AND:
22501     return SDValue(Op.getNode(), 1);
22502   case ISD::SSUBO:
22503   case ISD::USUBO: {
22504     // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
22505     SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22506     return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
22507                        Op->getOperand(1)).getValue(1);
22508   }
22509   default:
22510     break;
22511   }
22512 
22513   if (Opcode == 0) {
22514     // Emit a CMP with 0, which is the TEST pattern.
22515     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
22516                        DAG.getConstant(0, dl, Op.getValueType()));
22517   }
22518   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
22519   SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
22520 
22521   SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
22522   DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
22523   return SDValue(New.getNode(), 1);
22524 }
22525 
22526 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
22527 /// equivalent.
22528 static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
22529                        const SDLoc &dl, SelectionDAG &DAG,
22530                        const X86Subtarget &Subtarget) {
22531   if (isNullConstant(Op1))
22532     return EmitTest(Op0, X86CC, dl, DAG, Subtarget);
22533 
22534   EVT CmpVT = Op0.getValueType();
22535 
22536   assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
22537           CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");
22538 
22539   // Only promote the compare up to i32 if it is a 16-bit operation with an
22540   // immediate; 16-bit immediates incur a length-changing prefix (LCP) stall.
22541   if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
22542       !DAG.getMachineFunction().getFunction().hasMinSize()) {
22543     ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
22544     ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
22545     // Don't do this if the immediate can fit in 8-bits.
22546     if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
22547         (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
22548       unsigned ExtendOp =
22549           isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
22550       if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
22551         // For equality comparisons try to use SIGN_EXTEND if the input was
22552         // truncated from something with enough sign bits.
22553         if (Op0.getOpcode() == ISD::TRUNCATE) {
22554           if (DAG.ComputeMaxSignificantBits(Op0.getOperand(0)) <= 16)
22555             ExtendOp = ISD::SIGN_EXTEND;
22556         } else if (Op1.getOpcode() == ISD::TRUNCATE) {
22557           if (DAG.ComputeMaxSignificantBits(Op1.getOperand(0)) <= 16)
22558             ExtendOp = ISD::SIGN_EXTEND;
22559         }
22560       }
22561 
22562       CmpVT = MVT::i32;
22563       Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
22564       Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
22565     }
22566   }
22567 
22568   // Try to shrink i64 compares if the input has enough zero bits.
22569   // FIXME: Do this for non-constant compares for constant on LHS?
22570   if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
22571       Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
22572       Op1->getAsAPIntVal().getActiveBits() <= 32 &&
22573       DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
22574     CmpVT = MVT::i32;
22575     Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
22576     Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
22577   }
22578 
22579   // 0-x == y --> x+y == 0
22580   // 0-x != y --> x+y != 0
22581   if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op0.getOperand(0)) &&
22582       Op0.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
22583     SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
22584     SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(1), Op1);
22585     return Add.getValue(1);
22586   }
22587 
22588   // x == 0-y --> x+y == 0
22589   // x != 0-y --> x+y != 0
22590   if (Op1.getOpcode() == ISD::SUB && isNullConstant(Op1.getOperand(0)) &&
22591       Op1.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
22592     SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
22593     SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0, Op1.getOperand(1));
22594     return Add.getValue(1);
22595   }
22596 
22597   // Use SUB instead of CMP to enable CSE between SUB and CMP.
22598   SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
22599   SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
22600   return Sub.getValue(1);
22601 }
22602 
22603 bool X86TargetLowering::isXAndYEqZeroPreferableToXAndYEqY(ISD::CondCode Cond,
22604                                                           EVT VT) const {
22605   return !VT.isVector() || Cond != ISD::CondCode::SETEQ;
22606 }
22607 
22608 bool X86TargetLowering::optimizeFMulOrFDivAsShiftAddBitcast(
22609     SDNode *N, SDValue, SDValue IntPow2) const {
22610   if (N->getOpcode() == ISD::FDIV)
22611     return true;
22612 
22613   EVT FPVT = N->getValueType(0);
22614   EVT IntVT = IntPow2.getValueType();
22615 
22616   // This indicates a non-free bitcast.
22617   // TODO: This is probably overly conservative as we will need to scale the
22618   // integer vector anyways for the int->fp cast.
22619   if (FPVT.isVector() &&
22620       FPVT.getScalarSizeInBits() != IntVT.getScalarSizeInBits())
22621     return false;
22622 
22623   return true;
22624 }
22625 
22626 /// Check if replacement of SQRT with RSQRT should be disabled.
22627 bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
22628   EVT VT = Op.getValueType();
22629 
22630   // We don't need to replace SQRT with RSQRT for half type.
22631   if (VT.getScalarType() == MVT::f16)
22632     return true;
22633 
22634   // We never want to use both SQRT and RSQRT instructions for the same input.
22635   if (DAG.doesNodeExist(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
22636     return false;
22637 
22638   if (VT.isVector())
22639     return Subtarget.hasFastVectorFSQRT();
22640   return Subtarget.hasFastScalarFSQRT();
22641 }
22642 
22643 /// The minimum architected relative accuracy is 2^-12. We need one
22644 /// Newton-Raphson step to have a good float result (24 bits of precision).
22645 SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
22646                                            SelectionDAG &DAG, int Enabled,
22647                                            int &RefinementSteps,
22648                                            bool &UseOneConstNR,
22649                                            bool Reciprocal) const {
22650   SDLoc DL(Op);
22651   EVT VT = Op.getValueType();
22652 
22653   // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
22654   // It is likely not profitable to do this for f64 because a double-precision
22655   // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
22656   // instructions: convert to single, rsqrtss, convert back to double, refine
22657   // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
22658   // along with FMA, this could be a throughput win.
22659   // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
22660   // after legalize types.
22661   if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
22662       (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
22663       (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
22664       (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
22665       (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
22666     if (RefinementSteps == ReciprocalEstimate::Unspecified)
22667       RefinementSteps = 1;
22668 
22669     UseOneConstNR = false;
22670     // There is no 512-bit FRSQRT, but there is RSQRT14.
22671     unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
22672     SDValue Estimate = DAG.getNode(Opcode, DL, VT, Op);
22673     if (RefinementSteps == 0 && !Reciprocal)
22674       Estimate = DAG.getNode(ISD::FMUL, DL, VT, Op, Estimate);
22675     return Estimate;
22676   }
22677 
22678   if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
22679       Subtarget.hasFP16()) {
22680     assert(Reciprocal && "Don't replace SQRT with RSQRT for half type");
22681     if (RefinementSteps == ReciprocalEstimate::Unspecified)
22682       RefinementSteps = 0;
22683 
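    // Scalar f16: widen to v8f16, apply the RSQRT14 scalar node (upper elements
    // are taken from the first operand, here undef), then extract element 0.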
22684     if (VT == MVT::f16) {
22685       SDValue Zero = DAG.getIntPtrConstant(0, DL);
22686       SDValue Undef = DAG.getUNDEF(MVT::v8f16);
22687       Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
22688       Op = DAG.getNode(X86ISD::RSQRT14S, DL, MVT::v8f16, Undef, Op);
22689       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
22690     }
22691 
22692     return DAG.getNode(X86ISD::RSQRT14, DL, VT, Op);
22693   }
22694   return SDValue();
22695 }
22696 
22697 /// The minimum architected relative accuracy is 2^-12. We need one
22698 /// Newton-Raphson step to have a good float result (24 bits of precision).
22699 SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
22700                                             int Enabled,
22701                                             int &RefinementSteps) const {
22702   SDLoc DL(Op);
22703   EVT VT = Op.getValueType();
22704 
22705   // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
22706   // It is likely not profitable to do this for f64 because a double-precision
22707   // reciprocal estimate with refinement on x86 prior to FMA requires
22708   // 15 instructions: convert to single, rcpss, convert back to double, refine
22709   // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
22710   // along with FMA, this could be a throughput win.
22711 
22712   if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
22713       (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
22714       (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
22715       (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
22716     // Enable estimate codegen with 1 refinement step for vector division.
22717     // Scalar division estimates are disabled because they break too much
22718     // real-world code. These defaults are intended to match GCC behavior.
22719     if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
22720       return SDValue();
22721 
22722     if (RefinementSteps == ReciprocalEstimate::Unspecified)
22723       RefinementSteps = 1;
22724 
22725     // There is no 512-bit FRCP, but there is RCP14.
22726     unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
22727     return DAG.getNode(Opcode, DL, VT, Op);
22728   }
22729 
22730   if (VT.getScalarType() == MVT::f16 && isTypeLegal(VT) &&
22731       Subtarget.hasFP16()) {
22732     if (RefinementSteps == ReciprocalEstimate::Unspecified)
22733       RefinementSteps = 0;
22734 
22735     if (VT == MVT::f16) {
22736       SDValue Zero = DAG.getIntPtrConstant(0, DL);
22737       SDValue Undef = DAG.getUNDEF(MVT::v8f16);
22738       Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v8f16, Op);
22739       Op = DAG.getNode(X86ISD::RCP14S, DL, MVT::v8f16, Undef, Op);
22740       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f16, Op, Zero);
22741     }
22742 
22743     return DAG.getNode(X86ISD::RCP14, DL, VT, Op);
22744   }
22745   return SDValue();
22746 }
22747 
22748 /// If we have at least two divisions that use the same divisor, convert to
22749 /// multiplication by a reciprocal. This may need to be adjusted for a given
22750 /// CPU if a division's cost is not at least twice the cost of a multiplication.
22751 /// This is because we still need one division to calculate the reciprocal and
22752 /// then we need two multiplies by that reciprocal as replacements for the
22753 /// original divisions.
22754 unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
22755   return 2;
22756 }
22757 
22758 SDValue
22759 X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
22760                                  SelectionDAG &DAG,
22761                                  SmallVectorImpl<SDNode *> &Created) const {
22762   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
22763   if (isIntDivCheap(N->getValueType(0), Attr))
22764     return SDValue(N,0); // Lower SDIV as SDIV
22765 
22766   assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
22767          "Unexpected divisor!");
22768 
22769   // Only perform this transform if CMOV is supported; otherwise the select
22770   // below will become a branch.
22771   if (!Subtarget.canUseCMOV())
22772     return SDValue();
22773 
22774   // fold (sdiv X, pow2)
22775   EVT VT = N->getValueType(0);
22776   // FIXME: Support i8.
22777   if (VT != MVT::i16 && VT != MVT::i32 &&
22778       !(Subtarget.is64Bit() && VT == MVT::i64))
22779     return SDValue();
22780 
22781   // If the divisor is 2 or -2, the default expansion is better.
22782   if (Divisor == 2 ||
22783       Divisor == APInt(Divisor.getBitWidth(), -2, /*isSigned*/ true))
22784     return SDValue();
22785 
22786   return TargetLowering::buildSDIVPow2WithCMov(N, Divisor, DAG, Created);
22787 }
22788 
22789 /// Result of 'and' is compared against zero. Change to a BT node if possible.
22790 /// Returns the BT node and the condition code needed to use it.
22791 static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC, const SDLoc &dl,
22792                             SelectionDAG &DAG, X86::CondCode &X86CC) {
22793   assert(And.getOpcode() == ISD::AND && "Expected AND node!");
22794   SDValue Op0 = And.getOperand(0);
22795   SDValue Op1 = And.getOperand(1);
22796   if (Op0.getOpcode() == ISD::TRUNCATE)
22797     Op0 = Op0.getOperand(0);
22798   if (Op1.getOpcode() == ISD::TRUNCATE)
22799     Op1 = Op1.getOperand(0);
22800 
22801   SDValue Src, BitNo;
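  // Canonicalize a SHL operand onto Op0 so the (1 << N) check below only needs
  // to handle one form.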
22802   if (Op1.getOpcode() == ISD::SHL)
22803     std::swap(Op0, Op1);
22804   if (Op0.getOpcode() == ISD::SHL) {
22805     if (isOneConstant(Op0.getOperand(0))) {
22806       // If we looked past a truncate, check that it's only truncating away
22807       // known zeros.
22808       unsigned BitWidth = Op0.getValueSizeInBits();
22809       unsigned AndBitWidth = And.getValueSizeInBits();
22810       if (BitWidth > AndBitWidth) {
22811         KnownBits Known = DAG.computeKnownBits(Op0);
22812         if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
22813           return SDValue();
22814       }
22815       Src = Op1;
22816       BitNo = Op0.getOperand(1);
22817     }
22818   } else if (Op1.getOpcode() == ISD::Constant) {
22819     ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
22820     uint64_t AndRHSVal = AndRHS->getZExtValue();
22821     SDValue AndLHS = Op0;
22822 
22823     if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
22824       Src = AndLHS.getOperand(0);
22825       BitNo = AndLHS.getOperand(1);
22826     } else {
22827       // Use BT if the immediate can't be encoded in a TEST instruction or we
22828       // are optimizing for size and the immediate won't fit in a byte.
22829       bool OptForSize = DAG.shouldOptForSize();
22830       if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
22831           isPowerOf2_64(AndRHSVal)) {
22832         Src = AndLHS;
22833         BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
22834                                 Src.getValueType());
22835       }
22836     }
22837   }
22838 
22839   // No patterns found, give up.
22840   if (!Src.getNode())
22841     return SDValue();
22842 
22843   // Remove any bit flip.
22844   if (isBitwiseNot(Src)) {
22845     Src = Src.getOperand(0);
22846     CC = CC == ISD::SETEQ ? ISD::SETNE : ISD::SETEQ;
22847   }
22848 
22849   // Attempt to create the X86ISD::BT node.
22850   if (SDValue BT = getBT(Src, BitNo, dl, DAG)) {
22851     X86CC = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
22852     return BT;
22853   }
22854 
22855   return SDValue();
22856 }
22857 
22858 // Check whether a pre-AVX condition code can be handled by a single SSE compare op.
22859 static bool cheapX86FSETCC_SSE(ISD::CondCode SetCCOpcode) {
22860   return (SetCCOpcode != ISD::SETONE) && (SetCCOpcode != ISD::SETUEQ);
22861 }
22862 
22863 /// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
22864 /// CMPs.
22865 static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
22866                                    SDValue &Op1, bool &IsAlwaysSignaling) {
22867   unsigned SSECC;
22868   bool Swap = false;
22869 
22870   // SSE Condition code mapping:
22871   //  0 - EQ
22872   //  1 - LT
22873   //  2 - LE
22874   //  3 - UNORD
22875   //  4 - NEQ
22876   //  5 - NLT
22877   //  6 - NLE
22878   //  7 - ORD
22879   switch (SetCCOpcode) {
22880   default: llvm_unreachable("Unexpected SETCC condition");
22881   case ISD::SETOEQ:
22882   case ISD::SETEQ:  SSECC = 0; break;
22883   case ISD::SETOGT:
22884   case ISD::SETGT:  Swap = true; [[fallthrough]];
22885   case ISD::SETLT:
22886   case ISD::SETOLT: SSECC = 1; break;
22887   case ISD::SETOGE:
22888   case ISD::SETGE:  Swap = true; [[fallthrough]];
22889   case ISD::SETLE:
22890   case ISD::SETOLE: SSECC = 2; break;
22891   case ISD::SETUO:  SSECC = 3; break;
22892   case ISD::SETUNE:
22893   case ISD::SETNE:  SSECC = 4; break;
22894   case ISD::SETULE: Swap = true; [[fallthrough]];
22895   case ISD::SETUGE: SSECC = 5; break;
22896   case ISD::SETULT: Swap = true; [[fallthrough]];
22897   case ISD::SETUGT: SSECC = 6; break;
22898   case ISD::SETO:   SSECC = 7; break;
22899   case ISD::SETUEQ: SSECC = 8; break;
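  // Predicates 8 (EQ_UQ) and 12 (NEQ_OQ) only exist in the extended AVX
  // encoding; pre-AVX callers split SETUEQ/SETONE into two compares instead
  // (see cheapX86FSETCC_SSE).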
22900   case ISD::SETONE: SSECC = 12; break;
22901   }
22902   if (Swap)
22903     std::swap(Op0, Op1);
22904 
22905   switch (SetCCOpcode) {
22906   default:
22907     IsAlwaysSignaling = true;
22908     break;
22909   case ISD::SETEQ:
22910   case ISD::SETOEQ:
22911   case ISD::SETUEQ:
22912   case ISD::SETNE:
22913   case ISD::SETONE:
22914   case ISD::SETUNE:
22915   case ISD::SETO:
22916   case ISD::SETUO:
22917     IsAlwaysSignaling = false;
22918     break;
22919   }
22920 
22921   return SSECC;
22922 }
22923 
22924 /// Break a 256-bit or 512-bit integer VSETCC into two new half-sized ones and
22925 /// then concatenate the result back.
22926 static SDValue splitIntVSETCC(EVT VT, SDValue LHS, SDValue RHS,
22927                               ISD::CondCode Cond, SelectionDAG &DAG,
22928                               const SDLoc &dl) {
22929   assert(VT.isInteger() && VT == LHS.getValueType() &&
22930          VT == RHS.getValueType() && "Unsupported VTs!");
22931 
22932   SDValue CC = DAG.getCondCode(Cond);
22933 
22934   // Extract the LHS Lo/Hi vectors
22935   SDValue LHS1, LHS2;
22936   std::tie(LHS1, LHS2) = splitVector(LHS, DAG, dl);
22937 
22938   // Extract the RHS Lo/Hi vectors
22939   SDValue RHS1, RHS2;
22940   std::tie(RHS1, RHS2) = splitVector(RHS, DAG, dl);
22941 
22942   // Issue the operation on the smaller types and concatenate the result back
22943   EVT LoVT, HiVT;
22944   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
22945   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
22946                      DAG.getNode(ISD::SETCC, dl, LoVT, LHS1, RHS1, CC),
22947                      DAG.getNode(ISD::SETCC, dl, HiVT, LHS2, RHS2, CC));
22948 }
22949 
22950 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {
22951 
22952   SDValue Op0 = Op.getOperand(0);
22953   SDValue Op1 = Op.getOperand(1);
22954   SDValue CC = Op.getOperand(2);
22955   MVT VT = Op.getSimpleValueType();
22956   SDLoc dl(Op);
22957 
22958   assert(VT.getVectorElementType() == MVT::i1 &&
22959          "Cannot set masked compare for this operation");
22960 
22961   ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
22962 
22963   // Prefer SETGT over SETLT.
22964   if (SetCCOpcode == ISD::SETLT) {
22965     SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
22966     std::swap(Op0, Op1);
22967   }
22968 
22969   return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
22970 }
22971 
22972 /// Given a buildvector constant, return a new vector constant with each element
22973 /// incremented or decremented. If incrementing or decrementing would result in
22974 /// unsigned overflow or underflow or this is not a simple vector constant,
22975 /// return an empty value.
22976 static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc,
22977                                     bool NSW) {
22978   auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
22979   if (!BV || !V.getValueType().isSimple())
22980     return SDValue();
22981 
22982   MVT VT = V.getSimpleValueType();
22983   MVT EltVT = VT.getVectorElementType();
22984   unsigned NumElts = VT.getVectorNumElements();
22985   SmallVector<SDValue, 8> NewVecC;
22986   SDLoc DL(V);
22987   for (unsigned i = 0; i < NumElts; ++i) {
22988     auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
22989     if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
22990       return SDValue();
22991 
22992     // Avoid overflow/underflow.
22993     const APInt &EltC = Elt->getAPIntValue();
22994     if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isZero()))
22995       return SDValue();
22996     if (NSW && ((IsInc && EltC.isMaxSignedValue()) ||
22997                 (!IsInc && EltC.isMinSignedValue())))
22998       return SDValue();
22999 
23000     NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
23001   }
23002 
23003   return DAG.getBuildVector(VT, DL, NewVecC);
23004 }
23005 
23006 /// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
23007 /// Op0 u<= Op1:
23008 ///   t = psubus Op0, Op1
23009 ///   pcmpeq t, <0..0>
23010 static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
23011                                     ISD::CondCode Cond, const SDLoc &dl,
23012                                     const X86Subtarget &Subtarget,
23013                                     SelectionDAG &DAG) {
23014   if (!Subtarget.hasSSE2())
23015     return SDValue();
23016 
23017   MVT VET = VT.getVectorElementType();
23018   if (VET != MVT::i8 && VET != MVT::i16)
23019     return SDValue();
23020 
23021   switch (Cond) {
23022   default:
23023     return SDValue();
23024   case ISD::SETULT: {
23025     // If the comparison is against a constant we can turn this into a
23026     // setule.  With psubus, setule does not require a swap.  This is
23027     // beneficial because the constant in the register is no longer
23028     // clobbered as the destination, so it can be hoisted out of a loop.
23029     // Only do this pre-AVX since vpcmp* is no longer destructive.
23030     if (Subtarget.hasAVX())
23031       return SDValue();
23032     SDValue ULEOp1 =
23033         incDecVectorConstant(Op1, DAG, /*IsInc*/ false, /*NSW*/ false);
23034     if (!ULEOp1)
23035       return SDValue();
23036     Op1 = ULEOp1;
23037     break;
23038   }
23039   case ISD::SETUGT: {
23040     // If the comparison is against a constant, we can turn this into a setuge.
23041     // This is beneficial because materializing a constant 0 for the PCMPEQ is
23042     // probably cheaper than XOR+PCMPGT using 2 different vector constants:
23043     // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
23044     SDValue UGEOp1 =
23045         incDecVectorConstant(Op1, DAG, /*IsInc*/ true, /*NSW*/ false);
23046     if (!UGEOp1)
23047       return SDValue();
23048     Op1 = Op0;
23049     Op0 = UGEOp1;
23050     break;
23051   }
23052   // Psubus is better than flip-sign because it requires no inversion.
23053   case ISD::SETUGE:
23054     std::swap(Op0, Op1);
23055     break;
23056   case ISD::SETULE:
23057     break;
23058   }
23059 
23060   SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
23061   return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
23062                      DAG.getConstant(0, dl, VT));
23063 }
23064 
23065 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
23066                            SelectionDAG &DAG) {
23067   bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
23068                   Op.getOpcode() == ISD::STRICT_FSETCCS;
23069   SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
23070   SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
23071   SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
23072   MVT VT = Op->getSimpleValueType(0);
23073   ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
23074   bool isFP = Op1.getSimpleValueType().isFloatingPoint();
23075   SDLoc dl(Op);
23076 
23077   if (isFP) {
23078     MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
23079     assert(EltVT == MVT::f16 || EltVT == MVT::f32 || EltVT == MVT::f64);
23080     if (isSoftF16(EltVT, Subtarget))
23081       return SDValue();
23082 
23083     bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
23084     SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
23085 
23086     // If we have a strict compare with a vXi1 result and the input is 128/256
23087     // bits we can't use a masked compare unless we have VLX. If we use a wider
23088     // compare like we do for non-strict, we might trigger spurious exceptions
23089     // from the upper elements. Instead emit an AVX compare and convert to a mask.
23090     unsigned Opc;
23091     if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1 &&
23092         (!IsStrict || Subtarget.hasVLX() ||
23093          Op0.getSimpleValueType().is512BitVector())) {
23094 #ifndef NDEBUG
23095       unsigned Num = VT.getVectorNumElements();
23096       assert(Num <= 16 || (Num == 32 && EltVT == MVT::f16));
23097 #endif
23098       Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
23099     } else {
23100       Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
23101       // The SSE/AVX packed FP comparison nodes are defined with a
23102       // floating-point vector result that matches the operand type. This allows
23103       // them to work with an SSE1 target (integer vector types are not legal).
23104       VT = Op0.getSimpleValueType();
23105     }
23106 
23107     SDValue Cmp;
23108     bool IsAlwaysSignaling;
23109     unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
23110     if (!Subtarget.hasAVX()) {
23111       // TODO: We could use following steps to handle a quiet compare with
23112       // signaling encodings.
23113       // 1. Get ordered masks from a quiet ISD::SETO
23114       // 2. Use the masks to mask potential unordered elements in operand A, B
23115       // 3. Get the compare results of masked A, B
23116       // 4. Calculating final result using the mask and result from 3
23117       // But currently, we just fall back to scalar operations.
23118       if (IsStrict && IsAlwaysSignaling && !IsSignaling)
23119         return SDValue();
23120 
23121       // Insert an extra signaling instruction to raise exception.
23122       if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
23123         SDValue SignalCmp = DAG.getNode(
23124             Opc, dl, {VT, MVT::Other},
23125             {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
23126         // FIXME: It seems we need to update the flags of all new strict nodes.
23127         // Otherwise, mayRaiseFPException in MI will return false due to
23128         // NoFPExcept = false by default. However, I didn't find this handled
23129         // in other patches.
23130         SignalCmp->setFlags(Op->getFlags());
23131         Chain = SignalCmp.getValue(1);
23132       }
23133 
23134       // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
23135       // emit two comparisons and a logic op to tie them together.
23136       if (!cheapX86FSETCC_SSE(Cond)) {
23137         // LLVM predicate is SETUEQ or SETONE.
23138         unsigned CC0, CC1;
23139         unsigned CombineOpc;
23140         if (Cond == ISD::SETUEQ) {
23141           CC0 = 3; // UNORD
23142           CC1 = 0; // EQ
23143           CombineOpc = X86ISD::FOR;
23144         } else {
23145           assert(Cond == ISD::SETONE);
23146           CC0 = 7; // ORD
23147           CC1 = 4; // NEQ
23148           CombineOpc = X86ISD::FAND;
23149         }
23150 
23151         SDValue Cmp0, Cmp1;
23152         if (IsStrict) {
23153           Cmp0 = DAG.getNode(
23154               Opc, dl, {VT, MVT::Other},
23155               {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
23156           Cmp1 = DAG.getNode(
23157               Opc, dl, {VT, MVT::Other},
23158               {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
23159           Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
23160                               Cmp1.getValue(1));
23161         } else {
23162           Cmp0 = DAG.getNode(
23163               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
23164           Cmp1 = DAG.getNode(
23165               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
23166         }
23167         Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
23168       } else {
23169         if (IsStrict) {
23170           Cmp = DAG.getNode(
23171               Opc, dl, {VT, MVT::Other},
23172               {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
23173           Chain = Cmp.getValue(1);
23174         } else
23175           Cmp = DAG.getNode(
23176               Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
23177       }
23178     } else {
23179       // Handle all other FP comparisons here.
23180       if (IsStrict) {
23181         // Make a flip on already signaling CCs before setting bit 4 of AVX CC.
23182         SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
23183         Cmp = DAG.getNode(
23184             Opc, dl, {VT, MVT::Other},
23185             {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
23186         Chain = Cmp.getValue(1);
23187       } else
23188         Cmp = DAG.getNode(
23189             Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
23190     }
23191 
23192     if (VT.getFixedSizeInBits() >
23193         Op.getSimpleValueType().getFixedSizeInBits()) {
23194       // We emitted a compare with an XMM/YMM result. Finish converting to a
23195       // mask register using a vptestm.
23196       EVT CastVT = EVT(VT).changeVectorElementTypeToInteger();
23197       Cmp = DAG.getBitcast(CastVT, Cmp);
23198       Cmp = DAG.getSetCC(dl, Op.getSimpleValueType(), Cmp,
23199                          DAG.getConstant(0, dl, CastVT), ISD::SETNE);
23200     } else {
23201       // If this is SSE/AVX CMPP, bitcast the result back to integer to match
23202       // the result type of SETCC. The bitcast is expected to be optimized
23203       // away during combining/isel.
23204       Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
23205     }
23206 
23207     if (IsStrict)
23208       return DAG.getMergeValues({Cmp, Chain}, dl);
23209 
23210     return Cmp;
23211   }
23212 
23213   assert(!IsStrict && "Strict SETCC only handles FP operands.");
23214 
23215   MVT VTOp0 = Op0.getSimpleValueType();
23216   (void)VTOp0;
23217   assert(VTOp0 == Op1.getSimpleValueType() &&
23218          "Expected operands with same type!");
23219   assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
23220          "Invalid number of packed elements for source and destination!");
23221 
23222   // The non-AVX512 code below works under the assumption that source and
23223   // destination types are the same.
23224   assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
23225          "Value types for source and destination must be the same!");
23226 
23227   // The result is boolean, but operands are int/float
23228   if (VT.getVectorElementType() == MVT::i1) {
23229     // In the AVX-512 architecture, setcc returns a mask with i1 elements,
23230     // but there is no compare instruction for i8 and i16 elements in KNL.
23231     assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
23232            "Unexpected operand type");
23233     return LowerIntVSETCC_AVX512(Op, DAG);
23234   }
23235 
23236   // Lower using XOP integer comparisons.
23237   if (VT.is128BitVector() && Subtarget.hasXOP()) {
23238     // Translate compare code to XOP PCOM compare mode.
23239     unsigned CmpMode = 0;
23240     switch (Cond) {
23241     default: llvm_unreachable("Unexpected SETCC condition");
23242     case ISD::SETULT:
23243     case ISD::SETLT: CmpMode = 0x00; break;
23244     case ISD::SETULE:
23245     case ISD::SETLE: CmpMode = 0x01; break;
23246     case ISD::SETUGT:
23247     case ISD::SETGT: CmpMode = 0x02; break;
23248     case ISD::SETUGE:
23249     case ISD::SETGE: CmpMode = 0x03; break;
23250     case ISD::SETEQ: CmpMode = 0x04; break;
23251     case ISD::SETNE: CmpMode = 0x05; break;
23252     }
23253 
23254     // Are we comparing unsigned or signed integers?
23255     unsigned Opc =
23256         ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;
23257 
23258     return DAG.getNode(Opc, dl, VT, Op0, Op1,
23259                        DAG.getTargetConstant(CmpMode, dl, MVT::i8));
23260   }
23261 
23262   // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
23263   // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
23264   if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
23265     SDValue BC0 = peekThroughBitcasts(Op0);
23266     if (BC0.getOpcode() == ISD::AND) {
23267       APInt UndefElts;
23268       SmallVector<APInt, 64> EltBits;
23269       if (getTargetConstantBitsFromNode(BC0.getOperand(1),
23270                                         VT.getScalarSizeInBits(), UndefElts,
23271                                         EltBits, false, false)) {
23272         if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
23273           Cond = ISD::SETEQ;
23274           Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
23275         }
23276       }
23277     }
23278   }
23279 
23280   // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
23281   if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
23282       Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
23283     ConstantSDNode *C1 = isConstOrConstSplat(Op1);
23284     if (C1 && C1->getAPIntValue().isPowerOf2()) {
23285       unsigned BitWidth = VT.getScalarSizeInBits();
23286       unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;
23287 
23288       SDValue Result = Op0.getOperand(0);
23289       Result = DAG.getNode(ISD::SHL, dl, VT, Result,
23290                            DAG.getConstant(ShiftAmt, dl, VT));
23291       Result = DAG.getNode(ISD::SRA, dl, VT, Result,
23292                            DAG.getConstant(BitWidth - 1, dl, VT));
23293       return Result;
23294     }
23295   }
23296 
23297   // Break 256-bit integer vector compare into smaller ones.
23298   if (VT.is256BitVector() && !Subtarget.hasInt256())
23299     return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
23300 
23301   // Break 512-bit integer vector compare into smaller ones.
23302   // TODO: Try harder to use VPCMPx + VPMOV2x?
23303   if (VT.is512BitVector())
23304     return splitIntVSETCC(VT, Op0, Op1, Cond, DAG, dl);
23305 
23306   // If we have a limit constant, try to form PCMPGT (signed cmp) to avoid
23307   // not-of-PCMPEQ:
23308   // X != INT_MIN --> X >s INT_MIN
23309   // X != INT_MAX --> X <s INT_MAX --> INT_MAX >s X
23310   // +X != 0 --> +X >s 0
23311   APInt ConstValue;
23312   if (Cond == ISD::SETNE &&
23313       ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
23314     if (ConstValue.isMinSignedValue())
23315       Cond = ISD::SETGT;
23316     else if (ConstValue.isMaxSignedValue())
23317       Cond = ISD::SETLT;
23318     else if (ConstValue.isZero() && DAG.SignBitIsZero(Op0))
23319       Cond = ISD::SETGT;
23320   }
23321 
23322   // If both operands are known non-negative, then an unsigned compare is the
23323   // same as a signed compare and there's no need to flip signbits.
23324   // TODO: We could check for more general simplifications here since we're
23325   // computing known bits.
23326   bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
23327                    !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
23328 
23329   // Special case: Use min/max operations for unsigned compares.
23330   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23331   if (ISD::isUnsignedIntSetCC(Cond) &&
23332       (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
23333       TLI.isOperationLegal(ISD::UMIN, VT)) {
23334     // If we have a constant operand, increment/decrement it and change the
23335     // condition to avoid an invert.
23336     if (Cond == ISD::SETUGT) {
23337       // X > C --> X >= (C+1) --> X == umax(X, C+1)
23338       if (SDValue UGTOp1 =
23339               incDecVectorConstant(Op1, DAG, /*IsInc*/ true, /*NSW*/ false)) {
23340         Op1 = UGTOp1;
23341         Cond = ISD::SETUGE;
23342       }
23343     }
23344     if (Cond == ISD::SETULT) {
23345       // X < C --> X <= (C-1) --> X == umin(X, C-1)
23346       if (SDValue ULTOp1 =
23347               incDecVectorConstant(Op1, DAG, /*IsInc*/ false, /*NSW*/ false)) {
23348         Op1 = ULTOp1;
23349         Cond = ISD::SETULE;
23350       }
23351     }
23352     bool Invert = false;
23353     unsigned Opc;
23354     switch (Cond) {
23355     default: llvm_unreachable("Unexpected condition code");
23356     case ISD::SETUGT: Invert = true; [[fallthrough]];
23357     case ISD::SETULE: Opc = ISD::UMIN; break;
23358     case ISD::SETULT: Invert = true; [[fallthrough]];
23359     case ISD::SETUGE: Opc = ISD::UMAX; break;
23360     }
23361 
23362     SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
23363     Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
23364 
23365     // If the logical-not of the result is required, perform that now.
23366     if (Invert)
23367       Result = DAG.getNOT(dl, Result, VT);
23368 
23369     return Result;
23370   }
23371 
23372   // Try to use SUBUS and PCMPEQ.
23373   if (FlipSigns)
23374     if (SDValue V =
23375             LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
23376       return V;
23377 
23378   // We are handling one of the integer comparisons here. Since SSE only has
23379   // GT and EQ comparisons for integer, swapping operands and multiple
23380   // operations may be required for some comparisons.
23381   unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
23382                                                             : X86ISD::PCMPGT;
23383   bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
23384               Cond == ISD::SETGE || Cond == ISD::SETUGE;
23385   bool Invert = Cond == ISD::SETNE ||
23386                 (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));
23387 
23388   if (Swap)
23389     std::swap(Op0, Op1);
23390 
23391   // Check that the operation in question is available (most are plain SSE2,
23392   // but PCMPGTQ and PCMPEQQ have different requirements).
23393   if (VT == MVT::v2i64) {
23394     if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
23395       assert(Subtarget.hasSSE2() && "Don't know how to lower!");
23396 
23397       // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
23398       // the odd elements over the even elements.
23399       if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
23400         Op0 = DAG.getConstant(0, dl, MVT::v4i32);
23401         Op1 = DAG.getBitcast(MVT::v4i32, Op1);
23402 
23403         SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
23404         static const int MaskHi[] = { 1, 1, 3, 3 };
23405         SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
23406 
23407         return DAG.getBitcast(VT, Result);
23408       }
23409 
23410       if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
23411         Op0 = DAG.getBitcast(MVT::v4i32, Op0);
23412         Op1 = DAG.getConstant(-1, dl, MVT::v4i32);
23413 
23414         SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
23415         static const int MaskHi[] = { 1, 1, 3, 3 };
23416         SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
23417 
23418         return DAG.getBitcast(VT, Result);
23419       }
23420 
23421       // Since SSE has no unsigned integer comparisons, we need to flip the sign
23422       // bits of the inputs before performing those operations. The lower
23423       // compare is always unsigned.
23424       SDValue SB = DAG.getConstant(FlipSigns ? 0x8000000080000000ULL
23425                                              : 0x0000000080000000ULL,
23426                                    dl, MVT::v2i64);
23427 
23428       Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
23429       Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);
23430 
23431       // Cast everything to the right type.
23432       Op0 = DAG.getBitcast(MVT::v4i32, Op0);
23433       Op1 = DAG.getBitcast(MVT::v4i32, Op1);
23434 
23435       // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
23436       SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
23437       SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
23438 
23439       // Create masks for only the low parts/high parts of the 64 bit integers.
23440       static const int MaskHi[] = { 1, 1, 3, 3 };
23441       static const int MaskLo[] = { 0, 0, 2, 2 };
23442       SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
23443       SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
23444       SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
23445 
23446       SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
23447       Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
23448 
23449       if (Invert)
23450         Result = DAG.getNOT(dl, Result, MVT::v4i32);
23451 
23452       return DAG.getBitcast(VT, Result);
23453     }
23454 
23455     if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
23456       // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
23457       // pcmpeqd + pshufd + pand.
23458       assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");
23459 
23460       // First cast everything to the right type.
23461       Op0 = DAG.getBitcast(MVT::v4i32, Op0);
23462       Op1 = DAG.getBitcast(MVT::v4i32, Op1);
23463 
23464       // Do the compare.
23465       SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
23466 
23467       // Make sure the lower and upper halves are both all-ones.
23468       static const int Mask[] = { 1, 0, 3, 2 };
23469       SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
23470       Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
23471 
23472       if (Invert)
23473         Result = DAG.getNOT(dl, Result, MVT::v4i32);
23474 
23475       return DAG.getBitcast(VT, Result);
23476     }
23477   }
23478 
23479   // Since SSE has no unsigned integer comparisons, we need to flip the sign
23480   // bits of the inputs before performing those operations.
23481   if (FlipSigns) {
23482     MVT EltVT = VT.getVectorElementType();
23483     SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
23484                                  VT);
23485     Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
23486     Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
23487   }
23488 
23489   SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
23490 
23491   // If the logical-not of the result is required, perform that now.
23492   if (Invert)
23493     Result = DAG.getNOT(dl, Result, VT);
23494 
23495   return Result;
23496 }
23497 
23498 // Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
23499 static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
23500                               const SDLoc &dl, SelectionDAG &DAG,
23501                               const X86Subtarget &Subtarget,
23502                               SDValue &X86CC) {
23503   assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
23504 
23505   // Must be a bitcast from vXi1.
23506   if (Op0.getOpcode() != ISD::BITCAST)
23507     return SDValue();
23508 
23509   Op0 = Op0.getOperand(0);
23510   MVT VT = Op0.getSimpleValueType();
23511   if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
23512       !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
23513       !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
23514     return SDValue();
23515 
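  // KORTEST sets ZF when the OR of its operands is all zeros and CF when it is
  // all ones, so comparing the mask against 0 or -1 maps directly onto flags.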
23516   X86::CondCode X86Cond;
23517   if (isNullConstant(Op1)) {
23518     X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
23519   } else if (isAllOnesConstant(Op1)) {
23520     // C flag is set for all ones.
23521     X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
23522   } else
23523     return SDValue();
23524 
23525   // If the input is an AND, we can combine its operands into the KTEST.
23526   bool KTestable = false;
23527   if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
23528     KTestable = true;
23529   if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
23530     KTestable = true;
23531   if (!isNullConstant(Op1))
23532     KTestable = false;
23533   if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
23534     SDValue LHS = Op0.getOperand(0);
23535     SDValue RHS = Op0.getOperand(1);
23536     X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23537     return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
23538   }
23539 
23540   // If the input is an OR, we can combine its operands into the KORTEST.
23541   SDValue LHS = Op0;
23542   SDValue RHS = Op0;
23543   if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
23544     LHS = Op0.getOperand(0);
23545     RHS = Op0.getOperand(1);
23546   }
23547 
23548   X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23549   return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
23550 }
23551 
23552 /// Emit flags for the given setcc condition and operands. Also returns the
23553 /// corresponding X86 condition code constant in X86CC.
23554 SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
23555                                              ISD::CondCode CC, const SDLoc &dl,
23556                                              SelectionDAG &DAG,
23557                                              SDValue &X86CC) const {
23558   // Equality Combines.
23559   if (CC == ISD::SETEQ || CC == ISD::SETNE) {
23560     X86::CondCode X86CondCode;
23561 
23562     // Optimize to BT if possible.
23563     // Lower (X & (1 << N)) == 0 to BT(X, N).
23564     // Lower ((X >>u N) & 1) != 0 to BT(X, N).
23565     // Lower ((X >>s N) & 1) != 0 to BT(X, N).
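    // For example, (seteq (and X, 8), 0) becomes BT(X, 3) and tests CF: the
    // bit must be clear, so the condition is COND_AE; the != 0 forms use
    // COND_B.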
23566     if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1)) {
23567       if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CondCode)) {
23568         X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23569         return BT;
23570       }
23571     }
23572 
23573     // Try to use PTEST/PMOVMSKB for a tree of AND/ORs equality-compared with -1/0.
23574     if (SDValue CmpZ = MatchVectorAllEqualTest(Op0, Op1, CC, dl, Subtarget, DAG,
23575                                                X86CondCode)) {
23576       X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23577       return CmpZ;
23578     }
23579 
23580     // Try to lower using KORTEST or KTEST.
23581     if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
23582       return Test;
23583 
23584     // Look for X == 0, X == 1, X != 0, or X != 1.  We can simplify some forms
23585     // of these.
23586     if (isOneConstant(Op1) || isNullConstant(Op1)) {
23587       // If the input is a setcc, then reuse the input setcc or use a new one
23588       // with the inverted condition.
23589       if (Op0.getOpcode() == X86ISD::SETCC) {
23590         bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);
23591 
23592         X86CC = Op0.getOperand(0);
23593         if (Invert) {
23594           X86CondCode = (X86::CondCode)Op0.getConstantOperandVal(0);
23595           X86CondCode = X86::GetOppositeBranchCondition(X86CondCode);
23596           X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23597         }
23598 
23599         return Op0.getOperand(1);
23600       }
23601     }
23602 
23603     // Try to use the carry flag from the add in place of a separate CMP for:
23604     // (seteq (add X, -1), -1). Similarly for setne.
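    // ADD X, -1 sets CF exactly when X != 0, and (add X, -1) == -1 exactly
    // when X == 0, so the seteq form maps to COND_AE (CF clear) and the setne
    // form maps to COND_B (CF set), matching the code below.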
23605     if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
23606         Op0.getOperand(1) == Op1) {
23607       if (isProfitableToUseFlagOp(Op0)) {
23608         SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
23609 
23610         SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
23611                                   Op0.getOperand(1));
23612         DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
23613         X86CondCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
23614         X86CC = DAG.getTargetConstant(X86CondCode, dl, MVT::i8);
23615         return SDValue(New.getNode(), 1);
23616       }
23617     }
23618   }
23619 
23620   X86::CondCode CondCode =
23621       TranslateX86CC(CC, dl, /*IsFP*/ false, Op0, Op1, DAG);
23622   assert(CondCode != X86::COND_INVALID && "Unexpected condition code!");
23623 
23624   SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget);
23625   X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
23626   return EFLAGS;
23627 }
23628 
23629 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
23630 
23631   bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
23632                   Op.getOpcode() == ISD::STRICT_FSETCCS;
23633   MVT VT = Op->getSimpleValueType(0);
23634 
23635   if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
23636 
23637   assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
23638   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
23639   SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
23640   SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
23641   SDLoc dl(Op);
23642   ISD::CondCode CC =
23643       cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
23644 
23645   if (isSoftF16(Op0.getValueType(), Subtarget))
23646     return SDValue();
23647 
23648   // Handle f128 first, since one possible outcome is a normal integer
23649   // comparison which gets handled by emitFlagsForSetcc.
23650   if (Op0.getValueType() == MVT::f128) {
23651     softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
23652                         Op.getOpcode() == ISD::STRICT_FSETCCS);
23653 
23654     // If softenSetCCOperands returned a scalar, use it.
23655     if (!Op1.getNode()) {
23656       assert(Op0.getValueType() == Op.getValueType() &&
23657              "Unexpected setcc expansion!");
23658       if (IsStrict)
23659         return DAG.getMergeValues({Op0, Chain}, dl);
23660       return Op0;
23661     }
23662   }
23663 
23664   if (Op0.getSimpleValueType().isInteger()) {
23665     // Attempt to canonicalize SGT/UGT compares with a constant to SGE/UGE, which
23666     // reduces the number of EFLAGS bits read (the GE conditions don't read ZF);
23667     // this may translate to fewer uops depending on the uarch implementation. The
23668     // equivalent SLE/ULE -> SLT/ULT canonicalization isn't likely to be needed
23669     // here, as we already canonicalize to those CondCodes.
23670     // NOTE: Only do this if incrementing the constant doesn't increase the bit
23671     // encoding size - so it must either already be an i8 or i32 immediate, or it
23672     // must shrink down to one. We don't do this for any i64 constants to avoid
23673     // additional constant materializations.
23674     // TODO: Can we move this to TranslateX86CC to handle jumps/branches too?
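    // For example, (setgt X, 9) becomes (setge X, 10) and (setugt X, 9)
    // becomes (setuge X, 10), provided the incremented constant still
    // satisfies the immediate-size constraints above.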
23675     if (auto *Op1C = dyn_cast<ConstantSDNode>(Op1)) {
23676       const APInt &Op1Val = Op1C->getAPIntValue();
23677       if (!Op1Val.isZero()) {
23678         // Ensure the constant+1 doesn't overflow.
23679         if ((CC == ISD::CondCode::SETGT && !Op1Val.isMaxSignedValue()) ||
23680             (CC == ISD::CondCode::SETUGT && !Op1Val.isMaxValue())) {
23681           APInt Op1ValPlusOne = Op1Val + 1;
23682           if (Op1ValPlusOne.isSignedIntN(32) &&
23683               (!Op1Val.isSignedIntN(8) || Op1ValPlusOne.isSignedIntN(8))) {
23684             Op1 = DAG.getConstant(Op1ValPlusOne, dl, Op0.getValueType());
23685             CC = CC == ISD::CondCode::SETGT ? ISD::CondCode::SETGE
23686                                             : ISD::CondCode::SETUGE;
23687           }
23688         }
23689       }
23690     }
23691 
23692     SDValue X86CC;
23693     SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
23694     SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
23695     return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
23696   }
23697 
23698   // Handle floating point.
23699   X86::CondCode CondCode = TranslateX86CC(CC, dl, /*IsFP*/ true, Op0, Op1, DAG);
23700   if (CondCode == X86::COND_INVALID)
23701     return SDValue();
23702 
23703   SDValue EFLAGS;
23704   if (IsStrict) {
23705     bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
23706     EFLAGS =
23707         DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
23708                     dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
23709     Chain = EFLAGS.getValue(1);
23710   } else {
23711     EFLAGS = DAG.getNode(X86ISD::FCMP, dl, MVT::i32, Op0, Op1);
23712   }
23713 
23714   SDValue X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
23715   SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
23716   return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
23717 }
23718 
23719 SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
23720   SDValue LHS = Op.getOperand(0);
23721   SDValue RHS = Op.getOperand(1);
23722   SDValue Carry = Op.getOperand(2);
23723   SDValue Cond = Op.getOperand(3);
23724   SDLoc DL(Op);
23725 
23726   assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
23727   X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());
23728 
23729   // Recreate the carry if needed.
23730   EVT CarryVT = Carry.getValueType();
23731   Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
23732                       Carry, DAG.getAllOnesConstant(DL, CarryVT));
23733 
23734   SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
23735   SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
23736   return getSETCC(CC, Cmp.getValue(1), DL, DAG);
23737 }
23738 
23739 // This function returns three things: the arithmetic computation itself
23740 // (Value) and an EFLAGS result (Overflow) as a pair, plus a condition code
23741 // (Cond) via the reference parameter. The flag and the condition code define
23742 // the case in which the arithmetic computation overflows.
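// For example, ISD::UADDO is lowered to X86ISD::ADD with an extra i32 EFLAGS
// result, and Cond becomes the condition under which the unsigned add wraps
// (COND_B, or COND_E for an add of 1, since x + 1 wraps exactly when the
// result is zero).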
23743 static std::pair<SDValue, SDValue>
23744 getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
23745   assert(Op.getResNo() == 0 && "Unexpected result number!");
23746   SDValue Value, Overflow;
23747   SDValue LHS = Op.getOperand(0);
23748   SDValue RHS = Op.getOperand(1);
23749   unsigned BaseOp = 0;
23750   SDLoc DL(Op);
23751   switch (Op.getOpcode()) {
23752   default: llvm_unreachable("Unknown ovf instruction!");
23753   case ISD::SADDO:
23754     BaseOp = X86ISD::ADD;
23755     Cond = X86::COND_O;
23756     break;
23757   case ISD::UADDO:
23758     BaseOp = X86ISD::ADD;
23759     Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
23760     break;
23761   case ISD::SSUBO:
23762     BaseOp = X86ISD::SUB;
23763     Cond = X86::COND_O;
23764     break;
23765   case ISD::USUBO:
23766     BaseOp = X86ISD::SUB;
23767     Cond = X86::COND_B;
23768     break;
23769   case ISD::SMULO:
23770     BaseOp = X86ISD::SMUL;
23771     Cond = X86::COND_O;
23772     break;
23773   case ISD::UMULO:
23774     BaseOp = X86ISD::UMUL;
23775     Cond = X86::COND_O;
23776     break;
23777   }
23778 
23779   if (BaseOp) {
23780     // Also sets EFLAGS.
23781     SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
23782     Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
23783     Overflow = Value.getValue(1);
23784   }
23785 
23786   return std::make_pair(Value, Overflow);
23787 }
23788 
23789 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
23790   // Lower the "add/sub/mul with overflow" instruction into a regular
23791   // instruction plus a "setcc" instruction that checks the overflow flag. The
23792   // "brcond" lowering looks for this combo and may remove the "setcc"
23793   // instruction if the "setcc" has only one use.
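  // For example, i32 uaddo(a, b) becomes (X86ISD::ADD a, b), whose second
  // result is EFLAGS, merged with a SETCC on the condition code chosen by
  // getX86XALUOOp.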
23794   SDLoc DL(Op);
23795   X86::CondCode Cond;
23796   SDValue Value, Overflow;
23797   std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);
23798 
23799   SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
23800   assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
23801   return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
23802 }
23803 
23804 /// Return true if the opcode is an X86 logical comparison.
23805 static bool isX86LogicalCmp(SDValue Op) {
23806   unsigned Opc = Op.getOpcode();
23807   if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
23808       Opc == X86ISD::FCMP)
23809     return true;
23810   if (Op.getResNo() == 1 &&
23811       (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
23812        Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
23813        Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
23814     return true;
23815 
23816   return false;
23817 }
23818 
23819 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
23820   if (V.getOpcode() != ISD::TRUNCATE)
23821     return false;
23822 
23823   SDValue VOp0 = V.getOperand(0);
23824   unsigned InBits = VOp0.getValueSizeInBits();
23825   unsigned Bits = V.getValueSizeInBits();
23826   return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
23827 }
23828 
23829 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
23830   bool AddTest = true;
23831   SDValue Cond  = Op.getOperand(0);
23832   SDValue Op1 = Op.getOperand(1);
23833   SDValue Op2 = Op.getOperand(2);
23834   SDLoc DL(Op);
23835   MVT VT = Op1.getSimpleValueType();
23836   SDValue CC;
23837 
23838   if (isSoftF16(VT, Subtarget)) {
23839     MVT NVT = VT.changeTypeToInteger();
23840     return DAG.getBitcast(VT, DAG.getNode(ISD::SELECT, DL, NVT, Cond,
23841                                           DAG.getBitcast(NVT, Op1),
23842                                           DAG.getBitcast(NVT, Op2)));
23843   }
23844 
23845   // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
23846   // are available, or into VBLENDV if AVX is available.
23847   // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
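  // For example, (select (setolt a, b), x, y) becomes a CMPLTSS mask M
  // followed by ((M & x) | (~M & y)), or a single VBLENDV when AVX is
  // available.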
23848   if (Cond.getOpcode() == ISD::SETCC && isScalarFPTypeInSSEReg(VT) &&
23849       VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
23850     SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
23851     bool IsAlwaysSignaling;
23852     unsigned SSECC =
23853         translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
23854                            CondOp0, CondOp1, IsAlwaysSignaling);
23855 
23856     if (Subtarget.hasAVX512()) {
23857       SDValue Cmp =
23858           DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
23859                       DAG.getTargetConstant(SSECC, DL, MVT::i8));
23860       assert(!VT.isVector() && "Not a scalar type?");
23861       return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
23862     }
23863 
23864     if (SSECC < 8 || Subtarget.hasAVX()) {
23865       SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
23866                                 DAG.getTargetConstant(SSECC, DL, MVT::i8));
23867 
23868       // If we have AVX, we can use a variable vector select (VBLENDV) instead
23869       // of 3 logic instructions for size savings and potentially speed.
23870       // Unfortunately, there is no scalar form of VBLENDV.
23871 
23872       // If either operand is a +0.0 constant, don't try this. We can expect to
23873       // optimize away at least one of the logic instructions later in that
23874       // case, so that sequence would be faster than a variable blend.
23875 
23876       // BLENDV was introduced with SSE4.1, but the 2-register form implicitly
23877       // uses XMM0 as the selection register. That may need just as many
23878       // instructions as the AND/ANDN/OR sequence due to register moves, so
23879       // don't bother.
23880       if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
23881           !isNullFPConstant(Op2)) {
23882         // Convert to vectors, do a VSELECT, and convert back to scalar.
23883         // All of the conversions should be optimized away.
23884         MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
23885         SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
23886         SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
23887         SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
23888 
23889         MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
23890         VCmp = DAG.getBitcast(VCmpVT, VCmp);
23891 
23892         SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
23893 
23894         return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
23895                            VSel, DAG.getIntPtrConstant(0, DL));
23896       }
23897       SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
23898       SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
23899       return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
23900     }
23901   }
23902 
23903   // AVX512 fallback is to lower selects of scalar floats to masked moves.
23904   if (isScalarFPTypeInSSEReg(VT) && Subtarget.hasAVX512()) {
23905     SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
23906     return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
23907   }
23908 
23909   if (Cond.getOpcode() == ISD::SETCC &&
23910       !isSoftF16(Cond.getOperand(0).getSimpleValueType(), Subtarget)) {
23911     if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
23912       Cond = NewCond;
23913       // If the condition was updated, it's possible that the operands of the
23914       // select were also updated (for example, EmitTest has a RAUW). Refresh
23915       // the local references to the select operands in case they got stale.
23916       Op1 = Op.getOperand(1);
23917       Op2 = Op.getOperand(2);
23918     }
23919   }
23920 
23921   // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
23922   // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
23923   // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
23924   // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
23925   // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
23926   // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
23927   // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
23928   // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
23929   if (Cond.getOpcode() == X86ISD::SETCC &&
23930       Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
23931       isNullConstant(Cond.getOperand(1).getOperand(1))) {
23932     SDValue Cmp = Cond.getOperand(1);
23933     SDValue CmpOp0 = Cmp.getOperand(0);
23934     unsigned CondCode = Cond.getConstantOperandVal(0);
23935 
23936     // Special handling for the __builtin_ffs(X) - 1 pattern, which looks like
23937     // (select (seteq X, 0), -1, (cttz_zero_undef X)). Disable the special
23938     // handling here to keep the CMP with 0. The CMP should later be removed by
23939     // optimizeCompareInst, which reuses the flags from the BSR/TZCNT emitted
23940     // for the cttz_zero_undef.
23941     auto MatchFFSMinus1 = [&](SDValue Op1, SDValue Op2) {
23942       return (Op1.getOpcode() == ISD::CTTZ_ZERO_UNDEF && Op1.hasOneUse() &&
23943               Op1.getOperand(0) == CmpOp0 && isAllOnesConstant(Op2));
23944     };
23945     if (Subtarget.canUseCMOV() && (VT == MVT::i32 || VT == MVT::i64) &&
23946         ((CondCode == X86::COND_NE && MatchFFSMinus1(Op1, Op2)) ||
23947          (CondCode == X86::COND_E && MatchFFSMinus1(Op2, Op1)))) {
23948       // Keep Cmp.
23949     } else if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
23950         (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
23951       SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
23952       SDVTList CmpVTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
23953 
23954       // 'X - 1' sets the carry flag if X == 0.
23955       // '0 - X' sets the carry flag if X != 0.
23956       // Convert the carry flag to a -1/0 mask with sbb:
23957       // select (X != 0), -1, Y --> 0 - X; or (sbb), Y
23958       // select (X == 0), Y, -1 --> 0 - X; or (sbb), Y
23959       // select (X != 0), Y, -1 --> X - 1; or (sbb), Y
23960       // select (X == 0), -1, Y --> X - 1; or (sbb), Y
23961       SDValue Sub;
23962       if (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE)) {
23963         SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
23964         Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
23965       } else {
23966         SDValue One = DAG.getConstant(1, DL, CmpOp0.getValueType());
23967         Sub = DAG.getNode(X86ISD::SUB, DL, CmpVTs, CmpOp0, One);
23968       }
23969       SDValue SBB = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
23970                                 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
23971                                 Sub.getValue(1));
23972       return DAG.getNode(ISD::OR, DL, VT, SBB, Y);
23973     } else if (!Subtarget.canUseCMOV() && CondCode == X86::COND_E &&
23974                CmpOp0.getOpcode() == ISD::AND &&
23975                isOneConstant(CmpOp0.getOperand(1))) {
23976       SDValue Src1, Src2;
23977       // Returns true if Op2 is an XOR or OR operation and one of its operands
23978       // is equal to Op1, i.e. the select operands have the form
23979       // (a, a op b) or (b, a op b).
23980       auto isOrXorPattern = [&]() {
23981         if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
23982             (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
23983           Src1 =
23984               Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
23985           Src2 = Op1;
23986           return true;
23987         }
23988         return false;
23989       };
23990 
23991       if (isOrXorPattern()) {
23992         SDValue Neg;
23993         unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
23994         // We need a mask of all zeros or all ones with the same size as the
23995         // other operands.
23996         if (CmpSz > VT.getSizeInBits())
23997           Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
23998         else if (CmpSz < VT.getSizeInBits())
23999           Neg = DAG.getNode(ISD::AND, DL, VT,
24000               DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
24001               DAG.getConstant(1, DL, VT));
24002         else
24003           Neg = CmpOp0;
24004         SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
24005                                    Neg); // -(and (x, 0x1))
24006         SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
24007         return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2);  // And Op y
24008       }
24009     } else if ((VT == MVT::i32 || VT == MVT::i64) && isNullConstant(Op2) &&
24010                Cmp.getNode()->hasOneUse() && (CmpOp0 == Op1) &&
24011                ((CondCode == X86::COND_S) ||                    // smin(x, 0)
24012                 (CondCode == X86::COND_G && hasAndNot(Op1)))) { // smax(x, 0)
24013       // (select (x < 0), x, 0) -> ((x >> (size_in_bits(x)-1))) & x
24014       //
24015       // If the comparison is testing for a positive value, we have to invert
24016       // the sign bit mask, so only do that transform if the target has a
24017       // bitwise 'and not' instruction (the invert is free).
24018       // (select (x > 0), x, 0) -> (~(x >> (size_in_bits(x)-1))) & x
24019       unsigned ShCt = VT.getSizeInBits() - 1;
24020       SDValue ShiftAmt = DAG.getConstant(ShCt, DL, VT);
24021       SDValue Shift = DAG.getNode(ISD::SRA, DL, VT, Op1, ShiftAmt);
24022       if (CondCode == X86::COND_G)
24023         Shift = DAG.getNOT(DL, Shift, VT);
24024       return DAG.getNode(ISD::AND, DL, VT, Shift, Op1);
24025     }
24026   }
24027 
24028   // Look past (and (setcc_carry (cmp ...)), 1).
24029   if (Cond.getOpcode() == ISD::AND &&
24030       Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
24031       isOneConstant(Cond.getOperand(1)))
24032     Cond = Cond.getOperand(0);
24033 
24034   // If the condition flag is set by an X86ISD::CMP, then use it as the
24035   // condition-setting operand in place of the X86ISD::SETCC.
24036   unsigned CondOpcode = Cond.getOpcode();
24037   if (CondOpcode == X86ISD::SETCC ||
24038       CondOpcode == X86ISD::SETCC_CARRY) {
24039     CC = Cond.getOperand(0);
24040 
24041     SDValue Cmp = Cond.getOperand(1);
24042     bool IllegalFPCMov = false;
24043     if (VT.isFloatingPoint() && !VT.isVector() &&
24044         !isScalarFPTypeInSSEReg(VT) && Subtarget.canUseCMOV())  // FPStack?
24045       IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
24046 
24047     if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
24048         Cmp.getOpcode() == X86ISD::BT) { // FIXME
24049       Cond = Cmp;
24050       AddTest = false;
24051     }
24052   } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
24053              CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
24054              CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
24055     SDValue Value;
24056     X86::CondCode X86Cond;
24057     std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
24058 
24059     CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
24060     AddTest = false;
24061   }
24062 
24063   if (AddTest) {
24064     // Look past the truncate if the high bits are known zero.
24065     if (isTruncWithZeroHighBitsInput(Cond, DAG))
24066       Cond = Cond.getOperand(0);
24067 
24068     // We know the result of AND is compared against zero. Try to match
24069     // it to BT.
24070     if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
24071       X86::CondCode X86CondCode;
24072       if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, X86CondCode)) {
24073         CC = DAG.getTargetConstant(X86CondCode, DL, MVT::i8);
24074         Cond = BT;
24075         AddTest = false;
24076       }
24077     }
24078   }
24079 
24080   if (AddTest) {
24081     CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
24082     Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
24083   }
24084 
24085   // a <  b ? -1 :  0 -> RES = ~setcc_carry
24086   // a <  b ?  0 : -1 -> RES = setcc_carry
24087   // a >= b ? -1 :  0 -> RES = setcc_carry
24088   // a >= b ?  0 : -1 -> RES = ~setcc_carry
24089   if (Cond.getOpcode() == X86ISD::SUB) {
24090     unsigned CondCode = CC->getAsZExtVal();
24091 
24092     if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
24093         (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
24094         (isNullConstant(Op1) || isNullConstant(Op2))) {
24095       SDValue Res =
24096           DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
24097                       DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
24098       if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
24099         return DAG.getNOT(DL, Res, Res.getValueType());
24100       return Res;
24101     }
24102   }
24103 
24104   // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
24105   // widen the cmov and push the truncate through. This avoids introducing a new
24106   // branch during isel and doesn't add any extensions.
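  // For example, a select between (trunc i32 A) and (trunc i32 B) becomes a
  // single i32 X86ISD::CMOV of A and B followed by one truncate back to i8.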
24107   if (Op.getValueType() == MVT::i8 &&
24108       Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
24109     SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
24110     if (T1.getValueType() == T2.getValueType() &&
24111         // Exclude CopyFromReg to avoid partial register stalls.
24112         T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
24113       SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
24114                                  CC, Cond);
24115       return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
24116     }
24117   }
24118 
24119   // Or finally, promote i8 cmovs if we have CMOV,
24120   //                 or i16 cmovs if it won't prevent folding a load.
24121   // FIXME: we should not limit promotion of the i8 case to only when CMOV is
24122   //        legal, but EmitLoweredSelect() cannot deal with these extensions
24123   //        being inserted between two CMOVs (this applies to the i16 case too).
24124   //        https://bugs.llvm.org/show_bug.cgi?id=40974
24125   if ((Op.getValueType() == MVT::i8 && Subtarget.canUseCMOV()) ||
24126       (Op.getValueType() == MVT::i16 && !X86::mayFoldLoad(Op1, Subtarget) &&
24127        !X86::mayFoldLoad(Op2, Subtarget))) {
24128     Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
24129     Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
24130     SDValue Ops[] = { Op2, Op1, CC, Cond };
24131     SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
24132     return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
24133   }
24134 
24135   // X86ISD::CMOV means set the result (which is operand 1) to the RHS if the
24136   // condition is true.
24137   SDValue Ops[] = { Op2, Op1, CC, Cond };
24138   return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops, Op->getFlags());
24139 }
24140 
24141 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
24142                                      const X86Subtarget &Subtarget,
24143                                      SelectionDAG &DAG) {
24144   MVT VT = Op->getSimpleValueType(0);
24145   SDValue In = Op->getOperand(0);
24146   MVT InVT = In.getSimpleValueType();
24147   assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
24148   MVT VTElt = VT.getVectorElementType();
24149   SDLoc dl(Op);
24150 
24151   unsigned NumElts = VT.getVectorNumElements();
24152 
24153   // Extend VT if the scalar type is i8/i16 and BWI is not supported.
24154   MVT ExtVT = VT;
24155   if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
24156     // If v16i32 is to be avoided, we'll need to split and concatenate.
24157     if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
24158       return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
24159 
24160     ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
24161   }
24162 
24163   // Widen to 512-bits if VLX is not supported.
24164   MVT WideVT = ExtVT;
24165   if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
24166     NumElts *= 512 / ExtVT.getSizeInBits();
24167     InVT = MVT::getVectorVT(MVT::i1, NumElts);
24168     In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
24169                      In, DAG.getIntPtrConstant(0, dl));
24170     WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
24171   }
24172 
24173   SDValue V;
24174   MVT WideEltVT = WideVT.getVectorElementType();
24175   if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
24176       (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
24177     V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
24178   } else {
24179     SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
24180     SDValue Zero = DAG.getConstant(0, dl, WideVT);
24181     V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
24182   }
24183 
24184   // Truncate if we had to extend i16/i8 above.
24185   if (VT != ExtVT) {
24186     WideVT = MVT::getVectorVT(VTElt, NumElts);
24187     V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
24188   }
24189 
24190   // Extract back to 128/256-bit if we widened.
24191   if (WideVT != VT)
24192     V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
24193                     DAG.getIntPtrConstant(0, dl));
24194 
24195   return V;
24196 }
24197 
24198 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
24199                                SelectionDAG &DAG) {
24200   SDValue In = Op->getOperand(0);
24201   MVT InVT = In.getSimpleValueType();
24202 
24203   if (InVT.getVectorElementType() == MVT::i1)
24204     return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
24205 
24206   assert(Subtarget.hasAVX() && "Expected AVX support");
24207   return LowerAVXExtend(Op, DAG, Subtarget);
24208 }
24209 
24210 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
24211 // For sign extend this needs to handle all vector sizes and SSE4.1 and
24212 // non-SSE4.1 targets. For zero extend this should only handle inputs of
24213 // MVT::v64i8 when BWI is not supported, but AVX512 is.
24214 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
24215                                         const X86Subtarget &Subtarget,
24216                                         SelectionDAG &DAG) {
24217   SDValue In = Op->getOperand(0);
24218   MVT VT = Op->getSimpleValueType(0);
24219   MVT InVT = In.getSimpleValueType();
24220 
24221   MVT SVT = VT.getVectorElementType();
24222   MVT InSVT = InVT.getVectorElementType();
24223   assert(SVT.getFixedSizeInBits() > InSVT.getFixedSizeInBits());
24224 
24225   if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
24226     return SDValue();
24227   if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
24228     return SDValue();
24229   if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
24230       !(VT.is256BitVector() && Subtarget.hasAVX()) &&
24231       !(VT.is512BitVector() && Subtarget.hasAVX512()))
24232     return SDValue();
24233 
24234   SDLoc dl(Op);
24235   unsigned Opc = Op.getOpcode();
24236   unsigned NumElts = VT.getVectorNumElements();
24237 
24238   // For 256-bit vectors, we only need the lower (128-bit) half of the input.
24239   // For 512-bit vectors, we need 128-bits or 256-bits.
24240   if (InVT.getSizeInBits() > 128) {
24241     // Input needs to be at least the same number of elements as output, and
24242     // at least 128-bits.
24243     int InSize = InSVT.getSizeInBits() * NumElts;
24244     In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
24245     InVT = In.getSimpleValueType();
24246   }
24247 
24248   // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit
24249   // results, so those are legal and shouldn't occur here. AVX2/AVX512 pmovsx*
24250   // instructions still need to be handled here for 256/512-bit results.
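  // For example, with AVX2 a v8i32 sign_extend_vector_inreg of a v16i16 input
  // extracts the low v8i16 half above and then becomes a regular
  // ISD::SIGN_EXTEND of that half.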
24251   if (Subtarget.hasInt256()) {
24252     assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
24253 
24254     if (InVT.getVectorNumElements() != NumElts)
24255       return DAG.getNode(Op.getOpcode(), dl, VT, In);
24256 
24257     // FIXME: Apparently we create inreg operations that could be regular
24258     // extends.
24259     unsigned ExtOpc =
24260         Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
24261                                              : ISD::ZERO_EXTEND;
24262     return DAG.getNode(ExtOpc, dl, VT, In);
24263   }
24264 
24265   // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
24266   if (Subtarget.hasAVX()) {
24267     assert(VT.is256BitVector() && "256-bit vector expected");
24268     MVT HalfVT = VT.getHalfNumVectorElementsVT();
24269     int HalfNumElts = HalfVT.getVectorNumElements();
24270 
24271     unsigned NumSrcElts = InVT.getVectorNumElements();
24272     SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
24273     for (int i = 0; i != HalfNumElts; ++i)
24274       HiMask[i] = HalfNumElts + i;
24275 
24276     SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
24277     SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
24278     Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
24279     return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
24280   }
24281 
24282   // We should only get here for sign extend.
24283   assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
24284   assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
24285   unsigned InNumElts = InVT.getVectorNumElements();
24286 
24287   // If the source elements are already all-signbits, we don't need to extend,
24288   // just splat the elements.
24289   APInt DemandedElts = APInt::getLowBitsSet(InNumElts, NumElts);
24290   if (DAG.ComputeNumSignBits(In, DemandedElts) == InVT.getScalarSizeInBits()) {
24291     unsigned Scale = InNumElts / NumElts;
24292     SmallVector<int, 16> ShuffleMask;
24293     for (unsigned I = 0; I != NumElts; ++I)
24294       ShuffleMask.append(Scale, I);
24295     return DAG.getBitcast(VT,
24296                           DAG.getVectorShuffle(InVT, dl, In, In, ShuffleMask));
24297   }
24298 
24299   // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
24300   SDValue Curr = In;
24301   SDValue SignExt = Curr;
24302 
24303   // As SRAI is only available on i16/i32 types, we expand only up to i32
24304   // and handle i64 separately.
24305   if (InVT != MVT::v4i32) {
24306     MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
24307 
24308     unsigned DestWidth = DestVT.getScalarSizeInBits();
24309     unsigned Scale = DestWidth / InSVT.getSizeInBits();
24310     unsigned DestElts = DestVT.getVectorNumElements();
24311 
24312     // Build a shuffle mask that takes each input element and places it in the
24313     // MSBs of the new element size.
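    // For example, extending v16i8 to v4i32 uses Scale = 4 and the mask
    // {-1,-1,-1,0, -1,-1,-1,1, -1,-1,-1,2, -1,-1,-1,3}, placing each input
    // byte in the top byte of an i32 lane before the arithmetic shift below.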
24314     SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
24315     for (unsigned i = 0; i != DestElts; ++i)
24316       Mask[i * Scale + (Scale - 1)] = i;
24317 
24318     Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
24319     Curr = DAG.getBitcast(DestVT, Curr);
24320 
24321     unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
24322     SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
24323                           DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
24324   }
24325 
24326   if (VT == MVT::v2i64) {
24327     assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
24328     SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
24329     SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
24330     SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
24331     SignExt = DAG.getBitcast(VT, SignExt);
24332   }
24333 
24334   return SignExt;
24335 }
24336 
24337 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
24338                                 SelectionDAG &DAG) {
24339   MVT VT = Op->getSimpleValueType(0);
24340   SDValue In = Op->getOperand(0);
24341   MVT InVT = In.getSimpleValueType();
24342   SDLoc dl(Op);
24343 
24344   if (InVT.getVectorElementType() == MVT::i1)
24345     return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
24346 
24347   assert(VT.isVector() && InVT.isVector() && "Expected vector type");
24348   assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
24349          "Expected same number of elements");
24350   assert((VT.getVectorElementType() == MVT::i16 ||
24351           VT.getVectorElementType() == MVT::i32 ||
24352           VT.getVectorElementType() == MVT::i64) &&
24353          "Unexpected element type");
24354   assert((InVT.getVectorElementType() == MVT::i8 ||
24355           InVT.getVectorElementType() == MVT::i16 ||
24356           InVT.getVectorElementType() == MVT::i32) &&
24357          "Unexpected element type");
24358 
24359   if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
24360     assert(InVT == MVT::v32i8 && "Unexpected VT!");
24361     return splitVectorIntUnary(Op, DAG);
24362   }
24363 
24364   if (Subtarget.hasInt256())
24365     return Op;
24366 
24367   // Optimize vectors in AVX mode:
24368   // sign extend v8i16 to v8i32 and
24369   //             v4i32 to v4i64.
24370   //
24371   // Divide the input vector into two halves; for v4i32 the high shuffle mask
24372   // will be {2, 3, -1, -1}. Use the vpmovsx instructions to extend each half
24373   // (v4i32 -> v2i64; v8i16 -> v4i32), then concatenate the results back into
24374   // the original VT.
24375   MVT HalfVT = VT.getHalfNumVectorElementsVT();
24376   SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
24377 
24378   unsigned NumElems = InVT.getVectorNumElements();
24379   SmallVector<int,8> ShufMask(NumElems, -1);
24380   for (unsigned i = 0; i != NumElems/2; ++i)
24381     ShufMask[i] = i + NumElems/2;
24382 
24383   SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
24384   OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
24385 
24386   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
24387 }
24388 
24389 /// Change a vector store into a pair of half-size vector stores.
24390 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
24391   SDValue StoredVal = Store->getValue();
24392   assert((StoredVal.getValueType().is256BitVector() ||
24393           StoredVal.getValueType().is512BitVector()) &&
24394          "Expecting 256/512-bit op");
24395 
24396   // Splitting volatile memory ops is not allowed unless the operation was not
24397   // legal to begin with. Assume the input store is legal (this transform is
24398   // only used for targets with AVX). Note: It is possible that we have an
24399   // illegal type like v2i128, and so we could allow splitting a volatile store
24400   // in that case if that is important.
24401   if (!Store->isSimple())
24402     return SDValue();
24403 
24404   SDLoc DL(Store);
24405   SDValue Value0, Value1;
24406   std::tie(Value0, Value1) = splitVector(StoredVal, DAG, DL);
24407   unsigned HalfOffset = Value0.getValueType().getStoreSize();
24408   SDValue Ptr0 = Store->getBasePtr();
24409   SDValue Ptr1 =
24410       DAG.getMemBasePlusOffset(Ptr0, TypeSize::getFixed(HalfOffset), DL);
24411   SDValue Ch0 =
24412       DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
24413                    Store->getOriginalAlign(),
24414                    Store->getMemOperand()->getFlags());
24415   SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
24416                              Store->getPointerInfo().getWithOffset(HalfOffset),
24417                              Store->getOriginalAlign(),
24418                              Store->getMemOperand()->getFlags());
24419   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
24420 }
24421 
24422 /// Scalarize a vector store, bitcasting to StoreVT to determine the scalar
24423 /// type.
24424 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
24425                                     SelectionDAG &DAG) {
24426   SDValue StoredVal = Store->getValue();
24427   assert(StoreVT.is128BitVector() &&
24428          StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
24429   StoredVal = DAG.getBitcast(StoreVT, StoredVal);
24430 
24431   // Splitting volatile memory ops is not allowed unless the operation was not
24432   // legal to begin with. We are assuming the input op is legal (this transform
24433   // is only used for targets with AVX).
24434   if (!Store->isSimple())
24435     return SDValue();
24436 
24437   MVT StoreSVT = StoreVT.getScalarType();
24438   unsigned NumElems = StoreVT.getVectorNumElements();
24439   unsigned ScalarSize = StoreSVT.getStoreSize();
24440 
24441   SDLoc DL(Store);
24442   SmallVector<SDValue, 4> Stores;
24443   for (unsigned i = 0; i != NumElems; ++i) {
24444     unsigned Offset = i * ScalarSize;
24445     SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(),
24446                                            TypeSize::getFixed(Offset), DL);
24447     SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
24448                               DAG.getIntPtrConstant(i, DL));
24449     SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
24450                               Store->getPointerInfo().getWithOffset(Offset),
24451                               Store->getOriginalAlign(),
24452                               Store->getMemOperand()->getFlags());
24453     Stores.push_back(Ch);
24454   }
24455   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
24456 }
24457 
24458 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
24459                           SelectionDAG &DAG) {
24460   StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
24461   SDLoc dl(St);
24462   SDValue StoredVal = St->getValue();
24463 
24464   // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
24465   if (StoredVal.getValueType().isVector() &&
24466       StoredVal.getValueType().getVectorElementType() == MVT::i1) {
24467     unsigned NumElts = StoredVal.getValueType().getVectorNumElements();
24468     assert(NumElts <= 8 && "Unexpected VT");
24469     assert(!St->isTruncatingStore() && "Expected non-truncating store");
24470     assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
24471            "Expected AVX512F without AVX512DQI");
24472 
24473     // We must pad with zeros to ensure we store zeros in any unused bits.
24474     StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
24475                             DAG.getUNDEF(MVT::v16i1), StoredVal,
24476                             DAG.getIntPtrConstant(0, dl));
24477     StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
24478     StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
24479     // Make sure we store zeros in the extra bits.
24480     if (NumElts < 8)
24481       StoredVal = DAG.getZeroExtendInReg(
24482           StoredVal, dl, EVT::getIntegerVT(*DAG.getContext(), NumElts));
24483 
24484     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
24485                         St->getPointerInfo(), St->getOriginalAlign(),
24486                         St->getMemOperand()->getFlags());
24487   }
24488 
24489   if (St->isTruncatingStore())
24490     return SDValue();
24491 
24492   // If this is a 256-bit store of concatenated ops, we are better off splitting
24493   // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
24494   // and each half can execute independently. Some cores would split the op into
24495   // halves anyway, so the concat (vinsertf128) is purely an extra op.
24496   MVT StoreVT = StoredVal.getSimpleValueType();
24497   if (StoreVT.is256BitVector() ||
24498       ((StoreVT == MVT::v32i16 || StoreVT == MVT::v64i8) &&
24499        !Subtarget.hasBWI())) {
24500     if (StoredVal.hasOneUse() && isFreeToSplitVector(StoredVal.getNode(), DAG))
24501       return splitVectorStore(St, DAG);
24502     return SDValue();
24503   }
24504 
24505   if (StoreVT.is32BitVector())
24506     return SDValue();
24507 
24508   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24509   assert(StoreVT.is64BitVector() && "Unexpected VT");
24510   assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
24511              TargetLowering::TypeWidenVector &&
24512          "Unexpected type action!");
24513 
24514   EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
24515   StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
24516                           DAG.getUNDEF(StoreVT));
24517 
24518   if (Subtarget.hasSSE2()) {
24519     // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
24520     // and store it.
24521     MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
24522     MVT CastVT = MVT::getVectorVT(StVT, 2);
24523     StoredVal = DAG.getBitcast(CastVT, StoredVal);
24524     StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
24525                             DAG.getIntPtrConstant(0, dl));
24526 
24527     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
24528                         St->getPointerInfo(), St->getOriginalAlign(),
24529                         St->getMemOperand()->getFlags());
24530   }
24531   assert(Subtarget.hasSSE1() && "Expected SSE");
24532   SDVTList Tys = DAG.getVTList(MVT::Other);
24533   SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
24534   return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
24535                                  St->getMemOperand());
24536 }
24537 
24538 // Lower vector extended loads using a shuffle. If SSSE3 is not available, we
24539 // may emit an illegal shuffle, but the expansion is still better than scalar
24540 // code. We generate sext/sext_invec for SEXTLOADs if it is available; otherwise
24541 // we'll emit a shuffle and an arithmetic shift.
24542 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
24543 // TODO: It is possible to support ZExt by zeroing the undef values during
24544 // the shuffle phase or after the shuffle.
24545 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
24546                                  SelectionDAG &DAG) {
24547   MVT RegVT = Op.getSimpleValueType();
24548   assert(RegVT.isVector() && "We only custom lower vector loads.");
24549   assert(RegVT.isInteger() &&
24550          "We only custom lower integer vector loads.");
24551 
24552   LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
24553   SDLoc dl(Ld);
24554 
24555   // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
24556   if (RegVT.getVectorElementType() == MVT::i1) {
24557     assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
24558     assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
24559     assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
24560            "Expected AVX512F without AVX512DQI");
24561 
24562     SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
24563                                 Ld->getPointerInfo(), Ld->getOriginalAlign(),
24564                                 Ld->getMemOperand()->getFlags());
24565 
24566     // Replace chain users with the new chain.
24567     assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
24568 
24569     SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
24570     Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
24571                       DAG.getBitcast(MVT::v16i1, Val),
24572                       DAG.getIntPtrConstant(0, dl));
24573     return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
24574   }
24575 
24576   return SDValue();
24577 }
24578 
24579 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
24580 /// each of which has no other use apart from the AND / OR.
24581 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
24582   Opc = Op.getOpcode();
24583   if (Opc != ISD::OR && Opc != ISD::AND)
24584     return false;
24585   return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
24586           Op.getOperand(0).hasOneUse() &&
24587           Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
24588           Op.getOperand(1).hasOneUse());
24589 }
24590 
24591 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
24592   SDValue Chain = Op.getOperand(0);
24593   SDValue Cond  = Op.getOperand(1);
24594   SDValue Dest  = Op.getOperand(2);
24595   SDLoc dl(Op);
24596 
24597   // Bail out when we don't have native compare instructions.
24598   if (Cond.getOpcode() == ISD::SETCC &&
24599       Cond.getOperand(0).getValueType() != MVT::f128 &&
24600       !isSoftF16(Cond.getOperand(0).getValueType(), Subtarget)) {
24601     SDValue LHS = Cond.getOperand(0);
24602     SDValue RHS = Cond.getOperand(1);
24603     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
24604 
24605     // Special case for
24606     // setcc([su]{add,sub,mul}o == 0)
24607     // setcc([su]{add,sub,mul}o != 1)
24608     if (ISD::isOverflowIntrOpRes(LHS) &&
24609         (CC == ISD::SETEQ || CC == ISD::SETNE) &&
24610         (isNullConstant(RHS) || isOneConstant(RHS))) {
24611       SDValue Value, Overflow;
24612       X86::CondCode X86Cond;
24613       std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, LHS.getValue(0), DAG);
24614 
24615       if ((CC == ISD::SETEQ) == isNullConstant(RHS))
24616         X86Cond = X86::GetOppositeBranchCondition(X86Cond);
24617 
24618       SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
24619       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24620                          Overflow);
24621     }
24622 
24623     if (LHS.getSimpleValueType().isInteger()) {
24624       SDValue CCVal;
24625       SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, CC, SDLoc(Cond), DAG, CCVal);
24626       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24627                          EFLAGS);
24628     }
24629 
24630     if (CC == ISD::SETOEQ) {
24631       // For FCMP_OEQ, we can emit
24632       // two branches instead of an explicit AND instruction with a
24633       // separate test. However, we only do this if this block doesn't
24634       // have a fall-through edge, because this requires an explicit
24635       // jmp when the condition is false.
24636       if (Op.getNode()->hasOneUse()) {
24637         SDNode *User = *Op.getNode()->use_begin();
24638         // Look for an unconditional branch following this conditional branch.
24639         // We need this because we need to reverse the successors in order
24640         // to implement FCMP_OEQ.
24641         if (User->getOpcode() == ISD::BR) {
24642           SDValue FalseBB = User->getOperand(1);
24643           SDNode *NewBR =
24644             DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
24645           assert(NewBR == User);
24646           (void)NewBR;
24647           Dest = FalseBB;
24648 
24649           SDValue Cmp =
24650               DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
24651           SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
24652           Chain = DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest,
24653                               CCVal, Cmp);
24654           CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
24655           return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24656                              Cmp);
24657         }
24658       }
24659     } else if (CC == ISD::SETUNE) {
24660       // For FCMP_UNE, we can emit
24661       // two branches instead of an explicit OR instruction with a
24662       // separate test.
24663       SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
24664       SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
24665       Chain =
24666           DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Cmp);
24667       CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
24668       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24669                          Cmp);
24670     } else {
24671       X86::CondCode X86Cond =
24672           TranslateX86CC(CC, dl, /*IsFP*/ true, LHS, RHS, DAG);
24673       SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
24674       SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
24675       return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24676                          Cmp);
24677     }
24678   }
24679 
24680   if (ISD::isOverflowIntrOpRes(Cond)) {
24681     SDValue Value, Overflow;
24682     X86::CondCode X86Cond;
24683     std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
24684 
24685     SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
24686     return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24687                        Overflow);
24688   }
24689 
24690   // Look past the truncate if the high bits are known zero.
24691   if (isTruncWithZeroHighBitsInput(Cond, DAG))
24692     Cond = Cond.getOperand(0);
24693 
24694   EVT CondVT = Cond.getValueType();
24695 
24696   // Add an AND with 1 if we don't already have one.
24697   if (!(Cond.getOpcode() == ISD::AND && isOneConstant(Cond.getOperand(1))))
24698     Cond =
24699         DAG.getNode(ISD::AND, dl, CondVT, Cond, DAG.getConstant(1, dl, CondVT));
24700 
24701   SDValue LHS = Cond;
24702   SDValue RHS = DAG.getConstant(0, dl, CondVT);
24703 
24704   SDValue CCVal;
24705   SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, ISD::SETNE, dl, DAG, CCVal);
24706   return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
24707                      EFLAGS);
24708 }
24709 
24710 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
24711 // Calls to _alloca are needed to probe the stack when allocating more than 4K
24712 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
24713 // that the guard pages used by the OS virtual memory manager are allocated in
24714 // the correct sequence.
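// For example, a single 12K allocation is probed one 4K page at a time so that
// each guard page is committed before the next page below it is touched.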
24715 SDValue
24716 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
24717                                            SelectionDAG &DAG) const {
24718   MachineFunction &MF = DAG.getMachineFunction();
24719   bool SplitStack = MF.shouldSplitStack();
24720   bool EmitStackProbeCall = hasStackProbeSymbol(MF);
24721   bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
24722                SplitStack || EmitStackProbeCall;
24723   SDLoc dl(Op);
24724 
24725   // Get the inputs.
24726   SDNode *Node = Op.getNode();
24727   SDValue Chain = Op.getOperand(0);
24728   SDValue Size  = Op.getOperand(1);
24729   MaybeAlign Alignment(Op.getConstantOperandVal(2));
24730   EVT VT = Node->getValueType(0);
24731 
24732   // Chain the dynamic stack allocation so that it doesn't modify the stack
24733   // pointer when other instructions are using the stack.
24734   Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
24735 
24736   bool Is64Bit = Subtarget.is64Bit();
24737   MVT SPTy = getPointerTy(DAG.getDataLayout());
24738 
24739   SDValue Result;
24740   if (!Lower) {
24741     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24742     Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
24743     assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
24744                     " not tell us which reg is the stack pointer!");
24745 
24746     const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
24747     const Align StackAlign = TFI.getStackAlign();
24748     if (hasInlineStackProbe(MF)) {
24749       MachineRegisterInfo &MRI = MF.getRegInfo();
24750 
24751       const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
24752       Register Vreg = MRI.createVirtualRegister(AddrRegClass);
24753       Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
24754       Result = DAG.getNode(X86ISD::PROBED_ALLOCA, dl, SPTy, Chain,
24755                            DAG.getRegister(Vreg, SPTy));
24756     } else {
24757       SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
24758       Chain = SP.getValue(1);
24759       Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
24760     }
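    // Round the new stack pointer down to the requested alignment by masking
    // off the low bits; e.g. a 32-byte alignment applies the mask ~31.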
24761     if (Alignment && *Alignment > StackAlign)
24762       Result =
24763           DAG.getNode(ISD::AND, dl, VT, Result,
24764                       DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
24765     Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
24766   } else if (SplitStack) {
24767     MachineRegisterInfo &MRI = MF.getRegInfo();
24768 
24769     if (Is64Bit) {
24770       // The 64-bit implementation of segmented stacks needs to clobber both r10
24771       // and r11. This makes it impossible to use it along with nested parameters.
24772       const Function &F = MF.getFunction();
24773       for (const auto &A : F.args()) {
24774         if (A.hasNestAttr())
24775           report_fatal_error("Cannot use segmented stacks with functions that "
24776                              "have nested arguments.");
24777       }
24778     }
24779 
24780     const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
24781     Register Vreg = MRI.createVirtualRegister(AddrRegClass);
24782     Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
24783     Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
24784                                 DAG.getRegister(Vreg, SPTy));
24785   } else {
24786     SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
24787     Chain = DAG.getNode(X86ISD::DYN_ALLOCA, dl, NodeTys, Chain, Size);
24788     MF.getInfo<X86MachineFunctionInfo>()->setHasDynAlloca(true);
24789 
24790     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
24791     Register SPReg = RegInfo->getStackRegister();
24792     SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
24793     Chain = SP.getValue(1);
24794 
24795     if (Alignment) {
24796       SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
24797                        DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
24798       Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
24799     }
24800 
24801     Result = SP;
24802   }
24803 
24804   Chain = DAG.getCALLSEQ_END(Chain, 0, 0, SDValue(), dl);
24805 
24806   SDValue Ops[2] = {Result, Chain};
24807   return DAG.getMergeValues(Ops, dl);
24808 }
24809 
24810 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
24811   MachineFunction &MF = DAG.getMachineFunction();
24812   auto PtrVT = getPointerTy(MF.getDataLayout());
24813   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
24814 
24815   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
24816   SDLoc DL(Op);
24817 
24818   if (!Subtarget.is64Bit() ||
24819       Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
24820     // vastart just stores the address of the VarArgsFrameIndex slot into the
24821     // memory location argument.
24822     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
24823     return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
24824                         MachinePointerInfo(SV));
24825   }
24826 
24827   // __va_list_tag:
24828   //   gp_offset         (0 .. 6 * 8, one 8-byte slot per integer register)
24829   //   fp_offset         (48 .. 48 + 8 * 16, one 16-byte slot per XMM register)
24830   //   overflow_arg_area (points to parameters passed in memory).
24831   //   reg_save_area
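  // For reference, this corresponds roughly to the SysV AMD64 ABI va_list
  // element:
  //   struct __va_list_tag {
  //     unsigned int gp_offset;   // byte 0
  //     unsigned int fp_offset;   // byte 4
  //     void *overflow_arg_area;  // byte 8
  //     void *reg_save_area;      // byte 16 (LP64) or 12 (X32)
  //   };
  // The stores below write these four fields at exactly those offsets.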
24832   SmallVector<SDValue, 8> MemOps;
24833   SDValue FIN = Op.getOperand(1);
24834   // Store gp_offset
24835   SDValue Store = DAG.getStore(
24836       Op.getOperand(0), DL,
24837       DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
24838       MachinePointerInfo(SV));
24839   MemOps.push_back(Store);
24840 
24841   // Store fp_offset
24842   FIN = DAG.getMemBasePlusOffset(FIN, TypeSize::getFixed(4), DL);
24843   Store = DAG.getStore(
24844       Op.getOperand(0), DL,
24845       DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
24846       MachinePointerInfo(SV, 4));
24847   MemOps.push_back(Store);
24848 
24849   // Store ptr to overflow_arg_area
24850   FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
24851   SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
24852   Store =
24853       DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
24854   MemOps.push_back(Store);
24855 
24856   // Store ptr to reg_save_area.
24857   FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
24858       Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
24859   SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
24860   Store = DAG.getStore(
24861       Op.getOperand(0), DL, RSFIN, FIN,
24862       MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
24863   MemOps.push_back(Store);
24864   return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
24865 }
24866 
24867 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
24868   assert(Subtarget.is64Bit() &&
24869          "LowerVAARG only handles 64-bit va_arg!");
24870   assert(Op.getNumOperands() == 4);
24871 
24872   MachineFunction &MF = DAG.getMachineFunction();
24873   if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
24874     // The Win64 ABI uses char* instead of a structure.
24875     return DAG.expandVAArg(Op.getNode());
24876 
24877   SDValue Chain = Op.getOperand(0);
24878   SDValue SrcPtr = Op.getOperand(1);
24879   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
24880   unsigned Align = Op.getConstantOperandVal(3);
24881   SDLoc dl(Op);
24882 
24883   EVT ArgVT = Op.getNode()->getValueType(0);
24884   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
24885   uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
24886   uint8_t ArgMode;
24887 
24888   // Decide which area this value should be read from.
24889   // TODO: Implement the AMD64 ABI in its entirety. This simple
24890   // selection mechanism works only for the basic types.
24891   assert(ArgVT != MVT::f80 && "va_arg for f80 not yet implemented");
24892   if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
24893     ArgMode = 2;  // Argument passed in XMM register. Use fp_offset.
24894   } else {
24895     assert(ArgVT.isInteger() && ArgSize <= 32 /*bytes*/ &&
24896            "Unhandled argument type in LowerVAARG");
24897     ArgMode = 1;  // Argument passed in GPR64 register(s). Use gp_offset.
24898   }
24899 
24900   if (ArgMode == 2) {
24901     // Make sure using fp_offset makes sense.
24902     assert(!Subtarget.useSoftFloat() &&
24903            !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
24904            Subtarget.hasSSE1());
24905   }
24906 
24907   // Insert VAARG node into the DAG
24908   // VAARG returns two values: Variable Argument Address, Chain
24909   SDValue InstOps[] = {Chain, SrcPtr,
24910                        DAG.getTargetConstant(ArgSize, dl, MVT::i32),
24911                        DAG.getTargetConstant(ArgMode, dl, MVT::i8),
24912                        DAG.getTargetConstant(Align, dl, MVT::i32)};
24913   SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
24914   SDValue VAARG = DAG.getMemIntrinsicNode(
24915       Subtarget.isTarget64BitLP64() ? X86ISD::VAARG_64 : X86ISD::VAARG_X32, dl,
24916       VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
24917       /*Alignment=*/std::nullopt,
24918       MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
24919   Chain = VAARG.getValue(1);
24920 
24921   // Load the next argument and return it
24922   return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
24923 }
24924 
24925 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
24926                            SelectionDAG &DAG) {
24927   // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
24928   // where a va_list is still an i8*.
24929   assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
24930   if (Subtarget.isCallingConvWin64(
24931         DAG.getMachineFunction().getFunction().getCallingConv()))
24932     // Probably a Win64 va_copy.
24933     return DAG.expandVACopy(Op.getNode());
24934 
24935   SDValue Chain = Op.getOperand(0);
24936   SDValue DstPtr = Op.getOperand(1);
24937   SDValue SrcPtr = Op.getOperand(2);
24938   const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
24939   const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
24940   SDLoc DL(Op);
24941 
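  // Copy the whole __va_list_tag: 24 bytes on LP64 (two i32 fields plus two
  // 8-byte pointers), 16 bytes on X32 where pointers are 4 bytes.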
24942   return DAG.getMemcpy(
24943       Chain, DL, DstPtr, SrcPtr,
24944       DAG.getIntPtrConstant(Subtarget.isTarget64BitLP64() ? 24 : 16, DL),
24945       Align(Subtarget.isTarget64BitLP64() ? 8 : 4), /*isVolatile*/ false, false,
24946       false, MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
24947 }
24948 
24949 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
24950 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
24951   switch (Opc) {
24952   case ISD::SHL:
24953   case X86ISD::VSHL:
24954   case X86ISD::VSHLI:
24955     return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
24956   case ISD::SRL:
24957   case X86ISD::VSRL:
24958   case X86ISD::VSRLI:
24959     return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
24960   case ISD::SRA:
24961   case X86ISD::VSRA:
24962   case X86ISD::VSRAI:
24963     return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
24964   }
24965   llvm_unreachable("Unknown target vector shift node");
24966 }
24967 
24968 /// Handle vector element shifts where the shift amount is a constant.
24969 /// Takes immediate version of shift as input.
24970 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
24971                                           SDValue SrcOp, uint64_t ShiftAmt,
24972                                           SelectionDAG &DAG) {
24973   MVT ElementType = VT.getVectorElementType();
24974 
24975   // Bitcast the source vector to the output type; this is mainly necessary for
24976   // vXi8/vXi64 shifts.
24977   if (VT != SrcOp.getSimpleValueType())
24978     SrcOp = DAG.getBitcast(VT, SrcOp);
24979 
24980   // Fold this packed shift into its first operand if ShiftAmt is 0.
24981   if (ShiftAmt == 0)
24982     return SrcOp;
24983 
24984   // Check for ShiftAmt >= element width
24985   if (ShiftAmt >= ElementType.getSizeInBits()) {
24986     if (Opc == X86ISD::VSRAI)
24987       ShiftAmt = ElementType.getSizeInBits() - 1;
24988     else
24989       return DAG.getConstant(0, dl, VT);
24990   }
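  // For example, a logical shift of v4i32 elements by 32 or more folds to
  // zero, while an arithmetic shift is clamped to 31 so the sign bit is still
  // propagated, matching the hardware behaviour of the PSRA instructions for
  // out-of-range counts.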
24991 
24992   assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
24993          && "Unknown target vector shift-by-constant node");
24994 
24995   // Fold this packed vector shift into a build vector if SrcOp is a
24996   // vector of Constants or UNDEFs.
24997   if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
24998     unsigned ShiftOpc;
24999     switch (Opc) {
25000     default: llvm_unreachable("Unknown opcode!");
25001     case X86ISD::VSHLI:
25002       ShiftOpc = ISD::SHL;
25003       break;
25004     case X86ISD::VSRLI:
25005       ShiftOpc = ISD::SRL;
25006       break;
25007     case X86ISD::VSRAI:
25008       ShiftOpc = ISD::SRA;
25009       break;
25010     }
25011 
25012     SDValue Amt = DAG.getConstant(ShiftAmt, dl, VT);
25013     if (SDValue C = DAG.FoldConstantArithmetic(ShiftOpc, dl, VT, {SrcOp, Amt}))
25014       return C;
25015   }
25016 
25017   return DAG.getNode(Opc, dl, VT, SrcOp,
25018                      DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
25019 }
25020 
25021 /// Handle vector element shifts by a splat shift amount
25022 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
25023                                    SDValue SrcOp, SDValue ShAmt, int ShAmtIdx,
25024                                    const X86Subtarget &Subtarget,
25025                                    SelectionDAG &DAG) {
25026   MVT AmtVT = ShAmt.getSimpleValueType();
25027   assert(AmtVT.isVector() && "Vector shift type mismatch");
25028   assert(0 <= ShAmtIdx && ShAmtIdx < (int)AmtVT.getVectorNumElements() &&
25029          "Illegal vector splat index");
25030 
25031   // Move the splat element to the bottom element.
25032   if (ShAmtIdx != 0) {
25033     SmallVector<int> Mask(AmtVT.getVectorNumElements(), -1);
25034     Mask[0] = ShAmtIdx;
25035     ShAmt = DAG.getVectorShuffle(AmtVT, dl, ShAmt, DAG.getUNDEF(AmtVT), Mask);
25036   }
25037 
25038   // Peek through any zext node if we can get back to a 128-bit source.
25039   if (AmtVT.getScalarSizeInBits() == 64 &&
25040       (ShAmt.getOpcode() == ISD::ZERO_EXTEND ||
25041        ShAmt.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG) &&
25042       ShAmt.getOperand(0).getValueType().isSimple() &&
25043       ShAmt.getOperand(0).getValueType().is128BitVector()) {
25044     ShAmt = ShAmt.getOperand(0);
25045     AmtVT = ShAmt.getSimpleValueType();
25046   }
25047 
25048   // See if we can mask off the upper elements using the existing source node.
25049   // The shift uses the entire lower 64-bits of the amount vector, so no need to
25050   // do this for vXi64 types.
25051   bool IsMasked = false;
25052   if (AmtVT.getScalarSizeInBits() < 64) {
25053     if (ShAmt.getOpcode() == ISD::BUILD_VECTOR ||
25054         ShAmt.getOpcode() == ISD::SCALAR_TO_VECTOR) {
25055       // If the shift amount has come from a scalar, then zero-extend the scalar
25056       // before moving to the vector.
25057       ShAmt = DAG.getZExtOrTrunc(ShAmt.getOperand(0), dl, MVT::i32);
25058       ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
25059       ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, ShAmt);
25060       AmtVT = MVT::v4i32;
25061       IsMasked = true;
25062     } else if (ShAmt.getOpcode() == ISD::AND) {
25063       // If the shift amount is already masked (e.g. for a rotation modulo),
25064       // we can zero-extend it by setting all the other mask elements to
25065       // zero.
25066       SmallVector<SDValue> MaskElts(
25067           AmtVT.getVectorNumElements(),
25068           DAG.getConstant(0, dl, AmtVT.getScalarType()));
25069       MaskElts[0] = DAG.getAllOnesConstant(dl, AmtVT.getScalarType());
25070       SDValue Mask = DAG.getBuildVector(AmtVT, dl, MaskElts);
25071       if ((Mask = DAG.FoldConstantArithmetic(ISD::AND, dl, AmtVT,
25072                                              {ShAmt.getOperand(1), Mask}))) {
25073         ShAmt = DAG.getNode(ISD::AND, dl, AmtVT, ShAmt.getOperand(0), Mask);
25074         IsMasked = true;
25075       }
25076     }
25077   }
25078 
25079   // Extract if the shift amount vector is larger than 128-bits.
25080   if (AmtVT.getSizeInBits() > 128) {
25081     ShAmt = extract128BitVector(ShAmt, 0, DAG, dl);
25082     AmtVT = ShAmt.getSimpleValueType();
25083   }
25084 
25085   // Zero-extend bottom element to v2i64 vector type, either by extension or
25086   // shuffle masking.
25087   if (!IsMasked && AmtVT.getScalarSizeInBits() < 64) {
25088     if (AmtVT == MVT::v4i32 && (ShAmt.getOpcode() == X86ISD::VBROADCAST ||
25089                                 ShAmt.getOpcode() == X86ISD::VBROADCAST_LOAD)) {
25090       ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, SDLoc(ShAmt), MVT::v4i32, ShAmt);
25091     } else if (Subtarget.hasSSE41()) {
25092       ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
25093                           MVT::v2i64, ShAmt);
25094     } else {
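      // Without SSE4.1 there is no cheap zero-extension node to use here, so
      // clear everything above the bottom element with a pair of full-register
      // byte shifts: shift the amount up to the top of the XMM register and
      // back down, which fills the vacated bytes with zeroes.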
25095       SDValue ByteShift = DAG.getTargetConstant(
25096           (128 - AmtVT.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
25097       ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
25098       ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
25099                           ByteShift);
25100       ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
25101                           ByteShift);
25102     }
25103   }
25104 
25105   // Change opcode to non-immediate version.
25106   Opc = getTargetVShiftUniformOpcode(Opc, true);
25107 
25108   // The return type has to be a 128-bit type with the same element
25109   // type as the input type.
25110   MVT EltVT = VT.getVectorElementType();
25111   MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
25112 
25113   ShAmt = DAG.getBitcast(ShVT, ShAmt);
25114   return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
25115 }
25116 
25117 /// Return Mask with the necessary casting or extending
25118 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
25119 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
25120                            const X86Subtarget &Subtarget, SelectionDAG &DAG,
25121                            const SDLoc &dl) {
25122 
25123   if (isAllOnesConstant(Mask))
25124     return DAG.getConstant(1, dl, MaskVT);
25125   if (X86::isZeroNode(Mask))
25126     return DAG.getConstant(0, dl, MaskVT);
25127 
25128   assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
25129 
25130   if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
25131     assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
25132     assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
25133     // In 32-bit mode a bitcast of i64 is illegal, so extend/split it.
25134     SDValue Lo, Hi;
25135     std::tie(Lo, Hi) = DAG.SplitScalar(Mask, dl, MVT::i32, MVT::i32);
25136     Lo = DAG.getBitcast(MVT::v32i1, Lo);
25137     Hi = DAG.getBitcast(MVT::v32i1, Hi);
25138     return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
25139   } else {
25140     MVT BitcastVT = MVT::getVectorVT(MVT::i1,
25141                                      Mask.getSimpleValueType().getSizeInBits());
25142     // When MaskVT is v2i1 or v4i1, the low 2 or 4 elements are
25143     // extracted by EXTRACT_SUBVECTOR.
25144     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
25145                        DAG.getBitcast(BitcastVT, Mask),
25146                        DAG.getIntPtrConstant(0, dl));
25147   }
25148 }
25149 
25150 /// Return (and \p Op, \p Mask) for compare instructions or
25151 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
25152 /// necessary casting or extending for \p Mask when lowering masking intrinsics
25153 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
25154                                     SDValue PreservedSrc,
25155                                     const X86Subtarget &Subtarget,
25156                                     SelectionDAG &DAG) {
25157   MVT VT = Op.getSimpleValueType();
25158   MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
25159   unsigned OpcodeSelect = ISD::VSELECT;
25160   SDLoc dl(Op);
25161 
25162   if (isAllOnesConstant(Mask))
25163     return Op;
25164 
25165   SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25166 
25167   if (PreservedSrc.isUndef())
25168     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
25169   return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
25170 }
25171 
25172 /// Creates an SDNode for a predicated scalar operation.
25173 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
25174 /// The mask comes in as MVT::i8 and should be transformed
25175 /// to MVT::v1i1 while lowering masking intrinsics.
25176 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
25177 /// "X86select" instead of "vselect". We just can't create the "vselect" node
25178 /// for a scalar instruction.
25179 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
25180                                     SDValue PreservedSrc,
25181                                     const X86Subtarget &Subtarget,
25182                                     SelectionDAG &DAG) {
25183 
25184   if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
25185     if (MaskConst->getZExtValue() & 0x1)
25186       return Op;
25187 
25188   MVT VT = Op.getSimpleValueType();
25189   SDLoc dl(Op);
25190 
25191   assert(Mask.getValueType() == MVT::i8 && "Unexpected type");
25192   SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
25193                               DAG.getBitcast(MVT::v8i1, Mask),
25194                               DAG.getIntPtrConstant(0, dl));
25195   if (Op.getOpcode() == X86ISD::FSETCCM ||
25196       Op.getOpcode() == X86ISD::FSETCCM_SAE ||
25197       Op.getOpcode() == X86ISD::VFPCLASSS)
25198     return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
25199 
25200   if (PreservedSrc.isUndef())
25201     PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
25202   return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
25203 }
25204 
25205 static int getSEHRegistrationNodeSize(const Function *Fn) {
25206   if (!Fn->hasPersonalityFn())
25207     report_fatal_error(
25208         "querying registration node size for function without personality");
25209   // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
25210   // WinEHStatePass for the full struct definition.
25211   switch (classifyEHPersonality(Fn->getPersonalityFn())) {
25212   case EHPersonality::MSVC_X86SEH: return 24;
25213   case EHPersonality::MSVC_CXX: return 16;
25214   default: break;
25215   }
25216   report_fatal_error(
25217       "can only recover FP for 32-bit MSVC EH personality functions");
25218 }
25219 
25220 /// When the MSVC runtime transfers control to us, either to an outlined
25221 /// function or when returning to a parent frame after catching an exception, we
25222 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
25223 /// Here's the math:
25224 ///   RegNodeBase = EntryEBP - RegNodeSize
25225 ///   ParentFP = RegNodeBase - ParentFrameOffset
25226 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
25227 /// subtracting the offset (negative on x86) takes us back to the parent FP.
25228 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
25229                                    SDValue EntryEBP) {
25230   MachineFunction &MF = DAG.getMachineFunction();
25231   SDLoc dl;
25232 
25233   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25234   MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
25235 
25236   // It's possible that the parent function no longer has a personality function
25237   // if the exceptional code was optimized away, in which case we just return
25238   // the incoming EBP.
25239   if (!Fn->hasPersonalityFn())
25240     return EntryEBP;
25241 
25242   // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
25243   // registration, or the .set_setframe offset.
25244   MCSymbol *OffsetSym =
25245       MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
25246           GlobalValue::dropLLVMManglingEscape(Fn->getName()));
25247   SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
25248   SDValue ParentFrameOffset =
25249       DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
25250 
25251   // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
25252   // prologue to RBP in the parent function.
25253   const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
25254   if (Subtarget.is64Bit())
25255     return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
25256 
25257   int RegNodeSize = getSEHRegistrationNodeSize(Fn);
25258   // RegNodeBase = EntryEBP - RegNodeSize
25259   // ParentFP = RegNodeBase - ParentFrameOffset
25260   SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
25261                                     DAG.getConstant(RegNodeSize, dl, PtrVT));
25262   return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
25263 }
25264 
25265 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
25266                                                    SelectionDAG &DAG) const {
25267   // Helper to detect if the operand is CUR_DIRECTION rounding mode.
25268   auto isRoundModeCurDirection = [](SDValue Rnd) {
25269     if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
25270       return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
25271 
25272     return false;
25273   };
25274   auto isRoundModeSAE = [](SDValue Rnd) {
25275     if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
25276       unsigned RC = C->getZExtValue();
25277       if (RC & X86::STATIC_ROUNDING::NO_EXC) {
25278         // Clear the NO_EXC bit and check remaining bits.
25279         RC ^= X86::STATIC_ROUNDING::NO_EXC;
25280         // As a convenience we allow no other bits or explicitly
25281         // current direction.
25282         return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
25283       }
25284     }
25285 
25286     return false;
25287   };
25288   auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
25289     if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
25290       RC = C->getZExtValue();
25291       if (RC & X86::STATIC_ROUNDING::NO_EXC) {
25292         // Clear the NO_EXC bit and check remaining bits.
25293         RC ^= X86::STATIC_ROUNDING::NO_EXC;
25294         return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
25295                RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
25296                RC == X86::STATIC_ROUNDING::TO_POS_INF ||
25297                RC == X86::STATIC_ROUNDING::TO_ZERO;
25298       }
25299     }
25300 
25301     return false;
25302   };
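  // Illustrative summary: an operand of NO_EXC | TO_NEG_INF satisfies
  // isRoundModeSAEToX with RC == TO_NEG_INF, NO_EXC on its own (or combined
  // with CUR_DIRECTION) satisfies isRoundModeSAE, and a plain CUR_DIRECTION
  // operand selects the default lowering path.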
25303 
25304   SDLoc dl(Op);
25305   unsigned IntNo = Op.getConstantOperandVal(0);
25306   MVT VT = Op.getSimpleValueType();
25307   const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
25308 
25309   // Propagate flags from original node to transformed node(s).
25310   SelectionDAG::FlagInserter FlagsInserter(DAG, Op->getFlags());
25311 
25312   if (IntrData) {
25313     switch(IntrData->Type) {
25314     case INTR_TYPE_1OP: {
25315       // We specify 2 possible opcodes for intrinsics with rounding modes.
25316       // First, we check if the intrinsic may have non-default rounding mode,
25317       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
25318       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25319       if (IntrWithRoundingModeOpcode != 0) {
25320         SDValue Rnd = Op.getOperand(2);
25321         unsigned RC = 0;
25322         if (isRoundModeSAEToX(Rnd, RC))
25323           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25324                              Op.getOperand(1),
25325                              DAG.getTargetConstant(RC, dl, MVT::i32));
25326         if (!isRoundModeCurDirection(Rnd))
25327           return SDValue();
25328       }
25329       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25330                          Op.getOperand(1));
25331     }
25332     case INTR_TYPE_1OP_SAE: {
25333       SDValue Sae = Op.getOperand(2);
25334 
25335       unsigned Opc;
25336       if (isRoundModeCurDirection(Sae))
25337         Opc = IntrData->Opc0;
25338       else if (isRoundModeSAE(Sae))
25339         Opc = IntrData->Opc1;
25340       else
25341         return SDValue();
25342 
25343       return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
25344     }
25345     case INTR_TYPE_2OP: {
25346       SDValue Src2 = Op.getOperand(2);
25347 
25348       // We specify 2 possible opcodes for intrinsics with rounding modes.
25349       // First, we check if the intrinsic may have non-default rounding mode,
25350       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
25351       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25352       if (IntrWithRoundingModeOpcode != 0) {
25353         SDValue Rnd = Op.getOperand(3);
25354         unsigned RC = 0;
25355         if (isRoundModeSAEToX(Rnd, RC))
25356           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25357                              Op.getOperand(1), Src2,
25358                              DAG.getTargetConstant(RC, dl, MVT::i32));
25359         if (!isRoundModeCurDirection(Rnd))
25360           return SDValue();
25361       }
25362 
25363       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25364                          Op.getOperand(1), Src2);
25365     }
25366     case INTR_TYPE_2OP_SAE: {
25367       SDValue Sae = Op.getOperand(3);
25368 
25369       unsigned Opc;
25370       if (isRoundModeCurDirection(Sae))
25371         Opc = IntrData->Opc0;
25372       else if (isRoundModeSAE(Sae))
25373         Opc = IntrData->Opc1;
25374       else
25375         return SDValue();
25376 
25377       return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
25378                          Op.getOperand(2));
25379     }
25380     case INTR_TYPE_3OP:
25381     case INTR_TYPE_3OP_IMM8: {
25382       SDValue Src1 = Op.getOperand(1);
25383       SDValue Src2 = Op.getOperand(2);
25384       SDValue Src3 = Op.getOperand(3);
25385 
25386       if (IntrData->Type == INTR_TYPE_3OP_IMM8 &&
25387           Src3.getValueType() != MVT::i8) {
25388         Src3 = DAG.getTargetConstant(Src3->getAsZExtVal() & 0xff, dl, MVT::i8);
25389       }
25390 
25391       // We specify 2 possible opcodes for intrinsics with rounding modes.
25392       // First, we check if the intrinsic may have non-default rounding mode,
25393       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
25394       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25395       if (IntrWithRoundingModeOpcode != 0) {
25396         SDValue Rnd = Op.getOperand(4);
25397         unsigned RC = 0;
25398         if (isRoundModeSAEToX(Rnd, RC))
25399           return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25400                              Src1, Src2, Src3,
25401                              DAG.getTargetConstant(RC, dl, MVT::i32));
25402         if (!isRoundModeCurDirection(Rnd))
25403           return SDValue();
25404       }
25405 
25406       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25407                          {Src1, Src2, Src3});
25408     }
25409     case INTR_TYPE_4OP_IMM8: {
25410       assert(Op.getOperand(4)->getOpcode() == ISD::TargetConstant);
25411       SDValue Src4 = Op.getOperand(4);
25412       if (Src4.getValueType() != MVT::i8) {
25413         Src4 = DAG.getTargetConstant(Src4->getAsZExtVal() & 0xff, dl, MVT::i8);
25414       }
25415 
25416       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25417                          Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
25418                          Src4);
25419     }
25420     case INTR_TYPE_1OP_MASK: {
25421       SDValue Src = Op.getOperand(1);
25422       SDValue PassThru = Op.getOperand(2);
25423       SDValue Mask = Op.getOperand(3);
25424       // We add rounding mode to the Node when
25425       //   - RC Opcode is specified and
25426       //   - RC is not "current direction".
25427       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25428       if (IntrWithRoundingModeOpcode != 0) {
25429         SDValue Rnd = Op.getOperand(4);
25430         unsigned RC = 0;
25431         if (isRoundModeSAEToX(Rnd, RC))
25432           return getVectorMaskingNode(
25433               DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
25434                           Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
25435               Mask, PassThru, Subtarget, DAG);
25436         if (!isRoundModeCurDirection(Rnd))
25437           return SDValue();
25438       }
25439       return getVectorMaskingNode(
25440           DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
25441           Subtarget, DAG);
25442     }
25443     case INTR_TYPE_1OP_MASK_SAE: {
25444       SDValue Src = Op.getOperand(1);
25445       SDValue PassThru = Op.getOperand(2);
25446       SDValue Mask = Op.getOperand(3);
25447       SDValue Rnd = Op.getOperand(4);
25448 
25449       unsigned Opc;
25450       if (isRoundModeCurDirection(Rnd))
25451         Opc = IntrData->Opc0;
25452       else if (isRoundModeSAE(Rnd))
25453         Opc = IntrData->Opc1;
25454       else
25455         return SDValue();
25456 
25457       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
25458                                   Subtarget, DAG);
25459     }
25460     case INTR_TYPE_SCALAR_MASK: {
25461       SDValue Src1 = Op.getOperand(1);
25462       SDValue Src2 = Op.getOperand(2);
25463       SDValue passThru = Op.getOperand(3);
25464       SDValue Mask = Op.getOperand(4);
25465       unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
25466       // There are 2 kinds of intrinsics in this group:
25467       // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
25468       // (2) With rounding mode and sae - 7 operands.
25469       bool HasRounding = IntrWithRoundingModeOpcode != 0;
25470       if (Op.getNumOperands() == (5U + HasRounding)) {
25471         if (HasRounding) {
25472           SDValue Rnd = Op.getOperand(5);
25473           unsigned RC = 0;
25474           if (isRoundModeSAEToX(Rnd, RC))
25475             return getScalarMaskingNode(
25476                 DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
25477                             DAG.getTargetConstant(RC, dl, MVT::i32)),
25478                 Mask, passThru, Subtarget, DAG);
25479           if (!isRoundModeCurDirection(Rnd))
25480             return SDValue();
25481         }
25482         return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
25483                                                 Src2),
25484                                     Mask, passThru, Subtarget, DAG);
25485       }
25486 
25487       assert(Op.getNumOperands() == (6U + HasRounding) &&
25488              "Unexpected intrinsic form");
25489       SDValue RoundingMode = Op.getOperand(5);
25490       unsigned Opc = IntrData->Opc0;
25491       if (HasRounding) {
25492         SDValue Sae = Op.getOperand(6);
25493         if (isRoundModeSAE(Sae))
25494           Opc = IntrWithRoundingModeOpcode;
25495         else if (!isRoundModeCurDirection(Sae))
25496           return SDValue();
25497       }
25498       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
25499                                               Src2, RoundingMode),
25500                                   Mask, passThru, Subtarget, DAG);
25501     }
25502     case INTR_TYPE_SCALAR_MASK_RND: {
25503       SDValue Src1 = Op.getOperand(1);
25504       SDValue Src2 = Op.getOperand(2);
25505       SDValue passThru = Op.getOperand(3);
25506       SDValue Mask = Op.getOperand(4);
25507       SDValue Rnd = Op.getOperand(5);
25508 
25509       SDValue NewOp;
25510       unsigned RC = 0;
25511       if (isRoundModeCurDirection(Rnd))
25512         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
25513       else if (isRoundModeSAEToX(Rnd, RC))
25514         NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
25515                             DAG.getTargetConstant(RC, dl, MVT::i32));
25516       else
25517         return SDValue();
25518 
25519       return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
25520     }
25521     case INTR_TYPE_SCALAR_MASK_SAE: {
25522       SDValue Src1 = Op.getOperand(1);
25523       SDValue Src2 = Op.getOperand(2);
25524       SDValue passThru = Op.getOperand(3);
25525       SDValue Mask = Op.getOperand(4);
25526       SDValue Sae = Op.getOperand(5);
25527       unsigned Opc;
25528       if (isRoundModeCurDirection(Sae))
25529         Opc = IntrData->Opc0;
25530       else if (isRoundModeSAE(Sae))
25531         Opc = IntrData->Opc1;
25532       else
25533         return SDValue();
25534 
25535       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
25536                                   Mask, passThru, Subtarget, DAG);
25537     }
25538     case INTR_TYPE_2OP_MASK: {
25539       SDValue Src1 = Op.getOperand(1);
25540       SDValue Src2 = Op.getOperand(2);
25541       SDValue PassThru = Op.getOperand(3);
25542       SDValue Mask = Op.getOperand(4);
25543       SDValue NewOp;
25544       if (IntrData->Opc1 != 0) {
25545         SDValue Rnd = Op.getOperand(5);
25546         unsigned RC = 0;
25547         if (isRoundModeSAEToX(Rnd, RC))
25548           NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
25549                               DAG.getTargetConstant(RC, dl, MVT::i32));
25550         else if (!isRoundModeCurDirection(Rnd))
25551           return SDValue();
25552       }
25553       if (!NewOp)
25554         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
25555       return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
25556     }
25557     case INTR_TYPE_2OP_MASK_SAE: {
25558       SDValue Src1 = Op.getOperand(1);
25559       SDValue Src2 = Op.getOperand(2);
25560       SDValue PassThru = Op.getOperand(3);
25561       SDValue Mask = Op.getOperand(4);
25562 
25563       unsigned Opc = IntrData->Opc0;
25564       if (IntrData->Opc1 != 0) {
25565         SDValue Sae = Op.getOperand(5);
25566         if (isRoundModeSAE(Sae))
25567           Opc = IntrData->Opc1;
25568         else if (!isRoundModeCurDirection(Sae))
25569           return SDValue();
25570       }
25571 
25572       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
25573                                   Mask, PassThru, Subtarget, DAG);
25574     }
25575     case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
25576       SDValue Src1 = Op.getOperand(1);
25577       SDValue Src2 = Op.getOperand(2);
25578       SDValue Src3 = Op.getOperand(3);
25579       SDValue PassThru = Op.getOperand(4);
25580       SDValue Mask = Op.getOperand(5);
25581       SDValue Sae = Op.getOperand(6);
25582       unsigned Opc;
25583       if (isRoundModeCurDirection(Sae))
25584         Opc = IntrData->Opc0;
25585       else if (isRoundModeSAE(Sae))
25586         Opc = IntrData->Opc1;
25587       else
25588         return SDValue();
25589 
25590       return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
25591                                   Mask, PassThru, Subtarget, DAG);
25592     }
25593     case INTR_TYPE_3OP_MASK_SAE: {
25594       SDValue Src1 = Op.getOperand(1);
25595       SDValue Src2 = Op.getOperand(2);
25596       SDValue Src3 = Op.getOperand(3);
25597       SDValue PassThru = Op.getOperand(4);
25598       SDValue Mask = Op.getOperand(5);
25599 
25600       unsigned Opc = IntrData->Opc0;
25601       if (IntrData->Opc1 != 0) {
25602         SDValue Sae = Op.getOperand(6);
25603         if (isRoundModeSAE(Sae))
25604           Opc = IntrData->Opc1;
25605         else if (!isRoundModeCurDirection(Sae))
25606           return SDValue();
25607       }
25608       return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
25609                                   Mask, PassThru, Subtarget, DAG);
25610     }
25611     case BLENDV: {
25612       SDValue Src1 = Op.getOperand(1);
25613       SDValue Src2 = Op.getOperand(2);
25614       SDValue Src3 = Op.getOperand(3);
25615 
25616       EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
25617       Src3 = DAG.getBitcast(MaskVT, Src3);
25618 
25619       // Reverse the operands to match VSELECT order.
25620       return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
25621     }
25622     case VPERM_2OP : {
25623       SDValue Src1 = Op.getOperand(1);
25624       SDValue Src2 = Op.getOperand(2);
25625 
25626       // Swap Src1 and Src2 in the node creation
25627       return DAG.getNode(IntrData->Opc0, dl, VT,Src2, Src1);
25628     }
25629     case CFMA_OP_MASKZ:
25630     case CFMA_OP_MASK: {
25631       SDValue Src1 = Op.getOperand(1);
25632       SDValue Src2 = Op.getOperand(2);
25633       SDValue Src3 = Op.getOperand(3);
25634       SDValue Mask = Op.getOperand(4);
25635       MVT VT = Op.getSimpleValueType();
25636 
25637       SDValue PassThru = Src3;
25638       if (IntrData->Type == CFMA_OP_MASKZ)
25639         PassThru = getZeroVector(VT, Subtarget, DAG, dl);
25640 
25641       // We add rounding mode to the Node when
25642       //   - RC Opcode is specified and
25643       //   - RC is not "current direction".
25644       SDValue NewOp;
25645       if (IntrData->Opc1 != 0) {
25646         SDValue Rnd = Op.getOperand(5);
25647         unsigned RC = 0;
25648         if (isRoundModeSAEToX(Rnd, RC))
25649           NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2, Src3,
25650                               DAG.getTargetConstant(RC, dl, MVT::i32));
25651         else if (!isRoundModeCurDirection(Rnd))
25652           return SDValue();
25653       }
25654       if (!NewOp)
25655         NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2, Src3);
25656       return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
25657     }
25658     case IFMA_OP:
25659       // NOTE: We need to swizzle the operands to pass the multiply operands
25660       // first.
25661       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25662                          Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
25663     case FPCLASSS: {
25664       SDValue Src1 = Op.getOperand(1);
25665       SDValue Imm = Op.getOperand(2);
25666       SDValue Mask = Op.getOperand(3);
25667       SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
25668       SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
25669                                                  Subtarget, DAG);
25670       // Need to fill with zeros to ensure the bitcast will produce zeroes
25671       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
25672       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
25673                                 DAG.getConstant(0, dl, MVT::v8i1),
25674                                 FPclassMask, DAG.getIntPtrConstant(0, dl));
25675       return DAG.getBitcast(MVT::i8, Ins);
25676     }
25677 
25678     case CMP_MASK_CC: {
25679       MVT MaskVT = Op.getSimpleValueType();
25680       SDValue CC = Op.getOperand(3);
25681       SDValue Mask = Op.getOperand(4);
25682       // We specify 2 possible opcodes for intrinsics with rounding modes.
25683       // First, we check if the intrinsic may have non-default rounding mode,
25684       // (IntrData->Opc1 != 0), then we check the rounding mode operand.
25685       if (IntrData->Opc1 != 0) {
25686         SDValue Sae = Op.getOperand(5);
25687         if (isRoundModeSAE(Sae))
25688           return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
25689                              Op.getOperand(2), CC, Mask, Sae);
25690         if (!isRoundModeCurDirection(Sae))
25691           return SDValue();
25692       }
25693       // Default rounding mode.
25694       return DAG.getNode(IntrData->Opc0, dl, MaskVT,
25695                          {Op.getOperand(1), Op.getOperand(2), CC, Mask});
25696     }
25697     case CMP_MASK_SCALAR_CC: {
25698       SDValue Src1 = Op.getOperand(1);
25699       SDValue Src2 = Op.getOperand(2);
25700       SDValue CC = Op.getOperand(3);
25701       SDValue Mask = Op.getOperand(4);
25702 
25703       SDValue Cmp;
25704       if (IntrData->Opc1 != 0) {
25705         SDValue Sae = Op.getOperand(5);
25706         if (isRoundModeSAE(Sae))
25707           Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
25708         else if (!isRoundModeCurDirection(Sae))
25709           return SDValue();
25710       }
25711       // Default rounding mode.
25712       if (!Cmp.getNode())
25713         Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);
25714 
25715       SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
25716                                              Subtarget, DAG);
25717       // Need to fill with zeros to ensure the bitcast will produce zeroes
25718       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
25719       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
25720                                 DAG.getConstant(0, dl, MVT::v8i1),
25721                                 CmpMask, DAG.getIntPtrConstant(0, dl));
25722       return DAG.getBitcast(MVT::i8, Ins);
25723     }
25724     case COMI: { // Comparison intrinsics
25725       ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
25726       SDValue LHS = Op.getOperand(1);
25727       SDValue RHS = Op.getOperand(2);
25728       // Some conditions require the operands to be swapped.
25729       if (CC == ISD::SETLT || CC == ISD::SETLE)
25730         std::swap(LHS, RHS);
25731 
25732       SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
25733       SDValue SetCC;
25734       switch (CC) {
25735       case ISD::SETEQ: { // (ZF = 1 and PF = 0)
25736         SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
25737         SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
25738         SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
25739         break;
25740       }
25741       case ISD::SETNE: { // (ZF = 0 or PF = 1)
25742         SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
25743         SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
25744         SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
25745         break;
25746       }
25747       case ISD::SETGT: // (CF = 0 and ZF = 0)
25748       case ISD::SETLT: { // Condition opposite to GT. Operands swapped above.
25749         SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
25750         break;
25751       }
25752       case ISD::SETGE: // CF = 0
25753       case ISD::SETLE: // Condition opposite to GE. Operands swapped above.
25754         SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
25755         break;
25756       default:
25757         llvm_unreachable("Unexpected illegal condition!");
25758       }
25759       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
25760     }
25761     case COMI_RM: { // Comparison intrinsics with Sae
25762       SDValue LHS = Op.getOperand(1);
25763       SDValue RHS = Op.getOperand(2);
25764       unsigned CondVal = Op.getConstantOperandVal(3);
25765       SDValue Sae = Op.getOperand(4);
25766 
25767       SDValue FCmp;
25768       if (isRoundModeCurDirection(Sae))
25769         FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
25770                            DAG.getTargetConstant(CondVal, dl, MVT::i8));
25771       else if (isRoundModeSAE(Sae))
25772         FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
25773                            DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
25774       else
25775         return SDValue();
25776       // Need to fill with zeros to ensure the bitcast will produce zeroes
25777       // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
25778       SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
25779                                 DAG.getConstant(0, dl, MVT::v16i1),
25780                                 FCmp, DAG.getIntPtrConstant(0, dl));
25781       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
25782                          DAG.getBitcast(MVT::i16, Ins));
25783     }
25784     case VSHIFT: {
25785       SDValue SrcOp = Op.getOperand(1);
25786       SDValue ShAmt = Op.getOperand(2);
25787       assert(ShAmt.getValueType() == MVT::i32 &&
25788              "Unexpected VSHIFT amount type");
25789 
25790       // Catch shift-by-constant.
25791       if (auto *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
25792         return getTargetVShiftByConstNode(IntrData->Opc0, dl,
25793                                           Op.getSimpleValueType(), SrcOp,
25794                                           CShAmt->getZExtValue(), DAG);
25795 
25796       ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
25797       return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
25798                                  SrcOp, ShAmt, 0, Subtarget, DAG);
25799     }
25800     case COMPRESS_EXPAND_IN_REG: {
25801       SDValue Mask = Op.getOperand(3);
25802       SDValue DataToCompress = Op.getOperand(1);
25803       SDValue PassThru = Op.getOperand(2);
25804       if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
25805         return Op.getOperand(1);
25806 
25807       // Avoid false dependency.
25808       if (PassThru.isUndef())
25809         PassThru = getZeroVector(VT, Subtarget, DAG, dl);
25810 
25811       return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
25812                          Mask);
25813     }
25814     case FIXUPIMM:
25815     case FIXUPIMM_MASKZ: {
25816       SDValue Src1 = Op.getOperand(1);
25817       SDValue Src2 = Op.getOperand(2);
25818       SDValue Src3 = Op.getOperand(3);
25819       SDValue Imm = Op.getOperand(4);
25820       SDValue Mask = Op.getOperand(5);
25821       SDValue Passthru = (IntrData->Type == FIXUPIMM)
25822                              ? Src1
25823                              : getZeroVector(VT, Subtarget, DAG, dl);
25824 
25825       unsigned Opc = IntrData->Opc0;
25826       if (IntrData->Opc1 != 0) {
25827         SDValue Sae = Op.getOperand(6);
25828         if (isRoundModeSAE(Sae))
25829           Opc = IntrData->Opc1;
25830         else if (!isRoundModeCurDirection(Sae))
25831           return SDValue();
25832       }
25833 
25834       SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);
25835 
25836       if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
25837         return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
25838 
25839       return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
25840     }
25841     case ROUNDP: {
25842       assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
25843       // Clear the upper bits of the rounding immediate so that the legacy
25844       // intrinsic can't trigger the scaling behavior of VRNDSCALE.
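      // VRNDSCALE interprets imm8[7:4] as a precision scale (round to 2^-M);
      // the legacy ROUNDPS/ROUNDPD immediate only defines imm8[3:0], hence
      // the 0xf mask.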
25845       auto Round = cast<ConstantSDNode>(Op.getOperand(2));
25846       SDValue RoundingMode =
25847           DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
25848       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25849                          Op.getOperand(1), RoundingMode);
25850     }
25851     case ROUNDS: {
25852       assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
25853       // Clear the upper bits of the rounding immediate so that the legacy
25854       // intrinsic can't trigger the scaling behavior of VRNDSCALE.
25855       auto Round = cast<ConstantSDNode>(Op.getOperand(3));
25856       SDValue RoundingMode =
25857           DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
25858       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25859                          Op.getOperand(1), Op.getOperand(2), RoundingMode);
25860     }
25861     case BEXTRI: {
25862       assert(IntrData->Opc0 == X86ISD::BEXTRI && "Unexpected opcode");
25863 
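      // The BEXTR control word encodes the start bit in bits [7:0] and the
      // extract length in bits [15:8]; higher bits are ignored, so keep only
      // the low 16 bits of the immediate.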
25864       uint64_t Imm = Op.getConstantOperandVal(2);
25865       SDValue Control = DAG.getTargetConstant(Imm & 0xffff, dl,
25866                                               Op.getValueType());
25867       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
25868                          Op.getOperand(1), Control);
25869     }
25870     // ADC/SBB
25871     case ADX: {
25872       SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
25873       SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);
25874 
25875       SDValue Res;
25876       // If the carry in is zero, then we should just use ADD/SUB instead of
25877       // ADC/SBB.
25878       if (isNullConstant(Op.getOperand(1))) {
25879         Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
25880                           Op.getOperand(3));
25881       } else {
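        // Re-materialize CF from the i8 carry-in: adding 0xFF to any nonzero
        // byte produces a carry-out, while adding it to zero does not.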
25882         SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
25883                                     DAG.getConstant(-1, dl, MVT::i8));
25884         Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
25885                           Op.getOperand(3), GenCF.getValue(1));
25886       }
25887       SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
25888       SDValue Results[] = { SetCC, Res };
25889       return DAG.getMergeValues(Results, dl);
25890     }
25891     case CVTPD2PS_MASK:
25892     case CVTPD2DQ_MASK:
25893     case CVTQQ2PS_MASK:
25894     case TRUNCATE_TO_REG: {
25895       SDValue Src = Op.getOperand(1);
25896       SDValue PassThru = Op.getOperand(2);
25897       SDValue Mask = Op.getOperand(3);
25898 
25899       if (isAllOnesConstant(Mask))
25900         return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
25901 
25902       MVT SrcVT = Src.getSimpleValueType();
25903       MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
25904       Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25905       return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
25906                          {Src, PassThru, Mask});
25907     }
25908     case CVTPS2PH_MASK: {
25909       SDValue Src = Op.getOperand(1);
25910       SDValue Rnd = Op.getOperand(2);
25911       SDValue PassThru = Op.getOperand(3);
25912       SDValue Mask = Op.getOperand(4);
25913 
25914       unsigned RC = 0;
25915       unsigned Opc = IntrData->Opc0;
25916       bool SAE = Src.getValueType().is512BitVector() &&
25917                  (isRoundModeSAEToX(Rnd, RC) || isRoundModeSAE(Rnd));
25918       if (SAE) {
25919         Opc = X86ISD::CVTPS2PH_SAE;
25920         Rnd = DAG.getTargetConstant(RC, dl, MVT::i32);
25921       }
25922 
25923       if (isAllOnesConstant(Mask))
25924         return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd);
25925 
25926       if (SAE)
25927         Opc = X86ISD::MCVTPS2PH_SAE;
25928       else
25929         Opc = IntrData->Opc1;
25930       MVT SrcVT = Src.getSimpleValueType();
25931       MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
25932       Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
25933       return DAG.getNode(Opc, dl, Op.getValueType(), Src, Rnd, PassThru, Mask);
25934     }
25935     case CVTNEPS2BF16_MASK: {
25936       SDValue Src = Op.getOperand(1);
25937       SDValue PassThru = Op.getOperand(2);
25938       SDValue Mask = Op.getOperand(3);
25939 
25940       if (ISD::isBuildVectorAllOnes(Mask.getNode()))
25941         return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);
25942 
25943       // Break false dependency.
25944       if (PassThru.isUndef())
25945         PassThru = DAG.getConstant(0, dl, PassThru.getValueType());
25946 
25947       return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
25948                          Mask);
25949     }
25950     default:
25951       break;
25952     }
25953   }
25954 
25955   switch (IntNo) {
25956   default: return SDValue();    // Don't custom lower most intrinsics.
25957 
25958   // ptest and testp intrinsics. The intrinsics these come from are designed
25959   // to return an integer value, not just an instruction, so lower them to the
25960   // ptest or testp pattern and a setcc for the result.
25961   case Intrinsic::x86_avx512_ktestc_b:
25962   case Intrinsic::x86_avx512_ktestc_w:
25963   case Intrinsic::x86_avx512_ktestc_d:
25964   case Intrinsic::x86_avx512_ktestc_q:
25965   case Intrinsic::x86_avx512_ktestz_b:
25966   case Intrinsic::x86_avx512_ktestz_w:
25967   case Intrinsic::x86_avx512_ktestz_d:
25968   case Intrinsic::x86_avx512_ktestz_q:
25969   case Intrinsic::x86_sse41_ptestz:
25970   case Intrinsic::x86_sse41_ptestc:
25971   case Intrinsic::x86_sse41_ptestnzc:
25972   case Intrinsic::x86_avx_ptestz_256:
25973   case Intrinsic::x86_avx_ptestc_256:
25974   case Intrinsic::x86_avx_ptestnzc_256:
25975   case Intrinsic::x86_avx_vtestz_ps:
25976   case Intrinsic::x86_avx_vtestc_ps:
25977   case Intrinsic::x86_avx_vtestnzc_ps:
25978   case Intrinsic::x86_avx_vtestz_pd:
25979   case Intrinsic::x86_avx_vtestc_pd:
25980   case Intrinsic::x86_avx_vtestnzc_pd:
25981   case Intrinsic::x86_avx_vtestz_ps_256:
25982   case Intrinsic::x86_avx_vtestc_ps_256:
25983   case Intrinsic::x86_avx_vtestnzc_ps_256:
25984   case Intrinsic::x86_avx_vtestz_pd_256:
25985   case Intrinsic::x86_avx_vtestc_pd_256:
25986   case Intrinsic::x86_avx_vtestnzc_pd_256: {
25987     unsigned TestOpc = X86ISD::PTEST;
25988     X86::CondCode X86CC;
25989     switch (IntNo) {
25990     default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
25991     case Intrinsic::x86_avx512_ktestc_b:
25992     case Intrinsic::x86_avx512_ktestc_w:
25993     case Intrinsic::x86_avx512_ktestc_d:
25994     case Intrinsic::x86_avx512_ktestc_q:
25995       // CF = 1
25996       TestOpc = X86ISD::KTEST;
25997       X86CC = X86::COND_B;
25998       break;
25999     case Intrinsic::x86_avx512_ktestz_b:
26000     case Intrinsic::x86_avx512_ktestz_w:
26001     case Intrinsic::x86_avx512_ktestz_d:
26002     case Intrinsic::x86_avx512_ktestz_q:
26003       TestOpc = X86ISD::KTEST;
26004       X86CC = X86::COND_E;
26005       break;
26006     case Intrinsic::x86_avx_vtestz_ps:
26007     case Intrinsic::x86_avx_vtestz_pd:
26008     case Intrinsic::x86_avx_vtestz_ps_256:
26009     case Intrinsic::x86_avx_vtestz_pd_256:
26010       TestOpc = X86ISD::TESTP;
26011       [[fallthrough]];
26012     case Intrinsic::x86_sse41_ptestz:
26013     case Intrinsic::x86_avx_ptestz_256:
26014       // ZF = 1
26015       X86CC = X86::COND_E;
26016       break;
26017     case Intrinsic::x86_avx_vtestc_ps:
26018     case Intrinsic::x86_avx_vtestc_pd:
26019     case Intrinsic::x86_avx_vtestc_ps_256:
26020     case Intrinsic::x86_avx_vtestc_pd_256:
26021       TestOpc = X86ISD::TESTP;
26022       [[fallthrough]];
26023     case Intrinsic::x86_sse41_ptestc:
26024     case Intrinsic::x86_avx_ptestc_256:
26025       // CF = 1
26026       X86CC = X86::COND_B;
26027       break;
26028     case Intrinsic::x86_avx_vtestnzc_ps:
26029     case Intrinsic::x86_avx_vtestnzc_pd:
26030     case Intrinsic::x86_avx_vtestnzc_ps_256:
26031     case Intrinsic::x86_avx_vtestnzc_pd_256:
26032       TestOpc = X86ISD::TESTP;
26033       [[fallthrough]];
26034     case Intrinsic::x86_sse41_ptestnzc:
26035     case Intrinsic::x86_avx_ptestnzc_256:
26036       // ZF and CF = 0
26037       X86CC = X86::COND_A;
26038       break;
26039     }
26040 
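    // For example, the SSE4.1 ptestz intrinsic becomes a PTEST node plus a
    // SETCC on ZF, zero-extended to i32 (roughly "ptest; sete; movzbl").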
26041     SDValue LHS = Op.getOperand(1);
26042     SDValue RHS = Op.getOperand(2);
26043     SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
26044     SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
26045     return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
26046   }
26047 
26048   case Intrinsic::x86_sse42_pcmpistria128:
26049   case Intrinsic::x86_sse42_pcmpestria128:
26050   case Intrinsic::x86_sse42_pcmpistric128:
26051   case Intrinsic::x86_sse42_pcmpestric128:
26052   case Intrinsic::x86_sse42_pcmpistrio128:
26053   case Intrinsic::x86_sse42_pcmpestrio128:
26054   case Intrinsic::x86_sse42_pcmpistris128:
26055   case Intrinsic::x86_sse42_pcmpestris128:
26056   case Intrinsic::x86_sse42_pcmpistriz128:
26057   case Intrinsic::x86_sse42_pcmpestriz128: {
26058     unsigned Opcode;
26059     X86::CondCode X86CC;
26060     switch (IntNo) {
26061     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
26062     case Intrinsic::x86_sse42_pcmpistria128:
26063       Opcode = X86ISD::PCMPISTR;
26064       X86CC = X86::COND_A;
26065       break;
26066     case Intrinsic::x86_sse42_pcmpestria128:
26067       Opcode = X86ISD::PCMPESTR;
26068       X86CC = X86::COND_A;
26069       break;
26070     case Intrinsic::x86_sse42_pcmpistric128:
26071       Opcode = X86ISD::PCMPISTR;
26072       X86CC = X86::COND_B;
26073       break;
26074     case Intrinsic::x86_sse42_pcmpestric128:
26075       Opcode = X86ISD::PCMPESTR;
26076       X86CC = X86::COND_B;
26077       break;
26078     case Intrinsic::x86_sse42_pcmpistrio128:
26079       Opcode = X86ISD::PCMPISTR;
26080       X86CC = X86::COND_O;
26081       break;
26082     case Intrinsic::x86_sse42_pcmpestrio128:
26083       Opcode = X86ISD::PCMPESTR;
26084       X86CC = X86::COND_O;
26085       break;
26086     case Intrinsic::x86_sse42_pcmpistris128:
26087       Opcode = X86ISD::PCMPISTR;
26088       X86CC = X86::COND_S;
26089       break;
26090     case Intrinsic::x86_sse42_pcmpestris128:
26091       Opcode = X86ISD::PCMPESTR;
26092       X86CC = X86::COND_S;
26093       break;
26094     case Intrinsic::x86_sse42_pcmpistriz128:
26095       Opcode = X86ISD::PCMPISTR;
26096       X86CC = X86::COND_E;
26097       break;
26098     case Intrinsic::x86_sse42_pcmpestriz128:
26099       Opcode = X86ISD::PCMPESTR;
26100       X86CC = X86::COND_E;
26101       break;
26102     }
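    // PCMPISTR/PCMPESTR nodes produce (index:i32, mask:v16i8, EFLAGS:i32); the
    // flag-returning intrinsics only need result #2, turned into a setcc below.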
26103     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
26104     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
26105     SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
26106     SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
26107     return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
26108   }
26109 
26110   case Intrinsic::x86_sse42_pcmpistri128:
26111   case Intrinsic::x86_sse42_pcmpestri128: {
26112     unsigned Opcode;
26113     if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
26114       Opcode = X86ISD::PCMPISTR;
26115     else
26116       Opcode = X86ISD::PCMPESTR;
26117 
26118     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
26119     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
26120     return DAG.getNode(Opcode, dl, VTs, NewOps);
26121   }
26122 
26123   case Intrinsic::x86_sse42_pcmpistrm128:
26124   case Intrinsic::x86_sse42_pcmpestrm128: {
26125     unsigned Opcode;
26126     if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
26127       Opcode = X86ISD::PCMPISTR;
26128     else
26129       Opcode = X86ISD::PCMPESTR;
26130 
26131     SmallVector<SDValue, 5> NewOps(llvm::drop_begin(Op->ops()));
26132     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
26133     return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
26134   }
26135 
26136   case Intrinsic::eh_sjlj_lsda: {
26137     MachineFunction &MF = DAG.getMachineFunction();
26138     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26139     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
26140     auto &Context = MF.getMMI().getContext();
26141     MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
26142                                             Twine(MF.getFunctionNumber()));
26143     return DAG.getNode(getGlobalWrapperKind(nullptr, /*OpFlags=*/0), dl, VT,
26144                        DAG.getMCSymbol(S, PtrVT));
26145   }
26146 
26147   case Intrinsic::x86_seh_lsda: {
26148     // Compute the symbol for the LSDA. We know it'll get emitted later.
26149     MachineFunction &MF = DAG.getMachineFunction();
26150     SDValue Op1 = Op.getOperand(1);
26151     auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
26152     MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
26153         GlobalValue::dropLLVMManglingEscape(Fn->getName()));
26154 
26155     // Generate a simple absolute symbol reference. This intrinsic is only
26156     // supported on 32-bit Windows, which isn't PIC.
26157     SDValue Result = DAG.getMCSymbol(LSDASym, VT);
26158     return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
26159   }
26160 
26161   case Intrinsic::eh_recoverfp: {
26162     SDValue FnOp = Op.getOperand(1);
26163     SDValue IncomingFPOp = Op.getOperand(2);
26164     GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
26165     auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
26166     if (!Fn)
26167       report_fatal_error(
26168           "llvm.eh.recoverfp must take a function as the first argument");
26169     return recoverFramePointer(DAG, Fn, IncomingFPOp);
26170   }
26171 
26172   case Intrinsic::localaddress: {
26173     // Returns one of the stack, base, or frame pointer registers, depending on
26174     // which is used to reference local variables.
26175     MachineFunction &MF = DAG.getMachineFunction();
26176     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
26177     unsigned Reg;
26178     if (RegInfo->hasBasePointer(MF))
26179       Reg = RegInfo->getBaseRegister();
26180     else { // Handles the SP or FP case.
26181       bool CantUseFP = RegInfo->hasStackRealignment(MF);
26182       if (CantUseFP)
26183         Reg = RegInfo->getPtrSizedStackRegister(MF);
26184       else
26185         Reg = RegInfo->getPtrSizedFrameRegister(MF);
26186     }
26187     return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
26188   }
26189   case Intrinsic::x86_avx512_vp2intersect_q_512:
26190   case Intrinsic::x86_avx512_vp2intersect_q_256:
26191   case Intrinsic::x86_avx512_vp2intersect_q_128:
26192   case Intrinsic::x86_avx512_vp2intersect_d_512:
26193   case Intrinsic::x86_avx512_vp2intersect_d_256:
26194   case Intrinsic::x86_avx512_vp2intersect_d_128: {
26195     MVT MaskVT = Op.getSimpleValueType();
26196 
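    // VP2INTERSECT defines a pair of mask registers; model it as a single
    // Untyped node and split it with the sub_mask_0/sub_mask_1 subregisters.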
26197     SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
26198     SDLoc DL(Op);
26199 
26200     SDValue Operation =
26201         DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
26202                     Op->getOperand(1), Op->getOperand(2));
26203 
26204     SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
26205                                                  MaskVT, Operation);
26206     SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
26207                                                  MaskVT, Operation);
26208     return DAG.getMergeValues({Result0, Result1}, DL);
26209   }
26210   case Intrinsic::x86_mmx_pslli_w:
26211   case Intrinsic::x86_mmx_pslli_d:
26212   case Intrinsic::x86_mmx_pslli_q:
26213   case Intrinsic::x86_mmx_psrli_w:
26214   case Intrinsic::x86_mmx_psrli_d:
26215   case Intrinsic::x86_mmx_psrli_q:
26216   case Intrinsic::x86_mmx_psrai_w:
26217   case Intrinsic::x86_mmx_psrai_d: {
26218     SDLoc DL(Op);
26219     SDValue ShAmt = Op.getOperand(2);
26220     // If the argument is a constant, convert it to a target constant.
26221     if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
26222       // Clamp out-of-bounds shift amounts, since they would otherwise be masked
26223       // to 8 bits, which may make them no longer out of bounds.
26224       unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
26225       if (ShiftAmount == 0)
26226         return Op.getOperand(1);
26227 
26228       return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
26229                          Op.getOperand(0), Op.getOperand(1),
26230                          DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
26231     }
26232 
26233     unsigned NewIntrinsic;
26234     switch (IntNo) {
26235     default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
26236     case Intrinsic::x86_mmx_pslli_w:
26237       NewIntrinsic = Intrinsic::x86_mmx_psll_w;
26238       break;
26239     case Intrinsic::x86_mmx_pslli_d:
26240       NewIntrinsic = Intrinsic::x86_mmx_psll_d;
26241       break;
26242     case Intrinsic::x86_mmx_pslli_q:
26243       NewIntrinsic = Intrinsic::x86_mmx_psll_q;
26244       break;
26245     case Intrinsic::x86_mmx_psrli_w:
26246       NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
26247       break;
26248     case Intrinsic::x86_mmx_psrli_d:
26249       NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
26250       break;
26251     case Intrinsic::x86_mmx_psrli_q:
26252       NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
26253       break;
26254     case Intrinsic::x86_mmx_psrai_w:
26255       NewIntrinsic = Intrinsic::x86_mmx_psra_w;
26256       break;
26257     case Intrinsic::x86_mmx_psrai_d:
26258       NewIntrinsic = Intrinsic::x86_mmx_psra_d;
26259       break;
26260     }
26261 
26262     // The vector shift intrinsics with scalar shift amounts use 32-bit values,
26263     // but the SSE2/MMX shift instructions read 64 bits. Copy the 32 bits to an
26264     // MMX register.
26265     ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
26266     return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
26267                        DAG.getTargetConstant(NewIntrinsic, DL,
26268                                              getPointerTy(DAG.getDataLayout())),
26269                        Op.getOperand(1), ShAmt);
26270   }
26271   case Intrinsic::thread_pointer: {
26272     if (Subtarget.isTargetELF()) {
26273       SDLoc dl(Op);
26274       EVT PtrVT = getPointerTy(DAG.getDataLayout());
26275       // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
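      // e.g. on x86-64 ELF this lowers to a load from %fs:0 (roughly
      // "movq %fs:0, %rax").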
26276       Value *Ptr = Constant::getNullValue(PointerType::get(
26277           *DAG.getContext(), Subtarget.is64Bit() ? X86AS::FS : X86AS::GS));
26278       return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
26279                          DAG.getIntPtrConstant(0, dl), MachinePointerInfo(Ptr));
26280     }
26281     report_fatal_error(
26282         "Target OS doesn't support __builtin_thread_pointer() yet.");
26283   }
26284   }
26285 }
26286 
26287 static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
26288                                  SDValue Src, SDValue Mask, SDValue Base,
26289                                  SDValue Index, SDValue ScaleOp, SDValue Chain,
26290                                  const X86Subtarget &Subtarget) {
26291   SDLoc dl(Op);
26292   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26293   // Scale must be constant.
26294   if (!C)
26295     return SDValue();
26296   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26297   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26298                                         TLI.getPointerTy(DAG.getDataLayout()));
26299   EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
26300   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
26301   // If source is undef or we know it won't be used, use a zero vector
26302   // to break register dependency.
26303   // TODO: use undef instead and let BreakFalseDeps deal with it?
26304   if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
26305     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
26306 
26307   // Cast mask to an integer type.
26308   Mask = DAG.getBitcast(MaskVT, Mask);
26309 
26310   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26311 
26312   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
26313   SDValue Res =
26314       DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
26315                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
26316   return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
26317 }
26318 
26319 static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
26320                              SDValue Src, SDValue Mask, SDValue Base,
26321                              SDValue Index, SDValue ScaleOp, SDValue Chain,
26322                              const X86Subtarget &Subtarget) {
26323   MVT VT = Op.getSimpleValueType();
26324   SDLoc dl(Op);
26325   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26326   // Scale must be constant.
26327   if (!C)
26328     return SDValue();
26329   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26330   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26331                                         TLI.getPointerTy(DAG.getDataLayout()));
26332   unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
26333                               VT.getVectorNumElements());
26334   MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
26335 
26336   // We support two versions of the gather intrinsics: one with a scalar mask
26337   // and one with a vXi1 mask. Convert scalar to vXi1 if necessary.
26338   if (Mask.getValueType() != MaskVT)
26339     Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
26340 
26341   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
26342   // If source is undef or we know it won't be used, use a zero vector
26343   // to break register dependency.
26344   // TODO: use undef instead and let BreakFalseDeps deal with it?
26345   if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
26346     Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);
26347 
26348   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26349 
26350   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale };
26351   SDValue Res =
26352       DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
26353                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
26354   return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
26355 }
26356 
26357 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
26358                                SDValue Src, SDValue Mask, SDValue Base,
26359                                SDValue Index, SDValue ScaleOp, SDValue Chain,
26360                                const X86Subtarget &Subtarget) {
26361   SDLoc dl(Op);
26362   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26363   // Scale must be constant.
26364   if (!C)
26365     return SDValue();
26366   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26367   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26368                                         TLI.getPointerTy(DAG.getDataLayout()));
26369   unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
26370                               Src.getSimpleValueType().getVectorNumElements());
26371   MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);
26372 
26373   // We support two versions of the scatter intrinsics: one with a scalar mask
26374   // and one with a vXi1 mask. Convert scalar to vXi1 if necessary.
26375   if (Mask.getValueType() != MaskVT)
26376     Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
26377 
26378   MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26379 
26380   SDVTList VTs = DAG.getVTList(MVT::Other);
26381   SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
26382   SDValue Res =
26383       DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
26384                               MemIntr->getMemoryVT(), MemIntr->getMemOperand());
26385   return Res;
26386 }
26387 
26388 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
26389                                SDValue Mask, SDValue Base, SDValue Index,
26390                                SDValue ScaleOp, SDValue Chain,
26391                                const X86Subtarget &Subtarget) {
26392   SDLoc dl(Op);
26393   auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
26394   // Scale must be constant.
26395   if (!C)
26396     return SDValue();
26397   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26398   SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
26399                                         TLI.getPointerTy(DAG.getDataLayout()));
26400   SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
26401   SDValue Segment = DAG.getRegister(0, MVT::i32);
26402   MVT MaskVT =
26403     MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
26404   SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
26405   SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
26406   SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
26407   return SDValue(Res, 0);
26408 }
26409 
26410 /// Handles the lowering of builtin intrinsics with chain that return their
26411 /// value into registers EDX:EAX.
26412 /// If operand SrcReg is a valid register identifier, then operand 2 of N is
26413 /// copied to SrcReg. The assumption is that SrcReg is an implicit input to
26414 /// TargetOpcode.
26415 /// Returns a Glue value which can be used to add an extra copy-from-reg if the
26416 /// expanded intrinsic implicitly defines extra registers (i.e. not just
26417 /// EDX:EAX).
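/// For example, getReadTimeStampCounter below calls this helper with
/// TargetOpcode == X86::RDTSC/RDTSCP and SrcReg == 0, while the RDPMC, RDPRU
/// and XGETBV lowerings pass SrcReg == X86::ECX to feed the selector index.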
26418 static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
26419                                         SelectionDAG &DAG,
26420                                         unsigned TargetOpcode,
26421                                         unsigned SrcReg,
26422                                         const X86Subtarget &Subtarget,
26423                                         SmallVectorImpl<SDValue> &Results) {
26424   SDValue Chain = N->getOperand(0);
26425   SDValue Glue;
26426 
26427   if (SrcReg) {
26428     assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
26429     Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
26430     Glue = Chain.getValue(1);
26431   }
26432 
26433   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
26434   SDValue N1Ops[] = {Chain, Glue};
26435   SDNode *N1 = DAG.getMachineNode(
26436       TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
26437   Chain = SDValue(N1, 0);
26438 
26439   // The expanded instruction returns its result in registers EDX:EAX.
26440   SDValue LO, HI;
26441   if (Subtarget.is64Bit()) {
26442     LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
26443     HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
26444                             LO.getValue(2));
26445   } else {
26446     LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
26447     HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
26448                             LO.getValue(2));
26449   }
26450   Chain = HI.getValue(1);
26451   Glue = HI.getValue(2);
26452 
26453   if (Subtarget.is64Bit()) {
26454     // Merge the two 32-bit values into a 64-bit one.
26455     SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
26456                               DAG.getConstant(32, DL, MVT::i8));
26457     Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
26458     Results.push_back(Chain);
26459     return Glue;
26460   }
26461 
26462   // Use a buildpair to merge the two 32-bit values into a 64-bit one.
26463   SDValue Ops[] = { LO, HI };
26464   SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
26465   Results.push_back(Pair);
26466   Results.push_back(Chain);
26467   return Glue;
26468 }
26469 
26470 /// Handles the lowering of builtin intrinsics that read the time stamp counter
26471 /// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
26472 /// READCYCLECOUNTER nodes.
26473 static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
26474                                     SelectionDAG &DAG,
26475                                     const X86Subtarget &Subtarget,
26476                                     SmallVectorImpl<SDValue> &Results) {
26477   // The processor's time-stamp counter (a 64-bit MSR) is stored into the
26478   // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
26479   // and the EAX register is loaded with the low-order 32 bits.
26480   SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
26481                                              /* NoRegister */0, Subtarget,
26482                                              Results);
26483   if (Opcode != X86::RDTSCP)
26484     return;
26485 
26486   SDValue Chain = Results[1];
26487   // The RDTSCP instruction also loads the IA32_TSC_AUX MSR (address C000_0103H)
26488   // into the ECX register. Add 'ecx' explicitly to the chain.
26489   SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
26490   Results[1] = ecx;
26491   Results.push_back(ecx.getValue(1));
26492 }
26493 
26494 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
26495                                      SelectionDAG &DAG) {
26496   SmallVector<SDValue, 3> Results;
26497   SDLoc DL(Op);
26498   getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
26499                           Results);
26500   return DAG.getMergeValues(Results, DL);
26501 }
26502 
26503 static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
26504   MachineFunction &MF = DAG.getMachineFunction();
26505   SDValue Chain = Op.getOperand(0);
26506   SDValue RegNode = Op.getOperand(2);
26507   WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
26508   if (!EHInfo)
26509     report_fatal_error("EH registrations only live in functions using WinEH");
26510 
26511   // Cast the operand to an alloca, and remember the frame index.
26512   auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
26513   if (!FINode)
26514     report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
26515   EHInfo->EHRegNodeFrameIndex = FINode->getIndex();
26516 
26517   // Return the chain operand without making any DAG nodes.
26518   return Chain;
26519 }
26520 
26521 static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
26522   MachineFunction &MF = DAG.getMachineFunction();
26523   SDValue Chain = Op.getOperand(0);
26524   SDValue EHGuard = Op.getOperand(2);
26525   WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
26526   if (!EHInfo)
26527     report_fatal_error("EHGuard only lives in functions using WinEH");
26528 
26529   // Cast the operand to an alloca, and remember the frame index.
26530   auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
26531   if (!FINode)
26532     report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
26533   EHInfo->EHGuardFrameIndex = FINode->getIndex();
26534 
26535   // Return the chain operand without making any DAG nodes.
26536   return Chain;
26537 }
26538 
26539 /// Emit Truncating Store with signed or unsigned saturation.
26540 static SDValue
26541 EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &DL, SDValue Val,
26542                 SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
26543                 SelectionDAG &DAG) {
26544   SDVTList VTs = DAG.getVTList(MVT::Other);
26545   SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
26546   SDValue Ops[] = { Chain, Val, Ptr, Undef };
26547   unsigned Opc = SignedSat ? X86ISD::VTRUNCSTORES : X86ISD::VTRUNCSTOREUS;
26548   return DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MemVT, MMO);
26549 }
26550 
26551 /// Emit Masked Truncating Store with signed or unsigned saturation.
26552 static SDValue EmitMaskedTruncSStore(bool SignedSat, SDValue Chain,
26553                                      const SDLoc &DL,
26554                       SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
26555                       MachineMemOperand *MMO, SelectionDAG &DAG) {
26556   SDVTList VTs = DAG.getVTList(MVT::Other);
26557   SDValue Ops[] = { Chain, Val, Ptr, Mask };
26558   unsigned Opc = SignedSat ? X86ISD::VMTRUNCSTORES : X86ISD::VMTRUNCSTOREUS;
26559   return DAG.getMemIntrinsicNode(Opc, DL, VTs, Ops, MemVT, MMO);
26560 }
26561 
26562 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
26563                                       SelectionDAG &DAG) {
26564   unsigned IntNo = Op.getConstantOperandVal(1);
26565   const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
26566   if (!IntrData) {
26567     switch (IntNo) {
26568 
26569     case Intrinsic::swift_async_context_addr: {
26570       SDLoc dl(Op);
26571       auto &MF = DAG.getMachineFunction();
26572       auto X86FI = MF.getInfo<X86MachineFunctionInfo>();
26573       if (Subtarget.is64Bit()) {
26574         MF.getFrameInfo().setFrameAddressIsTaken(true);
26575         X86FI->setHasSwiftAsyncContext(true);
26576         SDValue Chain = Op->getOperand(0);
26577         SDValue CopyRBP = DAG.getCopyFromReg(Chain, dl, X86::RBP, MVT::i64);
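        // With the Swift extended frame, the async context slot sits
        // immediately below the saved frame pointer, so its address is
        // computed here as RBP - 8.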
26578         SDValue Result =
26579             SDValue(DAG.getMachineNode(X86::SUB64ri32, dl, MVT::i64, CopyRBP,
26580                                        DAG.getTargetConstant(8, dl, MVT::i32)),
26581                     0);
26582         // Return { result, chain }.
26583         return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
26584                            CopyRBP.getValue(1));
26585       } else {
26586         // 32-bit targets have no special extended frame; create or reuse an
26587         // existing stack slot.
26588         if (!X86FI->getSwiftAsyncContextFrameIdx())
26589           X86FI->setSwiftAsyncContextFrameIdx(
26590               MF.getFrameInfo().CreateStackObject(4, Align(4), false));
26591         SDValue Result =
26592             DAG.getFrameIndex(*X86FI->getSwiftAsyncContextFrameIdx(), MVT::i32);
26593         // Return { result, chain }.
26594         return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result,
26595                            Op->getOperand(0));
26596       }
26597     }
26598 
26599     case llvm::Intrinsic::x86_seh_ehregnode:
26600       return MarkEHRegistrationNode(Op, DAG);
26601     case llvm::Intrinsic::x86_seh_ehguard:
26602       return MarkEHGuard(Op, DAG);
26603     case llvm::Intrinsic::x86_rdpkru: {
26604       SDLoc dl(Op);
26605       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26606       // Create a RDPKRU node and pass 0 to the ECX parameter.
26607       return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
26608                          DAG.getConstant(0, dl, MVT::i32));
26609     }
26610     case llvm::Intrinsic::x86_wrpkru: {
26611       SDLoc dl(Op);
26612       // Create a WRPKRU node, pass the input to the EAX parameter,  and pass 0
26613       // to the EDX and ECX parameters.
26614       return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
26615                          Op.getOperand(0), Op.getOperand(2),
26616                          DAG.getConstant(0, dl, MVT::i32),
26617                          DAG.getConstant(0, dl, MVT::i32));
26618     }
26619     case llvm::Intrinsic::asan_check_memaccess: {
26620       // Mark this as adjustsStack because it will be lowered to a call.
26621       DAG.getMachineFunction().getFrameInfo().setAdjustsStack(true);
26622       // Don't do anything here, we will expand these intrinsics out later.
26623       return Op;
26624     }
26625     case llvm::Intrinsic::x86_flags_read_u32:
26626     case llvm::Intrinsic::x86_flags_read_u64:
26627     case llvm::Intrinsic::x86_flags_write_u32:
26628     case llvm::Intrinsic::x86_flags_write_u64: {
26629       // We need a frame pointer because this will get lowered to a PUSH/POP
26630       // sequence.
26631       MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
26632       MFI.setHasCopyImplyingStackAdjustment(true);
26633       // Don't do anything here, we will expand these intrinsics out later
26634       // during FinalizeISel in EmitInstrWithCustomInserter.
26635       return Op;
26636     }
26637     case Intrinsic::x86_lwpins32:
26638     case Intrinsic::x86_lwpins64:
26639     case Intrinsic::x86_umwait:
26640     case Intrinsic::x86_tpause: {
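      // Each of these instructions reports its status in CF; lower to the
      // target node and return the carry flag as the boolean result.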
26641       SDLoc dl(Op);
26642       SDValue Chain = Op->getOperand(0);
26643       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26644       unsigned Opcode;
26645 
26646       switch (IntNo) {
26647       default: llvm_unreachable("Impossible intrinsic");
26648       case Intrinsic::x86_umwait:
26649         Opcode = X86ISD::UMWAIT;
26650         break;
26651       case Intrinsic::x86_tpause:
26652         Opcode = X86ISD::TPAUSE;
26653         break;
26654       case Intrinsic::x86_lwpins32:
26655       case Intrinsic::x86_lwpins64:
26656         Opcode = X86ISD::LWPINS;
26657         break;
26658       }
26659 
26660       SDValue Operation =
26661           DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
26662                       Op->getOperand(3), Op->getOperand(4));
26663       SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
26664       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
26665                          Operation.getValue(1));
26666     }
26667     case Intrinsic::x86_enqcmd:
26668     case Intrinsic::x86_enqcmds: {
26669       SDLoc dl(Op);
26670       SDValue Chain = Op.getOperand(0);
26671       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26672       unsigned Opcode;
26673       switch (IntNo) {
26674       default: llvm_unreachable("Impossible intrinsic!");
26675       case Intrinsic::x86_enqcmd:
26676         Opcode = X86ISD::ENQCMD;
26677         break;
26678       case Intrinsic::x86_enqcmds:
26679         Opcode = X86ISD::ENQCMDS;
26680         break;
26681       }
26682       SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
26683                                       Op.getOperand(3));
26684       SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
26685       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
26686                          Operation.getValue(1));
26687     }
26688     case Intrinsic::x86_aesenc128kl:
26689     case Intrinsic::x86_aesdec128kl:
26690     case Intrinsic::x86_aesenc256kl:
26691     case Intrinsic::x86_aesdec256kl: {
26692       SDLoc DL(Op);
26693       SDVTList VTs = DAG.getVTList(MVT::v2i64, MVT::i32, MVT::Other);
26694       SDValue Chain = Op.getOperand(0);
26695       unsigned Opcode;
26696 
26697       switch (IntNo) {
26698       default: llvm_unreachable("Impossible intrinsic");
26699       case Intrinsic::x86_aesenc128kl:
26700         Opcode = X86ISD::AESENC128KL;
26701         break;
26702       case Intrinsic::x86_aesdec128kl:
26703         Opcode = X86ISD::AESDEC128KL;
26704         break;
26705       case Intrinsic::x86_aesenc256kl:
26706         Opcode = X86ISD::AESENC256KL;
26707         break;
26708       case Intrinsic::x86_aesdec256kl:
26709         Opcode = X86ISD::AESDEC256KL;
26710         break;
26711       }
26712 
26713       MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26714       MachineMemOperand *MMO = MemIntr->getMemOperand();
26715       EVT MemVT = MemIntr->getMemoryVT();
26716       SDValue Operation = DAG.getMemIntrinsicNode(
26717           Opcode, DL, VTs, {Chain, Op.getOperand(2), Op.getOperand(3)}, MemVT,
26718           MMO);
26719       SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(1), DL, DAG);
26720 
26721       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
26722                          {ZF, Operation.getValue(0), Operation.getValue(2)});
26723     }
26724     case Intrinsic::x86_aesencwide128kl:
26725     case Intrinsic::x86_aesdecwide128kl:
26726     case Intrinsic::x86_aesencwide256kl:
26727     case Intrinsic::x86_aesdecwide256kl: {
26728       SDLoc DL(Op);
26729       SDVTList VTs = DAG.getVTList(
26730           {MVT::i32, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::v2i64,
26731            MVT::v2i64, MVT::v2i64, MVT::v2i64, MVT::Other});
26732       SDValue Chain = Op.getOperand(0);
26733       unsigned Opcode;
26734 
26735       switch (IntNo) {
26736       default: llvm_unreachable("Impossible intrinsic");
26737       case Intrinsic::x86_aesencwide128kl:
26738         Opcode = X86ISD::AESENCWIDE128KL;
26739         break;
26740       case Intrinsic::x86_aesdecwide128kl:
26741         Opcode = X86ISD::AESDECWIDE128KL;
26742         break;
26743       case Intrinsic::x86_aesencwide256kl:
26744         Opcode = X86ISD::AESENCWIDE256KL;
26745         break;
26746       case Intrinsic::x86_aesdecwide256kl:
26747         Opcode = X86ISD::AESDECWIDE256KL;
26748         break;
26749       }
26750 
26751       MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);
26752       MachineMemOperand *MMO = MemIntr->getMemOperand();
26753       EVT MemVT = MemIntr->getMemoryVT();
26754       SDValue Operation = DAG.getMemIntrinsicNode(
26755           Opcode, DL, VTs,
26756           {Chain, Op.getOperand(2), Op.getOperand(3), Op.getOperand(4),
26757            Op.getOperand(5), Op.getOperand(6), Op.getOperand(7),
26758            Op.getOperand(8), Op.getOperand(9), Op.getOperand(10)},
26759           MemVT, MMO);
26760       SDValue ZF = getSETCC(X86::COND_E, Operation.getValue(0), DL, DAG);
26761 
26762       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
26763                          {ZF, Operation.getValue(1), Operation.getValue(2),
26764                           Operation.getValue(3), Operation.getValue(4),
26765                           Operation.getValue(5), Operation.getValue(6),
26766                           Operation.getValue(7), Operation.getValue(8),
26767                           Operation.getValue(9)});
26768     }
26769     case Intrinsic::x86_testui: {
26770       SDLoc dl(Op);
26771       SDValue Chain = Op.getOperand(0);
26772       SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
26773       SDValue Operation = DAG.getNode(X86ISD::TESTUI, dl, VTs, Chain);
26774       SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
26775       return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
26776                          Operation.getValue(1));
26777     }
26778     case Intrinsic::x86_atomic_bts_rm:
26779     case Intrinsic::x86_atomic_btc_rm:
26780     case Intrinsic::x86_atomic_btr_rm: {
26781       SDLoc DL(Op);
26782       MVT VT = Op.getSimpleValueType();
26783       SDValue Chain = Op.getOperand(0);
26784       SDValue Op1 = Op.getOperand(2);
26785       SDValue Op2 = Op.getOperand(3);
26786       unsigned Opc = IntNo == Intrinsic::x86_atomic_bts_rm   ? X86ISD::LBTS_RM
26787                      : IntNo == Intrinsic::x86_atomic_btc_rm ? X86ISD::LBTC_RM
26788                                                              : X86ISD::LBTR_RM;
26789       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26790       SDValue Res =
26791           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
26792                                   {Chain, Op1, Op2}, VT, MMO);
26793       Chain = Res.getValue(1);
26794       Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
26795       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
26796     }
26797     case Intrinsic::x86_atomic_bts:
26798     case Intrinsic::x86_atomic_btc:
26799     case Intrinsic::x86_atomic_btr: {
26800       SDLoc DL(Op);
26801       MVT VT = Op.getSimpleValueType();
26802       SDValue Chain = Op.getOperand(0);
26803       SDValue Op1 = Op.getOperand(2);
26804       SDValue Op2 = Op.getOperand(3);
26805       unsigned Opc = IntNo == Intrinsic::x86_atomic_bts   ? X86ISD::LBTS
26806                      : IntNo == Intrinsic::x86_atomic_btc ? X86ISD::LBTC
26807                                                           : X86ISD::LBTR;
26808       SDValue Size = DAG.getConstant(VT.getScalarSizeInBits(), DL, MVT::i32);
26809       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26810       SDValue Res =
26811           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
26812                                   {Chain, Op1, Op2, Size}, VT, MMO);
26813       Chain = Res.getValue(1);
26814       Res = DAG.getZExtOrTrunc(getSETCC(X86::COND_B, Res, DL, DAG), DL, VT);
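      // CF holds the original value of the tested bit. Shift the 0/1 result
      // back into bit position Imm so the returned value has the old bit in
      // its original position.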
26815       unsigned Imm = Op2->getAsZExtVal();
26816       if (Imm)
26817         Res = DAG.getNode(ISD::SHL, DL, VT, Res,
26818                           DAG.getShiftAmountConstant(Imm, VT, DL));
26819       return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Res, Chain);
26820     }
26821     case Intrinsic::x86_cmpccxadd32:
26822     case Intrinsic::x86_cmpccxadd64: {
26823       SDLoc DL(Op);
26824       SDValue Chain = Op.getOperand(0);
26825       SDValue Addr = Op.getOperand(2);
26826       SDValue Src1 = Op.getOperand(3);
26827       SDValue Src2 = Op.getOperand(4);
26828       SDValue CC = Op.getOperand(5);
26829       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26830       SDValue Operation = DAG.getMemIntrinsicNode(
26831           X86ISD::CMPCCXADD, DL, Op->getVTList(), {Chain, Addr, Src1, Src2, CC},
26832           MVT::i32, MMO);
26833       return Operation;
26834     }
26835     case Intrinsic::x86_aadd32:
26836     case Intrinsic::x86_aadd64:
26837     case Intrinsic::x86_aand32:
26838     case Intrinsic::x86_aand64:
26839     case Intrinsic::x86_aor32:
26840     case Intrinsic::x86_aor64:
26841     case Intrinsic::x86_axor32:
26842     case Intrinsic::x86_axor64: {
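      // These map to the RAO-INT AADD/AAND/AOR/AXOR instructions, which update
      // memory atomically but produce no value, so only a chained node is
      // built.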
26843       SDLoc DL(Op);
26844       SDValue Chain = Op.getOperand(0);
26845       SDValue Op1 = Op.getOperand(2);
26846       SDValue Op2 = Op.getOperand(3);
26847       MVT VT = Op2.getSimpleValueType();
26848       unsigned Opc = 0;
26849       switch (IntNo) {
26850       default:
26851         llvm_unreachable("Unknown Intrinsic");
26852       case Intrinsic::x86_aadd32:
26853       case Intrinsic::x86_aadd64:
26854         Opc = X86ISD::AADD;
26855         break;
26856       case Intrinsic::x86_aand32:
26857       case Intrinsic::x86_aand64:
26858         Opc = X86ISD::AAND;
26859         break;
26860       case Intrinsic::x86_aor32:
26861       case Intrinsic::x86_aor64:
26862         Opc = X86ISD::AOR;
26863         break;
26864       case Intrinsic::x86_axor32:
26865       case Intrinsic::x86_axor64:
26866         Opc = X86ISD::AXOR;
26867         break;
26868       }
26869       MachineMemOperand *MMO = cast<MemSDNode>(Op)->getMemOperand();
26870       return DAG.getMemIntrinsicNode(Opc, DL, Op->getVTList(),
26871                                      {Chain, Op1, Op2}, VT, MMO);
26872     }
26873     case Intrinsic::x86_atomic_add_cc:
26874     case Intrinsic::x86_atomic_sub_cc:
26875     case Intrinsic::x86_atomic_or_cc:
26876     case Intrinsic::x86_atomic_and_cc:
26877     case Intrinsic::x86_atomic_xor_cc: {
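      // Lower to the corresponding LOCK-prefixed RMW node and materialize the
      // requested EFLAGS condition (CC) as the boolean result, plus the chain.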
26878       SDLoc DL(Op);
26879       SDValue Chain = Op.getOperand(0);
26880       SDValue Op1 = Op.getOperand(2);
26881       SDValue Op2 = Op.getOperand(3);
26882       X86::CondCode CC = (X86::CondCode)Op.getConstantOperandVal(4);
26883       MVT VT = Op2.getSimpleValueType();
26884       unsigned Opc = 0;
26885       switch (IntNo) {
26886       default:
26887         llvm_unreachable("Unknown Intrinsic");
26888       case Intrinsic::x86_atomic_add_cc:
26889         Opc = X86ISD::LADD;
26890         break;
26891       case Intrinsic::x86_atomic_sub_cc:
26892         Opc = X86ISD::LSUB;
26893         break;
26894       case Intrinsic::x86_atomic_or_cc:
26895         Opc = X86ISD::LOR;
26896         break;
26897       case Intrinsic::x86_atomic_and_cc:
26898         Opc = X86ISD::LAND;
26899         break;
26900       case Intrinsic::x86_atomic_xor_cc:
26901         Opc = X86ISD::LXOR;
26902         break;
26903       }
26904       MachineMemOperand *MMO = cast<MemIntrinsicSDNode>(Op)->getMemOperand();
26905       SDValue LockArith =
26906           DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::i32, MVT::Other),
26907                                   {Chain, Op1, Op2}, VT, MMO);
26908       Chain = LockArith.getValue(1);
26909       return DAG.getMergeValues({getSETCC(CC, LockArith, DL, DAG), Chain}, DL);
26910     }
26911     }
26912     return SDValue();
26913   }
26914 
26915   SDLoc dl(Op);
26916   switch(IntrData->Type) {
26917   default: llvm_unreachable("Unknown Intrinsic Type");
26918   case RDSEED:
26919   case RDRAND: {
26920     // Emit the node with the right value type.
26921     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
26922     SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
26923 
26924     // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
26925     // Otherwise return the RDRAND/RDSEED result, which is always 0, cast to i32.
26926     SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
26927                      DAG.getConstant(1, dl, Op->getValueType(1)),
26928                      DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
26929                      SDValue(Result.getNode(), 1)};
26930     SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
26931 
26932     // Return { result, isValid, chain }.
26933     return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
26934                        SDValue(Result.getNode(), 2));
26935   }
26936   case GATHER_AVX2: {
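    // The AVX2 gathers read the same (src, base, index, mask, scale) operands
    // as the GATHER case below.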
26937     SDValue Chain = Op.getOperand(0);
26938     SDValue Src   = Op.getOperand(2);
26939     SDValue Base  = Op.getOperand(3);
26940     SDValue Index = Op.getOperand(4);
26941     SDValue Mask  = Op.getOperand(5);
26942     SDValue Scale = Op.getOperand(6);
26943     return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
26944                              Scale, Chain, Subtarget);
26945   }
26946   case GATHER: {
26947     // gather(v1, mask, index, base, scale);
26948     SDValue Chain = Op.getOperand(0);
26949     SDValue Src   = Op.getOperand(2);
26950     SDValue Base  = Op.getOperand(3);
26951     SDValue Index = Op.getOperand(4);
26952     SDValue Mask  = Op.getOperand(5);
26953     SDValue Scale = Op.getOperand(6);
26954     return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
26955                          Chain, Subtarget);
26956   }
26957   case SCATTER: {
26958     // scatter(base, mask, index, v1, scale);
26959     SDValue Chain = Op.getOperand(0);
26960     SDValue Base  = Op.getOperand(2);
26961     SDValue Mask  = Op.getOperand(3);
26962     SDValue Index = Op.getOperand(4);
26963     SDValue Src   = Op.getOperand(5);
26964     SDValue Scale = Op.getOperand(6);
26965     return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
26966                           Scale, Chain, Subtarget);
26967   }
26968   case PREFETCH: {
26969     const APInt &HintVal = Op.getConstantOperandAPInt(6);
26970     assert((HintVal == 2 || HintVal == 3) &&
26971            "Wrong prefetch hint in intrinsic: should be 2 or 3");
26972     unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
26973     SDValue Chain = Op.getOperand(0);
26974     SDValue Mask  = Op.getOperand(2);
26975     SDValue Index = Op.getOperand(3);
26976     SDValue Base  = Op.getOperand(4);
26977     SDValue Scale = Op.getOperand(5);
26978     return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
26979                            Subtarget);
26980   }
26981   // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
26982   case RDTSC: {
26983     SmallVector<SDValue, 2> Results;
26984     getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
26985                             Results);
26986     return DAG.getMergeValues(Results, dl);
26987   }
26988   // Read Performance Monitoring Counters.
26989   case RDPMC:
26990   // Read Processor Register.
26991   case RDPRU:
26992   // GetExtended Control Register.
26993   case XGETBV: {
26994     SmallVector<SDValue, 2> Results;
26995 
26996     // RDPMC uses ECX to select the index of the performance counter to read.
26997     // RDPRU uses ECX to select the processor register to read.
26998     // XGETBV uses ECX to select the index of the XCR register to return.
26999     // The result is stored into registers EDX:EAX.
27000     expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
27001                                 Subtarget, Results);
27002     return DAG.getMergeValues(Results, dl);
27003   }
27004   // XTEST intrinsics.
27005   case XTEST: {
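    // XTEST clears ZF when executing inside an RTM/HLE transaction and sets it
    // otherwise, so COND_NE yields 1 exactly when a transaction is active.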
27006     SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
27007     SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
27008 
27009     SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
27010     SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
27011     return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
27012                        Ret, SDValue(InTrans.getNode(), 1));
27013   }
27014   case TRUNCATE_TO_MEM_VI8:
27015   case TRUNCATE_TO_MEM_VI16:
27016   case TRUNCATE_TO_MEM_VI32: {
27017     SDValue Mask = Op.getOperand(4);
27018     SDValue DataToTruncate = Op.getOperand(3);
27019     SDValue Addr = Op.getOperand(2);
27020     SDValue Chain = Op.getOperand(0);
27021 
27022     MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
27023     assert(MemIntr && "Expected MemIntrinsicSDNode!");
27024 
27025     EVT MemVT  = MemIntr->getMemoryVT();
27026 
27027     uint16_t TruncationOp = IntrData->Opc0;
27028     switch (TruncationOp) {
27029     case X86ISD::VTRUNC: {
27030       if (isAllOnesConstant(Mask)) // return just a truncate store
27031         return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
27032                                  MemIntr->getMemOperand());
27033 
27034       MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
27035       SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27036       SDValue Offset = DAG.getUNDEF(VMask.getValueType());
27037 
27038       return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
27039                                 MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
27040                                 true /* truncating */);
27041     }
27042     case X86ISD::VTRUNCUS:
27043     case X86ISD::VTRUNCS: {
27044       bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
27045       if (isAllOnesConstant(Mask))
27046         return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
27047                                MemIntr->getMemOperand(), DAG);
27048 
27049       MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
27050       SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
27051 
27052       return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
27053                                    VMask, MemVT, MemIntr->getMemOperand(), DAG);
27054     }
27055     default:
27056       llvm_unreachable("Unsupported truncstore intrinsic");
27057     }
27058   }
27059   }
27060 }
27061 
27062 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
27063                                            SelectionDAG &DAG) const {
27064   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
27065   MFI.setReturnAddressIsTaken(true);
27066 
27067   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
27068     return SDValue();
27069 
27070   unsigned Depth = Op.getConstantOperandVal(0);
27071   SDLoc dl(Op);
27072   EVT PtrVT = getPointerTy(DAG.getDataLayout());
27073 
27074   if (Depth > 0) {
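    // For Depth > 0, walk up the frame chain via FRAMEADDR and load the slot
    // just above the saved frame pointer (FrameAddr + SlotSize), which holds
    // that frame's return address.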
27075     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
27076     const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27077     SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
27078     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
27079                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
27080                        MachinePointerInfo());
27081   }
27082 
27083   // Just load the return address.
27084   SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
27085   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
27086                      MachinePointerInfo());
27087 }
27088 
27089 SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
27090                                                  SelectionDAG &DAG) const {
27091   DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
27092   return getReturnAddressFrameIndex(DAG);
27093 }
27094 
27095 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
27096   MachineFunction &MF = DAG.getMachineFunction();
27097   MachineFrameInfo &MFI = MF.getFrameInfo();
27098   X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
27099   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27100   EVT VT = Op.getValueType();
27101 
27102   MFI.setFrameAddressIsTaken(true);
27103 
27104   if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
27105     // Depth > 0 makes no sense on targets which use Windows unwind codes.  It
27106     // is not possible to crawl up the stack without looking at the unwind codes
27107     // simultaneously.
27108     int FrameAddrIndex = FuncInfo->getFAIndex();
27109     if (!FrameAddrIndex) {
27110       // Set up a frame object for the return address.
27111       unsigned SlotSize = RegInfo->getSlotSize();
27112       FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
27113           SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
27114       FuncInfo->setFAIndex(FrameAddrIndex);
27115     }
27116     return DAG.getFrameIndex(FrameAddrIndex, VT);
27117   }
27118 
27119   unsigned FrameReg =
27120       RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
27121   SDLoc dl(Op);  // FIXME probably not meaningful
27122   unsigned Depth = Op.getConstantOperandVal(0);
27123   assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
27124           (FrameReg == X86::EBP && VT == MVT::i32)) &&
27125          "Invalid Frame Register!");
27126   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
27127   while (Depth--)
27128     FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
27129                             MachinePointerInfo());
27130   return FrameAddr;
27131 }
27132 
27133 // FIXME? Maybe this could be a TableGen attribute on some registers and
27134 // this table could be generated automatically from RegInfo.
27135 Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
27136                                               const MachineFunction &MF) const {
27137   const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
27138 
27139   Register Reg = StringSwitch<unsigned>(RegName)
27140                      .Case("esp", X86::ESP)
27141                      .Case("rsp", X86::RSP)
27142                      .Case("ebp", X86::EBP)
27143                      .Case("rbp", X86::RBP)
27144                      .Case("r14", X86::R14)
27145                      .Case("r15", X86::R15)
27146                      .Default(0);
27147 
27148   if (Reg == X86::EBP || Reg == X86::RBP) {
27149     if (!TFI.hasFP(MF))
27150       report_fatal_error("register " + StringRef(RegName) +
27151                          " is allocatable: function has no frame pointer");
27152 #ifndef NDEBUG
27153     else {
27154       const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27155       Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
27156       assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
27157              "Invalid Frame Register!");
27158     }
27159 #endif
27160   }
27161 
27162   if (Reg)
27163     return Reg;
27164 
27165   report_fatal_error("Invalid register name global variable");
27166 }
27167 
27168 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
27169                                                      SelectionDAG &DAG) const {
27170   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27171   return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
27172 }
27173 
27174 Register X86TargetLowering::getExceptionPointerRegister(
27175     const Constant *PersonalityFn) const {
27176   if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
27177     return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
27178 
27179   return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
27180 }
27181 
27182 Register X86TargetLowering::getExceptionSelectorRegister(
27183     const Constant *PersonalityFn) const {
27184   // Funclet personalities don't use selectors (the runtime does the selection).
27185   if (isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)))
27186     return X86::NoRegister;
27187   return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
27188 }
27189 
27190 bool X86TargetLowering::needsFixedCatchObjects() const {
27191   return Subtarget.isTargetWin64();
27192 }
27193 
27194 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
27195   SDValue Chain     = Op.getOperand(0);
27196   SDValue Offset    = Op.getOperand(1);
27197   SDValue Handler   = Op.getOperand(2);
27198   SDLoc dl      (Op);
27199 
27200   EVT PtrVT = getPointerTy(DAG.getDataLayout());
27201   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
27202   Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
27203   assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
27204           (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
27205          "Invalid Frame Register!");
27206   SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
27207   Register StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
27208 
27209   SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
27210                                  DAG.getIntPtrConstant(RegInfo->getSlotSize(),
27211                                                        dl));
27212   StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
27213   Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
27214   Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
27215 
27216   return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
27217                      DAG.getRegister(StoreAddrReg, PtrVT));
27218 }
27219 
27220 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
27221                                                SelectionDAG &DAG) const {
27222   SDLoc DL(Op);
27223   // If the subtarget is not 64-bit, we may need the global base reg
27224   // after the isel-expand-pseudo step, i.e., after the CGBR pass has run.
27225   // Therefore, ask for the GlobalBaseReg now, so that the pass
27226   // inserts the code for us in case we need it.
27227   // Otherwise, we would end up referencing a virtual register
27228   // that is never defined!
27229   if (!Subtarget.is64Bit()) {
27230     const X86InstrInfo *TII = Subtarget.getInstrInfo();
27231     (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
27232   }
27233   return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
27234                      DAG.getVTList(MVT::i32, MVT::Other),
27235                      Op.getOperand(0), Op.getOperand(1));
27236 }
27237 
27238 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
27239                                                 SelectionDAG &DAG) const {
27240   SDLoc DL(Op);
27241   return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
27242                      Op.getOperand(0), Op.getOperand(1));
27243 }
27244 
27245 SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
27246                                                        SelectionDAG &DAG) const {
27247   SDLoc DL(Op);
27248   return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
27249                      Op.getOperand(0));
27250 }
27251 
27252 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
27253   return Op.getOperand(0);
27254 }
27255 
27256 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
27257                                                 SelectionDAG &DAG) const {
27258   SDValue Root = Op.getOperand(0);
27259   SDValue Trmp = Op.getOperand(1); // trampoline
27260   SDValue FPtr = Op.getOperand(2); // nested function
27261   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
27262   SDLoc dl (Op);
27263 
27264   const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
27265   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
27266 
27267   if (Subtarget.is64Bit()) {
27268     SDValue OutChains[6];
27269 
27270     // Large code-model.
27271     const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
27272     const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
27273 
27274     const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
27275     const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
27276 
27277     const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
27278 
27279     // Load the pointer to the nested function into R11.
27280     unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
27281     SDValue Addr = Trmp;
27282     OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
27283                                 Addr, MachinePointerInfo(TrmpAddr));
27284 
27285     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27286                        DAG.getConstant(2, dl, MVT::i64));
27287     OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
27288                                 MachinePointerInfo(TrmpAddr, 2), Align(2));
27289 
27290     // Load the 'nest' parameter value into R10.
27291     // R10 is specified in X86CallingConv.td
27292     OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
27293     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27294                        DAG.getConstant(10, dl, MVT::i64));
27295     OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
27296                                 Addr, MachinePointerInfo(TrmpAddr, 10));
27297 
27298     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27299                        DAG.getConstant(12, dl, MVT::i64));
27300     OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
27301                                 MachinePointerInfo(TrmpAddr, 12), Align(2));
27302 
27303     // Jump to the nested function.
27304     OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
27305     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27306                        DAG.getConstant(20, dl, MVT::i64));
27307     OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
27308                                 Addr, MachinePointerInfo(TrmpAddr, 20));
27309 
27310     unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
27311     Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
27312                        DAG.getConstant(22, dl, MVT::i64));
27313     OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
27314                                 Addr, MachinePointerInfo(TrmpAddr, 22));
27315 
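    // Roughly, the bytes stored above decode to the following thunk
    // (a sketch of the intended encoding, not emitted verbatim here):
    //   movabsq $<fptr>, %r11
    //   movabsq $<nest>, %r10
    //   jmpq    *%r11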
27316     return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
27317   } else {
27318     const Function *Func =
27319       cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
27320     CallingConv::ID CC = Func->getCallingConv();
27321     unsigned NestReg;
27322 
27323     switch (CC) {
27324     default:
27325       llvm_unreachable("Unsupported calling convention");
27326     case CallingConv::C:
27327     case CallingConv::X86_StdCall: {
27328       // Pass 'nest' parameter in ECX.
27329       // Must be kept in sync with X86CallingConv.td
27330       NestReg = X86::ECX;
27331 
27332       // Check that ECX wasn't needed by an 'inreg' parameter.
27333       FunctionType *FTy = Func->getFunctionType();
27334       const AttributeList &Attrs = Func->getAttributes();
27335 
27336       if (!Attrs.isEmpty() && !Func->isVarArg()) {
27337         unsigned InRegCount = 0;
27338         unsigned Idx = 0;
27339 
27340         for (FunctionType::param_iterator I = FTy->param_begin(),
27341              E = FTy->param_end(); I != E; ++I, ++Idx)
27342           if (Attrs.hasParamAttr(Idx, Attribute::InReg)) {
27343             const DataLayout &DL = DAG.getDataLayout();
27344             // FIXME: should only count parameters that are lowered to integers.
27345             InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
27346           }
27347 
27348         if (InRegCount > 2) {
27349           report_fatal_error("Nest register in use - reduce number of inreg"
27350                              " parameters!");
27351         }
27352       }
27353       break;
27354     }
27355     case CallingConv::X86_FastCall:
27356     case CallingConv::X86_ThisCall:
27357     case CallingConv::Fast:
27358     case CallingConv::Tail:
27359     case CallingConv::SwiftTail:
27360       // Pass 'nest' parameter in EAX.
27361       // Must be kept in sync with X86CallingConv.td
27362       NestReg = X86::EAX;
27363       break;
27364     }
27365 
27366     SDValue OutChains[4];
27367     SDValue Addr, Disp;
27368 
27369     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27370                        DAG.getConstant(10, dl, MVT::i32));
27371     Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
27372 
27373     // This is storing the opcode for MOV32ri.
27374     const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
27375     const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
27376     OutChains[0] =
27377         DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
27378                      Trmp, MachinePointerInfo(TrmpAddr));
27379 
27380     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27381                        DAG.getConstant(1, dl, MVT::i32));
27382     OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
27383                                 MachinePointerInfo(TrmpAddr, 1), Align(1));
27384 
27385     const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
27386     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27387                        DAG.getConstant(5, dl, MVT::i32));
27388     OutChains[2] =
27389         DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8), Addr,
27390                      MachinePointerInfo(TrmpAddr, 5), Align(1));
27391 
27392     Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
27393                        DAG.getConstant(6, dl, MVT::i32));
27394     OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
27395                                 MachinePointerInfo(TrmpAddr, 6), Align(1));
27396 
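    // Roughly, the bytes stored above decode to the following thunk
    // (a sketch of the intended encoding):
    //   movl $<nest>, %ecx or %eax
    //   jmp  <fptr>            ; PC-relative, via the Disp computed above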
27397     return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
27398   }
27399 }
27400 
27401 SDValue X86TargetLowering::LowerGET_ROUNDING(SDValue Op,
27402                                              SelectionDAG &DAG) const {
27403   /*
27404    The rounding mode is in bits 11:10 of the x87 FP Control Word (FPCW),
27405    and has the following settings:
27406      00 Round to nearest
27407      01 Round to -inf
27408      10 Round to +inf
27409      11 Round to 0
27410 
27411   GET_ROUNDING, on the other hand, expects the following:
27412     -1 Undefined
27413      0 Round to 0
27414      1 Round to nearest
27415      2 Round to +inf
27416      3 Round to -inf
27417 
27418   To perform the conversion, we use a packed lookup table of the four 2-bit
27419   values that we can index by FPCW[11:10]
27420     0x2d --> (0b00,10,11,01) --> (0,2,3,1) >> FPCW[11:10]
27421 
27422     (0x2d >> ((FPCW & 0xc00) >> 9)) & 3
27423   */
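  // Quick sanity check of the formula above: if FPCW[11:10] == 0b10 (round to
  // +inf), then (FPCW & 0xc00) >> 9 == 4 and (0x2d >> 4) & 3 == 2, which is
  // GET_ROUNDING's encoding for "round to +inf".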
27424 
27425   MachineFunction &MF = DAG.getMachineFunction();
27426   MVT VT = Op.getSimpleValueType();
27427   SDLoc DL(Op);
27428 
27429   // Save FP Control Word to stack slot
27430   int SSFI = MF.getFrameInfo().CreateStackObject(2, Align(2), false);
27431   SDValue StackSlot =
27432       DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
27433 
27434   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
27435 
27436   SDValue Chain = Op.getOperand(0);
27437   SDValue Ops[] = {Chain, StackSlot};
27438   Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
27439                                   DAG.getVTList(MVT::Other), Ops, MVT::i16, MPI,
27440                                   Align(2), MachineMemOperand::MOStore);
27441 
27442   // Load FP Control Word from stack slot
27443   SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, Align(2));
27444   Chain = CWD.getValue(1);
27445 
27446   // Mask and turn the control bits into a shift for the lookup table.
27447   SDValue Shift =
27448     DAG.getNode(ISD::SRL, DL, MVT::i16,
27449                 DAG.getNode(ISD::AND, DL, MVT::i16,
27450                             CWD, DAG.getConstant(0xc00, DL, MVT::i16)),
27451                 DAG.getConstant(9, DL, MVT::i8));
27452   Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Shift);
27453 
27454   SDValue LUT = DAG.getConstant(0x2d, DL, MVT::i32);
27455   SDValue RetVal =
27456     DAG.getNode(ISD::AND, DL, MVT::i32,
27457                 DAG.getNode(ISD::SRL, DL, MVT::i32, LUT, Shift),
27458                 DAG.getConstant(3, DL, MVT::i32));
27459 
27460   RetVal = DAG.getZExtOrTrunc(RetVal, DL, VT);
27461 
27462   return DAG.getMergeValues({RetVal, Chain}, DL);
27463 }
27464 
27465 SDValue X86TargetLowering::LowerSET_ROUNDING(SDValue Op,
27466                                              SelectionDAG &DAG) const {
27467   MachineFunction &MF = DAG.getMachineFunction();
27468   SDLoc DL(Op);
27469   SDValue Chain = Op.getNode()->getOperand(0);
27470 
27471   // The FP control word may be set only from data in memory, so we need to
27472   // allocate stack space in which to save/load the FP control word.
27473   int OldCWFrameIdx = MF.getFrameInfo().CreateStackObject(4, Align(4), false);
27474   SDValue StackSlot =
27475       DAG.getFrameIndex(OldCWFrameIdx, getPointerTy(DAG.getDataLayout()));
27476   MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, OldCWFrameIdx);
27477   MachineMemOperand *MMO =
27478       MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 2, Align(2));
27479 
27480   // Store FP control word into memory.
27481   SDValue Ops[] = {Chain, StackSlot};
27482   Chain = DAG.getMemIntrinsicNode(
27483       X86ISD::FNSTCW16m, DL, DAG.getVTList(MVT::Other), Ops, MVT::i16, MMO);
27484 
27485   // Load FP Control Word from stack slot and clear RM field (bits 11:10).
27486   SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI);
27487   Chain = CWD.getValue(1);
27488   CWD = DAG.getNode(ISD::AND, DL, MVT::i16, CWD.getValue(0),
27489                     DAG.getConstant(0xf3ff, DL, MVT::i16));
27490 
27491   // Calculate new rounding mode.
27492   SDValue NewRM = Op.getNode()->getOperand(1);
27493   SDValue RMBits;
27494   if (auto *CVal = dyn_cast<ConstantSDNode>(NewRM)) {
27495     uint64_t RM = CVal->getZExtValue();
27496     int FieldVal;
27497     switch (static_cast<RoundingMode>(RM)) {
27498     case RoundingMode::NearestTiesToEven: FieldVal = X86::rmToNearest; break;
27499     case RoundingMode::TowardNegative:    FieldVal = X86::rmDownward; break;
27500     case RoundingMode::TowardPositive:    FieldVal = X86::rmUpward; break;
27501     case RoundingMode::TowardZero:        FieldVal = X86::rmTowardZero; break;
27502     default:
27503       llvm_unreachable("rounding mode is not supported by X86 hardware");
27504     }
27505     RMBits = DAG.getConstant(FieldVal, DL, MVT::i16);
27506   } else {
27507     // Need to convert argument into bits of control word:
27508     //    0 Round to 0       -> 11
27509     //    1 Round to nearest -> 00
27510     //    2 Round to +inf    -> 10
27511     //    3 Round to -inf    -> 01
27512     // The 2-bit value then needs to be shifted so that it occupies bits 11:10.
27513     // To make the conversion, put all these values into a value 0xc9 and shift
27514     // it left depending on the rounding mode:
27515     //    (0xc9 << 4) & 0xc00 = X86::rmTowardZero
27516     //    (0xc9 << 6) & 0xc00 = X86::rmToNearest
27517     //    ...
27518     // (0xc9 << (2 * NewRM + 4)) & 0xc00
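    // For example, NewRM == 2 (round to +inf) gives a shift of 2*2+4 == 8, and
    // (0xc9 << 8) & 0xc00 == 0x800, i.e. RC bits 11:10 == 0b10 (upward).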
27519     SDValue ShiftValue =
27520         DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
27521                     DAG.getNode(ISD::ADD, DL, MVT::i32,
27522                                 DAG.getNode(ISD::SHL, DL, MVT::i32, NewRM,
27523                                             DAG.getConstant(1, DL, MVT::i8)),
27524                                 DAG.getConstant(4, DL, MVT::i32)));
27525     SDValue Shifted =
27526         DAG.getNode(ISD::SHL, DL, MVT::i16, DAG.getConstant(0xc9, DL, MVT::i16),
27527                     ShiftValue);
27528     RMBits = DAG.getNode(ISD::AND, DL, MVT::i16, Shifted,
27529                          DAG.getConstant(0xc00, DL, MVT::i16));
27530   }
27531 
27532   // Update rounding mode bits and store the new FP Control Word into stack.
27533   CWD = DAG.getNode(ISD::OR, DL, MVT::i16, CWD, RMBits);
27534   Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(2));
27535 
27536   // Load FP control word from the slot.
27537   SDValue OpsLD[] = {Chain, StackSlot};
27538   MachineMemOperand *MMOL =
27539       MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 2, Align(2));
27540   Chain = DAG.getMemIntrinsicNode(
27541       X86ISD::FLDCW16m, DL, DAG.getVTList(MVT::Other), OpsLD, MVT::i16, MMOL);
27542 
27543   // If target supports SSE, set MXCSR as well. Rounding mode is encoded in the
27544   // same way but in bits 14:13.
27545   if (Subtarget.hasSSE1()) {
27546     // Store MXCSR into memory.
27547     Chain = DAG.getNode(
27548         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27549         DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
27550         StackSlot);
27551 
27552     // Load MXCSR from stack slot and clear RM field (bits 14:13).
27553     SDValue CWD = DAG.getLoad(MVT::i32, DL, Chain, StackSlot, MPI);
27554     Chain = CWD.getValue(1);
27555     CWD = DAG.getNode(ISD::AND, DL, MVT::i32, CWD.getValue(0),
27556                       DAG.getConstant(0xffff9fff, DL, MVT::i32));
27557 
27558     // Shift X87 RM bits from 11:10 to 14:13.
27559     RMBits = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, RMBits);
27560     RMBits = DAG.getNode(ISD::SHL, DL, MVT::i32, RMBits,
27561                          DAG.getConstant(3, DL, MVT::i8));
27562 
27563     // Update rounding mode bits and store the new FP Control Word into stack.
27564     CWD = DAG.getNode(ISD::OR, DL, MVT::i32, CWD, RMBits);
27565     Chain = DAG.getStore(Chain, DL, CWD, StackSlot, MPI, Align(4));
27566 
27567     // Load MXCSR from the slot.
27568     Chain = DAG.getNode(
27569         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27570         DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
27571         StackSlot);
27572   }
27573 
27574   return Chain;
27575 }
27576 
27577 const unsigned X87StateSize = 28;
27578 const unsigned FPStateSize = 32;
27579 [[maybe_unused]] const unsigned FPStateSizeInBits = FPStateSize * 8;
27580 
27581 SDValue X86TargetLowering::LowerGET_FPENV_MEM(SDValue Op,
27582                                               SelectionDAG &DAG) const {
27583   MachineFunction &MF = DAG.getMachineFunction();
27584   SDLoc DL(Op);
27585   SDValue Chain = Op->getOperand(0);
27586   SDValue Ptr = Op->getOperand(1);
27587   auto *Node = cast<FPStateAccessSDNode>(Op);
27588   EVT MemVT = Node->getMemoryVT();
27589   assert(MemVT.getSizeInBits() == FPStateSizeInBits);
27590   MachineMemOperand *MMO = cast<FPStateAccessSDNode>(Op)->getMemOperand();
27591 
27592   // Get the x87 state, if present.
27593   if (Subtarget.hasX87()) {
27594     Chain =
27595         DAG.getMemIntrinsicNode(X86ISD::FNSTENVm, DL, DAG.getVTList(MVT::Other),
27596                                 {Chain, Ptr}, MemVT, MMO);
27597 
27598     // FNSTENV changes the exception mask, so load back the stored environment.
27599     MachineMemOperand::Flags NewFlags =
27600         MachineMemOperand::MOLoad |
27601         (MMO->getFlags() & ~MachineMemOperand::MOStore);
27602     MMO = MF.getMachineMemOperand(MMO, NewFlags);
27603     Chain =
27604         DAG.getMemIntrinsicNode(X86ISD::FLDENVm, DL, DAG.getVTList(MVT::Other),
27605                                 {Chain, Ptr}, MemVT, MMO);
27606   }
27607 
27608   // If target supports SSE, get MXCSR as well.
27609   if (Subtarget.hasSSE1()) {
27610     // Get pointer to the MXCSR location in memory.
27611     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
27612     SDValue MXCSRAddr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr,
27613                                     DAG.getConstant(X87StateSize, DL, PtrVT));
27614     // Store MXCSR into memory.
27615     Chain = DAG.getNode(
27616         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27617         DAG.getTargetConstant(Intrinsic::x86_sse_stmxcsr, DL, MVT::i32),
27618         MXCSRAddr);
27619   }
27620 
27621   return Chain;
27622 }
27623 
27624 static SDValue createSetFPEnvNodes(SDValue Ptr, SDValue Chain, SDLoc DL,
27625                                    EVT MemVT, MachineMemOperand *MMO,
27626                                    SelectionDAG &DAG,
27627                                    const X86Subtarget &Subtarget) {
27628   // Set the x87 state, if present.
27629   if (Subtarget.hasX87())
27630     Chain =
27631         DAG.getMemIntrinsicNode(X86ISD::FLDENVm, DL, DAG.getVTList(MVT::Other),
27632                                 {Chain, Ptr}, MemVT, MMO);
27633   // If target supports SSE, set MXCSR as well.
27634   if (Subtarget.hasSSE1()) {
27635     // Get pointer to the MXCSR location in memory.
27636     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
27637     SDValue MXCSRAddr = DAG.getNode(ISD::ADD, DL, PtrVT, Ptr,
27638                                     DAG.getConstant(X87StateSize, DL, PtrVT));
27639     // Load MXCSR from memory.
27640     Chain = DAG.getNode(
27641         ISD::INTRINSIC_VOID, DL, DAG.getVTList(MVT::Other), Chain,
27642         DAG.getTargetConstant(Intrinsic::x86_sse_ldmxcsr, DL, MVT::i32),
27643         MXCSRAddr);
27644   }
27645   return Chain;
27646 }
27647 
27648 SDValue X86TargetLowering::LowerSET_FPENV_MEM(SDValue Op,
27649                                               SelectionDAG &DAG) const {
27650   SDLoc DL(Op);
27651   SDValue Chain = Op->getOperand(0);
27652   SDValue Ptr = Op->getOperand(1);
27653   auto *Node = cast<FPStateAccessSDNode>(Op);
27654   EVT MemVT = Node->getMemoryVT();
27655   assert(MemVT.getSizeInBits() == FPStateSizeInBits);
27656   MachineMemOperand *MMO = cast<FPStateAccessSDNode>(Op)->getMemOperand();
27657   return createSetFPEnvNodes(Ptr, Chain, DL, MemVT, MMO, DAG, Subtarget);
27658 }
27659 
27660 SDValue X86TargetLowering::LowerRESET_FPENV(SDValue Op,
27661                                             SelectionDAG &DAG) const {
27662   MachineFunction &MF = DAG.getMachineFunction();
27663   SDLoc DL(Op);
27664   SDValue Chain = Op.getNode()->getOperand(0);
27665 
27666   IntegerType *ItemTy = Type::getInt32Ty(*DAG.getContext());
27667   ArrayType *FPEnvTy = ArrayType::get(ItemTy, 8);
27668   SmallVector<Constant *, 8> FPEnvVals;
27669 
27670   // x87 FPU Control Word: masks all floating-point exceptions, sets rounding to
27671   // nearest. FPU precision is set to 53 bits on Windows and to 64 bits otherwise
27672   // for compatibility with glibc.
27673   unsigned X87CW = Subtarget.isTargetWindowsMSVC() ? 0x27F : 0x37F;
27674   FPEnvVals.push_back(ConstantInt::get(ItemTy, X87CW));
27675   Constant *Zero = ConstantInt::get(ItemTy, 0);
27676   for (unsigned I = 0; I < 6; ++I)
27677     FPEnvVals.push_back(Zero);
27678 
27679   // MXCSR: masks all floating-point exceptions, sets rounding to nearest, clears
27680   // all exception flags, and sets DAZ and FTZ to 0.
27681   FPEnvVals.push_back(ConstantInt::get(ItemTy, 0x1F80));
27682   Constant *FPEnvBits = ConstantArray::get(FPEnvTy, FPEnvVals);
27683   MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
27684   SDValue Env = DAG.getConstantPool(FPEnvBits, PtrVT);
27685   MachinePointerInfo MPI =
27686       MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
27687   MachineMemOperand *MMO = MF.getMachineMemOperand(
27688       MPI, MachineMemOperand::MOStore, X87StateSize, Align(4));
27689 
27690   return createSetFPEnvNodes(Env, Chain, DL, MVT::i32, MMO, DAG, Subtarget);
27691 }
27692 
27693 /// Lower a vector CTLZ using the natively supported vector CTLZ instruction.
27694 //
27695 // i8/i16 vectors are implemented using the dword LZCNT vector instruction
27696 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
27697 // split the vector, perform the operation on its Lo and Hi parts and
27698 // concatenate the results.
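// For example, a v16i16 ctlz with AVX-512 CD roughly becomes:
//   zext to v16i32 -> vplzcntd -> trunc back to v16i16 -> sub 16,
// since the dword count overstates the i16 leading-zero count by 32 - 16 = 16.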
27699 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
27700                                          const X86Subtarget &Subtarget) {
27701   assert(Op.getOpcode() == ISD::CTLZ);
27702   SDLoc dl(Op);
27703   MVT VT = Op.getSimpleValueType();
27704   MVT EltVT = VT.getVectorElementType();
27705   unsigned NumElems = VT.getVectorNumElements();
27706 
27707   assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
27708           "Unsupported element type");
27709 
27710   // Split the vector; its Lo and Hi parts will be handled in the next iteration.
27711   if (NumElems > 16 ||
27712       (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
27713     return splitVectorIntUnary(Op, DAG);
27714 
27715   MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
27716   assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
27717           "Unsupported value type for operation");
27718 
27719   // Use the natively supported vector instruction vplzcntd.
27720   Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
27721   SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
27722   SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
27723   SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
27724 
27725   return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
27726 }
27727 
27728 // Lower CTLZ using a PSHUFB lookup table implementation.
27729 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
27730                                        const X86Subtarget &Subtarget,
27731                                        SelectionDAG &DAG) {
27732   MVT VT = Op.getSimpleValueType();
27733   int NumElts = VT.getVectorNumElements();
27734   int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
27735   MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
27736 
27737   // Per-nibble leading zero PSHUFB lookup table.
27738   const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
27739                        /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
27740                        /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
27741                        /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
27742 
27743   SmallVector<SDValue, 64> LUTVec;
27744   for (int i = 0; i < NumBytes; ++i)
27745     LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
27746   SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
27747 
27748   // Begin by bitcasting the input to a byte vector, then split those bytes
27749   // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
27750   // If the hi input nibble is zero then we add both results together, otherwise
27751   // we just take the hi result (by masking the lo result to zero before the
27752   // add).
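  // E.g. for the byte 0x1a the hi nibble is 0x1 (LUT -> 3) and is non-zero, so
  // the lo result is masked to 0 and the final count is 3 == ctlz(0x1a) for i8.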
27753   SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
27754   SDValue Zero = DAG.getConstant(0, DL, CurrVT);
27755 
27756   SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
27757   SDValue Lo = Op0;
27758   SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
27759   SDValue HiZ;
27760   if (CurrVT.is512BitVector()) {
27761     MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
27762     HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
27763     HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
27764   } else {
27765     HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
27766   }
27767 
27768   Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
27769   Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
27770   Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
27771   SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
27772 
27773   // Merge the result from vXi8 back to VT, working on the lo/hi halves
27774   // of the current vector width in the same way we did for the nibbles.
27775   // If the upper half of the input element is zero then add the halves'
27776   // leading zero counts together, otherwise just use the upper half's.
27777   // Double the width of the result until we are at target width.
27778   while (CurrVT != VT) {
27779     int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
27780     int CurrNumElts = CurrVT.getVectorNumElements();
27781     MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
27782     MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
27783     SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
27784 
27785     // Check if the upper half of the input element is zero.
27786     if (CurrVT.is512BitVector()) {
27787       MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
27788       HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
27789                          DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
27790       HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
27791     } else {
27792       HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
27793                          DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
27794     }
27795     HiZ = DAG.getBitcast(NextVT, HiZ);
27796 
27797     // Move the upper/lower halves to the lower bits as we'll be extending to
27798     // NextVT. Mask the lower result to zero if HiZ is true and add the results
27799     // together.
27800     SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
27801     SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
27802     SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
27803     R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
27804     Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
27805     CurrVT = NextVT;
27806   }
27807 
27808   return Res;
27809 }
27810 
27811 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
27812                                const X86Subtarget &Subtarget,
27813                                SelectionDAG &DAG) {
27814   MVT VT = Op.getSimpleValueType();
27815 
27816   if (Subtarget.hasCDI() &&
27817       // vXi8 vectors need to be promoted to 512-bits for vXi32.
27818       (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
27819     return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
27820 
27821   // Decompose 256-bit ops into smaller 128-bit ops.
27822   if (VT.is256BitVector() && !Subtarget.hasInt256())
27823     return splitVectorIntUnary(Op, DAG);
27824 
27825   // Decompose 512-bit ops into smaller 256-bit ops.
27826   if (VT.is512BitVector() && !Subtarget.hasBWI())
27827     return splitVectorIntUnary(Op, DAG);
27828 
27829   assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
27830   return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
27831 }
27832 
27833 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
27834                          SelectionDAG &DAG) {
27835   MVT VT = Op.getSimpleValueType();
27836   MVT OpVT = VT;
27837   unsigned NumBits = VT.getSizeInBits();
27838   SDLoc dl(Op);
27839   unsigned Opc = Op.getOpcode();
27840 
27841   if (VT.isVector())
27842     return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
27843 
27844   Op = Op.getOperand(0);
27845   if (VT == MVT::i8) {
27846     // Zero extend to i32 since there is no i8 bsr.
27847     OpVT = MVT::i32;
27848     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
27849   }
27850 
27851   // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
27852   SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
27853   Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
27854 
27855   if (Opc == ISD::CTLZ) {
27856     // If src is zero (i.e. bsr sets ZF), returns NumBits.
27857     SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
27858                      DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
27859                      Op.getValue(1)};
27860     Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
27861   }
27862 
27863   // Finally xor with NumBits-1.
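  // E.g. for i32 x == 0x00010000 the BSR result is 16 and 16 ^ 31 == 15, which
  // is ctlz(x).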
27864   Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
27865                    DAG.getConstant(NumBits - 1, dl, OpVT));
27866 
27867   if (VT == MVT::i8)
27868     Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
27869   return Op;
27870 }
27871 
27872 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
27873                          SelectionDAG &DAG) {
27874   MVT VT = Op.getSimpleValueType();
27875   unsigned NumBits = VT.getScalarSizeInBits();
27876   SDValue N0 = Op.getOperand(0);
27877   SDLoc dl(Op);
27878 
27879   assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
27880          "Only scalar CTTZ requires custom lowering");
27881 
27882   // Issue a bsf (scan bits forward) which also sets EFLAGS.
27883   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
27884   Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
27885 
27886   // If src is known never zero we can skip the CMOV.
27887   if (DAG.isKnownNeverZero(N0))
27888     return Op;
27889 
27890   // If src is zero (i.e. bsf sets ZF), returns NumBits.
27891   SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
27892                    DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
27893                    Op.getValue(1)};
27894   return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
27895 }
27896 
27897 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
27898                            const X86Subtarget &Subtarget) {
27899   MVT VT = Op.getSimpleValueType();
27900   if (VT == MVT::i16 || VT == MVT::i32)
27901     return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
27902 
27903   if (VT == MVT::v32i16 || VT == MVT::v64i8)
27904     return splitVectorIntBinary(Op, DAG);
27905 
27906   assert(Op.getSimpleValueType().is256BitVector() &&
27907          Op.getSimpleValueType().isInteger() &&
27908          "Only handle AVX 256-bit vector integer operation");
27909   return splitVectorIntBinary(Op, DAG);
27910 }
27911 
27912 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
27913                                   const X86Subtarget &Subtarget) {
27914   MVT VT = Op.getSimpleValueType();
27915   SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
27916   unsigned Opcode = Op.getOpcode();
27917   SDLoc DL(Op);
27918 
27919   if (VT == MVT::v32i16 || VT == MVT::v64i8 ||
27920       (VT.is256BitVector() && !Subtarget.hasInt256())) {
27921     assert(Op.getSimpleValueType().isInteger() &&
27922            "Only handle AVX vector integer operation");
27923     return splitVectorIntBinary(Op, DAG);
27924   }
27925 
27926   // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
27927   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
27928   EVT SetCCResultType =
27929       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
27930 
27931   unsigned BitWidth = VT.getScalarSizeInBits();
27932   if (Opcode == ISD::USUBSAT) {
27933     if (!TLI.isOperationLegal(ISD::UMAX, VT) || useVPTERNLOG(Subtarget, VT)) {
27934       // Handle a special-case with a bit-hack instead of cmp+select:
27935       // usubsat X, SMIN --> (X ^ SMIN) & (X s>> BW-1)
27936       // If the target can use VPTERNLOG, DAGToDAG will match this as
27937       // "vpsra + vpternlog" which is better than "vpmax + vpsub" with a
27938       // "broadcast" constant load.
27939       ConstantSDNode *C = isConstOrConstSplat(Y, true);
27940       if (C && C->getAPIntValue().isSignMask()) {
27941         SDValue SignMask = DAG.getConstant(C->getAPIntValue(), DL, VT);
27942         SDValue ShiftAmt = DAG.getConstant(BitWidth - 1, DL, VT);
27943         SDValue Xor = DAG.getNode(ISD::XOR, DL, VT, X, SignMask);
27944         SDValue Sra = DAG.getNode(ISD::SRA, DL, VT, X, ShiftAmt);
27945         return DAG.getNode(ISD::AND, DL, VT, Xor, Sra);
27946       }
27947     }
27948     if (!TLI.isOperationLegal(ISD::UMAX, VT)) {
27949       // usubsat X, Y --> (X >u Y) ? X - Y : 0
27950       SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
27951       SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
27952       // TODO: Move this to DAGCombiner?
27953       if (SetCCResultType == VT &&
27954           DAG.ComputeNumSignBits(Cmp) == VT.getScalarSizeInBits())
27955         return DAG.getNode(ISD::AND, DL, VT, Cmp, Sub);
27956       return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
27957     }
27958   }
27959 
27960   if ((Opcode == ISD::SADDSAT || Opcode == ISD::SSUBSAT) &&
27961       (!VT.isVector() || VT == MVT::v2i64)) {
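    // On overflow the wrapped result has the opposite sign of the true result,
    // so a negative SumDiff means we overflowed upward and must clamp to the
    // signed max, and vice versa.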
27962     APInt MinVal = APInt::getSignedMinValue(BitWidth);
27963     APInt MaxVal = APInt::getSignedMaxValue(BitWidth);
27964     SDValue Zero = DAG.getConstant(0, DL, VT);
27965     SDValue Result =
27966         DAG.getNode(Opcode == ISD::SADDSAT ? ISD::SADDO : ISD::SSUBO, DL,
27967                     DAG.getVTList(VT, SetCCResultType), X, Y);
27968     SDValue SumDiff = Result.getValue(0);
27969     SDValue Overflow = Result.getValue(1);
27970     SDValue SatMin = DAG.getConstant(MinVal, DL, VT);
27971     SDValue SatMax = DAG.getConstant(MaxVal, DL, VT);
27972     SDValue SumNeg =
27973         DAG.getSetCC(DL, SetCCResultType, SumDiff, Zero, ISD::SETLT);
27974     Result = DAG.getSelect(DL, VT, SumNeg, SatMax, SatMin);
27975     return DAG.getSelect(DL, VT, Overflow, Result, SumDiff);
27976   }
27977 
27978   // Use default expansion.
27979   return SDValue();
27980 }
27981 
27982 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
27983                         SelectionDAG &DAG) {
27984   MVT VT = Op.getSimpleValueType();
27985   if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
27986     // Since X86 does not have CMOV for 8-bit integer, we don't convert
27987     // 8-bit integer abs to NEG and CMOV.
27988     SDLoc DL(Op);
27989     SDValue N0 = Op.getOperand(0);
27990     SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
27991                               DAG.getConstant(0, DL, VT), N0);
27992     SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_NS, DL, MVT::i8),
27993                      SDValue(Neg.getNode(), 1)};
27994     return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
27995   }
27996 
27997   // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
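  // The blend mask here is X itself, so elements whose sign bit is set pick up
  // the negated value 0-X while non-negative elements keep X unchanged.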
27998   if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
27999     SDLoc DL(Op);
28000     SDValue Src = Op.getOperand(0);
28001     SDValue Sub =
28002         DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
28003     return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
28004   }
28005 
28006   if (VT.is256BitVector() && !Subtarget.hasInt256()) {
28007     assert(VT.isInteger() &&
28008            "Only handle AVX 256-bit vector integer operation");
28009     return splitVectorIntUnary(Op, DAG);
28010   }
28011 
28012   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
28013     return splitVectorIntUnary(Op, DAG);
28014 
28015   // Default to expand.
28016   return SDValue();
28017 }
28018 
28019 static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget,
28020                         SelectionDAG &DAG) {
28021   MVT VT = Op.getSimpleValueType();
28022 
28023   // For AVX1 cases, split to use legal ops.
28024   if (VT.is256BitVector() && !Subtarget.hasInt256())
28025     return splitVectorIntBinary(Op, DAG);
28026 
28027   if (VT == MVT::v32i16 || VT == MVT::v64i8)
28028     return splitVectorIntBinary(Op, DAG);
28029 
28030   // Default to expand.
28031   return SDValue();
28032 }
28033 
28034 static SDValue LowerMINMAX(SDValue Op, const X86Subtarget &Subtarget,
28035                            SelectionDAG &DAG) {
28036   MVT VT = Op.getSimpleValueType();
28037 
28038   // For AVX1 cases, split to use legal ops.
28039   if (VT.is256BitVector() && !Subtarget.hasInt256())
28040     return splitVectorIntBinary(Op, DAG);
28041 
28042   if (VT == MVT::v32i16 || VT == MVT::v64i8)
28043     return splitVectorIntBinary(Op, DAG);
28044 
28045   // Default to expand.
28046   return SDValue();
28047 }
28048 
28049 static SDValue LowerFMINIMUM_FMAXIMUM(SDValue Op, const X86Subtarget &Subtarget,
28050                                       SelectionDAG &DAG) {
28051   assert((Op.getOpcode() == ISD::FMAXIMUM || Op.getOpcode() == ISD::FMINIMUM) &&
28052          "Expected FMAXIMUM or FMINIMUM opcode");
28053   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28054   EVT VT = Op.getValueType();
28055   SDValue X = Op.getOperand(0);
28056   SDValue Y = Op.getOperand(1);
28057   SDLoc DL(Op);
28058   uint64_t SizeInBits = VT.getScalarSizeInBits();
28059   APInt PreferredZero = APInt::getZero(SizeInBits);
28060   APInt OppositeZero = PreferredZero;
28061   EVT IVT = VT.changeTypeToInteger();
28062   X86ISD::NodeType MinMaxOp;
28063   if (Op.getOpcode() == ISD::FMAXIMUM) {
28064     MinMaxOp = X86ISD::FMAX;
28065     OppositeZero.setSignBit();
28066   } else {
28067     PreferredZero.setSignBit();
28068     MinMaxOp = X86ISD::FMIN;
28069   }
28070   EVT SetCCType =
28071       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
28072 
28073   // The tables below show the expected result of Max in cases of NaN and
28074   // signed zeros.
28075   //
28076   //                 Y                       Y
28077   //             Num   xNaN              +0     -0
28078   //          ---------------         ---------------
28079   //     Num  |  Max |   Y  |     +0  |  +0  |  +0  |
28080   // X        ---------------  X      ---------------
28081   //    xNaN  |   X  |  X/Y |     -0  |  +0  |  -0  |
28082   //          ---------------         ---------------
28083   //
28084   // It is achieved by means of FMAX/FMIN with preliminary checks and operand
28085   // reordering.
28086   //
28087   // We check if either operand is NaN and return NaN. Then we check if either
28088   // operand is zero or negative zero (for fmaximum and fminimum, respectively)
28089   // to ensure the correct zero is returned.
28090   auto MatchesZero = [](SDValue Op, APInt Zero) {
28091     Op = peekThroughBitcasts(Op);
28092     if (auto *CstOp = dyn_cast<ConstantFPSDNode>(Op))
28093       return CstOp->getValueAPF().bitcastToAPInt() == Zero;
28094     if (auto *CstOp = dyn_cast<ConstantSDNode>(Op))
28095       return CstOp->getAPIntValue() == Zero;
28096     if (Op->getOpcode() == ISD::BUILD_VECTOR ||
28097         Op->getOpcode() == ISD::SPLAT_VECTOR) {
28098       for (const SDValue &OpVal : Op->op_values()) {
28099         if (OpVal.isUndef())
28100           continue;
28101         auto *CstOp = dyn_cast<ConstantFPSDNode>(OpVal);
28102         if (!CstOp)
28103           return false;
28104         if (!CstOp->getValueAPF().isZero())
28105           continue;
28106         if (CstOp->getValueAPF().bitcastToAPInt() != Zero)
28107           return false;
28108       }
28109       return true;
28110     }
28111     return false;
28112   };
28113 
28114   bool IsXNeverNaN = DAG.isKnownNeverNaN(X);
28115   bool IsYNeverNaN = DAG.isKnownNeverNaN(Y);
28116   bool IgnoreSignedZero = DAG.getTarget().Options.NoSignedZerosFPMath ||
28117                           Op->getFlags().hasNoSignedZeros() ||
28118                           DAG.isKnownNeverZeroFloat(X) ||
28119                           DAG.isKnownNeverZeroFloat(Y);
28120   SDValue NewX, NewY;
28121   if (IgnoreSignedZero || MatchesZero(Y, PreferredZero) ||
28122       MatchesZero(X, OppositeZero)) {
28123     // Operands are already in right order or order does not matter.
28124     NewX = X;
28125     NewY = Y;
28126   } else if (MatchesZero(X, PreferredZero) || MatchesZero(Y, OppositeZero)) {
28127     NewX = Y;
28128     NewY = X;
28129   } else if (!VT.isVector() && (VT == MVT::f16 || Subtarget.hasDQI()) &&
28130              (Op->getFlags().hasNoNaNs() || IsXNeverNaN || IsYNeverNaN)) {
28131     if (IsXNeverNaN)
28132       std::swap(X, Y);
28133     // VFPCLASSS consumes a vector type, so provide a minimal one that
28134     // corresponds to an xmm register.
28135     MVT VectorType = MVT::getVectorVT(VT.getSimpleVT(), 128 / SizeInBits);
28136     SDValue VX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VectorType, X);
28137     // Bits of classes:
28138     // Bits  Imm8[0] Imm8[1] Imm8[2] Imm8[3] Imm8[4]  Imm8[5]  Imm8[6] Imm8[7]
28139     // Class    QNAN PosZero NegZero  PosINF  NegINF Denormal Negative    SNAN
28140     SDValue Imm = DAG.getTargetConstant(MinMaxOp == X86ISD::FMAX ? 0b11 : 0b101,
28141                                         DL, MVT::i32);
28142     SDValue IsNanZero = DAG.getNode(X86ISD::VFPCLASSS, DL, MVT::v1i1, VX, Imm);
28143     SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i1,
28144                               DAG.getConstant(0, DL, MVT::v8i1), IsNanZero,
28145                               DAG.getIntPtrConstant(0, DL));
28146     SDValue NeedSwap = DAG.getBitcast(MVT::i8, Ins);
28147     NewX = DAG.getSelect(DL, VT, NeedSwap, Y, X);
28148     NewY = DAG.getSelect(DL, VT, NeedSwap, X, Y);
28149     return DAG.getNode(MinMaxOp, DL, VT, NewX, NewY, Op->getFlags());
28150   } else {
28151     SDValue IsXSigned;
28152     if (Subtarget.is64Bit() || VT != MVT::f64) {
28153       SDValue XInt = DAG.getNode(ISD::BITCAST, DL, IVT, X);
28154       SDValue ZeroCst = DAG.getConstant(0, DL, IVT);
28155       IsXSigned = DAG.getSetCC(DL, SetCCType, XInt, ZeroCst, ISD::SETLT);
28156     } else {
28157       assert(VT == MVT::f64);
28158       SDValue Ins = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v2f64,
28159                                 DAG.getConstantFP(0, DL, MVT::v2f64), X,
28160                                 DAG.getIntPtrConstant(0, DL));
28161       SDValue VX = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, Ins);
28162       SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VX,
28163                                DAG.getIntPtrConstant(1, DL));
28164       Hi = DAG.getBitcast(MVT::i32, Hi);
28165       SDValue ZeroCst = DAG.getConstant(0, DL, MVT::i32);
28166       EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(),
28167                                              *DAG.getContext(), MVT::i32);
28168       IsXSigned = DAG.getSetCC(DL, SetCCType, Hi, ZeroCst, ISD::SETLT);
28169     }
28170     if (MinMaxOp == X86ISD::FMAX) {
28171       NewX = DAG.getSelect(DL, VT, IsXSigned, X, Y);
28172       NewY = DAG.getSelect(DL, VT, IsXSigned, Y, X);
28173     } else {
28174       NewX = DAG.getSelect(DL, VT, IsXSigned, Y, X);
28175       NewY = DAG.getSelect(DL, VT, IsXSigned, X, Y);
28176     }
28177   }
28178 
28179   bool IgnoreNaN = DAG.getTarget().Options.NoNaNsFPMath ||
28180                    Op->getFlags().hasNoNaNs() || (IsXNeverNaN && IsYNeverNaN);
28181 
28182   // If we did not have to reorder the operands for signed-zero handling, we do
28183   // need to handle NaN, and the second operand is known not to be NaN, then put
28184   // it in the first operand so we will not need to post-process NaN after max/min.
28185   if (IgnoreSignedZero && !IgnoreNaN && DAG.isKnownNeverNaN(NewY))
28186     std::swap(NewX, NewY);
28187 
28188   SDValue MinMax = DAG.getNode(MinMaxOp, DL, VT, NewX, NewY, Op->getFlags());
28189 
28190   if (IgnoreNaN || DAG.isKnownNeverNaN(NewX))
28191     return MinMax;
28192 
28193   SDValue IsNaN = DAG.getSetCC(DL, SetCCType, NewX, NewX, ISD::SETUO);
28194   return DAG.getSelect(DL, VT, IsNaN, NewX, MinMax);
28195 }
28196 
28197 static SDValue LowerABD(SDValue Op, const X86Subtarget &Subtarget,
28198                         SelectionDAG &DAG) {
28199   MVT VT = Op.getSimpleValueType();
28200 
28201   // For AVX1 cases, split to use legal ops.
28202   if (VT.is256BitVector() && !Subtarget.hasInt256())
28203     return splitVectorIntBinary(Op, DAG);
28204 
28205   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.useBWIRegs())
28206     return splitVectorIntBinary(Op, DAG);
28207 
28208   SDLoc dl(Op);
28209   bool IsSigned = Op.getOpcode() == ISD::ABDS;
28210   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28211 
28212   // TODO: Move to TargetLowering expandABD() once we have ABD promotion.
28213   if (VT.isScalarInteger()) {
28214     unsigned WideBits = std::max<unsigned>(2 * VT.getScalarSizeInBits(), 32u);
28215     MVT WideVT = MVT::getIntegerVT(WideBits);
28216     if (TLI.isTypeLegal(WideVT)) {
28217       // abds(lhs, rhs) -> trunc(abs(sub(sext(lhs), sext(rhs))))
28218       // abdu(lhs, rhs) -> trunc(abs(sub(zext(lhs), zext(rhs))))
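      // E.g. for i8 abdu(10, 250): the i32 difference is -240, abs gives 240,
      // and the truncation back to i8 yields 240, the expected distance.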
28219       unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
28220       SDValue LHS = DAG.getNode(ExtOpc, dl, WideVT, Op.getOperand(0));
28221       SDValue RHS = DAG.getNode(ExtOpc, dl, WideVT, Op.getOperand(1));
28222       SDValue Diff = DAG.getNode(ISD::SUB, dl, WideVT, LHS, RHS);
28223       SDValue AbsDiff = DAG.getNode(ISD::ABS, dl, WideVT, Diff);
28224       return DAG.getNode(ISD::TRUNCATE, dl, VT, AbsDiff);
28225     }
28226   }
28227 
28228   // TODO: Move to TargetLowering expandABD().
28229   if (!Subtarget.hasSSE41() &&
28230       ((IsSigned && VT == MVT::v16i8) || VT == MVT::v4i32)) {
28231     SDValue LHS = DAG.getFreeze(Op.getOperand(0));
28232     SDValue RHS = DAG.getFreeze(Op.getOperand(1));
28233     ISD::CondCode CC = IsSigned ? ISD::CondCode::SETGT : ISD::CondCode::SETUGT;
28234     SDValue Cmp = DAG.getSetCC(dl, VT, LHS, RHS, CC);
28235     SDValue Diff0 = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
28236     SDValue Diff1 = DAG.getNode(ISD::SUB, dl, VT, RHS, LHS);
28237     return getBitSelect(dl, VT, Diff0, Diff1, Cmp, DAG);
28238   }
28239 
28240   // Default to expand.
28241   return SDValue();
28242 }
28243 
28244 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
28245                         SelectionDAG &DAG) {
28246   SDLoc dl(Op);
28247   MVT VT = Op.getSimpleValueType();
28248 
28249   // Decompose 256-bit ops into 128-bit ops.
28250   if (VT.is256BitVector() && !Subtarget.hasInt256())
28251     return splitVectorIntBinary(Op, DAG);
28252 
28253   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
28254     return splitVectorIntBinary(Op, DAG);
28255 
28256   SDValue A = Op.getOperand(0);
28257   SDValue B = Op.getOperand(1);
28258 
28259   // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
28260   // vector pairs, multiply and truncate.
28261   if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
28262     unsigned NumElts = VT.getVectorNumElements();
28263 
28264     if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
28265         (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
28266       MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
28267       return DAG.getNode(
28268           ISD::TRUNCATE, dl, VT,
28269           DAG.getNode(ISD::MUL, dl, ExVT,
28270                       DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
28271                       DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
28272     }
28273 
28274     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28275 
28276     // Extract the lo/hi parts and any-extend them to i16.
28277     // We're only going to keep the low byte of each result element of the
28278     // pmullw, so it doesn't matter what's in the high byte of each 16-bit
28279     // element.
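    // Any-extending is sufficient because the low byte of each product depends
    // only on the low bytes of the operands.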
28280     SDValue Undef = DAG.getUNDEF(VT);
28281     SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
28282     SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
28283 
28284     SDValue BLo, BHi;
28285     if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
28286       // If the RHS is a constant, manually unpackl/unpackh.
28287       SmallVector<SDValue, 16> LoOps, HiOps;
28288       for (unsigned i = 0; i != NumElts; i += 16) {
28289         for (unsigned j = 0; j != 8; ++j) {
28290           LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
28291                                                MVT::i16));
28292           HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
28293                                                MVT::i16));
28294         }
28295       }
28296 
28297       BLo = DAG.getBuildVector(ExVT, dl, LoOps);
28298       BHi = DAG.getBuildVector(ExVT, dl, HiOps);
28299     } else {
28300       BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
28301       BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
28302     }
28303 
28304     // Multiply, keep only the lower 8 bits of the lo/hi results and pack.
28305     SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
28306     SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
28307     return getPack(DAG, Subtarget, dl, VT, RLo, RHi);
28308   }
28309 
28310   // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
28311   if (VT == MVT::v4i32) {
28312     assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
28313            "Should not custom lower when pmulld is available!");
28314 
28315     // Extract the odd parts.
28316     static const int UnpackMask[] = { 1, -1, 3, -1 };
28317     SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
28318     SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
28319 
28320     // Multiply the even parts.
28321     SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
28322                                 DAG.getBitcast(MVT::v2i64, A),
28323                                 DAG.getBitcast(MVT::v2i64, B));
28324     // Now multiply odd parts.
28325     SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
28326                                DAG.getBitcast(MVT::v2i64, Aodds),
28327                                DAG.getBitcast(MVT::v2i64, Bodds));
28328 
28329     Evens = DAG.getBitcast(VT, Evens);
28330     Odds = DAG.getBitcast(VT, Odds);
28331 
28332     // Merge the two vectors back together with a shuffle. This expands into 2
28333     // shuffles.
28334     static const int ShufMask[] = { 0, 4, 2, 6 };
28335     return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
28336   }
28337 
28338   assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
28339          "Only know how to lower V2I64/V4I64/V8I64 multiply");
28340   assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
28341 
28342   //  Ahi = psrlqi(a, 32);
28343   //  Bhi = psrlqi(b, 32);
28344   //
28345   //  AloBlo = pmuludq(a, b);
28346   //  AloBhi = pmuludq(a, Bhi);
28347   //  AhiBlo = pmuludq(Ahi, b);
28348   //
28349   //  Hi = psllqi(AloBhi + AhiBlo, 32);
28350   //  return AloBlo + Hi;
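  //
  //  This works because a = Alo + (Ahi << 32) and b = Blo + (Bhi << 32), so
  //  a * b = Alo*Blo + ((Alo*Bhi + Ahi*Blo) << 32) + ((Ahi*Bhi) << 64)
  //  and the final term vanishes modulo 2^64.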
28351   KnownBits AKnown = DAG.computeKnownBits(A);
28352   KnownBits BKnown = DAG.computeKnownBits(B);
28353 
28354   APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
28355   bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
28356   bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
28357 
28358   APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
28359   bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
28360   bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
28361 
28362   SDValue Zero = DAG.getConstant(0, dl, VT);
28363 
28364   // Only multiply lo/hi halves that aren't known to be zero.
28365   SDValue AloBlo = Zero;
28366   if (!ALoIsZero && !BLoIsZero)
28367     AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
28368 
28369   SDValue AloBhi = Zero;
28370   if (!ALoIsZero && !BHiIsZero) {
28371     SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
28372     AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
28373   }
28374 
28375   SDValue AhiBlo = Zero;
28376   if (!AHiIsZero && !BLoIsZero) {
28377     SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
28378     AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
28379   }
28380 
28381   SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
28382   Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
28383 
28384   return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
28385 }
28386 
28387 static SDValue LowervXi8MulWithUNPCK(SDValue A, SDValue B, const SDLoc &dl,
28388                                      MVT VT, bool IsSigned,
28389                                      const X86Subtarget &Subtarget,
28390                                      SelectionDAG &DAG,
28391                                      SDValue *Low = nullptr) {
28392   unsigned NumElts = VT.getVectorNumElements();
28393 
28394   // For vXi8 we will unpack the low and high half of each 128 bit lane to widen
28395   // to a vXi16 type. Do the multiplies, shift the results and pack the half
28396   // lane results back together.
28397 
28398   // We'll take different approaches for signed and unsigned.
28399   // For unsigned we'll use punpcklbw/punpckhbw to zero extend the bytes
28400   // and use pmullw to calculate the full 16-bit product.
28401   // For signed we'll use punpcklbw/punpckhbw to extend the bytes to words and
28402   // shift them left into the upper byte of each word. This allows us to use
28403   // pmulhw to calculate the full 16-bit product. This trick means we don't
28404   // need to sign extend the bytes to use pmullw.
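  //
  // The signed trick works because (x << 8) as an i16 equals x * 2^8 for a
  // signed byte x, so pmulhw((a << 8), (b << 8)) = ((a * 2^8) * (b * 2^8)) >> 16
  // is exactly the full 16-bit product a * b.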
28405 
28406   MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28407   SDValue Zero = DAG.getConstant(0, dl, VT);
28408 
28409   SDValue ALo, AHi;
28410   if (IsSigned) {
28411     ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, A));
28412     AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, A));
28413   } else {
28414     ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Zero));
28415     AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Zero));
28416   }
28417 
28418   SDValue BLo, BHi;
28419   if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
28420     // If the RHS is a constant, manually unpackl/unpackh and extend.
28421     SmallVector<SDValue, 16> LoOps, HiOps;
28422     for (unsigned i = 0; i != NumElts; i += 16) {
28423       for (unsigned j = 0; j != 8; ++j) {
28424         SDValue LoOp = B.getOperand(i + j);
28425         SDValue HiOp = B.getOperand(i + j + 8);
28426 
28427         if (IsSigned) {
28428           LoOp = DAG.getAnyExtOrTrunc(LoOp, dl, MVT::i16);
28429           HiOp = DAG.getAnyExtOrTrunc(HiOp, dl, MVT::i16);
28430           LoOp = DAG.getNode(ISD::SHL, dl, MVT::i16, LoOp,
28431                              DAG.getConstant(8, dl, MVT::i16));
28432           HiOp = DAG.getNode(ISD::SHL, dl, MVT::i16, HiOp,
28433                              DAG.getConstant(8, dl, MVT::i16));
28434         } else {
28435           LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
28436           HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
28437         }
28438 
28439         LoOps.push_back(LoOp);
28440         HiOps.push_back(HiOp);
28441       }
28442     }
28443 
28444     BLo = DAG.getBuildVector(ExVT, dl, LoOps);
28445     BHi = DAG.getBuildVector(ExVT, dl, HiOps);
28446   } else if (IsSigned) {
28447     BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, Zero, B));
28448     BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, Zero, B));
28449   } else {
28450     BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Zero));
28451     BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Zero));
28452   }
28453 
28454   // Multiply, lshr the upper 8 bits to the lower 8 bits of the lo/hi results
28455   // and pack back to vXi8.
28456   unsigned MulOpc = IsSigned ? ISD::MULHS : ISD::MUL;
28457   SDValue RLo = DAG.getNode(MulOpc, dl, ExVT, ALo, BLo);
28458   SDValue RHi = DAG.getNode(MulOpc, dl, ExVT, AHi, BHi);
28459 
28460   if (Low)
28461     *Low = getPack(DAG, Subtarget, dl, VT, RLo, RHi);
28462 
28463   return getPack(DAG, Subtarget, dl, VT, RLo, RHi, /*PackHiHalf*/ true);
28464 }
28465 
28466 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
28467                          SelectionDAG &DAG) {
28468   SDLoc dl(Op);
28469   MVT VT = Op.getSimpleValueType();
28470   bool IsSigned = Op->getOpcode() == ISD::MULHS;
28471   unsigned NumElts = VT.getVectorNumElements();
28472   SDValue A = Op.getOperand(0);
28473   SDValue B = Op.getOperand(1);
28474 
28475   // Decompose 256-bit ops into 128-bit ops.
28476   if (VT.is256BitVector() && !Subtarget.hasInt256())
28477     return splitVectorIntBinary(Op, DAG);
28478 
28479   if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
28480     return splitVectorIntBinary(Op, DAG);
28481 
28482   if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
28483     assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
28484            (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
28485            (VT == MVT::v16i32 && Subtarget.hasAVX512()));
28486 
28487     // PMULxD operations multiply each even value (starting at 0) of LHS with
28488     // the related value of RHS and produce a widened result.
28489     // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
28490     // => <2 x i64> <ae|cg>
28491     //
28492     // In other words, to have all the results, we need to perform two PMULxD:
28493     // 1. one with the even values.
28494     // 2. one with the odd values.
28495     // To achieve #2, we need to place the odd values at an even position.
28496     //
28497     // Place the odd value at an even position (basically, shift all values 1
28498     // step to the left):
28499     const int Mask[] = {1, -1,  3, -1,  5, -1,  7, -1,
28500                         9, -1, 11, -1, 13, -1, 15, -1};
28501     // <a|b|c|d> => <b|undef|d|undef>
28502     SDValue Odd0 =
28503         DAG.getVectorShuffle(VT, dl, A, A, ArrayRef(&Mask[0], NumElts));
28504     // <e|f|g|h> => <f|undef|h|undef>
28505     SDValue Odd1 =
28506         DAG.getVectorShuffle(VT, dl, B, B, ArrayRef(&Mask[0], NumElts));
28507 
28508     // Emit two multiplies, one for the lower 2 ints and one for the higher 2
28509     // ints.
28510     MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
28511     unsigned Opcode =
28512         (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
28513     // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
28514     // => <2 x i64> <ae|cg>
28515     SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
28516                                                   DAG.getBitcast(MulVT, A),
28517                                                   DAG.getBitcast(MulVT, B)));
28518     // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
28519     // => <2 x i64> <bf|dh>
28520     SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
28521                                                   DAG.getBitcast(MulVT, Odd0),
28522                                                   DAG.getBitcast(MulVT, Odd1)));
28523 
28524     // Shuffle it back into the right order.
28525     SmallVector<int, 16> ShufMask(NumElts);
28526     for (int i = 0; i != (int)NumElts; ++i)
28527       ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
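    // E.g. for v4i32 (NumElts == 4) ShufMask is {1, 5, 3, 7}: the odd i32
    // elements of Mul1 and Mul2, i.e. the high halves of ae, bf, cg and dh.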
28528 
28529     SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
28530 
28531     // If we have a signed multiply but no PMULDQ, fix up the result of the
28532     // unsigned multiply.
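    // This uses the identity (per N-bit element, modulo 2^N):
    //   mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
    // where each setcc below yields an all-ones mask for negative elements.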
28533     if (IsSigned && !Subtarget.hasSSE41()) {
28534       SDValue Zero = DAG.getConstant(0, dl, VT);
28535       SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
28536                                DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
28537       SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
28538                                DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
28539 
28540       SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
28541       Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
28542     }
28543 
28544     return Res;
28545   }
28546 
28547   // Only i8 vectors should need custom lowering after this.
28548   assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
28549          (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
28550          "Unsupported vector type");
28551 
28552   // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
28553   // logical shift down the upper half and pack back to i8.
28554 
28555   // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
28556   // and then ashr/lshr the upper bits down to the lower bits before multiply.
28557 
28558   if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
28559       (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
28560     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
28561     unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
28562     SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
28563     SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
28564     SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
28565     Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
28566     return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
28567   }
28568 
28569   return LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG);
28570 }
28571 
28572 // Custom lowering for SMULO/UMULO.
28573 static SDValue LowerMULO(SDValue Op, const X86Subtarget &Subtarget,
28574                          SelectionDAG &DAG) {
28575   MVT VT = Op.getSimpleValueType();
28576 
28577   // Scalars defer to LowerXALUO.
28578   if (!VT.isVector())
28579     return LowerXALUO(Op, DAG);
28580 
28581   SDLoc dl(Op);
28582   bool IsSigned = Op->getOpcode() == ISD::SMULO;
28583   SDValue A = Op.getOperand(0);
28584   SDValue B = Op.getOperand(1);
28585   EVT OvfVT = Op->getValueType(1);
28586 
28587   if ((VT == MVT::v32i8 && !Subtarget.hasInt256()) ||
28588       (VT == MVT::v64i8 && !Subtarget.hasBWI())) {
28589     // Extract the LHS Lo/Hi vectors
28590     SDValue LHSLo, LHSHi;
28591     std::tie(LHSLo, LHSHi) = splitVector(A, DAG, dl);
28592 
28593     // Extract the RHS Lo/Hi vectors
28594     SDValue RHSLo, RHSHi;
28595     std::tie(RHSLo, RHSHi) = splitVector(B, DAG, dl);
28596 
28597     EVT LoOvfVT, HiOvfVT;
28598     std::tie(LoOvfVT, HiOvfVT) = DAG.GetSplitDestVTs(OvfVT);
28599     SDVTList LoVTs = DAG.getVTList(LHSLo.getValueType(), LoOvfVT);
28600     SDVTList HiVTs = DAG.getVTList(LHSHi.getValueType(), HiOvfVT);
28601 
28602     // Issue the split operations.
28603     SDValue Lo = DAG.getNode(Op.getOpcode(), dl, LoVTs, LHSLo, RHSLo);
28604     SDValue Hi = DAG.getNode(Op.getOpcode(), dl, HiVTs, LHSHi, RHSHi);
28605 
28606     // Join the separate data results and the overflow results.
28607     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
28608     SDValue Ovf = DAG.getNode(ISD::CONCAT_VECTORS, dl, OvfVT, Lo.getValue(1),
28609                               Hi.getValue(1));
28610 
28611     return DAG.getMergeValues({Res, Ovf}, dl);
28612   }
28613 
28614   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28615   EVT SetccVT =
28616       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);
28617 
28618   if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
28619       (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
28620     unsigned NumElts = VT.getVectorNumElements();
28621     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
28622     unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
28623     SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
28624     SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
28625     SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
28626 
28627     SDValue Low = DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
28628 
28629     SDValue Ovf;
28630     if (IsSigned) {
28631       SDValue High, LowSign;
28632       if (OvfVT.getVectorElementType() == MVT::i1 &&
28633           (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
28634         // Rather than truncating, try to do the compare on vXi16 or vXi32.
28635         // Shift the high down filling with sign bits.
28636         High = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Mul, 8, DAG);
28637         // Fill all 16 bits with the sign bit from the low.
28638         LowSign =
28639             getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExVT, Mul, 8, DAG);
28640         LowSign = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, LowSign,
28641                                              15, DAG);
28642         SetccVT = OvfVT;
28643         if (!Subtarget.hasBWI()) {
28644           // We can't do a vXi16 compare so sign extend to v16i32.
28645           High = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, High);
28646           LowSign = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v16i32, LowSign);
28647         }
28648       } else {
28649         // Otherwise do the compare at vXi8.
28650         High = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
28651         High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
28652         LowSign =
28653             DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
28654       }
28655 
28656       Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
28657     } else {
28658       SDValue High =
28659           getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
28660       if (OvfVT.getVectorElementType() == MVT::i1 &&
28661           (Subtarget.hasBWI() || Subtarget.canExtendTo512DQ())) {
28662         // Rather than truncating, try to do the compare on vXi16 or vXi32.
28663         SetccVT = OvfVT;
28664         if (!Subtarget.hasBWI()) {
28665           // We can't do a vXi16 compare so sign extend to v16i32.
28666           High = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v16i32, High);
28667         }
28668       } else {
28669         // Otherwise do the compare at vXi8.
28670         High = DAG.getNode(ISD::TRUNCATE, dl, VT, High);
28671       }
28672 
28673       Ovf =
28674           DAG.getSetCC(dl, SetccVT, High,
28675                        DAG.getConstant(0, dl, High.getValueType()), ISD::SETNE);
28676     }
28677 
28678     Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);
28679 
28680     return DAG.getMergeValues({Low, Ovf}, dl);
28681   }
28682 
28683   SDValue Low;
28684   SDValue High =
28685       LowervXi8MulWithUNPCK(A, B, dl, VT, IsSigned, Subtarget, DAG, &Low);
28686 
28687   SDValue Ovf;
28688   if (IsSigned) {
28689     // SMULO overflows if the high bits don't match the sign of the low.
28690     SDValue LowSign =
28691         DAG.getNode(ISD::SRA, dl, VT, Low, DAG.getConstant(7, dl, VT));
28692     Ovf = DAG.getSetCC(dl, SetccVT, LowSign, High, ISD::SETNE);
28693   } else {
28694     // UMULO overflows if the high bits are non-zero.
28695     Ovf =
28696         DAG.getSetCC(dl, SetccVT, High, DAG.getConstant(0, dl, VT), ISD::SETNE);
28697   }
28698 
28699   Ovf = DAG.getSExtOrTrunc(Ovf, dl, OvfVT);
28700 
28701   return DAG.getMergeValues({Low, Ovf}, dl);
28702 }
28703 
28704 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
28705   assert(Subtarget.isTargetWin64() && "Unexpected target");
28706   EVT VT = Op.getValueType();
28707   assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
28708          "Unexpected return type for lowering");
28709 
28710   if (isa<ConstantSDNode>(Op->getOperand(1))) {
28711     SmallVector<SDValue> Result;
28712     if (expandDIVREMByConstant(Op.getNode(), Result, MVT::i64, DAG))
28713       return DAG.getNode(ISD::BUILD_PAIR, SDLoc(Op), VT, Result[0], Result[1]);
28714   }
28715 
28716   RTLIB::Libcall LC;
28717   bool isSigned;
28718   switch (Op->getOpcode()) {
28719   default: llvm_unreachable("Unexpected request for libcall!");
28720   case ISD::SDIV:      isSigned = true;  LC = RTLIB::SDIV_I128;    break;
28721   case ISD::UDIV:      isSigned = false; LC = RTLIB::UDIV_I128;    break;
28722   case ISD::SREM:      isSigned = true;  LC = RTLIB::SREM_I128;    break;
28723   case ISD::UREM:      isSigned = false; LC = RTLIB::UREM_I128;    break;
28724   }
28725 
28726   SDLoc dl(Op);
28727   SDValue InChain = DAG.getEntryNode();
28728 
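  // Win64 has no native i128 arguments, so pass each operand indirectly: spill
  // it to a 16-byte aligned stack slot and hand the libcall a pointer to that
  // slot. The result comes back in XMM0 as a v2i64 and is bitcast to i128.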
28729   TargetLowering::ArgListTy Args;
28730   TargetLowering::ArgListEntry Entry;
28731   for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
28732     EVT ArgVT = Op->getOperand(i).getValueType();
28733     assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
28734            "Unexpected argument type for lowering");
28735     SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
28736     int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
28737     MachinePointerInfo MPI =
28738         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
28739     Entry.Node = StackPtr;
28740     InChain =
28741         DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MPI, Align(16));
28742     Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
28743     Entry.Ty = PointerType::get(ArgTy, 0);
28744     Entry.IsSExt = false;
28745     Entry.IsZExt = false;
28746     Args.push_back(Entry);
28747   }
28748 
28749   SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
28750                                          getPointerTy(DAG.getDataLayout()));
28751 
28752   TargetLowering::CallLoweringInfo CLI(DAG);
28753   CLI.setDebugLoc(dl)
28754       .setChain(InChain)
28755       .setLibCallee(
28756           getLibcallCallingConv(LC),
28757           static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
28758           std::move(Args))
28759       .setInRegister()
28760       .setSExtResult(isSigned)
28761       .setZExtResult(!isSigned);
28762 
28763   std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
28764   return DAG.getBitcast(VT, CallInfo.first);
28765 }
28766 
28767 SDValue X86TargetLowering::LowerWin64_FP_TO_INT128(SDValue Op,
28768                                                    SelectionDAG &DAG,
28769                                                    SDValue &Chain) const {
28770   assert(Subtarget.isTargetWin64() && "Unexpected target");
28771   EVT VT = Op.getValueType();
28772   bool IsStrict = Op->isStrictFPOpcode();
28773 
28774   SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
28775   EVT ArgVT = Arg.getValueType();
28776 
28777   assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
28778          "Unexpected return type for lowering");
28779 
28780   RTLIB::Libcall LC;
28781   if (Op->getOpcode() == ISD::FP_TO_SINT ||
28782       Op->getOpcode() == ISD::STRICT_FP_TO_SINT)
28783     LC = RTLIB::getFPTOSINT(ArgVT, VT);
28784   else
28785     LC = RTLIB::getFPTOUINT(ArgVT, VT);
28786   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");
28787 
28788   SDLoc dl(Op);
28789   MakeLibCallOptions CallOptions;
28790   Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
28791 
28792   SDValue Result;
28793   // Expect the i128 argument returned as a v2i64 in xmm0, cast back to the
28794   // expected VT (i128).
28795   std::tie(Result, Chain) =
28796       makeLibCall(DAG, LC, MVT::v2i64, Arg, CallOptions, dl, Chain);
28797   Result = DAG.getBitcast(VT, Result);
28798   return Result;
28799 }
28800 
28801 SDValue X86TargetLowering::LowerWin64_INT128_TO_FP(SDValue Op,
28802                                                    SelectionDAG &DAG) const {
28803   assert(Subtarget.isTargetWin64() && "Unexpected target");
28804   EVT VT = Op.getValueType();
28805   bool IsStrict = Op->isStrictFPOpcode();
28806 
28807   SDValue Arg = Op.getOperand(IsStrict ? 1 : 0);
28808   EVT ArgVT = Arg.getValueType();
28809 
28810   assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
28811          "Unexpected argument type for lowering");
28812 
28813   RTLIB::Libcall LC;
28814   if (Op->getOpcode() == ISD::SINT_TO_FP ||
28815       Op->getOpcode() == ISD::STRICT_SINT_TO_FP)
28816     LC = RTLIB::getSINTTOFP(ArgVT, VT);
28817   else
28818     LC = RTLIB::getUINTTOFP(ArgVT, VT);
28819   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unexpected request for libcall!");
28820 
28821   SDLoc dl(Op);
28822   MakeLibCallOptions CallOptions;
28823   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
28824 
28825   // Pass the i128 argument as an indirect argument on the stack.
28826   SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
28827   int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
28828   MachinePointerInfo MPI =
28829       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
28830   Chain = DAG.getStore(Chain, dl, Arg, StackPtr, MPI, Align(16));
28831 
28832   SDValue Result;
28833   std::tie(Result, Chain) =
28834       makeLibCall(DAG, LC, VT, StackPtr, CallOptions, dl, Chain);
28835   return IsStrict ? DAG.getMergeValues({Result, Chain}, dl) : Result;
28836 }
28837 
28838 // Return true if the required (according to Opcode) shift-imm form is natively
28839 // supported by the Subtarget
28840 static bool supportedVectorShiftWithImm(EVT VT, const X86Subtarget &Subtarget,
28841                                         unsigned Opcode) {
28842   if (!VT.isSimple())
28843     return false;
28844 
28845   if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
28846     return false;
28847 
28848   if (VT.getScalarSizeInBits() < 16)
28849     return false;
28850 
28851   if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
28852       (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
28853     return true;
28854 
28855   bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
28856                 (VT.is256BitVector() && Subtarget.hasInt256());
28857 
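  // There is no 64-bit arithmetic shift (PSRAQ) before AVX-512, so SRA of
  // v2i64/v4i64 is only treated as supported when AVX-512 is available.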
28858   bool AShift = LShift && (Subtarget.hasAVX512() ||
28859                            (VT != MVT::v2i64 && VT != MVT::v4i64));
28860   return (Opcode == ISD::SRA) ? AShift : LShift;
28861 }
28862 
28863 // The shift amount is a variable, but it is the same for all vector lanes.
28864 // These instructions are defined together with shift-immediate.
28865 static
28866 bool supportedVectorShiftWithBaseAmnt(EVT VT, const X86Subtarget &Subtarget,
28867                                       unsigned Opcode) {
28868   return supportedVectorShiftWithImm(VT, Subtarget, Opcode);
28869 }
28870 
28871 // Return true if the required (according to Opcode) variable-shift form is
28872 // natively supported by the Subtarget
28873 static bool supportedVectorVarShift(EVT VT, const X86Subtarget &Subtarget,
28874                                     unsigned Opcode) {
28875   if (!VT.isSimple())
28876     return false;
28877 
28878   if (!(VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))
28879     return false;
28880 
28881   if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
28882     return false;
28883 
28884   // vXi16 is only supported with AVX-512 BWI.
28885   if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
28886     return false;
28887 
28888   if (Subtarget.hasAVX512() &&
28889       (Subtarget.useAVX512Regs() || !VT.is512BitVector()))
28890     return true;
28891 
28892   bool LShift = VT.is128BitVector() || VT.is256BitVector();
28893   bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
28894   return (Opcode == ISD::SRA) ? AShift : LShift;
28895 }
28896 
28897 static SDValue LowerShiftByScalarImmediate(SDValue Op, SelectionDAG &DAG,
28898                                            const X86Subtarget &Subtarget) {
28899   MVT VT = Op.getSimpleValueType();
28900   SDLoc dl(Op);
28901   SDValue R = Op.getOperand(0);
28902   SDValue Amt = Op.getOperand(1);
28903   unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
28904 
28905   auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
28906     assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
28907     MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
28908     SDValue Ex = DAG.getBitcast(ExVT, R);
28909 
28910     // ashr(R, 63) === cmp_slt(R, 0)
28911     if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
28912       assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
28913              "Unsupported PCMPGT op");
28914       return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
28915     }
28916 
28917     if (ShiftAmt >= 32) {
28918       // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
28919       SDValue Upper =
28920           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
28921       SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
28922                                                  ShiftAmt - 32, DAG);
28923       if (VT == MVT::v2i64)
28924         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
28925       if (VT == MVT::v4i64)
28926         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
28927                                   {9, 1, 11, 3, 13, 5, 15, 7});
28928     } else {
28929       // SRA upper i32, SRL whole i64 and select lower i32.
28930       SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
28931                                                  ShiftAmt, DAG);
28932       SDValue Lower =
28933           getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
28934       Lower = DAG.getBitcast(ExVT, Lower);
28935       if (VT == MVT::v2i64)
28936         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
28937       if (VT == MVT::v4i64)
28938         Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
28939                                   {8, 1, 10, 3, 12, 5, 14, 7});
28940     }
28941     return DAG.getBitcast(VT, Ex);
28942   };
28943 
28944   // Optimize shl/srl/sra with constant shift amount.
28945   APInt APIntShiftAmt;
28946   if (!X86::isConstantSplat(Amt, APIntShiftAmt))
28947     return SDValue();
28948 
28949   // If the shift amount is out of range, return undef.
28950   if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
28951     return DAG.getUNDEF(VT);
28952 
28953   uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
28954 
28955   if (supportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode())) {
28956     // Hardware support for vector shifts is sparse, which makes us scalarize
28957     // the vector operations in many cases. Also, on Sandy Bridge ADD is faster
28958     // than shl: (shl V, 1) -> (add (freeze V), (freeze V))
28959     if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
28960       // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
28961       // must be 0). (add undef, undef) however can be any value. To make this
28962       // safe, we must freeze R to ensure that register allocation uses the same
28963       // register for an undefined value. This ensures that the result will
28964       // still be even and preserves the original semantics.
28965       R = DAG.getFreeze(R);
28966       return DAG.getNode(ISD::ADD, dl, VT, R, R);
28967     }
28968 
28969     return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
28970   }
28971 
28972   // i64 SRA needs to be performed as partial shifts.
28973   if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
28974        (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
28975       Op.getOpcode() == ISD::SRA)
28976     return ArithmeticShiftRight64(ShiftAmt);
28977 
28978   if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
28979       (Subtarget.hasBWI() && VT == MVT::v64i8)) {
28980     unsigned NumElts = VT.getVectorNumElements();
28981     MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
28982 
28983     // Simple i8 add case
28984     if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1) {
28985       // R may be undef at run-time, but (shl R, 1) must be an even number (LSB
28986       // must be 0). (add undef, undef) however can be any value. To make this
28987       // safe, we must freeze R to ensure that register allocation uses the same
28988       // register for an undefined value. This ensures that the result will
28989       // still be even and preserves the original semantics.
28990       R = DAG.getFreeze(R);
28991       return DAG.getNode(ISD::ADD, dl, VT, R, R);
28992     }
28993 
28994     // ashr(R, 7)  === cmp_slt(R, 0)
28995     if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
28996       SDValue Zeros = DAG.getConstant(0, dl, VT);
28997       if (VT.is512BitVector()) {
28998         assert(VT == MVT::v64i8 && "Unexpected element type!");
28999         SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
29000         return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
29001       }
29002       return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
29003     }
29004 
29005     // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
29006     if (VT == MVT::v16i8 && Subtarget.hasXOP())
29007       return SDValue();
29008 
29009     if (Op.getOpcode() == ISD::SHL) {
29010       // Make a large shift.
29011       SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
29012                                                ShiftAmt, DAG);
29013       SHL = DAG.getBitcast(VT, SHL);
29014       // Zero out the rightmost bits.
29015       APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
29016       return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
29017     }
29018     if (Op.getOpcode() == ISD::SRL) {
29019       // Make a large shift.
29020       SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
29021                                                ShiftAmt, DAG);
29022       SRL = DAG.getBitcast(VT, SRL);
29023       // Zero out the leftmost bits.
29024       APInt Mask = APInt::getLowBitsSet(8, 8 - ShiftAmt);
29025       return DAG.getNode(ISD::AND, dl, VT, SRL, DAG.getConstant(Mask, dl, VT));
29026     }
29027     if (Op.getOpcode() == ISD::SRA) {
29028       // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
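      // With Mask = 0x80 >> Amt this is the usual sign-extension trick for a
      // value of (8 - Amt) bits: the xor flips the shifted-down sign bit and
      // the sub then propagates it into all of the bits above it.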
29029       SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
29030 
29031       SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
29032       Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
29033       Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
29034       return Res;
29035     }
29036     llvm_unreachable("Unknown shift opcode.");
29037   }
29038 
29039   return SDValue();
29040 }
29041 
29042 static SDValue LowerShiftByScalarVariable(SDValue Op, SelectionDAG &DAG,
29043                                           const X86Subtarget &Subtarget) {
29044   MVT VT = Op.getSimpleValueType();
29045   SDLoc dl(Op);
29046   SDValue R = Op.getOperand(0);
29047   SDValue Amt = Op.getOperand(1);
29048   unsigned Opcode = Op.getOpcode();
29049   unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
29050 
29051   int BaseShAmtIdx = -1;
29052   if (SDValue BaseShAmt = DAG.getSplatSourceVector(Amt, BaseShAmtIdx)) {
29053     if (supportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode))
29054       return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, BaseShAmtIdx,
29055                                  Subtarget, DAG);
29056 
29057     // vXi8 shifts - shift as v8i16 + mask result.
29058     if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
29059          (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
29060          VT == MVT::v64i8) &&
29061         !Subtarget.hasXOP()) {
29062       unsigned NumElts = VT.getVectorNumElements();
29063       MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
29064       if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
29065         unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
29066         unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
29067 
29068         // Create the mask using vXi16 shifts. For shift-rights we need to move
29069         // the upper byte down before splatting the vXi8 mask.
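        // The shift itself is done as vXi16 below, which lets bits cross byte
        // boundaries; ANDing with this splatted byte mask clears the bits that
        // leak in from the neighbouring byte.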
29070         SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
29071         BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
29072                                       BaseShAmt, BaseShAmtIdx, Subtarget, DAG);
29073         if (Opcode != ISD::SHL)
29074           BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
29075                                                8, DAG);
29076         BitMask = DAG.getBitcast(VT, BitMask);
29077         BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
29078                                        SmallVector<int, 64>(NumElts, 0));
29079 
29080         SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
29081                                           DAG.getBitcast(ExtVT, R), BaseShAmt,
29082                                           BaseShAmtIdx, Subtarget, DAG);
29083         Res = DAG.getBitcast(VT, Res);
29084         Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
29085 
29086         if (Opcode == ISD::SRA) {
29087           // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
29088           // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
29089           SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
29090           SignMask =
29091               getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask, BaseShAmt,
29092                                   BaseShAmtIdx, Subtarget, DAG);
29093           SignMask = DAG.getBitcast(VT, SignMask);
29094           Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
29095           Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
29096         }
29097         return Res;
29098       }
29099     }
29100   }
29101 
29102   return SDValue();
29103 }
29104 
29105 // Convert a shift/rotate left amount to a multiplication scale factor.
29106 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
29107                                        const X86Subtarget &Subtarget,
29108                                        SelectionDAG &DAG) {
29109   MVT VT = Amt.getSimpleValueType();
29110   if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
29111         (Subtarget.hasInt256() && VT == MVT::v16i16) ||
29112         (Subtarget.hasAVX512() && VT == MVT::v32i16) ||
29113         (!Subtarget.hasAVX512() && VT == MVT::v16i8) ||
29114         (Subtarget.hasInt256() && VT == MVT::v32i8) ||
29115         (Subtarget.hasBWI() && VT == MVT::v64i8)))
29116     return SDValue();
29117 
29118   MVT SVT = VT.getVectorElementType();
29119   unsigned SVTBits = SVT.getSizeInBits();
29120   unsigned NumElems = VT.getVectorNumElements();
29121 
29122   APInt UndefElts;
29123   SmallVector<APInt> EltBits;
29124   if (getTargetConstantBitsFromNode(Amt, SVTBits, UndefElts, EltBits)) {
29125     APInt One(SVTBits, 1);
29126     SmallVector<SDValue> Elts(NumElems, DAG.getUNDEF(SVT));
29127     for (unsigned I = 0; I != NumElems; ++I) {
29128       if (UndefElts[I] || EltBits[I].uge(SVTBits))
29129         continue;
29130       uint64_t ShAmt = EltBits[I].getZExtValue();
29131       Elts[I] = DAG.getConstant(One.shl(ShAmt), dl, SVT);
29132     }
29133     return DAG.getBuildVector(VT, dl, Elts);
29134   }
29135 
29136   // If the target doesn't support variable shifts, use either FP conversion
29137   // or integer multiplication to avoid shifting each element individually.
29138   if (VT == MVT::v4i32) {
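    // Build the float 2^Amt per lane: shifting Amt into the exponent field and
    // adding the bit pattern of 1.0f (0x3f800000) gives a float with exponent
    // 127 + Amt, i.e. 2^Amt, and converting back to integer yields 1 << Amt.
    // (For Amt == 31 the conversion overflows, but on x86 cvttps2dq returns
    // 0x80000000, which is still the desired 1 << 31 scale.)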
29139     Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
29140     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
29141                       DAG.getConstant(0x3f800000U, dl, VT));
29142     Amt = DAG.getBitcast(MVT::v4f32, Amt);
29143     return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
29144   }
29145 
29146   // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
29147   if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
29148     SDValue Z = DAG.getConstant(0, dl, VT);
29149     SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
29150     SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
29151     Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
29152     Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
29153     if (Subtarget.hasSSE41())
29154       return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
29155     return getPack(DAG, Subtarget, dl, VT, Lo, Hi);
29156   }
29157 
29158   return SDValue();
29159 }
29160 
29161 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
29162                           SelectionDAG &DAG) {
29163   MVT VT = Op.getSimpleValueType();
29164   SDLoc dl(Op);
29165   SDValue R = Op.getOperand(0);
29166   SDValue Amt = Op.getOperand(1);
29167   unsigned EltSizeInBits = VT.getScalarSizeInBits();
29168   bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
29169 
29170   unsigned Opc = Op.getOpcode();
29171   unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
29172   unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
29173 
29174   assert(VT.isVector() && "Custom lowering only for vector shifts!");
29175   assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
29176 
29177   if (SDValue V = LowerShiftByScalarImmediate(Op, DAG, Subtarget))
29178     return V;
29179 
29180   if (SDValue V = LowerShiftByScalarVariable(Op, DAG, Subtarget))
29181     return V;
29182 
29183   if (supportedVectorVarShift(VT, Subtarget, Opc))
29184     return Op;
29185 
29186   // i64 vector arithmetic shift can be emulated with the transform:
29187   // M = lshr(SIGN_MASK, Amt)
29188   // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
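  // This is the same sign-extension identity as the scalar trick: with
  // M = SIGN_MASK >> Amt, (lshr(R, Amt) ^ M) - M copies the shifted-down sign
  // bit into all of the bits above it, i.e. an arithmetic shift built from two
  // logical shifts.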
29189   if (((VT == MVT::v2i64 && !Subtarget.hasXOP()) ||
29190        (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
29191       Opc == ISD::SRA) {
29192     SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
29193     SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
29194     R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
29195     R = DAG.getNode(ISD::XOR, dl, VT, R, M);
29196     R = DAG.getNode(ISD::SUB, dl, VT, R, M);
29197     return R;
29198   }
29199 
29200   // XOP has 128-bit variable logical/arithmetic shifts.
29201   // +ve/-ve Amt = shift left/right.
29202   if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
29203                              VT == MVT::v8i16 || VT == MVT::v16i8)) {
29204     if (Opc == ISD::SRL || Opc == ISD::SRA) {
29205       SDValue Zero = DAG.getConstant(0, dl, VT);
29206       Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
29207     }
29208     if (Opc == ISD::SHL || Opc == ISD::SRL)
29209       return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
29210     if (Opc == ISD::SRA)
29211       return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
29212   }
29213 
29214   // v2i64 vector logical shifts can efficiently avoid scalarization - do the
29215   // shifts per-lane and then shuffle the partial results back together.
29216   if (VT == MVT::v2i64 && Opc != ISD::SRA) {
29217     // Splat the shift amounts so the scalar shifts above will catch it.
29218     SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
29219     SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
29220     SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
29221     SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
29222     return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
29223   }
29224 
29225   // If possible, lower this shift as a sequence of two shifts by
29226   // constant plus a BLENDing shuffle instead of scalarizing it.
29227   // Example:
29228   //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
29229   //
29230   // Could be rewritten as:
29231   //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
29232   //
29233   // The advantage is that the two shifts from the example would be
29234   // lowered as X86ISD::VSRLI nodes in parallel before blending.
29235   if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
29236                       (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
29237     SDValue Amt1, Amt2;
29238     unsigned NumElts = VT.getVectorNumElements();
29239     SmallVector<int, 8> ShuffleMask;
29240     for (unsigned i = 0; i != NumElts; ++i) {
29241       SDValue A = Amt->getOperand(i);
29242       if (A.isUndef()) {
29243         ShuffleMask.push_back(SM_SentinelUndef);
29244         continue;
29245       }
29246       if (!Amt1 || Amt1 == A) {
29247         ShuffleMask.push_back(i);
29248         Amt1 = A;
29249         continue;
29250       }
29251       if (!Amt2 || Amt2 == A) {
29252         ShuffleMask.push_back(i + NumElts);
29253         Amt2 = A;
29254         continue;
29255       }
29256       break;
29257     }
29258 
29259     // Only perform this blend if we can perform it without loading a mask.
29260     if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
29261         (VT != MVT::v16i16 ||
29262          is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
29263         (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
29264          canWidenShuffleElements(ShuffleMask))) {
29265       auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
29266       auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
29267       if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
29268           Cst2->getAPIntValue().ult(EltSizeInBits)) {
29269         SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
29270                                                     Cst1->getZExtValue(), DAG);
29271         SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
29272                                                     Cst2->getZExtValue(), DAG);
29273         return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
29274       }
29275     }
29276   }
29277 
29278   // If possible, lower this packed shift into a vector multiply instead of
29279   // expanding it into a sequence of scalar shifts.
29280   // For v32i8 cases, it might be quicker to split/extend to vXi16 shifts.
29281   if (Opc == ISD::SHL && !(VT == MVT::v32i8 && (Subtarget.hasXOP() ||
29282                                                 Subtarget.canExtendTo512BW())))
29283     if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
29284       return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
29285 
29286   // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
29287   // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
29288   if (Opc == ISD::SRL && ConstantAmt &&
29289       (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
29290     SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
29291     SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
29292     if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
29293       SDValue Zero = DAG.getConstant(0, dl, VT);
29294       SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
29295       SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
29296       return DAG.getSelect(dl, VT, ZAmt, R, Res);
29297     }
29298   }
29299 
29300   // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
29301   // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
29302   // TODO: Special case handling for shift by 0/1, really we can afford either
29303   // of these cases in pre-SSE41/XOP/AVX512 but not both.
29304   if (Opc == ISD::SRA && ConstantAmt &&
29305       (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
29306       ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
29307         !Subtarget.hasAVX512()) ||
29308        DAG.isKnownNeverZero(Amt))) {
29309     SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
29310     SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
29311     if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
29312       SDValue Amt0 =
29313           DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
29314       SDValue Amt1 =
29315           DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
29316       SDValue Sra1 =
29317           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
29318       SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
29319       Res = DAG.getSelect(dl, VT, Amt0, R, Res);
29320       return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
29321     }
29322   }
29323 
29324   // v4i32 Non Uniform Shifts.
29325   // If the shift amount is constant we can shift each lane using the SSE2
29326   // immediate shifts, else we need to zero-extend each lane to the lower i64
29327   // and shift using the SSE2 variable shifts.
29328   // The separate results can then be blended together.
29329   if (VT == MVT::v4i32) {
29330     SDValue Amt0, Amt1, Amt2, Amt3;
29331     if (ConstantAmt) {
29332       Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
29333       Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
29334       Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
29335       Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
29336     } else {
29337       // The SSE2 shifts use the lower i64 as the same shift amount for
29338       // all lanes and the upper i64 is ignored. On AVX we're better off
29339       // just zero-extending, but for SSE just duplicating the top 16 bits is
29340       // cheaper and has the same effect for out-of-range values.
29341       if (Subtarget.hasAVX()) {
29342         SDValue Z = DAG.getConstant(0, dl, VT);
29343         Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
29344         Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
29345         Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
29346         Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
29347       } else {
29348         SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
29349         SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
29350                                              {4, 5, 6, 7, -1, -1, -1, -1});
29351         SDValue Msk02 = getV4X86ShuffleImm8ForMask({0, 1, 1, 1}, dl, DAG);
29352         SDValue Msk13 = getV4X86ShuffleImm8ForMask({2, 3, 3, 3}, dl, DAG);
29353         Amt0 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk02);
29354         Amt1 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt01, Msk13);
29355         Amt2 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk02);
29356         Amt3 = DAG.getNode(X86ISD::PSHUFLW, dl, MVT::v8i16, Amt23, Msk13);
29357       }
29358     }
29359 
29360     unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
29361     SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
29362     SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
29363     SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
29364     SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
29365 
29366     // Merge the shifted lane results optimally with/without PBLENDW.
29367     // TODO - ideally shuffle combining would handle this.
29368     if (Subtarget.hasSSE41()) {
29369       SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
29370       SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
29371       return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
29372     }
29373     SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
29374     SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
29375     return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
29376   }
29377 
29378   // It's worth extending once and using the vXi16/vXi32 shifts for smaller
29379   // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
29380   // make the existing SSE solution better.
29381   // NOTE: We honor the preferred vector width before promoting to 512 bits.
29382   if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
29383       (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
29384       (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
29385       (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
29386       (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
29387     assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
29388            "Unexpected vector type");
29389     MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
29390     MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
29391     unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
29392     R = DAG.getNode(ExtOpc, dl, ExtVT, R);
29393     Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
29394     return DAG.getNode(ISD::TRUNCATE, dl, VT,
29395                        DAG.getNode(Opc, dl, ExtVT, R, Amt));
29396   }
29397 
29398   // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
29399   // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
29400   if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
29401       (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
29402        (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
29403       !Subtarget.hasXOP()) {
29404     int NumElts = VT.getVectorNumElements();
29405     SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
29406 
29407     // Extend constant shift amount to vXi16 (it doesn't matter if the type
29408     // isn't legal).
29409     MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
29410     Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
29411     Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
29412     Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
29413     assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
29414            "Constant build vector expected");
29415 
29416     if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
29417       bool IsSigned = Opc == ISD::SRA;
29418       R = DAG.getExtOrTrunc(IsSigned, R, dl, ExVT);
29419       R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
29420       R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
29421       return DAG.getZExtOrTrunc(R, dl, VT);
29422     }
29423 
29424     SmallVector<SDValue, 16> LoAmt, HiAmt;
29425     for (int i = 0; i != NumElts; i += 16) {
29426       for (int j = 0; j != 8; ++j) {
29427         LoAmt.push_back(Amt.getOperand(i + j));
29428         HiAmt.push_back(Amt.getOperand(i + j + 8));
29429       }
29430     }
29431 
29432     MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
29433     SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
29434     SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
29435 
29436     SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
29437     SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
29438     LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
29439     HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
29440     LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
29441     HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
29442     LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
29443     HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
29444     return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
29445   }
29446 
29447   if (VT == MVT::v16i8 ||
29448       (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
29449       (VT == MVT::v64i8 && Subtarget.hasBWI())) {
29450     MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
29451 
29452     auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
29453       if (VT.is512BitVector()) {
29454         // On AVX512BW targets we make use of the fact that VSELECT lowers
29455         // to a masked blend which selects bytes based just on the sign bit
29456         // extracted to a mask.
29457         MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
29458         V0 = DAG.getBitcast(VT, V0);
29459         V1 = DAG.getBitcast(VT, V1);
29460         Sel = DAG.getBitcast(VT, Sel);
29461         Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
29462                            ISD::SETGT);
29463         return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
29464       } else if (Subtarget.hasSSE41()) {
29465         // On SSE41 targets we can use PBLENDVB which selects bytes based just
29466         // on the sign bit.
29467         V0 = DAG.getBitcast(VT, V0);
29468         V1 = DAG.getBitcast(VT, V1);
29469         Sel = DAG.getBitcast(VT, Sel);
29470         return DAG.getBitcast(SelVT,
29471                               DAG.getNode(X86ISD::BLENDV, dl, VT, Sel, V0, V1));
29472       }
29473       // On pre-SSE41 targets we test for the sign bit by comparing to
29474       // zero - a negative value will set all bits of the lanes to true
29475       // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
29476       SDValue Z = DAG.getConstant(0, dl, SelVT);
29477       SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
29478       return DAG.getSelect(dl, SelVT, C, V0, V1);
29479     };
29480 
29481     // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
29482     // We can safely do this using i16 shifts as we're only interested in
29483     // the 3 lower bits of each byte.
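          // After the shift the MSB of each byte holds bit 2 of the amount;
          // each 'a += a' below moves the next lower amount bit into the MSB,
          // so successive blends apply shifts of 4, 2 and 1 where set.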
29484     Amt = DAG.getBitcast(ExtVT, Amt);
29485     Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
29486     Amt = DAG.getBitcast(VT, Amt);
29487 
29488     if (Opc == ISD::SHL || Opc == ISD::SRL) {
29489       // r = VSELECT(r, shift(r, 4), a);
29490       SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
29491       R = SignBitSelect(VT, Amt, M, R);
29492 
29493       // a += a
29494       Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29495 
29496       // r = VSELECT(r, shift(r, 2), a);
29497       M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
29498       R = SignBitSelect(VT, Amt, M, R);
29499 
29500       // a += a
29501       Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29502 
29503       // return VSELECT(r, shift(r, 1), a);
29504       M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
29505       R = SignBitSelect(VT, Amt, M, R);
29506       return R;
29507     }
29508 
29509     if (Opc == ISD::SRA) {
29510       // For SRA we need to unpack each byte to the higher byte of an i16
29511       // vector so we can correctly sign extend. We don't care what happens
29512       // to the lower byte.
29513       SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
29514       SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
29515       SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
29516       SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
29517       ALo = DAG.getBitcast(ExtVT, ALo);
29518       AHi = DAG.getBitcast(ExtVT, AHi);
29519       RLo = DAG.getBitcast(ExtVT, RLo);
29520       RHi = DAG.getBitcast(ExtVT, RHi);
29521 
29522       // r = VSELECT(r, shift(r, 4), a);
29523       SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
29524       SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
29525       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
29526       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
29527 
29528       // a += a
29529       ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
29530       AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
29531 
29532       // r = VSELECT(r, shift(r, 2), a);
29533       MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
29534       MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
29535       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
29536       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
29537 
29538       // a += a
29539       ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
29540       AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
29541 
29542       // r = VSELECT(r, shift(r, 1), a);
29543       MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
29544       MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
29545       RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
29546       RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
29547 
29548       // Logical shift the result back to the lower byte, leaving a zero upper
29549       // byte meaning that we can safely pack with PACKUSWB.
29550       RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
29551       RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
29552       return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
29553     }
29554   }
29555 
29556   if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
29557     MVT ExtVT = MVT::v8i32;
29558     SDValue Z = DAG.getConstant(0, dl, VT);
29559     SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
29560     SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
29561     SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
29562     SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
29563     ALo = DAG.getBitcast(ExtVT, ALo);
29564     AHi = DAG.getBitcast(ExtVT, AHi);
29565     RLo = DAG.getBitcast(ExtVT, RLo);
29566     RHi = DAG.getBitcast(ExtVT, RHi);
29567     SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
29568     SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
29569     Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
29570     Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
29571     return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
29572   }
29573 
29574   if (VT == MVT::v8i16) {
29575     // If we have a constant shift amount, the non-SSE41 path is best as
29576     // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
29577     bool UseSSE41 = Subtarget.hasSSE41() &&
29578                     !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
29579 
29580     auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
29581       // On SSE41 targets we can use PBLENDVB which selects bytes based just on
29582       // the sign bit.
29583       if (UseSSE41) {
29584         MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
29585         V0 = DAG.getBitcast(ExtVT, V0);
29586         V1 = DAG.getBitcast(ExtVT, V1);
29587         Sel = DAG.getBitcast(ExtVT, Sel);
29588         return DAG.getBitcast(
29589             VT, DAG.getNode(X86ISD::BLENDV, dl, ExtVT, Sel, V0, V1));
29590       }
29591       // On pre-SSE41 targets we splat the sign bit - a negative value will
29592       // set all bits of the lanes to true and VSELECT uses that in
29593       // its OR(AND(V0,C),AND(V1,~C)) lowering.
29594       SDValue C =
29595           getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
29596       return DAG.getSelect(dl, VT, C, V0, V1);
29597     };
29598 
29599     // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
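          // The top bit of each lane then holds bit 3 of the shift amount;
          // successive 'a += a' steps expose bits 2, 1 and 0, selecting shifts
          // of 8, 4, 2 and 1 respectively.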
29600     if (UseSSE41) {
29601       // On SSE41 targets we need to replicate the shift mask in both
29602       // bytes for PBLENDVB.
29603       Amt = DAG.getNode(
29604           ISD::OR, dl, VT,
29605           getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
29606           getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
29607     } else {
29608       Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
29609     }
29610 
29611     // r = VSELECT(r, shift(r, 8), a);
29612     SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
29613     R = SignBitSelect(Amt, M, R);
29614 
29615     // a += a
29616     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29617 
29618     // r = VSELECT(r, shift(r, 4), a);
29619     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
29620     R = SignBitSelect(Amt, M, R);
29621 
29622     // a += a
29623     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29624 
29625     // r = VSELECT(r, shift(r, 2), a);
29626     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
29627     R = SignBitSelect(Amt, M, R);
29628 
29629     // a += a
29630     Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
29631 
29632     // return VSELECT(r, shift(r, 1), a);
29633     M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
29634     R = SignBitSelect(Amt, M, R);
29635     return R;
29636   }
29637 
29638   // Decompose 256-bit shifts into 128-bit shifts.
29639   if (VT.is256BitVector())
29640     return splitVectorIntBinary(Op, DAG);
29641 
29642   if (VT == MVT::v32i16 || VT == MVT::v64i8)
29643     return splitVectorIntBinary(Op, DAG);
29644 
29645   return SDValue();
29646 }
29647 
29648 static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
29649                                 SelectionDAG &DAG) {
29650   MVT VT = Op.getSimpleValueType();
29651   assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
29652          "Unexpected funnel shift opcode!");
29653 
29654   SDLoc DL(Op);
29655   SDValue Op0 = Op.getOperand(0);
29656   SDValue Op1 = Op.getOperand(1);
29657   SDValue Amt = Op.getOperand(2);
29658   unsigned EltSizeInBits = VT.getScalarSizeInBits();
29659   bool IsFSHR = Op.getOpcode() == ISD::FSHR;
29660 
29661   if (VT.isVector()) {
29662     APInt APIntShiftAmt;
29663     bool IsCstSplat = X86::isConstantSplat(Amt, APIntShiftAmt);
29664 
29665     if (Subtarget.hasVBMI2() && EltSizeInBits > 8) {
29666       if (IsFSHR)
29667         std::swap(Op0, Op1);
29668 
29669       if (IsCstSplat) {
29670         uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
29671         SDValue Imm = DAG.getTargetConstant(ShiftAmt, DL, MVT::i8);
29672         return getAVX512Node(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT,
29673                              {Op0, Op1, Imm}, DAG, Subtarget);
29674       }
29675       return getAVX512Node(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
29676                            {Op0, Op1, Amt}, DAG, Subtarget);
29677     }
29678     assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 ||
29679             VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v32i16 ||
29680             VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) &&
29681            "Unexpected funnel shift type!");
29682 
29683     // fshl(x,y,z) -> unpack(y,x) << (z & (bw-1))) >> bw.
29684     // fshr(x,y,z) -> unpack(y,x) >> (z & (bw-1))).
29685     if (IsCstSplat) {
29686       // TODO: Can't use generic expansion as UNDEF amt elements can be
29687       // converted to other values when folded to shift amounts, losing the
29688       // splat.
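            // e.g. a splat fshl by 5 on vXi32 becomes (x << 5) | (y >> 27), and
            // a splat fshr by 5 becomes (x << 27) | (y >> 5).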
29689       uint64_t ShiftAmt = APIntShiftAmt.urem(EltSizeInBits);
29690       uint64_t ShXAmt = IsFSHR ? (EltSizeInBits - ShiftAmt) : ShiftAmt;
29691       uint64_t ShYAmt = IsFSHR ? ShiftAmt : (EltSizeInBits - ShiftAmt);
29692       SDValue ShX = DAG.getNode(ISD::SHL, DL, VT, Op0,
29693                                 DAG.getShiftAmountConstant(ShXAmt, VT, DL));
29694       SDValue ShY = DAG.getNode(ISD::SRL, DL, VT, Op1,
29695                                 DAG.getShiftAmountConstant(ShYAmt, VT, DL));
29696       return DAG.getNode(ISD::OR, DL, VT, ShX, ShY);
29697     }
29698 
29699     SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
29700     SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
29701     bool IsCst = ISD::isBuildVectorOfConstantSDNodes(AmtMod.getNode());
29702 
29703     // Constant vXi16 funnel shifts can be efficiently handled by default.
29704     if (IsCst && EltSizeInBits == 16)
29705       return SDValue();
29706 
29707     unsigned ShiftOpc = IsFSHR ? ISD::SRL : ISD::SHL;
29708     unsigned NumElts = VT.getVectorNumElements();
29709     MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
29710     MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
29711 
29712     // Split 256-bit integers on XOP/pre-AVX2 targets.
29713     // Split 512-bit integers on non 512-bit BWI targets.
29714     if ((VT.is256BitVector() && ((Subtarget.hasXOP() && EltSizeInBits < 16) ||
29715                                  !Subtarget.hasAVX2())) ||
29716         (VT.is512BitVector() && !Subtarget.useBWIRegs() &&
29717          EltSizeInBits < 32)) {
29718       // Pre-mask the amount modulo using the wider vector.
29719       Op = DAG.getNode(Op.getOpcode(), DL, VT, Op0, Op1, AmtMod);
29720       return splitVectorOp(Op, DAG);
29721     }
29722 
29723     // Attempt to fold scalar shift as unpack(y,x) << zext(splat(z))
29724     if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, ShiftOpc)) {
29725       int ScalarAmtIdx = -1;
29726       if (SDValue ScalarAmt = DAG.getSplatSourceVector(AmtMod, ScalarAmtIdx)) {
29727         // Uniform vXi16 funnel shifts can be efficiently handled by default.
29728         if (EltSizeInBits == 16)
29729           return SDValue();
29730 
29731         SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
29732         SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
29733         Lo = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Lo, ScalarAmt,
29734                                  ScalarAmtIdx, Subtarget, DAG);
29735         Hi = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Hi, ScalarAmt,
29736                                  ScalarAmtIdx, Subtarget, DAG);
29737         return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
29738       }
29739     }
29740 
29741     MVT WideSVT = MVT::getIntegerVT(
29742         std::min<unsigned>(EltSizeInBits * 2, Subtarget.hasBWI() ? 16 : 32));
29743     MVT WideVT = MVT::getVectorVT(WideSVT, NumElts);
29744 
29745     // If per-element shifts are legal, fall back to generic expansion.
29746     if (supportedVectorVarShift(VT, Subtarget, ShiftOpc) || Subtarget.hasXOP())
29747       return SDValue();
29748 
29749     // Attempt to fold as:
29750     // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
29751     // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
29752     if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
29753         supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
29754       Op0 = DAG.getNode(ISD::ANY_EXTEND, DL, WideVT, Op0);
29755       Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Op1);
29756       AmtMod = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
29757       Op0 = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, Op0,
29758                                        EltSizeInBits, DAG);
29759       SDValue Res = DAG.getNode(ISD::OR, DL, WideVT, Op0, Op1);
29760       Res = DAG.getNode(ShiftOpc, DL, WideVT, Res, AmtMod);
29761       if (!IsFSHR)
29762         Res = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, Res,
29763                                          EltSizeInBits, DAG);
29764       return DAG.getNode(ISD::TRUNCATE, DL, VT, Res);
29765     }
29766 
29767     // Attempt to fold per-element (ExtVT) shift as unpack(y,x) << zext(z)
29768     if (((IsCst || !Subtarget.hasAVX512()) && !IsFSHR && EltSizeInBits <= 16) ||
29769         supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
29770       SDValue Z = DAG.getConstant(0, DL, VT);
29771       SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
29772       SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
29773       SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
29774       SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
29775       SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
29776       SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
29777       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
29778     }
29779 
29780     // Fall back to generic expansion.
29781     return SDValue();
29782   }
29783   assert(
29784       (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
29785       "Unexpected funnel shift type!");
29786 
29787   // Expand slow SHLD/SHRD cases if we are not optimizing for size.
29788   bool OptForSize = DAG.shouldOptForSize();
29789   bool ExpandFunnel = !OptForSize && Subtarget.isSHLDSlow();
29790 
29791   // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
29792   // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
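        // e.g. for i8: ((zext(x) << 8) | zext(y)) is shifted by (z & 7) as an
        // i32 and the relevant byte of the result is extracted.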
29793   if ((VT == MVT::i8 || (ExpandFunnel && VT == MVT::i16)) &&
29794       !isa<ConstantSDNode>(Amt)) {
29795     SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, Amt.getValueType());
29796     SDValue HiShift = DAG.getConstant(EltSizeInBits, DL, Amt.getValueType());
29797     Op0 = DAG.getAnyExtOrTrunc(Op0, DL, MVT::i32);
29798     Op1 = DAG.getZExtOrTrunc(Op1, DL, MVT::i32);
29799     Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt, Mask);
29800     SDValue Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Op0, HiShift);
29801     Res = DAG.getNode(ISD::OR, DL, MVT::i32, Res, Op1);
29802     if (IsFSHR) {
29803       Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, Amt);
29804     } else {
29805       Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Res, Amt);
29806       Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, HiShift);
29807     }
29808     return DAG.getZExtOrTrunc(Res, DL, VT);
29809   }
29810 
29811   if (VT == MVT::i8 || ExpandFunnel)
29812     return SDValue();
29813 
29814   // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
29815   if (VT == MVT::i16) {
29816     Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
29817                       DAG.getConstant(15, DL, Amt.getValueType()));
29818     unsigned FSHOp = (IsFSHR ? X86ISD::FSHR : X86ISD::FSHL);
29819     return DAG.getNode(FSHOp, DL, VT, Op0, Op1, Amt);
29820   }
29821 
29822   return Op;
29823 }
29824 
29825 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
29826                            SelectionDAG &DAG) {
29827   MVT VT = Op.getSimpleValueType();
29828   assert(VT.isVector() && "Custom lowering only for vector rotates!");
29829 
29830   SDLoc DL(Op);
29831   SDValue R = Op.getOperand(0);
29832   SDValue Amt = Op.getOperand(1);
29833   unsigned Opcode = Op.getOpcode();
29834   unsigned EltSizeInBits = VT.getScalarSizeInBits();
29835   int NumElts = VT.getVectorNumElements();
29836   bool IsROTL = Opcode == ISD::ROTL;
29837 
29838   // Check for constant splat rotation amount.
29839   APInt CstSplatValue;
29840   bool IsCstSplat = X86::isConstantSplat(Amt, CstSplatValue);
29841 
29842   // Check for splat rotate by zero.
29843   if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)
29844     return R;
29845 
29846   // AVX512 implicitly uses modulo rotation amounts.
29847   if ((Subtarget.hasVLX() ||
29848        (Subtarget.hasAVX512() && Subtarget.hasEVEX512())) &&
29849       32 <= EltSizeInBits) {
29850     // Attempt to rotate by immediate.
29851     if (IsCstSplat) {
29852       unsigned RotOpc = IsROTL ? X86ISD::VROTLI : X86ISD::VROTRI;
29853       uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
29854       return DAG.getNode(RotOpc, DL, VT, R,
29855                          DAG.getTargetConstant(RotAmt, DL, MVT::i8));
29856     }
29857 
29858     // Else, fall-back on VPROLV/VPRORV.
29859     return Op;
29860   }
29861 
29862   // AVX512 VBMI2 vXi16 - lower to funnel shifts.
29863   if (Subtarget.hasVBMI2() && 16 == EltSizeInBits) {
29864     unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
29865     return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
29866   }
29867 
29868   SDValue Z = DAG.getConstant(0, DL, VT);
29869 
29870   if (!IsROTL) {
29871     // If the ISD::ROTR amount is constant, we're always better converting to
29872     // ISD::ROTL.
29873     if (SDValue NegAmt = DAG.FoldConstantArithmetic(ISD::SUB, DL, VT, {Z, Amt}))
29874       return DAG.getNode(ISD::ROTL, DL, VT, R, NegAmt);
29875 
29876     // XOP targets always prefer ISD::ROTL.
29877     if (Subtarget.hasXOP())
29878       return DAG.getNode(ISD::ROTL, DL, VT, R,
29879                          DAG.getNode(ISD::SUB, DL, VT, Z, Amt));
29880   }
29881 
29882   // Split 256-bit integers on XOP/pre-AVX2 targets.
29883   if (VT.is256BitVector() && (Subtarget.hasXOP() || !Subtarget.hasAVX2()))
29884     return splitVectorIntBinary(Op, DAG);
29885 
29886   // XOP has 128-bit vector variable + immediate rotates.
29887   // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
29888   // XOP implicitly uses modulo rotation amounts.
29889   if (Subtarget.hasXOP()) {
29890     assert(IsROTL && "Only ROTL expected");
29891     assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
29892 
29893     // Attempt to rotate by immediate.
29894     if (IsCstSplat) {
29895       uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
29896       return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
29897                          DAG.getTargetConstant(RotAmt, DL, MVT::i8));
29898     }
29899 
29900     // Use general rotate by variable (per-element).
29901     return Op;
29902   }
29903 
29904   // Rotate by a uniform constant - expand back to shifts.
29905   // TODO: Can't use generic expansion as UNDEF amt elements can be converted
29906   // to other values when folded to shift amounts, losing the splat.
29907   if (IsCstSplat) {
29908     uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
29909     uint64_t ShlAmt = IsROTL ? RotAmt : (EltSizeInBits - RotAmt);
29910     uint64_t SrlAmt = IsROTL ? (EltSizeInBits - RotAmt) : RotAmt;
29911     SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, R,
29912                               DAG.getShiftAmountConstant(ShlAmt, VT, DL));
29913     SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, R,
29914                               DAG.getShiftAmountConstant(SrlAmt, VT, DL));
29915     return DAG.getNode(ISD::OR, DL, VT, Shl, Srl);
29916   }
29917 
29918   // Split 512-bit integers on non 512-bit BWI targets.
29919   if (VT.is512BitVector() && !Subtarget.useBWIRegs())
29920     return splitVectorIntBinary(Op, DAG);
29921 
29922   assert(
29923       (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
29924        ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
29925         Subtarget.hasAVX2()) ||
29926        ((VT == MVT::v32i16 || VT == MVT::v64i8) && Subtarget.useBWIRegs())) &&
29927       "Only vXi32/vXi16/vXi8 vector rotates supported");
29928 
29929   MVT ExtSVT = MVT::getIntegerVT(2 * EltSizeInBits);
29930   MVT ExtVT = MVT::getVectorVT(ExtSVT, NumElts / 2);
29931 
29932   SDValue AmtMask = DAG.getConstant(EltSizeInBits - 1, DL, VT);
29933   SDValue AmtMod = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
29934 
29935   // Attempt to fold as unpack(x,x) << zext(splat(y)):
29936   // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
29937   // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
29938   if (EltSizeInBits == 8 || EltSizeInBits == 16 || EltSizeInBits == 32) {
29939     int BaseRotAmtIdx = -1;
29940     if (SDValue BaseRotAmt = DAG.getSplatSourceVector(AmtMod, BaseRotAmtIdx)) {
29941       if (EltSizeInBits == 16 && Subtarget.hasSSE41()) {
29942         unsigned FunnelOpc = IsROTL ? ISD::FSHL : ISD::FSHR;
29943         return DAG.getNode(FunnelOpc, DL, VT, R, R, Amt);
29944       }
29945       unsigned ShiftX86Opc = IsROTL ? X86ISD::VSHLI : X86ISD::VSRLI;
29946       SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
29947       SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
29948       Lo = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Lo, BaseRotAmt,
29949                                BaseRotAmtIdx, Subtarget, DAG);
29950       Hi = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Hi, BaseRotAmt,
29951                                BaseRotAmtIdx, Subtarget, DAG);
29952       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
29953     }
29954   }
29955 
29956   bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
29957   unsigned ShiftOpc = IsROTL ? ISD::SHL : ISD::SRL;
29958 
29959   // Attempt to fold as unpack(x,x) << zext(y):
29960   // rotl(x,y) -> (unpack(x,x) << (y & (bw-1))) >> bw.
29961   // rotr(x,y) -> (unpack(x,x) >> (y & (bw-1))).
29962   // Const vXi16/vXi32 are excluded in favor of MUL-based lowering.
29963   if (!(ConstantAmt && EltSizeInBits != 8) &&
29964       !supportedVectorVarShift(VT, Subtarget, ShiftOpc) &&
29965       (ConstantAmt || supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc))) {
29966     SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
29967     SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
29968     SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
29969     SDValue AHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, AmtMod, Z));
29970     SDValue Lo = DAG.getNode(ShiftOpc, DL, ExtVT, RLo, ALo);
29971     SDValue Hi = DAG.getNode(ShiftOpc, DL, ExtVT, RHi, AHi);
29972     return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
29973   }
29974 
29975   // v16i8/v32i8/v64i8: Split rotation into rot4/rot2/rot1 stages and select by
29976   // the amount bit.
29977   // TODO: We're doing nothing here that we couldn't do for funnel shifts.
29978   if (EltSizeInBits == 8) {
29979     MVT WideVT =
29980         MVT::getVectorVT(Subtarget.hasBWI() ? MVT::i16 : MVT::i32, NumElts);
29981 
29982     // Attempt to fold as:
29983     // rotl(x,y) -> (((aext(x) << bw) | zext(x)) << (y & (bw-1))) >> bw.
29984     // rotr(x,y) -> (((aext(x) << bw) | zext(x)) >> (y & (bw-1))).
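        // e.g. rotl for a byte in a 16-bit lane: ((x | (x << 8)) << (y & 7)) >> 8;
        // for rotr the widened value is shifted right and the low byte is kept.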
29985     if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
29986         supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
29987       // If we're rotating by constant, just use default promotion.
29988       if (ConstantAmt)
29989         return SDValue();
29990       // See if we can perform this by widening to vXi16 or vXi32.
29991       R = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, R);
29992       R = DAG.getNode(
29993           ISD::OR, DL, WideVT, R,
29994           getTargetVShiftByConstNode(X86ISD::VSHLI, DL, WideVT, R, 8, DAG));
29995       Amt = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, AmtMod);
29996       R = DAG.getNode(ShiftOpc, DL, WideVT, R, Amt);
29997       if (IsROTL)
29998         R = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, R, 8, DAG);
29999       return DAG.getNode(ISD::TRUNCATE, DL, VT, R);
30000     }
30001 
30002     // We don't need ModuloAmt here as we just peek at individual bits.
30003     auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
30004       if (Subtarget.hasSSE41()) {
30005         // On SSE41 targets we can use PBLENDVB which selects bytes based just
30006         // on the sign bit.
30007         V0 = DAG.getBitcast(VT, V0);
30008         V1 = DAG.getBitcast(VT, V1);
30009         Sel = DAG.getBitcast(VT, Sel);
30010         return DAG.getBitcast(SelVT,
30011                               DAG.getNode(X86ISD::BLENDV, DL, VT, Sel, V0, V1));
30012       }
30013       // On pre-SSE41 targets we test for the sign bit by comparing to
30014       // zero - a negative value will set all bits of the lanes to true
30015       // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
30016       SDValue Z = DAG.getConstant(0, DL, SelVT);
30017       SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
30018       return DAG.getSelect(DL, SelVT, C, V0, V1);
30019     };
30020 
30021     // ISD::ROTR is currently only profitable on AVX512 targets with VPTERNLOG.
30022     if (!IsROTL && !useVPTERNLOG(Subtarget, VT)) {
30023       Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
30024       IsROTL = true;
30025     }
30026 
30027     unsigned ShiftLHS = IsROTL ? ISD::SHL : ISD::SRL;
30028     unsigned ShiftRHS = IsROTL ? ISD::SRL : ISD::SHL;
30029 
30030     // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
30031     // We can safely do this using i16 shifts as we're only interested in
30032     // the 3 lower bits of each byte.
30033     Amt = DAG.getBitcast(ExtVT, Amt);
30034     Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
30035     Amt = DAG.getBitcast(VT, Amt);
30036 
30037     // r = VSELECT(r, rot(r, 4), a);
30038     SDValue M;
30039     M = DAG.getNode(
30040         ISD::OR, DL, VT,
30041         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(4, DL, VT)),
30042         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(4, DL, VT)));
30043     R = SignBitSelect(VT, Amt, M, R);
30044 
30045     // a += a
30046     Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
30047 
30048     // r = VSELECT(r, rot(r, 2), a);
30049     M = DAG.getNode(
30050         ISD::OR, DL, VT,
30051         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(2, DL, VT)),
30052         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(6, DL, VT)));
30053     R = SignBitSelect(VT, Amt, M, R);
30054 
30055     // a += a
30056     Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
30057 
30058     // return VSELECT(r, rot(r, 1), a);
30059     M = DAG.getNode(
30060         ISD::OR, DL, VT,
30061         DAG.getNode(ShiftLHS, DL, VT, R, DAG.getConstant(1, DL, VT)),
30062         DAG.getNode(ShiftRHS, DL, VT, R, DAG.getConstant(7, DL, VT)));
30063     return SignBitSelect(VT, Amt, M, R);
30064   }
30065 
30066   bool IsSplatAmt = DAG.isSplatValue(Amt);
30067   bool LegalVarShifts = supportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
30068                         supportedVectorVarShift(VT, Subtarget, ISD::SRL);
30069 
30070   // Fallback for splats + all supported variable shifts.
30071   // Fallback for non-constant AVX2 vXi16 as well.
30072   if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
30073     Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
30074     SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
30075     AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
30076     SDValue SHL = DAG.getNode(IsROTL ? ISD::SHL : ISD::SRL, DL, VT, R, Amt);
30077     SDValue SRL = DAG.getNode(IsROTL ? ISD::SRL : ISD::SHL, DL, VT, R, AmtR);
30078     return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
30079   }
30080 
30081   // Everything below assumes ISD::ROTL.
30082   if (!IsROTL) {
30083     Amt = DAG.getNode(ISD::SUB, DL, VT, Z, Amt);
30084     IsROTL = true;
30085   }
30086 
30087   // ISD::ROT* uses modulo rotate amounts.
30088   Amt = DAG.getNode(ISD::AND, DL, VT, Amt, AmtMask);
30089 
30090   assert(IsROTL && "Only ROTL supported");
30091 
30092   // As with shifts, attempt to convert the rotation amount to a multiplication
30093   // factor; otherwise fall back to general expansion.
30094   SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
30095   if (!Scale)
30096     return SDValue();
30097 
30098   // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
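        // e.g. for a rotate-left by s: MUL gives (x << s) and MULHU gives
        // (x >> (16 - s)) (zero for s == 0), so OR'ing them is the rotate.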
30099   if (EltSizeInBits == 16) {
30100     SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
30101     SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
30102     return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
30103   }
30104 
30105   // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
30106   // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
30107   // that can then be OR'd with the lower 32-bits.
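        // e.g. PMULUDQ of the zero-extended lane x by 2^s yields a 64-bit product
        // whose low 32 bits are (x << s) and whose high 32 bits are (x >> (32 - s))
        // (zero when s == 0); OR'ing the two halves produces rotl(x, s).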
30108   assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
30109   static const int OddMask[] = {1, -1, 3, -1};
30110   SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
30111   SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
30112 
30113   SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
30114                               DAG.getBitcast(MVT::v2i64, R),
30115                               DAG.getBitcast(MVT::v2i64, Scale));
30116   SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
30117                               DAG.getBitcast(MVT::v2i64, R13),
30118                               DAG.getBitcast(MVT::v2i64, Scale13));
30119   Res02 = DAG.getBitcast(VT, Res02);
30120   Res13 = DAG.getBitcast(VT, Res13);
30121 
30122   return DAG.getNode(ISD::OR, DL, VT,
30123                      DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
30124                      DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
30125 }
30126 
30127 /// Returns true if the operand type is exactly twice the native width, and
30128 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
30129 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
30130 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
30131 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
30132   unsigned OpWidth = MemType->getPrimitiveSizeInBits();
30133 
30134   if (OpWidth == 64)
30135     return Subtarget.canUseCMPXCHG8B() && !Subtarget.is64Bit();
30136   if (OpWidth == 128)
30137     return Subtarget.canUseCMPXCHG16B();
30138 
30139   return false;
30140 }
30141 
30142 TargetLoweringBase::AtomicExpansionKind
30143 X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
30144   Type *MemType = SI->getValueOperand()->getType();
30145 
30146   bool NoImplicitFloatOps =
30147       SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
30148   if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
30149       !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
30150       (Subtarget.hasSSE1() || Subtarget.hasX87()))
30151     return AtomicExpansionKind::None;
30152 
30153   return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
30154                                  : AtomicExpansionKind::None;
30155 }
30156 
30157 // Note: this turns large loads into lock cmpxchg8b/16b.
30158 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
30159 TargetLowering::AtomicExpansionKind
30160 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
30161   Type *MemType = LI->getType();
30162 
30163   // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled,
30164   // we can use movq to do the load. If we have X87 we can load into an
30165   // 80-bit X87 register and store it to a stack temporary.
30166   bool NoImplicitFloatOps =
30167       LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
30168   if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
30169       !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
30170       (Subtarget.hasSSE1() || Subtarget.hasX87()))
30171     return AtomicExpansionKind::None;
30172 
30173   return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
30174                                  : AtomicExpansionKind::None;
30175 }
30176 
30177 enum BitTestKind : unsigned {
30178   UndefBit,
30179   ConstantBit,
30180   NotConstantBit,
30181   ShiftBit,
30182   NotShiftBit
30183 };
30184 
30185 static std::pair<Value *, BitTestKind> FindSingleBitChange(Value *V) {
30186   using namespace llvm::PatternMatch;
30187   BitTestKind BTK = UndefBit;
30188   auto *C = dyn_cast<ConstantInt>(V);
30189   if (C) {
30190     // Check if V is a power of 2 or the NOT of a power of 2.
30191     if (isPowerOf2_64(C->getZExtValue()))
30192       BTK = ConstantBit;
30193     else if (isPowerOf2_64((~C->getValue()).getZExtValue()))
30194       BTK = NotConstantBit;
30195     return {V, BTK};
30196   }
30197 
30198   // Check if V is some power of 2 pattern known to be non-zero
30199   auto *I = dyn_cast<Instruction>(V);
30200   if (I) {
30201     bool Not = false;
30202     // Check if we have a NOT
30203     Value *PeekI;
30204     if (match(I, m_c_Xor(m_Value(PeekI), m_AllOnes())) ||
30205         match(I, m_Sub(m_AllOnes(), m_Value(PeekI)))) {
30206       Not = true;
30207       I = dyn_cast<Instruction>(PeekI);
30208 
30209       // If I is constant, it will fold and we can evaluate later. If it's an
30210       // argument or something of that nature, we can't analyze.
30211       if (I == nullptr)
30212         return {nullptr, UndefBit};
30213     }
30214     // We can only use 1 << X without more sophisticated analysis. C << X where
30215     // C is a power of 2 but not 1 can result in zero which cannot be translated
30216     // to bittest. Likewise any C >> X (either arith or logical) can be zero.
30217     if (I->getOpcode() == Instruction::Shl) {
30218       // Todo(1): The cmpxchg case is pretty costly so matching `BLSI(X)`, `X &
30219       // -X` and some other provable power of 2 patterns that we can use CTZ on
30220       // may be profitable.
30221       // Todo(2): It may be possible in some cases to prove that Shl(C, X) is
30222       // non-zero even where C != 1. Likewise LShr(C, X) and AShr(C, X) may also
30223       // be provably a non-zero power of 2.
30224       // Todo(3): ROTL and ROTR patterns on a power of 2 C should also be
30225       // transformable to bittest.
30226       auto *ShiftVal = dyn_cast<ConstantInt>(I->getOperand(0));
30227       if (!ShiftVal)
30228         return {nullptr, UndefBit};
30229       if (ShiftVal->equalsInt(1))
30230         BTK = Not ? NotShiftBit : ShiftBit;
30231 
30232       if (BTK == UndefBit)
30233         return {nullptr, UndefBit};
30234 
30235       Value *BitV = I->getOperand(1);
30236 
30237       Value *AndOp;
30238       const APInt *AndC;
30239       if (match(BitV, m_c_And(m_Value(AndOp), m_APInt(AndC)))) {
30240         // Read past a shiftmask instruction to find the count.
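              // e.g. for an i32 operation '1 << (N & 31)' sets the same bit as
              // '1 << N', so use N itself as the bit position (it is re-masked
              // when the BT{S|R|C} is emitted).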
30241         if (*AndC == (I->getType()->getPrimitiveSizeInBits() - 1))
30242           BitV = AndOp;
30243       }
30244       return {BitV, BTK};
30245     }
30246   }
30247   return {nullptr, UndefBit};
30248 }
30249 
30250 TargetLowering::AtomicExpansionKind
30251 X86TargetLowering::shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const {
30252   using namespace llvm::PatternMatch;
30253   // If the atomicrmw's result isn't actually used, we can just add a "lock"
30254   // prefix to a normal instruction for these operations.
30255   if (AI->use_empty())
30256     return AtomicExpansionKind::None;
30257 
30258   if (AI->getOperation() == AtomicRMWInst::Xor) {
30259     // A ^ SignBit -> A + SignBit. This allows us to use `xadd` which is
30260     // preferable to both `cmpxchg` and `btc`.
30261     if (match(AI->getOperand(1), m_SignMask()))
30262       return AtomicExpansionKind::None;
30263   }
30264 
30265   // If the atomicrmw's result is used by a single bit AND, we may use
30266   // bts/btr/btc instructions for these operations.
30267   // Note: InstCombinePass can cause a de-optimization here. It replaces the
30268   // SETCC(And(AtomicRMW(P, power_of_2), power_of_2)) with LShr and Xor
30269   // (depending on CC). This pattern can only use bts/btr/btc but we don't
30270   // detect it.
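        // e.g. (and (atomicrmw or %p, (shl 1, %n)), (shl 1, %n)) sets and tests
        // the same bit, so it can be lowered to a 'lock bts' whose result
        // already holds the original value of that bit.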
30271   Instruction *I = AI->user_back();
30272   auto BitChange = FindSingleBitChange(AI->getValOperand());
30273   if (BitChange.second == UndefBit || !AI->hasOneUse() ||
30274       I->getOpcode() != Instruction::And ||
30275       AI->getType()->getPrimitiveSizeInBits() == 8 ||
30276       AI->getParent() != I->getParent())
30277     return AtomicExpansionKind::CmpXChg;
30278 
30279   unsigned OtherIdx = I->getOperand(0) == AI ? 1 : 0;
30280 
30281   // This is a redundant AND, it should get cleaned up elsewhere.
30282   if (AI == I->getOperand(OtherIdx))
30283     return AtomicExpansionKind::CmpXChg;
30284 
30285   // The following instruction must be an AND with a single bit.
30286   if (BitChange.second == ConstantBit || BitChange.second == NotConstantBit) {
30287     auto *C1 = cast<ConstantInt>(AI->getValOperand());
30288     auto *C2 = dyn_cast<ConstantInt>(I->getOperand(OtherIdx));
30289     if (!C2 || !isPowerOf2_64(C2->getZExtValue())) {
30290       return AtomicExpansionKind::CmpXChg;
30291     }
30292     if (AI->getOperation() == AtomicRMWInst::And) {
30293       return ~C1->getValue() == C2->getValue()
30294                  ? AtomicExpansionKind::BitTestIntrinsic
30295                  : AtomicExpansionKind::CmpXChg;
30296     }
30297     return C1 == C2 ? AtomicExpansionKind::BitTestIntrinsic
30298                     : AtomicExpansionKind::CmpXChg;
30299   }
30300 
30301   assert(BitChange.second == ShiftBit || BitChange.second == NotShiftBit);
30302 
30303   auto BitTested = FindSingleBitChange(I->getOperand(OtherIdx));
30304   if (BitTested.second != ShiftBit && BitTested.second != NotShiftBit)
30305     return AtomicExpansionKind::CmpXChg;
30306 
30307   assert(BitChange.first != nullptr && BitTested.first != nullptr);
30308 
30309   // If shift amounts are not the same we can't use BitTestIntrinsic.
30310   if (BitChange.first != BitTested.first)
30311     return AtomicExpansionKind::CmpXChg;
30312 
30313   // For an atomic AND, the RMW value must be masking off a single bit and the
30314   // user AND must be testing the one bit that is unset in the mask.
30315   if (AI->getOperation() == AtomicRMWInst::And)
30316     return (BitChange.second == NotShiftBit && BitTested.second == ShiftBit)
30317                ? AtomicExpansionKind::BitTestIntrinsic
30318                : AtomicExpansionKind::CmpXChg;
30319 
30320   // For atomic XOR/OR, the RMW and the AND must set and test the same bit.
30321   return (BitChange.second == ShiftBit && BitTested.second == ShiftBit)
30322              ? AtomicExpansionKind::BitTestIntrinsic
30323              : AtomicExpansionKind::CmpXChg;
30324 }
30325 
30326 void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
30327   IRBuilder<> Builder(AI);
30328   Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
30329   Intrinsic::ID IID_C = Intrinsic::not_intrinsic;
30330   Intrinsic::ID IID_I = Intrinsic::not_intrinsic;
30331   switch (AI->getOperation()) {
30332   default:
30333     llvm_unreachable("Unknown atomic operation");
30334   case AtomicRMWInst::Or:
30335     IID_C = Intrinsic::x86_atomic_bts;
30336     IID_I = Intrinsic::x86_atomic_bts_rm;
30337     break;
30338   case AtomicRMWInst::Xor:
30339     IID_C = Intrinsic::x86_atomic_btc;
30340     IID_I = Intrinsic::x86_atomic_btc_rm;
30341     break;
30342   case AtomicRMWInst::And:
30343     IID_C = Intrinsic::x86_atomic_btr;
30344     IID_I = Intrinsic::x86_atomic_btr_rm;
30345     break;
30346   }
30347   Instruction *I = AI->user_back();
30348   LLVMContext &Ctx = AI->getContext();
30349   Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
30350                                           PointerType::getUnqual(Ctx));
30351   Function *BitTest = nullptr;
30352   Value *Result = nullptr;
30353   auto BitTested = FindSingleBitChange(AI->getValOperand());
30354   assert(BitTested.first != nullptr);
30355 
30356   if (BitTested.second == ConstantBit || BitTested.second == NotConstantBit) {
30357     auto *C = cast<ConstantInt>(I->getOperand(I->getOperand(0) == AI ? 1 : 0));
30358 
30359     BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_C, AI->getType());
30360 
30361     unsigned Imm = llvm::countr_zero(C->getZExtValue());
30362     Result = Builder.CreateCall(BitTest, {Addr, Builder.getInt8(Imm)});
30363   } else {
30364     BitTest = Intrinsic::getDeclaration(AI->getModule(), IID_I, AI->getType());
30365 
30366     assert(BitTested.second == ShiftBit || BitTested.second == NotShiftBit);
30367 
30368     Value *SI = BitTested.first;
30369     assert(SI != nullptr);
30370 
30371     // BT{S|R|C} on a memory operand doesn't modulo the bit position, so we
30372     // need to mask it.
30373     unsigned ShiftBits = SI->getType()->getPrimitiveSizeInBits();
30374     Value *BitPos =
30375         Builder.CreateAnd(SI, Builder.getIntN(ShiftBits, ShiftBits - 1));
30376     // Todo(1): In many cases it may be provable that SI is less than
30377     // ShiftBits in which case this mask is unnecessary
30378     // Todo(2): In the fairly idiomatic case of P[X / sizeof_bits(X)] OP 1
30379     // << (X % sizeof_bits(X)) we can drop the shift mask and AGEN in
30380     // favor of just a raw BT{S|R|C}.
30381 
30382     Result = Builder.CreateCall(BitTest, {Addr, BitPos});
30383     Result = Builder.CreateZExtOrTrunc(Result, AI->getType());
30384 
30385     // If the result is only used for zero/non-zero status then we don't need
30386     // to shift the value back. Otherwise do so.
30387     for (auto It = I->user_begin(); It != I->user_end(); ++It) {
30388       if (auto *ICmp = dyn_cast<ICmpInst>(*It)) {
30389         if (ICmp->isEquality()) {
30390           auto *C0 = dyn_cast<ConstantInt>(ICmp->getOperand(0));
30391           auto *C1 = dyn_cast<ConstantInt>(ICmp->getOperand(1));
30392           if (C0 || C1) {
30393             assert(C0 == nullptr || C1 == nullptr);
30394             if ((C0 ? C0 : C1)->isZero())
30395               continue;
30396           }
30397         }
30398       }
30399       Result = Builder.CreateShl(Result, BitPos);
30400       break;
30401     }
30402   }
30403 
30404   I->replaceAllUsesWith(Result);
30405   I->eraseFromParent();
30406   AI->eraseFromParent();
30407 }
30408 
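      /// Check whether the result of this atomicrmw feeds a comparison that can
      /// be answered directly from the flags of a locked arithmetic instruction.
      /// For example, (icmp eq (atomicrmw sub %p, %v), %v) is true iff the
      /// updated memory value is zero, i.e. exactly the ZF of a 'lock sub'.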
30409 static bool shouldExpandCmpArithRMWInIR(AtomicRMWInst *AI) {
30410   using namespace llvm::PatternMatch;
30411   if (!AI->hasOneUse())
30412     return false;
30413 
30414   Value *Op = AI->getOperand(1);
30415   ICmpInst::Predicate Pred;
30416   Instruction *I = AI->user_back();
30417   AtomicRMWInst::BinOp Opc = AI->getOperation();
30418   if (Opc == AtomicRMWInst::Add) {
30419     if (match(I, m_c_ICmp(Pred, m_Sub(m_ZeroInt(), m_Specific(Op)), m_Value())))
30420       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
30421     if (match(I, m_OneUse(m_c_Add(m_Specific(Op), m_Value())))) {
30422       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30423         return Pred == CmpInst::ICMP_SLT;
30424       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30425         return Pred == CmpInst::ICMP_SGT;
30426     }
30427     return false;
30428   }
30429   if (Opc == AtomicRMWInst::Sub) {
30430     if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
30431       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
30432     if (match(I, m_OneUse(m_Sub(m_Value(), m_Specific(Op))))) {
30433       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30434         return Pred == CmpInst::ICMP_SLT;
30435       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30436         return Pred == CmpInst::ICMP_SGT;
30437     }
30438     return false;
30439   }
30440   if ((Opc == AtomicRMWInst::Or &&
30441        match(I, m_OneUse(m_c_Or(m_Specific(Op), m_Value())))) ||
30442       (Opc == AtomicRMWInst::And &&
30443        match(I, m_OneUse(m_c_And(m_Specific(Op), m_Value()))))) {
30444     if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30445       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE ||
30446              Pred == CmpInst::ICMP_SLT;
30447     if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30448       return Pred == CmpInst::ICMP_SGT;
30449     return false;
30450   }
30451   if (Opc == AtomicRMWInst::Xor) {
30452     if (match(I, m_c_ICmp(Pred, m_Specific(Op), m_Value())))
30453       return Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE;
30454     if (match(I, m_OneUse(m_c_Xor(m_Specific(Op), m_Value())))) {
30455       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_ZeroInt())))
30456         return Pred == CmpInst::ICMP_SLT;
30457       if (match(I->user_back(), m_ICmp(Pred, m_Value(), m_AllOnes())))
30458         return Pred == CmpInst::ICMP_SGT;
30459     }
30460     return false;
30461   }
30462 
30463   return false;
30464 }
30465 
30466 void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(
30467     AtomicRMWInst *AI) const {
30468   IRBuilder<> Builder(AI);
30469   Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
30470   Instruction *TempI = nullptr;
30471   LLVMContext &Ctx = AI->getContext();
30472   ICmpInst *ICI = dyn_cast<ICmpInst>(AI->user_back());
30473   if (!ICI) {
30474     TempI = AI->user_back();
30475     assert(TempI->hasOneUse() && "Must have one use");
30476     ICI = cast<ICmpInst>(TempI->user_back());
30477   }
30478   X86::CondCode CC = X86::COND_INVALID;
30479   ICmpInst::Predicate Pred = ICI->getPredicate();
30480   switch (Pred) {
30481   default:
30482     llvm_unreachable("Not supported Pred");
30483   case CmpInst::ICMP_EQ:
30484     CC = X86::COND_E;
30485     break;
30486   case CmpInst::ICMP_NE:
30487     CC = X86::COND_NE;
30488     break;
30489   case CmpInst::ICMP_SLT:
30490     CC = X86::COND_S;
30491     break;
30492   case CmpInst::ICMP_SGT:
30493     CC = X86::COND_NS;
30494     break;
30495   }
30496   Intrinsic::ID IID = Intrinsic::not_intrinsic;
30497   switch (AI->getOperation()) {
30498   default:
30499     llvm_unreachable("Unknown atomic operation");
30500   case AtomicRMWInst::Add:
30501     IID = Intrinsic::x86_atomic_add_cc;
30502     break;
30503   case AtomicRMWInst::Sub:
30504     IID = Intrinsic::x86_atomic_sub_cc;
30505     break;
30506   case AtomicRMWInst::Or:
30507     IID = Intrinsic::x86_atomic_or_cc;
30508     break;
30509   case AtomicRMWInst::And:
30510     IID = Intrinsic::x86_atomic_and_cc;
30511     break;
30512   case AtomicRMWInst::Xor:
30513     IID = Intrinsic::x86_atomic_xor_cc;
30514     break;
30515   }
30516   Function *CmpArith =
30517       Intrinsic::getDeclaration(AI->getModule(), IID, AI->getType());
30518   Value *Addr = Builder.CreatePointerCast(AI->getPointerOperand(),
30519                                           PointerType::getUnqual(Ctx));
30520   Value *Call = Builder.CreateCall(
30521       CmpArith, {Addr, AI->getValOperand(), Builder.getInt32((unsigned)CC)});
30522   Value *Result = Builder.CreateTrunc(Call, Type::getInt1Ty(Ctx));
30523   ICI->replaceAllUsesWith(Result);
30524   ICI->eraseFromParent();
30525   if (TempI)
30526     TempI->eraseFromParent();
30527   AI->eraseFromParent();
30528 }
30529 
30530 TargetLowering::AtomicExpansionKind
30531 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
30532   unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
30533   Type *MemType = AI->getType();
30534 
30535   // If the operand is too big, we must see if cmpxchg8/16b is available
30536   // and default to library calls otherwise.
30537   if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
30538     return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
30539                                    : AtomicExpansionKind::None;
30540   }
30541 
30542   AtomicRMWInst::BinOp Op = AI->getOperation();
30543   switch (Op) {
30544   case AtomicRMWInst::Xchg:
30545     return AtomicExpansionKind::None;
30546   case AtomicRMWInst::Add:
30547   case AtomicRMWInst::Sub:
30548     if (shouldExpandCmpArithRMWInIR(AI))
30549       return AtomicExpansionKind::CmpArithIntrinsic;
30550     // It's better to use xadd, xsub or xchg for these in other cases.
30551     return AtomicExpansionKind::None;
30552   case AtomicRMWInst::Or:
30553   case AtomicRMWInst::And:
30554   case AtomicRMWInst::Xor:
30555     if (shouldExpandCmpArithRMWInIR(AI))
30556       return AtomicExpansionKind::CmpArithIntrinsic;
30557     return shouldExpandLogicAtomicRMWInIR(AI);
30558   case AtomicRMWInst::Nand:
30559   case AtomicRMWInst::Max:
30560   case AtomicRMWInst::Min:
30561   case AtomicRMWInst::UMax:
30562   case AtomicRMWInst::UMin:
30563   case AtomicRMWInst::FAdd:
30564   case AtomicRMWInst::FSub:
30565   case AtomicRMWInst::FMax:
30566   case AtomicRMWInst::FMin:
30567   case AtomicRMWInst::UIncWrap:
30568   case AtomicRMWInst::UDecWrap:
30569   default:
30570     // These always require a non-trivial set of data operations on x86. We must
30571     // use a cmpxchg loop.
30572     return AtomicExpansionKind::CmpXChg;
30573   }
30574 }
30575 
30576 LoadInst *
30577 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
30578   unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
30579   Type *MemType = AI->getType();
30580   // Accesses larger than the native width are turned into cmpxchg/libcalls, so
30581   // there is no benefit in turning such RMWs into loads, and it is actually
30582   // harmful as it introduces a mfence.
30583   if (MemType->getPrimitiveSizeInBits() > NativeWidth)
30584     return nullptr;
30585 
30586   // If this is a canonical idempotent atomicrmw w/no uses, we have a better
30587   // lowering available in lowerAtomicArith.
30588   // TODO: push more cases through this path.
30589   if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
30590     if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
30591         AI->use_empty())
30592       return nullptr;
30593 
30594   IRBuilder<> Builder(AI);
30595   Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
30596   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
30597   auto SSID = AI->getSyncScopeID();
30598   // We must restrict the ordering to avoid generating loads with Release or
30599   // ReleaseAcquire orderings.
30600   auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
30601 
30602   // Before the load we need a fence. Here is an example lifted from
30603   // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
30604   // is required:
30605   // Thread 0:
30606   //   x.store(1, relaxed);
30607   //   r1 = y.fetch_add(0, release);
30608   // Thread 1:
30609   //   y.fetch_add(42, acquire);
30610   //   r2 = x.load(relaxed);
30611   // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
30612   // lowered to just a load without a fence. A mfence flushes the store buffer,
30613   // making the optimization clearly correct.
30614   // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
30615   // otherwise, we might be able to be more aggressive on relaxed idempotent
30616   // rmw. In practice, they do not look useful, so we don't try to be
30617   // especially clever.
30618   if (SSID == SyncScope::SingleThread)
30619     // FIXME: we could just insert an ISD::MEMBARRIER here, except we are at
30620     // the IR level, so we must wrap it in an intrinsic.
30621     return nullptr;
30622 
30623   if (!Subtarget.hasMFence())
30624     // FIXME: it might make sense to use a locked operation here but on a
30625     // different cache-line to prevent cache-line bouncing. In practice it
30626     // is probably a small win, and x86 processors without mfence are rare
30627     // enough that we do not bother.
30628     return nullptr;
30629 
30630   Function *MFence =
30631       llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
30632   Builder.CreateCall(MFence, {});
30633 
30634   // Finally we can emit the atomic load.
30635   LoadInst *Loaded = Builder.CreateAlignedLoad(
30636       AI->getType(), AI->getPointerOperand(), AI->getAlign());
30637   Loaded->setAtomic(Order, SSID);
30638   AI->replaceAllUsesWith(Loaded);
30639   AI->eraseFromParent();
30640   return Loaded;
30641 }
30642 
30643 /// Emit a locked operation on a stack location which does not change any
30644 /// memory location, but does involve a lock prefix.  Location is chosen to be
30645 /// a) very likely accessed only by a single thread to minimize cache traffic,
30646 /// and b) definitely dereferenceable.  Returns the new Chain result.
30647 static SDValue emitLockedStackOp(SelectionDAG &DAG,
30648                                  const X86Subtarget &Subtarget, SDValue Chain,
30649                                  const SDLoc &DL) {
30650   // Implementation notes:
30651   // 1) LOCK prefix creates a full read/write reordering barrier for memory
30652   // operations issued by the current processor.  As such, the location
30653   // referenced is not relevant for the ordering properties of the instruction.
30654   // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
30655   // 8.2.3.9  Loads and Stores Are Not Reordered with Locked Instructions
30656   // 2) Using an immediate operand appears to be the best encoding choice
30657   // here since it doesn't require an extra register.
30658   // 3) OR appears to be very slightly faster than ADD. (Though, the difference
30659   // is small enough it might just be measurement noise.)
30660   // 4) When choosing offsets, there are several contributing factors:
30661   //   a) If there's no redzone, we default to TOS.  (We could allocate a cache
30662   //      line aligned stack object to improve this case.)
30663   //   b) To minimize our chances of introducing a false dependence, we prefer
30664   //      to offset the stack usage from TOS slightly.
30665   //   c) To minimize concerns about cross thread stack usage - in particular,
30666   //      the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
30667   //      captures state in the TOS frame and accesses it from many threads -
30668   //      we want to use an offset such that the offset is in a distinct cache
30669   //      line from the TOS frame.
30670   //
30671   // For a general discussion of the tradeoffs and benchmark results, see:
30672   // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
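  //
  // Illustrative only (a sketch of the shape of the emitted operation, not
  // additional lowering logic): with a 128-byte red zone this produces roughly
  //   lock orl $0x0, -64(%rsp)
  // and on 32-bit targets (no red zone)
  //   lock orl $0x0, (%esp)
  // i.e. an idempotent read-modify-write whose only purpose is the implicit
  // full barrier provided by the LOCK prefix.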
30673 
30674   auto &MF = DAG.getMachineFunction();
30675   auto &TFL = *Subtarget.getFrameLowering();
30676   const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
30677 
30678   if (Subtarget.is64Bit()) {
30679     SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
30680     SDValue Ops[] = {
30681       DAG.getRegister(X86::RSP, MVT::i64),                  // Base
30682       DAG.getTargetConstant(1, DL, MVT::i8),                // Scale
30683       DAG.getRegister(0, MVT::i64),                         // Index
30684       DAG.getTargetConstant(SPOffset, DL, MVT::i32),        // Disp
30685       DAG.getRegister(0, MVT::i16),                         // Segment.
30686       Zero,
30687       Chain};
30688     SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
30689                                      MVT::Other, Ops);
30690     return SDValue(Res, 1);
30691   }
30692 
30693   SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
30694   SDValue Ops[] = {
30695     DAG.getRegister(X86::ESP, MVT::i32),            // Base
30696     DAG.getTargetConstant(1, DL, MVT::i8),          // Scale
30697     DAG.getRegister(0, MVT::i32),                   // Index
30698     DAG.getTargetConstant(SPOffset, DL, MVT::i32),  // Disp
30699     DAG.getRegister(0, MVT::i16),                   // Segment.
30700     Zero,
30701     Chain
30702   };
30703   SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
30704                                    MVT::Other, Ops);
30705   return SDValue(Res, 1);
30706 }
30707 
30708 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
30709                                  SelectionDAG &DAG) {
30710   SDLoc dl(Op);
30711   AtomicOrdering FenceOrdering =
30712       static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
30713   SyncScope::ID FenceSSID =
30714       static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
30715 
30716   // The only fence that needs an instruction is a sequentially-consistent
30717   // cross-thread fence.
30718   if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
30719       FenceSSID == SyncScope::System) {
30720     if (Subtarget.hasMFence())
30721       return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
30722 
30723     SDValue Chain = Op.getOperand(0);
30724     return emitLockedStackOp(DAG, Subtarget, Chain, dl);
30725   }
30726 
30727   // MEMBARRIER is a compiler barrier; it codegens to a no-op.
30728   return DAG.getNode(ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
30729 }
30730 
30731 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
30732                              SelectionDAG &DAG) {
30733   MVT T = Op.getSimpleValueType();
30734   SDLoc DL(Op);
30735   unsigned Reg = 0;
30736   unsigned size = 0;
30737   switch(T.SimpleTy) {
30738   default: llvm_unreachable("Invalid value type!");
30739   case MVT::i8:  Reg = X86::AL;  size = 1; break;
30740   case MVT::i16: Reg = X86::AX;  size = 2; break;
30741   case MVT::i32: Reg = X86::EAX; size = 4; break;
30742   case MVT::i64:
30743     assert(Subtarget.is64Bit() && "Node not type legal!");
30744     Reg = X86::RAX; size = 8;
30745     break;
30746   }
30747   SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
30748                                   Op.getOperand(2), SDValue());
30749   SDValue Ops[] = { cpIn.getValue(0),
30750                     Op.getOperand(1),
30751                     Op.getOperand(3),
30752                     DAG.getTargetConstant(size, DL, MVT::i8),
30753                     cpIn.getValue(1) };
30754   SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
30755   MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
30756   SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
30757                                            Ops, T, MMO);
30758 
30759   SDValue cpOut =
30760     DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
30761   SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
30762                                       MVT::i32, cpOut.getValue(2));
30763   SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
30764 
30765   return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
30766                      cpOut, Success, EFLAGS.getValue(1));
30767 }
30768 
30769 // Create MOVMSKB, taking into account whether we need to split for AVX1.
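// Conceptually, MOVMSK collects the sign bit of every byte into a scalar mask,
// so when the input has to be split the two half-masks are simply recombined
// as (Hi << HalfNumBytes) | Lo, e.g. (Hi << 16) | Lo for v32i8 without AVX2 and
// (Hi << 32) | Lo for v64i8.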
30770 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
30771                            const X86Subtarget &Subtarget) {
30772   MVT InVT = V.getSimpleValueType();
30773 
30774   if (InVT == MVT::v64i8) {
30775     SDValue Lo, Hi;
30776     std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
30777     Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
30778     Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
30779     Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
30780     Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
30781     Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
30782                      DAG.getConstant(32, DL, MVT::i8));
30783     return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
30784   }
30785   if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
30786     SDValue Lo, Hi;
30787     std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
30788     Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
30789     Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
30790     Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
30791                      DAG.getConstant(16, DL, MVT::i8));
30792     return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
30793   }
30794 
30795   return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
30796 }
30797 
30798 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
30799                             SelectionDAG &DAG) {
30800   SDValue Src = Op.getOperand(0);
30801   MVT SrcVT = Src.getSimpleValueType();
30802   MVT DstVT = Op.getSimpleValueType();
30803 
30804   // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
30805   // half to v32i1 and concatenating the result.
30806   if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
30807     assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
30808     assert(Subtarget.hasBWI() && "Expected BWI target");
30809     SDLoc dl(Op);
30810     SDValue Lo, Hi;
30811     std::tie(Lo, Hi) = DAG.SplitScalar(Src, dl, MVT::i32, MVT::i32);
30812     Lo = DAG.getBitcast(MVT::v32i1, Lo);
30813     Hi = DAG.getBitcast(MVT::v32i1, Hi);
30814     return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
30815   }
30816 
30817   // Use MOVMSK for vector to scalar conversion to prevent scalarization.
30818   if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
30819     assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
30820     MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
30821     SDLoc DL(Op);
30822     SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
30823     V = getPMOVMSKB(DL, V, DAG, Subtarget);
30824     return DAG.getZExtOrTrunc(V, DL, DstVT);
30825   }
30826 
30827   assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
30828           SrcVT == MVT::i64) && "Unexpected VT!");
30829 
30830   assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
30831   if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
30832       !(DstVT == MVT::x86mmx && SrcVT.isVector()))
30833     // This conversion needs to be expanded.
30834     return SDValue();
30835 
30836   SDLoc dl(Op);
30837   if (SrcVT.isVector()) {
30838     // Widen the input vector in the case of MVT::v2i32.
30839     // Example: from MVT::v2i32 to MVT::v4i32.
30840     MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
30841                                  SrcVT.getVectorNumElements() * 2);
30842     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
30843                       DAG.getUNDEF(SrcVT));
30844   } else {
30845     assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
30846            "Unexpected source type in LowerBITCAST");
30847     Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
30848   }
30849 
30850   MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
30851   Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
30852 
30853   if (DstVT == MVT::x86mmx)
30854     return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
30855 
30856   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
30857                      DAG.getIntPtrConstant(0, dl));
30858 }
30859 
30860 /// Compute the horizontal sum of bytes in V for the elements of VT.
30861 ///
30862 /// Requires V to be a byte vector and VT to be an integer vector type with
30863 /// wider elements than V's type. The width of the elements of VT determines
30864 /// how many bytes of V are summed horizontally to produce each element of the
30865 /// result.
30866 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
30867                                       const X86Subtarget &Subtarget,
30868                                       SelectionDAG &DAG) {
30869   SDLoc DL(V);
30870   MVT ByteVecVT = V.getSimpleValueType();
30871   MVT EltVT = VT.getVectorElementType();
30872   assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
30873          "Expected value to have byte element type.");
30874   assert(EltVT != MVT::i8 &&
30875          "Horizontal byte sum only makes sense for wider elements!");
30876   unsigned VecSize = VT.getSizeInBits();
30877   assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
30878 
30879   // The PSADBW instruction horizontally adds all bytes and leaves the result in
30880   // i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
30881   if (EltVT == MVT::i64) {
30882     SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
30883     MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
30884     V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
30885     return DAG.getBitcast(VT, V);
30886   }
30887 
30888   if (EltVT == MVT::i32) {
30889     // We unpack the low half and high half into i32s interleaved with zeros so
30890     // that we can use PSADBW to horizontally sum them. The most useful part of
30891     // this is that it lines up the results of two PSADBW instructions to be
30892     // two v2i64 vectors which concatenated are the 4 population counts. We can
30893     // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
30894     SDValue Zeros = DAG.getConstant(0, DL, VT);
30895     SDValue V32 = DAG.getBitcast(VT, V);
30896     SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
30897     SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
30898 
30899     // Do the horizontal sums into two v2i64s.
30900     Zeros = DAG.getConstant(0, DL, ByteVecVT);
30901     MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
30902     Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
30903                       DAG.getBitcast(ByteVecVT, Low), Zeros);
30904     High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
30905                        DAG.getBitcast(ByteVecVT, High), Zeros);
30906 
30907     // Merge them together.
30908     MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
30909     V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
30910                     DAG.getBitcast(ShortVecVT, Low),
30911                     DAG.getBitcast(ShortVecVT, High));
30912 
30913     return DAG.getBitcast(VT, V);
30914   }
30915 
30916   // The only element type left is i16.
30917   assert(EltVT == MVT::i16 && "Unknown how to handle type");
30918 
30919   // To obtain pop count for each i16 element starting from the pop count for
30920   // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
30921   // right by 8. It is important to shift as i16s as i8 vector shift isn't
30922   // directly supported.
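  //
  // A minimal scalar sketch of what this computes per 16-bit lane (illustrative
  // only; the helper name is made up and nothing below uses it):
  //
  //   static inline uint16_t sumBytePopCounts(uint16_t Lane) {
  //     // The low byte and high byte each hold a 0..8 byte pop count.
  //     return (uint16_t)((Lane & 0xFF) + (Lane >> 8));
  //   }
  //
  // The vector form gets the same result with SHL-by-8, a byte-wise add (so the
  // per-byte sums cannot carry across lanes), and SRL-by-8.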
30923   SDValue ShifterV = DAG.getConstant(8, DL, VT);
30924   SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
30925   V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
30926                   DAG.getBitcast(ByteVecVT, V));
30927   return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
30928 }
30929 
30930 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
30931                                         const X86Subtarget &Subtarget,
30932                                         SelectionDAG &DAG) {
30933   MVT VT = Op.getSimpleValueType();
30934   MVT EltVT = VT.getVectorElementType();
30935   int NumElts = VT.getVectorNumElements();
30936   (void)EltVT;
30937   assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
30938 
30939   // Implement a lookup table in register by using an algorithm based on:
30940   // http://wm.ite.pl/articles/sse-popcount.html
30941   //
30942   // The general idea is that every lower byte nibble in the input vector is an
30943   // index into an in-register pre-computed pop count table. We then split up the
30944   // input vector into two new ones: (1) a vector with only the shifted-right
30945   // higher nibbles for each byte and (2) a vector with the lower nibbles (and
30946   // masked out higher ones) for each byte. PSHUFB is used separately with both
30947   // to index the in-register table. Next, both are added and the result is an
30948   // i8 vector where each element contains the pop count for its input byte.
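  //
  // A scalar sketch of the same idea (illustrative only; popcount8_lut is a
  // made-up name and is not used by this lowering):
  //
  //   static const uint8_t LUT8[16] = {0,1,1,2,1,2,2,3,1,2,2,3,2,3,3,4};
  //   static inline uint8_t popcount8_lut(uint8_t B) {
  //     return LUT8[B >> 4] + LUT8[B & 0x0F]; // high-nibble + low-nibble count
  //   }
  //
  // PSHUFB performs all of the per-byte LUT lookups in parallel, once for the
  // high nibbles and once for the low nibbles, and a vector ADD merges them.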
30949   const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
30950                        /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
30951                        /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
30952                        /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
30953 
30954   SmallVector<SDValue, 64> LUTVec;
30955   for (int i = 0; i < NumElts; ++i)
30956     LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
30957   SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
30958   SDValue M0F = DAG.getConstant(0x0F, DL, VT);
30959 
30960   // High nibbles
30961   SDValue FourV = DAG.getConstant(4, DL, VT);
30962   SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
30963 
30964   // Low nibbles
30965   SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
30966 
30967   // The input vector is used as the shuffle mask that indexes elements into the
30968   // LUT. After counting low and high nibbles, add the two results to obtain the
30969   // final pop count per i8 element.
30970   SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
30971   SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
30972   return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
30973 }
30974 
30975 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
30976 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
30977 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
30978                                 SelectionDAG &DAG) {
30979   MVT VT = Op.getSimpleValueType();
30980   assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
30981          "Unknown CTPOP type to handle");
30982   SDLoc DL(Op.getNode());
30983   SDValue Op0 = Op.getOperand(0);
30984 
30985   // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
30986   if (Subtarget.hasVPOPCNTDQ()) {
30987     unsigned NumElems = VT.getVectorNumElements();
30988     assert((VT.getVectorElementType() == MVT::i8 ||
30989             VT.getVectorElementType() == MVT::i16) && "Unexpected type");
30990     if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
30991       MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
30992       Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
30993       Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
30994       return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
30995     }
30996   }
30997 
30998   // Decompose 256-bit ops into smaller 128-bit ops.
30999   if (VT.is256BitVector() && !Subtarget.hasInt256())
31000     return splitVectorIntUnary(Op, DAG);
31001 
31002   // Decompose 512-bit ops into smaller 256-bit ops.
31003   if (VT.is512BitVector() && !Subtarget.hasBWI())
31004     return splitVectorIntUnary(Op, DAG);
31005 
31006   // For element types greater than i8, do vXi8 pop counts and a bytesum.
31007   if (VT.getScalarType() != MVT::i8) {
31008     MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
31009     SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
31010     SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
31011     return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
31012   }
31013 
31014   // We can't use the fast LUT approach, so fall back on LegalizeDAG.
31015   if (!Subtarget.hasSSSE3())
31016     return SDValue();
31017 
31018   return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
31019 }
31020 
31021 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
31022                           SelectionDAG &DAG) {
31023   assert(Op.getSimpleValueType().isVector() &&
31024          "We only do custom lowering for vector population count.");
31025   return LowerVectorCTPOP(Op, Subtarget, DAG);
31026 }
31027 
31028 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
31029   MVT VT = Op.getSimpleValueType();
31030   SDValue In = Op.getOperand(0);
31031   SDLoc DL(Op);
31032 
31033   // For scalars, it's still beneficial to transfer to/from the SIMD unit to
31034   // perform the BITREVERSE.
31035   if (!VT.isVector()) {
31036     MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
31037     SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
31038     Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
31039     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
31040                        DAG.getIntPtrConstant(0, DL));
31041   }
31042 
31043   int NumElts = VT.getVectorNumElements();
31044   int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
31045 
31046   // Decompose 256-bit ops into smaller 128-bit ops.
31047   if (VT.is256BitVector())
31048     return splitVectorIntUnary(Op, DAG);
31049 
31050   assert(VT.is128BitVector() &&
31051          "Only 128-bit vector bitreverse lowering supported.");
31052 
31053   // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
31054   // perform the BSWAP in the shuffle.
31055   // It's best to shuffle using the second operand as this will implicitly allow
31056   // memory folding for multiple vectors.
31057   SmallVector<SDValue, 16> MaskElts;
31058   for (int i = 0; i != NumElts; ++i) {
31059     for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
31060       int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
31061       int PermuteByte = SourceByte | (2 << 5);
31062       MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
31063     }
31064   }
31065 
31066   SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
31067   SDValue Res = DAG.getBitcast(MVT::v16i8, In);
31068   Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
31069                     Res, Mask);
31070   return DAG.getBitcast(VT, Res);
31071 }
31072 
31073 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
31074                                SelectionDAG &DAG) {
31075   MVT VT = Op.getSimpleValueType();
31076 
31077   if (Subtarget.hasXOP() && !VT.is512BitVector())
31078     return LowerBITREVERSE_XOP(Op, DAG);
31079 
31080   assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
31081 
31082   SDValue In = Op.getOperand(0);
31083   SDLoc DL(Op);
31084 
31085   assert(VT.getScalarType() == MVT::i8 &&
31086          "Only byte vector BITREVERSE supported");
31087 
31088   // Split v64i8 without BWI so that we can still use the PSHUFB lowering.
31089   if (VT == MVT::v64i8 && !Subtarget.hasBWI())
31090     return splitVectorIntUnary(Op, DAG);
31091 
31092   // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
31093   if (VT == MVT::v32i8 && !Subtarget.hasInt256())
31094     return splitVectorIntUnary(Op, DAG);
31095 
31096   unsigned NumElts = VT.getVectorNumElements();
31097 
31098   // If we have GFNI, we can use GF2P8AFFINEQB to reverse the bits.
31099   if (Subtarget.hasGFNI()) {
31100     MVT MatrixVT = MVT::getVectorVT(MVT::i64, NumElts / 8);
31101     SDValue Matrix = DAG.getConstant(0x8040201008040201ULL, DL, MatrixVT);
31102     Matrix = DAG.getBitcast(VT, Matrix);
31103     return DAG.getNode(X86ISD::GF2P8AFFINEQB, DL, VT, In, Matrix,
31104                        DAG.getTargetConstant(0, DL, MVT::i8));
31105   }
31106 
31107   // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
31108   // two nibbles, and a PSHUFB lookup finds the bit reverse of each
31109   // 0-15 value (moved to the other nibble).
31110   SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
31111   SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
31112   SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
31113 
31114   const int LoLUT[16] = {
31115       /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
31116       /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
31117       /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
31118       /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
31119   const int HiLUT[16] = {
31120       /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
31121       /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
31122       /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
31123       /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
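  // A scalar sketch of what the two tables compute together (illustrative only;
  // reverseByte is a made-up name and is not used by this lowering):
  //
  //   static inline uint8_t reverseByte(uint8_t B) {
  //     return (uint8_t)(LoLUT[B & 0xF] | HiLUT[B >> 4]);
  //   }
  //
  // i.e. the reversed low nibble lands in the high nibble of the result and
  // vice versa; the PSHUFB pair below does this for every byte at once.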
31124 
31125   SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
31126   for (unsigned i = 0; i < NumElts; ++i) {
31127     LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
31128     HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
31129   }
31130 
31131   SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
31132   SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
31133   Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
31134   Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
31135   return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
31136 }
31137 
31138 static SDValue LowerPARITY(SDValue Op, const X86Subtarget &Subtarget,
31139                            SelectionDAG &DAG) {
31140   SDLoc DL(Op);
31141   SDValue X = Op.getOperand(0);
31142   MVT VT = Op.getSimpleValueType();
31143 
31144   // Special case. If the input fits in 8-bits we can use a single 8-bit TEST.
31145   if (VT == MVT::i8 ||
31146       DAG.MaskedValueIsZero(X, APInt::getBitsSetFrom(VT.getSizeInBits(), 8))) {
31147     X = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
31148     SDValue Flags = DAG.getNode(X86ISD::CMP, DL, MVT::i32, X,
31149                                 DAG.getConstant(0, DL, MVT::i8));
31150     // Copy the inverse of the parity flag into a register with setcc.
31151     SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
31152     // Extend to the original type.
31153     return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
31154   }
31155 
31156   // If we have POPCNT, use the default expansion.
31157   if (Subtarget.hasPOPCNT())
31158     return SDValue();
31159 
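  // Without POPCNT, fold the value down to a single byte; xor preserves parity.
  // A scalar sketch of the steps performed below (illustrative only; the helper
  // name is made up):
  //
  //   static inline unsigned parityOf(uint64_t X) {     // i64 case
  //     uint32_t V = (uint32_t)X ^ (uint32_t)(X >> 32); // fold 64 -> 32
  //     V ^= V >> 16;                                   // fold 32 -> 16
  //     uint8_t B = (uint8_t)V ^ (uint8_t)(V >> 8);     // fold 16 -> 8, sets PF
  //     unsigned Ones = 0;
  //     for (unsigned I = 0; I != 8; ++I)
  //       Ones += (B >> I) & 1;
  //     return Ones & 1;                                // what SETNP materializes
  //   }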
31160   if (VT == MVT::i64) {
31161     // Xor the high and low 32-bits together using a 32-bit operation.
31162     SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
31163                              DAG.getNode(ISD::SRL, DL, MVT::i64, X,
31164                                          DAG.getConstant(32, DL, MVT::i8)));
31165     SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
31166     X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
31167   }
31168 
31169   if (VT != MVT::i16) {
31170     // Xor the high and low 16-bits together using a 32-bit operation.
31171     SDValue Hi16 = DAG.getNode(ISD::SRL, DL, MVT::i32, X,
31172                                DAG.getConstant(16, DL, MVT::i8));
31173     X = DAG.getNode(ISD::XOR, DL, MVT::i32, X, Hi16);
31174   } else {
31175     // If the input is 16-bits, we need to extend to use an i32 shift below.
31176     X = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, X);
31177   }
31178 
31179   // Finally, xor the low 2 bytes together and use an 8-bit flag-setting xor.
31180   // This should allow an h-reg to be used to save a shift.
31181   SDValue Hi = DAG.getNode(
31182       ISD::TRUNCATE, DL, MVT::i8,
31183       DAG.getNode(ISD::SRL, DL, MVT::i32, X, DAG.getConstant(8, DL, MVT::i8)));
31184   SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
31185   SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
31186   SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);
31187 
31188   // Copy the inverse of the parity flag into a register with setcc.
31189   SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
31190   // Extend to the original type.
31191   return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Setnp);
31192 }
31193 
31194 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
31195                                         const X86Subtarget &Subtarget) {
31196   unsigned NewOpc = 0;
31197   switch (N->getOpcode()) {
31198   case ISD::ATOMIC_LOAD_ADD:
31199     NewOpc = X86ISD::LADD;
31200     break;
31201   case ISD::ATOMIC_LOAD_SUB:
31202     NewOpc = X86ISD::LSUB;
31203     break;
31204   case ISD::ATOMIC_LOAD_OR:
31205     NewOpc = X86ISD::LOR;
31206     break;
31207   case ISD::ATOMIC_LOAD_XOR:
31208     NewOpc = X86ISD::LXOR;
31209     break;
31210   case ISD::ATOMIC_LOAD_AND:
31211     NewOpc = X86ISD::LAND;
31212     break;
31213   default:
31214     llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
31215   }
31216 
31217   MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
31218 
31219   return DAG.getMemIntrinsicNode(
31220       NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
31221       {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
31222       /*MemVT=*/N->getSimpleValueType(0), MMO);
31223 }
31224 
31225 /// Lower atomic_load_ops into LOCK-prefixed operations.
31226 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
31227                                 const X86Subtarget &Subtarget) {
31228   AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
31229   SDValue Chain = N->getOperand(0);
31230   SDValue LHS = N->getOperand(1);
31231   SDValue RHS = N->getOperand(2);
31232   unsigned Opc = N->getOpcode();
31233   MVT VT = N->getSimpleValueType(0);
31234   SDLoc DL(N);
31235 
31236   // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
31237   // can only be lowered when the result is unused.  They should have already
31238   // been transformed into a cmpxchg loop in AtomicExpand.
31239   if (N->hasAnyUseOfValue(0)) {
31240     // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
31241     // select LXADD if LOCK_SUB can't be selected.
31242     // Handle (atomic_load_xor p, SignBit) as (atomic_load_add p, SignBit) so we
31243     // can use LXADD as opposed to cmpxchg.
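    // (The xor case is valid because flipping the sign bit equals adding it
    // modulo 2^BitWidth; e.g. for i8, X ^ 0x80 == (X + 0x80) & 0xFF for all X.)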
31244     if (Opc == ISD::ATOMIC_LOAD_SUB ||
31245         (Opc == ISD::ATOMIC_LOAD_XOR && isMinSignedConstant(RHS))) {
31246       RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
31247       return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS, RHS,
31248                            AN->getMemOperand());
31249     }
31250     assert(Opc == ISD::ATOMIC_LOAD_ADD &&
31251            "Used AtomicRMW ops other than Add should have been expanded!");
31252     return N;
31253   }
31254 
31255   // Specialized lowering for the canonical form of an idempotent atomicrmw.
31256   // The core idea here is that since the memory location isn't actually
31257   // changing, all we need is a lowering for the *ordering* impacts of the
31258   // atomicrmw.  As such, we can choose a different operation and memory
31259   // location to minimize impact on other code.
31260   // The above holds unless the node is marked volatile, in which
31261   // case it needs to be preserved according to the langref.
31262   if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS) && !AN->isVolatile()) {
31263     // On X86, the only ordering which actually requires an instruction is
31264     // seq_cst that isn't SingleThread; everything else just needs to be
31265     // preserved during codegen and then dropped. Note that we expect (but don't
31266     // assume) that orderings other than seq_cst and acq_rel have been
31267     // canonicalized to a store or load.
31268     if (AN->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent &&
31269         AN->getSyncScopeID() == SyncScope::System) {
31270       // Prefer a locked operation against a stack location to minimize cache
31271       // traffic.  This assumes that stack locations are very likely to be
31272       // accessed only by the owning thread.
31273       SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
31274       assert(!N->hasAnyUseOfValue(0));
31275       // NOTE: The getUNDEF is needed to give something for the unused result 0.
31276       return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31277                          DAG.getUNDEF(VT), NewChain);
31278     }
31279     // MEMBARRIER is a compiler barrier; it codegens to a no-op.
31280     SDValue NewChain = DAG.getNode(ISD::MEMBARRIER, DL, MVT::Other, Chain);
31281     assert(!N->hasAnyUseOfValue(0));
31282     // NOTE: The getUNDEF is needed to give something for the unused result 0.
31283     return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31284                        DAG.getUNDEF(VT), NewChain);
31285   }
31286 
31287   SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
31288   // RAUW the chain, but don't worry about the result, as it's unused.
31289   assert(!N->hasAnyUseOfValue(0));
31290   // NOTE: The getUNDEF is needed to give something for the unused result 0.
31291   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
31292                      DAG.getUNDEF(VT), LockOp.getValue(1));
31293 }
31294 
31295 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
31296                                  const X86Subtarget &Subtarget) {
31297   auto *Node = cast<AtomicSDNode>(Op.getNode());
31298   SDLoc dl(Node);
31299   EVT VT = Node->getMemoryVT();
31300 
31301   bool IsSeqCst =
31302       Node->getSuccessOrdering() == AtomicOrdering::SequentiallyConsistent;
31303   bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
31304 
31305   // If this store is not sequentially consistent and the type is legal
31306   // we can just keep it.
31307   if (!IsSeqCst && IsTypeLegal)
31308     return Op;
31309 
31310   if (VT == MVT::i64 && !IsTypeLegal) {
31311     // For illegal i64 atomic_stores, we can try to use MOVQ or MOVLPS if SSE
31312     // is enabled.
31313     bool NoImplicitFloatOps =
31314         DAG.getMachineFunction().getFunction().hasFnAttribute(
31315             Attribute::NoImplicitFloat);
31316     if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
31317       SDValue Chain;
31318       if (Subtarget.hasSSE1()) {
31319         SDValue SclToVec =
31320             DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Node->getVal());
31321         MVT StVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
31322         SclToVec = DAG.getBitcast(StVT, SclToVec);
31323         SDVTList Tys = DAG.getVTList(MVT::Other);
31324         SDValue Ops[] = {Node->getChain(), SclToVec, Node->getBasePtr()};
31325         Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops,
31326                                         MVT::i64, Node->getMemOperand());
31327       } else if (Subtarget.hasX87()) {
31328         // First load this into an 80-bit X87 register using a stack temporary.
31329         // This will put the whole integer into the significand.
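        // FILD and FIST each move the full 64-bit value with a single 8-byte
        // memory access, which is what lets a 32-bit target without SSE
        // perform the store as one memory operation.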
31330         SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
31331         int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
31332         MachinePointerInfo MPI =
31333             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
31334         Chain = DAG.getStore(Node->getChain(), dl, Node->getVal(), StackPtr,
31335                              MPI, MaybeAlign(), MachineMemOperand::MOStore);
31336         SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
31337         SDValue LdOps[] = {Chain, StackPtr};
31338         SDValue Value = DAG.getMemIntrinsicNode(
31339             X86ISD::FILD, dl, Tys, LdOps, MVT::i64, MPI,
31340             /*Align*/ std::nullopt, MachineMemOperand::MOLoad);
31341         Chain = Value.getValue(1);
31342 
31343         // Now use an FIST to do the atomic store.
31344         SDValue StoreOps[] = {Chain, Value, Node->getBasePtr()};
31345         Chain =
31346             DAG.getMemIntrinsicNode(X86ISD::FIST, dl, DAG.getVTList(MVT::Other),
31347                                     StoreOps, MVT::i64, Node->getMemOperand());
31348       }
31349 
31350       if (Chain) {
31351         // If this is a sequentially consistent store, also emit an appropriate
31352         // barrier.
31353         if (IsSeqCst)
31354           Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
31355 
31356         return Chain;
31357       }
31358     }
31359   }
31360 
31361   // Convert seq_cst store -> xchg
31362   // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
31363   // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
31364   SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl, Node->getMemoryVT(),
31365                                Node->getOperand(0), Node->getOperand(2),
31366                                Node->getOperand(1), Node->getMemOperand());
31367   return Swap.getValue(1);
31368 }
31369 
31370 static SDValue LowerADDSUBO_CARRY(SDValue Op, SelectionDAG &DAG) {
31371   SDNode *N = Op.getNode();
31372   MVT VT = N->getSimpleValueType(0);
31373   unsigned Opc = Op.getOpcode();
31374 
31375   // Let legalize expand this if it isn't a legal type yet.
31376   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
31377     return SDValue();
31378 
31379   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
31380   SDLoc DL(N);
31381 
31382   // Set the carry flag.
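  // Adding all-ones (i.e. -1) to the incoming carry value produces a hardware
  // carry-out exactly when that value is nonzero, so this materializes the
  // boolean carry operand in EFLAGS for the ADC/SBB below.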
31383   SDValue Carry = Op.getOperand(2);
31384   EVT CarryVT = Carry.getValueType();
31385   Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
31386                       Carry, DAG.getAllOnesConstant(DL, CarryVT));
31387 
31388   bool IsAdd = Opc == ISD::UADDO_CARRY || Opc == ISD::SADDO_CARRY;
31389   SDValue Sum = DAG.getNode(IsAdd ? X86ISD::ADC : X86ISD::SBB, DL, VTs,
31390                             Op.getOperand(0), Op.getOperand(1),
31391                             Carry.getValue(1));
31392 
31393   bool IsSigned = Opc == ISD::SADDO_CARRY || Opc == ISD::SSUBO_CARRY;
31394   SDValue SetCC = getSETCC(IsSigned ? X86::COND_O : X86::COND_B,
31395                            Sum.getValue(1), DL, DAG);
31396   if (N->getValueType(1) == MVT::i1)
31397     SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
31398 
31399   return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
31400 }
31401 
31402 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
31403                             SelectionDAG &DAG) {
31404   assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
31405 
31406   // For MacOSX, we want to call an alternative entry point: __sincos_stret,
31407   // which returns the values as { float, float } (in XMM0) or
31408   // { double, double } (which is returned in XMM0, XMM1).
31409   SDLoc dl(Op);
31410   SDValue Arg = Op.getOperand(0);
31411   EVT ArgVT = Arg.getValueType();
31412   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
31413 
31414   TargetLowering::ArgListTy Args;
31415   TargetLowering::ArgListEntry Entry;
31416 
31417   Entry.Node = Arg;
31418   Entry.Ty = ArgTy;
31419   Entry.IsSExt = false;
31420   Entry.IsZExt = false;
31421   Args.push_back(Entry);
31422 
31423   bool isF64 = ArgVT == MVT::f64;
31424   // Only optimize x86_64 for now. i386 is a bit messy. For f32,
31425   // the small struct {f32, f32} is returned in (eax, edx). For f64,
31426   // the results are returned via SRet in memory.
31427   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
31428   RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
31429   const char *LibcallName = TLI.getLibcallName(LC);
31430   SDValue Callee =
31431       DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
31432 
31433   Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
31434                       : (Type *)FixedVectorType::get(ArgTy, 4);
31435 
31436   TargetLowering::CallLoweringInfo CLI(DAG);
31437   CLI.setDebugLoc(dl)
31438       .setChain(DAG.getEntryNode())
31439       .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
31440 
31441   std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
31442 
31443   if (isF64)
31444     // Returned in xmm0 and xmm1.
31445     return CallResult.first;
31446 
31447   // Returned in bits 0:31 and 32:64 xmm0.
31448   SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
31449                                CallResult.first, DAG.getIntPtrConstant(0, dl));
31450   SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
31451                                CallResult.first, DAG.getIntPtrConstant(1, dl));
31452   SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
31453   return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
31454 }
31455 
31456 /// Widen a vector input to a vector of NVT.  The
31457 /// input vector must have the same element type as NVT.
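/// For example, widening a v2i32 build_vector <a, b> to v4i32 yields
/// <a, b, undef, undef>, or <a, b, 0, 0> when FillWithZeroes is set.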
31458 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
31459                             bool FillWithZeroes = false) {
31460   // Check if InOp already has the right width.
31461   MVT InVT = InOp.getSimpleValueType();
31462   if (InVT == NVT)
31463     return InOp;
31464 
31465   if (InOp.isUndef())
31466     return DAG.getUNDEF(NVT);
31467 
31468   assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
31469          "input and widen element type must match");
31470 
31471   unsigned InNumElts = InVT.getVectorNumElements();
31472   unsigned WidenNumElts = NVT.getVectorNumElements();
31473   assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
31474          "Unexpected request for vector widening");
31475 
31476   SDLoc dl(InOp);
31477   if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
31478       InOp.getNumOperands() == 2) {
31479     SDValue N1 = InOp.getOperand(1);
31480     if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
31481         N1.isUndef()) {
31482       InOp = InOp.getOperand(0);
31483       InVT = InOp.getSimpleValueType();
31484       InNumElts = InVT.getVectorNumElements();
31485     }
31486   }
31487   if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
31488       ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
31489     SmallVector<SDValue, 16> Ops;
31490     for (unsigned i = 0; i < InNumElts; ++i)
31491       Ops.push_back(InOp.getOperand(i));
31492 
31493     EVT EltVT = InOp.getOperand(0).getValueType();
31494 
31495     SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
31496       DAG.getUNDEF(EltVT);
31497     for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
31498       Ops.push_back(FillVal);
31499     return DAG.getBuildVector(NVT, dl, Ops);
31500   }
31501   SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
31502     DAG.getUNDEF(NVT);
31503   return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
31504                      InOp, DAG.getIntPtrConstant(0, dl));
31505 }
31506 
31507 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
31508                              SelectionDAG &DAG) {
31509   assert(Subtarget.hasAVX512() &&
31510          "MGATHER/MSCATTER are supported on AVX-512 arch only");
31511 
31512   MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
31513   SDValue Src = N->getValue();
31514   MVT VT = Src.getSimpleValueType();
31515   assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
31516   SDLoc dl(Op);
31517 
31518   SDValue Scale = N->getScale();
31519   SDValue Index = N->getIndex();
31520   SDValue Mask = N->getMask();
31521   SDValue Chain = N->getChain();
31522   SDValue BasePtr = N->getBasePtr();
31523 
31524   if (VT == MVT::v2f32 || VT == MVT::v2i32) {
31525     assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
31526     // If the index is v2i64 and we have VLX we can use xmm for data and index.
31527     if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
31528       const TargetLowering &TLI = DAG.getTargetLoweringInfo();
31529       EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
31530       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
31531       SDVTList VTs = DAG.getVTList(MVT::Other);
31532       SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
31533       return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
31534                                      N->getMemoryVT(), N->getMemOperand());
31535     }
31536     return SDValue();
31537   }
31538 
31539   MVT IndexVT = Index.getSimpleValueType();
31540 
31541   // If the index is v2i32, we're being called by type legalization and we
31542   // should just let the default handling take care of it.
31543   if (IndexVT == MVT::v2i32)
31544     return SDValue();
31545 
31546   // If we don't have VLX and neither the source data nor the index is 512 bits,
31547   // we need to widen until one is.
31548   if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
31549       !Index.getSimpleValueType().is512BitVector()) {
31550     // Determine how much we need to widen by to get a 512-bit type.
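    // (e.g. v4f32 data with a v4i32 index widens by min(512/128, 512/128) = 4,
    // giving v16f32 data, a v16i32 index and a v16i1 mask.)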
31551     unsigned Factor = std::min(512/VT.getSizeInBits(),
31552                                512/IndexVT.getSizeInBits());
31553     unsigned NumElts = VT.getVectorNumElements() * Factor;
31554 
31555     VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
31556     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
31557     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
31558 
31559     Src = ExtendToType(Src, VT, DAG);
31560     Index = ExtendToType(Index, IndexVT, DAG);
31561     Mask = ExtendToType(Mask, MaskVT, DAG, true);
31562   }
31563 
31564   SDVTList VTs = DAG.getVTList(MVT::Other);
31565   SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
31566   return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
31567                                  N->getMemoryVT(), N->getMemOperand());
31568 }
31569 
31570 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
31571                           SelectionDAG &DAG) {
31572 
31573   MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
31574   MVT VT = Op.getSimpleValueType();
31575   MVT ScalarVT = VT.getScalarType();
31576   SDValue Mask = N->getMask();
31577   MVT MaskVT = Mask.getSimpleValueType();
31578   SDValue PassThru = N->getPassThru();
31579   SDLoc dl(Op);
31580 
31581   // Handle AVX masked loads which don't support passthru other than 0.
31582   if (MaskVT.getVectorElementType() != MVT::i1) {
31583     // We also allow undef in the isel pattern.
31584     if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
31585       return Op;
31586 
31587     SDValue NewLoad = DAG.getMaskedLoad(
31588         VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
31589         getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
31590         N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
31591         N->isExpandingLoad());
31592     // Emit a blend.
31593     SDValue Select = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
31594     return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
31595   }
31596 
31597   assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
31598          "Expanding masked load is supported on AVX-512 target only!");
31599 
31600   assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
31601          "Expanding masked load is supported for 32 and 64-bit types only!");
31602 
31603   assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
31604          "Cannot lower masked load op.");
31605 
31606   assert((ScalarVT.getSizeInBits() >= 32 ||
31607           (Subtarget.hasBWI() &&
31608               (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
31609          "Unsupported masked load op.");
31610 
31611   // This operation is legal for targets with VLX, but without
31612   // VLX the vector should be widened to 512 bits.
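  // (e.g. a v8i32 masked load becomes a v16i32 load whose extra mask lanes are
  // zero, and the original v8i32 result is extracted back out of the wide
  // value below.)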
31613   unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
31614   MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
31615   PassThru = ExtendToType(PassThru, WideDataVT, DAG);
31616 
31617   // Mask element has to be i1.
31618   assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
31619          "Unexpected mask type");
31620 
31621   MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
31622 
31623   Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
31624   SDValue NewLoad = DAG.getMaskedLoad(
31625       WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
31626       PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
31627       N->getExtensionType(), N->isExpandingLoad());
31628 
31629   SDValue Extract =
31630       DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, NewLoad.getValue(0),
31631                   DAG.getIntPtrConstant(0, dl));
31632   SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
31633   return DAG.getMergeValues(RetOps, dl);
31634 }
31635 
31636 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
31637                            SelectionDAG &DAG) {
31638   MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
31639   SDValue DataToStore = N->getValue();
31640   MVT VT = DataToStore.getSimpleValueType();
31641   MVT ScalarVT = VT.getScalarType();
31642   SDValue Mask = N->getMask();
31643   SDLoc dl(Op);
31644 
31645   assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
31646          "Compressing masked store is supported on AVX-512 target only!");
31647 
31648   assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
31649          "Compressing masked store is supported for 32 and 64-bit types only!");
31650 
31651   assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
31652          "Cannot lower masked store op.");
31653 
31654   assert((ScalarVT.getSizeInBits() >= 32 ||
31655           (Subtarget.hasBWI() &&
31656               (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
31657           "Unsupported masked store op.");
31658 
31659   // This operation is legal for targets with VLX, but without
31660   // VLX the vector should be widened to 512 bits.
31661   unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
31662   MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
31663 
31664   // Mask element has to be i1.
31665   assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
31666          "Unexpected mask type");
31667 
31668   MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
31669 
31670   DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
31671   Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
31672   return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
31673                             N->getOffset(), Mask, N->getMemoryVT(),
31674                             N->getMemOperand(), N->getAddressingMode(),
31675                             N->isTruncatingStore(), N->isCompressingStore());
31676 }
31677 
31678 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
31679                             SelectionDAG &DAG) {
31680   assert(Subtarget.hasAVX2() &&
31681          "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
31682 
31683   MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
31684   SDLoc dl(Op);
31685   MVT VT = Op.getSimpleValueType();
31686   SDValue Index = N->getIndex();
31687   SDValue Mask = N->getMask();
31688   SDValue PassThru = N->getPassThru();
31689   MVT IndexVT = Index.getSimpleValueType();
31690 
31691   assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
31692 
31693   // If the index is v2i32, we're being called by type legalization.
31694   if (IndexVT == MVT::v2i32)
31695     return SDValue();
31696 
31697   // If we don't have VLX and neither the passthru nor the index is 512 bits, we
31698   // need to widen until one is.
31699   MVT OrigVT = VT;
31700   if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
31701       !IndexVT.is512BitVector()) {
31702     // Determine how much we need to widen by to get a 512-bit type.
31703     unsigned Factor = std::min(512/VT.getSizeInBits(),
31704                                512/IndexVT.getSizeInBits());
31705 
31706     unsigned NumElts = VT.getVectorNumElements() * Factor;
31707 
31708     VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
31709     IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
31710     MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
31711 
31712     PassThru = ExtendToType(PassThru, VT, DAG);
31713     Index = ExtendToType(Index, IndexVT, DAG);
31714     Mask = ExtendToType(Mask, MaskVT, DAG, true);
31715   }
31716 
31717   // Break dependency on the data register.
31718   if (PassThru.isUndef())
31719     PassThru = getZeroVector(VT, Subtarget, DAG, dl);
31720 
31721   SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
31722                     N->getScale() };
31723   SDValue NewGather = DAG.getMemIntrinsicNode(
31724       X86ISD::MGATHER, dl, DAG.getVTList(VT, MVT::Other), Ops, N->getMemoryVT(),
31725       N->getMemOperand());
31726   SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
31727                                 NewGather, DAG.getIntPtrConstant(0, dl));
31728   return DAG.getMergeValues({Extract, NewGather.getValue(1)}, dl);
31729 }
31730 
31731 static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
31732   SDLoc dl(Op);
31733   SDValue Src = Op.getOperand(0);
31734   MVT DstVT = Op.getSimpleValueType();
31735 
31736   AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
31737   unsigned SrcAS = N->getSrcAddressSpace();
31738 
31739   assert(SrcAS != N->getDestAddressSpace() &&
31740          "addrspacecast must be between different address spaces");
31741 
31742   if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
31743     Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
31744   } else if (DstVT == MVT::i64) {
31745     Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
31746   } else if (DstVT == MVT::i32) {
31747     Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
31748   } else {
31749     report_fatal_error("Bad address space in addrspacecast");
31750   }
31751   return Op;
31752 }
31753 
31754 SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
31755                                               SelectionDAG &DAG) const {
31756   // TODO: Eventually, the lowering of these nodes should be informed by or
31757   // deferred to the GC strategy for the function in which they appear. For
31758   // now, however, they must be lowered to something. Since they are logically
31759   // no-ops in the case of a null GC strategy (or a GC strategy which does not
31760   // require special handling for these nodes), lower them as literal NOOPs for
31761   // the time being.
31762   SmallVector<SDValue, 2> Ops;
31763   Ops.push_back(Op.getOperand(0));
31764   if (Op->getGluedNode())
31765     Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
31766 
31767   SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
31768   return SDValue(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
31769 }
31770 
31771 // Custom split CVTPS2PH with wide types.
31772 static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {
31773   SDLoc dl(Op);
31774   EVT VT = Op.getValueType();
31775   SDValue Lo, Hi;
31776   std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
31777   EVT LoVT, HiVT;
31778   std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
31779   SDValue RC = Op.getOperand(1);
31780   Lo = DAG.getNode(X86ISD::CVTPS2PH, dl, LoVT, Lo, RC);
31781   Hi = DAG.getNode(X86ISD::CVTPS2PH, dl, HiVT, Hi, RC);
31782   return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
31783 }
31784 
31785 static SDValue LowerPREFETCH(SDValue Op, const X86Subtarget &Subtarget,
31786                              SelectionDAG &DAG) {
31787   unsigned IsData = Op.getConstantOperandVal(4);
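        // Operand 4 of ISD::PREFETCH is the llvm.prefetch cache-type argument:
        // 1 selects a data prefetch, 0 an instruction prefetch.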
31788 
31789   // We don't support non-data prefetch without PREFETCHI.
31790   // Just preserve the chain.
31791   if (!IsData && !Subtarget.hasPREFETCHI())
31792     return Op.getOperand(0);
31793 
31794   return Op;
31795 }
31796 
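      // Scan the inline-asm strings for a reference to operand OpNo (" $N" or
      // " ${N:...}") and return the mnemonic of the instruction that uses it
      // (e.g. "call"), or an empty StringRef if no such reference is found.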
31797 static StringRef getInstrStrFromOpNo(const SmallVectorImpl<StringRef> &AsmStrs,
31798                                      unsigned OpNo) {
31799   const APInt Operand(32, OpNo);
31800   std::string OpNoStr = llvm::toString(Operand, 10, false);
31801   std::string Str(" $");
31802 
31803   std::string OpNoStr1(Str + OpNoStr);             // e.g. " $1" (OpNo=1)
31804   std::string OpNoStr2(Str + "{" + OpNoStr + ":"); // With modifier, e.g. ${1:P}
31805 
31806   auto I = StringRef::npos;
31807   for (auto &AsmStr : AsmStrs) {
31808     // Match the OpNo string. We must match exactly so that we don't match a
31809     // sub-string, e.g. "$12" contains "$1".
31810     if (AsmStr.ends_with(OpNoStr1))
31811       I = AsmStr.size() - OpNoStr1.size();
31812 
31813     // Get the index of operand in AsmStr.
31814     if (I == StringRef::npos)
31815       I = AsmStr.find(OpNoStr1 + ",");
31816     if (I == StringRef::npos)
31817       I = AsmStr.find(OpNoStr2);
31818 
31819     if (I == StringRef::npos)
31820       continue;
31821 
31822     assert(I > 0 && "Unexpected inline asm string!");
31823     // Remove the operand string and label (if it exists).
31824     // For example:
31825     // ".L__MSASMLABEL_.${:uid}__l:call dword ptr ${0:P}"
31826     // ==>
31827     // ".L__MSASMLABEL_.${:uid}__l:call dword ptr "
31828     // ==>
31829     // "call dword ptr "
31830     auto TmpStr = AsmStr.substr(0, I);
31831     I = TmpStr.rfind(':');
31832     if (I != StringRef::npos)
31833       TmpStr = TmpStr.substr(I + 1);
31834     return TmpStr.take_while(llvm::isAlpha);
31835   }
31836 
31837   return StringRef();
31838 }
31839 
31840 bool X86TargetLowering::isInlineAsmTargetBranch(
31841     const SmallVectorImpl<StringRef> &AsmStrs, unsigned OpNo) const {
31842   // In a __asm block, __asm inst foo where inst is CALL or JMP should be
31843   // changed from indirect TargetLowering::C_Memory to direct
31844   // TargetLowering::C_Address.
31845   // We don't need to special case LOOP* and Jcc, which cannot target a memory
31846   // location.
31847   StringRef Inst = getInstrStrFromOpNo(AsmStrs, OpNo);
31848   return Inst.equals_insensitive("call") || Inst.equals_insensitive("jmp");
31849 }
31850 
31851 /// Provide custom lowering hooks for some operations.
31852 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
31853   switch (Op.getOpcode()) {
31854   default: llvm_unreachable("Should not custom lower this!");
31855   case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, Subtarget, DAG);
31856   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
31857     return LowerCMP_SWAP(Op, Subtarget, DAG);
31858   case ISD::CTPOP:              return LowerCTPOP(Op, Subtarget, DAG);
31859   case ISD::ATOMIC_LOAD_ADD:
31860   case ISD::ATOMIC_LOAD_SUB:
31861   case ISD::ATOMIC_LOAD_OR:
31862   case ISD::ATOMIC_LOAD_XOR:
31863   case ISD::ATOMIC_LOAD_AND:    return lowerAtomicArith(Op, DAG, Subtarget);
31864   case ISD::ATOMIC_STORE:       return LowerATOMIC_STORE(Op, DAG, Subtarget);
31865   case ISD::BITREVERSE:         return LowerBITREVERSE(Op, Subtarget, DAG);
31866   case ISD::PARITY:             return LowerPARITY(Op, Subtarget, DAG);
31867   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
31868   case ISD::CONCAT_VECTORS:     return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
31869   case ISD::VECTOR_SHUFFLE:     return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
31870   case ISD::VSELECT:            return LowerVSELECT(Op, DAG);
31871   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
31872   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
31873   case ISD::INSERT_SUBVECTOR:   return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
31874   case ISD::EXTRACT_SUBVECTOR:  return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
31875   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
31876   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
31877   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
31878   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
31879   case ISD::ExternalSymbol:     return LowerExternalSymbol(Op, DAG);
31880   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
31881   case ISD::SHL_PARTS:
31882   case ISD::SRA_PARTS:
31883   case ISD::SRL_PARTS:          return LowerShiftParts(Op, DAG);
31884   case ISD::FSHL:
31885   case ISD::FSHR:               return LowerFunnelShift(Op, Subtarget, DAG);
31886   case ISD::STRICT_SINT_TO_FP:
31887   case ISD::SINT_TO_FP:         return LowerSINT_TO_FP(Op, DAG);
31888   case ISD::STRICT_UINT_TO_FP:
31889   case ISD::UINT_TO_FP:         return LowerUINT_TO_FP(Op, DAG);
31890   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
31891   case ISD::ZERO_EXTEND:        return LowerZERO_EXTEND(Op, Subtarget, DAG);
31892   case ISD::SIGN_EXTEND:        return LowerSIGN_EXTEND(Op, Subtarget, DAG);
31893   case ISD::ANY_EXTEND:         return LowerANY_EXTEND(Op, Subtarget, DAG);
31894   case ISD::ZERO_EXTEND_VECTOR_INREG:
31895   case ISD::SIGN_EXTEND_VECTOR_INREG:
31896     return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
31897   case ISD::FP_TO_SINT:
31898   case ISD::STRICT_FP_TO_SINT:
31899   case ISD::FP_TO_UINT:
31900   case ISD::STRICT_FP_TO_UINT:  return LowerFP_TO_INT(Op, DAG);
31901   case ISD::FP_TO_SINT_SAT:
31902   case ISD::FP_TO_UINT_SAT:     return LowerFP_TO_INT_SAT(Op, DAG);
31903   case ISD::FP_EXTEND:
31904   case ISD::STRICT_FP_EXTEND:   return LowerFP_EXTEND(Op, DAG);
31905   case ISD::FP_ROUND:
31906   case ISD::STRICT_FP_ROUND:    return LowerFP_ROUND(Op, DAG);
31907   case ISD::FP16_TO_FP:
31908   case ISD::STRICT_FP16_TO_FP:  return LowerFP16_TO_FP(Op, DAG);
31909   case ISD::FP_TO_FP16:
31910   case ISD::STRICT_FP_TO_FP16:  return LowerFP_TO_FP16(Op, DAG);
31911   case ISD::FP_TO_BF16:         return LowerFP_TO_BF16(Op, DAG);
31912   case ISD::LOAD:               return LowerLoad(Op, Subtarget, DAG);
31913   case ISD::STORE:              return LowerStore(Op, Subtarget, DAG);
31914   case ISD::FADD:
31915   case ISD::FSUB:               return lowerFaddFsub(Op, DAG);
31916   case ISD::FROUND:             return LowerFROUND(Op, DAG);
31917   case ISD::FABS:
31918   case ISD::FNEG:               return LowerFABSorFNEG(Op, DAG);
31919   case ISD::FCOPYSIGN:          return LowerFCOPYSIGN(Op, DAG);
31920   case ISD::FGETSIGN:           return LowerFGETSIGN(Op, DAG);
31921   case ISD::LRINT:
31922   case ISD::LLRINT:             return LowerLRINT_LLRINT(Op, DAG);
31923   case ISD::SETCC:
31924   case ISD::STRICT_FSETCC:
31925   case ISD::STRICT_FSETCCS:     return LowerSETCC(Op, DAG);
31926   case ISD::SETCCCARRY:         return LowerSETCCCARRY(Op, DAG);
31927   case ISD::SELECT:             return LowerSELECT(Op, DAG);
31928   case ISD::BRCOND:             return LowerBRCOND(Op, DAG);
31929   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
31930   case ISD::VASTART:            return LowerVASTART(Op, DAG);
31931   case ISD::VAARG:              return LowerVAARG(Op, DAG);
31932   case ISD::VACOPY:             return LowerVACOPY(Op, Subtarget, DAG);
31933   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
31934   case ISD::INTRINSIC_VOID:
31935   case ISD::INTRINSIC_W_CHAIN:  return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
31936   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
31937   case ISD::ADDROFRETURNADDR:   return LowerADDROFRETURNADDR(Op, DAG);
31938   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
31939   case ISD::FRAME_TO_ARGS_OFFSET:
31940                                 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
31941   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
31942   case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
31943   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
31944   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
31945   case ISD::EH_SJLJ_SETUP_DISPATCH:
31946     return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
31947   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
31948   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
31949   case ISD::GET_ROUNDING:       return LowerGET_ROUNDING(Op, DAG);
31950   case ISD::SET_ROUNDING:       return LowerSET_ROUNDING(Op, DAG);
31951   case ISD::GET_FPENV_MEM:      return LowerGET_FPENV_MEM(Op, DAG);
31952   case ISD::SET_FPENV_MEM:      return LowerSET_FPENV_MEM(Op, DAG);
31953   case ISD::RESET_FPENV:        return LowerRESET_FPENV(Op, DAG);
31954   case ISD::CTLZ:
31955   case ISD::CTLZ_ZERO_UNDEF:    return LowerCTLZ(Op, Subtarget, DAG);
31956   case ISD::CTTZ:
31957   case ISD::CTTZ_ZERO_UNDEF:    return LowerCTTZ(Op, Subtarget, DAG);
31958   case ISD::MUL:                return LowerMUL(Op, Subtarget, DAG);
31959   case ISD::MULHS:
31960   case ISD::MULHU:              return LowerMULH(Op, Subtarget, DAG);
31961   case ISD::ROTL:
31962   case ISD::ROTR:               return LowerRotate(Op, Subtarget, DAG);
31963   case ISD::SRA:
31964   case ISD::SRL:
31965   case ISD::SHL:                return LowerShift(Op, Subtarget, DAG);
31966   case ISD::SADDO:
31967   case ISD::UADDO:
31968   case ISD::SSUBO:
31969   case ISD::USUBO:              return LowerXALUO(Op, DAG);
31970   case ISD::SMULO:
31971   case ISD::UMULO:              return LowerMULO(Op, Subtarget, DAG);
31972   case ISD::READCYCLECOUNTER:   return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
31973   case ISD::BITCAST:            return LowerBITCAST(Op, Subtarget, DAG);
31974   case ISD::SADDO_CARRY:
31975   case ISD::SSUBO_CARRY:
31976   case ISD::UADDO_CARRY:
31977   case ISD::USUBO_CARRY:        return LowerADDSUBO_CARRY(Op, DAG);
31978   case ISD::ADD:
31979   case ISD::SUB:                return lowerAddSub(Op, DAG, Subtarget);
31980   case ISD::UADDSAT:
31981   case ISD::SADDSAT:
31982   case ISD::USUBSAT:
31983   case ISD::SSUBSAT:            return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
31984   case ISD::SMAX:
31985   case ISD::SMIN:
31986   case ISD::UMAX:
31987   case ISD::UMIN:               return LowerMINMAX(Op, Subtarget, DAG);
31988   case ISD::FMINIMUM:
31989   case ISD::FMAXIMUM:
31990     return LowerFMINIMUM_FMAXIMUM(Op, Subtarget, DAG);
31991   case ISD::ABS:                return LowerABS(Op, Subtarget, DAG);
31992   case ISD::ABDS:
31993   case ISD::ABDU:               return LowerABD(Op, Subtarget, DAG);
31994   case ISD::AVGCEILU:           return LowerAVG(Op, Subtarget, DAG);
31995   case ISD::FSINCOS:            return LowerFSINCOS(Op, Subtarget, DAG);
31996   case ISD::MLOAD:              return LowerMLOAD(Op, Subtarget, DAG);
31997   case ISD::MSTORE:             return LowerMSTORE(Op, Subtarget, DAG);
31998   case ISD::MGATHER:            return LowerMGATHER(Op, Subtarget, DAG);
31999   case ISD::MSCATTER:           return LowerMSCATTER(Op, Subtarget, DAG);
32000   case ISD::GC_TRANSITION_START:
32001   case ISD::GC_TRANSITION_END:  return LowerGC_TRANSITION(Op, DAG);
32002   case ISD::ADDRSPACECAST:      return LowerADDRSPACECAST(Op, DAG);
32003   case X86ISD::CVTPS2PH:        return LowerCVTPS2PH(Op, DAG);
32004   case ISD::PREFETCH:           return LowerPREFETCH(Op, Subtarget, DAG);
32005   }
32006 }
32007 
32008 /// Replace a node with an illegal result type with a new node built out of
32009 /// custom code.
32010 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
32011                                            SmallVectorImpl<SDValue>&Results,
32012                                            SelectionDAG &DAG) const {
32013   SDLoc dl(N);
32014   switch (N->getOpcode()) {
32015   default:
32016 #ifndef NDEBUG
32017     dbgs() << "ReplaceNodeResults: ";
32018     N->dump(&DAG);
32019 #endif
32020     llvm_unreachable("Do not know how to custom type legalize this operation!");
32021   case X86ISD::CVTPH2PS: {
32022     EVT VT = N->getValueType(0);
32023     SDValue Lo, Hi;
32024     std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
32025     EVT LoVT, HiVT;
32026     std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
32027     Lo = DAG.getNode(X86ISD::CVTPH2PS, dl, LoVT, Lo);
32028     Hi = DAG.getNode(X86ISD::CVTPH2PS, dl, HiVT, Hi);
32029     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32030     Results.push_back(Res);
32031     return;
32032   }
32033   case X86ISD::STRICT_CVTPH2PS: {
32034     EVT VT = N->getValueType(0);
32035     SDValue Lo, Hi;
32036     std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 1);
32037     EVT LoVT, HiVT;
32038     std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
32039     Lo = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {LoVT, MVT::Other},
32040                      {N->getOperand(0), Lo});
32041     Hi = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {HiVT, MVT::Other},
32042                      {N->getOperand(0), Hi});
32043     SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
32044                                 Lo.getValue(1), Hi.getValue(1));
32045     SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32046     Results.push_back(Res);
32047     Results.push_back(Chain);
32048     return;
32049   }
32050   case X86ISD::CVTPS2PH:
32051     Results.push_back(LowerCVTPS2PH(SDValue(N, 0), DAG));
32052     return;
32053   case ISD::CTPOP: {
32054     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
32055     // Use a v2i64 if possible.
32056     bool NoImplicitFloatOps =
32057         DAG.getMachineFunction().getFunction().hasFnAttribute(
32058             Attribute::NoImplicitFloat);
32059     if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
32060       SDValue Wide =
32061           DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
32062       Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
32063       // The bit count fits in 32 bits, so extract it as an i32 and then zero
32064       // extend to i64. Otherwise we end up extracting bits 63:32 separately.
32065       Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
32066       Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
32067                          DAG.getIntPtrConstant(0, dl));
32068       Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
32069       Results.push_back(Wide);
32070     }
32071     return;
32072   }
32073   case ISD::MUL: {
32074     EVT VT = N->getValueType(0);
32075     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32076            VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
32077     // Pre-promote these to vXi16 to avoid op legalization thinking all 16
32078     // elements are needed.
32079     MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
32080     SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
32081     SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
32082     SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
32083     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32084     unsigned NumConcats = 16 / VT.getVectorNumElements();
32085     SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
32086     ConcatOps[0] = Res;
32087     Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
32088     Results.push_back(Res);
32089     return;
32090   }
32091   case ISD::SMULO:
32092   case ISD::UMULO: {
32093     EVT VT = N->getValueType(0);
32094     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32095            VT == MVT::v2i32 && "Unexpected VT!");
32096     bool IsSigned = N->getOpcode() == ISD::SMULO;
32097     unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
32098     SDValue Op0 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(0));
32099     SDValue Op1 = DAG.getNode(ExtOpc, dl, MVT::v2i64, N->getOperand(1));
32100     SDValue Res = DAG.getNode(ISD::MUL, dl, MVT::v2i64, Op0, Op1);
32101     // Extract the high 32 bits from each result using PSHUFD.
32102     // TODO: Could use SRL+TRUNCATE but that doesn't become a PSHUFD.
32103     SDValue Hi = DAG.getBitcast(MVT::v4i32, Res);
32104     Hi = DAG.getVectorShuffle(MVT::v4i32, dl, Hi, Hi, {1, 3, -1, -1});
32105     Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Hi,
32106                      DAG.getIntPtrConstant(0, dl));
32107 
32108     // Truncate the low bits of the result. This will become PSHUFD.
32109     Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32110 
32111     SDValue HiCmp;
32112     if (IsSigned) {
32113       // SMULO overflows if the high bits don't match the sign of the low.
32114       HiCmp = DAG.getNode(ISD::SRA, dl, VT, Res, DAG.getConstant(31, dl, VT));
32115     } else {
32116       // UMULO overflows if the high bits are non-zero.
32117       HiCmp = DAG.getConstant(0, dl, VT);
32118     }
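          // Illustrative examples: unsigned 0x80000000 * 2 = 0x100000000 has a
          // non-zero high half, so UMULO overflows; signed 0x40000000 * 4 = 2^32
          // has low half 0 (sign 0) but high half 1, so SMULO overflows.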
32119     SDValue Ovf = DAG.getSetCC(dl, N->getValueType(1), Hi, HiCmp, ISD::SETNE);
32120 
32121     // Widen the result by padding with undef.
32122     Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
32123                       DAG.getUNDEF(VT));
32124     Results.push_back(Res);
32125     Results.push_back(Ovf);
32126     return;
32127   }
32128   case X86ISD::VPMADDWD: {
32129     // Legalize types for X86ISD::VPMADDWD by widening.
32130     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32131 
32132     EVT VT = N->getValueType(0);
32133     EVT InVT = N->getOperand(0).getValueType();
32134     assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
32135            "Expected a VT that divides into 128 bits.");
32136     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32137            "Unexpected type action!");
32138     unsigned NumConcat = 128 / InVT.getSizeInBits();
32139 
32140     EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
32141                                     InVT.getVectorElementType(),
32142                                     NumConcat * InVT.getVectorNumElements());
32143     EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
32144                                   VT.getVectorElementType(),
32145                                   NumConcat * VT.getVectorNumElements());
32146 
32147     SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
32148     Ops[0] = N->getOperand(0);
32149     SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
32150     Ops[0] = N->getOperand(1);
32151     SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
32152 
32153     SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
32154     Results.push_back(Res);
32155     return;
32156   }
32157   // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
32158   case X86ISD::FMINC:
32159   case X86ISD::FMIN:
32160   case X86ISD::FMAXC:
32161   case X86ISD::FMAX: {
32162     EVT VT = N->getValueType(0);
32163     assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
32164     SDValue UNDEF = DAG.getUNDEF(VT);
32165     SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
32166                               N->getOperand(0), UNDEF);
32167     SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
32168                               N->getOperand(1), UNDEF);
32169     Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
32170     return;
32171   }
32172   case ISD::SDIV:
32173   case ISD::UDIV:
32174   case ISD::SREM:
32175   case ISD::UREM: {
32176     EVT VT = N->getValueType(0);
32177     if (VT.isVector()) {
32178       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32179              "Unexpected type action!");
32180       // If the RHS is a constant splat vector we can widen this and let the
32181       // division/remainder-by-constant optimization handle it.
32182       // TODO: Can we do something for non-splat?
32183       APInt SplatVal;
32184       if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
32185         unsigned NumConcats = 128 / VT.getSizeInBits();
32186         SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
32187         Ops0[0] = N->getOperand(0);
32188         EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
32189         SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
32190         SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
32191         SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
32192         Results.push_back(Res);
32193       }
32194       return;
32195     }
32196 
32197     SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
32198     Results.push_back(V);
32199     return;
32200   }
32201   case ISD::TRUNCATE: {
32202     MVT VT = N->getSimpleValueType(0);
32203     if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
32204       return;
32205 
32206     // The generic legalizer will try to widen the input type to the same
32207     // number of elements as the widened result type. But this isn't always
32208     // the best thing so do some custom legalization to avoid some cases.
32209     MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
32210     SDValue In = N->getOperand(0);
32211     EVT InVT = In.getValueType();
32212     EVT InEltVT = InVT.getVectorElementType();
32213     EVT EltVT = VT.getVectorElementType();
32214     unsigned MinElts = VT.getVectorNumElements();
32215     unsigned WidenNumElts = WidenVT.getVectorNumElements();
32216     unsigned InBits = InVT.getSizeInBits();
32217 
32218     // See if there are sufficient leading bits to perform a PACKUS/PACKSS.
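          // PACKSS/PACKUS saturate rather than truncate, so this only applies when
          // the bits being discarded are known to be sign- or zero-extension bits.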
32219     unsigned PackOpcode;
32220     if (SDValue Src =
32221             matchTruncateWithPACK(PackOpcode, VT, In, dl, DAG, Subtarget)) {
32222       if (SDValue Res = truncateVectorWithPACK(PackOpcode, VT, Src,
32223                                                dl, DAG, Subtarget)) {
32224         Res = widenSubVector(WidenVT, Res, false, Subtarget, DAG, dl);
32225         Results.push_back(Res);
32226         return;
32227       }
32228     }
32229 
32230     if (128 % InBits == 0) {
32231       // 128-bit and smaller inputs should avoid the truncate altogether and
32232       // just use a build_vector that will become a shuffle.
32233       // TODO: Widen and use a shuffle directly?
32234       SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
32235       // Use the original element count so we don't do more scalar opts than
32236       // necessary.
32237       for (unsigned i=0; i < MinElts; ++i) {
32238         SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
32239                                   DAG.getIntPtrConstant(i, dl));
32240         Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
32241       }
32242       Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
32243       return;
32244     }
32245 
32246     // With AVX512 there are some cases that can use a target specific
32247     // truncate node to go from 256/512 to less than 128 with zeros in the
32248     // upper elements of the 128 bit result.
32249     if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
32250       // We can use VTRUNC directly for 256-bit inputs with VLX or any 512-bit input.
32251       if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
32252         Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
32253         return;
32254       }
32255       // There's one case we can widen to 512 bits and use VTRUNC.
32256       if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
32257         In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
32258                          DAG.getUNDEF(MVT::v4i64));
32259         Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
32260         return;
32261       }
32262     }
32263     if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
32264         getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
32265         isTypeLegal(MVT::v4i64)) {
32266       // The input needs to be split and the output needs to be widened. Use two
32267       // VTRUNCs, and shuffle their results together into the wider type.
32268       SDValue Lo, Hi;
32269       std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
32270 
32271       Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
32272       Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
32273       SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
32274                                          { 0,  1,  2,  3, 16, 17, 18, 19,
32275                                           -1, -1, -1, -1, -1, -1, -1, -1 });
32276       Results.push_back(Res);
32277       return;
32278     }
32279 
32280     // Attempt to widen the truncation input vector to let LowerTRUNCATE handle
32281     // this via type legalization.
32282     if ((InEltVT == MVT::i16 || InEltVT == MVT::i32 || InEltVT == MVT::i64) &&
32283         (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32) &&
32284         (!Subtarget.hasSSSE3() ||
32285          (!isTypeLegal(InVT) &&
32286           !(MinElts <= 4 && InEltVT == MVT::i64 && EltVT == MVT::i8)))) {
32287       SDValue WidenIn = widenSubVector(In, false, Subtarget, DAG, dl,
32288                                        InEltVT.getSizeInBits() * WidenNumElts);
32289       Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, WidenVT, WidenIn));
32290       return;
32291     }
32292 
32293     return;
32294   }
32295   case ISD::ANY_EXTEND:
32296     // Right now, only MVT::v8i8 has Custom action for an illegal type.
32297     // It's intended to custom handle the input type.
32298     assert(N->getValueType(0) == MVT::v8i8 &&
32299            "Do not know how to legalize this Node");
32300     return;
32301   case ISD::SIGN_EXTEND:
32302   case ISD::ZERO_EXTEND: {
32303     EVT VT = N->getValueType(0);
32304     SDValue In = N->getOperand(0);
32305     EVT InVT = In.getValueType();
32306     if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
32307         (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
32308       assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
32309              "Unexpected type action!");
32310       assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
32311       // Custom split this so we can extend i8/i16->i32 in-vector. This is better
32312       // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using sra,
32313       // followed by an extend from i32 to i64 using pcmpgt. By custom splitting
32314       // we allow the sra from the extend to i32 to be shared by both halves.
32315       In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
32316 
32317       // Fill a vector with sign bits for each element.
32318       SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
32319       SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
32320 
32321       // Create an unpackl and unpackh to interleave the sign bits then bitcast
32322       // to v2i64.
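            // E.g. for input <x0,x1,x2,x3>, unpacklo gives <x0,s0,x1,s1> and
            // unpackhi gives <x2,s2,x3,s3>; with little-endian element order each
            // (xi,si) pair bitcasts to the sign-extended i64 value of xi.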
32323       SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
32324                                         {0, 4, 1, 5});
32325       Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
32326       SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
32327                                         {2, 6, 3, 7});
32328       Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
32329 
32330       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32331       Results.push_back(Res);
32332       return;
32333     }
32334 
32335     if (VT == MVT::v16i32 || VT == MVT::v8i64) {
32336       if (!InVT.is128BitVector()) {
32337         // Not a 128 bit vector, but maybe type legalization will promote
32338         // it to 128 bits.
32339         if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
32340           return;
32341         InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
32342         if (!InVT.is128BitVector())
32343           return;
32344 
32345         // Promote the input to 128 bits. Type legalization will turn this into
32346         // zext_inreg/sext_inreg.
32347         In = DAG.getNode(N->getOpcode(), dl, InVT, In);
32348       }
32349 
32350       // Perform custom splitting instead of the two stage extend we would get
32351       // by default.
32352       EVT LoVT, HiVT;
32353       std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
32354       assert(isTypeLegal(LoVT) && "Split VT not legal?");
32355 
32356       SDValue Lo = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, LoVT, In, DAG);
32357 
32358       // We need to shift the input over by half the number of elements.
32359       unsigned NumElts = InVT.getVectorNumElements();
32360       unsigned HalfNumElts = NumElts / 2;
32361       SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
32362       for (unsigned i = 0; i != HalfNumElts; ++i)
32363         ShufMask[i] = i + HalfNumElts;
32364 
32365       SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
32366       Hi = getEXTEND_VECTOR_INREG(N->getOpcode(), dl, HiVT, Hi, DAG);
32367 
32368       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
32369       Results.push_back(Res);
32370     }
32371     return;
32372   }
32373   case ISD::FP_TO_SINT:
32374   case ISD::STRICT_FP_TO_SINT:
32375   case ISD::FP_TO_UINT:
32376   case ISD::STRICT_FP_TO_UINT: {
32377     bool IsStrict = N->isStrictFPOpcode();
32378     bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
32379                     N->getOpcode() == ISD::STRICT_FP_TO_SINT;
32380     EVT VT = N->getValueType(0);
32381     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32382     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
32383     EVT SrcVT = Src.getValueType();
32384 
32385     SDValue Res;
32386     if (isSoftF16(SrcVT, Subtarget)) {
32387       EVT NVT = VT.isVector() ? VT.changeVectorElementType(MVT::f32) : MVT::f32;
32388       if (IsStrict) {
32389         Res =
32390             DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
32391                         {Chain, DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
32392                                             {NVT, MVT::Other}, {Chain, Src})});
32393         Chain = Res.getValue(1);
32394       } else {
32395         Res = DAG.getNode(N->getOpcode(), dl, VT,
32396                           DAG.getNode(ISD::FP_EXTEND, dl, NVT, Src));
32397       }
32398       Results.push_back(Res);
32399       if (IsStrict)
32400         Results.push_back(Chain);
32401 
32402       return;
32403     }
32404 
32405     if (VT.isVector() && Subtarget.hasFP16() &&
32406         SrcVT.getVectorElementType() == MVT::f16) {
32407       EVT EleVT = VT.getVectorElementType();
32408       EVT ResVT = EleVT == MVT::i32 ? MVT::v4i32 : MVT::v8i16;
32409 
32410       if (SrcVT != MVT::v8f16) {
32411         SDValue Tmp =
32412             IsStrict ? DAG.getConstantFP(0.0, dl, SrcVT) : DAG.getUNDEF(SrcVT);
32413         SmallVector<SDValue, 4> Ops(SrcVT == MVT::v2f16 ? 4 : 2, Tmp);
32414         Ops[0] = Src;
32415         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f16, Ops);
32416       }
32417 
32418       if (IsStrict) {
32419         unsigned Opc =
32420             IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32421         Res =
32422             DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {N->getOperand(0), Src});
32423         Chain = Res.getValue(1);
32424       } else {
32425         unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32426         Res = DAG.getNode(Opc, dl, ResVT, Src);
32427       }
32428 
32429       // TODO: Need to add exception check code for strict FP.
32430       if (EleVT.getSizeInBits() < 16) {
32431         MVT TmpVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8);
32432         Res = DAG.getNode(ISD::TRUNCATE, dl, TmpVT, Res);
32433 
32434         // Now widen to 128 bits.
32435         unsigned NumConcats = 128 / TmpVT.getSizeInBits();
32436         MVT ConcatVT = MVT::getVectorVT(EleVT.getSimpleVT(), 8 * NumConcats);
32437         SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(TmpVT));
32438         ConcatOps[0] = Res;
32439         Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
32440       }
32441 
32442       Results.push_back(Res);
32443       if (IsStrict)
32444         Results.push_back(Chain);
32445 
32446       return;
32447     }
32448 
32449     if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
32450       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32451              "Unexpected type action!");
32452 
32453       // Try to create a 128 bit vector, but don't exceed a 32 bit element.
32454       unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
32455       MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
32456                                        VT.getVectorNumElements());
32457       SDValue Res;
32458       SDValue Chain;
32459       if (IsStrict) {
32460         Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
32461                           {N->getOperand(0), Src});
32462         Chain = Res.getValue(1);
32463       } else
32464         Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
32465 
32466       // Preserve what we know about the size of the original result. If the
32467       // result is v2i32, we have to manually widen the assert.
32468       if (PromoteVT == MVT::v2i32)
32469         Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Res,
32470                           DAG.getUNDEF(MVT::v2i32));
32471 
32472       Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext, dl,
32473                         Res.getValueType(), Res,
32474                         DAG.getValueType(VT.getVectorElementType()));
32475 
32476       if (PromoteVT == MVT::v2i32)
32477         Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i32, Res,
32478                           DAG.getIntPtrConstant(0, dl));
32479 
32480       // Truncate back to the original width.
32481       Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
32482 
32483       // Now widen to 128 bits.
32484       unsigned NumConcats = 128 / VT.getSizeInBits();
32485       MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
32486                                       VT.getVectorNumElements() * NumConcats);
32487       SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
32488       ConcatOps[0] = Res;
32489       Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
32490       Results.push_back(Res);
32491       if (IsStrict)
32492         Results.push_back(Chain);
32493       return;
32494     }
32495 
32496 
32497     if (VT == MVT::v2i32) {
32498       assert((!IsStrict || IsSigned || Subtarget.hasAVX512()) &&
32499              "Strict unsigned conversion requires AVX512");
32500       assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32501       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
32502              "Unexpected type action!");
32503       if (Src.getValueType() == MVT::v2f64) {
32504         if (!IsSigned && !Subtarget.hasAVX512()) {
32505           SDValue Res =
32506               expandFP_TO_UINT_SSE(MVT::v4i32, Src, dl, DAG, Subtarget);
32507           Results.push_back(Res);
32508           return;
32509         }
32510 
32511         unsigned Opc;
32512         if (IsStrict)
32513           Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32514         else
32515           Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32516 
32517         // If we have VLX we can emit a target-specific FP_TO_UINT node.
32518         if (!IsSigned && !Subtarget.hasVLX()) {
32519           // Otherwise we can defer to the generic legalizer which will widen
32520           // the input as well. This will be further widened during op
32521           // legalization to v8i32<-v8f64.
32522           // For strict nodes we'll need to widen ourselves.
32523           // FIXME: Fix the type legalizer to safely widen strict nodes?
32524           if (!IsStrict)
32525             return;
32526           Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
32527                             DAG.getConstantFP(0.0, dl, MVT::v2f64));
32528           Opc = N->getOpcode();
32529         }
32530         SDValue Res;
32531         SDValue Chain;
32532         if (IsStrict) {
32533           Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
32534                             {N->getOperand(0), Src});
32535           Chain = Res.getValue(1);
32536         } else {
32537           Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
32538         }
32539         Results.push_back(Res);
32540         if (IsStrict)
32541           Results.push_back(Chain);
32542         return;
32543       }
32544 
32545       // Custom widen strict v2f32->v2i32 by padding with zeros.
32546       // FIXME: Should generic type legalizer do this?
32547       if (Src.getValueType() == MVT::v2f32 && IsStrict) {
32548         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
32549                           DAG.getConstantFP(0.0, dl, MVT::v2f32));
32550         SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
32551                                   {N->getOperand(0), Src});
32552         Results.push_back(Res);
32553         Results.push_back(Res.getValue(1));
32554         return;
32555       }
32556 
32557       // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
32558       // so early out here.
32559       return;
32560     }
32561 
32562     assert(!VT.isVector() && "Vectors should have been handled above!");
32563 
32564     if ((Subtarget.hasDQI() && VT == MVT::i64 &&
32565          (SrcVT == MVT::f32 || SrcVT == MVT::f64)) ||
32566         (Subtarget.hasFP16() && SrcVT == MVT::f16)) {
32567       assert(!Subtarget.is64Bit() && "i64 should be legal");
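            // On 32-bit targets a scalar i64 result isn't legal, but AVX512DQ (and
            // FP16 for f16 sources) provides packed fp-to-i64 conversions, so do
            // the conversion in a vector register and extract element 0.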
32568       unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
32569       // If we use a 128-bit result we might need to use a target specific node.
32570       unsigned SrcElts =
32571           std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
32572       MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
32573       MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
32574       unsigned Opc = N->getOpcode();
32575       if (NumElts != SrcElts) {
32576         if (IsStrict)
32577           Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
32578         else
32579           Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
32580       }
32581 
32582       SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
32583       SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
32584                                 DAG.getConstantFP(0.0, dl, VecInVT), Src,
32585                                 ZeroIdx);
32586       SDValue Chain;
32587       if (IsStrict) {
32588         SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
32589         Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
32590         Chain = Res.getValue(1);
32591       } else
32592         Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
32593       Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
32594       Results.push_back(Res);
32595       if (IsStrict)
32596         Results.push_back(Chain);
32597       return;
32598     }
32599 
32600     if (VT == MVT::i128 && Subtarget.isTargetWin64()) {
32601       SDValue Chain;
32602       SDValue V = LowerWin64_FP_TO_INT128(SDValue(N, 0), DAG, Chain);
32603       Results.push_back(V);
32604       if (IsStrict)
32605         Results.push_back(Chain);
32606       return;
32607     }
32608 
32609     if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
32610       Results.push_back(V);
32611       if (IsStrict)
32612         Results.push_back(Chain);
32613     }
32614     return;
32615   }
32616   case ISD::LRINT:
32617   case ISD::LLRINT: {
32618     if (SDValue V = LRINT_LLRINTHelper(N, DAG))
32619       Results.push_back(V);
32620     return;
32621   }
32622 
32623   case ISD::SINT_TO_FP:
32624   case ISD::STRICT_SINT_TO_FP:
32625   case ISD::UINT_TO_FP:
32626   case ISD::STRICT_UINT_TO_FP: {
32627     bool IsStrict = N->isStrictFPOpcode();
32628     bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
32629                     N->getOpcode() == ISD::STRICT_SINT_TO_FP;
32630     EVT VT = N->getValueType(0);
32631     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32632     if (VT.getVectorElementType() == MVT::f16 && Subtarget.hasFP16() &&
32633         Subtarget.hasVLX()) {
32634       if (Src.getValueType().getVectorElementType() == MVT::i16)
32635         return;
32636 
32637       if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2i32)
32638         Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
32639                           IsStrict ? DAG.getConstant(0, dl, MVT::v2i32)
32640                                    : DAG.getUNDEF(MVT::v2i32));
32641       if (IsStrict) {
32642         unsigned Opc =
32643             IsSigned ? X86ISD::STRICT_CVTSI2P : X86ISD::STRICT_CVTUI2P;
32644         SDValue Res = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
32645                                   {N->getOperand(0), Src});
32646         Results.push_back(Res);
32647         Results.push_back(Res.getValue(1));
32648       } else {
32649         unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
32650         Results.push_back(DAG.getNode(Opc, dl, MVT::v8f16, Src));
32651       }
32652       return;
32653     }
32654     if (VT != MVT::v2f32)
32655       return;
32656     EVT SrcVT = Src.getValueType();
32657     if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
32658       if (IsStrict) {
32659         unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
32660                                 : X86ISD::STRICT_CVTUI2P;
32661         SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
32662                                   {N->getOperand(0), Src});
32663         Results.push_back(Res);
32664         Results.push_back(Res.getValue(1));
32665       } else {
32666         unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
32667         Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
32668       }
32669       return;
32670     }
32671     if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
32672         Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
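            // There is no unsigned i64 -> fp conversion before AVX512, so emulate
            // it: lanes with the sign bit set are halved (OR-ing in the low bit so
            // the final rounding stays correct), converted as signed, then doubled.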
32673       SDValue Zero = DAG.getConstant(0, dl, SrcVT);
32674       SDValue One  = DAG.getConstant(1, dl, SrcVT);
32675       SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
32676                                  DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
32677                                  DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
32678       SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
32679       SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
32680       SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
32681       for (int i = 0; i != 2; ++i) {
32682         SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
32683                                   SignSrc, DAG.getIntPtrConstant(i, dl));
32684         if (IsStrict)
32685           SignCvts[i] =
32686               DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
32687                           {N->getOperand(0), Elt});
32688         else
32689           SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Elt);
32690       };
32691       SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
32692       SDValue Slow, Chain;
32693       if (IsStrict) {
32694         Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
32695                             SignCvts[0].getValue(1), SignCvts[1].getValue(1));
32696         Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
32697                            {Chain, SignCvt, SignCvt});
32698         Chain = Slow.getValue(1);
32699       } else {
32700         Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
32701       }
32702       IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
32703       IsNeg =
32704           DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
32705       SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
32706       Results.push_back(Cvt);
32707       if (IsStrict)
32708         Results.push_back(Chain);
32709       return;
32710     }
32711 
32712     if (SrcVT != MVT::v2i32)
32713       return;
32714 
32715     if (IsSigned || Subtarget.hasAVX512()) {
32716       if (!IsStrict)
32717         return;
32718 
32719       // Custom widen strict v2i32->v2f32 to avoid scalarization.
32720       // FIXME: Should generic type legalizer do this?
32721       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
32722                         DAG.getConstant(0, dl, MVT::v2i32));
32723       SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
32724                                 {N->getOperand(0), Src});
32725       Results.push_back(Res);
32726       Results.push_back(Res.getValue(1));
32727       return;
32728     }
32729 
32730     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
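          // Use the classic 2^52 trick: 0x4330000000000000 is the bit pattern of
          // the double 2^52, so OR-ing the zero-extended 32-bit value into its
          // mantissa gives exactly 2^52 + x, and subtracting 2^52 recovers x
          // before rounding down to f32.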
32731     SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
32732     SDValue VBias = DAG.getConstantFP(
32733         llvm::bit_cast<double>(0x4330000000000000ULL), dl, MVT::v2f64);
32734     SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
32735                              DAG.getBitcast(MVT::v2i64, VBias));
32736     Or = DAG.getBitcast(MVT::v2f64, Or);
32737     if (IsStrict) {
32738       SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
32739                                 {N->getOperand(0), Or, VBias});
32740       SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
32741                                 {MVT::v4f32, MVT::Other},
32742                                 {Sub.getValue(1), Sub});
32743       Results.push_back(Res);
32744       Results.push_back(Res.getValue(1));
32745     } else {
32746       // TODO: Are there any fast-math-flags to propagate here?
32747       SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
32748       Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
32749     }
32750     return;
32751   }
32752   case ISD::STRICT_FP_ROUND:
32753   case ISD::FP_ROUND: {
32754     bool IsStrict = N->isStrictFPOpcode();
32755     SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
32756     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32757     SDValue Rnd = N->getOperand(IsStrict ? 2 : 1);
32758     EVT SrcVT = Src.getValueType();
32759     EVT VT = N->getValueType(0);
32760     SDValue V;
32761     if (VT == MVT::v2f16 && Src.getValueType() == MVT::v2f32) {
32762       SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f32)
32763                              : DAG.getUNDEF(MVT::v2f32);
32764       Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src, Ext);
32765     }
32766     if (!Subtarget.hasFP16() && VT.getVectorElementType() == MVT::f16) {
32767       assert(Subtarget.hasF16C() && "Cannot widen f16 without F16C");
32768       if (SrcVT.getVectorElementType() != MVT::f32)
32769         return;
32770 
32771       if (IsStrict)
32772         V = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
32773                         {Chain, Src, Rnd});
32774       else
32775         V = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Src, Rnd);
32776 
32777       Results.push_back(DAG.getBitcast(MVT::v8f16, V));
32778       if (IsStrict)
32779         Results.push_back(V.getValue(1));
32780       return;
32781     }
32782     if (!isTypeLegal(Src.getValueType()))
32783       return;
32784     EVT NewVT = VT.getVectorElementType() == MVT::f16 ? MVT::v8f16 : MVT::v4f32;
32785     if (IsStrict)
32786       V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {NewVT, MVT::Other},
32787                       {Chain, Src});
32788     else
32789       V = DAG.getNode(X86ISD::VFPROUND, dl, NewVT, Src);
32790     Results.push_back(V);
32791     if (IsStrict)
32792       Results.push_back(V.getValue(1));
32793     return;
32794   }
32795   case ISD::FP_EXTEND:
32796   case ISD::STRICT_FP_EXTEND: {
32797     // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
32798     // No other ValueType for FP_EXTEND should reach this point.
32799     assert(N->getValueType(0) == MVT::v2f32 &&
32800            "Do not know how to legalize this Node");
32801     if (!Subtarget.hasFP16() || !Subtarget.hasVLX())
32802       return;
32803     bool IsStrict = N->isStrictFPOpcode();
32804     SDValue Src = N->getOperand(IsStrict ? 1 : 0);
32805     SDValue Ext = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v2f16)
32806                            : DAG.getUNDEF(MVT::v2f16);
32807     SDValue V = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f16, Src, Ext);
32808     if (IsStrict)
32809       V = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {MVT::v4f32, MVT::Other},
32810                       {N->getOperand(0), V});
32811     else
32812       V = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, V);
32813     Results.push_back(V);
32814     if (IsStrict)
32815       Results.push_back(V.getValue(1));
32816     return;
32817   }
32818   case ISD::INTRINSIC_W_CHAIN: {
32819     unsigned IntNo = N->getConstantOperandVal(1);
32820     switch (IntNo) {
32821     default : llvm_unreachable("Do not know how to custom type "
32822                                "legalize this intrinsic operation!");
32823     case Intrinsic::x86_rdtsc:
32824       return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
32825                                      Results);
32826     case Intrinsic::x86_rdtscp:
32827       return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
32828                                      Results);
32829     case Intrinsic::x86_rdpmc:
32830       expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
32831                                   Results);
32832       return;
32833     case Intrinsic::x86_rdpru:
32834       expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPRU, X86::ECX, Subtarget,
32835         Results);
32836       return;
32837     case Intrinsic::x86_xgetbv:
32838       expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
32839                                   Results);
32840       return;
32841     }
32842   }
32843   case ISD::READCYCLECOUNTER: {
32844     return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
32845   }
32846   case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
32847     EVT T = N->getValueType(0);
32848     assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
32849     bool Regs64bit = T == MVT::i128;
32850     assert((!Regs64bit || Subtarget.canUseCMPXCHG16B()) &&
32851            "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
32852     MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
32853     SDValue cpInL, cpInH;
32854     std::tie(cpInL, cpInH) =
32855         DAG.SplitScalar(N->getOperand(2), dl, HalfT, HalfT);
32856     cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
32857                              Regs64bit ? X86::RAX : X86::EAX, cpInL, SDValue());
32858     cpInH =
32859         DAG.getCopyToReg(cpInL.getValue(0), dl, Regs64bit ? X86::RDX : X86::EDX,
32860                          cpInH, cpInL.getValue(1));
32861     SDValue swapInL, swapInH;
32862     std::tie(swapInL, swapInH) =
32863         DAG.SplitScalar(N->getOperand(3), dl, HalfT, HalfT);
32864     swapInH =
32865         DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
32866                          swapInH, cpInH.getValue(1));
32867 
32868     // In 64-bit mode we might need the base pointer in RBX, but we can't know
32869     // until later. So we keep the RBX input in a vreg and use a custom
32870     // inserter.
32871     // Since RBX will be a reserved register, the register allocator will not
32872     // make sure its value is properly saved and restored around this
32873     // live-range.
32874     SDValue Result;
32875     SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
32876     MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
32877     if (Regs64bit) {
32878       SDValue Ops[] = {swapInH.getValue(0), N->getOperand(1), swapInL,
32879                        swapInH.getValue(1)};
32880       Result =
32881           DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG16_DAG, dl, Tys, Ops, T, MMO);
32882     } else {
32883       swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl, X86::EBX, swapInL,
32884                                  swapInH.getValue(1));
32885       SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
32886                        swapInL.getValue(1)};
32887       Result =
32888           DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG8_DAG, dl, Tys, Ops, T, MMO);
32889     }
32890 
32891     SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
32892                                         Regs64bit ? X86::RAX : X86::EAX,
32893                                         HalfT, Result.getValue(1));
32894     SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
32895                                         Regs64bit ? X86::RDX : X86::EDX,
32896                                         HalfT, cpOutL.getValue(2));
32897     SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
32898 
32899     SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
32900                                         MVT::i32, cpOutH.getValue(2));
32901     SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
32902     Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
32903 
32904     Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
32905     Results.push_back(Success);
32906     Results.push_back(EFLAGS.getValue(1));
32907     return;
32908   }
32909   case ISD::ATOMIC_LOAD: {
32910     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
32911     bool NoImplicitFloatOps =
32912         DAG.getMachineFunction().getFunction().hasFnAttribute(
32913             Attribute::NoImplicitFloat);
32914     if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
32915       auto *Node = cast<AtomicSDNode>(N);
32916       if (Subtarget.hasSSE1()) {
32917         // Use a VZEXT_LOAD which will be selected as MOVQ or XORPS+MOVLPS.
32918         // Then extract the lower 64-bits.
32919         MVT LdVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
32920         SDVTList Tys = DAG.getVTList(LdVT, MVT::Other);
32921         SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
32922         SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
32923                                              MVT::i64, Node->getMemOperand());
32924         if (Subtarget.hasSSE2()) {
32925           SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
32926                                     DAG.getIntPtrConstant(0, dl));
32927           Results.push_back(Res);
32928           Results.push_back(Ld.getValue(1));
32929           return;
32930         }
32931         // We use an alternative sequence for SSE1 that extracts as v2f32 and
32932         // then casts to i64. This avoids a 128-bit stack temporary being
32933         // created by type legalization if we were to cast v4f32->v2i64.
32934         SDValue Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Ld,
32935                                   DAG.getIntPtrConstant(0, dl));
32936         Res = DAG.getBitcast(MVT::i64, Res);
32937         Results.push_back(Res);
32938         Results.push_back(Ld.getValue(1));
32939         return;
32940       }
32941       if (Subtarget.hasX87()) {
32942         // First load this into an 80-bit X87 register. This will put the whole
32943         // integer into the significand.
32944         SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
32945         SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
32946         SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD,
32947                                                  dl, Tys, Ops, MVT::i64,
32948                                                  Node->getMemOperand());
32949         SDValue Chain = Result.getValue(1);
32950 
32951         // Now store the X87 register to a stack temporary and convert to i64.
32952         // This store is not atomic and doesn't need to be.
32953         // FIXME: We don't need a stack temporary if the result of the load
32954         // is already being stored. We could just directly store there.
32955         SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
32956         int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
32957         MachinePointerInfo MPI =
32958             MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
32959         SDValue StoreOps[] = { Chain, Result, StackPtr };
32960         Chain = DAG.getMemIntrinsicNode(
32961             X86ISD::FIST, dl, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
32962             MPI, std::nullopt /*Align*/, MachineMemOperand::MOStore);
32963 
32964         // Finally load the value back from the stack temporary and return it.
32965         // This load is not atomic and doesn't need to be.
32966         // This load will be further type legalized.
32967         Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
32968         Results.push_back(Result);
32969         Results.push_back(Result.getValue(1));
32970         return;
32971       }
32972     }
32973     // TODO: Use MOVLPS when SSE1 is available?
32974     // Delegate to generic TypeLegalization. Situations we can really handle
32975     // should have already been dealt with by AtomicExpandPass.cpp.
32976     break;
32977   }
32978   case ISD::ATOMIC_SWAP:
32979   case ISD::ATOMIC_LOAD_ADD:
32980   case ISD::ATOMIC_LOAD_SUB:
32981   case ISD::ATOMIC_LOAD_AND:
32982   case ISD::ATOMIC_LOAD_OR:
32983   case ISD::ATOMIC_LOAD_XOR:
32984   case ISD::ATOMIC_LOAD_NAND:
32985   case ISD::ATOMIC_LOAD_MIN:
32986   case ISD::ATOMIC_LOAD_MAX:
32987   case ISD::ATOMIC_LOAD_UMIN:
32988   case ISD::ATOMIC_LOAD_UMAX:
32989     // Delegate to generic TypeLegalization. Situations we can really handle
32990     // should have already been dealt with by AtomicExpandPass.cpp.
32991     break;
32992 
32993   case ISD::BITCAST: {
32994     assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
32995     EVT DstVT = N->getValueType(0);
32996     EVT SrcVT = N->getOperand(0).getValueType();
32997 
32998     // If this is a bitcast from a v64i1 k-register to a i64 on a 32-bit target
32999     // we can split using the k-register rather than memory.
33000     if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
33001       assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
33002       SDValue Lo, Hi;
33003       std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
33004       Lo = DAG.getBitcast(MVT::i32, Lo);
33005       Hi = DAG.getBitcast(MVT::i32, Hi);
33006       SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
33007       Results.push_back(Res);
33008       return;
33009     }
33010 
33011     if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
33012       // FIXME: Use v4f32 for SSE1?
33013       assert(Subtarget.hasSSE2() && "Requires SSE2");
33014       assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
33015              "Unexpected type action!");
33016       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
33017       SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64,
33018                                 N->getOperand(0));
33019       Res = DAG.getBitcast(WideVT, Res);
33020       Results.push_back(Res);
33021       return;
33022     }
33023 
33024     return;
33025   }
33026   case ISD::MGATHER: {
33027     EVT VT = N->getValueType(0);
33028     if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
33029         (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
33030       auto *Gather = cast<MaskedGatherSDNode>(N);
33031       SDValue Index = Gather->getIndex();
33032       if (Index.getValueType() != MVT::v2i64)
33033         return;
33034       assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33035              "Unexpected type action!");
33036       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
33037       SDValue Mask = Gather->getMask();
33038       assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
33039       SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
33040                                      Gather->getPassThru(),
33041                                      DAG.getUNDEF(VT));
33042       if (!Subtarget.hasVLX()) {
33043         // We need to widen the mask, but the instruction will only use 2
33044         // of its elements. So we can use undef.
33045         Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
33046                            DAG.getUNDEF(MVT::v2i1));
33047         Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
33048       }
33049       SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
33050                         Gather->getBasePtr(), Index, Gather->getScale() };
33051       SDValue Res = DAG.getMemIntrinsicNode(
33052           X86ISD::MGATHER, dl, DAG.getVTList(WideVT, MVT::Other), Ops,
33053           Gather->getMemoryVT(), Gather->getMemOperand());
33054       Results.push_back(Res);
33055       Results.push_back(Res.getValue(1));
33056       return;
33057     }
33058     return;
33059   }
33060   case ISD::LOAD: {
33061     // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
33062     // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids an int->fp
33063     // cast since type legalization will try to use an i64 load.
33064     MVT VT = N->getSimpleValueType(0);
33065     assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
33066     assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
33067            "Unexpected type action!");
33068     if (!ISD::isNON_EXTLoad(N))
33069       return;
33070     auto *Ld = cast<LoadSDNode>(N);
33071     if (Subtarget.hasSSE2()) {
33072       MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
33073       SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
33074                                 Ld->getPointerInfo(), Ld->getOriginalAlign(),
33075                                 Ld->getMemOperand()->getFlags());
33076       SDValue Chain = Res.getValue(1);
33077       MVT VecVT = MVT::getVectorVT(LdVT, 2);
33078       Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
33079       EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
33080       Res = DAG.getBitcast(WideVT, Res);
33081       Results.push_back(Res);
33082       Results.push_back(Chain);
33083       return;
33084     }
33085     assert(Subtarget.hasSSE1() && "Expected SSE");
33086     SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
33087     SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
33088     SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
33089                                           MVT::i64, Ld->getMemOperand());
33090     Results.push_back(Res);
33091     Results.push_back(Res.getValue(1));
33092     return;
33093   }
33094   case ISD::ADDRSPACECAST: {
33095     SDValue V = LowerADDRSPACECAST(SDValue(N,0), DAG);
33096     Results.push_back(V);
33097     return;
33098   }
33099   case ISD::BITREVERSE: {
33100     assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
33101     assert(Subtarget.hasXOP() && "Expected XOP");
33102     // We can use VPPERM by copying to a vector register and back. We'll need
33103     // to move the scalar in two i32 pieces.
33104     Results.push_back(LowerBITREVERSE(SDValue(N, 0), Subtarget, DAG));
33105     return;
33106   }
33107   case ISD::EXTRACT_VECTOR_ELT: {
33108     // f16 = extract vXf16 %vec, i64 %idx
33109     assert(N->getSimpleValueType(0) == MVT::f16 &&
33110            "Unexpected Value type of EXTRACT_VECTOR_ELT!");
33111     assert(Subtarget.hasFP16() && "Expected FP16");
33112     SDValue VecOp = N->getOperand(0);
33113     EVT ExtVT = VecOp.getValueType().changeVectorElementTypeToInteger();
33114     SDValue Split = DAG.getBitcast(ExtVT, N->getOperand(0));
33115     Split = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Split,
33116                         N->getOperand(1));
33117     Split = DAG.getBitcast(MVT::f16, Split);
33118     Results.push_back(Split);
33119     return;
33120   }
33121   }
33122 }
33123 
33124 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
33125   switch ((X86ISD::NodeType)Opcode) {
33126   case X86ISD::FIRST_NUMBER:       break;
33127 #define NODE_NAME_CASE(NODE) case X86ISD::NODE: return "X86ISD::" #NODE;
33128   NODE_NAME_CASE(BSF)
33129   NODE_NAME_CASE(BSR)
33130   NODE_NAME_CASE(FSHL)
33131   NODE_NAME_CASE(FSHR)
33132   NODE_NAME_CASE(FAND)
33133   NODE_NAME_CASE(FANDN)
33134   NODE_NAME_CASE(FOR)
33135   NODE_NAME_CASE(FXOR)
33136   NODE_NAME_CASE(FILD)
33137   NODE_NAME_CASE(FIST)
33138   NODE_NAME_CASE(FP_TO_INT_IN_MEM)
33139   NODE_NAME_CASE(FLD)
33140   NODE_NAME_CASE(FST)
33141   NODE_NAME_CASE(CALL)
33142   NODE_NAME_CASE(CALL_RVMARKER)
33143   NODE_NAME_CASE(BT)
33144   NODE_NAME_CASE(CMP)
33145   NODE_NAME_CASE(FCMP)
33146   NODE_NAME_CASE(STRICT_FCMP)
33147   NODE_NAME_CASE(STRICT_FCMPS)
33148   NODE_NAME_CASE(COMI)
33149   NODE_NAME_CASE(UCOMI)
33150   NODE_NAME_CASE(CMPM)
33151   NODE_NAME_CASE(CMPMM)
33152   NODE_NAME_CASE(STRICT_CMPM)
33153   NODE_NAME_CASE(CMPMM_SAE)
33154   NODE_NAME_CASE(SETCC)
33155   NODE_NAME_CASE(SETCC_CARRY)
33156   NODE_NAME_CASE(FSETCC)
33157   NODE_NAME_CASE(FSETCCM)
33158   NODE_NAME_CASE(FSETCCM_SAE)
33159   NODE_NAME_CASE(CMOV)
33160   NODE_NAME_CASE(BRCOND)
33161   NODE_NAME_CASE(RET_GLUE)
33162   NODE_NAME_CASE(IRET)
33163   NODE_NAME_CASE(REP_STOS)
33164   NODE_NAME_CASE(REP_MOVS)
33165   NODE_NAME_CASE(GlobalBaseReg)
33166   NODE_NAME_CASE(Wrapper)
33167   NODE_NAME_CASE(WrapperRIP)
33168   NODE_NAME_CASE(MOVQ2DQ)
33169   NODE_NAME_CASE(MOVDQ2Q)
33170   NODE_NAME_CASE(MMX_MOVD2W)
33171   NODE_NAME_CASE(MMX_MOVW2D)
33172   NODE_NAME_CASE(PEXTRB)
33173   NODE_NAME_CASE(PEXTRW)
33174   NODE_NAME_CASE(INSERTPS)
33175   NODE_NAME_CASE(PINSRB)
33176   NODE_NAME_CASE(PINSRW)
33177   NODE_NAME_CASE(PSHUFB)
33178   NODE_NAME_CASE(ANDNP)
33179   NODE_NAME_CASE(BLENDI)
33180   NODE_NAME_CASE(BLENDV)
33181   NODE_NAME_CASE(HADD)
33182   NODE_NAME_CASE(HSUB)
33183   NODE_NAME_CASE(FHADD)
33184   NODE_NAME_CASE(FHSUB)
33185   NODE_NAME_CASE(CONFLICT)
33186   NODE_NAME_CASE(FMAX)
33187   NODE_NAME_CASE(FMAXS)
33188   NODE_NAME_CASE(FMAX_SAE)
33189   NODE_NAME_CASE(FMAXS_SAE)
33190   NODE_NAME_CASE(FMIN)
33191   NODE_NAME_CASE(FMINS)
33192   NODE_NAME_CASE(FMIN_SAE)
33193   NODE_NAME_CASE(FMINS_SAE)
33194   NODE_NAME_CASE(FMAXC)
33195   NODE_NAME_CASE(FMINC)
33196   NODE_NAME_CASE(FRSQRT)
33197   NODE_NAME_CASE(FRCP)
33198   NODE_NAME_CASE(EXTRQI)
33199   NODE_NAME_CASE(INSERTQI)
33200   NODE_NAME_CASE(TLSADDR)
33201   NODE_NAME_CASE(TLSBASEADDR)
33202   NODE_NAME_CASE(TLSCALL)
33203   NODE_NAME_CASE(EH_SJLJ_SETJMP)
33204   NODE_NAME_CASE(EH_SJLJ_LONGJMP)
33205   NODE_NAME_CASE(EH_SJLJ_SETUP_DISPATCH)
33206   NODE_NAME_CASE(EH_RETURN)
33207   NODE_NAME_CASE(TC_RETURN)
33208   NODE_NAME_CASE(FNSTCW16m)
33209   NODE_NAME_CASE(FLDCW16m)
33210   NODE_NAME_CASE(FNSTENVm)
33211   NODE_NAME_CASE(FLDENVm)
33212   NODE_NAME_CASE(LCMPXCHG_DAG)
33213   NODE_NAME_CASE(LCMPXCHG8_DAG)
33214   NODE_NAME_CASE(LCMPXCHG16_DAG)
33215   NODE_NAME_CASE(LCMPXCHG16_SAVE_RBX_DAG)
33216   NODE_NAME_CASE(LADD)
33217   NODE_NAME_CASE(LSUB)
33218   NODE_NAME_CASE(LOR)
33219   NODE_NAME_CASE(LXOR)
33220   NODE_NAME_CASE(LAND)
33221   NODE_NAME_CASE(LBTS)
33222   NODE_NAME_CASE(LBTC)
33223   NODE_NAME_CASE(LBTR)
33224   NODE_NAME_CASE(LBTS_RM)
33225   NODE_NAME_CASE(LBTC_RM)
33226   NODE_NAME_CASE(LBTR_RM)
33227   NODE_NAME_CASE(AADD)
33228   NODE_NAME_CASE(AOR)
33229   NODE_NAME_CASE(AXOR)
33230   NODE_NAME_CASE(AAND)
33231   NODE_NAME_CASE(VZEXT_MOVL)
33232   NODE_NAME_CASE(VZEXT_LOAD)
33233   NODE_NAME_CASE(VEXTRACT_STORE)
33234   NODE_NAME_CASE(VTRUNC)
33235   NODE_NAME_CASE(VTRUNCS)
33236   NODE_NAME_CASE(VTRUNCUS)
33237   NODE_NAME_CASE(VMTRUNC)
33238   NODE_NAME_CASE(VMTRUNCS)
33239   NODE_NAME_CASE(VMTRUNCUS)
33240   NODE_NAME_CASE(VTRUNCSTORES)
33241   NODE_NAME_CASE(VTRUNCSTOREUS)
33242   NODE_NAME_CASE(VMTRUNCSTORES)
33243   NODE_NAME_CASE(VMTRUNCSTOREUS)
33244   NODE_NAME_CASE(VFPEXT)
33245   NODE_NAME_CASE(STRICT_VFPEXT)
33246   NODE_NAME_CASE(VFPEXT_SAE)
33247   NODE_NAME_CASE(VFPEXTS)
33248   NODE_NAME_CASE(VFPEXTS_SAE)
33249   NODE_NAME_CASE(VFPROUND)
33250   NODE_NAME_CASE(STRICT_VFPROUND)
33251   NODE_NAME_CASE(VMFPROUND)
33252   NODE_NAME_CASE(VFPROUND_RND)
33253   NODE_NAME_CASE(VFPROUNDS)
33254   NODE_NAME_CASE(VFPROUNDS_RND)
33255   NODE_NAME_CASE(VSHLDQ)
33256   NODE_NAME_CASE(VSRLDQ)
33257   NODE_NAME_CASE(VSHL)
33258   NODE_NAME_CASE(VSRL)
33259   NODE_NAME_CASE(VSRA)
33260   NODE_NAME_CASE(VSHLI)
33261   NODE_NAME_CASE(VSRLI)
33262   NODE_NAME_CASE(VSRAI)
33263   NODE_NAME_CASE(VSHLV)
33264   NODE_NAME_CASE(VSRLV)
33265   NODE_NAME_CASE(VSRAV)
33266   NODE_NAME_CASE(VROTLI)
33267   NODE_NAME_CASE(VROTRI)
33268   NODE_NAME_CASE(VPPERM)
33269   NODE_NAME_CASE(CMPP)
33270   NODE_NAME_CASE(STRICT_CMPP)
33271   NODE_NAME_CASE(PCMPEQ)
33272   NODE_NAME_CASE(PCMPGT)
33273   NODE_NAME_CASE(PHMINPOS)
33274   NODE_NAME_CASE(ADD)
33275   NODE_NAME_CASE(SUB)
33276   NODE_NAME_CASE(ADC)
33277   NODE_NAME_CASE(SBB)
33278   NODE_NAME_CASE(SMUL)
33279   NODE_NAME_CASE(UMUL)
33280   NODE_NAME_CASE(OR)
33281   NODE_NAME_CASE(XOR)
33282   NODE_NAME_CASE(AND)
33283   NODE_NAME_CASE(BEXTR)
33284   NODE_NAME_CASE(BEXTRI)
33285   NODE_NAME_CASE(BZHI)
33286   NODE_NAME_CASE(PDEP)
33287   NODE_NAME_CASE(PEXT)
33288   NODE_NAME_CASE(MUL_IMM)
33289   NODE_NAME_CASE(MOVMSK)
33290   NODE_NAME_CASE(PTEST)
33291   NODE_NAME_CASE(TESTP)
33292   NODE_NAME_CASE(KORTEST)
33293   NODE_NAME_CASE(KTEST)
33294   NODE_NAME_CASE(KADD)
33295   NODE_NAME_CASE(KSHIFTL)
33296   NODE_NAME_CASE(KSHIFTR)
33297   NODE_NAME_CASE(PACKSS)
33298   NODE_NAME_CASE(PACKUS)
33299   NODE_NAME_CASE(PALIGNR)
33300   NODE_NAME_CASE(VALIGN)
33301   NODE_NAME_CASE(VSHLD)
33302   NODE_NAME_CASE(VSHRD)
33303   NODE_NAME_CASE(VSHLDV)
33304   NODE_NAME_CASE(VSHRDV)
33305   NODE_NAME_CASE(PSHUFD)
33306   NODE_NAME_CASE(PSHUFHW)
33307   NODE_NAME_CASE(PSHUFLW)
33308   NODE_NAME_CASE(SHUFP)
33309   NODE_NAME_CASE(SHUF128)
33310   NODE_NAME_CASE(MOVLHPS)
33311   NODE_NAME_CASE(MOVHLPS)
33312   NODE_NAME_CASE(MOVDDUP)
33313   NODE_NAME_CASE(MOVSHDUP)
33314   NODE_NAME_CASE(MOVSLDUP)
33315   NODE_NAME_CASE(MOVSD)
33316   NODE_NAME_CASE(MOVSS)
33317   NODE_NAME_CASE(MOVSH)
33318   NODE_NAME_CASE(UNPCKL)
33319   NODE_NAME_CASE(UNPCKH)
33320   NODE_NAME_CASE(VBROADCAST)
33321   NODE_NAME_CASE(VBROADCAST_LOAD)
33322   NODE_NAME_CASE(VBROADCASTM)
33323   NODE_NAME_CASE(SUBV_BROADCAST_LOAD)
33324   NODE_NAME_CASE(VPERMILPV)
33325   NODE_NAME_CASE(VPERMILPI)
33326   NODE_NAME_CASE(VPERM2X128)
33327   NODE_NAME_CASE(VPERMV)
33328   NODE_NAME_CASE(VPERMV3)
33329   NODE_NAME_CASE(VPERMI)
33330   NODE_NAME_CASE(VPTERNLOG)
33331   NODE_NAME_CASE(VFIXUPIMM)
33332   NODE_NAME_CASE(VFIXUPIMM_SAE)
33333   NODE_NAME_CASE(VFIXUPIMMS)
33334   NODE_NAME_CASE(VFIXUPIMMS_SAE)
33335   NODE_NAME_CASE(VRANGE)
33336   NODE_NAME_CASE(VRANGE_SAE)
33337   NODE_NAME_CASE(VRANGES)
33338   NODE_NAME_CASE(VRANGES_SAE)
33339   NODE_NAME_CASE(PMULUDQ)
33340   NODE_NAME_CASE(PMULDQ)
33341   NODE_NAME_CASE(PSADBW)
33342   NODE_NAME_CASE(DBPSADBW)
33343   NODE_NAME_CASE(VASTART_SAVE_XMM_REGS)
33344   NODE_NAME_CASE(VAARG_64)
33345   NODE_NAME_CASE(VAARG_X32)
33346   NODE_NAME_CASE(DYN_ALLOCA)
33347   NODE_NAME_CASE(MFENCE)
33348   NODE_NAME_CASE(SEG_ALLOCA)
33349   NODE_NAME_CASE(PROBED_ALLOCA)
33350   NODE_NAME_CASE(RDRAND)
33351   NODE_NAME_CASE(RDSEED)
33352   NODE_NAME_CASE(RDPKRU)
33353   NODE_NAME_CASE(WRPKRU)
33354   NODE_NAME_CASE(VPMADDUBSW)
33355   NODE_NAME_CASE(VPMADDWD)
33356   NODE_NAME_CASE(VPSHA)
33357   NODE_NAME_CASE(VPSHL)
33358   NODE_NAME_CASE(VPCOM)
33359   NODE_NAME_CASE(VPCOMU)
33360   NODE_NAME_CASE(VPERMIL2)
33361   NODE_NAME_CASE(FMSUB)
33362   NODE_NAME_CASE(STRICT_FMSUB)
33363   NODE_NAME_CASE(FNMADD)
33364   NODE_NAME_CASE(STRICT_FNMADD)
33365   NODE_NAME_CASE(FNMSUB)
33366   NODE_NAME_CASE(STRICT_FNMSUB)
33367   NODE_NAME_CASE(FMADDSUB)
33368   NODE_NAME_CASE(FMSUBADD)
33369   NODE_NAME_CASE(FMADD_RND)
33370   NODE_NAME_CASE(FNMADD_RND)
33371   NODE_NAME_CASE(FMSUB_RND)
33372   NODE_NAME_CASE(FNMSUB_RND)
33373   NODE_NAME_CASE(FMADDSUB_RND)
33374   NODE_NAME_CASE(FMSUBADD_RND)
33375   NODE_NAME_CASE(VFMADDC)
33376   NODE_NAME_CASE(VFMADDC_RND)
33377   NODE_NAME_CASE(VFCMADDC)
33378   NODE_NAME_CASE(VFCMADDC_RND)
33379   NODE_NAME_CASE(VFMULC)
33380   NODE_NAME_CASE(VFMULC_RND)
33381   NODE_NAME_CASE(VFCMULC)
33382   NODE_NAME_CASE(VFCMULC_RND)
33383   NODE_NAME_CASE(VFMULCSH)
33384   NODE_NAME_CASE(VFMULCSH_RND)
33385   NODE_NAME_CASE(VFCMULCSH)
33386   NODE_NAME_CASE(VFCMULCSH_RND)
33387   NODE_NAME_CASE(VFMADDCSH)
33388   NODE_NAME_CASE(VFMADDCSH_RND)
33389   NODE_NAME_CASE(VFCMADDCSH)
33390   NODE_NAME_CASE(VFCMADDCSH_RND)
33391   NODE_NAME_CASE(VPMADD52H)
33392   NODE_NAME_CASE(VPMADD52L)
33393   NODE_NAME_CASE(VRNDSCALE)
33394   NODE_NAME_CASE(STRICT_VRNDSCALE)
33395   NODE_NAME_CASE(VRNDSCALE_SAE)
33396   NODE_NAME_CASE(VRNDSCALES)
33397   NODE_NAME_CASE(VRNDSCALES_SAE)
33398   NODE_NAME_CASE(VREDUCE)
33399   NODE_NAME_CASE(VREDUCE_SAE)
33400   NODE_NAME_CASE(VREDUCES)
33401   NODE_NAME_CASE(VREDUCES_SAE)
33402   NODE_NAME_CASE(VGETMANT)
33403   NODE_NAME_CASE(VGETMANT_SAE)
33404   NODE_NAME_CASE(VGETMANTS)
33405   NODE_NAME_CASE(VGETMANTS_SAE)
33406   NODE_NAME_CASE(PCMPESTR)
33407   NODE_NAME_CASE(PCMPISTR)
33408   NODE_NAME_CASE(XTEST)
33409   NODE_NAME_CASE(COMPRESS)
33410   NODE_NAME_CASE(EXPAND)
33411   NODE_NAME_CASE(SELECTS)
33412   NODE_NAME_CASE(ADDSUB)
33413   NODE_NAME_CASE(RCP14)
33414   NODE_NAME_CASE(RCP14S)
33415   NODE_NAME_CASE(RCP28)
33416   NODE_NAME_CASE(RCP28_SAE)
33417   NODE_NAME_CASE(RCP28S)
33418   NODE_NAME_CASE(RCP28S_SAE)
33419   NODE_NAME_CASE(EXP2)
33420   NODE_NAME_CASE(EXP2_SAE)
33421   NODE_NAME_CASE(RSQRT14)
33422   NODE_NAME_CASE(RSQRT14S)
33423   NODE_NAME_CASE(RSQRT28)
33424   NODE_NAME_CASE(RSQRT28_SAE)
33425   NODE_NAME_CASE(RSQRT28S)
33426   NODE_NAME_CASE(RSQRT28S_SAE)
33427   NODE_NAME_CASE(FADD_RND)
33428   NODE_NAME_CASE(FADDS)
33429   NODE_NAME_CASE(FADDS_RND)
33430   NODE_NAME_CASE(FSUB_RND)
33431   NODE_NAME_CASE(FSUBS)
33432   NODE_NAME_CASE(FSUBS_RND)
33433   NODE_NAME_CASE(FMUL_RND)
33434   NODE_NAME_CASE(FMULS)
33435   NODE_NAME_CASE(FMULS_RND)
33436   NODE_NAME_CASE(FDIV_RND)
33437   NODE_NAME_CASE(FDIVS)
33438   NODE_NAME_CASE(FDIVS_RND)
33439   NODE_NAME_CASE(FSQRT_RND)
33440   NODE_NAME_CASE(FSQRTS)
33441   NODE_NAME_CASE(FSQRTS_RND)
33442   NODE_NAME_CASE(FGETEXP)
33443   NODE_NAME_CASE(FGETEXP_SAE)
33444   NODE_NAME_CASE(FGETEXPS)
33445   NODE_NAME_CASE(FGETEXPS_SAE)
33446   NODE_NAME_CASE(SCALEF)
33447   NODE_NAME_CASE(SCALEF_RND)
33448   NODE_NAME_CASE(SCALEFS)
33449   NODE_NAME_CASE(SCALEFS_RND)
33450   NODE_NAME_CASE(MULHRS)
33451   NODE_NAME_CASE(SINT_TO_FP_RND)
33452   NODE_NAME_CASE(UINT_TO_FP_RND)
33453   NODE_NAME_CASE(CVTTP2SI)
33454   NODE_NAME_CASE(CVTTP2UI)
33455   NODE_NAME_CASE(STRICT_CVTTP2SI)
33456   NODE_NAME_CASE(STRICT_CVTTP2UI)
33457   NODE_NAME_CASE(MCVTTP2SI)
33458   NODE_NAME_CASE(MCVTTP2UI)
33459   NODE_NAME_CASE(CVTTP2SI_SAE)
33460   NODE_NAME_CASE(CVTTP2UI_SAE)
33461   NODE_NAME_CASE(CVTTS2SI)
33462   NODE_NAME_CASE(CVTTS2UI)
33463   NODE_NAME_CASE(CVTTS2SI_SAE)
33464   NODE_NAME_CASE(CVTTS2UI_SAE)
33465   NODE_NAME_CASE(CVTSI2P)
33466   NODE_NAME_CASE(CVTUI2P)
33467   NODE_NAME_CASE(STRICT_CVTSI2P)
33468   NODE_NAME_CASE(STRICT_CVTUI2P)
33469   NODE_NAME_CASE(MCVTSI2P)
33470   NODE_NAME_CASE(MCVTUI2P)
33471   NODE_NAME_CASE(VFPCLASS)
33472   NODE_NAME_CASE(VFPCLASSS)
33473   NODE_NAME_CASE(MULTISHIFT)
33474   NODE_NAME_CASE(SCALAR_SINT_TO_FP)
33475   NODE_NAME_CASE(SCALAR_SINT_TO_FP_RND)
33476   NODE_NAME_CASE(SCALAR_UINT_TO_FP)
33477   NODE_NAME_CASE(SCALAR_UINT_TO_FP_RND)
33478   NODE_NAME_CASE(CVTPS2PH)
33479   NODE_NAME_CASE(STRICT_CVTPS2PH)
33480   NODE_NAME_CASE(CVTPS2PH_SAE)
33481   NODE_NAME_CASE(MCVTPS2PH)
33482   NODE_NAME_CASE(MCVTPS2PH_SAE)
33483   NODE_NAME_CASE(CVTPH2PS)
33484   NODE_NAME_CASE(STRICT_CVTPH2PS)
33485   NODE_NAME_CASE(CVTPH2PS_SAE)
33486   NODE_NAME_CASE(CVTP2SI)
33487   NODE_NAME_CASE(CVTP2UI)
33488   NODE_NAME_CASE(MCVTP2SI)
33489   NODE_NAME_CASE(MCVTP2UI)
33490   NODE_NAME_CASE(CVTP2SI_RND)
33491   NODE_NAME_CASE(CVTP2UI_RND)
33492   NODE_NAME_CASE(CVTS2SI)
33493   NODE_NAME_CASE(CVTS2UI)
33494   NODE_NAME_CASE(CVTS2SI_RND)
33495   NODE_NAME_CASE(CVTS2UI_RND)
33496   NODE_NAME_CASE(CVTNE2PS2BF16)
33497   NODE_NAME_CASE(CVTNEPS2BF16)
33498   NODE_NAME_CASE(MCVTNEPS2BF16)
33499   NODE_NAME_CASE(DPBF16PS)
33500   NODE_NAME_CASE(LWPINS)
33501   NODE_NAME_CASE(MGATHER)
33502   NODE_NAME_CASE(MSCATTER)
33503   NODE_NAME_CASE(VPDPBUSD)
33504   NODE_NAME_CASE(VPDPBUSDS)
33505   NODE_NAME_CASE(VPDPWSSD)
33506   NODE_NAME_CASE(VPDPWSSDS)
33507   NODE_NAME_CASE(VPSHUFBITQMB)
33508   NODE_NAME_CASE(GF2P8MULB)
33509   NODE_NAME_CASE(GF2P8AFFINEQB)
33510   NODE_NAME_CASE(GF2P8AFFINEINVQB)
33511   NODE_NAME_CASE(NT_CALL)
33512   NODE_NAME_CASE(NT_BRIND)
33513   NODE_NAME_CASE(UMWAIT)
33514   NODE_NAME_CASE(TPAUSE)
33515   NODE_NAME_CASE(ENQCMD)
33516   NODE_NAME_CASE(ENQCMDS)
33517   NODE_NAME_CASE(VP2INTERSECT)
33518   NODE_NAME_CASE(VPDPBSUD)
33519   NODE_NAME_CASE(VPDPBSUDS)
33520   NODE_NAME_CASE(VPDPBUUD)
33521   NODE_NAME_CASE(VPDPBUUDS)
33522   NODE_NAME_CASE(VPDPBSSD)
33523   NODE_NAME_CASE(VPDPBSSDS)
33524   NODE_NAME_CASE(AESENC128KL)
33525   NODE_NAME_CASE(AESDEC128KL)
33526   NODE_NAME_CASE(AESENC256KL)
33527   NODE_NAME_CASE(AESDEC256KL)
33528   NODE_NAME_CASE(AESENCWIDE128KL)
33529   NODE_NAME_CASE(AESDECWIDE128KL)
33530   NODE_NAME_CASE(AESENCWIDE256KL)
33531   NODE_NAME_CASE(AESDECWIDE256KL)
33532   NODE_NAME_CASE(CMPCCXADD)
33533   NODE_NAME_CASE(TESTUI)
33534   NODE_NAME_CASE(FP80_ADD)
33535   NODE_NAME_CASE(STRICT_FP80_ADD)
33536   }
33537   return nullptr;
33538 #undef NODE_NAME_CASE
33539 }
33540 
33541 /// Return true if the addressing mode represented by AM is legal for this
33542 /// target, for a load/store of the specified type.
33543 bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
33544                                               const AddrMode &AM, Type *Ty,
33545                                               unsigned AS,
33546                                               Instruction *I) const {
33547   // X86 supports extremely general addressing modes.
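        // The most general form is BaseReg + Scale*IndexReg + Disp32, optionally
        // relative to a global; the checks below reject combinations that the
        // encoding or the current code model cannot express.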
33548   CodeModel::Model M = getTargetMachine().getCodeModel();
33549 
33550   // X86 allows a sign-extended 32-bit immediate field as a displacement.
33551   if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
33552     return false;
33553 
33554   if (AM.BaseGV) {
33555     unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);
33556 
33557     // If a reference to this global requires an extra load, we can't fold it.
33558     if (isGlobalStubReference(GVFlags))
33559       return false;
33560 
33561     // If BaseGV requires a register for the PIC base, we cannot also have a
33562     // BaseReg specified.
33563     if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
33564       return false;
33565 
33566     // If lower 4G is not available, then we must use rip-relative addressing.
33567     if ((M != CodeModel::Small || isPositionIndependent()) &&
33568         Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
33569       return false;
33570   }
33571 
33572   switch (AM.Scale) {
33573   case 0:
33574   case 1:
33575   case 2:
33576   case 4:
33577   case 8:
33578     // These scales always work.
33579     break;
33580   case 3:
33581   case 5:
33582   case 9:
33583     // These scales are formed with basereg+scalereg.  Only accept if there is
33584     // no basereg yet.
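          // For example, a scale of 3 reuses the index register as the base:
          // lea (%rax,%rax,2) computes %rax*3.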
33585     if (AM.HasBaseReg)
33586       return false;
33587     break;
33588   default:  // Other stuff never works.
33589     return false;
33590   }
33591 
33592   return true;
33593 }
33594 
33595 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
33596   unsigned Bits = Ty->getScalarSizeInBits();
33597 
33598   // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
33599   // Splitting for v32i8/v16i16 on XOP+AVX2 targets is still preferred.
33600   if (Subtarget.hasXOP() &&
33601       (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
33602     return false;
33603 
33604   // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
33605   // shifts just as cheap as scalar ones.
33606   if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
33607     return false;
33608 
33609   // AVX512BW has shifts such as vpsllvw.
33610   if (Subtarget.hasBWI() && Bits == 16)
33611     return false;
33612 
33613   // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
33614   // fully general vector.
33615   return true;
33616 }
33617 
33618 bool X86TargetLowering::isBinOp(unsigned Opcode) const {
33619   switch (Opcode) {
33620   // These are non-commutative binops.
33621   // TODO: Add more X86ISD opcodes once we have test coverage.
33622   case X86ISD::ANDNP:
33623   case X86ISD::PCMPGT:
33624   case X86ISD::FMAX:
33625   case X86ISD::FMIN:
33626   case X86ISD::FANDN:
33627   case X86ISD::VPSHA:
33628   case X86ISD::VPSHL:
33629   case X86ISD::VSHLV:
33630   case X86ISD::VSRLV:
33631   case X86ISD::VSRAV:
33632     return true;
33633   }
33634 
33635   return TargetLoweringBase::isBinOp(Opcode);
33636 }
33637 
33638 bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
33639   switch (Opcode) {
33640   // TODO: Add more X86ISD opcodes once we have test coverage.
33641   case X86ISD::PCMPEQ:
33642   case X86ISD::PMULDQ:
33643   case X86ISD::PMULUDQ:
33644   case X86ISD::FMAXC:
33645   case X86ISD::FMINC:
33646   case X86ISD::FAND:
33647   case X86ISD::FOR:
33648   case X86ISD::FXOR:
33649     return true;
33650   }
33651 
33652   return TargetLoweringBase::isCommutativeBinOp(Opcode);
33653 }
33654 
33655 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
33656   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
33657     return false;
33658   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
33659   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
33660   return NumBits1 > NumBits2;
33661 }
33662 
33663 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
33664   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
33665     return false;
33666 
33667   if (!isTypeLegal(EVT::getEVT(Ty1)))
33668     return false;
33669 
33670   assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
33671 
33672   // Assuming the caller doesn't have a zeroext or signext return parameter,
33673   // truncation all the way down to i1 is valid.
33674   return true;
33675 }
33676 
33677 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
33678   return isInt<32>(Imm);
33679 }
33680 
33681 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
33682   // Can also use sub to handle negated immediates.
33683   return isInt<32>(Imm);
33684 }
33685 
33686 bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
33687   return isInt<32>(Imm);
33688 }
33689 
33690 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
33691   if (!VT1.isScalarInteger() || !VT2.isScalarInteger())
33692     return false;
33693   unsigned NumBits1 = VT1.getSizeInBits();
33694   unsigned NumBits2 = VT2.getSizeInBits();
33695   return NumBits1 > NumBits2;
33696 }
33697 
33698 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
33699   // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
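        // E.g. "movl %esi, %eax" clears bits 63:32 of RAX, so no separate
        // zero-extension instruction is needed.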
33700   return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
33701 }
33702 
33703 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
33704   // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
33705   return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
33706 }
33707 
33708 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
33709   EVT VT1 = Val.getValueType();
33710   if (isZExtFree(VT1, VT2))
33711     return true;
33712 
33713   if (Val.getOpcode() != ISD::LOAD)
33714     return false;
33715 
33716   if (!VT1.isSimple() || !VT1.isInteger() ||
33717       !VT2.isSimple() || !VT2.isInteger())
33718     return false;
33719 
33720   switch (VT1.getSimpleVT().SimpleTy) {
33721   default: break;
33722   case MVT::i8:
33723   case MVT::i16:
33724   case MVT::i32:
33725     // X86 has 8, 16, and 32-bit zero-extending loads.
33726     return true;
33727   }
33728 
33729   return false;
33730 }
33731 
33732 bool X86TargetLowering::shouldSinkOperands(Instruction *I,
33733                                            SmallVectorImpl<Use *> &Ops) const {
33734   using namespace llvm::PatternMatch;
33735 
33736   FixedVectorType *VTy = dyn_cast<FixedVectorType>(I->getType());
33737   if (!VTy)
33738     return false;
33739 
33740   if (I->getOpcode() == Instruction::Mul &&
33741       VTy->getElementType()->isIntegerTy(64)) {
33742     for (auto &Op : I->operands()) {
33743       // Make sure we are not already sinking this operand
33744       if (any_of(Ops, [&](Use *U) { return U->get() == Op; }))
33745         continue;
33746 
33747       // Look for the PMULDQ pattern, where the input is a sext_inreg from
33748       // vXi32, or the PMULUDQ pattern, where the input is a zext_inreg from vXi32.
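            // In IR these appear as ashr(shl(X, 32), 32) for the signed case and
            // and(X, 0xffffffff) for the unsigned case, which is what the
            // matchers below check for.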
33749       if (Subtarget.hasSSE41() &&
33750           match(Op.get(), m_AShr(m_Shl(m_Value(), m_SpecificInt(32)),
33751                                  m_SpecificInt(32)))) {
33752         Ops.push_back(&cast<Instruction>(Op)->getOperandUse(0));
33753         Ops.push_back(&Op);
33754       } else if (Subtarget.hasSSE2() &&
33755                  match(Op.get(),
33756                        m_And(m_Value(), m_SpecificInt(UINT64_C(0xffffffff))))) {
33757         Ops.push_back(&Op);
33758       }
33759     }
33760 
33761     return !Ops.empty();
33762   }
33763 
33764   // A uniform shift amount in a vector shift or funnel shift may be much
33765   // cheaper than a generic variable vector shift, so make that pattern visible
33766   // to SDAG by sinking the shuffle instruction next to the shift.
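        // For example, a v4i32 shift by a splatted amount can then be selected
        // as a single shift-by-scalar (e.g. PSLLD) instead of a variable shift.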
33767   int ShiftAmountOpNum = -1;
33768   if (I->isShift())
33769     ShiftAmountOpNum = 1;
33770   else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
33771     if (II->getIntrinsicID() == Intrinsic::fshl ||
33772         II->getIntrinsicID() == Intrinsic::fshr)
33773       ShiftAmountOpNum = 2;
33774   }
33775 
33776   if (ShiftAmountOpNum == -1)
33777     return false;
33778 
33779   auto *Shuf = dyn_cast<ShuffleVectorInst>(I->getOperand(ShiftAmountOpNum));
33780   if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
33781       isVectorShiftByScalarCheap(I->getType())) {
33782     Ops.push_back(&I->getOperandUse(ShiftAmountOpNum));
33783     return true;
33784   }
33785 
33786   return false;
33787 }
33788 
33789 bool X86TargetLowering::shouldConvertPhiType(Type *From, Type *To) const {
33790   if (!Subtarget.is64Bit())
33791     return false;
33792   return TargetLowering::shouldConvertPhiType(From, To);
33793 }
33794 
33795 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
33796   if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
33797     return false;
33798 
33799   EVT SrcVT = ExtVal.getOperand(0).getValueType();
33800 
33801   // There is no extending load for vXi1.
33802   if (SrcVT.getScalarType() == MVT::i1)
33803     return false;
33804 
33805   return true;
33806 }
33807 
33808 bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
33809                                                    EVT VT) const {
33810   if (!Subtarget.hasAnyFMA())
33811     return false;
33812 
33813   VT = VT.getScalarType();
33814 
33815   if (!VT.isSimple())
33816     return false;
33817 
33818   switch (VT.getSimpleVT().SimpleTy) {
33819   case MVT::f16:
33820     return Subtarget.hasFP16();
33821   case MVT::f32:
33822   case MVT::f64:
33823     return true;
33824   default:
33825     break;
33826   }
33827 
33828   return false;
33829 }
33830 
33831 bool X86TargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
33832   // i16 instructions are longer (0x66 prefix) and potentially slower.
33833   return !(SrcVT == MVT::i32 && DestVT == MVT::i16);
33834 }
33835 
33836 bool X86TargetLowering::shouldFoldSelectWithIdentityConstant(unsigned Opcode,
33837                                                              EVT VT) const {
33838   // TODO: This is too general. There are cases where pre-AVX512 codegen would
33839   //       benefit. The transform may also be profitable for scalar code.
33840   if (!Subtarget.hasAVX512())
33841     return false;
33842   if (!Subtarget.hasVLX() && !VT.is512BitVector())
33843     return false;
33844   if (!VT.isVector() || VT.getScalarType() == MVT::i1)
33845     return false;
33846 
33847   return true;
33848 }
33849 
33850 /// Targets can use this to indicate that they only support *some*
33851 /// VECTOR_SHUFFLE operations, those with specific masks.
33852 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
33853 /// are assumed to be legal.
33854 bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const {
33855   if (!VT.isSimple())
33856     return false;
33857 
33858   // Not for i1 vectors
33859   if (VT.getSimpleVT().getScalarType() == MVT::i1)
33860     return false;
33861 
33862   // Very little shuffling can be done for 64-bit vectors right now.
33863   if (VT.getSimpleVT().getSizeInBits() == 64)
33864     return false;
33865 
33866   // We only care that the types being shuffled are legal. The lowering can
33867   // handle any possible shuffle mask that results.
33868   return isTypeLegal(VT.getSimpleVT());
33869 }
33870 
33871 bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
33872                                                EVT VT) const {
33873   // Don't convert an 'and' into a shuffle that we don't directly support.
33874   // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
33875   if (!Subtarget.hasAVX2())
33876     if (VT == MVT::v32i8 || VT == MVT::v16i16)
33877       return false;
33878 
33879   // Just delegate to the generic legality, clear masks aren't special.
33880   return isShuffleMaskLegal(Mask, VT);
33881 }
33882 
33883 bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
33884   // If the subtarget uses indirect branch thunks, we must not generate jump tables.
33885   if (Subtarget.useIndirectThunkBranches())
33886     return false;
33887 
33888   // Otherwise, fallback on the generic logic.
33889   return TargetLowering::areJTsAllowed(Fn);
33890 }
33891 
33892 MVT X86TargetLowering::getPreferredSwitchConditionType(LLVMContext &Context,
33893                                                        EVT ConditionVT) const {
33894   // Avoid 8- and 16-bit types because they increase the chance of unnecessary
33895   // zero-extensions.
33896   if (ConditionVT.getSizeInBits() < 32)
33897     return MVT::i32;
33898   return TargetLoweringBase::getPreferredSwitchConditionType(Context,
33899                                                              ConditionVT);
33900 }
33901 
33902 //===----------------------------------------------------------------------===//
33903 //                           X86 Scheduler Hooks
33904 //===----------------------------------------------------------------------===//
33905 
33906 // Returns true if EFLAGS is consumed after this iterator in the rest of the
33907 // basic block or any successors of the basic block.
33908 static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
33909                               MachineBasicBlock *BB) {
33910   // Scan forward through BB for a use/def of EFLAGS.
33911   for (const MachineInstr &mi : llvm::make_range(std::next(Itr), BB->end())) {
33912     if (mi.readsRegister(X86::EFLAGS))
33913       return true;
33914     // If we found a def, we can stop searching.
33915     if (mi.definesRegister(X86::EFLAGS))
33916       return false;
33917   }
33918 
33919   // If we hit the end of the block, check whether EFLAGS is live into a
33920   // successor.
33921   for (MachineBasicBlock *Succ : BB->successors())
33922     if (Succ->isLiveIn(X86::EFLAGS))
33923       return true;
33924 
33925   return false;
33926 }
33927 
33928 /// Utility function to emit xbegin specifying the start of an RTM region.
33929 static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
33930                                      const TargetInstrInfo *TII) {
33931   const MIMetadata MIMD(MI);
33932 
33933   const BasicBlock *BB = MBB->getBasicBlock();
33934   MachineFunction::iterator I = ++MBB->getIterator();
33935 
33936   // For the v = xbegin(), we generate
33937   //
33938   // thisMBB:
33939   //  xbegin sinkMBB
33940   //
33941   // mainMBB:
33942   //  s0 = -1
33943   //
33944   // fallBB:
33945   //  eax = # XABORT_DEF
33946   //  s1 = eax
33947   //
33948   // sinkMBB:
33949   //  v = phi(s0/mainBB, s1/fallBB)
33950 
33951   MachineBasicBlock *thisMBB = MBB;
33952   MachineFunction *MF = MBB->getParent();
33953   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
33954   MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
33955   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
33956   MF->insert(I, mainMBB);
33957   MF->insert(I, fallMBB);
33958   MF->insert(I, sinkMBB);
33959 
33960   if (isEFLAGSLiveAfter(MI, MBB)) {
33961     mainMBB->addLiveIn(X86::EFLAGS);
33962     fallMBB->addLiveIn(X86::EFLAGS);
33963     sinkMBB->addLiveIn(X86::EFLAGS);
33964   }
33965 
33966   // Transfer the remainder of BB and its successor edges to sinkMBB.
33967   sinkMBB->splice(sinkMBB->begin(), MBB,
33968                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
33969   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
33970 
33971   MachineRegisterInfo &MRI = MF->getRegInfo();
33972   Register DstReg = MI.getOperand(0).getReg();
33973   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
33974   Register mainDstReg = MRI.createVirtualRegister(RC);
33975   Register fallDstReg = MRI.createVirtualRegister(RC);
33976 
33977   // thisMBB:
33978   //  xbegin fallMBB
33979   //  # fallthrough to mainMBB
33980   //  # abort branches to fallMBB
33981   BuildMI(thisMBB, MIMD, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
33982   thisMBB->addSuccessor(mainMBB);
33983   thisMBB->addSuccessor(fallMBB);
33984 
33985   // mainMBB:
33986   //  mainDstReg := -1
33987   BuildMI(mainMBB, MIMD, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
33988   BuildMI(mainMBB, MIMD, TII->get(X86::JMP_1)).addMBB(sinkMBB);
33989   mainMBB->addSuccessor(sinkMBB);
33990 
33991   // fallMBB:
33992   //  ; pseudo instruction to model hardware's definition from XABORT
33993   //  EAX := XABORT_DEF
33994   //  fallDstReg := EAX
33995   BuildMI(fallMBB, MIMD, TII->get(X86::XABORT_DEF));
33996   BuildMI(fallMBB, MIMD, TII->get(TargetOpcode::COPY), fallDstReg)
33997       .addReg(X86::EAX);
33998   fallMBB->addSuccessor(sinkMBB);
33999 
34000   // sinkMBB:
34001   //  DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
34002   BuildMI(*sinkMBB, sinkMBB->begin(), MIMD, TII->get(X86::PHI), DstReg)
34003       .addReg(mainDstReg).addMBB(mainMBB)
34004       .addReg(fallDstReg).addMBB(fallMBB);
34005 
34006   MI.eraseFromParent();
34007   return sinkMBB;
34008 }
34009 
34010 MachineBasicBlock *
34011 X86TargetLowering::EmitVAARGWithCustomInserter(MachineInstr &MI,
34012                                                MachineBasicBlock *MBB) const {
34013   // Emit va_arg instruction on X86-64.
34014 
34015   // Operands to this pseudo-instruction:
34016   // 0  ) Output        : destination address (reg)
34017   // 1-5) Input         : va_list address (addr, i64mem)
34018   // 6  ) ArgSize       : Size (in bytes) of vararg type
34019   // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
34020   // 8  ) Align         : Alignment of type
34021   // 9  ) EFLAGS (implicit-def)
34022 
34023   assert(MI.getNumOperands() == 10 && "VAARG should have 10 operands!");
34024   static_assert(X86::AddrNumOperands == 5, "VAARG assumes 5 address operands");
34025 
34026   Register DestReg = MI.getOperand(0).getReg();
34027   MachineOperand &Base = MI.getOperand(1);
34028   MachineOperand &Scale = MI.getOperand(2);
34029   MachineOperand &Index = MI.getOperand(3);
34030   MachineOperand &Disp = MI.getOperand(4);
34031   MachineOperand &Segment = MI.getOperand(5);
34032   unsigned ArgSize = MI.getOperand(6).getImm();
34033   unsigned ArgMode = MI.getOperand(7).getImm();
34034   Align Alignment = Align(MI.getOperand(8).getImm());
34035 
34036   MachineFunction *MF = MBB->getParent();
34037 
34038   // Memory Reference
34039   assert(MI.hasOneMemOperand() && "Expected VAARG to have one memoperand");
34040 
34041   MachineMemOperand *OldMMO = MI.memoperands().front();
34042 
34043   // Clone the MMO into two separate MMOs for loading and storing
34044   MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
34045       OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
34046   MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
34047       OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);
34048 
34049   // Machine Information
34050   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34051   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
34052   const TargetRegisterClass *AddrRegClass =
34053       getRegClassFor(getPointerTy(MBB->getParent()->getDataLayout()));
34054   const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
34055   const MIMetadata MIMD(MI);
34056 
34057   // struct va_list {
34058   //   i32   gp_offset
34059   //   i32   fp_offset
34060   //   i64   overflow_area (address)
34061   //   i64   reg_save_area (address)
34062   // }
34063   // sizeof(va_list) = 24
34064   // alignment(va_list) = 8
34065 
34066   unsigned TotalNumIntRegs = 6;
34067   unsigned TotalNumXMMRegs = 8;
34068   bool UseGPOffset = (ArgMode == 1);
34069   bool UseFPOffset = (ArgMode == 2);
34070   unsigned MaxOffset = TotalNumIntRegs * 8 +
34071                        (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
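        // Under the SysV AMD64 ABI this is 48 (6 GPRs * 8 bytes) when reading
        // gp_offset and 176 (48 + 8 XMM regs * 16 bytes) when reading fp_offset.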
34072 
34073   // Align ArgSize to a multiple of 8.
34074   unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
34075   bool NeedsAlign = (Alignment > 8);
34076 
34077   MachineBasicBlock *thisMBB = MBB;
34078   MachineBasicBlock *overflowMBB;
34079   MachineBasicBlock *offsetMBB;
34080   MachineBasicBlock *endMBB;
34081 
34082   unsigned OffsetDestReg = 0;    // Argument address computed by offsetMBB
34083   unsigned OverflowDestReg = 0;  // Argument address computed by overflowMBB
34084   unsigned OffsetReg = 0;
34085 
34086   if (!UseGPOffset && !UseFPOffset) {
34087     // If we only pull from the overflow region, we don't need to alter
34088     // control flow, so no extra blocks or branches are created.
34089     OffsetDestReg = 0; // unused
34090     OverflowDestReg = DestReg;
34091 
34092     offsetMBB = nullptr;
34093     overflowMBB = thisMBB;
34094     endMBB = thisMBB;
34095   } else {
34096     // First emit code to check if gp_offset (or fp_offset) is below the bound.
34097     // If so, pull the argument from reg_save_area. (branch to offsetMBB)
34098     // If not, pull from overflow_area. (branch to overflowMBB)
34099     //
34100     //       thisMBB
34101     //         |     .
34102     //         |        .
34103     //     offsetMBB   overflowMBB
34104     //         |        .
34105     //         |     .
34106     //        endMBB
34107 
34108     // Registers for the PHI in endMBB
34109     OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
34110     OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
34111 
34112     const BasicBlock *LLVM_BB = MBB->getBasicBlock();
34113     overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34114     offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34115     endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34116 
34117     MachineFunction::iterator MBBIter = ++MBB->getIterator();
34118 
34119     // Insert the new basic blocks
34120     MF->insert(MBBIter, offsetMBB);
34121     MF->insert(MBBIter, overflowMBB);
34122     MF->insert(MBBIter, endMBB);
34123 
34124     // Transfer the remainder of MBB and its successor edges to endMBB.
34125     endMBB->splice(endMBB->begin(), thisMBB,
34126                    std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
34127     endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
34128 
34129     // Make offsetMBB and overflowMBB successors of thisMBB
34130     thisMBB->addSuccessor(offsetMBB);
34131     thisMBB->addSuccessor(overflowMBB);
34132 
34133     // endMBB is a successor of both offsetMBB and overflowMBB
34134     offsetMBB->addSuccessor(endMBB);
34135     overflowMBB->addSuccessor(endMBB);
34136 
34137     // Load the offset value into a register
34138     OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
34139     BuildMI(thisMBB, MIMD, TII->get(X86::MOV32rm), OffsetReg)
34140         .add(Base)
34141         .add(Scale)
34142         .add(Index)
34143         .addDisp(Disp, UseFPOffset ? 4 : 0)
34144         .add(Segment)
34145         .setMemRefs(LoadOnlyMMO);
34146 
34147     // Check if there is enough room left to pull this argument.
34148     BuildMI(thisMBB, MIMD, TII->get(X86::CMP32ri))
34149       .addReg(OffsetReg)
34150       .addImm(MaxOffset + 8 - ArgSizeA8);
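          // The register save area is only used if the whole argument fits,
          // i.e. when OffsetReg + ArgSizeA8 <= MaxOffset.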
34151 
34152     // Branch to "overflowMBB" if offset >= max
34153     // Fall through to "offsetMBB" otherwise
34154     BuildMI(thisMBB, MIMD, TII->get(X86::JCC_1))
34155       .addMBB(overflowMBB).addImm(X86::COND_AE);
34156   }
34157 
34158   // In offsetMBB, emit code to use the reg_save_area.
34159   if (offsetMBB) {
34160     assert(OffsetReg != 0);
34161 
34162     // Read the reg_save_area address.
34163     Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
34164     BuildMI(
34165         offsetMBB, MIMD,
34166         TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
34167         RegSaveReg)
34168         .add(Base)
34169         .add(Scale)
34170         .add(Index)
34171         .addDisp(Disp, Subtarget.isTarget64BitLP64() ? 16 : 12)
34172         .add(Segment)
34173         .setMemRefs(LoadOnlyMMO);
34174 
34175     if (Subtarget.isTarget64BitLP64()) {
34176       // Zero-extend the offset
34177       Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
34178       BuildMI(offsetMBB, MIMD, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
34179           .addImm(0)
34180           .addReg(OffsetReg)
34181           .addImm(X86::sub_32bit);
34182 
34183       // Add the offset to the reg_save_area to get the final address.
34184       BuildMI(offsetMBB, MIMD, TII->get(X86::ADD64rr), OffsetDestReg)
34185           .addReg(OffsetReg64)
34186           .addReg(RegSaveReg);
34187     } else {
34188       // Add the offset to the reg_save_area to get the final address.
34189       BuildMI(offsetMBB, MIMD, TII->get(X86::ADD32rr), OffsetDestReg)
34190           .addReg(OffsetReg)
34191           .addReg(RegSaveReg);
34192     }
34193 
34194     // Compute the offset for the next argument
34195     Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
34196     BuildMI(offsetMBB, MIMD, TII->get(X86::ADD32ri), NextOffsetReg)
34197       .addReg(OffsetReg)
34198       .addImm(UseFPOffset ? 16 : 8);
34199 
34200     // Store it back into the va_list.
34201     BuildMI(offsetMBB, MIMD, TII->get(X86::MOV32mr))
34202         .add(Base)
34203         .add(Scale)
34204         .add(Index)
34205         .addDisp(Disp, UseFPOffset ? 4 : 0)
34206         .add(Segment)
34207         .addReg(NextOffsetReg)
34208         .setMemRefs(StoreOnlyMMO);
34209 
34210     // Jump to endMBB
34211     BuildMI(offsetMBB, MIMD, TII->get(X86::JMP_1))
34212       .addMBB(endMBB);
34213   }
34214 
34215   //
34216   // Emit code to use overflow area
34217   //
34218 
34219   // Load the overflow_area address into a register.
34220   Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
34221   BuildMI(overflowMBB, MIMD,
34222           TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64rm : X86::MOV32rm),
34223           OverflowAddrReg)
34224       .add(Base)
34225       .add(Scale)
34226       .add(Index)
34227       .addDisp(Disp, 8)
34228       .add(Segment)
34229       .setMemRefs(LoadOnlyMMO);
34230 
34231   // If we need to align it, do so. Otherwise, just copy the address
34232   // to OverflowDestReg.
34233   if (NeedsAlign) {
34234     // Align the overflow address
34235     Register TmpReg = MRI.createVirtualRegister(AddrRegClass);
34236 
34237     // aligned_addr = (addr + (align-1)) & ~(align-1)
34238     BuildMI(
34239         overflowMBB, MIMD,
34240         TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
34241         TmpReg)
34242         .addReg(OverflowAddrReg)
34243         .addImm(Alignment.value() - 1);
34244 
34245     BuildMI(
34246         overflowMBB, MIMD,
34247         TII->get(Subtarget.isTarget64BitLP64() ? X86::AND64ri32 : X86::AND32ri),
34248         OverflowDestReg)
34249         .addReg(TmpReg)
34250         .addImm(~(uint64_t)(Alignment.value() - 1));
34251   } else {
34252     BuildMI(overflowMBB, MIMD, TII->get(TargetOpcode::COPY), OverflowDestReg)
34253       .addReg(OverflowAddrReg);
34254   }
34255 
34256   // Compute the next overflow address after this argument.
34257   // (the overflow address should be kept 8-byte aligned)
34258   Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
34259   BuildMI(
34260       overflowMBB, MIMD,
34261       TII->get(Subtarget.isTarget64BitLP64() ? X86::ADD64ri32 : X86::ADD32ri),
34262       NextAddrReg)
34263       .addReg(OverflowDestReg)
34264       .addImm(ArgSizeA8);
34265 
34266   // Store the new overflow address.
34267   BuildMI(overflowMBB, MIMD,
34268           TII->get(Subtarget.isTarget64BitLP64() ? X86::MOV64mr : X86::MOV32mr))
34269       .add(Base)
34270       .add(Scale)
34271       .add(Index)
34272       .addDisp(Disp, 8)
34273       .add(Segment)
34274       .addReg(NextAddrReg)
34275       .setMemRefs(StoreOnlyMMO);
34276 
34277   // If we branched, emit the PHI to the front of endMBB.
34278   if (offsetMBB) {
34279     BuildMI(*endMBB, endMBB->begin(), MIMD,
34280             TII->get(X86::PHI), DestReg)
34281       .addReg(OffsetDestReg).addMBB(offsetMBB)
34282       .addReg(OverflowDestReg).addMBB(overflowMBB);
34283   }
34284 
34285   // Erase the pseudo instruction
34286   MI.eraseFromParent();
34287 
34288   return endMBB;
34289 }
34290 
34291 // The EFLAGS operand of SelectItr might be missing a kill marker
34292 // because there were multiple uses of EFLAGS, and ISel didn't know
34293 // which to mark. Figure out whether SelectItr should have had a
34294 // kill marker, and set it if it should. Returns the correct kill
34295 // marker value.
34296 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
34297                                      MachineBasicBlock* BB,
34298                                      const TargetRegisterInfo* TRI) {
34299   if (isEFLAGSLiveAfter(SelectItr, BB))
34300     return false;
34301 
34302   // We found a def, or hit the end of the basic block and EFLAGS wasn't live
34303   // out. SelectItr should have a kill flag on EFLAGS.
34304   SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
34305   return true;
34306 }
34307 
34308 // Return true if it is OK for this CMOV pseudo-opcode to be cascaded
34309 // together with other CMOV pseudo-opcodes into a single basic-block with
34310 // a conditional jump around it.
34311 static bool isCMOVPseudo(MachineInstr &MI) {
34312   switch (MI.getOpcode()) {
34313   case X86::CMOV_FR16:
34314   case X86::CMOV_FR16X:
34315   case X86::CMOV_FR32:
34316   case X86::CMOV_FR32X:
34317   case X86::CMOV_FR64:
34318   case X86::CMOV_FR64X:
34319   case X86::CMOV_GR8:
34320   case X86::CMOV_GR16:
34321   case X86::CMOV_GR32:
34322   case X86::CMOV_RFP32:
34323   case X86::CMOV_RFP64:
34324   case X86::CMOV_RFP80:
34325   case X86::CMOV_VR64:
34326   case X86::CMOV_VR128:
34327   case X86::CMOV_VR128X:
34328   case X86::CMOV_VR256:
34329   case X86::CMOV_VR256X:
34330   case X86::CMOV_VR512:
34331   case X86::CMOV_VK1:
34332   case X86::CMOV_VK2:
34333   case X86::CMOV_VK4:
34334   case X86::CMOV_VK8:
34335   case X86::CMOV_VK16:
34336   case X86::CMOV_VK32:
34337   case X86::CMOV_VK64:
34338     return true;
34339 
34340   default:
34341     return false;
34342   }
34343 }
34344 
34345 // Helper function, which inserts PHI functions into SinkMBB:
34346 //   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
34347 // where %FalseValue(i) and %TrueValue(i) are taken from the consecutive CMOVs
34348 // in [MIItBegin, MIItEnd) range. It returns the last MachineInstrBuilder for
34349 // the last PHI function inserted.
34350 static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
34351     MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
34352     MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
34353     MachineBasicBlock *SinkMBB) {
34354   MachineFunction *MF = TrueMBB->getParent();
34355   const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
34356   const MIMetadata MIMD(*MIItBegin);
34357 
34358   X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
34359   X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
34360 
34361   MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();
34362 
34363   // As we are creating the PHIs, we have to be careful if there is more than
34364   // one.  Later CMOVs may reference the results of earlier CMOVs, but later
34365   // PHIs have to reference the individual true/false inputs from earlier PHIs.
34366   // That also means that PHI construction must work forward from earlier to
34367   // later, and that the code must maintain a mapping from each earlier PHI's
34368   // destination register to the registers that went into that PHI.
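  // For example (schematically), for
  //   %t2 = CMOV %a, %b, cc
  //   %t3 = CMOV %t2, %c, cc
  // we must emit
  //   %t2 = PHI [%a, FalseMBB], [%b, TrueMBB]
  //   %t3 = PHI [%a, FalseMBB], [%c, TrueMBB]
  // because %t2, which itself becomes a PHI in SinkMBB, is not available along
  // either incoming edge; RegRewriteTable below records each earlier PHI's
  // (false-path, true-path) inputs so later PHIs can be rewritten this way.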
34369   DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
34370   MachineInstrBuilder MIB;
34371 
34372   for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
34373     Register DestReg = MIIt->getOperand(0).getReg();
34374     Register Op1Reg = MIIt->getOperand(1).getReg();
34375     Register Op2Reg = MIIt->getOperand(2).getReg();
34376 
34377     // If this CMOV we are generating is the opposite condition from
34378     // the jump we generated, then we have to swap the operands for the
34379     // PHI that is going to be generated.
34380     if (MIIt->getOperand(3).getImm() == OppCC)
34381       std::swap(Op1Reg, Op2Reg);
34382 
34383     if (RegRewriteTable.contains(Op1Reg))
34384       Op1Reg = RegRewriteTable[Op1Reg].first;
34385 
34386     if (RegRewriteTable.contains(Op2Reg))
34387       Op2Reg = RegRewriteTable[Op2Reg].second;
34388 
34389     MIB =
34390         BuildMI(*SinkMBB, SinkInsertionPoint, MIMD, TII->get(X86::PHI), DestReg)
34391             .addReg(Op1Reg)
34392             .addMBB(FalseMBB)
34393             .addReg(Op2Reg)
34394             .addMBB(TrueMBB);
34395 
34396     // Add this PHI to the rewrite table.
34397     RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
34398   }
34399 
34400   return MIB;
34401 }
34402 
34403 // Lower cascaded selects of the form (SecondCMOV (FirstCMOV F, T, cc1), T, cc2).
34404 MachineBasicBlock *
34405 X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
34406                                              MachineInstr &SecondCascadedCMOV,
34407                                              MachineBasicBlock *ThisMBB) const {
34408   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34409   const MIMetadata MIMD(FirstCMOV);
34410 
34411   // We lower cascaded CMOVs such as
34412   //
34413   //   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
34414   //
34415   // to two successive branches.
34416   //
34417   // Without this, we would add a PHI between the two jumps, which ends up
34418   // creating a few copies all around. For instance, for
34419   //
34420   //    (sitofp (zext (fcmp une)))
34421   //
34422   // we would generate:
34423   //
34424   //         ucomiss %xmm1, %xmm0
34425   //         movss  <1.0f>, %xmm0
34426   //         movaps  %xmm0, %xmm1
34427   //         jne     .LBB5_2
34428   //         xorps   %xmm1, %xmm1
34429   // .LBB5_2:
34430   //         jp      .LBB5_4
34431   //         movaps  %xmm1, %xmm0
34432   // .LBB5_4:
34433   //         retq
34434   //
34435   // because this custom-inserter would have generated:
34436   //
34437   //   A
34438   //   | \
34439   //   |  B
34440   //   | /
34441   //   C
34442   //   | \
34443   //   |  D
34444   //   | /
34445   //   E
34446   //
34447   // A: X = ...; Y = ...
34448   // B: empty
34449   // C: Z = PHI [X, A], [Y, B]
34450   // D: empty
34451   // E: PHI [X, C], [Z, D]
34452   //
34453   // If we lower both CMOVs in a single step, we can instead generate:
34454   //
34455   //   A
34456   //   | \
34457   //   |  C
34458   //   | /|
34459   //   |/ |
34460   //   |  |
34461   //   |  D
34462   //   | /
34463   //   E
34464   //
34465   // A: X = ...; Y = ...
34466   // D: empty
34467   // E: PHI [X, A], [X, C], [Y, D]
34468   //
34469   // Which, in our sitofp/fcmp example, gives us something like:
34470   //
34471   //         ucomiss %xmm1, %xmm0
34472   //         movss  <1.0f>, %xmm0
34473   //         jne     .LBB5_4
34474   //         jp      .LBB5_4
34475   //         xorps   %xmm0, %xmm0
34476   // .LBB5_4:
34477   //         retq
34478   //
34479 
34480   // We lower cascaded CMOV into two successive branches to the same block.
34481   // EFLAGS is used by both, so mark it as live in the second.
34482   const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
34483   MachineFunction *F = ThisMBB->getParent();
34484   MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
34485   MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
34486   MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
34487 
34488   MachineFunction::iterator It = ++ThisMBB->getIterator();
34489   F->insert(It, FirstInsertedMBB);
34490   F->insert(It, SecondInsertedMBB);
34491   F->insert(It, SinkMBB);
34492 
34493   // For a cascaded CMOV, we lower it to two successive branches to
34494   // the same block (SinkMBB).  EFLAGS is used by both, so mark it as live in
34495   // the FirstInsertedMBB.
34496   FirstInsertedMBB->addLiveIn(X86::EFLAGS);
34497 
34498   // If the EFLAGS register isn't dead in the terminator, then claim that it's
34499   // live into the sink and copy blocks.
34500   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
34501   if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
34502       !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
34503     SecondInsertedMBB->addLiveIn(X86::EFLAGS);
34504     SinkMBB->addLiveIn(X86::EFLAGS);
34505   }
34506 
34507   // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
34508   SinkMBB->splice(SinkMBB->begin(), ThisMBB,
34509                   std::next(MachineBasicBlock::iterator(FirstCMOV)),
34510                   ThisMBB->end());
34511   SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
34512 
34513   // Fallthrough block for ThisMBB.
34514   ThisMBB->addSuccessor(FirstInsertedMBB);
34515   // The true block target of the first branch is always SinkMBB.
34516   ThisMBB->addSuccessor(SinkMBB);
34517   // Fallthrough block for FirstInsertedMBB.
34518   FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
34519   // The true block for the branch of FirstInsertedMBB.
34520   FirstInsertedMBB->addSuccessor(SinkMBB);
34521   // This is fallthrough.
34522   SecondInsertedMBB->addSuccessor(SinkMBB);
34523 
34524   // Create the conditional branch instructions.
34525   X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
34526   BuildMI(ThisMBB, MIMD, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);
34527 
34528   X86::CondCode SecondCC =
34529       X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
34530   BuildMI(FirstInsertedMBB, MIMD, TII->get(X86::JCC_1))
34531       .addMBB(SinkMBB)
34532       .addImm(SecondCC);
34533 
34534   //  SinkMBB:
34535   //   %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
34536   Register DestReg = SecondCascadedCMOV.getOperand(0).getReg();
34537   Register Op1Reg = FirstCMOV.getOperand(1).getReg();
34538   Register Op2Reg = FirstCMOV.getOperand(2).getReg();
34539   MachineInstrBuilder MIB =
34540       BuildMI(*SinkMBB, SinkMBB->begin(), MIMD, TII->get(X86::PHI), DestReg)
34541           .addReg(Op1Reg)
34542           .addMBB(SecondInsertedMBB)
34543           .addReg(Op2Reg)
34544           .addMBB(ThisMBB);
34545 
34546   // The SecondInsertedMBB provides the same incoming value as the
34547   // FirstInsertedMBB (the True operand of the SELECT_CC/CMOV nodes).
34548   MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
34549 
34550   // Now remove the CMOVs.
34551   FirstCMOV.eraseFromParent();
34552   SecondCascadedCMOV.eraseFromParent();
34553 
34554   return SinkMBB;
34555 }
34556 
34557 MachineBasicBlock *
34558 X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
34559                                      MachineBasicBlock *ThisMBB) const {
34560   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34561   const MIMetadata MIMD(MI);
34562 
34563   // To "insert" a SELECT_CC instruction, we actually have to insert the
34564   // diamond control-flow pattern.  The incoming instruction knows the
34565   // destination vreg to set, the condition code register to branch on, the
34566   // true/false values to select between and a branch opcode to use.
34567 
34568   //  ThisMBB:
34569   //  ...
34570   //   TrueVal = ...
34571   //   cmpTY ccX, r1, r2
34572   //   bCC copy1MBB
34573   //   fallthrough --> FalseMBB
34574 
34575   // This code lowers all pseudo-CMOV instructions. Generally it lowers these
34576   // as described above, by inserting a BB, and then making a PHI at the join
34577   // point to select the true and false operands of the CMOV in the PHI.
34578   //
34579   // The code also handles two different cases of multiple CMOV opcodes
34580   // in a row.
34581   //
34582   // Case 1:
34583   // In this case, there are multiple CMOVs in a row, all of which are based on
34584   // the same condition setting (or the exact opposite condition setting).
34585   // In this case we can lower all the CMOVs using a single inserted BB, and
34586   // then make a number of PHIs at the join point to model the CMOVs. The only
34587   // trickiness here is that in a case like:
34588   //
34589   // t2 = CMOV cond1 t1, f1
34590   // t3 = CMOV cond1 t2, f2
34591   //
34592   // when rewriting this into PHIs, we have to perform some renaming on the
34593   // temps since you cannot have a PHI operand refer to a PHI result earlier
34594   // in the same block.  The "simple" but wrong lowering would be:
34595   //
34596   // t2 = PHI t1(BB1), f1(BB2)
34597   // t3 = PHI t2(BB1), f2(BB2)
34598   //
34599   // but clearly t2 is not defined in BB1, so that is incorrect. The proper
34600   // renaming is to note that on the path through BB1, t2 is really just a
34601   // copy of t1, and do that renaming, properly generating:
34602   //
34603   // t2 = PHI t1(BB1), f1(BB2)
34604   // t3 = PHI t1(BB1), f2(BB2)
34605   //
34606   // Case 2:
34607   // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
34608   // function - EmitLoweredCascadedSelect.
34609 
34610   X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
34611   X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
34612   MachineInstr *LastCMOV = &MI;
34613   MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);
34614 
34615   // Check for case 1, where there are multiple CMOVs with the same condition
34616   // first.  Of the two cases of multiple CMOV lowerings, case 1 reduces the
34617   // number of jumps the most.
34618 
34619   if (isCMOVPseudo(MI)) {
34620     // See if we have a string of CMOVS with the same condition. Skip over
34621     // intervening debug insts.
34622     while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
34623            (NextMIIt->getOperand(3).getImm() == CC ||
34624             NextMIIt->getOperand(3).getImm() == OppCC)) {
34625       LastCMOV = &*NextMIIt;
34626       NextMIIt = next_nodbg(NextMIIt, ThisMBB->end());
34627     }
34628   }
34629 
34630   // Check for case 2, but only if we didn't already find case 1 (indicated by
34631   // LastCMOV still pointing at MI).
34632   if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
34633       NextMIIt->getOpcode() == MI.getOpcode() &&
34634       NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
34635       NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
34636       NextMIIt->getOperand(1).isKill()) {
34637     return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
34638   }
34639 
34640   const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
34641   MachineFunction *F = ThisMBB->getParent();
34642   MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
34643   MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
34644 
34645   MachineFunction::iterator It = ++ThisMBB->getIterator();
34646   F->insert(It, FalseMBB);
34647   F->insert(It, SinkMBB);
34648 
34649   // Set the call frame size on entry to the new basic blocks.
34650   unsigned CallFrameSize = TII->getCallFrameSizeAt(MI);
34651   FalseMBB->setCallFrameSize(CallFrameSize);
34652   SinkMBB->setCallFrameSize(CallFrameSize);
34653 
34654   // If the EFLAGS register isn't dead in the terminator, then claim that it's
34655   // live into the sink and copy blocks.
34656   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
34657   if (!LastCMOV->killsRegister(X86::EFLAGS) &&
34658       !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
34659     FalseMBB->addLiveIn(X86::EFLAGS);
34660     SinkMBB->addLiveIn(X86::EFLAGS);
34661   }
34662 
34663   // Transfer any debug instructions inside the CMOV sequence to the sunk block.
34664   auto DbgRange = llvm::make_range(MachineBasicBlock::iterator(MI),
34665                                    MachineBasicBlock::iterator(LastCMOV));
34666   for (MachineInstr &MI : llvm::make_early_inc_range(DbgRange))
34667     if (MI.isDebugInstr())
34668       SinkMBB->push_back(MI.removeFromParent());
34669 
34670   // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
34671   SinkMBB->splice(SinkMBB->end(), ThisMBB,
34672                   std::next(MachineBasicBlock::iterator(LastCMOV)),
34673                   ThisMBB->end());
34674   SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
34675 
34676   // Fallthrough block for ThisMBB.
34677   ThisMBB->addSuccessor(FalseMBB);
34678   // The true block target of the first (or only) branch is always SinkMBB.
34679   ThisMBB->addSuccessor(SinkMBB);
34680   // Fallthrough block for FalseMBB.
34681   FalseMBB->addSuccessor(SinkMBB);
34682 
34683   // Create the conditional branch instruction.
34684   BuildMI(ThisMBB, MIMD, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);
34685 
34686   //  SinkMBB:
34687   //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
34688   //  ...
34689   MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
34690   MachineBasicBlock::iterator MIItEnd =
34691       std::next(MachineBasicBlock::iterator(LastCMOV));
34692   createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);
34693 
34694   // Now remove the CMOV(s).
34695   ThisMBB->erase(MIItBegin, MIItEnd);
34696 
34697   return SinkMBB;
34698 }
34699 
34700 static unsigned getSUBriOpcode(bool IsLP64) {
34701   if (IsLP64)
34702     return X86::SUB64ri32;
34703   else
34704     return X86::SUB32ri;
34705 }
34706 
34707 MachineBasicBlock *
34708 X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
34709                                            MachineBasicBlock *MBB) const {
34710   MachineFunction *MF = MBB->getParent();
34711   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34712   const X86FrameLowering &TFI = *Subtarget.getFrameLowering();
34713   const MIMetadata MIMD(MI);
34714   const BasicBlock *LLVM_BB = MBB->getBasicBlock();
34715 
34716   const unsigned ProbeSize = getStackProbeSize(*MF);
34717 
34718   MachineRegisterInfo &MRI = MF->getRegInfo();
34719   MachineBasicBlock *testMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34720   MachineBasicBlock *tailMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34721   MachineBasicBlock *blockMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34722 
34723   MachineFunction::iterator MBBIter = ++MBB->getIterator();
34724   MF->insert(MBBIter, testMBB);
34725   MF->insert(MBBIter, blockMBB);
34726   MF->insert(MBBIter, tailMBB);
34727 
34728   Register sizeVReg = MI.getOperand(1).getReg();
34729 
34730   Register physSPReg = TFI.Uses64BitFramePtr ? X86::RSP : X86::ESP;
34731 
34732   Register TmpStackPtr = MRI.createVirtualRegister(
34733       TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
34734   Register FinalStackPtr = MRI.createVirtualRegister(
34735       TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
34736 
34737   BuildMI(*MBB, {MI}, MIMD, TII->get(TargetOpcode::COPY), TmpStackPtr)
34738       .addReg(physSPReg);
34739   {
34740     const unsigned Opc = TFI.Uses64BitFramePtr ? X86::SUB64rr : X86::SUB32rr;
34741     BuildMI(*MBB, {MI}, MIMD, TII->get(Opc), FinalStackPtr)
34742         .addReg(TmpStackPtr)
34743         .addReg(sizeVReg);
34744   }
34745 
34746   // Check whether the stack pointer has reached FinalStackPtr yet.
34747 
34748   BuildMI(testMBB, MIMD,
34749           TII->get(TFI.Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
34750       .addReg(FinalStackPtr)
34751       .addReg(physSPReg);
34752 
34753   BuildMI(testMBB, MIMD, TII->get(X86::JCC_1))
34754       .addMBB(tailMBB)
34755       .addImm(X86::COND_GE);
34756   testMBB->addSuccessor(blockMBB);
34757   testMBB->addSuccessor(tailMBB);
34758 
34759   // Touch the block then extend it. This is the opposite of the static probe,
34760   // where we allocate then touch, and avoids having to probe the tail of the
34761   // static alloca. Possible scenarios are:
34762   //
34763   //       + ---- <- ------------ <- ------------- <- ------------ +
34764   //       |                                                       |
34765   // [free probe] -> [page alloc] -> [alloc probe] -> [tail alloc] + -> [dyn probe] -> [page alloc] -> [dyn probe] -> [tail alloc] +
34766   //                                                               |                                                               |
34767   //                                                               + <- ----------- <- ------------ <- ----------- <- ------------ +
34768   //
34769   // The property we want to enforce is to never have more than [page alloc] between two probes.
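  //
  // Schematically, the emitted probe loop is (64-bit case, physSPReg == RSP):
  //
  //   final = rsp - size
  // testMBB:
  //   if (final >= rsp) goto tailMBB      // CMP + JCC COND_GE
  // blockMBB:
  //   xor [rsp + 0], 0                    // touch the current page
  //   rsp -= ProbeSize
  //   goto testMBB
  // tailMBB:
  //   result = final                      // COPY FinalStackPtr into the def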
34770 
34771   const unsigned XORMIOpc =
34772       TFI.Uses64BitFramePtr ? X86::XOR64mi32 : X86::XOR32mi;
34773   addRegOffset(BuildMI(blockMBB, MIMD, TII->get(XORMIOpc)), physSPReg, false, 0)
34774       .addImm(0);
34775 
34776   BuildMI(blockMBB, MIMD, TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr)),
34777           physSPReg)
34778       .addReg(physSPReg)
34779       .addImm(ProbeSize);
34780 
34781   BuildMI(blockMBB, MIMD, TII->get(X86::JMP_1)).addMBB(testMBB);
34782   blockMBB->addSuccessor(testMBB);
34783 
34784   // Replace the original instruction's result with the expected stack pointer.
34785   BuildMI(tailMBB, MIMD, TII->get(TargetOpcode::COPY),
34786           MI.getOperand(0).getReg())
34787       .addReg(FinalStackPtr);
34788 
34789   tailMBB->splice(tailMBB->end(), MBB,
34790                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
34791   tailMBB->transferSuccessorsAndUpdatePHIs(MBB);
34792   MBB->addSuccessor(testMBB);
34793 
34794   // Delete the original pseudo instruction.
34795   MI.eraseFromParent();
34796 
34797   // And we're done.
34798   return tailMBB;
34799 }
34800 
34801 MachineBasicBlock *
34802 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
34803                                         MachineBasicBlock *BB) const {
34804   MachineFunction *MF = BB->getParent();
34805   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
34806   const MIMetadata MIMD(MI);
34807   const BasicBlock *LLVM_BB = BB->getBasicBlock();
34808 
34809   assert(MF->shouldSplitStack());
34810 
34811   const bool Is64Bit = Subtarget.is64Bit();
34812   const bool IsLP64 = Subtarget.isTarget64BitLP64();
34813 
34814   const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
34815   const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
34816 
34817   // BB:
34818   //  ... [Till the alloca]
34819   // If stacklet is not large enough, jump to mallocMBB
34820   //
34821   // bumpMBB:
34822   //  Allocate by subtracting from RSP
34823   //  Jump to continueMBB
34824   //
34825   // mallocMBB:
34826   //  Allocate by call to runtime
34827   //
34828   // continueMBB:
34829   //  ...
34830   //  [rest of original BB]
34831   //
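  // The "stacklet is not large enough" check below compares the stack limit
  // stored in TLS against the would-be stack pointer, roughly:
  //
  //   SPLimitVReg = SP - size
  //   if ([TlsReg:TlsOffset] > SPLimitVReg)   // limit above the new SP
  //     goto mallocMBB                        // not enough room; call runtime
  //   // otherwise fall through to bumpMBB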
34832 
34833   MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34834   MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34835   MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
34836 
34837   MachineRegisterInfo &MRI = MF->getRegInfo();
34838   const TargetRegisterClass *AddrRegClass =
34839       getRegClassFor(getPointerTy(MF->getDataLayout()));
34840 
34841   Register mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
34842            bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
34843            tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
34844            SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
34845            sizeVReg = MI.getOperand(1).getReg(),
34846            physSPReg =
34847                IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;
34848 
34849   MachineFunction::iterator MBBIter = ++BB->getIterator();
34850 
34851   MF->insert(MBBIter, bumpMBB);
34852   MF->insert(MBBIter, mallocMBB);
34853   MF->insert(MBBIter, continueMBB);
34854 
34855   continueMBB->splice(continueMBB->begin(), BB,
34856                       std::next(MachineBasicBlock::iterator(MI)), BB->end());
34857   continueMBB->transferSuccessorsAndUpdatePHIs(BB);
34858 
34859   // Add code to the main basic block to check if the stack limit has been hit,
34860   // and if so, jump to mallocMBB; otherwise fall through to bumpMBB.
34861   BuildMI(BB, MIMD, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
34862   BuildMI(BB, MIMD, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
34863     .addReg(tmpSPVReg).addReg(sizeVReg);
34864   BuildMI(BB, MIMD, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
34865     .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
34866     .addReg(SPLimitVReg);
34867   BuildMI(BB, MIMD, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);
34868 
34869   // bumpMBB simply decreases the stack pointer, since we know the current
34870   // stacklet has enough space.
34871   BuildMI(bumpMBB, MIMD, TII->get(TargetOpcode::COPY), physSPReg)
34872     .addReg(SPLimitVReg);
34873   BuildMI(bumpMBB, MIMD, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
34874     .addReg(SPLimitVReg);
34875   BuildMI(bumpMBB, MIMD, TII->get(X86::JMP_1)).addMBB(continueMBB);
34876 
34877   // Calls into a routine in libgcc to allocate more space from the heap.
34878   const uint32_t *RegMask =
34879       Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
34880   if (IsLP64) {
34881     BuildMI(mallocMBB, MIMD, TII->get(X86::MOV64rr), X86::RDI)
34882       .addReg(sizeVReg);
34883     BuildMI(mallocMBB, MIMD, TII->get(X86::CALL64pcrel32))
34884       .addExternalSymbol("__morestack_allocate_stack_space")
34885       .addRegMask(RegMask)
34886       .addReg(X86::RDI, RegState::Implicit)
34887       .addReg(X86::RAX, RegState::ImplicitDefine);
34888   } else if (Is64Bit) {
34889     BuildMI(mallocMBB, MIMD, TII->get(X86::MOV32rr), X86::EDI)
34890       .addReg(sizeVReg);
34891     BuildMI(mallocMBB, MIMD, TII->get(X86::CALL64pcrel32))
34892       .addExternalSymbol("__morestack_allocate_stack_space")
34893       .addRegMask(RegMask)
34894       .addReg(X86::EDI, RegState::Implicit)
34895       .addReg(X86::EAX, RegState::ImplicitDefine);
34896   } else {
34897     BuildMI(mallocMBB, MIMD, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
34898       .addImm(12);
34899     BuildMI(mallocMBB, MIMD, TII->get(X86::PUSH32r)).addReg(sizeVReg);
34900     BuildMI(mallocMBB, MIMD, TII->get(X86::CALLpcrel32))
34901       .addExternalSymbol("__morestack_allocate_stack_space")
34902       .addRegMask(RegMask)
34903       .addReg(X86::EAX, RegState::ImplicitDefine);
34904   }
34905 
34906   if (!Is64Bit)
34907     BuildMI(mallocMBB, MIMD, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
34908       .addImm(16);
34909 
34910   BuildMI(mallocMBB, MIMD, TII->get(TargetOpcode::COPY), mallocPtrVReg)
34911     .addReg(IsLP64 ? X86::RAX : X86::EAX);
34912   BuildMI(mallocMBB, MIMD, TII->get(X86::JMP_1)).addMBB(continueMBB);
34913 
34914   // Set up the CFG correctly.
34915   BB->addSuccessor(bumpMBB);
34916   BB->addSuccessor(mallocMBB);
34917   mallocMBB->addSuccessor(continueMBB);
34918   bumpMBB->addSuccessor(continueMBB);
34919 
34920   // Take care of the PHI nodes.
34921   BuildMI(*continueMBB, continueMBB->begin(), MIMD, TII->get(X86::PHI),
34922           MI.getOperand(0).getReg())
34923       .addReg(mallocPtrVReg)
34924       .addMBB(mallocMBB)
34925       .addReg(bumpSPPtrVReg)
34926       .addMBB(bumpMBB);
34927 
34928   // Delete the original pseudo instruction.
34929   MI.eraseFromParent();
34930 
34931   // And we're done.
34932   return continueMBB;
34933 }
34934 
34935 MachineBasicBlock *
34936 X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
34937                                        MachineBasicBlock *BB) const {
34938   MachineFunction *MF = BB->getParent();
34939   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
34940   MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
34941   const MIMetadata MIMD(MI);
34942 
34943   assert(!isAsynchronousEHPersonality(
34944              classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
34945          "SEH does not use catchret!");
34946 
34947   // Only 32-bit EH needs to worry about manually restoring stack pointers.
34948   if (!Subtarget.is32Bit())
34949     return BB;
34950 
34951   // C++ EH creates a new target block to hold the restore code, and wires up
34952   // the new block to the return destination with a normal JMP_4.
34953   MachineBasicBlock *RestoreMBB =
34954       MF->CreateMachineBasicBlock(BB->getBasicBlock());
34955   assert(BB->succ_size() == 1);
34956   MF->insert(std::next(BB->getIterator()), RestoreMBB);
34957   RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
34958   BB->addSuccessor(RestoreMBB);
34959   MI.getOperand(0).setMBB(RestoreMBB);
34960 
34961   // Marking this as an EH pad but not a funclet entry block causes PEI to
34962   // restore stack pointers in the block.
34963   RestoreMBB->setIsEHPad(true);
34964 
34965   auto RestoreMBBI = RestoreMBB->begin();
34966   BuildMI(*RestoreMBB, RestoreMBBI, MIMD, TII.get(X86::JMP_4)).addMBB(TargetMBB);
34967   return BB;
34968 }
34969 
34970 MachineBasicBlock *
34971 X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
34972                                       MachineBasicBlock *BB) const {
34973   // Here we replace TLSADDR with the sequence:
34974   // adjust_stackdown -> TLSADDR -> adjust_stackup.
34975   // We need this because TLSADDR is lowered into a call
34976   // inside MC; without the two markers, shrink-wrapping
34977   // may push the prologue/epilogue past them.
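  //
  // Schematically, the block then contains:
  //   ADJCALLSTACKDOWN 0, 0, 0   (CALLSEQ_START marker, 32/64-bit variant)
  //   TLSADDR ...
  //   ADJCALLSTACKUP 0, 0        (CALLSEQ_END marker)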
34978   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
34979   const MIMetadata MIMD(MI);
34980   MachineFunction &MF = *BB->getParent();
34981 
34982   // Emit CALLSEQ_START right before the instruction.
34983   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
34984   MachineInstrBuilder CallseqStart =
34985       BuildMI(MF, MIMD, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
34986   BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);
34987 
34988   // Emit CALLSEQ_END right after the instruction.
34989   // We don't call erase from parent because we want to keep the
34990   // original instruction around.
34991   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
34992   MachineInstrBuilder CallseqEnd =
34993       BuildMI(MF, MIMD, TII.get(AdjStackUp)).addImm(0).addImm(0);
34994   BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);
34995 
34996   return BB;
34997 }
34998 
34999 MachineBasicBlock *
35000 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
35001                                       MachineBasicBlock *BB) const {
35002   // This is pretty easy.  We're taking the value that we received from
35003   // our load from the relocation, sticking it in either RDI (x86-64)
35004   // or EAX and doing an indirect call.  The return value will then
35005   // be in the normal return register.
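  //
  // Schematically, on x86-64 this expands to something like:
  //   movq _var@TLVP(%rip), %rdi
  //   callq *(%rdi)
  // with the result left in the usual return register.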
35006   MachineFunction *F = BB->getParent();
35007   const X86InstrInfo *TII = Subtarget.getInstrInfo();
35008   const MIMetadata MIMD(MI);
35009 
35010   assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
35011   assert(MI.getOperand(3).isGlobal() && "This should be a global");
35012 
35013   // Get a register mask for the lowered call.
35014   // FIXME: The 32-bit calls have non-standard calling conventions. Use a
35015   // proper register mask.
35016   const uint32_t *RegMask =
35017       Subtarget.is64Bit() ?
35018       Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
35019       Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
35020   if (Subtarget.is64Bit()) {
35021     MachineInstrBuilder MIB =
35022         BuildMI(*BB, MI, MIMD, TII->get(X86::MOV64rm), X86::RDI)
35023             .addReg(X86::RIP)
35024             .addImm(0)
35025             .addReg(0)
35026             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
35027                               MI.getOperand(3).getTargetFlags())
35028             .addReg(0);
35029     MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL64m));
35030     addDirectMem(MIB, X86::RDI);
35031     MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
35032   } else if (!isPositionIndependent()) {
35033     MachineInstrBuilder MIB =
35034         BuildMI(*BB, MI, MIMD, TII->get(X86::MOV32rm), X86::EAX)
35035             .addReg(0)
35036             .addImm(0)
35037             .addReg(0)
35038             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
35039                               MI.getOperand(3).getTargetFlags())
35040             .addReg(0);
35041     MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL32m));
35042     addDirectMem(MIB, X86::EAX);
35043     MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
35044   } else {
35045     MachineInstrBuilder MIB =
35046         BuildMI(*BB, MI, MIMD, TII->get(X86::MOV32rm), X86::EAX)
35047             .addReg(TII->getGlobalBaseReg(F))
35048             .addImm(0)
35049             .addReg(0)
35050             .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
35051                               MI.getOperand(3).getTargetFlags())
35052             .addReg(0);
35053     MIB = BuildMI(*BB, MI, MIMD, TII->get(X86::CALL32m));
35054     addDirectMem(MIB, X86::EAX);
35055     MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
35056   }
35057 
35058   MI.eraseFromParent(); // The pseudo instruction is gone now.
35059   return BB;
35060 }
35061 
35062 static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
35063   switch (RPOpc) {
35064   case X86::INDIRECT_THUNK_CALL32:
35065     return X86::CALLpcrel32;
35066   case X86::INDIRECT_THUNK_CALL64:
35067     return X86::CALL64pcrel32;
35068   case X86::INDIRECT_THUNK_TCRETURN32:
35069     return X86::TCRETURNdi;
35070   case X86::INDIRECT_THUNK_TCRETURN64:
35071     return X86::TCRETURNdi64;
35072   }
35073   llvm_unreachable("not indirect thunk opcode");
35074 }
35075 
35076 static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
35077                                           unsigned Reg) {
35078   if (Subtarget.useRetpolineExternalThunk()) {
35079     // When using an external thunk for retpolines, we pick names that match the
35080     // names GCC happens to use as well. This helps simplify the implementation
35081     // of the thunks for kernels where they have no easy ability to create
35082     // aliases and are doing non-trivial configuration of the thunk's body. For
35083     // example, the Linux kernel will do boot-time hot patching of the thunk
35084     // bodies and cannot easily export aliases of these to loaded modules.
35085     //
35086     // Note that at any point in the future, we may need to change the semantics
35087     // of how we implement retpolines and at that time will likely change the
35088     // name of the called thunk. Essentially, there is no hard guarantee that
35089     // LLVM will generate calls to specific thunks, we merely make a best-effort
35090     // attempt to help out kernels and other systems where duplicating the
35091     // thunks is costly.
35092     switch (Reg) {
35093     case X86::EAX:
35094       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35095       return "__x86_indirect_thunk_eax";
35096     case X86::ECX:
35097       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35098       return "__x86_indirect_thunk_ecx";
35099     case X86::EDX:
35100       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35101       return "__x86_indirect_thunk_edx";
35102     case X86::EDI:
35103       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35104       return "__x86_indirect_thunk_edi";
35105     case X86::R11:
35106       assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
35107       return "__x86_indirect_thunk_r11";
35108     }
35109     llvm_unreachable("unexpected reg for external indirect thunk");
35110   }
35111 
35112   if (Subtarget.useRetpolineIndirectCalls() ||
35113       Subtarget.useRetpolineIndirectBranches()) {
35114     // When targeting an internal COMDAT thunk use an LLVM-specific name.
35115     switch (Reg) {
35116     case X86::EAX:
35117       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35118       return "__llvm_retpoline_eax";
35119     case X86::ECX:
35120       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35121       return "__llvm_retpoline_ecx";
35122     case X86::EDX:
35123       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35124       return "__llvm_retpoline_edx";
35125     case X86::EDI:
35126       assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
35127       return "__llvm_retpoline_edi";
35128     case X86::R11:
35129       assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
35130       return "__llvm_retpoline_r11";
35131     }
35132     llvm_unreachable("unexpected reg for retpoline");
35133   }
35134 
35135   if (Subtarget.useLVIControlFlowIntegrity()) {
35136     assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
35137     return "__llvm_lvi_thunk_r11";
35138   }
35139   llvm_unreachable("getIndirectThunkSymbol() invoked without thunk feature");
35140 }
35141 
35142 MachineBasicBlock *
35143 X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
35144                                             MachineBasicBlock *BB) const {
35145   // Copy the virtual register into the R11 physical register and
35146   // call the retpoline thunk.
35147   const MIMetadata MIMD(MI);
35148   const X86InstrInfo *TII = Subtarget.getInstrInfo();
35149   Register CalleeVReg = MI.getOperand(0).getReg();
35150   unsigned Opc = getOpcodeForIndirectThunk(MI.getOpcode());
35151 
35152   // Find an available scratch register to hold the callee. On 64-bit, we can
35153   // just use R11, but we scan for uses anyway to ensure we don't generate
35154   // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
35155   // already a register use operand to the call to hold the callee. If none
35156   // are available, use EDI instead. EDI is chosen because EBX is the PIC base
35157   // register and ESI is the base pointer to realigned stack frames with VLAs.
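  //
  // For example, on 64-bit targets
  //   INDIRECT_THUNK_CALL64 %callee
  // is rewritten below into roughly
  //   $r11 = COPY %callee
  //   CALL64pcrel32 &__llvm_retpoline_r11, implicit killed $r11
  // where the thunk symbol depends on which mitigation is enabled (see
  // getIndirectThunkSymbol).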
35158   SmallVector<unsigned, 3> AvailableRegs;
35159   if (Subtarget.is64Bit())
35160     AvailableRegs.push_back(X86::R11);
35161   else
35162     AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
35163 
35164   // Zero out any registers that are already used.
35165   for (const auto &MO : MI.operands()) {
35166     if (MO.isReg() && MO.isUse())
35167       for (unsigned &Reg : AvailableRegs)
35168         if (Reg == MO.getReg())
35169           Reg = 0;
35170   }
35171 
35172   // Choose the first remaining non-zero available register.
35173   unsigned AvailableReg = 0;
35174   for (unsigned MaybeReg : AvailableRegs) {
35175     if (MaybeReg) {
35176       AvailableReg = MaybeReg;
35177       break;
35178     }
35179   }
35180   if (!AvailableReg)
35181     report_fatal_error("calling convention incompatible with retpoline, no "
35182                        "available registers");
35183 
35184   const char *Symbol = getIndirectThunkSymbol(Subtarget, AvailableReg);
35185 
35186   BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), AvailableReg)
35187       .addReg(CalleeVReg);
35188   MI.getOperand(0).ChangeToES(Symbol);
35189   MI.setDesc(TII->get(Opc));
35190   MachineInstrBuilder(*BB->getParent(), &MI)
35191       .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
35192   return BB;
35193 }
35194 
35195 /// SetJmp implies future control flow change upon calling the corresponding
35196 /// LongJmp.
35197 /// Instead of using the 'return' instruction, the long jump fixes the stack and
35198 /// performs an indirect branch. To do so it uses the registers that were stored
35199 /// in the jump buffer (when calling SetJmp).
35200 /// If the shadow stack is enabled, we need to fix it as well, because some
35201 /// return addresses will be skipped.
35202 /// The function will save the SSP for future fixing in the function
35203 /// emitLongJmpShadowStackFix.
35204 /// \sa emitLongJmpShadowStackFix
35205 /// \param [in] MI The temporary Machine Instruction for the builtin.
35206 /// \param [in] MBB The Machine Basic Block that will be modified.
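/// As used by this lowering, the jump buffer holds pointer-sized slots: slot 0
/// is the frame pointer, slot 1 the resume address, slot 2 the stack pointer,
/// and slot 3 the saved SSP (hence the 3 * PVT.getStoreSize() offset below).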
35207 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
35208                                                  MachineBasicBlock *MBB) const {
35209   const MIMetadata MIMD(MI);
35210   MachineFunction *MF = MBB->getParent();
35211   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35212   MachineRegisterInfo &MRI = MF->getRegInfo();
35213   MachineInstrBuilder MIB;
35214 
35215   // Memory Reference.
35216   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35217                                            MI.memoperands_end());
35218 
35219   // Initialize a register with zero.
35220   MVT PVT = getPointerTy(MF->getDataLayout());
35221   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35222   Register ZReg = MRI.createVirtualRegister(PtrRC);
35223   unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
35224   BuildMI(*MBB, MI, MIMD, TII->get(XorRROpc))
35225       .addDef(ZReg)
35226       .addReg(ZReg, RegState::Undef)
35227       .addReg(ZReg, RegState::Undef);
35228 
35229   // Read the current SSP register value into the zeroed register.
35230   Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
35231   unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
35232   BuildMI(*MBB, MI, MIMD, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
35233 
35234   // Write the SSP register value to offset 3 in the input memory buffer.
35235   unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35236   MIB = BuildMI(*MBB, MI, MIMD, TII->get(PtrStoreOpc));
35237   const int64_t SSPOffset = 3 * PVT.getStoreSize();
35238   const unsigned MemOpndSlot = 1;
35239   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35240     if (i == X86::AddrDisp)
35241       MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
35242     else
35243       MIB.add(MI.getOperand(MemOpndSlot + i));
35244   }
35245   MIB.addReg(SSPCopyReg);
35246   MIB.setMemRefs(MMOs);
35247 }
35248 
35249 MachineBasicBlock *
35250 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
35251                                     MachineBasicBlock *MBB) const {
35252   const MIMetadata MIMD(MI);
35253   MachineFunction *MF = MBB->getParent();
35254   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35255   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
35256   MachineRegisterInfo &MRI = MF->getRegInfo();
35257 
35258   const BasicBlock *BB = MBB->getBasicBlock();
35259   MachineFunction::iterator I = ++MBB->getIterator();
35260 
35261   // Memory Reference
35262   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35263                                            MI.memoperands_end());
35264 
35265   unsigned DstReg;
35266   unsigned MemOpndSlot = 0;
35267 
35268   unsigned CurOp = 0;
35269 
35270   DstReg = MI.getOperand(CurOp++).getReg();
35271   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
35272   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
35273   (void)TRI;
35274   Register mainDstReg = MRI.createVirtualRegister(RC);
35275   Register restoreDstReg = MRI.createVirtualRegister(RC);
35276 
35277   MemOpndSlot = CurOp;
35278 
35279   MVT PVT = getPointerTy(MF->getDataLayout());
35280   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
35281          "Invalid Pointer Size!");
35282 
35283   // For v = setjmp(buf), we generate
35284   //
35285   // thisMBB:
35286   //  buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
35287   //  SjLjSetup restoreMBB
35288   //
35289   // mainMBB:
35290   //  v_main = 0
35291   //
35292   // sinkMBB:
35293   //  v = phi(main, restore)
35294   //
35295   // restoreMBB:
35296   //  if base pointer being used, load it from frame
35297   //  v_restore = 1
35298 
35299   MachineBasicBlock *thisMBB = MBB;
35300   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
35301   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
35302   MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
35303   MF->insert(I, mainMBB);
35304   MF->insert(I, sinkMBB);
35305   MF->push_back(restoreMBB);
35306   restoreMBB->setMachineBlockAddressTaken();
35307 
35308   MachineInstrBuilder MIB;
35309 
35310   // Transfer the remainder of BB and its successor edges to sinkMBB.
35311   sinkMBB->splice(sinkMBB->begin(), MBB,
35312                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
35313   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
35314 
35315   // thisMBB:
35316   unsigned PtrStoreOpc = 0;
35317   unsigned LabelReg = 0;
35318   const int64_t LabelOffset = 1 * PVT.getStoreSize();
35319   bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
35320                      !isPositionIndependent();
35321 
35322   // Prepare IP either in reg or imm.
35323   if (!UseImmLabel) {
35324     PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35325     const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35326     LabelReg = MRI.createVirtualRegister(PtrRC);
35327     if (Subtarget.is64Bit()) {
35328       MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::LEA64r), LabelReg)
35329               .addReg(X86::RIP)
35330               .addImm(0)
35331               .addReg(0)
35332               .addMBB(restoreMBB)
35333               .addReg(0);
35334     } else {
35335       const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
35336       MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::LEA32r), LabelReg)
35337               .addReg(XII->getGlobalBaseReg(MF))
35338               .addImm(0)
35339               .addReg(0)
35340               .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
35341               .addReg(0);
35342     }
35343   } else
35344     PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
35345   // Store IP
35346   MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrStoreOpc));
35347   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35348     if (i == X86::AddrDisp)
35349       MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
35350     else
35351       MIB.add(MI.getOperand(MemOpndSlot + i));
35352   }
35353   if (!UseImmLabel)
35354     MIB.addReg(LabelReg);
35355   else
35356     MIB.addMBB(restoreMBB);
35357   MIB.setMemRefs(MMOs);
35358 
35359   if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
35360     emitSetJmpShadowStackFix(MI, thisMBB);
35361   }
35362 
35363   // Setup
35364   MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(X86::EH_SjLj_Setup))
35365           .addMBB(restoreMBB);
35366 
35367   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
35368   MIB.addRegMask(RegInfo->getNoPreservedMask());
35369   thisMBB->addSuccessor(mainMBB);
35370   thisMBB->addSuccessor(restoreMBB);
35371 
35372   // mainMBB:
35373   //  EAX = 0
35374   BuildMI(mainMBB, MIMD, TII->get(X86::MOV32r0), mainDstReg);
35375   mainMBB->addSuccessor(sinkMBB);
35376 
35377   // sinkMBB:
35378   BuildMI(*sinkMBB, sinkMBB->begin(), MIMD, TII->get(X86::PHI), DstReg)
35379       .addReg(mainDstReg)
35380       .addMBB(mainMBB)
35381       .addReg(restoreDstReg)
35382       .addMBB(restoreMBB);
35383 
35384   // restoreMBB:
35385   if (RegInfo->hasBasePointer(*MF)) {
35386     const bool Uses64BitFramePtr =
35387         Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
35388     X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
35389     X86FI->setRestoreBasePointer(MF);
35390     Register FramePtr = RegInfo->getFrameRegister(*MF);
35391     Register BasePtr = RegInfo->getBaseRegister();
35392     unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
35393     addRegOffset(BuildMI(restoreMBB, MIMD, TII->get(Opm), BasePtr),
35394                  FramePtr, true, X86FI->getRestoreBasePointerOffset())
35395       .setMIFlag(MachineInstr::FrameSetup);
35396   }
35397   BuildMI(restoreMBB, MIMD, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
35398   BuildMI(restoreMBB, MIMD, TII->get(X86::JMP_1)).addMBB(sinkMBB);
35399   restoreMBB->addSuccessor(sinkMBB);
35400 
35401   MI.eraseFromParent();
35402   return sinkMBB;
35403 }
35404 
35405 /// Fix the shadow stack using the previously saved SSP pointer.
35406 /// \sa emitSetJmpShadowStackFix
35407 /// \param [in] MI The temporary Machine Instruction for the builtin.
35408 /// \param [in] MBB The Machine Basic Block that will be modified.
35409 /// \return The sink MBB that will perform the future indirect branch.
35410 MachineBasicBlock *
35411 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
35412                                              MachineBasicBlock *MBB) const {
35413   const MIMetadata MIMD(MI);
35414   MachineFunction *MF = MBB->getParent();
35415   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35416   MachineRegisterInfo &MRI = MF->getRegInfo();
35417 
35418   // Memory Reference
35419   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35420                                            MI.memoperands_end());
35421 
35422   MVT PVT = getPointerTy(MF->getDataLayout());
35423   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
35424 
35425   // checkSspMBB:
35426   //         xor vreg1, vreg1
35427   //         rdssp vreg1
35428   //         test vreg1, vreg1
35429   //         je sinkMBB   # Jump if Shadow Stack is not supported
35430   // fallMBB:
35431   //         mov buf+24/12(%rip), vreg2
35432   //         sub vreg1, vreg2
35433   //         jbe sinkMBB  # No need to fix the Shadow Stack
35434   // fixShadowMBB:
35435   //         shr 3/2, vreg2
35436   //         incssp vreg2  # fix the SSP according to the lower 8 bits
35437   //         shr 8, vreg2
35438   //         je sinkMBB
35439   // fixShadowLoopPrepareMBB:
35440   //         shl vreg2
35441   //         mov 128, vreg3
35442   // fixShadowLoopMBB:
35443   //         incssp vreg3
35444   //         dec vreg2
35445   //         jne fixShadowLoopMBB # Iterate until you finish fixing
35446   //                              # the Shadow Stack
35447   // sinkMBB:
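  //
  // Worked example (64-bit): a delta of 0x2040 bytes is 0x2040 >> 3 = 0x408
  // shadow-stack entries. The first incssp pops the low 8 bits (8 entries);
  // the remaining 0x400 entries are popped by 8 iterations of incssp(128) in
  // fixShadowLoopMBB.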
35448 
35449   MachineFunction::iterator I = ++MBB->getIterator();
35450   const BasicBlock *BB = MBB->getBasicBlock();
35451 
35452   MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
35453   MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
35454   MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
35455   MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
35456   MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
35457   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
35458   MF->insert(I, checkSspMBB);
35459   MF->insert(I, fallMBB);
35460   MF->insert(I, fixShadowMBB);
35461   MF->insert(I, fixShadowLoopPrepareMBB);
35462   MF->insert(I, fixShadowLoopMBB);
35463   MF->insert(I, sinkMBB);
35464 
35465   // Transfer the remainder of BB and its successor edges to sinkMBB.
35466   sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
35467                   MBB->end());
35468   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
35469 
35470   MBB->addSuccessor(checkSspMBB);
35471 
35472   // Initialize a register with zero.
35473   Register ZReg = MRI.createVirtualRegister(&X86::GR32RegClass);
35474   BuildMI(checkSspMBB, MIMD, TII->get(X86::MOV32r0), ZReg);
35475 
35476   if (PVT == MVT::i64) {
35477     Register TmpZReg = MRI.createVirtualRegister(PtrRC);
35478     BuildMI(checkSspMBB, MIMD, TII->get(X86::SUBREG_TO_REG), TmpZReg)
35479       .addImm(0)
35480       .addReg(ZReg)
35481       .addImm(X86::sub_32bit);
35482     ZReg = TmpZReg;
35483   }
35484 
35485   // Read the current SSP register value into the zeroed register.
35486   Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
35487   unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
35488   BuildMI(checkSspMBB, MIMD, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
35489 
35490   // Check whether the value read from the SSP register is zero and, if so,
35491   // jump directly to the sink.
35492   unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
35493   BuildMI(checkSspMBB, MIMD, TII->get(TestRROpc))
35494       .addReg(SSPCopyReg)
35495       .addReg(SSPCopyReg);
35496   BuildMI(checkSspMBB, MIMD, TII->get(X86::JCC_1))
35497       .addMBB(sinkMBB)
35498       .addImm(X86::COND_E);
35499   checkSspMBB->addSuccessor(sinkMBB);
35500   checkSspMBB->addSuccessor(fallMBB);
35501 
35502   // Reload the previously saved SSP register value.
35503   Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
35504   unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
35505   const int64_t SPPOffset = 3 * PVT.getStoreSize();
35506   MachineInstrBuilder MIB =
35507       BuildMI(fallMBB, MIMD, TII->get(PtrLoadOpc), PrevSSPReg);
35508   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35509     const MachineOperand &MO = MI.getOperand(i);
35510     if (i == X86::AddrDisp)
35511       MIB.addDisp(MO, SPPOffset);
35512     else if (MO.isReg()) // Don't add the whole operand, we don't want to
35513                          // preserve kill flags.
35514       MIB.addReg(MO.getReg());
35515     else
35516       MIB.add(MO);
35517   }
35518   MIB.setMemRefs(MMOs);
35519 
35520   // Subtract the current SSP from the previous SSP.
35521   Register SspSubReg = MRI.createVirtualRegister(PtrRC);
35522   unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
35523   BuildMI(fallMBB, MIMD, TII->get(SubRROpc), SspSubReg)
35524       .addReg(PrevSSPReg)
35525       .addReg(SSPCopyReg);
35526 
35527   // Jump to sink in case PrevSSPReg <= SSPCopyReg.
35528   BuildMI(fallMBB, MIMD, TII->get(X86::JCC_1))
35529       .addMBB(sinkMBB)
35530       .addImm(X86::COND_BE);
35531   fallMBB->addSuccessor(sinkMBB);
35532   fallMBB->addSuccessor(fixShadowMBB);
35533 
35534   // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
35535   unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
35536   unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
35537   Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
35538   BuildMI(fixShadowMBB, MIMD, TII->get(ShrRIOpc), SspFirstShrReg)
35539       .addReg(SspSubReg)
35540       .addImm(Offset);
35541 
35542   // Increase the SSP using only the lower 8 bits of the delta.
35543   unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
35544   BuildMI(fixShadowMBB, MIMD, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
35545 
35546   // Reset the lower 8 bits.
35547   Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
35548   BuildMI(fixShadowMBB, MIMD, TII->get(ShrRIOpc), SspSecondShrReg)
35549       .addReg(SspFirstShrReg)
35550       .addImm(8);
35551 
35552   // Jump if the result of the shift is zero.
35553   BuildMI(fixShadowMBB, MIMD, TII->get(X86::JCC_1))
35554       .addMBB(sinkMBB)
35555       .addImm(X86::COND_E);
35556   fixShadowMBB->addSuccessor(sinkMBB);
35557   fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
35558 
35559   // Do a single shift left.
35560   unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64ri : X86::SHL32ri;
35561   Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
35562   BuildMI(fixShadowLoopPrepareMBB, MIMD, TII->get(ShlR1Opc), SspAfterShlReg)
35563       .addReg(SspSecondShrReg)
35564       .addImm(1);
35565 
35566   // Save the value 128 to a register (will be used next with incssp).
35567   Register Value128InReg = MRI.createVirtualRegister(PtrRC);
35568   unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
35569   BuildMI(fixShadowLoopPrepareMBB, MIMD, TII->get(MovRIOpc), Value128InReg)
35570       .addImm(128);
35571   fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
35572 
35573   // Since incssp only looks at the lower 8 bits, we might need to do several
35574   // iterations of incssp until we finish fixing the shadow stack.
35575   Register DecReg = MRI.createVirtualRegister(PtrRC);
35576   Register CounterReg = MRI.createVirtualRegister(PtrRC);
35577   BuildMI(fixShadowLoopMBB, MIMD, TII->get(X86::PHI), CounterReg)
35578       .addReg(SspAfterShlReg)
35579       .addMBB(fixShadowLoopPrepareMBB)
35580       .addReg(DecReg)
35581       .addMBB(fixShadowLoopMBB);
35582 
35583   // Every iteration we increase the SSP by 128.
35584   BuildMI(fixShadowLoopMBB, MIMD, TII->get(IncsspOpc)).addReg(Value128InReg);
35585 
35586   // Every iteration we decrement the counter by 1.
35587   unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
35588   BuildMI(fixShadowLoopMBB, MIMD, TII->get(DecROpc), DecReg).addReg(CounterReg);
35589 
35590   // Jump if the counter is not zero yet.
35591   BuildMI(fixShadowLoopMBB, MIMD, TII->get(X86::JCC_1))
35592       .addMBB(fixShadowLoopMBB)
35593       .addImm(X86::COND_NE);
35594   fixShadowLoopMBB->addSuccessor(sinkMBB);
35595   fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
35596 
35597   return sinkMBB;
35598 }
35599 
35600 MachineBasicBlock *
35601 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
35602                                      MachineBasicBlock *MBB) const {
35603   const MIMetadata MIMD(MI);
35604   MachineFunction *MF = MBB->getParent();
35605   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35606   MachineRegisterInfo &MRI = MF->getRegInfo();
35607 
35608   // Memory Reference
35609   SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
35610                                            MI.memoperands_end());
35611 
35612   MVT PVT = getPointerTy(MF->getDataLayout());
35613   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
35614          "Invalid Pointer Size!");
35615 
35616   const TargetRegisterClass *RC =
35617     (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
35618   Register Tmp = MRI.createVirtualRegister(RC);
35619   // Since FP is only updated here but NOT referenced, it's treated as a GPR.
35620   const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
35621   Register FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
35622   Register SP = RegInfo->getStackRegister();
35623 
35624   MachineInstrBuilder MIB;
35625 
35626   const int64_t LabelOffset = 1 * PVT.getStoreSize();
35627   const int64_t SPOffset = 2 * PVT.getStoreSize();
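  // The jump buffer is read as three pointer-sized slots: slot 0 holds the
  // frame pointer, slot 1 the destination label (reloaded into Tmp below),
  // and slot 2 the stack pointer.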
35628 
35629   unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
35630   unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
35631 
35632   MachineBasicBlock *thisMBB = MBB;
35633 
35634   // When CET and the shadow stack are enabled, we need to fix the shadow stack.
35635   if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
35636     thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
35637   }
35638 
35639   // Reload FP
35640   MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), FP);
35641   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35642     const MachineOperand &MO = MI.getOperand(i);
35643     if (MO.isReg()) // Don't add the whole operand, we don't want to
35644                     // preserve kill flags.
35645       MIB.addReg(MO.getReg());
35646     else
35647       MIB.add(MO);
35648   }
35649   MIB.setMemRefs(MMOs);
35650 
35651   // Reload IP
35652   MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), Tmp);
35653   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35654     const MachineOperand &MO = MI.getOperand(i);
35655     if (i == X86::AddrDisp)
35656       MIB.addDisp(MO, LabelOffset);
35657     else if (MO.isReg()) // Don't add the whole operand, we don't want to
35658                          // preserve kill flags.
35659       MIB.addReg(MO.getReg());
35660     else
35661       MIB.add(MO);
35662   }
35663   MIB.setMemRefs(MMOs);
35664 
35665   // Reload SP
35666   MIB = BuildMI(*thisMBB, MI, MIMD, TII->get(PtrLoadOpc), SP);
35667   for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
35668     if (i == X86::AddrDisp)
35669       MIB.addDisp(MI.getOperand(i), SPOffset);
35670     else
35671       MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
35672                                  // the last instruction of the expansion.
35673   }
35674   MIB.setMemRefs(MMOs);
35675 
35676   // Jump
35677   BuildMI(*thisMBB, MI, MIMD, TII->get(IJmpOpc)).addReg(Tmp);
35678 
35679   MI.eraseFromParent();
35680   return thisMBB;
35681 }
35682 
35683 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
35684                                                MachineBasicBlock *MBB,
35685                                                MachineBasicBlock *DispatchBB,
35686                                                int FI) const {
35687   const MIMetadata MIMD(MI);
35688   MachineFunction *MF = MBB->getParent();
35689   MachineRegisterInfo *MRI = &MF->getRegInfo();
35690   const X86InstrInfo *TII = Subtarget.getInstrInfo();
35691 
35692   MVT PVT = getPointerTy(MF->getDataLayout());
35693   assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
35694 
35695   unsigned Op = 0;
35696   unsigned VR = 0;
35697 
35698   bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
35699                      !isPositionIndependent();
35700 
35701   if (UseImmLabel) {
35702     Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
35703   } else {
35704     const TargetRegisterClass *TRC =
35705         (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
35706     VR = MRI->createVirtualRegister(TRC);
35707     Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
35708 
35709     if (Subtarget.is64Bit())
35710       BuildMI(*MBB, MI, MIMD, TII->get(X86::LEA64r), VR)
35711           .addReg(X86::RIP)
35712           .addImm(1)
35713           .addReg(0)
35714           .addMBB(DispatchBB)
35715           .addReg(0);
35716     else
35717       BuildMI(*MBB, MI, MIMD, TII->get(X86::LEA32r), VR)
35718           .addReg(0) /* TII->getGlobalBaseReg(MF) */
35719           .addImm(1)
35720           .addReg(0)
35721           .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
35722           .addReg(0);
35723   }
35724 
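  // Store the dispatch block's address into the function context at a fixed
  // displacement (56 in 64-bit mode, 36 in 32-bit mode); this appears to be
  // the slot the SjLj runtime uses to transfer control back into this
  // function when unwinding.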
35725   MachineInstrBuilder MIB = BuildMI(*MBB, MI, MIMD, TII->get(Op));
35726   addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
35727   if (UseImmLabel)
35728     MIB.addMBB(DispatchBB);
35729   else
35730     MIB.addReg(VR);
35731 }
35732 
35733 MachineBasicBlock *
35734 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
35735                                          MachineBasicBlock *BB) const {
35736   const MIMetadata MIMD(MI);
35737   MachineFunction *MF = BB->getParent();
35738   MachineRegisterInfo *MRI = &MF->getRegInfo();
35739   const X86InstrInfo *TII = Subtarget.getInstrInfo();
35740   int FI = MF->getFrameInfo().getFunctionContextIndex();
35741 
35742   // Get a mapping of the call site numbers to all of the landing pads they're
35743   // associated with.
35744   DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
35745   unsigned MaxCSNum = 0;
35746   for (auto &MBB : *MF) {
35747     if (!MBB.isEHPad())
35748       continue;
35749 
35750     MCSymbol *Sym = nullptr;
35751     for (const auto &MI : MBB) {
35752       if (MI.isDebugInstr())
35753         continue;
35754 
35755       assert(MI.isEHLabel() && "expected EH_LABEL");
35756       Sym = MI.getOperand(0).getMCSymbol();
35757       break;
35758     }
35759 
35760     if (!MF->hasCallSiteLandingPad(Sym))
35761       continue;
35762 
35763     for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
35764       CallSiteNumToLPad[CSI].push_back(&MBB);
35765       MaxCSNum = std::max(MaxCSNum, CSI);
35766     }
35767   }
35768 
35769   // Get an ordered list of the machine basic blocks for the jump table.
35770   std::vector<MachineBasicBlock *> LPadList;
35771   SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
35772   LPadList.reserve(CallSiteNumToLPad.size());
35773 
35774   for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
35775     for (auto &LP : CallSiteNumToLPad[CSI]) {
35776       LPadList.push_back(LP);
35777       InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
35778     }
35779   }
35780 
35781   assert(!LPadList.empty() &&
35782          "No landing pad destinations for the dispatch jump table!");
35783 
35784   // Create the MBBs for the dispatch code.
35785 
35786   // Shove the dispatch's address into the return slot in the function context.
35787   MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
35788   DispatchBB->setIsEHPad(true);
35789 
35790   MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
35791   BuildMI(TrapBB, MIMD, TII->get(X86::TRAP));
35792   DispatchBB->addSuccessor(TrapBB);
35793 
35794   MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
35795   DispatchBB->addSuccessor(DispContBB);
35796 
35797   // Insert MBBs.
35798   MF->push_back(DispatchBB);
35799   MF->push_back(DispContBB);
35800   MF->push_back(TrapBB);
35801 
35802   // Insert code into the entry block that creates and registers the function
35803   // context.
35804   SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
35805 
35806   // Create the jump table and associated information
35807   unsigned JTE = getJumpTableEncoding();
35808   MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
35809   unsigned MJTI = JTI->createJumpTableIndex(LPadList);
35810 
35811   const X86RegisterInfo &RI = TII->getRegisterInfo();
35812   // Add a register mask with no preserved registers.  This results in all
35813   // registers being marked as clobbered.
35814   if (RI.hasBasePointer(*MF)) {
35815     const bool FPIs64Bit =
35816         Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
35817     X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
35818     MFI->setRestoreBasePointer(MF);
35819 
35820     Register FP = RI.getFrameRegister(*MF);
35821     Register BP = RI.getBaseRegister();
35822     unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
35823     addRegOffset(BuildMI(DispatchBB, MIMD, TII->get(Op), BP), FP, true,
35824                  MFI->getRestoreBasePointerOffset())
35825         .addRegMask(RI.getNoPreservedMask());
35826   } else {
35827     BuildMI(DispatchBB, MIMD, TII->get(X86::NOOP))
35828         .addRegMask(RI.getNoPreservedMask());
35829   }
35830 
35831   // IReg is used as an index in a memory operand and therefore can't be SP
35832   Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
35833   addFrameReference(BuildMI(DispatchBB, MIMD, TII->get(X86::MOV32rm), IReg), FI,
35834                     Subtarget.is64Bit() ? 8 : 4);
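  // The value just loaded is the call-site index the runtime stored in the
  // function context; anything at or above LPadList.size() falls through to
  // the trap block below.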
35835   BuildMI(DispatchBB, MIMD, TII->get(X86::CMP32ri))
35836       .addReg(IReg)
35837       .addImm(LPadList.size());
35838   BuildMI(DispatchBB, MIMD, TII->get(X86::JCC_1))
35839       .addMBB(TrapBB)
35840       .addImm(X86::COND_AE);
35841 
35842   if (Subtarget.is64Bit()) {
35843     Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
35844     Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
35845 
35846     // leaq .LJTI0_0(%rip), BReg
35847     BuildMI(DispContBB, MIMD, TII->get(X86::LEA64r), BReg)
35848         .addReg(X86::RIP)
35849         .addImm(1)
35850         .addReg(0)
35851         .addJumpTableIndex(MJTI)
35852         .addReg(0);
35853     // movzx IReg64, IReg
35854     BuildMI(DispContBB, MIMD, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
35855         .addImm(0)
35856         .addReg(IReg)
35857         .addImm(X86::sub_32bit);
35858 
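    // The sequence below depends on the jump table encoding: EK_BlockAddress
    // tables hold absolute 8-byte pointers that can be jumped through
    // directly, while EK_LabelDifference32 tables hold 32-bit offsets relative
    // to the table base that must be sign-extended and added back to BReg.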
35859     switch (JTE) {
35860     case MachineJumpTableInfo::EK_BlockAddress:
35861       // jmpq *(BReg,IReg64,8)
35862       BuildMI(DispContBB, MIMD, TII->get(X86::JMP64m))
35863           .addReg(BReg)
35864           .addImm(8)
35865           .addReg(IReg64)
35866           .addImm(0)
35867           .addReg(0);
35868       break;
35869     case MachineJumpTableInfo::EK_LabelDifference32: {
35870       Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
35871       Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
35872       Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
35873 
35874       // movl (BReg,IReg64,4), OReg
35875       BuildMI(DispContBB, MIMD, TII->get(X86::MOV32rm), OReg)
35876           .addReg(BReg)
35877           .addImm(4)
35878           .addReg(IReg64)
35879           .addImm(0)
35880           .addReg(0);
35881       // movsx OReg64, OReg
35882       BuildMI(DispContBB, MIMD, TII->get(X86::MOVSX64rr32), OReg64)
35883           .addReg(OReg);
35884       // addq BReg, OReg64, TReg
35885       BuildMI(DispContBB, MIMD, TII->get(X86::ADD64rr), TReg)
35886           .addReg(OReg64)
35887           .addReg(BReg);
35888       // jmpq *TReg
35889       BuildMI(DispContBB, MIMD, TII->get(X86::JMP64r)).addReg(TReg);
35890       break;
35891     }
35892     default:
35893       llvm_unreachable("Unexpected jump table encoding");
35894     }
35895   } else {
35896     // jmpl *.LJTI0_0(,IReg,4)
35897     BuildMI(DispContBB, MIMD, TII->get(X86::JMP32m))
35898         .addReg(0)
35899         .addImm(4)
35900         .addReg(IReg)
35901         .addJumpTableIndex(MJTI)
35902         .addReg(0);
35903   }
35904 
35905   // Add the jump table entries as successors to the MBB.
35906   SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
35907   for (auto &LP : LPadList)
35908     if (SeenMBBs.insert(LP).second)
35909       DispContBB->addSuccessor(LP);
35910 
35911   // N.B. the order the invoke BBs are processed in doesn't matter here.
35912   SmallVector<MachineBasicBlock *, 64> MBBLPads;
35913   const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
35914   for (MachineBasicBlock *MBB : InvokeBBs) {
35915     // Remove the landing pad successor from the invoke block and replace it
35916     // with the new dispatch block.
35917     // Keep a copy of Successors since it's modified inside the loop.
35918     SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
35919                                                    MBB->succ_rend());
35920     // FIXME: Avoid quadratic complexity.
35921     for (auto *MBBS : Successors) {
35922       if (MBBS->isEHPad()) {
35923         MBB->removeSuccessor(MBBS);
35924         MBBLPads.push_back(MBBS);
35925       }
35926     }
35927 
35928     MBB->addSuccessor(DispatchBB);
35929 
35930     // Find the invoke call and mark all of the callee-saved registers as
35931     // 'implicit defined' so that they're spilled.  This prevents instructions
35932     // from being moved to before the EH block, where they would never be
35933     // executed.
35934     for (auto &II : reverse(*MBB)) {
35935       if (!II.isCall())
35936         continue;
35937 
35938       DenseMap<unsigned, bool> DefRegs;
35939       for (auto &MOp : II.operands())
35940         if (MOp.isReg())
35941           DefRegs[MOp.getReg()] = true;
35942 
35943       MachineInstrBuilder MIB(*MF, &II);
35944       for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
35945         unsigned Reg = SavedRegs[RegIdx];
35946         if (!DefRegs[Reg])
35947           MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
35948       }
35949 
35950       break;
35951     }
35952   }
35953 
35954   // Mark all former landing pads as non-landing pads.  The dispatch is the only
35955   // landing pad now.
35956   for (auto &LP : MBBLPads)
35957     LP->setIsEHPad(false);
35958 
35959   // The instruction is gone now.
35960   MI.eraseFromParent();
35961   return BB;
35962 }
35963 
35964 MachineBasicBlock *
35965 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
35966                                                MachineBasicBlock *BB) const {
35967   MachineFunction *MF = BB->getParent();
35968   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
35969   const MIMetadata MIMD(MI);
35970 
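  // AMX pseudo instructions encode tile registers as immediates; this helper
  // maps an immediate in [0, 7] to the corresponding physical TMM register.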
35971   auto TMMImmToTMMReg = [](unsigned Imm) {
35972     assert (Imm < 8 && "Illegal tmm index");
35973     return X86::TMM0 + Imm;
35974   };
35975   switch (MI.getOpcode()) {
35976   default: llvm_unreachable("Unexpected instr type to insert");
35977   case X86::TLS_addr32:
35978   case X86::TLS_addr64:
35979   case X86::TLS_addrX32:
35980   case X86::TLS_base_addr32:
35981   case X86::TLS_base_addr64:
35982   case X86::TLS_base_addrX32:
35983     return EmitLoweredTLSAddr(MI, BB);
35984   case X86::INDIRECT_THUNK_CALL32:
35985   case X86::INDIRECT_THUNK_CALL64:
35986   case X86::INDIRECT_THUNK_TCRETURN32:
35987   case X86::INDIRECT_THUNK_TCRETURN64:
35988     return EmitLoweredIndirectThunk(MI, BB);
35989   case X86::CATCHRET:
35990     return EmitLoweredCatchRet(MI, BB);
35991   case X86::SEG_ALLOCA_32:
35992   case X86::SEG_ALLOCA_64:
35993     return EmitLoweredSegAlloca(MI, BB);
35994   case X86::PROBED_ALLOCA_32:
35995   case X86::PROBED_ALLOCA_64:
35996     return EmitLoweredProbedAlloca(MI, BB);
35997   case X86::TLSCall_32:
35998   case X86::TLSCall_64:
35999     return EmitLoweredTLSCall(MI, BB);
36000   case X86::CMOV_FR16:
36001   case X86::CMOV_FR16X:
36002   case X86::CMOV_FR32:
36003   case X86::CMOV_FR32X:
36004   case X86::CMOV_FR64:
36005   case X86::CMOV_FR64X:
36006   case X86::CMOV_GR8:
36007   case X86::CMOV_GR16:
36008   case X86::CMOV_GR32:
36009   case X86::CMOV_RFP32:
36010   case X86::CMOV_RFP64:
36011   case X86::CMOV_RFP80:
36012   case X86::CMOV_VR64:
36013   case X86::CMOV_VR128:
36014   case X86::CMOV_VR128X:
36015   case X86::CMOV_VR256:
36016   case X86::CMOV_VR256X:
36017   case X86::CMOV_VR512:
36018   case X86::CMOV_VK1:
36019   case X86::CMOV_VK2:
36020   case X86::CMOV_VK4:
36021   case X86::CMOV_VK8:
36022   case X86::CMOV_VK16:
36023   case X86::CMOV_VK32:
36024   case X86::CMOV_VK64:
36025     return EmitLoweredSelect(MI, BB);
36026 
36027   case X86::FP80_ADDr:
36028   case X86::FP80_ADDm32: {
36029     // Change the floating point control register to use double extended
36030     // precision when performing the addition.
36031     int OrigCWFrameIdx =
36032         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36033     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FNSTCW16m)),
36034                       OrigCWFrameIdx);
36035 
36036     // Load the old value of the control word...
36037     Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36038     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOVZX32rm16), OldCW),
36039                       OrigCWFrameIdx);
36040 
36041     // OR 0b11 into bits 8 and 9. 0b11 is the encoding for double extended
36042     // precision.
36043     Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36044     BuildMI(*BB, MI, MIMD, TII->get(X86::OR32ri), NewCW)
36045         .addReg(OldCW, RegState::Kill)
36046         .addImm(0x300);
36047 
36048     // Extract to 16 bits.
36049     Register NewCW16 =
36050         MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
36051     BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), NewCW16)
36052         .addReg(NewCW, RegState::Kill, X86::sub_16bit);
36053 
36054     // Prepare memory for FLDCW.
36055     int NewCWFrameIdx =
36056         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36057     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOV16mr)),
36058                       NewCWFrameIdx)
36059         .addReg(NewCW16, RegState::Kill);
36060 
36061     // Reload the modified control word now...
36062     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36063                       NewCWFrameIdx);
36064 
36065     // Do the addition.
36066     if (MI.getOpcode() == X86::FP80_ADDr) {
36067       BuildMI(*BB, MI, MIMD, TII->get(X86::ADD_Fp80))
36068           .add(MI.getOperand(0))
36069           .add(MI.getOperand(1))
36070           .add(MI.getOperand(2));
36071     } else {
36072       BuildMI(*BB, MI, MIMD, TII->get(X86::ADD_Fp80m32))
36073           .add(MI.getOperand(0))
36074           .add(MI.getOperand(1))
36075           .add(MI.getOperand(2))
36076           .add(MI.getOperand(3))
36077           .add(MI.getOperand(4))
36078           .add(MI.getOperand(5))
36079           .add(MI.getOperand(6));
36080     }
36081 
36082     // Reload the original control word now.
36083     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36084                       OrigCWFrameIdx);
36085 
36086     MI.eraseFromParent(); // The pseudo instruction is gone now.
36087     return BB;
36088   }
36089 
36090   case X86::FP32_TO_INT16_IN_MEM:
36091   case X86::FP32_TO_INT32_IN_MEM:
36092   case X86::FP32_TO_INT64_IN_MEM:
36093   case X86::FP64_TO_INT16_IN_MEM:
36094   case X86::FP64_TO_INT32_IN_MEM:
36095   case X86::FP64_TO_INT64_IN_MEM:
36096   case X86::FP80_TO_INT16_IN_MEM:
36097   case X86::FP80_TO_INT32_IN_MEM:
36098   case X86::FP80_TO_INT64_IN_MEM: {
36099     // Change the floating point control register to use "round towards zero"
36100     // mode when truncating to an integer value.
36101     int OrigCWFrameIdx =
36102         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36103     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FNSTCW16m)),
36104                       OrigCWFrameIdx);
36105 
36106     // Load the old value of the control word...
36107     Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36108     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOVZX32rm16), OldCW),
36109                       OrigCWFrameIdx);
36110 
36111     // OR 0b11 into bits 10 and 11. 0b11 is the encoding for round toward zero.
36112     Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
36113     BuildMI(*BB, MI, MIMD, TII->get(X86::OR32ri), NewCW)
36114       .addReg(OldCW, RegState::Kill).addImm(0xC00);
36115 
36116     // Extract to 16 bits.
36117     Register NewCW16 =
36118         MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
36119     BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), NewCW16)
36120       .addReg(NewCW, RegState::Kill, X86::sub_16bit);
36121 
36122     // Prepare memory for FLDCW.
36123     int NewCWFrameIdx =
36124         MF->getFrameInfo().CreateStackObject(2, Align(2), false);
36125     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::MOV16mr)),
36126                       NewCWFrameIdx)
36127       .addReg(NewCW16, RegState::Kill);
36128 
36129     // Reload the modified control word now...
36130     addFrameReference(BuildMI(*BB, MI, MIMD,
36131                               TII->get(X86::FLDCW16m)), NewCWFrameIdx);
36132 
36133     // Get the X86 opcode to use.
36134     unsigned Opc;
36135     switch (MI.getOpcode()) {
36136     default: llvm_unreachable("illegal opcode!");
36137     case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
36138     case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
36139     case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
36140     case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
36141     case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
36142     case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
36143     case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
36144     case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
36145     case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
36146     }
36147 
36148     X86AddressMode AM = getAddressFromInstr(&MI, 0);
36149     addFullAddress(BuildMI(*BB, MI, MIMD, TII->get(Opc)), AM)
36150         .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
36151 
36152     // Reload the original control word now.
36153     addFrameReference(BuildMI(*BB, MI, MIMD, TII->get(X86::FLDCW16m)),
36154                       OrigCWFrameIdx);
36155 
36156     MI.eraseFromParent(); // The pseudo instruction is gone now.
36157     return BB;
36158   }
36159 
36160   // xbegin
36161   case X86::XBEGIN:
36162     return emitXBegin(MI, BB, Subtarget.getInstrInfo());
36163 
36164   case X86::VAARG_64:
36165   case X86::VAARG_X32:
36166     return EmitVAARGWithCustomInserter(MI, BB);
36167 
36168   case X86::EH_SjLj_SetJmp32:
36169   case X86::EH_SjLj_SetJmp64:
36170     return emitEHSjLjSetJmp(MI, BB);
36171 
36172   case X86::EH_SjLj_LongJmp32:
36173   case X86::EH_SjLj_LongJmp64:
36174     return emitEHSjLjLongJmp(MI, BB);
36175 
36176   case X86::Int_eh_sjlj_setup_dispatch:
36177     return EmitSjLjDispatchBlock(MI, BB);
36178 
36179   case TargetOpcode::STATEPOINT:
36180     // As an implementation detail, STATEPOINT shares the STACKMAP format at
36181     // this point in the process.  We diverge later.
36182     return emitPatchPoint(MI, BB);
36183 
36184   case TargetOpcode::STACKMAP:
36185   case TargetOpcode::PATCHPOINT:
36186     return emitPatchPoint(MI, BB);
36187 
36188   case TargetOpcode::PATCHABLE_EVENT_CALL:
36189   case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
36190     return BB;
36191 
36192   case X86::LCMPXCHG8B: {
36193     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36194     // In addition to the four E[ABCD] registers implied by its encoding,
36195     // CMPXCHG8B requires a memory operand. If the current target is i686 and
36196     // the current function needs a base pointer
36197     // - which is ESI on i686 - the register allocator would not be able to
36198     // allocate registers for an address of the form X(%reg, %reg, Y):
36199     // there would never be enough unreserved registers during regalloc
36200     // (without the need for a base ptr the only option would be X(%edi, %esi, Y)).
36201     // We give the register allocator a hand by precomputing the address in
36202     // a new vreg using LEA.
36203 
36204     // If it is not i686 or there is no base pointer - nothing to do here.
36205     if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
36206       return BB;
36207 
36208     // Even though this code does not necessarily need the base pointer to
36209     // be ESI, we check for that. The reason: if this assert fails, something
36210     // has changed in the compiler's base pointer handling, which most
36211     // probably has to be addressed here as well.
36212     assert(TRI->getBaseRegister() == X86::ESI &&
36213            "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
36214            "base pointer in mind");
36215 
36216     MachineRegisterInfo &MRI = MF->getRegInfo();
36217     MVT SPTy = getPointerTy(MF->getDataLayout());
36218     const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
36219     Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
36220 
36221     X86AddressMode AM = getAddressFromInstr(&MI, 0);
36222     // Regalloc does not need any help when the memory operand of CMPXCHG8B
36223     // does not use an index register.
36224     if (AM.IndexReg == X86::NoRegister)
36225       return BB;
36226 
36227     // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
36228     // four operand definitions that are E[ABCD] registers. We skip them and
36229     // then insert the LEA.
36230     MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
36231     while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
36232                                    RMBBI->definesRegister(X86::EBX) ||
36233                                    RMBBI->definesRegister(X86::ECX) ||
36234                                    RMBBI->definesRegister(X86::EDX))) {
36235       ++RMBBI;
36236     }
36237     MachineBasicBlock::iterator MBBI(RMBBI);
36238     addFullAddress(
36239         BuildMI(*BB, *MBBI, MIMD, TII->get(X86::LEA32r), computedAddrVReg), AM);
36240 
36241     setDirectAddressInInstr(&MI, 0, computedAddrVReg);
36242 
36243     return BB;
36244   }
36245   case X86::LCMPXCHG16B_NO_RBX: {
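    // CMPXCHG16B implicitly uses RBX. If RBX also happens to be the base
    // pointer, it must be preserved around the instruction, which the
    // LCMPXCHG16B_SAVE_RBX pseudo below takes care of.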
36246     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36247     Register BasePtr = TRI->getBaseRegister();
36248     if (TRI->hasBasePointer(*MF) &&
36249         (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
36250       if (!BB->isLiveIn(BasePtr))
36251         BB->addLiveIn(BasePtr);
36252       // Save RBX into a virtual register.
36253       Register SaveRBX =
36254           MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36255       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), SaveRBX)
36256           .addReg(X86::RBX);
36257       Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36258       MachineInstrBuilder MIB =
36259           BuildMI(*BB, MI, MIMD, TII->get(X86::LCMPXCHG16B_SAVE_RBX), Dst);
36260       for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
36261         MIB.add(MI.getOperand(Idx));
36262       MIB.add(MI.getOperand(X86::AddrNumOperands));
36263       MIB.addReg(SaveRBX);
36264     } else {
36265       // Simple case, just copy the virtual register to RBX.
36266       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::RBX)
36267           .add(MI.getOperand(X86::AddrNumOperands));
36268       MachineInstrBuilder MIB =
36269           BuildMI(*BB, MI, MIMD, TII->get(X86::LCMPXCHG16B));
36270       for (unsigned Idx = 0; Idx < X86::AddrNumOperands; ++Idx)
36271         MIB.add(MI.getOperand(Idx));
36272     }
36273     MI.eraseFromParent();
36274     return BB;
36275   }
36276   case X86::MWAITX: {
36277     const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
36278     Register BasePtr = TRI->getBaseRegister();
36279     bool IsRBX = (BasePtr == X86::RBX || BasePtr == X86::EBX);
36280     // If there is no need to save the base pointer, we generate MWAITXrrr;
36281     // otherwise we generate the pseudo MWAITX_SAVE_RBX.
36282     if (!IsRBX || !TRI->hasBasePointer(*MF)) {
36283       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::ECX)
36284           .addReg(MI.getOperand(0).getReg());
36285       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EAX)
36286           .addReg(MI.getOperand(1).getReg());
36287       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EBX)
36288           .addReg(MI.getOperand(2).getReg());
36289       BuildMI(*BB, MI, MIMD, TII->get(X86::MWAITXrrr));
36290       MI.eraseFromParent();
36291     } else {
36292       if (!BB->isLiveIn(BasePtr)) {
36293         BB->addLiveIn(BasePtr);
36294       }
36295       // Parameters can be copied into ECX and EAX but not EBX yet.
36296       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::ECX)
36297           .addReg(MI.getOperand(0).getReg());
36298       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), X86::EAX)
36299           .addReg(MI.getOperand(1).getReg());
36300       assert(Subtarget.is64Bit() && "Expected 64-bit mode!");
36301       // Save RBX into a virtual register.
36302       Register SaveRBX =
36303           MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36304       BuildMI(*BB, MI, MIMD, TII->get(TargetOpcode::COPY), SaveRBX)
36305           .addReg(X86::RBX);
36306       // Generate mwaitx pseudo.
36307       Register Dst = MF->getRegInfo().createVirtualRegister(&X86::GR64RegClass);
36308       BuildMI(*BB, MI, MIMD, TII->get(X86::MWAITX_SAVE_RBX))
36309           .addDef(Dst) // Destination tied in with SaveRBX.
36310           .addReg(MI.getOperand(2).getReg()) // input value of EBX.
36311           .addUse(SaveRBX);                  // Save of base pointer.
36312       MI.eraseFromParent();
36313     }
36314     return BB;
36315   }
36316   case TargetOpcode::PREALLOCATED_SETUP: {
36317     assert(Subtarget.is32Bit() && "preallocated only used in 32-bit");
36318     auto MFI = MF->getInfo<X86MachineFunctionInfo>();
36319     MFI->setHasPreallocatedCall(true);
36320     int64_t PreallocatedId = MI.getOperand(0).getImm();
36321     size_t StackAdjustment = MFI->getPreallocatedStackSize(PreallocatedId);
36322     assert(StackAdjustment != 0 && "0 stack adjustment");
36323     LLVM_DEBUG(dbgs() << "PREALLOCATED_SETUP stack adjustment "
36324                       << StackAdjustment << "\n");
36325     BuildMI(*BB, MI, MIMD, TII->get(X86::SUB32ri), X86::ESP)
36326         .addReg(X86::ESP)
36327         .addImm(StackAdjustment);
36328     MI.eraseFromParent();
36329     return BB;
36330   }
36331   case TargetOpcode::PREALLOCATED_ARG: {
36332     assert(Subtarget.is32Bit() && "preallocated calls only used in 32-bit");
36333     int64_t PreallocatedId = MI.getOperand(1).getImm();
36334     int64_t ArgIdx = MI.getOperand(2).getImm();
36335     auto MFI = MF->getInfo<X86MachineFunctionInfo>();
36336     size_t ArgOffset = MFI->getPreallocatedArgOffsets(PreallocatedId)[ArgIdx];
36337     LLVM_DEBUG(dbgs() << "PREALLOCATED_ARG arg index " << ArgIdx
36338                       << ", arg offset " << ArgOffset << "\n");
36339     // stack pointer + offset
36340     addRegOffset(BuildMI(*BB, MI, MIMD, TII->get(X86::LEA32r),
36341                          MI.getOperand(0).getReg()),
36342                  X86::ESP, false, ArgOffset);
36343     MI.eraseFromParent();
36344     return BB;
36345   }
36346   case X86::PTDPBSSD:
36347   case X86::PTDPBSUD:
36348   case X86::PTDPBUSD:
36349   case X86::PTDPBUUD:
36350   case X86::PTDPBF16PS:
36351   case X86::PTDPFP16PS: {
36352     unsigned Opc;
36353     switch (MI.getOpcode()) {
36354     default: llvm_unreachable("illegal opcode!");
36355     case X86::PTDPBSSD: Opc = X86::TDPBSSD; break;
36356     case X86::PTDPBSUD: Opc = X86::TDPBSUD; break;
36357     case X86::PTDPBUSD: Opc = X86::TDPBUSD; break;
36358     case X86::PTDPBUUD: Opc = X86::TDPBUUD; break;
36359     case X86::PTDPBF16PS: Opc = X86::TDPBF16PS; break;
36360     case X86::PTDPFP16PS: Opc = X86::TDPFP16PS; break;
36361     }
36362 
36363     MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
36364     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
36365     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
36366     MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
36367     MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
36368 
36369     MI.eraseFromParent(); // The pseudo is gone now.
36370     return BB;
36371   }
36372   case X86::PTILEZERO: {
36373     unsigned Imm = MI.getOperand(0).getImm();
36374     BuildMI(*BB, MI, MIMD, TII->get(X86::TILEZERO), TMMImmToTMMReg(Imm));
36375     MI.eraseFromParent(); // The pseudo is gone now.
36376     return BB;
36377   }
36378   case X86::PTILELOADD:
36379   case X86::PTILELOADDT1:
36380   case X86::PTILESTORED: {
36381     unsigned Opc;
36382     switch (MI.getOpcode()) {
36383     default: llvm_unreachable("illegal opcode!");
36384 #define GET_EGPR_IF_ENABLED(OPC) (Subtarget.hasEGPR() ? OPC##_EVEX : OPC)
36385     case X86::PTILELOADD:
36386       Opc = GET_EGPR_IF_ENABLED(X86::TILELOADD);
36387       break;
36388     case X86::PTILELOADDT1:
36389       Opc = GET_EGPR_IF_ENABLED(X86::TILELOADDT1);
36390       break;
36391     case X86::PTILESTORED:
36392       Opc = GET_EGPR_IF_ENABLED(X86::TILESTORED);
36393       break;
36394 #undef GET_EGPR_IF_ENABLED
36395     }
36396 
36397     MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
36398     unsigned CurOp = 0;
36399     if (Opc != X86::TILESTORED && Opc != X86::TILESTORED_EVEX)
36400       MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
36401                  RegState::Define);
36402 
36403     MIB.add(MI.getOperand(CurOp++)); // base
36404     MIB.add(MI.getOperand(CurOp++)); // scale
36405     MIB.add(MI.getOperand(CurOp++)); // index -- stride
36406     MIB.add(MI.getOperand(CurOp++)); // displacement
36407     MIB.add(MI.getOperand(CurOp++)); // segment
36408 
36409     if (Opc == X86::TILESTORED || Opc == X86::TILESTORED_EVEX)
36410       MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
36411                  RegState::Undef);
36412 
36413     MI.eraseFromParent(); // The pseudo is gone now.
36414     return BB;
36415   }
36416   case X86::PTCMMIMFP16PS:
36417   case X86::PTCMMRLFP16PS: {
36418     const MIMetadata MIMD(MI);
36419     unsigned Opc;
36420     switch (MI.getOpcode()) {
36421     default: llvm_unreachable("Unexpected instruction!");
36422     case X86::PTCMMIMFP16PS:     Opc = X86::TCMMIMFP16PS;     break;
36423     case X86::PTCMMRLFP16PS:     Opc = X86::TCMMRLFP16PS;     break;
36424     }
36425     MachineInstrBuilder MIB = BuildMI(*BB, MI, MIMD, TII->get(Opc));
36426     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
36427     MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
36428     MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
36429     MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
36430     MI.eraseFromParent(); // The pseudo is gone now.
36431     return BB;
36432   }
36433   }
36434 }
36435 
36436 //===----------------------------------------------------------------------===//
36437 //                           X86 Optimization Hooks
36438 //===----------------------------------------------------------------------===//
36439 
36440 bool
36441 X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
36442                                                 const APInt &DemandedBits,
36443                                                 const APInt &DemandedElts,
36444                                                 TargetLoweringOpt &TLO) const {
36445   EVT VT = Op.getValueType();
36446   unsigned Opcode = Op.getOpcode();
36447   unsigned EltSize = VT.getScalarSizeInBits();
36448 
36449   if (VT.isVector()) {
36450     // If the constant is all sign bits within the active bits, then we should
36451     // sign-extend it across the entire constant to allow it to act as a boolean
36452     // constant vector.
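    // For example, with v4i32 lanes where only the low 8 bits are demanded, a
    // constant element of 0x000000FF is all sign bits when truncated to i8, so
    // it can be sign-extended in-register to 0xFFFFFFFF and then behave as an
    // all-ones boolean lane.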
36453     auto NeedsSignExtension = [&](SDValue V, unsigned ActiveBits) {
36454       if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
36455         return false;
36456       for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
36457         if (!DemandedElts[i] || V.getOperand(i).isUndef())
36458           continue;
36459         const APInt &Val = V.getConstantOperandAPInt(i);
36460         if (Val.getBitWidth() > Val.getNumSignBits() &&
36461             Val.trunc(ActiveBits).getNumSignBits() == ActiveBits)
36462           return true;
36463       }
36464       return false;
36465     };
36466     // For vectors - if we have a constant, then try to sign extend.
36467     // TODO: Handle AND cases.
36468     unsigned ActiveBits = DemandedBits.getActiveBits();
36469     if (EltSize > ActiveBits && EltSize > 1 && isTypeLegal(VT) &&
36470         (Opcode == ISD::OR || Opcode == ISD::XOR || Opcode == X86ISD::ANDNP) &&
36471         NeedsSignExtension(Op.getOperand(1), ActiveBits)) {
36472       EVT ExtSVT = EVT::getIntegerVT(*TLO.DAG.getContext(), ActiveBits);
36473       EVT ExtVT = EVT::getVectorVT(*TLO.DAG.getContext(), ExtSVT,
36474                                    VT.getVectorNumElements());
36475       SDValue NewC =
36476           TLO.DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(Op), VT,
36477                           Op.getOperand(1), TLO.DAG.getValueType(ExtVT));
36478       SDValue NewOp =
36479           TLO.DAG.getNode(Opcode, SDLoc(Op), VT, Op.getOperand(0), NewC);
36480       return TLO.CombineTo(Op, NewOp);
36481     }
36482     return false;
36483   }
36484 
36485   // Only optimize Ands to prevent shrinking a constant that could be
36486   // matched by movzx.
36487   if (Opcode != ISD::AND)
36488     return false;
36489 
36490   // Make sure the RHS really is a constant.
36491   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
36492   if (!C)
36493     return false;
36494 
36495   const APInt &Mask = C->getAPIntValue();
36496 
36497   // Clear all non-demanded bits initially.
36498   APInt ShrunkMask = Mask & DemandedBits;
36499 
36500   // Find the width of the shrunk mask.
36501   unsigned Width = ShrunkMask.getActiveBits();
36502 
36503   // If the mask is all 0s there's nothing to do here.
36504   if (Width == 0)
36505     return false;
36506 
36507   // Find the next power of 2 width, rounding up to a byte.
36508   Width = llvm::bit_ceil(std::max(Width, 8U));
36509   // Truncate the width to size to handle illegal types.
36510   Width = std::min(Width, EltSize);
36511 
36512   // Calculate a possible zero extend mask for this constant.
36513   APInt ZeroExtendMask = APInt::getLowBitsSet(EltSize, Width);
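  // Worked example: EltSize = 32, Mask = 0x1FF, DemandedBits = 0xFF gives
  // ShrunkMask = 0xFF, Width = 8 and ZeroExtendMask = 0xFF, which differs from
  // Mask but stays within Mask | ~DemandedBits, so the AND constant is
  // replaced with the movzx-friendly mask 0xFF below.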
36514 
36515   // If we aren't changing the mask, just return true to keep it and prevent
36516   // the caller from optimizing.
36517   if (ZeroExtendMask == Mask)
36518     return true;
36519 
36520   // Make sure the new mask can be represented by a combination of mask bits
36521   // and non-demanded bits.
36522   if (!ZeroExtendMask.isSubsetOf(Mask | ~DemandedBits))
36523     return false;
36524 
36525   // Replace the constant with the zero extend mask.
36526   SDLoc DL(Op);
36527   SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
36528   SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
36529   return TLO.CombineTo(Op, NewOp);
36530 }
36531 
36532 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
36533                                                       KnownBits &Known,
36534                                                       const APInt &DemandedElts,
36535                                                       const SelectionDAG &DAG,
36536                                                       unsigned Depth) const {
36537   unsigned BitWidth = Known.getBitWidth();
36538   unsigned NumElts = DemandedElts.getBitWidth();
36539   unsigned Opc = Op.getOpcode();
36540   EVT VT = Op.getValueType();
36541   assert((Opc >= ISD::BUILTIN_OP_END ||
36542           Opc == ISD::INTRINSIC_WO_CHAIN ||
36543           Opc == ISD::INTRINSIC_W_CHAIN ||
36544           Opc == ISD::INTRINSIC_VOID) &&
36545          "Should use MaskedValueIsZero if you don't know whether Op"
36546          " is a target node!");
36547 
36548   Known.resetAll();
36549   switch (Opc) {
36550   default: break;
36551   case X86ISD::MUL_IMM: {
36552     KnownBits Known2;
36553     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36554     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36555     Known = KnownBits::mul(Known, Known2);
36556     break;
36557   }
36558   case X86ISD::SETCC:
36559     Known.Zero.setBitsFrom(1);
36560     break;
36561   case X86ISD::MOVMSK: {
36562     unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
36563     Known.Zero.setBitsFrom(NumLoBits);
36564     break;
36565   }
36566   case X86ISD::PEXTRB:
36567   case X86ISD::PEXTRW: {
36568     SDValue Src = Op.getOperand(0);
36569     EVT SrcVT = Src.getValueType();
36570     APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
36571                                             Op.getConstantOperandVal(1));
36572     Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
36573     Known = Known.anyextOrTrunc(BitWidth);
36574     Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
36575     break;
36576   }
36577   case X86ISD::VSRAI:
36578   case X86ISD::VSHLI:
36579   case X86ISD::VSRLI: {
36580     unsigned ShAmt = Op.getConstantOperandVal(1);
36581     if (ShAmt >= VT.getScalarSizeInBits()) {
36582       // Out of range logical bit shifts are guaranteed to be zero.
36583       // Out of range arithmetic bit shifts splat the sign bit.
36584       if (Opc != X86ISD::VSRAI) {
36585         Known.setAllZero();
36586         break;
36587       }
36588 
36589       ShAmt = VT.getScalarSizeInBits() - 1;
36590     }
36591 
36592     Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36593     if (Opc == X86ISD::VSHLI) {
36594       Known.Zero <<= ShAmt;
36595       Known.One <<= ShAmt;
36596       // Low bits are known zero.
36597       Known.Zero.setLowBits(ShAmt);
36598     } else if (Opc == X86ISD::VSRLI) {
36599       Known.Zero.lshrInPlace(ShAmt);
36600       Known.One.lshrInPlace(ShAmt);
36601       // High bits are known zero.
36602       Known.Zero.setHighBits(ShAmt);
36603     } else {
36604       Known.Zero.ashrInPlace(ShAmt);
36605       Known.One.ashrInPlace(ShAmt);
36606     }
36607     break;
36608   }
36609   case X86ISD::PACKUS: {
36610     // PACKUS is just a truncation if the upper half is zero.
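    // Known starts out as the identity for intersectWith at twice the element
    // width, gets intersected with the known bits of each demanded input, and
    // is only kept if the upper half is known zero; otherwise the saturation
    // could change the packed values and we conservatively reset.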
36611     APInt DemandedLHS, DemandedRHS;
36612     getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
36613 
36614     Known.One = APInt::getAllOnes(BitWidth * 2);
36615     Known.Zero = APInt::getAllOnes(BitWidth * 2);
36616 
36617     KnownBits Known2;
36618     if (!!DemandedLHS) {
36619       Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
36620       Known = Known.intersectWith(Known2);
36621     }
36622     if (!!DemandedRHS) {
36623       Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
36624       Known = Known.intersectWith(Known2);
36625     }
36626 
36627     if (Known.countMinLeadingZeros() < BitWidth)
36628       Known.resetAll();
36629     Known = Known.trunc(BitWidth);
36630     break;
36631   }
36632   case X86ISD::VBROADCAST: {
36633     SDValue Src = Op.getOperand(0);
36634     if (!Src.getSimpleValueType().isVector()) {
36635       Known = DAG.computeKnownBits(Src, Depth + 1);
36636       return;
36637     }
36638     break;
36639   }
36640   case X86ISD::AND: {
36641     if (Op.getResNo() == 0) {
36642       KnownBits Known2;
36643       Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36644       Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36645       Known &= Known2;
36646     }
36647     break;
36648   }
36649   case X86ISD::ANDNP: {
36650     KnownBits Known2;
36651     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36652     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36653 
36654     // ANDNP = (~X & Y);
36655     Known.One &= Known2.Zero;
36656     Known.Zero |= Known2.One;
36657     break;
36658   }
36659   case X86ISD::FOR: {
36660     KnownBits Known2;
36661     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36662     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36663 
36664     Known |= Known2;
36665     break;
36666   }
36667   case X86ISD::PSADBW: {
36668     assert(VT.getScalarType() == MVT::i64 &&
36669            Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
36670            "Unexpected PSADBW types");
36671 
36672     // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
36673     Known.Zero.setBitsFrom(16);
36674     break;
36675   }
36676   case X86ISD::PCMPGT:
36677   case X86ISD::PCMPEQ: {
36678     KnownBits KnownLhs =
36679         DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36680     KnownBits KnownRhs =
36681         DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36682     std::optional<bool> Res = Opc == X86ISD::PCMPEQ
36683                                   ? KnownBits::eq(KnownLhs, KnownRhs)
36684                                   : KnownBits::sgt(KnownLhs, KnownRhs);
36685     if (Res) {
36686       if (*Res)
36687         Known.setAllOnes();
36688       else
36689         Known.setAllZero();
36690     }
36691     break;
36692   }
36693   case X86ISD::PMULUDQ: {
36694     KnownBits Known2;
36695     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36696     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36697 
36698     Known = Known.trunc(BitWidth / 2).zext(BitWidth);
36699     Known2 = Known2.trunc(BitWidth / 2).zext(BitWidth);
36700     Known = KnownBits::mul(Known, Known2);
36701     break;
36702   }
36703   case X86ISD::CMOV: {
36704     Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
36705     // If we don't know any bits, early out.
36706     if (Known.isUnknown())
36707       break;
36708     KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
36709 
36710     // Only known if known in both the LHS and RHS.
36711     Known = Known.intersectWith(Known2);
36712     break;
36713   }
36714   case X86ISD::BEXTR:
36715   case X86ISD::BEXTRI: {
36716     SDValue Op0 = Op.getOperand(0);
36717     SDValue Op1 = Op.getOperand(1);
36718 
36719     if (auto* Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
36720       unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
36721       unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
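      // The BEXTR control operand packs the starting bit position in bits
      // [7:0] and the field length in bits [15:8], which is what the two
      // extractions above pull apart.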
36722 
36723       // If the length is 0, the result is 0.
36724       if (Length == 0) {
36725         Known.setAllZero();
36726         break;
36727       }
36728 
36729       if ((Shift + Length) <= BitWidth) {
36730         Known = DAG.computeKnownBits(Op0, Depth + 1);
36731         Known = Known.extractBits(Length, Shift);
36732         Known = Known.zextOrTrunc(BitWidth);
36733       }
36734     }
36735     break;
36736   }
36737   case X86ISD::PDEP: {
36738     KnownBits Known2;
36739     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36740     Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
36741     // Zeros are retained from the mask operand, but ones are not.
36742     Known.One.clearAllBits();
36743     // The result will have at least as many trailing zeros as the non-mask
36744     // operand since bits can only map to the same or higher bit position.
36745     Known.Zero.setLowBits(Known2.countMinTrailingZeros());
36746     break;
36747   }
36748   case X86ISD::PEXT: {
36749     Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
36750     // The result has at least as many leading zeros as there are known zero
36750     // bits in the mask.
36751     unsigned Count = Known.Zero.popcount();
36752     Known.Zero = APInt::getHighBitsSet(BitWidth, Count);
36753     Known.One.clearAllBits();
36754     break;
36755   }
36756   case X86ISD::VTRUNC:
36757   case X86ISD::VTRUNCS:
36758   case X86ISD::VTRUNCUS:
36759   case X86ISD::CVTSI2P:
36760   case X86ISD::CVTUI2P:
36761   case X86ISD::CVTP2SI:
36762   case X86ISD::CVTP2UI:
36763   case X86ISD::MCVTP2SI:
36764   case X86ISD::MCVTP2UI:
36765   case X86ISD::CVTTP2SI:
36766   case X86ISD::CVTTP2UI:
36767   case X86ISD::MCVTTP2SI:
36768   case X86ISD::MCVTTP2UI:
36769   case X86ISD::MCVTSI2P:
36770   case X86ISD::MCVTUI2P:
36771   case X86ISD::VFPROUND:
36772   case X86ISD::VMFPROUND:
36773   case X86ISD::CVTPS2PH:
36774   case X86ISD::MCVTPS2PH: {
36775     // Truncations/Conversions - upper elements are known zero.
36776     EVT SrcVT = Op.getOperand(0).getValueType();
36777     if (SrcVT.isVector()) {
36778       unsigned NumSrcElts = SrcVT.getVectorNumElements();
36779       if (NumElts > NumSrcElts && DemandedElts.countr_zero() >= NumSrcElts)
36780         Known.setAllZero();
36781     }
36782     break;
36783   }
36784   case X86ISD::STRICT_CVTTP2SI:
36785   case X86ISD::STRICT_CVTTP2UI:
36786   case X86ISD::STRICT_CVTSI2P:
36787   case X86ISD::STRICT_CVTUI2P:
36788   case X86ISD::STRICT_VFPROUND:
36789   case X86ISD::STRICT_CVTPS2PH: {
36790     // Strict Conversions - upper elements are known zero.
36791     EVT SrcVT = Op.getOperand(1).getValueType();
36792     if (SrcVT.isVector()) {
36793       unsigned NumSrcElts = SrcVT.getVectorNumElements();
36794       if (NumElts > NumSrcElts && DemandedElts.countr_zero() >= NumSrcElts)
36795         Known.setAllZero();
36796     }
36797     break;
36798   }
36799   case X86ISD::MOVQ2DQ: {
36800     // Move from MMX to XMM. Upper half of XMM should be 0.
36801     if (DemandedElts.countr_zero() >= (NumElts / 2))
36802       Known.setAllZero();
36803     break;
36804   }
36805   case X86ISD::VBROADCAST_LOAD: {
36806     APInt UndefElts;
36807     SmallVector<APInt, 16> EltBits;
36808     if (getTargetConstantBitsFromNode(Op, BitWidth, UndefElts, EltBits,
36809                                       /*AllowWholeUndefs*/ false,
36810                                       /*AllowPartialUndefs*/ false)) {
36811       Known.Zero.setAllBits();
36812       Known.One.setAllBits();
36813       for (unsigned I = 0; I != NumElts; ++I) {
36814         if (!DemandedElts[I])
36815           continue;
36816         if (UndefElts[I]) {
36817           Known.resetAll();
36818           break;
36819         }
36820         KnownBits Known2 = KnownBits::makeConstant(EltBits[I]);
36821         Known = Known.intersectWith(Known2);
36822       }
36823       return;
36824     }
36825     break;
36826   }
36827   }
36828 
36829   // Handle target shuffles.
36830   // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
36831   if (isTargetShuffle(Opc)) {
36832     SmallVector<int, 64> Mask;
36833     SmallVector<SDValue, 2> Ops;
36834     if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
36835       unsigned NumOps = Ops.size();
36836       unsigned NumElts = VT.getVectorNumElements();
36837       if (Mask.size() == NumElts) {
36838         SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
36839         Known.Zero.setAllBits(); Known.One.setAllBits();
36840         for (unsigned i = 0; i != NumElts; ++i) {
36841           if (!DemandedElts[i])
36842             continue;
36843           int M = Mask[i];
36844           if (M == SM_SentinelUndef) {
36845             // For UNDEF elements, we don't know anything about the common state
36846             // of the shuffle result.
36847             Known.resetAll();
36848             break;
36849           }
36850           if (M == SM_SentinelZero) {
36851             Known.One.clearAllBits();
36852             continue;
36853           }
36854           assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
36855                  "Shuffle index out of range");
36856 
36857           unsigned OpIdx = (unsigned)M / NumElts;
36858           unsigned EltIdx = (unsigned)M % NumElts;
36859           if (Ops[OpIdx].getValueType() != VT) {
36860             // TODO - handle target shuffle ops with different value types.
36861             Known.resetAll();
36862             break;
36863           }
36864           DemandedOps[OpIdx].setBit(EltIdx);
36865         }
36866         // Known bits are the values that are shared by every demanded element.
36867         for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
36868           if (!DemandedOps[i])
36869             continue;
36870           KnownBits Known2 =
36871               DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
36872           Known = Known.intersectWith(Known2);
36873         }
36874       }
36875     }
36876   }
36877 }
36878 
36879 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
36880     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
36881     unsigned Depth) const {
36882   EVT VT = Op.getValueType();
36883   unsigned VTBits = VT.getScalarSizeInBits();
36884   unsigned Opcode = Op.getOpcode();
36885   switch (Opcode) {
36886   case X86ISD::SETCC_CARRY:
36887     // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
36888     return VTBits;
36889 
36890   case X86ISD::VTRUNC: {
36891     SDValue Src = Op.getOperand(0);
36892     MVT SrcVT = Src.getSimpleValueType();
36893     unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
36894     assert(VTBits < NumSrcBits && "Illegal truncation input type");
36895     APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
36896     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
36897     if (Tmp > (NumSrcBits - VTBits))
36898       return Tmp - (NumSrcBits - VTBits);
36899     return 1;
36900   }
36901 
36902   case X86ISD::PACKSS: {
36903     // PACKSS is just a truncation if the sign bits extend to the packed size.
36904     APInt DemandedLHS, DemandedRHS;
36905     getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
36906                         DemandedRHS);
36907 
36908     // Helper to detect PACKSSDW(BITCAST(PACKSSDW(X)),BITCAST(PACKSSDW(Y)))
36909     // patterns, often used to compact vXi64 all-sign-bit values.
36910     auto NumSignBitsPACKSS = [&](SDValue V, const APInt &Elts) -> unsigned {
36911       SDValue BC = peekThroughBitcasts(V);
36912       if (BC.getOpcode() == X86ISD::PACKSS &&
36913           BC.getScalarValueSizeInBits() == 16 &&
36914           V.getScalarValueSizeInBits() == 32) {
36915         SDValue BC0 = peekThroughBitcasts(BC.getOperand(0));
36916         SDValue BC1 = peekThroughBitcasts(BC.getOperand(1));
36917         if (BC0.getScalarValueSizeInBits() == 64 &&
36918             BC1.getScalarValueSizeInBits() == 64 &&
36919             DAG.ComputeNumSignBits(BC0, Depth + 1) == 64 &&
36920             DAG.ComputeNumSignBits(BC1, Depth + 1) == 64)
36921           return 32;
36922       }
36923       return DAG.ComputeNumSignBits(V, Elts, Depth + 1);
36924     };
36925 
36926     unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
36927     unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
36928     if (!!DemandedLHS)
36929       Tmp0 = NumSignBitsPACKSS(Op.getOperand(0), DemandedLHS);
36930     if (!!DemandedRHS)
36931       Tmp1 = NumSignBitsPACKSS(Op.getOperand(1), DemandedRHS);
36932     unsigned Tmp = std::min(Tmp0, Tmp1);
36933     if (Tmp > (SrcBits - VTBits))
36934       return Tmp - (SrcBits - VTBits);
36935     return 1;
36936   }
36937 
36938   case X86ISD::VBROADCAST: {
36939     SDValue Src = Op.getOperand(0);
36940     if (!Src.getSimpleValueType().isVector())
36941       return DAG.ComputeNumSignBits(Src, Depth + 1);
36942     break;
36943   }
36944 
36945   case X86ISD::VSHLI: {
36946     SDValue Src = Op.getOperand(0);
36947     const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
36948     if (ShiftVal.uge(VTBits))
36949       return VTBits; // Shifted all bits out --> zero.
36950     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
36951     if (ShiftVal.uge(Tmp))
36952       return 1; // Shifted all sign bits out --> unknown.
36953     return Tmp - ShiftVal.getZExtValue();
36954   }
36955 
36956   case X86ISD::VSRAI: {
36957     SDValue Src = Op.getOperand(0);
36958     APInt ShiftVal = Op.getConstantOperandAPInt(1);
36959     if (ShiftVal.uge(VTBits - 1))
36960       return VTBits; // Sign splat.
36961     unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
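    // Each arithmetic shift right copies the sign bit, so the shift amount
    // adds to the known sign-bit count (capped at VTBits). For example, a
    // source with 3 known sign bits shifted by 4 has min(3 + 4, VTBits).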
36962     ShiftVal += Tmp;
36963     return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
36964   }
36965 
36966   case X86ISD::FSETCC:
36967     // cmpss/cmpsd return zero/all-bits result values in the bottom element.
36968     if (VT == MVT::f32 || VT == MVT::f64 ||
36969         ((VT == MVT::v4f32 || VT == MVT::v2f64) && DemandedElts == 1))
36970       return VTBits;
36971     break;
36972 
36973   case X86ISD::PCMPGT:
36974   case X86ISD::PCMPEQ:
36975   case X86ISD::CMPP:
36976   case X86ISD::VPCOM:
36977   case X86ISD::VPCOMU:
36978     // Vector compares return zero/all-bits result values.
36979     return VTBits;
36980 
36981   case X86ISD::ANDNP: {
36982     unsigned Tmp0 =
36983         DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
36984     if (Tmp0 == 1) return 1; // Early out.
36985     unsigned Tmp1 =
36986         DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
36987     return std::min(Tmp0, Tmp1);
36988   }
36989 
36990   case X86ISD::CMOV: {
36991     unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
36992     if (Tmp0 == 1) return 1;  // Early out.
36993     unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
36994     return std::min(Tmp0, Tmp1);
36995   }
36996   }
36997 
36998   // Handle target shuffles.
36999   // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
37000   if (isTargetShuffle(Opcode)) {
37001     SmallVector<int, 64> Mask;
37002     SmallVector<SDValue, 2> Ops;
37003     if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask)) {
37004       unsigned NumOps = Ops.size();
37005       unsigned NumElts = VT.getVectorNumElements();
37006       if (Mask.size() == NumElts) {
37007         SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
37008         for (unsigned i = 0; i != NumElts; ++i) {
37009           if (!DemandedElts[i])
37010             continue;
37011           int M = Mask[i];
37012           if (M == SM_SentinelUndef) {
37013             // For UNDEF elements, we don't know anything about the common state
37014             // of the shuffle result.
37015             return 1;
37016           } else if (M == SM_SentinelZero) {
37017             // Zero = all sign bits.
37018             continue;
37019           }
37020           assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
37021                  "Shuffle index out of range");
37022 
37023           unsigned OpIdx = (unsigned)M / NumElts;
37024           unsigned EltIdx = (unsigned)M % NumElts;
37025           if (Ops[OpIdx].getValueType() != VT) {
37026             // TODO - handle target shuffle ops with different value types.
37027             return 1;
37028           }
37029           DemandedOps[OpIdx].setBit(EltIdx);
37030         }
37031         unsigned Tmp0 = VTBits;
37032         for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
37033           if (!DemandedOps[i])
37034             continue;
37035           unsigned Tmp1 =
37036               DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
37037           Tmp0 = std::min(Tmp0, Tmp1);
37038         }
37039         return Tmp0;
37040       }
37041     }
37042   }
37043 
37044   // Fallback case.
37045   return 1;
37046 }
37047 
37048 SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
37049   if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
37050     return N->getOperand(0);
37051   return N;
37052 }
37053 
37054 // Helper to look for a normal load that can be narrowed into a vzload with the
37055 // specified VT and memory VT. Returns SDValue() on failure.
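// For instance (a sketch, not a case exercised here): a full-width vector load
// whose upper elements are never used can be replaced by a VZEXT_LOAD that
// loads just MemVT from the same address and zero-fills the remaining lanes of
// VT, reusing the original pointer info, alignment and memory operand flags.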
37056 static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT,
37057                                   SelectionDAG &DAG) {
37058   // Can't if the load is volatile or atomic.
37059   if (!LN->isSimple())
37060     return SDValue();
37061 
37062   SDVTList Tys = DAG.getVTList(VT, MVT::Other);
37063   SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
37064   return DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, SDLoc(LN), Tys, Ops, MemVT,
37065                                  LN->getPointerInfo(), LN->getOriginalAlign(),
37066                                  LN->getMemOperand()->getFlags());
37067 }
37068 
37069 // Attempt to match a combined shuffle mask against supported unary shuffle
37070 // instructions.
37071 // TODO: Investigate sharing more of this with shuffle lowering.
37072 static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
37073                               bool AllowFloatDomain, bool AllowIntDomain,
37074                               SDValue V1, const SelectionDAG &DAG,
37075                               const X86Subtarget &Subtarget, unsigned &Shuffle,
37076                               MVT &SrcVT, MVT &DstVT) {
37077   unsigned NumMaskElts = Mask.size();
37078   unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
37079 
37080   // Match against a VZEXT_MOVL vXi32 and vXi16 zero-extending instruction.
37081   if (Mask[0] == 0 &&
37082       (MaskEltSize == 32 || (MaskEltSize == 16 && Subtarget.hasFP16()))) {
37083     if ((isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) ||
37084         (V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
37085          isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1))) {
37086       Shuffle = X86ISD::VZEXT_MOVL;
37087       if (MaskEltSize == 16)
37088         SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
37089       else
37090         SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
37091       return true;
37092     }
37093   }
37094 
37095   // Match against an ANY/SIGN/ZERO_EXTEND_VECTOR_INREG instruction.
37096   // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
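  // Illustrative example (assumed, not taken from a test): a v16i8 mask
  // {0,Z,1,Z,2,Z,...,7,Z}, where Z is a zeroed element, matches Scale == 2
  // with MatchZero, i.e. a ZERO_EXTEND_VECTOR_INREG of the low 8 bytes to
  // v8i16 (PMOVZXBW).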
37097   if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
37098                          (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
37099     unsigned MaxScale = 64 / MaskEltSize;
37100     bool UseSign = V1.getScalarValueSizeInBits() == MaskEltSize &&
37101                    DAG.ComputeNumSignBits(V1) == MaskEltSize;
37102     for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
37103       bool MatchAny = true;
37104       bool MatchZero = true;
37105       bool MatchSign = UseSign;
37106       unsigned NumDstElts = NumMaskElts / Scale;
37107       for (unsigned i = 0;
37108            i != NumDstElts && (MatchAny || MatchSign || MatchZero); ++i) {
37109         if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
37110           MatchAny = MatchSign = MatchZero = false;
37111           break;
37112         }
37113         unsigned Pos = (i * Scale) + 1;
37114         unsigned Len = Scale - 1;
37115         MatchAny &= isUndefInRange(Mask, Pos, Len);
37116         MatchZero &= isUndefOrZeroInRange(Mask, Pos, Len);
37117         MatchSign &= isUndefOrEqualInRange(Mask, (int)i, Pos, Len);
37118       }
37119       if (MatchAny || MatchSign || MatchZero) {
37120         assert((MatchSign || MatchZero) &&
37121                "Failed to match sext/zext but matched aext?");
37122         unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
37123         MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType()
37124                                           : MVT::getIntegerVT(MaskEltSize);
37125         SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);
37126 
37127         Shuffle = unsigned(
37128             MatchAny ? ISD::ANY_EXTEND
37129                      : (MatchSign ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND));
37130         if (SrcVT.getVectorNumElements() != NumDstElts)
37131           Shuffle = DAG.getOpcode_EXTEND_VECTOR_INREG(Shuffle);
37132 
37133         DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
37134         DstVT = MVT::getVectorVT(DstVT, NumDstElts);
37135         return true;
37136       }
37137     }
37138   }
37139 
37140   // Match against a VZEXT_MOVL instruction; SSE1 only supports 32-bits (MOVSS).
37141   if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2()) ||
37142        (MaskEltSize == 16 && Subtarget.hasFP16())) &&
37143       isUndefOrEqual(Mask[0], 0) &&
37144       isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
37145     Shuffle = X86ISD::VZEXT_MOVL;
37146     if (MaskEltSize == 16)
37147       SrcVT = DstVT = MaskVT.changeVectorElementType(MVT::f16);
37148     else
37149       SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
37150     return true;
37151   }
37152 
37153   // Check if we have SSE3, which lets us use MOVDDUP etc. These
37154   // instructions are no slower than UNPCKLPD but have the option to
37155   // fold their input operand, even from an unaligned memory load.
37156   if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
37157     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG, V1)) {
37158       Shuffle = X86ISD::MOVDDUP;
37159       SrcVT = DstVT = MVT::v2f64;
37160       return true;
37161     }
37162     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
37163       Shuffle = X86ISD::MOVSLDUP;
37164       SrcVT = DstVT = MVT::v4f32;
37165       return true;
37166     }
37167     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3}, DAG, V1)) {
37168       Shuffle = X86ISD::MOVSHDUP;
37169       SrcVT = DstVT = MVT::v4f32;
37170       return true;
37171     }
37172   }
37173 
37174   if (MaskVT.is256BitVector() && AllowFloatDomain) {
37175     assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
37176     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
37177       Shuffle = X86ISD::MOVDDUP;
37178       SrcVT = DstVT = MVT::v4f64;
37179       return true;
37180     }
37181     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
37182                                   V1)) {
37183       Shuffle = X86ISD::MOVSLDUP;
37184       SrcVT = DstVT = MVT::v8f32;
37185       return true;
37186     }
37187     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3, 5, 5, 7, 7}, DAG,
37188                                   V1)) {
37189       Shuffle = X86ISD::MOVSHDUP;
37190       SrcVT = DstVT = MVT::v8f32;
37191       return true;
37192     }
37193   }
37194 
37195   if (MaskVT.is512BitVector() && AllowFloatDomain) {
37196     assert(Subtarget.hasAVX512() &&
37197            "AVX512 required for 512-bit vector shuffles");
37198     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
37199                                   V1)) {
37200       Shuffle = X86ISD::MOVDDUP;
37201       SrcVT = DstVT = MVT::v8f64;
37202       return true;
37203     }
37204     if (isTargetShuffleEquivalent(
37205             MaskVT, Mask,
37206             {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}, DAG, V1)) {
37207       Shuffle = X86ISD::MOVSLDUP;
37208       SrcVT = DstVT = MVT::v16f32;
37209       return true;
37210     }
37211     if (isTargetShuffleEquivalent(
37212             MaskVT, Mask,
37213             {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}, DAG, V1)) {
37214       Shuffle = X86ISD::MOVSHDUP;
37215       SrcVT = DstVT = MVT::v16f32;
37216       return true;
37217     }
37218   }
37219 
37220   return false;
37221 }
37222 
37223 // Attempt to match a combined shuffle mask against supported unary immediate
37224 // permute instructions.
37225 // TODO: Investigate sharing more of this with shuffle lowering.
37226 static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
37227                                      const APInt &Zeroable,
37228                                      bool AllowFloatDomain, bool AllowIntDomain,
37229                                      const SelectionDAG &DAG,
37230                                      const X86Subtarget &Subtarget,
37231                                      unsigned &Shuffle, MVT &ShuffleVT,
37232                                      unsigned &PermuteImm) {
37233   unsigned NumMaskElts = Mask.size();
37234   unsigned InputSizeInBits = MaskVT.getSizeInBits();
37235   unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
37236   MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
37237   bool ContainsZeros = isAnyZero(Mask);
37238 
37239   // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
37240   if (!ContainsZeros && MaskScalarSizeInBits == 64) {
37241     // Check for lane crossing permutes.
37242     if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
37243       // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
37244       if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
37245         Shuffle = X86ISD::VPERMI;
37246         ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
37247         PermuteImm = getV4X86ShuffleImm(Mask);
37248         return true;
37249       }
37250       if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
37251         SmallVector<int, 4> RepeatedMask;
37252         if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
37253           Shuffle = X86ISD::VPERMI;
37254           ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
37255           PermuteImm = getV4X86ShuffleImm(RepeatedMask);
37256           return true;
37257         }
37258       }
37259     } else if (AllowFloatDomain && Subtarget.hasAVX()) {
37260       // VPERMILPD can permute with a non-repeating shuffle.
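      // Per-element immediate bit i picks which half of the 128-bit lane
      // feeds result element i. Worked example (illustrative): mask {1,0,3,2}
      // on v4f64 gives PermuteImm 0b0101.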
37261       Shuffle = X86ISD::VPERMILPI;
37262       ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
37263       PermuteImm = 0;
37264       for (int i = 0, e = Mask.size(); i != e; ++i) {
37265         int M = Mask[i];
37266         if (M == SM_SentinelUndef)
37267           continue;
37268         assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
37269         PermuteImm |= (M & 1) << i;
37270       }
37271       return true;
37272     }
37273   }
37274 
37275   // We are checking for a shuffle match or a shift match. Loop twice so we
37276   // can choose which to try to match first, depending on target preference.
37277   for (unsigned Order = 0; Order < 2; ++Order) {
37278     if (Subtarget.preferLowerShuffleAsShift() ? (Order == 1) : (Order == 0)) {
37279       // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
37280       // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
37281       // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
37282       if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
37283           !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
37284         SmallVector<int, 4> RepeatedMask;
37285         if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
37286           // Narrow the repeated mask to create 32-bit element permutes.
37287           SmallVector<int, 4> WordMask = RepeatedMask;
37288           if (MaskScalarSizeInBits == 64)
37289             narrowShuffleMaskElts(2, RepeatedMask, WordMask);
37290 
37291           Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
37292           ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
37293           ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
37294           PermuteImm = getV4X86ShuffleImm(WordMask);
37295           return true;
37296         }
37297       }
37298 
37299       // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
37300       if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16 &&
37301           ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37302            (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37303            (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
37304         SmallVector<int, 4> RepeatedMask;
37305         if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
37306           ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
37307           ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);
37308 
37309           // PSHUFLW: permute lower 4 elements only.
37310           if (isUndefOrInRange(LoMask, 0, 4) &&
37311               isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
37312             Shuffle = X86ISD::PSHUFLW;
37313             ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
37314             PermuteImm = getV4X86ShuffleImm(LoMask);
37315             return true;
37316           }
37317 
37318           // PSHUFHW: permute upper 4 elements only.
37319           if (isUndefOrInRange(HiMask, 4, 8) &&
37320               isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
37321             // Offset the HiMask so that we can create the shuffle immediate.
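            // e.g. (illustrative) HiMask {5,4,7,6} becomes {1,0,3,2}, which is
            // the form getV4X86ShuffleImm expects (indices relative to the
            // upper half).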
37322             int OffsetHiMask[4];
37323             for (int i = 0; i != 4; ++i)
37324               OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);
37325 
37326             Shuffle = X86ISD::PSHUFHW;
37327             ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
37328             PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
37329             return true;
37330           }
37331         }
37332       }
37333     } else {
37334       // Attempt to match against bit rotates.
37335       if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits < 64 &&
37336           ((MaskVT.is128BitVector() && Subtarget.hasXOP()) ||
37337            Subtarget.hasAVX512())) {
37338         int RotateAmt = matchShuffleAsBitRotate(ShuffleVT, MaskScalarSizeInBits,
37339                                                 Subtarget, Mask);
37340         if (0 < RotateAmt) {
37341           Shuffle = X86ISD::VROTLI;
37342           PermuteImm = (unsigned)RotateAmt;
37343           return true;
37344         }
37345       }
37346     }
37347     // Attempt to match against byte/bit shifts.
37348     if (AllowIntDomain &&
37349         ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37350          (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37351          (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37352       int ShiftAmt =
37353           matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits, Mask, 0,
37354                               Zeroable, Subtarget);
37355       if (0 < ShiftAmt && (!ShuffleVT.is512BitVector() || Subtarget.hasBWI() ||
37356                            32 <= ShuffleVT.getScalarSizeInBits())) {
37357         // Byte shifts can be slower, so only match them on the second attempt.
37358         if (Order == 0 &&
37359             (Shuffle == X86ISD::VSHLDQ || Shuffle == X86ISD::VSRLDQ))
37360           continue;
37361 
37362         PermuteImm = (unsigned)ShiftAmt;
37363         return true;
37364       }
37365 
37366     }
37367   }
37368 
37369   return false;
37370 }
37371 
37372 // Attempt to match a combined unary shuffle mask against supported binary
37373 // shuffle instructions.
37374 // TODO: Investigate sharing more of this with shuffle lowering.
37375 static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
37376                                bool AllowFloatDomain, bool AllowIntDomain,
37377                                SDValue &V1, SDValue &V2, const SDLoc &DL,
37378                                SelectionDAG &DAG, const X86Subtarget &Subtarget,
37379                                unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
37380                                bool IsUnary) {
37381   unsigned NumMaskElts = Mask.size();
37382   unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
37383   unsigned SizeInBits = MaskVT.getSizeInBits();
37384 
37385   if (MaskVT.is128BitVector()) {
37386     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG) &&
37387         AllowFloatDomain) {
37388       V2 = V1;
37389       V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
37390       Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
37391       SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
37392       return true;
37393     }
37394     if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1}, DAG) &&
37395         AllowFloatDomain) {
37396       V2 = V1;
37397       Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
37398       SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
37399       return true;
37400     }
37401     if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 3}, DAG) &&
37402         Subtarget.hasSSE2() && (AllowFloatDomain || !Subtarget.hasSSE41())) {
37403       std::swap(V1, V2);
37404       Shuffle = X86ISD::MOVSD;
37405       SrcVT = DstVT = MVT::v2f64;
37406       return true;
37407     }
37408     if (isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG) &&
37409         (AllowFloatDomain || !Subtarget.hasSSE41())) {
37410       Shuffle = X86ISD::MOVSS;
37411       SrcVT = DstVT = MVT::v4f32;
37412       return true;
37413     }
37414     if (isTargetShuffleEquivalent(MaskVT, Mask, {8, 1, 2, 3, 4, 5, 6, 7},
37415                                   DAG) &&
37416         Subtarget.hasFP16()) {
37417       Shuffle = X86ISD::MOVSH;
37418       SrcVT = DstVT = MVT::v8f16;
37419       return true;
37420     }
37421   }
37422 
37423   // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
37424   if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
37425       ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
37426       ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
37427     if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
37428                              Subtarget)) {
37429       DstVT = MaskVT;
37430       return true;
37431     }
37432   }
37433   // TODO: Can we handle this inside matchShuffleWithPACK?
37434   if (MaskVT == MVT::v4i32 && Subtarget.hasSSE2() &&
37435       isTargetShuffleEquivalent(MaskVT, Mask, {0, 2, 4, 6}, DAG) &&
37436       V1.getScalarValueSizeInBits() == 64 &&
37437       V2.getScalarValueSizeInBits() == 64) {
37438     // Use (SSE41) PACKUSDW if the leading zero bits reach down to the lowest 16 bits.
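    // Rough reasoning (assumed from the known-bits checks below): with >= 48
    // leading zeros per 64-bit element, each value fits in 16 bits, so the
    // unsigned saturation in PACKUSDW is a no-op and the pack reproduces the
    // {0,2,4,6} shuffle exactly.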
37439     unsigned MinLZV1 = DAG.computeKnownBits(V1).countMinLeadingZeros();
37440     unsigned MinLZV2 = DAG.computeKnownBits(V2).countMinLeadingZeros();
37441     if (Subtarget.hasSSE41() && MinLZV1 >= 48 && MinLZV2 >= 48) {
37442       SrcVT = MVT::v4i32;
37443       DstVT = MVT::v8i16;
37444       Shuffle = X86ISD::PACKUS;
37445       return true;
37446     }
37447     // Use PACKUSWB if the leading zero bits reach down to the lowest 8 bits.
37448     if (MinLZV1 >= 56 && MinLZV2 >= 56) {
37449       SrcVT = MVT::v8i16;
37450       DstVT = MVT::v16i8;
37451       Shuffle = X86ISD::PACKUS;
37452       return true;
37453     }
37454     // Use PACKSSDW if the sign bits extend down to the lowest 16 bits.
37455     if (DAG.ComputeNumSignBits(V1) > 48 && DAG.ComputeNumSignBits(V2) > 48) {
37456       SrcVT = MVT::v4i32;
37457       DstVT = MVT::v8i16;
37458       Shuffle = X86ISD::PACKSS;
37459       return true;
37460     }
37461   }
37462 
37463   // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
37464   if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
37465       (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37466       (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
37467       (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37468       (MaskVT.is512BitVector() && Subtarget.hasAVX512() &&
37469        (32 <= EltSizeInBits || Subtarget.hasBWI()))) {
37470     if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
37471                               Subtarget)) {
37472       SrcVT = DstVT = MaskVT;
37473       if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
37474         SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
37475       return true;
37476     }
37477   }
37478 
37479   // Attempt to match against an OR if we're performing a blend shuffle and the
37480   // non-blended source element is zero in each case.
37481   // TODO: Handle cases where the V1/V2 sizes don't match SizeInBits.
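  // Illustrative example: a blend mask {0, 5, 2, 7} selects V1 in lanes 0/2
  // and V2 in lanes 1/3; if V1 is known zero in lanes 1/3 and V2 is known
  // zero in lanes 0/2, the blend is equivalent to (V1 | V2).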
37482   if (SizeInBits == V1.getValueSizeInBits() &&
37483       SizeInBits == V2.getValueSizeInBits() &&
37484       (EltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
37485       (EltSizeInBits % V2.getScalarValueSizeInBits()) == 0) {
37486     bool IsBlend = true;
37487     unsigned NumV1Elts = V1.getValueType().getVectorNumElements();
37488     unsigned NumV2Elts = V2.getValueType().getVectorNumElements();
37489     unsigned Scale1 = NumV1Elts / NumMaskElts;
37490     unsigned Scale2 = NumV2Elts / NumMaskElts;
37491     APInt DemandedZeroV1 = APInt::getZero(NumV1Elts);
37492     APInt DemandedZeroV2 = APInt::getZero(NumV2Elts);
37493     for (unsigned i = 0; i != NumMaskElts; ++i) {
37494       int M = Mask[i];
37495       if (M == SM_SentinelUndef)
37496         continue;
37497       if (M == SM_SentinelZero) {
37498         DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
37499         DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
37500         continue;
37501       }
37502       if (M == (int)i) {
37503         DemandedZeroV2.setBits(i * Scale2, (i + 1) * Scale2);
37504         continue;
37505       }
37506       if (M == (int)(i + NumMaskElts)) {
37507         DemandedZeroV1.setBits(i * Scale1, (i + 1) * Scale1);
37508         continue;
37509       }
37510       IsBlend = false;
37511       break;
37512     }
37513     if (IsBlend) {
37514       if (DAG.MaskedVectorIsZero(V1, DemandedZeroV1) &&
37515           DAG.MaskedVectorIsZero(V2, DemandedZeroV2)) {
37516         Shuffle = ISD::OR;
37517         SrcVT = DstVT = MaskVT.changeTypeToInteger();
37518         return true;
37519       }
37520       if (NumV1Elts == NumV2Elts && NumV1Elts == NumMaskElts) {
37521         // FIXME: handle mismatched sizes?
37522         // TODO: investigate if `ISD::OR` handling in
37523         // `TargetLowering::SimplifyDemandedVectorElts` can be improved instead.
37524         auto computeKnownBitsElementWise = [&DAG](SDValue V) {
37525           unsigned NumElts = V.getValueType().getVectorNumElements();
37526           KnownBits Known(NumElts);
37527           for (unsigned EltIdx = 0; EltIdx != NumElts; ++EltIdx) {
37528             APInt Mask = APInt::getOneBitSet(NumElts, EltIdx);
37529             KnownBits PeepholeKnown = DAG.computeKnownBits(V, Mask);
37530             if (PeepholeKnown.isZero())
37531               Known.Zero.setBit(EltIdx);
37532             if (PeepholeKnown.isAllOnes())
37533               Known.One.setBit(EltIdx);
37534           }
37535           return Known;
37536         };
37537 
37538         KnownBits V1Known = computeKnownBitsElementWise(V1);
37539         KnownBits V2Known = computeKnownBitsElementWise(V2);
37540 
37541         for (unsigned i = 0; i != NumMaskElts && IsBlend; ++i) {
37542           int M = Mask[i];
37543           if (M == SM_SentinelUndef)
37544             continue;
37545           if (M == SM_SentinelZero) {
37546             IsBlend &= V1Known.Zero[i] && V2Known.Zero[i];
37547             continue;
37548           }
37549           if (M == (int)i) {
37550             IsBlend &= V2Known.Zero[i] || V1Known.One[i];
37551             continue;
37552           }
37553           if (M == (int)(i + NumMaskElts)) {
37554             IsBlend &= V1Known.Zero[i] || V2Known.One[i];
37555             continue;
37556           }
37557           llvm_unreachable("will not get here.");
37558         }
37559         if (IsBlend) {
37560           Shuffle = ISD::OR;
37561           SrcVT = DstVT = MaskVT.changeTypeToInteger();
37562           return true;
37563         }
37564       }
37565     }
37566   }
37567 
37568   return false;
37569 }
37570 
37571 static bool matchBinaryPermuteShuffle(
37572     MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
37573     bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
37574     const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
37575     unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
37576   unsigned NumMaskElts = Mask.size();
37577   unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
37578 
37579   // Attempt to match against VALIGND/VALIGNQ rotate.
37580   if (AllowIntDomain && (EltSizeInBits == 64 || EltSizeInBits == 32) &&
37581       ((MaskVT.is128BitVector() && Subtarget.hasVLX()) ||
37582        (MaskVT.is256BitVector() && Subtarget.hasVLX()) ||
37583        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37584     if (!isAnyZero(Mask)) {
37585       int Rotation = matchShuffleAsElementRotate(V1, V2, Mask);
37586       if (0 < Rotation) {
37587         Shuffle = X86ISD::VALIGN;
37588         if (EltSizeInBits == 64)
37589           ShuffleVT = MVT::getVectorVT(MVT::i64, MaskVT.getSizeInBits() / 64);
37590         else
37591           ShuffleVT = MVT::getVectorVT(MVT::i32, MaskVT.getSizeInBits() / 32);
37592         PermuteImm = Rotation;
37593         return true;
37594       }
37595     }
37596   }
37597 
37598   // Attempt to match against PALIGNR byte rotate.
37599   if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
37600                          (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
37601                          (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
37602     int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
37603     if (0 < ByteRotation) {
37604       Shuffle = X86ISD::PALIGNR;
37605       ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
37606       PermuteImm = ByteRotation;
37607       return true;
37608     }
37609   }
37610 
37611   // Attempt to combine to X86ISD::BLENDI.
37612   if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
37613                             (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
37614       (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
37615     uint64_t BlendMask = 0;
37616     bool ForceV1Zero = false, ForceV2Zero = false;
37617     SmallVector<int, 8> TargetMask(Mask);
37618     if (matchShuffleAsBlend(MaskVT, V1, V2, TargetMask, Zeroable, ForceV1Zero,
37619                             ForceV2Zero, BlendMask)) {
37620       if (MaskVT == MVT::v16i16) {
37621         // We can only use v16i16 PBLENDW if the lanes are repeated.
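        // (VPBLENDW applies the same 8-bit immediate to each 128-bit lane, so
        // the blend pattern must repeat per lane; an immediate bit that is set
        // selects the word from V2.)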
37622         SmallVector<int, 8> RepeatedMask;
37623         if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
37624                                         RepeatedMask)) {
37625           assert(RepeatedMask.size() == 8 &&
37626                  "Repeated mask size doesn't match!");
37627           PermuteImm = 0;
37628           for (int i = 0; i < 8; ++i)
37629             if (RepeatedMask[i] >= 8)
37630               PermuteImm |= 1 << i;
37631           V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37632           V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37633           Shuffle = X86ISD::BLENDI;
37634           ShuffleVT = MaskVT;
37635           return true;
37636         }
37637       } else {
37638         V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37639         V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37640         PermuteImm = (unsigned)BlendMask;
37641         Shuffle = X86ISD::BLENDI;
37642         ShuffleVT = MaskVT;
37643         return true;
37644       }
37645     }
37646   }
37647 
37648   // Attempt to combine to INSERTPS, but only if it has elements that need to
37649   // be set to zero.
37650   if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
37651       MaskVT.is128BitVector() && isAnyZero(Mask) &&
37652       matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
37653     Shuffle = X86ISD::INSERTPS;
37654     ShuffleVT = MVT::v4f32;
37655     return true;
37656   }
37657 
37658   // Attempt to combine to SHUFPD.
37659   if (AllowFloatDomain && EltSizeInBits == 64 &&
37660       ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
37661        (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
37662        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37663     bool ForceV1Zero = false, ForceV2Zero = false;
37664     if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
37665                                PermuteImm, Mask, Zeroable)) {
37666       V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
37667       V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
37668       Shuffle = X86ISD::SHUFP;
37669       ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
37670       return true;
37671     }
37672   }
37673 
37674   // Attempt to combine to SHUFPS.
37675   if (AllowFloatDomain && EltSizeInBits == 32 &&
37676       ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
37677        (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
37678        (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
37679     SmallVector<int, 4> RepeatedMask;
37680     if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
37681       // Match each half of the repeated mask to determine whether it just
37682       // references one of the vectors, is zeroable, or is entirely undef.
37683       auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
37684         int M0 = RepeatedMask[Offset];
37685         int M1 = RepeatedMask[Offset + 1];
37686 
37687         if (isUndefInRange(RepeatedMask, Offset, 2)) {
37688           return DAG.getUNDEF(MaskVT);
37689         } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
37690           S0 = (SM_SentinelUndef == M0 ? -1 : 0);
37691           S1 = (SM_SentinelUndef == M1 ? -1 : 1);
37692           return getZeroVector(MaskVT, Subtarget, DAG, DL);
37693         } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
37694           S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
37695           S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
37696           return V1;
37697         } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
37698           S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
37699           S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
37700           return V2;
37701         }
37702 
37703         return SDValue();
37704       };
37705 
37706       int ShufMask[4] = {-1, -1, -1, -1};
37707       SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
37708       SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);
37709 
37710       if (Lo && Hi) {
37711         V1 = Lo;
37712         V2 = Hi;
37713         Shuffle = X86ISD::SHUFP;
37714         ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
37715         PermuteImm = getV4X86ShuffleImm(ShufMask);
37716         return true;
37717       }
37718     }
37719   }
37720 
37721   // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
37722   if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
37723       MaskVT.is128BitVector() &&
37724       matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
37725     Shuffle = X86ISD::INSERTPS;
37726     ShuffleVT = MVT::v4f32;
37727     return true;
37728   }
37729 
37730   return false;
37731 }
37732 
37733 static SDValue combineX86ShuffleChainWithExtract(
37734     ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
37735     bool HasVariableMask, bool AllowVariableCrossLaneMask,
37736     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
37737     const X86Subtarget &Subtarget);
37738 
37739 /// Combine an arbitrary chain of shuffles into a single instruction if
37740 /// possible.
37741 ///
37742 /// This is the leaf of the recursive combine below. When we have found some
37743 /// chain of single-use x86 shuffle instructions and accumulated the combined
37744 /// shuffle mask represented by them, this will try to pattern match that mask
37745 /// into either a single instruction if there is a special purpose instruction
37746 /// for this operation, or into a PSHUFB instruction which is a fully general
37747 /// instruction but should only be used to replace chains over a certain depth.
37748 static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
37749                                       ArrayRef<int> BaseMask, int Depth,
37750                                       bool HasVariableMask,
37751                                       bool AllowVariableCrossLaneMask,
37752                                       bool AllowVariablePerLaneMask,
37753                                       SelectionDAG &DAG,
37754                                       const X86Subtarget &Subtarget) {
37755   assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
37756   assert((Inputs.size() == 1 || Inputs.size() == 2) &&
37757          "Unexpected number of shuffle inputs!");
37758 
37759   SDLoc DL(Root);
37760   MVT RootVT = Root.getSimpleValueType();
37761   unsigned RootSizeInBits = RootVT.getSizeInBits();
37762   unsigned NumRootElts = RootVT.getVectorNumElements();
37763 
37764   // Canonicalize shuffle input op to the requested type.
37765   auto CanonicalizeShuffleInput = [&](MVT VT, SDValue Op) {
37766     if (VT.getSizeInBits() > Op.getValueSizeInBits())
37767       Op = widenSubVector(Op, false, Subtarget, DAG, DL, VT.getSizeInBits());
37768     else if (VT.getSizeInBits() < Op.getValueSizeInBits())
37769       Op = extractSubVector(Op, 0, DAG, DL, VT.getSizeInBits());
37770     return DAG.getBitcast(VT, Op);
37771   };
37772 
37773   // Find the inputs that enter the chain. Note that multiple uses are OK
37774   // here, we're not going to remove the operands we find.
37775   bool UnaryShuffle = (Inputs.size() == 1);
37776   SDValue V1 = peekThroughBitcasts(Inputs[0]);
37777   SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
37778                              : peekThroughBitcasts(Inputs[1]));
37779 
37780   MVT VT1 = V1.getSimpleValueType();
37781   MVT VT2 = V2.getSimpleValueType();
37782   assert((RootSizeInBits % VT1.getSizeInBits()) == 0 &&
37783          (RootSizeInBits % VT2.getSizeInBits()) == 0 && "Vector size mismatch");
37784 
37785   SDValue Res;
37786 
37787   unsigned NumBaseMaskElts = BaseMask.size();
37788   if (NumBaseMaskElts == 1) {
37789     assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
37790     return CanonicalizeShuffleInput(RootVT, V1);
37791   }
37792 
37793   bool OptForSize = DAG.shouldOptForSize();
37794   unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
37795   bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
37796                      (RootVT.isFloatingPoint() && Depth >= 1) ||
37797                      (RootVT.is256BitVector() && !Subtarget.hasAVX2());
37798 
37799   // Don't combine if we are an AVX512/EVEX target and the mask element size
37800   // is different from the root element size - this would prevent writemasks
37801   // from being reused.
37802   bool IsMaskedShuffle = false;
37803   if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) {
37804     if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT &&
37805         Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
37806       IsMaskedShuffle = true;
37807     }
37808   }
37809 
37810   // If we are shuffling a splat (and not introducing zeros) then we can just
37811   // use it directly. This works for smaller elements as well as they already
37812   // repeat across each mask element.
37813   if (UnaryShuffle && !isAnyZero(BaseMask) &&
37814       V1.getValueSizeInBits() >= RootSizeInBits &&
37815       (BaseMaskEltSizeInBits % V1.getScalarValueSizeInBits()) == 0 &&
37816       DAG.isSplatValue(V1, /*AllowUndefs*/ false)) {
37817     return CanonicalizeShuffleInput(RootVT, V1);
37818   }
37819 
37820   SmallVector<int, 64> Mask(BaseMask);
37821 
37822   // See if the shuffle is a hidden identity shuffle - repeated args in HOPs
37823   // etc. can be simplified.
37824   if (VT1 == VT2 && VT1.getSizeInBits() == RootSizeInBits && VT1.isVector()) {
37825     SmallVector<int> ScaledMask, IdentityMask;
37826     unsigned NumElts = VT1.getVectorNumElements();
37827     if (Mask.size() <= NumElts &&
37828         scaleShuffleElements(Mask, NumElts, ScaledMask)) {
37829       for (unsigned i = 0; i != NumElts; ++i)
37830         IdentityMask.push_back(i);
37831       if (isTargetShuffleEquivalent(RootVT, ScaledMask, IdentityMask, DAG, V1,
37832                                     V2))
37833         return CanonicalizeShuffleInput(RootVT, V1);
37834     }
37835   }
37836 
37837   // Handle 128/256-bit lane shuffles of 512-bit vectors.
37838   if (RootVT.is512BitVector() &&
37839       (NumBaseMaskElts == 2 || NumBaseMaskElts == 4)) {
37840     // If the upper subvectors are zeroable, then an extract+insert is more
37841     // optimal than using X86ISD::SHUF128. The insertion is free, even if it has
37842     // to zero the upper subvectors.
37843     if (isUndefOrZeroInRange(Mask, 1, NumBaseMaskElts - 1)) {
37844       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37845         return SDValue(); // Nothing to do!
37846       assert(isInRange(Mask[0], 0, NumBaseMaskElts) &&
37847              "Unexpected lane shuffle");
37848       Res = CanonicalizeShuffleInput(RootVT, V1);
37849       unsigned SubIdx = Mask[0] * (NumRootElts / NumBaseMaskElts);
37850       bool UseZero = isAnyZero(Mask);
37851       Res = extractSubVector(Res, SubIdx, DAG, DL, BaseMaskEltSizeInBits);
37852       return widenSubVector(Res, UseZero, Subtarget, DAG, DL, RootSizeInBits);
37853     }
37854 
37855     // Narrow shuffle mask to v4x128.
37856     SmallVector<int, 4> ScaledMask;
37857     assert((BaseMaskEltSizeInBits % 128) == 0 && "Illegal mask size");
37858     narrowShuffleMaskElts(BaseMaskEltSizeInBits / 128, Mask, ScaledMask);
37859 
37860     // Try to lower to vshuf64x2/vshuf32x4.
37861     auto MatchSHUF128 = [&](MVT ShuffleVT, const SDLoc &DL,
37862                             ArrayRef<int> ScaledMask, SDValue V1, SDValue V2,
37863                             SelectionDAG &DAG) {
37864       int PermMask[4] = {-1, -1, -1, -1};
37865       // Ensure elements came from the same Op.
37866       SDValue Ops[2] = {DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT)};
37867       for (int i = 0; i < 4; ++i) {
37868         assert(ScaledMask[i] >= -1 && "Illegal shuffle sentinel value");
37869         if (ScaledMask[i] < 0)
37870           continue;
37871 
37872         SDValue Op = ScaledMask[i] >= 4 ? V2 : V1;
37873         unsigned OpIndex = i / 2;
37874         if (Ops[OpIndex].isUndef())
37875           Ops[OpIndex] = Op;
37876         else if (Ops[OpIndex] != Op)
37877           return SDValue();
37878 
37879         PermMask[i] = ScaledMask[i] % 4;
37880       }
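      // Sketch of the encoding (illustrative): results 0-1 come from Ops[0]
      // and results 2-3 from Ops[1]; each PermMask entry is a 128-bit lane
      // index, e.g. ScaledMask {0,1,4,5} gives PermMask {0,1,0,1}.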
37881 
37882       return DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
37883                          CanonicalizeShuffleInput(ShuffleVT, Ops[0]),
37884                          CanonicalizeShuffleInput(ShuffleVT, Ops[1]),
37885                          getV4X86ShuffleImm8ForMask(PermMask, DL, DAG));
37886     };
37887 
37888     // FIXME: Is there a better way to do this? is256BitLaneRepeatedShuffleMask
37889     // doesn't work because our mask is for 128 bits and we don't have an MVT
37890     // to match that.
37891     bool PreferPERMQ = UnaryShuffle && isUndefOrInRange(ScaledMask[0], 0, 2) &&
37892                        isUndefOrInRange(ScaledMask[1], 0, 2) &&
37893                        isUndefOrInRange(ScaledMask[2], 2, 4) &&
37894                        isUndefOrInRange(ScaledMask[3], 2, 4) &&
37895                        (ScaledMask[0] < 0 || ScaledMask[2] < 0 ||
37896                         ScaledMask[0] == (ScaledMask[2] % 2)) &&
37897                        (ScaledMask[1] < 0 || ScaledMask[3] < 0 ||
37898                         ScaledMask[1] == (ScaledMask[3] % 2));
37899 
37900     if (!isAnyZero(ScaledMask) && !PreferPERMQ) {
37901       if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
37902         return SDValue(); // Nothing to do!
37903       MVT ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);
37904       if (SDValue V = MatchSHUF128(ShuffleVT, DL, ScaledMask, V1, V2, DAG))
37905         return DAG.getBitcast(RootVT, V);
37906     }
37907   }
37908 
37909   // Handle 128-bit lane shuffles of 256-bit vectors.
37910   if (RootVT.is256BitVector() && NumBaseMaskElts == 2) {
37911     // If the upper half is zeroable, then an extract+insert is more optimal
37912     // than using X86ISD::VPERM2X128. The insertion is free, even if it has to
37913     // zero the upper half.
37914     if (isUndefOrZero(Mask[1])) {
37915       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37916         return SDValue(); // Nothing to do!
37917       assert(isInRange(Mask[0], 0, 2) && "Unexpected lane shuffle");
37918       Res = CanonicalizeShuffleInput(RootVT, V1);
37919       Res = extract128BitVector(Res, Mask[0] * (NumRootElts / 2), DAG, DL);
37920       return widenSubVector(Res, Mask[1] == SM_SentinelZero, Subtarget, DAG, DL,
37921                             256);
37922     }
37923 
37924     // If we're inserting the low subvector, an insert-subvector 'concat'
37925     // pattern is quicker than VPERM2X128.
37926     // TODO: Add AVX2 support instead of VPERMQ/VPERMPD.
37927     if (BaseMask[0] == 0 && (BaseMask[1] == 0 || BaseMask[1] == 2) &&
37928         !Subtarget.hasAVX2()) {
37929       if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
37930         return SDValue(); // Nothing to do!
37931       SDValue Lo = CanonicalizeShuffleInput(RootVT, V1);
37932       SDValue Hi = CanonicalizeShuffleInput(RootVT, BaseMask[1] == 0 ? V1 : V2);
37933       Hi = extractSubVector(Hi, 0, DAG, DL, 128);
37934       return insertSubVector(Lo, Hi, NumRootElts / 2, DAG, DL, 128);
37935     }
37936 
37937     if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
37938       return SDValue(); // Nothing to do!
37939 
37940     // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
37941     // we need to use the zeroing feature.
37942     // Prefer blends for sequential shuffles unless we are optimizing for size.
37943     if (UnaryShuffle &&
37944         !(Subtarget.hasAVX2() && isUndefOrInRange(Mask, 0, 2)) &&
37945         (OptForSize || !isSequentialOrUndefOrZeroInRange(Mask, 0, 2, 0))) {
37946       unsigned PermMask = 0;
37947       PermMask |= ((Mask[0] < 0 ? 0x8 : (Mask[0] & 1)) << 0);
37948       PermMask |= ((Mask[1] < 0 ? 0x8 : (Mask[1] & 1)) << 4);
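      // VPERM2X128 immediate as encoded here: bits [1:0] and [5:4] pick the
      // source 128-bit lane for the low/high result half, and 0x8 in either
      // nibble zeroes that half. E.g. Mask {1, SM_SentinelZero} yields 0x81.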
37949       return DAG.getNode(
37950           X86ISD::VPERM2X128, DL, RootVT, CanonicalizeShuffleInput(RootVT, V1),
37951           DAG.getUNDEF(RootVT), DAG.getTargetConstant(PermMask, DL, MVT::i8));
37952     }
37953 
37954     if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
37955       return SDValue(); // Nothing to do!
37956 
37957     // TODO - handle AVX512VL cases with X86ISD::SHUF128.
37958     if (!UnaryShuffle && !IsMaskedShuffle) {
37959       assert(llvm::all_of(Mask, [](int M) { return 0 <= M && M < 4; }) &&
37960              "Unexpected shuffle sentinel value");
37961       // Prefer blends to X86ISD::VPERM2X128.
37962       if (!((Mask[0] == 0 && Mask[1] == 3) || (Mask[0] == 2 && Mask[1] == 1))) {
37963         unsigned PermMask = 0;
37964         PermMask |= ((Mask[0] & 3) << 0);
37965         PermMask |= ((Mask[1] & 3) << 4);
37966         SDValue LHS = isInRange(Mask[0], 0, 2) ? V1 : V2;
37967         SDValue RHS = isInRange(Mask[1], 0, 2) ? V1 : V2;
37968         return DAG.getNode(X86ISD::VPERM2X128, DL, RootVT,
37969                           CanonicalizeShuffleInput(RootVT, LHS),
37970                           CanonicalizeShuffleInput(RootVT, RHS),
37971                           DAG.getTargetConstant(PermMask, DL, MVT::i8));
37972       }
37973     }
37974   }
37975 
37976   // For masks that have been widened to 128-bit elements or more,
37977   // narrow back down to 64-bit elements.
37978   if (BaseMaskEltSizeInBits > 64) {
37979     assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
37980     int MaskScale = BaseMaskEltSizeInBits / 64;
37981     SmallVector<int, 64> ScaledMask;
37982     narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
37983     Mask = std::move(ScaledMask);
37984   }
37985 
37986   // For masked shuffles, we're trying to match the root width for better
37987   // writemask folding, so attempt to scale the mask.
37988   // TODO - variable shuffles might need this to be widened again.
37989   if (IsMaskedShuffle && NumRootElts > Mask.size()) {
37990     assert((NumRootElts % Mask.size()) == 0 && "Illegal mask size");
37991     int MaskScale = NumRootElts / Mask.size();
37992     SmallVector<int, 64> ScaledMask;
37993     narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
37994     Mask = std::move(ScaledMask);
37995   }
37996 
37997   unsigned NumMaskElts = Mask.size();
37998   unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;
37999 
38000   // Determine the effective mask value type.
38001   FloatDomain &= (32 <= MaskEltSizeInBits);
38002   MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
38003                            : MVT::getIntegerVT(MaskEltSizeInBits);
38004   MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);
38005 
38006   // Only allow legal mask types.
38007   if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
38008     return SDValue();
38009 
38010   // Attempt to match the mask against known shuffle patterns.
38011   MVT ShuffleSrcVT, ShuffleVT;
38012   unsigned Shuffle, PermuteImm;
38013 
38014   // Which shuffle domains are permitted?
38015   // Permit domain crossing at higher combine depths.
38016   // TODO: Should we indicate which domain is preferred if both are allowed?
38017   bool AllowFloatDomain = FloatDomain || (Depth >= 3);
38018   bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
38019                         (!MaskVT.is256BitVector() || Subtarget.hasAVX2());
38020 
38021   // Determine zeroable mask elements.
38022   APInt KnownUndef, KnownZero;
38023   resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
38024   APInt Zeroable = KnownUndef | KnownZero;
38025 
38026   if (UnaryShuffle) {
38027     // Attempt to match against broadcast-from-vector.
38028     // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
38029     if ((Subtarget.hasAVX2() ||
38030          (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits)) &&
38031         (!IsMaskedShuffle || NumRootElts == NumMaskElts)) {
38032       if (isUndefOrEqual(Mask, 0)) {
38033         if (V1.getValueType() == MaskVT &&
38034             V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
38035             X86::mayFoldLoad(V1.getOperand(0), Subtarget)) {
38036           if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
38037             return SDValue(); // Nothing to do!
38038           Res = V1.getOperand(0);
38039           Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
38040           return DAG.getBitcast(RootVT, Res);
38041         }
38042         if (Subtarget.hasAVX2()) {
38043           if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
38044             return SDValue(); // Nothing to do!
38045           Res = CanonicalizeShuffleInput(MaskVT, V1);
38046           Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
38047           return DAG.getBitcast(RootVT, Res);
38048         }
38049       }
38050     }
38051 
38052     if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, V1,
38053                           DAG, Subtarget, Shuffle, ShuffleSrcVT, ShuffleVT) &&
38054         (!IsMaskedShuffle ||
38055          (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38056       if (Depth == 0 && Root.getOpcode() == Shuffle)
38057         return SDValue(); // Nothing to do!
38058       Res = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38059       Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
38060       return DAG.getBitcast(RootVT, Res);
38061     }
38062 
38063     if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
38064                                  AllowIntDomain, DAG, Subtarget, Shuffle, ShuffleVT,
38065                                  PermuteImm) &&
38066         (!IsMaskedShuffle ||
38067          (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38068       if (Depth == 0 && Root.getOpcode() == Shuffle)
38069         return SDValue(); // Nothing to do!
38070       Res = CanonicalizeShuffleInput(ShuffleVT, V1);
38071       Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
38072                         DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38073       return DAG.getBitcast(RootVT, Res);
38074     }
38075   }
38076 
38077   // Attempt to combine to INSERTPS, but only if the inserted element has come
38078   // from a scalar.
38079   // TODO: Handle other insertions here as well?
38080   if (!UnaryShuffle && AllowFloatDomain && RootSizeInBits == 128 &&
38081       Subtarget.hasSSE41() &&
38082       !isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG)) {
38083     if (MaskEltSizeInBits == 32) {
38084       SDValue SrcV1 = V1, SrcV2 = V2;
38085       if (matchShuffleAsInsertPS(SrcV1, SrcV2, PermuteImm, Zeroable, Mask,
38086                                  DAG) &&
38087           SrcV2.getOpcode() == ISD::SCALAR_TO_VECTOR) {
38088         if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
38089           return SDValue(); // Nothing to do!
38090         Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
38091                           CanonicalizeShuffleInput(MVT::v4f32, SrcV1),
38092                           CanonicalizeShuffleInput(MVT::v4f32, SrcV2),
38093                           DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38094         return DAG.getBitcast(RootVT, Res);
38095       }
38096     }
38097     if (MaskEltSizeInBits == 64 &&
38098         isTargetShuffleEquivalent(MaskVT, Mask, {0, 2}, DAG) &&
38099         V2.getOpcode() == ISD::SCALAR_TO_VECTOR &&
38100         V2.getScalarValueSizeInBits() <= 32) {
38101       if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
38102         return SDValue(); // Nothing to do!
38103       PermuteImm = (/*DstIdx*/ 2 << 4) | (/*SrcIdx*/ 0 << 0);
38104       Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
38105                         CanonicalizeShuffleInput(MVT::v4f32, V1),
38106                         CanonicalizeShuffleInput(MVT::v4f32, V2),
38107                         DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38108       return DAG.getBitcast(RootVT, Res);
38109     }
38110   }
38111 
38112   SDValue NewV1 = V1; // Save operands in case early exit happens.
38113   SDValue NewV2 = V2;
38114   if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
38115                          NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
38116                          ShuffleVT, UnaryShuffle) &&
38117       (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38118     if (Depth == 0 && Root.getOpcode() == Shuffle)
38119       return SDValue(); // Nothing to do!
38120     NewV1 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV1);
38121     NewV2 = CanonicalizeShuffleInput(ShuffleSrcVT, NewV2);
38122     Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
38123     return DAG.getBitcast(RootVT, Res);
38124   }
38125 
38126   NewV1 = V1; // Save operands in case early exit happens.
38127   NewV2 = V2;
38128   if (matchBinaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
38129                                 AllowIntDomain, NewV1, NewV2, DL, DAG,
38130                                 Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
38131       (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
38132     if (Depth == 0 && Root.getOpcode() == Shuffle)
38133       return SDValue(); // Nothing to do!
38134     NewV1 = CanonicalizeShuffleInput(ShuffleVT, NewV1);
38135     NewV2 = CanonicalizeShuffleInput(ShuffleVT, NewV2);
38136     Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
38137                       DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
38138     return DAG.getBitcast(RootVT, Res);
38139   }
38140 
38141   // Typically from here on, we need an integer version of MaskVT.
38142   MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
38143   IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);
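        // E.g. a 256-bit root shuffled with 32-bit mask elements gives
        // IntMaskVT == v8i32.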
38144 
38145   // Annoyingly, SSE4A instructions don't map into the above match helpers.
38146   if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
38147     uint64_t BitLen, BitIdx;
38148     if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
38149                             Zeroable)) {
38150       if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
38151         return SDValue(); // Nothing to do!
38152       V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
38153       Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
38154                         DAG.getTargetConstant(BitLen, DL, MVT::i8),
38155                         DAG.getTargetConstant(BitIdx, DL, MVT::i8));
38156       return DAG.getBitcast(RootVT, Res);
38157     }
38158 
38159     if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
38160       if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
38161         return SDValue(); // Nothing to do!
38162       V1 = CanonicalizeShuffleInput(IntMaskVT, V1);
38163       V2 = CanonicalizeShuffleInput(IntMaskVT, V2);
38164       Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
38165                         DAG.getTargetConstant(BitLen, DL, MVT::i8),
38166                         DAG.getTargetConstant(BitIdx, DL, MVT::i8));
38167       return DAG.getBitcast(RootVT, Res);
38168     }
38169   }
38170 
38171   // Match shuffle against TRUNCATE patterns.
38172   if (AllowIntDomain && MaskEltSizeInBits < 64 && Subtarget.hasAVX512()) {
38173     // Match against a VTRUNC instruction, accounting for src/dst sizes.
38174     if (matchShuffleAsVTRUNC(ShuffleSrcVT, ShuffleVT, IntMaskVT, Mask, Zeroable,
38175                              Subtarget)) {
38176       bool IsTRUNCATE = ShuffleVT.getVectorNumElements() ==
38177                         ShuffleSrcVT.getVectorNumElements();
38178       unsigned Opc =
38179           IsTRUNCATE ? (unsigned)ISD::TRUNCATE : (unsigned)X86ISD::VTRUNC;
38180       if (Depth == 0 && Root.getOpcode() == Opc)
38181         return SDValue(); // Nothing to do!
38182       V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38183       Res = DAG.getNode(Opc, DL, ShuffleVT, V1);
38184       if (ShuffleVT.getSizeInBits() < RootSizeInBits)
38185         Res = widenSubVector(Res, true, Subtarget, DAG, DL, RootSizeInBits);
38186       return DAG.getBitcast(RootVT, Res);
38187     }
38188 
38189     // Do we need a more general binary truncation pattern?
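          // E.g. a mask of {0, 2, 4, ...} across both inputs selects the even
          // elements of CONCAT(V1, V2), which is a truncation from elements
          // of width 2 * MaskEltSizeInBits.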
38190     if (RootSizeInBits < 512 &&
38191         ((RootVT.is256BitVector() && Subtarget.useAVX512Regs()) ||
38192          (RootVT.is128BitVector() && Subtarget.hasVLX())) &&
38193         (MaskEltSizeInBits > 8 || Subtarget.hasBWI()) &&
38194         isSequentialOrUndefInRange(Mask, 0, NumMaskElts, 0, 2)) {
38195       // Bail if this was already a truncation or PACK node.
38196       // We sometimes fail to match PACK if we demand known undef elements.
38197       if (Depth == 0 && (Root.getOpcode() == ISD::TRUNCATE ||
38198                          Root.getOpcode() == X86ISD::PACKSS ||
38199                          Root.getOpcode() == X86ISD::PACKUS))
38200         return SDValue(); // Nothing to do!
38201       ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
38202       ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts / 2);
38203       V1 = CanonicalizeShuffleInput(ShuffleSrcVT, V1);
38204       V2 = CanonicalizeShuffleInput(ShuffleSrcVT, V2);
38205       ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
38206       ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts);
38207       Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShuffleSrcVT, V1, V2);
38208       Res = DAG.getNode(ISD::TRUNCATE, DL, IntMaskVT, Res);
38209       return DAG.getBitcast(RootVT, Res);
38210     }
38211   }
38212 
38213   // Don't try to re-form single instruction chains under any circumstances now
38214   // that we've done encoding canonicalization for them.
38215   if (Depth < 1)
38216     return SDValue();
38217 
38218   // Depth threshold above which we can efficiently use variable mask shuffles.
38219   int VariableCrossLaneShuffleDepth =
38220       Subtarget.hasFastVariableCrossLaneShuffle() ? 1 : 2;
38221   int VariablePerLaneShuffleDepth =
38222       Subtarget.hasFastVariablePerLaneShuffle() ? 1 : 2;
38223   AllowVariableCrossLaneMask &=
38224       (Depth >= VariableCrossLaneShuffleDepth) || HasVariableMask;
38225   AllowVariablePerLaneMask &=
38226       (Depth >= VariablePerLaneShuffleDepth) || HasVariableMask;
38227   // VPERMI2W/VPERMI2B are 3 uops on Skylake and Icelake, so we require a
38228   // higher depth before combining them.
38229   bool AllowBWIVPERMV3 =
38230       (Depth >= (VariableCrossLaneShuffleDepth + 2) || HasVariableMask);
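        // E.g. without fast variable shuffles, variable mask shuffles are only
        // used at depth 2 or more (depth 4 for the 3-uop VPERMI2 forms),
        // unless the chain already contained a variable mask.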
38231 
38232   bool MaskContainsZeros = isAnyZero(Mask);
38233 
38234   if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
38235     // If we have a single input lane-crossing shuffle then lower to VPERMV.
38236     if (UnaryShuffle && AllowVariableCrossLaneMask && !MaskContainsZeros) {
38237       if (Subtarget.hasAVX2() &&
38238           (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) {
38239         SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
38240         Res = CanonicalizeShuffleInput(MaskVT, V1);
38241         Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
38242         return DAG.getBitcast(RootVT, Res);
38243       }
38244       // AVX512 variants (non-VLX will pad to 512-bit shuffles).
38245       if ((Subtarget.hasAVX512() &&
38246            (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38247             MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
38248           (Subtarget.hasBWI() &&
38249            (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38250           (Subtarget.hasVBMI() &&
38251            (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8))) {
38252         V1 = CanonicalizeShuffleInput(MaskVT, V1);
38253         V2 = DAG.getUNDEF(MaskVT);
38254         Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38255         return DAG.getBitcast(RootVT, Res);
38256       }
38257     }
38258 
38259     // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
38260     // vector as the second source (non-VLX will pad to 512-bit shuffles).
38261     if (UnaryShuffle && AllowVariableCrossLaneMask &&
38262         ((Subtarget.hasAVX512() &&
38263           (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38264            MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
38265            MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32 ||
38266            MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
38267          (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38268           (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38269          (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38270           (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
38271       // Adjust shuffle mask - replace SM_SentinelZero with second source index.
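            // E.g. for v8i32 a zeroed element at position i becomes index
            // 8 + i, selecting element i of the zero vector in V2.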
38272       for (unsigned i = 0; i != NumMaskElts; ++i)
38273         if (Mask[i] == SM_SentinelZero)
38274           Mask[i] = NumMaskElts + i;
38275       V1 = CanonicalizeShuffleInput(MaskVT, V1);
38276       V2 = getZeroVector(MaskVT, Subtarget, DAG, DL);
38277       Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38278       return DAG.getBitcast(RootVT, Res);
38279     }
38280 
38281     // If that failed and either input is extracted then try to combine as a
38282     // shuffle with the larger type.
38283     if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
38284             Inputs, Root, BaseMask, Depth, HasVariableMask,
38285             AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG,
38286             Subtarget))
38287       return WideShuffle;
38288 
38289     // If we have a dual input lane-crossing shuffle then lower to VPERMV3,
38290     // (non-VLX will pad to 512-bit shuffles).
38291     if (AllowVariableCrossLaneMask && !MaskContainsZeros &&
38292         ((Subtarget.hasAVX512() &&
38293           (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
38294            MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
38295            MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32 ||
38296            MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
38297          (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38298           (MaskVT == MVT::v16i16 || MaskVT == MVT::v32i16)) ||
38299          (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38300           (MaskVT == MVT::v32i8 || MaskVT == MVT::v64i8)))) {
38301       V1 = CanonicalizeShuffleInput(MaskVT, V1);
38302       V2 = CanonicalizeShuffleInput(MaskVT, V2);
38303       Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38304       return DAG.getBitcast(RootVT, Res);
38305     }
38306     return SDValue();
38307   }
38308 
38309   // See if we can combine a single input shuffle with zeros to a bit-mask,
38310   // which is much simpler than any shuffle.
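        // E.g. a v4i32 shuffle that keeps elements 0 and 2 and zeroes elements
        // 1 and 3 becomes an AND with the constant {-1, 0, -1, 0}.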
38311   if (UnaryShuffle && MaskContainsZeros && AllowVariablePerLaneMask &&
38312       isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
38313       DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
38314     APInt Zero = APInt::getZero(MaskEltSizeInBits);
38315     APInt AllOnes = APInt::getAllOnes(MaskEltSizeInBits);
38316     APInt UndefElts(NumMaskElts, 0);
38317     SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
38318     for (unsigned i = 0; i != NumMaskElts; ++i) {
38319       int M = Mask[i];
38320       if (M == SM_SentinelUndef) {
38321         UndefElts.setBit(i);
38322         continue;
38323       }
38324       if (M == SM_SentinelZero)
38325         continue;
38326       EltBits[i] = AllOnes;
38327     }
38328     SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
38329     Res = CanonicalizeShuffleInput(MaskVT, V1);
38330     unsigned AndOpcode =
38331         MaskVT.isFloatingPoint() ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
38332     Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
38333     return DAG.getBitcast(RootVT, Res);
38334   }
38335 
38336   // If we have a single input shuffle with different shuffle patterns in the
38337   // 128-bit lanes, use the variable mask form of VPERMILPS.
38338   // TODO: Combine other mask types at higher depths.
38339   if (UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
38340       ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
38341        (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
38342     SmallVector<SDValue, 16> VPermIdx;
38343     for (int M : Mask) {
38344       SDValue Idx =
38345           M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
38346       VPermIdx.push_back(Idx);
38347     }
38348     SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
38349     Res = CanonicalizeShuffleInput(MaskVT, V1);
38350     Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
38351     return DAG.getBitcast(RootVT, Res);
38352   }
38353 
38354   // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
38355   // to VPERMIL2PD/VPERMIL2PS.
38356   if (AllowVariablePerLaneMask && Subtarget.hasXOP() &&
38357       (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
38358        MaskVT == MVT::v8f32)) {
38359     // VPERMIL2 Operation.
38360     // Bits[3] - Match Bit.
38361     // Bits[2:1] - (Per Lane) PD Shuffle Mask.
38362     // Bits[2:0] - (Per Lane) PS Shuffle Mask.
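          // E.g. for v8f32 (2 lanes of 4 elements) a mask value of 9 (element
          // 1 of the second source) becomes selector (9 % 4) + ((9 / 8) * 4)
          // == 5.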
38363     unsigned NumLanes = MaskVT.getSizeInBits() / 128;
38364     unsigned NumEltsPerLane = NumMaskElts / NumLanes;
38365     SmallVector<int, 8> VPerm2Idx;
38366     unsigned M2ZImm = 0;
38367     for (int M : Mask) {
38368       if (M == SM_SentinelUndef) {
38369         VPerm2Idx.push_back(-1);
38370         continue;
38371       }
38372       if (M == SM_SentinelZero) {
38373         M2ZImm = 2;
38374         VPerm2Idx.push_back(8);
38375         continue;
38376       }
38377       int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
38378       Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
38379       VPerm2Idx.push_back(Index);
38380     }
38381     V1 = CanonicalizeShuffleInput(MaskVT, V1);
38382     V2 = CanonicalizeShuffleInput(MaskVT, V2);
38383     SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
38384     Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
38385                       DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
38386     return DAG.getBitcast(RootVT, Res);
38387   }
38388 
38389   // If we have 3 or more shuffle instructions or a chain involving a variable
38390   // mask, we can replace them with a single PSHUFB instruction profitably.
38391   // Intel's manuals suggest only using PSHUFB if doing so replaces 5 or more
38392   // instructions, but in practice PSHUFB tends to be *very* fast so we're
38393   // more aggressive.
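        // PSHUFB mask bytes: setting bit 7 (0x80) zeroes the result byte,
        // otherwise the low bits index a byte within the same 128-bit lane.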
38394   if (UnaryShuffle && AllowVariablePerLaneMask &&
38395       ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
38396        (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
38397        (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
38398     SmallVector<SDValue, 16> PSHUFBMask;
38399     int NumBytes = RootVT.getSizeInBits() / 8;
38400     int Ratio = NumBytes / NumMaskElts;
38401     for (int i = 0; i < NumBytes; ++i) {
38402       int M = Mask[i / Ratio];
38403       if (M == SM_SentinelUndef) {
38404         PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
38405         continue;
38406       }
38407       if (M == SM_SentinelZero) {
38408         PSHUFBMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
38409         continue;
38410       }
38411       M = Ratio * M + i % Ratio;
38412       assert((M / 16) == (i / 16) && "Lane crossing detected");
38413       PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
38414     }
38415     MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
38416     Res = CanonicalizeShuffleInput(ByteVT, V1);
38417     SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
38418     Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
38419     return DAG.getBitcast(RootVT, Res);
38420   }
38421 
38422   // With XOP, if we have a 128-bit binary input shuffle we can always combine
38423   // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
38424   // slower than PSHUFB on targets that support both.
38425   if (AllowVariablePerLaneMask && RootVT.is128BitVector() &&
38426       Subtarget.hasXOP()) {
38427     // VPPERM Mask Operation
38428     // Bits[4:0] - Byte Index (0 - 31)
38429     // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
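          // E.g. a mask byte of 0x80 (operation 4) produces a zero result
          // byte, which is what we use for SM_SentinelZero below.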
38430     SmallVector<SDValue, 16> VPPERMMask;
38431     int NumBytes = 16;
38432     int Ratio = NumBytes / NumMaskElts;
38433     for (int i = 0; i < NumBytes; ++i) {
38434       int M = Mask[i / Ratio];
38435       if (M == SM_SentinelUndef) {
38436         VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
38437         continue;
38438       }
38439       if (M == SM_SentinelZero) {
38440         VPPERMMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
38441         continue;
38442       }
38443       M = Ratio * M + i % Ratio;
38444       VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
38445     }
38446     MVT ByteVT = MVT::v16i8;
38447     V1 = CanonicalizeShuffleInput(ByteVT, V1);
38448     V2 = CanonicalizeShuffleInput(ByteVT, V2);
38449     SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
38450     Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
38451     return DAG.getBitcast(RootVT, Res);
38452   }
38453 
38454   // If that failed and either input is extracted then try to combine as a
38455   // shuffle with the larger type.
38456   if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
38457           Inputs, Root, BaseMask, Depth, HasVariableMask,
38458           AllowVariableCrossLaneMask, AllowVariablePerLaneMask, DAG, Subtarget))
38459     return WideShuffle;
38460 
38461   // If we have a dual input shuffle then lower to VPERMV3,
38462   // (non-VLX will pad to 512-bit shuffles)
38463   if (!UnaryShuffle && AllowVariablePerLaneMask && !MaskContainsZeros &&
38464       ((Subtarget.hasAVX512() &&
38465         (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v8f64 ||
38466          MaskVT == MVT::v2i64 || MaskVT == MVT::v4i64 || MaskVT == MVT::v8i64 ||
38467          MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 || MaskVT == MVT::v8f32 ||
38468          MaskVT == MVT::v8i32 || MaskVT == MVT::v16f32 ||
38469          MaskVT == MVT::v16i32)) ||
38470        (Subtarget.hasBWI() && AllowBWIVPERMV3 &&
38471         (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16 ||
38472          MaskVT == MVT::v32i16)) ||
38473        (Subtarget.hasVBMI() && AllowBWIVPERMV3 &&
38474         (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8 ||
38475          MaskVT == MVT::v64i8)))) {
38476     V1 = CanonicalizeShuffleInput(MaskVT, V1);
38477     V2 = CanonicalizeShuffleInput(MaskVT, V2);
38478     Res = lowerShuffleWithPERMV(DL, MaskVT, Mask, V1, V2, Subtarget, DAG);
38479     return DAG.getBitcast(RootVT, Res);
38480   }
38481 
38482   // Failed to find any combines.
38483   return SDValue();
38484 }
38485 
38486 // Combine an arbitrary chain of shuffles + extract_subvectors into a single
38487 // instruction if possible.
38488 //
38489 // Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
38490 // type size to attempt to combine:
38491 // shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
38492 // -->
38493 // extract_subvector(shuffle(x,y,m2),0)
38494 static SDValue combineX86ShuffleChainWithExtract(
38495     ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
38496     bool HasVariableMask, bool AllowVariableCrossLaneMask,
38497     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
38498     const X86Subtarget &Subtarget) {
38499   unsigned NumMaskElts = BaseMask.size();
38500   unsigned NumInputs = Inputs.size();
38501   if (NumInputs == 0)
38502     return SDValue();
38503 
38504   EVT RootVT = Root.getValueType();
38505   unsigned RootSizeInBits = RootVT.getSizeInBits();
38506   unsigned RootEltSizeInBits = RootSizeInBits / NumMaskElts;
38507   assert((RootSizeInBits % NumMaskElts) == 0 && "Unexpected root shuffle mask");
38508 
38509   // Peek through extract_subvector to find widest legal vector.
38510   // TODO: Handle ISD::TRUNCATE
38511   unsigned WideSizeInBits = RootSizeInBits;
38512   for (unsigned I = 0; I != NumInputs; ++I) {
38513     SDValue Input = peekThroughBitcasts(Inputs[I]);
38514     while (Input.getOpcode() == ISD::EXTRACT_SUBVECTOR)
38515       Input = peekThroughBitcasts(Input.getOperand(0));
38516     if (DAG.getTargetLoweringInfo().isTypeLegal(Input.getValueType()) &&
38517         WideSizeInBits < Input.getValueSizeInBits())
38518       WideSizeInBits = Input.getValueSizeInBits();
38519   }
38520 
38521   // Bail if we fail to find a source larger than the existing root.
38522   unsigned Scale = WideSizeInBits / RootSizeInBits;
38523   if (WideSizeInBits <= RootSizeInBits ||
38524       (WideSizeInBits % RootSizeInBits) != 0)
38525     return SDValue();
38526 
38527   // Create new mask for larger type.
38528   SmallVector<int, 64> WideMask(BaseMask);
38529   for (int &M : WideMask) {
38530     if (M < 0)
38531       continue;
38532     M = (M % NumMaskElts) + ((M / NumMaskElts) * Scale * NumMaskElts);
38533   }
38534   WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);
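        // E.g. with 4 mask elements and Scale == 2, the mask {0, 5, 2, 7}
        // becomes {0, 9, 2, 11, U, U, U, U} since each widened input now spans
        // 8 mask elements.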
38535 
38536   // Attempt to peek through inputs and adjust mask when we extract from an
38537   // upper subvector.
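        // E.g. for a 128-bit root with 32-bit mask elements, extracting a
        // v4i32 from element 4 of a v8i32 source offsets that input's mask
        // values by 4.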
38538   int AdjustedMasks = 0;
38539   SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
38540   for (unsigned I = 0; I != NumInputs; ++I) {
38541     SDValue &Input = WideInputs[I];
38542     Input = peekThroughBitcasts(Input);
38543     while (Input.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
38544            Input.getOperand(0).getValueSizeInBits() <= WideSizeInBits) {
38545       uint64_t Idx = Input.getConstantOperandVal(1);
38546       if (Idx != 0) {
38547         ++AdjustedMasks;
38548         unsigned InputEltSizeInBits = Input.getScalarValueSizeInBits();
38549         Idx = (Idx * InputEltSizeInBits) / RootEltSizeInBits;
38550 
38551         int lo = I * WideMask.size();
38552         int hi = (I + 1) * WideMask.size();
38553         for (int &M : WideMask)
38554           if (lo <= M && M < hi)
38555             M += Idx;
38556       }
38557       Input = peekThroughBitcasts(Input.getOperand(0));
38558     }
38559   }
38560 
38561   // Remove unused/repeated shuffle source ops.
38562   resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
38563   assert(!WideInputs.empty() && "Shuffle with no inputs detected");
38564 
38565   // Bail if we're always extracting from the lowest subvectors
38566   // (combineX86ShuffleChain should match this for the current width), or if
38567   // the shuffle still references too many inputs.
38568   if (AdjustedMasks == 0 || WideInputs.size() > 2)
38569     return SDValue();
38570 
38571   // Minor canonicalization of the accumulated shuffle mask to make it easier
38572   // to match below. All this does is detect masks with sequential pairs of
38573   // elements, and shrink them to the half-width mask. It does this in a loop
38574   // so it will reduce the size of the mask to the minimal width mask which
38575   // performs an equivalent shuffle.
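        // E.g. {0, 1, 4, 5} shrinks to {0, 2}, and {0, 1, 2, 3} eventually
        // shrinks to {0}.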
38576   while (WideMask.size() > 1) {
38577     SmallVector<int, 64> WidenedMask;
38578     if (!canWidenShuffleElements(WideMask, WidenedMask))
38579       break;
38580     WideMask = std::move(WidenedMask);
38581   }
38582 
38583   // Canonicalization of binary shuffle masks to improve pattern matching by
38584   // commuting the inputs.
38585   if (WideInputs.size() == 2 && canonicalizeShuffleMaskWithCommute(WideMask)) {
38586     ShuffleVectorSDNode::commuteMask(WideMask);
38587     std::swap(WideInputs[0], WideInputs[1]);
38588   }
38589 
38590   // Increase depth for every upper subvector we've peeked through.
38591   Depth += AdjustedMasks;
38592 
38593   // Attempt to combine wider chain.
38594   // TODO: Can we use a better Root?
38595   SDValue WideRoot = WideInputs.front().getValueSizeInBits() >
38596                              WideInputs.back().getValueSizeInBits()
38597                          ? WideInputs.front()
38598                          : WideInputs.back();
38599   assert(WideRoot.getValueSizeInBits() == WideSizeInBits &&
38600          "WideRootSize mismatch");
38601 
38602   if (SDValue WideShuffle =
38603           combineX86ShuffleChain(WideInputs, WideRoot, WideMask, Depth,
38604                                  HasVariableMask, AllowVariableCrossLaneMask,
38605                                  AllowVariablePerLaneMask, DAG, Subtarget)) {
38606     WideShuffle =
38607         extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
38608     return DAG.getBitcast(RootVT, WideShuffle);
38609   }
38610 
38611   return SDValue();
38612 }
38613 
38614 // Canonicalize the combined shuffle mask chain with horizontal ops.
38615 // NOTE: This may update the Ops and Mask.
38616 static SDValue canonicalizeShuffleMaskWithHorizOp(
38617     MutableArrayRef<SDValue> Ops, MutableArrayRef<int> Mask,
38618     unsigned RootSizeInBits, const SDLoc &DL, SelectionDAG &DAG,
38619     const X86Subtarget &Subtarget) {
38620   if (Mask.empty() || Ops.empty())
38621     return SDValue();
38622 
38623   SmallVector<SDValue> BC;
38624   for (SDValue Op : Ops)
38625     BC.push_back(peekThroughBitcasts(Op));
38626 
38627   // All ops must be the same horizop + type.
38628   SDValue BC0 = BC[0];
38629   EVT VT0 = BC0.getValueType();
38630   unsigned Opcode0 = BC0.getOpcode();
38631   if (VT0.getSizeInBits() != RootSizeInBits || llvm::any_of(BC, [&](SDValue V) {
38632         return V.getOpcode() != Opcode0 || V.getValueType() != VT0;
38633       }))
38634     return SDValue();
38635 
38636   bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
38637                   Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB);
38638   bool isPack = (Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS);
38639   if (!isHoriz && !isPack)
38640     return SDValue();
38641 
38642   // Do all ops have a single use?
38643   bool OneUseOps = llvm::all_of(Ops, [](SDValue Op) {
38644     return Op.hasOneUse() &&
38645            peekThroughBitcasts(Op) == peekThroughOneUseBitcasts(Op);
38646   });
38647 
38648   int NumElts = VT0.getVectorNumElements();
38649   int NumLanes = VT0.getSizeInBits() / 128;
38650   int NumEltsPerLane = NumElts / NumLanes;
38651   int NumHalfEltsPerLane = NumEltsPerLane / 2;
38652   MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
38653   unsigned EltSizeInBits = RootSizeInBits / Mask.size();
38654 
38655   if (NumEltsPerLane >= 4 &&
38656       (isPack || shouldUseHorizontalOp(Ops.size() == 1, DAG, Subtarget))) {
38657     SmallVector<int> LaneMask, ScaledMask;
38658     if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, LaneMask) &&
38659         scaleShuffleElements(LaneMask, 4, ScaledMask)) {
38660       // See if we can remove the shuffle by resorting the HOP chain so that
38661       // the HOP args are pre-shuffled.
38662       // TODO: Generalize to any sized/depth chain.
38663       // TODO: Add support for PACKSS/PACKUS.
38664       if (isHoriz) {
38665         // Attempt to find a HOP(HOP(X,Y),HOP(Z,W)) source operand.
38666         auto GetHOpSrc = [&](int M) {
38667           if (M == SM_SentinelUndef)
38668             return DAG.getUNDEF(VT0);
38669           if (M == SM_SentinelZero)
38670             return getZeroVector(VT0.getSimpleVT(), Subtarget, DAG, DL);
38671           SDValue Src0 = BC[M / 4];
38672           SDValue Src1 = Src0.getOperand((M % 4) >= 2);
38673           if (Src1.getOpcode() == Opcode0 && Src0->isOnlyUserOf(Src1.getNode()))
38674             return Src1.getOperand(M % 2);
38675           return SDValue();
38676         };
38677         SDValue M0 = GetHOpSrc(ScaledMask[0]);
38678         SDValue M1 = GetHOpSrc(ScaledMask[1]);
38679         SDValue M2 = GetHOpSrc(ScaledMask[2]);
38680         SDValue M3 = GetHOpSrc(ScaledMask[3]);
38681         if (M0 && M1 && M2 && M3) {
38682           SDValue LHS = DAG.getNode(Opcode0, DL, SrcVT, M0, M1);
38683           SDValue RHS = DAG.getNode(Opcode0, DL, SrcVT, M2, M3);
38684           return DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
38685         }
38686       }
38687       // shuffle(hop(x,y),hop(z,w)) -> permute(hop(x,z)) etc.
38688       if (Ops.size() >= 2) {
38689         SDValue LHS, RHS;
38690         auto GetHOpSrc = [&](int M, int &OutM) {
38691           // TODO: Support SM_SentinelZero
38692           if (M < 0)
38693             return M == SM_SentinelUndef;
38694           SDValue Src = BC[M / 4].getOperand((M % 4) >= 2);
38695           if (!LHS || LHS == Src) {
38696             LHS = Src;
38697             OutM = (M % 2);
38698             return true;
38699           }
38700           if (!RHS || RHS == Src) {
38701             RHS = Src;
38702             OutM = (M % 2) + 2;
38703             return true;
38704           }
38705           return false;
38706         };
38707         int PostMask[4] = {-1, -1, -1, -1};
38708         if (GetHOpSrc(ScaledMask[0], PostMask[0]) &&
38709             GetHOpSrc(ScaledMask[1], PostMask[1]) &&
38710             GetHOpSrc(ScaledMask[2], PostMask[2]) &&
38711             GetHOpSrc(ScaledMask[3], PostMask[3])) {
38712           LHS = DAG.getBitcast(SrcVT, LHS);
38713           RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
38714           SDValue Res = DAG.getNode(Opcode0, DL, VT0, LHS, RHS);
38715           // Use SHUFPS for the permute so this will work on SSE2 targets,
38716           // shuffle combining and domain handling will simplify this later on.
38717           MVT ShuffleVT = MVT::getVectorVT(MVT::f32, RootSizeInBits / 32);
38718           Res = DAG.getBitcast(ShuffleVT, Res);
38719           return DAG.getNode(X86ISD::SHUFP, DL, ShuffleVT, Res, Res,
38720                              getV4X86ShuffleImm8ForMask(PostMask, DL, DAG));
38721         }
38722       }
38723     }
38724   }
38725 
38726   if (2 < Ops.size())
38727     return SDValue();
38728 
38729   SDValue BC1 = BC[BC.size() - 1];
38730   if (Mask.size() == VT0.getVectorNumElements()) {
38731     // Canonicalize binary shuffles of horizontal ops that use the
38732     // same sources to a unary shuffle.
38733     // TODO: Try to perform this fold even if the shuffle remains.
38734     if (Ops.size() == 2) {
38735       auto ContainsOps = [](SDValue HOp, SDValue Op) {
38736         return Op == HOp.getOperand(0) || Op == HOp.getOperand(1);
38737       };
38738       // Commute if all BC0's ops are contained in BC1.
38739       if (ContainsOps(BC1, BC0.getOperand(0)) &&
38740           ContainsOps(BC1, BC0.getOperand(1))) {
38741         ShuffleVectorSDNode::commuteMask(Mask);
38742         std::swap(Ops[0], Ops[1]);
38743         std::swap(BC0, BC1);
38744       }
38745 
38746       // If BC1 can be represented by BC0, then convert to unary shuffle.
38747       if (ContainsOps(BC0, BC1.getOperand(0)) &&
38748           ContainsOps(BC0, BC1.getOperand(1))) {
38749         for (int &M : Mask) {
38750           if (M < NumElts) // BC0 element or UNDEF/Zero sentinel.
38751             continue;
38752           int SubLane = ((M % NumEltsPerLane) >= NumHalfEltsPerLane) ? 1 : 0;
38753           M -= NumElts + (SubLane * NumHalfEltsPerLane);
38754           if (BC1.getOperand(SubLane) != BC0.getOperand(0))
38755             M += NumHalfEltsPerLane;
38756         }
38757       }
38758     }
38759 
38760     // Canonicalize unary horizontal ops to only refer to lower halves.
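          // E.g. HADD(X, X) produces the same values in the upper and lower
          // halves of each 128-bit lane, so upper-half references can be
          // remapped to the lower half.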
38761     for (int i = 0; i != NumElts; ++i) {
38762       int &M = Mask[i];
38763       if (isUndefOrZero(M))
38764         continue;
38765       if (M < NumElts && BC0.getOperand(0) == BC0.getOperand(1) &&
38766           (M % NumEltsPerLane) >= NumHalfEltsPerLane)
38767         M -= NumHalfEltsPerLane;
38768       if (NumElts <= M && BC1.getOperand(0) == BC1.getOperand(1) &&
38769           (M % NumEltsPerLane) >= NumHalfEltsPerLane)
38770         M -= NumHalfEltsPerLane;
38771     }
38772   }
38773 
38774   // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
38775   // single instruction. Attempt to match a v2X64 repeating shuffle pattern that
38776   // represents the LHS/RHS inputs for the lower/upper halves.
38777   SmallVector<int, 16> TargetMask128, WideMask128;
38778   if (isRepeatedTargetShuffleMask(128, EltSizeInBits, Mask, TargetMask128) &&
38779       scaleShuffleElements(TargetMask128, 2, WideMask128)) {
38780     assert(isUndefOrZeroOrInRange(WideMask128, 0, 4) && "Illegal shuffle");
38781     bool SingleOp = (Ops.size() == 1);
38782     if (isPack || OneUseOps ||
38783         shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
38784       SDValue Lo = isInRange(WideMask128[0], 0, 2) ? BC0 : BC1;
38785       SDValue Hi = isInRange(WideMask128[1], 0, 2) ? BC0 : BC1;
38786       Lo = Lo.getOperand(WideMask128[0] & 1);
38787       Hi = Hi.getOperand(WideMask128[1] & 1);
38788       if (SingleOp) {
38789         SDValue Undef = DAG.getUNDEF(SrcVT);
38790         SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
38791         Lo = (WideMask128[0] == SM_SentinelZero ? Zero : Lo);
38792         Hi = (WideMask128[1] == SM_SentinelZero ? Zero : Hi);
38793         Lo = (WideMask128[0] == SM_SentinelUndef ? Undef : Lo);
38794         Hi = (WideMask128[1] == SM_SentinelUndef ? Undef : Hi);
38795       }
38796       return DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
38797     }
38798   }
38799 
38800   // If we are post-shuffling a 256-bit hop and not requiring the upper
38801   // elements, then try to narrow to a 128-bit hop directly.
38802   SmallVector<int, 16> WideMask64;
38803   if (Ops.size() == 1 && NumLanes == 2 &&
38804       scaleShuffleElements(Mask, 4, WideMask64) &&
38805       isUndefInRange(WideMask64, 2, 2)) {
38806     int M0 = WideMask64[0];
38807     int M1 = WideMask64[1];
38808     if (isInRange(M0, 0, 4) && isInRange(M1, 0, 4)) {
38809       MVT HalfVT = VT0.getSimpleVT().getHalfNumVectorElementsVT();
38810       unsigned Idx0 = (M0 & 2) ? (SrcVT.getVectorNumElements() / 2) : 0;
38811       unsigned Idx1 = (M1 & 2) ? (SrcVT.getVectorNumElements() / 2) : 0;
38812       SDValue V0 = extract128BitVector(BC[0].getOperand(M0 & 1), Idx0, DAG, DL);
38813       SDValue V1 = extract128BitVector(BC[0].getOperand(M1 & 1), Idx1, DAG, DL);
38814       SDValue Res = DAG.getNode(Opcode0, DL, HalfVT, V0, V1);
38815       return widenSubVector(Res, false, Subtarget, DAG, DL, 256);
38816     }
38817   }
38818 
38819   return SDValue();
38820 }
38821 
38822 // Attempt to constant fold all of the constant source ops.
38823 // Returns true if the entire shuffle is folded to a constant.
38824 // TODO: Extend this to merge multiple constant Ops and update the mask.
38825 static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
38826                                            ArrayRef<int> Mask, SDValue Root,
38827                                            bool HasVariableMask,
38828                                            SelectionDAG &DAG,
38829                                            const X86Subtarget &Subtarget) {
38830   MVT VT = Root.getSimpleValueType();
38831 
38832   unsigned SizeInBits = VT.getSizeInBits();
38833   unsigned NumMaskElts = Mask.size();
38834   unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
38835   unsigned NumOps = Ops.size();
38836 
38837   // Extract constant bits from each source op.
38838   SmallVector<APInt, 16> UndefEltsOps(NumOps);
38839   SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
38840   for (unsigned I = 0; I != NumOps; ++I)
38841     if (!getTargetConstantBitsFromNode(Ops[I], MaskSizeInBits, UndefEltsOps[I],
38842                                        RawBitsOps[I]))
38843       return SDValue();
38844 
38845   // If we're optimizing for size, only fold if at least one of the constants
38846   // is only used once or the combined shuffle has included a variable mask
38847   // shuffle; this avoids constant pool bloat.
38848   bool IsOptimizingSize = DAG.shouldOptForSize();
38849   if (IsOptimizingSize && !HasVariableMask &&
38850       llvm::none_of(Ops, [](SDValue SrcOp) { return SrcOp->hasOneUse(); }))
38851     return SDValue();
38852 
38853   // Shuffle the constant bits according to the mask.
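        // E.g. with two v4i32 constant ops and mask {0, 5, Zero, Undef}:
        // element 0 takes Op0[0], element 1 takes Op1[1], element 2 is known
        // zero and element 3 is undef.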
38854   SDLoc DL(Root);
38855   APInt UndefElts(NumMaskElts, 0);
38856   APInt ZeroElts(NumMaskElts, 0);
38857   APInt ConstantElts(NumMaskElts, 0);
38858   SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
38859                                         APInt::getZero(MaskSizeInBits));
38860   for (unsigned i = 0; i != NumMaskElts; ++i) {
38861     int M = Mask[i];
38862     if (M == SM_SentinelUndef) {
38863       UndefElts.setBit(i);
38864       continue;
38865     } else if (M == SM_SentinelZero) {
38866       ZeroElts.setBit(i);
38867       continue;
38868     }
38869     assert(0 <= M && M < (int)(NumMaskElts * NumOps));
38870 
38871     unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
38872     unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;
38873 
38874     auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
38875     if (SrcUndefElts[SrcMaskIdx]) {
38876       UndefElts.setBit(i);
38877       continue;
38878     }
38879 
38880     auto &SrcEltBits = RawBitsOps[SrcOpIdx];
38881     APInt &Bits = SrcEltBits[SrcMaskIdx];
38882     if (!Bits) {
38883       ZeroElts.setBit(i);
38884       continue;
38885     }
38886 
38887     ConstantElts.setBit(i);
38888     ConstantBitData[i] = Bits;
38889   }
38890   assert((UndefElts | ZeroElts | ConstantElts).isAllOnes());
38891 
38892   // Attempt to create a zero vector.
38893   if ((UndefElts | ZeroElts).isAllOnes())
38894     return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);
38895 
38896   // Create the constant data.
38897   MVT MaskSVT;
38898   if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
38899     MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
38900   else
38901     MaskSVT = MVT::getIntegerVT(MaskSizeInBits);
38902 
38903   MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
38904   if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
38905     return SDValue();
38906 
38907   SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
38908   return DAG.getBitcast(VT, CstOp);
38909 }
38910 
38911 namespace llvm {
38912   namespace X86 {
38913     enum {
38914       MaxShuffleCombineDepth = 8
38915     };
38916   } // namespace X86
38917 } // namespace llvm
38918 
38919 /// Fully generic combining of x86 shuffle instructions.
38920 ///
38921 /// This should be the last combine run over the x86 shuffle instructions. Once
38922 /// they have been fully optimized, this will recursively consider all chains
38923 /// of single-use shuffle instructions, build a generic model of the cumulative
38924 /// shuffle operation, and check for simpler instructions which implement this
38925 /// operation. We use this primarily for two purposes:
38926 ///
38927 /// 1) Collapse generic shuffles to specialized single instructions when
38928 ///    equivalent. In most cases, this is just an encoding size win, but
38929 ///    sometimes we will collapse multiple generic shuffles into a single
38930 ///    special-purpose shuffle.
38931 /// 2) Look for sequences of shuffle instructions with 3 or more total
38932 ///    instructions, and replace them with the slightly more expensive SSSE3
38933 ///    PSHUFB instruction if available. We do this as the last combining step
38934 ///    to ensure we avoid using PSHUFB if we can implement the shuffle with
38935 ///    a suitable short sequence of other instructions. The PSHUFB will either
38936 ///    use a register or have to read from memory and so is slightly (but only
38937 ///    slightly) more expensive than the other shuffle instructions.
38938 ///
38939 /// Because this is inherently a quadratic operation (for each shuffle in
38940 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
38941 /// This should never be an issue in practice as the shuffle lowering doesn't
38942 /// produce sequences of more than 8 instructions.
38943 ///
38944 /// FIXME: We will currently miss some cases where the redundant shuffling
38945 /// would simplify under the threshold for PSHUFB formation because of
38946 /// combine-ordering. To fix this, we should do the redundant instruction
38947 /// combining in this recursive walk.
38948 static SDValue combineX86ShufflesRecursively(
38949     ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
38950     ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
38951     unsigned MaxDepth, bool HasVariableMask, bool AllowVariableCrossLaneMask,
38952     bool AllowVariablePerLaneMask, SelectionDAG &DAG,
38953     const X86Subtarget &Subtarget) {
38954   assert(!RootMask.empty() &&
38955          (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
38956          "Illegal shuffle root mask");
38957   MVT RootVT = Root.getSimpleValueType();
38958   assert(RootVT.isVector() && "Shuffles operate on vector types!");
38959   unsigned RootSizeInBits = RootVT.getSizeInBits();
38960 
38961   // Bound the depth of our recursive combine because this is ultimately
38962   // quadratic in nature.
38963   if (Depth >= MaxDepth)
38964     return SDValue();
38965 
38966   // Directly rip through bitcasts to find the underlying operand.
38967   SDValue Op = SrcOps[SrcOpIndex];
38968   Op = peekThroughOneUseBitcasts(Op);
38969 
38970   EVT VT = Op.getValueType();
38971   if (!VT.isVector() || !VT.isSimple())
38972     return SDValue(); // Bail if we hit a non-simple non-vector.
38973 
38974   // FIXME: Just bail on f16 for now.
38975   if (VT.getVectorElementType() == MVT::f16)
38976     return SDValue();
38977 
38978   assert((RootSizeInBits % VT.getSizeInBits()) == 0 &&
38979          "Can only combine shuffles up to the size of the root op.");
38980 
38981   // Create a demanded elts mask from the referenced elements of Op.
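        // E.g. with a 4-element root mask and SrcOpIndex == 1, root mask
        // values in the range [4, 8) demand elements 0-3 of Op.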
38982   APInt OpDemandedElts = APInt::getZero(RootMask.size());
38983   for (int M : RootMask) {
38984     int BaseIdx = RootMask.size() * SrcOpIndex;
38985     if (isInRange(M, BaseIdx, BaseIdx + RootMask.size()))
38986       OpDemandedElts.setBit(M - BaseIdx);
38987   }
38988   if (RootSizeInBits != VT.getSizeInBits()) {
38989     // Op is smaller than Root - extract the demanded elts for the subvector.
38990     unsigned Scale = RootSizeInBits / VT.getSizeInBits();
38991     unsigned NumOpMaskElts = RootMask.size() / Scale;
38992     assert((RootMask.size() % Scale) == 0 && "Root mask size mismatch");
38993     assert(OpDemandedElts
38994                .extractBits(RootMask.size() - NumOpMaskElts, NumOpMaskElts)
38995                .isZero() &&
38996            "Out of range elements referenced in root mask");
38997     OpDemandedElts = OpDemandedElts.extractBits(NumOpMaskElts, 0);
38998   }
38999   OpDemandedElts =
39000       APIntOps::ScaleBitMask(OpDemandedElts, VT.getVectorNumElements());
39001 
39002   // Extract target shuffle mask and resolve sentinels and inputs.
39003   SmallVector<int, 64> OpMask;
39004   SmallVector<SDValue, 2> OpInputs;
39005   APInt OpUndef, OpZero;
39006   bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
39007   if (getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
39008                              OpZero, DAG, Depth, false)) {
39009     // Shuffle inputs must not be larger than the shuffle result.
39010     // TODO: Relax this for single input faux shuffles (e.g. trunc).
39011     if (llvm::any_of(OpInputs, [VT](SDValue OpInput) {
39012           return OpInput.getValueSizeInBits() > VT.getSizeInBits();
39013         }))
39014       return SDValue();
39015   } else if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
39016              (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
39017              !isNullConstant(Op.getOperand(1))) {
39018     SDValue SrcVec = Op.getOperand(0);
39019     int ExtractIdx = Op.getConstantOperandVal(1);
39020     unsigned NumElts = VT.getVectorNumElements();
39021     OpInputs.assign({SrcVec});
39022     OpMask.assign(NumElts, SM_SentinelUndef);
39023     std::iota(OpMask.begin(), OpMask.end(), ExtractIdx);
39024     OpZero = OpUndef = APInt::getZero(NumElts);
39025   } else {
39026     return SDValue();
39027   }
39028 
39029   // If the shuffle result was smaller than the root, we need to adjust the
39030   // mask indices and pad the mask with undefs.
39031   if (RootSizeInBits > VT.getSizeInBits()) {
39032     unsigned NumSubVecs = RootSizeInBits / VT.getSizeInBits();
39033     unsigned OpMaskSize = OpMask.size();
39034     if (OpInputs.size() > 1) {
39035       unsigned PaddedMaskSize = NumSubVecs * OpMaskSize;
39036       for (int &M : OpMask) {
39037         if (M < 0)
39038           continue;
39039         int EltIdx = M % OpMaskSize;
39040         int OpIdx = M / OpMaskSize;
39041         M = (PaddedMaskSize * OpIdx) + EltIdx;
39042       }
39043     }
39044     OpZero = OpZero.zext(NumSubVecs * OpMaskSize);
39045     OpUndef = OpUndef.zext(NumSubVecs * OpMaskSize);
39046     OpMask.append((NumSubVecs - 1) * OpMaskSize, SM_SentinelUndef);
39047   }
39048 
39049   SmallVector<int, 64> Mask;
39050   SmallVector<SDValue, 16> Ops;
39051 
39052   // We don't need to merge masks if the root is empty.
39053   bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
39054   if (EmptyRoot) {
39055     // Only resolve zeros if it will remove an input; otherwise we might end
39056     // up in an infinite loop.
39057     bool ResolveKnownZeros = true;
39058     if (!OpZero.isZero()) {
39059       APInt UsedInputs = APInt::getZero(OpInputs.size());
39060       for (int i = 0, e = OpMask.size(); i != e; ++i) {
39061         int M = OpMask[i];
39062         if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
39063           continue;
39064         UsedInputs.setBit(M / OpMask.size());
39065         if (UsedInputs.isAllOnes()) {
39066           ResolveKnownZeros = false;
39067           break;
39068         }
39069       }
39070     }
39071     resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
39072                                       ResolveKnownZeros);
39073 
39074     Mask = OpMask;
39075     Ops.append(OpInputs.begin(), OpInputs.end());
39076   } else {
39077     resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);
39078 
39079     // Add the inputs to the Ops list, avoiding duplicates.
39080     Ops.append(SrcOps.begin(), SrcOps.end());
39081 
39082     auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
39083       // Attempt to find an existing match.
39084       SDValue InputBC = peekThroughBitcasts(Input);
39085       for (int i = 0, e = Ops.size(); i < e; ++i)
39086         if (InputBC == peekThroughBitcasts(Ops[i]))
39087           return i;
39088       // Match failed - should we replace an existing Op?
39089       if (InsertionPoint >= 0) {
39090         Ops[InsertionPoint] = Input;
39091         return InsertionPoint;
39092       }
39093       // Add to the end of the Ops list.
39094       Ops.push_back(Input);
39095       return Ops.size() - 1;
39096     };
39097 
39098     SmallVector<int, 2> OpInputIdx;
39099     for (SDValue OpInput : OpInputs)
39100       OpInputIdx.push_back(
39101           AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));
39102 
39103     assert(((RootMask.size() > OpMask.size() &&
39104              RootMask.size() % OpMask.size() == 0) ||
39105             (OpMask.size() > RootMask.size() &&
39106              OpMask.size() % RootMask.size() == 0) ||
39107             OpMask.size() == RootMask.size()) &&
39108            "The smaller number of elements must divide the larger.");
39109 
39110     // This function can be performance-critical, so we rely on the power-of-2
39111     // knowledge that we have about the mask sizes to replace div/rem ops with
39112     // bit-masks and shifts.
39113     assert(llvm::has_single_bit<uint32_t>(RootMask.size()) &&
39114            "Non-power-of-2 shuffle mask sizes");
39115     assert(llvm::has_single_bit<uint32_t>(OpMask.size()) &&
39116            "Non-power-of-2 shuffle mask sizes");
39117     unsigned RootMaskSizeLog2 = llvm::countr_zero(RootMask.size());
39118     unsigned OpMaskSizeLog2 = llvm::countr_zero(OpMask.size());
39119 
39120     unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
39121     unsigned RootRatio =
39122         std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
39123     unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
39124     assert((RootRatio == 1 || OpRatio == 1) &&
39125            "Must not have a ratio for both incoming and op masks!");
39126 
39127     assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
39128     assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
39129     assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
39130     unsigned RootRatioLog2 = llvm::countr_zero(RootRatio);
39131     unsigned OpRatioLog2 = llvm::countr_zero(OpRatio);
39132 
39133     Mask.resize(MaskWidth, SM_SentinelUndef);
39134 
39135     // Merge this shuffle operation's mask into our accumulated mask. Note that
39136     // this shuffle's mask will be the first applied to the input, followed by
39137     // the root mask to get us all the way to the root value arrangement. The
39138     // reason for this order is that we are recursing up the operation chain.
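          // E.g. merging a 4-element root mask {0, 2, 1, 3} into an 8-element
          // op mask: the root mask is first scaled to {0, 1, 4, 5, 2, 3, 6, 7},
          // then each entry referring to this op is looked up in OpMask and
          // rebased to the matching input's slot.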
39139     for (unsigned i = 0; i < MaskWidth; ++i) {
39140       unsigned RootIdx = i >> RootRatioLog2;
39141       if (RootMask[RootIdx] < 0) {
39142         // This is a zero or undef lane, we're done.
39143         Mask[i] = RootMask[RootIdx];
39144         continue;
39145       }
39146 
39147       unsigned RootMaskedIdx =
39148           RootRatio == 1
39149               ? RootMask[RootIdx]
39150               : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));
39151 
39152       // Just insert the scaled root mask value if it references an input other
39153       // than the SrcOp we're currently inserting.
39154       if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
39155           (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
39156         Mask[i] = RootMaskedIdx;
39157         continue;
39158       }
39159 
39160       RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
39161       unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
39162       if (OpMask[OpIdx] < 0) {
39163         // The incoming lanes are zero or undef, it doesn't matter which ones we
39164         // are using.
39165         Mask[i] = OpMask[OpIdx];
39166         continue;
39167       }
39168 
39169       // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
39170       unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
39171                                           : (OpMask[OpIdx] << OpRatioLog2) +
39172                                                 (RootMaskedIdx & (OpRatio - 1));
39173 
39174       OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
39175       int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
39176       assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
39177       OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;
39178 
39179       Mask[i] = OpMaskedIdx;
39180     }
39181   }
39182 
39183   // Peek through vector widenings and set out of bounds mask indices to undef.
39184   // TODO: Can resolveTargetShuffleInputsAndMask do some of this?
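        // E.g. if an op is a half-width vector inserted into undef at index 0,
        // the mask entries that referenced its (undef) upper half become
        // undef.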
39185   for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
39186     SDValue &Op = Ops[I];
39187     if (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op.getOperand(0).isUndef() &&
39188         isNullConstant(Op.getOperand(2))) {
39189       Op = Op.getOperand(1);
39190       unsigned Scale = RootSizeInBits / Op.getValueSizeInBits();
39191       int Lo = I * Mask.size();
39192       int Hi = (I + 1) * Mask.size();
39193       int NewHi = Lo + (Mask.size() / Scale);
39194       for (int &M : Mask) {
39195         if (Lo <= M && NewHi <= M && M < Hi)
39196           M = SM_SentinelUndef;
39197       }
39198     }
39199   }
39200 
39201   // Peek through any free extract_subvector nodes back to root size.
39202   for (SDValue &Op : Ops)
39203     while (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
39204            (RootSizeInBits % Op.getOperand(0).getValueSizeInBits()) == 0 &&
39205            isNullConstant(Op.getOperand(1)))
39206       Op = Op.getOperand(0);
39207 
39208   // Remove unused/repeated shuffle source ops.
39209   resolveTargetShuffleInputsAndMask(Ops, Mask);
39210 
39211   // Handle the all undef/zero/ones cases early.
39212   if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
39213     return DAG.getUNDEF(RootVT);
39214   if (all_of(Mask, [](int Idx) { return Idx < 0; }))
39215     return getZeroVector(RootVT, Subtarget, DAG, SDLoc(Root));
39216   if (Ops.size() == 1 && ISD::isBuildVectorAllOnes(Ops[0].getNode()) &&
39217       !llvm::is_contained(Mask, SM_SentinelZero))
39218     return getOnesVector(RootVT, DAG, SDLoc(Root));
39219 
39220   assert(!Ops.empty() && "Shuffle with no inputs detected");
39221   HasVariableMask |= IsOpVariableMask;
39222 
39223   // Update the list of shuffle nodes that have been combined so far.
39224   SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
39225                                                 SrcNodes.end());
39226   CombinedNodes.push_back(Op.getNode());
39227 
39228   // See if we can recurse into each shuffle source op (if it's a target
39229   // shuffle). The source op should only be combined in the general case if it
39230   // either has a single use (i.e. the current Op) or all of its users have
39231   // already been combined; if not, we can still combine but should prevent
39232   // generation of variable shuffles to avoid constant pool bloat.
39233   // Don't recurse if we already have more source ops than we can combine in
39234   // the remaining recursion depth.
39235   if (Ops.size() < (MaxDepth - Depth)) {
39236     for (int i = 0, e = Ops.size(); i < e; ++i) {
39237       // For empty roots, we need to resolve zeroable elements before combining
39238       // them with other shuffles.
39239       SmallVector<int, 64> ResolvedMask = Mask;
39240       if (EmptyRoot)
39241         resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
39242       bool AllowCrossLaneVar = false;
39243       bool AllowPerLaneVar = false;
39244       if (Ops[i].getNode()->hasOneUse() ||
39245           SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode())) {
39246         AllowCrossLaneVar = AllowVariableCrossLaneMask;
39247         AllowPerLaneVar = AllowVariablePerLaneMask;
39248       }
39249       if (SDValue Res = combineX86ShufflesRecursively(
39250               Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1, MaxDepth,
39251               HasVariableMask, AllowCrossLaneVar, AllowPerLaneVar, DAG,
39252               Subtarget))
39253         return Res;
39254     }
39255   }
39256 
39257   // Attempt to constant fold all of the constant source ops.
39258   if (SDValue Cst = combineX86ShufflesConstants(
39259           Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
39260     return Cst;
39261 
39262   // If constant fold failed and we only have constants - then we have
39263   // multiple uses by a single non-variable shuffle - just bail.
39264   if (Depth == 0 && llvm::all_of(Ops, [&](SDValue Op) {
39265         APInt UndefElts;
39266         SmallVector<APInt> RawBits;
39267         unsigned EltSizeInBits = RootSizeInBits / Mask.size();
39268         return getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
39269                                              RawBits);
39270       })) {
39271     return SDValue();
39272   }
39273 
39274   // Canonicalize the combined shuffle mask chain with horizontal ops.
39275   // NOTE: This will update the Ops and Mask.
39276   if (SDValue HOp = canonicalizeShuffleMaskWithHorizOp(
39277           Ops, Mask, RootSizeInBits, SDLoc(Root), DAG, Subtarget))
39278     return DAG.getBitcast(RootVT, HOp);
39279 
39280   // Try to refine our inputs given our knowledge of target shuffle mask.
39281   for (auto I : enumerate(Ops)) {
39282     int OpIdx = I.index();
39283     SDValue &Op = I.value();
39284 
39285     // What range of shuffle mask element values results in picking from Op?
39286     int Lo = OpIdx * Mask.size();
39287     int Hi = Lo + Mask.size();
39288 
39289     // Which elements of Op do we demand, given the mask's granularity?
39290     APInt OpDemandedElts(Mask.size(), 0);
39291     for (int MaskElt : Mask) {
39292       if (isInRange(MaskElt, Lo, Hi)) { // Picks from Op?
39293         int OpEltIdx = MaskElt - Lo;
39294         OpDemandedElts.setBit(OpEltIdx);
39295       }
39296     }
39297 
39298     // Is the shuffle result smaller than the root?
39299     if (Op.getValueSizeInBits() < RootSizeInBits) {
39300       // We padded the mask with undefs. But we now need to undo that.
39301       unsigned NumExpectedVectorElts = Mask.size();
39302       unsigned EltSizeInBits = RootSizeInBits / NumExpectedVectorElts;
39303       unsigned NumOpVectorElts = Op.getValueSizeInBits() / EltSizeInBits;
39304       assert(!OpDemandedElts.extractBits(
39305                  NumExpectedVectorElts - NumOpVectorElts, NumOpVectorElts) &&
39306              "Demanding the virtual undef widening padding?");
39307       OpDemandedElts = OpDemandedElts.trunc(NumOpVectorElts); // NUW
39308     }
39309 
39310     // The Op itself may be of different VT, so we need to scale the mask.
39311     unsigned NumOpElts = Op.getValueType().getVectorNumElements();
39312     APInt OpScaledDemandedElts = APIntOps::ScaleBitMask(OpDemandedElts, NumOpElts);
39313 
39314     // Can this operand be simplified any further, given its demanded elements?
39315     if (SDValue NewOp =
39316             DAG.getTargetLoweringInfo().SimplifyMultipleUseDemandedVectorElts(
39317                 Op, OpScaledDemandedElts, DAG))
39318       Op = NewOp;
39319   }
39320   // FIXME: should we rerun resolveTargetShuffleInputsAndMask() now?
39321 
39322   // Widen any subvector shuffle inputs we've collected.
39323   // TODO: Remove this to avoid generating temporary nodes, we should only
39324   // widen once combineX86ShuffleChain has found a match.
39325   if (any_of(Ops, [RootSizeInBits](SDValue Op) {
39326         return Op.getValueSizeInBits() < RootSizeInBits;
39327       })) {
39328     for (SDValue &Op : Ops)
39329       if (Op.getValueSizeInBits() < RootSizeInBits)
39330         Op = widenSubVector(Op, false, Subtarget, DAG, SDLoc(Op),
39331                             RootSizeInBits);
39332     // Reresolve - we might have repeated subvector sources.
39333     resolveTargetShuffleInputsAndMask(Ops, Mask);
39334   }
39335 
39336   // We can only combine unary and binary shuffle mask cases.
39337   if (Ops.size() <= 2) {
39338     // Minor canonicalization of the accumulated shuffle mask to make it easier
39339     // to match below. All this does is detect masks with sequential pairs of
39340     // elements, and shrink them to the half-width mask. It does this in a loop
39341     // so it will reduce the size of the mask to the minimal width mask which
39342     // performs an equivalent shuffle.
39343     while (Mask.size() > 1) {
39344       SmallVector<int, 64> WidenedMask;
39345       if (!canWidenShuffleElements(Mask, WidenedMask))
39346         break;
39347       Mask = std::move(WidenedMask);
39348     }
39349 
39350     // Canonicalization of binary shuffle masks to improve pattern matching by
39351     // commuting the inputs.
39352     if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
39353       ShuffleVectorSDNode::commuteMask(Mask);
39354       std::swap(Ops[0], Ops[1]);
39355     }
39356 
39357     // Try to combine into a single shuffle instruction.
39358     if (SDValue Shuffle = combineX86ShuffleChain(
39359             Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
39360             AllowVariablePerLaneMask, DAG, Subtarget))
39361       return Shuffle;
39362 
39363     // If all the operands come from the same larger vector, fallthrough and try
39364     // to use combineX86ShuffleChainWithExtract.
39365     SDValue LHS = peekThroughBitcasts(Ops.front());
39366     SDValue RHS = peekThroughBitcasts(Ops.back());
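    // This fallthrough is currently limited to 128-bit roots with 64-bit
    // elements on AVX2 where both inputs extract from the same wider vector.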
39367     if (Ops.size() != 2 || !Subtarget.hasAVX2() || RootSizeInBits != 128 ||
39368         (RootSizeInBits / Mask.size()) != 64 ||
39369         LHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
39370         RHS.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
39371         LHS.getOperand(0) != RHS.getOperand(0))
39372       return SDValue();
39373   }
39374 
39375   // If that failed and any input is extracted then try to combine as a
39376   // shuffle with the larger type.
39377   return combineX86ShuffleChainWithExtract(
39378       Ops, Root, Mask, Depth, HasVariableMask, AllowVariableCrossLaneMask,
39379       AllowVariablePerLaneMask, DAG, Subtarget);
39380 }
39381 
39382 /// Helper entry wrapper to combineX86ShufflesRecursively.
39383 static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
39384                                              const X86Subtarget &Subtarget) {
39385   return combineX86ShufflesRecursively(
39386       {Op}, 0, Op, {0}, {}, /*Depth*/ 0, X86::MaxShuffleCombineDepth,
39387       /*HasVarMask*/ false,
39388       /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, DAG,
39389       Subtarget);
39390 }
39391 
39392 /// Get the PSHUF-style mask from PSHUF node.
39393 ///
39394 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
39395 /// PSHUF-style masks that can be reused with such instructions.
39396 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
39397   MVT VT = N.getSimpleValueType();
39398   SmallVector<int, 4> Mask;
39399   SmallVector<SDValue, 2> Ops;
39400   bool HaveMask =
39401       getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask);
39402   (void)HaveMask;
39403   assert(HaveMask);
39404 
39405   // If we have more than 128 bits, only the low 128 bits of the shuffle mask
39406   // matter. Check that the upper masks are repeats and remove them.
39407   if (VT.getSizeInBits() > 128) {
39408     int LaneElts = 128 / VT.getScalarSizeInBits();
39409 #ifndef NDEBUG
39410     for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
39411       for (int j = 0; j < LaneElts; ++j)
39412         assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
39413                "Mask doesn't repeat in high 128-bit lanes!");
39414 #endif
39415     Mask.resize(LaneElts);
39416   }
39417 
39418   switch (N.getOpcode()) {
39419   case X86ISD::PSHUFD:
39420     return Mask;
39421   case X86ISD::PSHUFLW:
39422     Mask.resize(4);
39423     return Mask;
39424   case X86ISD::PSHUFHW:
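    // PSHUFHW only permutes the high 4 words: drop the low-half identity
    // entries and rebase the remaining indices to 0..3.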
39425     Mask.erase(Mask.begin(), Mask.begin() + 4);
39426     for (int &M : Mask)
39427       M -= 4;
39428     return Mask;
39429   default:
39430     llvm_unreachable("No valid shuffle instruction found!");
39431   }
39432 }
39433 
39434 /// Search for a combinable shuffle across a chain ending in pshufd.
39435 ///
39436 /// We walk up the chain and look for a combinable shuffle, skipping over
39437 /// shuffles that we could hoist this shuffle's transformation past without
39438 /// altering anything.
39439 static SDValue
39440 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
39441                              SelectionDAG &DAG) {
39442   assert(N.getOpcode() == X86ISD::PSHUFD &&
39443          "Called with something other than an x86 128-bit half shuffle!");
39444   SDLoc DL(N);
39445 
39446   // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
39447   // of the shuffles in the chain so that we can form a fresh chain to replace
39448   // this one.
39449   SmallVector<SDValue, 8> Chain;
39450   SDValue V = N.getOperand(0);
39451   for (; V.hasOneUse(); V = V.getOperand(0)) {
39452     switch (V.getOpcode()) {
39453     default:
39454       return SDValue(); // Nothing combined!
39455 
39456     case ISD::BITCAST:
39457       // Skip bitcasts as we always know the type for the target specific
39458       // instructions.
39459       continue;
39460 
39461     case X86ISD::PSHUFD:
39462       // Found another dword shuffle.
39463       break;
39464 
39465     case X86ISD::PSHUFLW:
39466       // Check that the low words (being shuffled) are the identity in the
39467       // dword shuffle, and the high words are self-contained.
39468       if (Mask[0] != 0 || Mask[1] != 1 ||
39469           !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
39470         return SDValue();
39471 
39472       Chain.push_back(V);
39473       continue;
39474 
39475     case X86ISD::PSHUFHW:
39476       // Check that the high words (being shuffled) are the identity in the
39477       // dword shuffle, and the low words are self-contained.
39478       if (Mask[2] != 2 || Mask[3] != 3 ||
39479           !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
39480         return SDValue();
39481 
39482       Chain.push_back(V);
39483       continue;
39484 
39485     case X86ISD::UNPCKL:
39486     case X86ISD::UNPCKH:
39487       // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
39488       // shuffle into a preceding word shuffle.
39489       if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
39490           V.getSimpleValueType().getVectorElementType() != MVT::i16)
39491         return SDValue();
39492 
39493       // Search for a half-shuffle which we can combine with.
39494       unsigned CombineOp =
39495           V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
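      // We can only combine unpacks of a single repeated operand where that
      // operand has no other users.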
39496       if (V.getOperand(0) != V.getOperand(1) ||
39497           !V->isOnlyUserOf(V.getOperand(0).getNode()))
39498         return SDValue();
39499       Chain.push_back(V);
39500       V = V.getOperand(0);
39501       do {
39502         switch (V.getOpcode()) {
39503         default:
39504           return SDValue(); // Nothing to combine.
39505 
39506         case X86ISD::PSHUFLW:
39507         case X86ISD::PSHUFHW:
39508           if (V.getOpcode() == CombineOp)
39509             break;
39510 
39511           Chain.push_back(V);
39512 
39513           [[fallthrough]];
39514         case ISD::BITCAST:
39515           V = V.getOperand(0);
39516           continue;
39517         }
39518         break;
39519       } while (V.hasOneUse());
39520       break;
39521     }
39522     // Break out of the loop if we break out of the switch.
39523     break;
39524   }
39525 
39526   if (!V.hasOneUse())
39527     // We fell out of the loop without finding a viable combining instruction.
39528     return SDValue();
39529 
39530   // Merge this node's mask and our incoming mask.
39531   SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
39532   for (int &M : Mask)
39533     M = VMask[M];
39534   V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
39535                   getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
39536 
39537   // Rebuild the chain around this new shuffle.
39538   while (!Chain.empty()) {
39539     SDValue W = Chain.pop_back_val();
39540 
39541     if (V.getValueType() != W.getOperand(0).getValueType())
39542       V = DAG.getBitcast(W.getOperand(0).getValueType(), V);
39543 
39544     switch (W.getOpcode()) {
39545     default:
39546       llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
39547 
39548     case X86ISD::UNPCKL:
39549     case X86ISD::UNPCKH:
39550       V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
39551       break;
39552 
39553     case X86ISD::PSHUFD:
39554     case X86ISD::PSHUFLW:
39555     case X86ISD::PSHUFHW:
39556       V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
39557       break;
39558     }
39559   }
39560   if (V.getValueType() != N.getValueType())
39561     V = DAG.getBitcast(N.getValueType(), V);
39562 
39563   // Return the new chain to replace N.
39564   return V;
39565 }
39566 
39567 // Attempt to commute shufps LHS loads:
39568 // permilps(shufps(load(),x)) --> permilps(shufps(x,load()))
39569 static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL,
39570                                       SelectionDAG &DAG) {
39571   // TODO: Add vXf64 support.
39572   if (VT != MVT::v4f32 && VT != MVT::v8f32 && VT != MVT::v16f32)
39573     return SDValue();
39574 
39575   // SHUFP(LHS, RHS) -> SHUFP(RHS, LHS) iff LHS is foldable + RHS is not.
39576   auto commuteSHUFP = [&VT, &DL, &DAG](SDValue Parent, SDValue V) {
39577     if (V.getOpcode() != X86ISD::SHUFP || !Parent->isOnlyUserOf(V.getNode()))
39578       return SDValue();
39579     SDValue N0 = V.getOperand(0);
39580     SDValue N1 = V.getOperand(1);
39581     unsigned Imm = V.getConstantOperandVal(2);
39582     const X86Subtarget &Subtarget = DAG.getSubtarget<X86Subtarget>();
39583     if (!X86::mayFoldLoad(peekThroughOneUseBitcasts(N0), Subtarget) ||
39584         X86::mayFoldLoad(peekThroughOneUseBitcasts(N1), Subtarget))
39585       return SDValue();
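    // Swap the two 4-bit halves of the immediate so the commuted SHUFP picks
    // the same elements; within each 128-bit lane the two 64-bit halves of the
    // result end up swapped, which the callers below undo by flipping bits in
    // their own immediates (the 0xAA/0x0A/0xA0 XORs).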
39586     Imm = ((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4);
39587     return DAG.getNode(X86ISD::SHUFP, DL, VT, N1, N0,
39588                        DAG.getTargetConstant(Imm, DL, MVT::i8));
39589   };
39590 
39591   switch (N.getOpcode()) {
39592   case X86ISD::VPERMILPI:
39593     if (SDValue NewSHUFP = commuteSHUFP(N, N.getOperand(0))) {
39594       unsigned Imm = N.getConstantOperandVal(1);
39595       return DAG.getNode(X86ISD::VPERMILPI, DL, VT, NewSHUFP,
39596                          DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
39597     }
39598     break;
39599   case X86ISD::SHUFP: {
39600     SDValue N0 = N.getOperand(0);
39601     SDValue N1 = N.getOperand(1);
39602     unsigned Imm = N.getConstantOperandVal(2);
39603     if (N0 == N1) {
39604       if (SDValue NewSHUFP = commuteSHUFP(N, N0))
39605         return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, NewSHUFP,
39606                            DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
39607     } else if (SDValue NewSHUFP = commuteSHUFP(N, N0)) {
39608       return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, N1,
39609                          DAG.getTargetConstant(Imm ^ 0x0A, DL, MVT::i8));
39610     } else if (SDValue NewSHUFP = commuteSHUFP(N, N1)) {
39611       return DAG.getNode(X86ISD::SHUFP, DL, VT, N0, NewSHUFP,
39612                          DAG.getTargetConstant(Imm ^ 0xA0, DL, MVT::i8));
39613     }
39614     break;
39615   }
39616   }
39617 
39618   return SDValue();
39619 }
39620 
39621 // TODO - move this to TLI like isBinOp?
39622 static bool isUnaryOp(unsigned Opcode) {
39623   switch (Opcode) {
39624   case ISD::CTLZ:
39625   case ISD::CTTZ:
39626   case ISD::CTPOP:
39627     return true;
39628   }
39629   return false;
39630 }
39631 
39632 // Canonicalize SHUFFLE(UNARYOP(X)) -> UNARYOP(SHUFFLE(X)).
39633 // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
39634 static SDValue canonicalizeShuffleWithOp(SDValue N, SelectionDAG &DAG,
39635                                          const SDLoc &DL) {
39636   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39637   EVT ShuffleVT = N.getValueType();
39638 
39639   auto IsMergeableWithShuffle = [&DAG](SDValue Op, bool FoldLoad = false) {
39640     // AllZeros/AllOnes constants are freely shuffled and will peek through
39641     // bitcasts. Other constant build vectors do not peek through bitcasts. Only
39642     // merge with target shuffles if it has one use so shuffle combining is
39643     // likely to kick in. Shuffles of splats are expected to be removed.
39644     return ISD::isBuildVectorAllOnes(Op.getNode()) ||
39645            ISD::isBuildVectorAllZeros(Op.getNode()) ||
39646            ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) ||
39647            ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode()) ||
39648            getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op)) ||
39649            (Op.getOpcode() == ISD::INSERT_SUBVECTOR && Op->hasOneUse()) ||
39650            (isTargetShuffle(Op.getOpcode()) && Op->hasOneUse()) ||
39651            (FoldLoad && isShuffleFoldableLoad(Op)) ||
39652            DAG.isSplatValue(Op, /*AllowUndefs*/ false);
39653   };
39654   auto IsSafeToMoveShuffle = [ShuffleVT](SDValue Op, unsigned BinOp) {
39655     // Ensure we only shuffle whole vector src elements, unless it's a logical
39656     // binop where we can more aggressively move shuffles from dst to src.
39657     return BinOp == ISD::AND || BinOp == ISD::OR || BinOp == ISD::XOR ||
39658            BinOp == X86ISD::ANDNP ||
39659            (Op.getScalarValueSizeInBits() <= ShuffleVT.getScalarSizeInBits());
39660   };
39661 
39662   unsigned Opc = N.getOpcode();
39663   switch (Opc) {
39664   // Unary and Unary+Permute Shuffles.
39665   case X86ISD::PSHUFB: {
39666     // Don't merge PSHUFB if it contains zeroed elements.
39667     SmallVector<int> Mask;
39668     SmallVector<SDValue> Ops;
39669     if (!getTargetShuffleMask(N.getNode(), ShuffleVT.getSimpleVT(), false, Ops,
39670                               Mask))
39671       break;
39672     [[fallthrough]];
39673   }
39674   case X86ISD::VBROADCAST:
39675   case X86ISD::MOVDDUP:
39676   case X86ISD::PSHUFD:
39677   case X86ISD::PSHUFHW:
39678   case X86ISD::PSHUFLW:
39679   case X86ISD::VPERMI:
39680   case X86ISD::VPERMILPI: {
39681     if (N.getOperand(0).getValueType() == ShuffleVT &&
39682         N->isOnlyUserOf(N.getOperand(0).getNode())) {
39683       SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
39684       unsigned SrcOpcode = N0.getOpcode();
39685       if (TLI.isBinOp(SrcOpcode) && IsSafeToMoveShuffle(N0, SrcOpcode)) {
39686         SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39687         SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
39688         if (IsMergeableWithShuffle(Op00, Opc != X86ISD::PSHUFB) ||
39689             IsMergeableWithShuffle(Op01, Opc != X86ISD::PSHUFB)) {
39690           SDValue LHS, RHS;
39691           Op00 = DAG.getBitcast(ShuffleVT, Op00);
39692           Op01 = DAG.getBitcast(ShuffleVT, Op01);
39693           if (N.getNumOperands() == 2) {
39694             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, N.getOperand(1));
39695             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, N.getOperand(1));
39696           } else {
39697             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00);
39698             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01);
39699           }
39700           EVT OpVT = N0.getValueType();
39701           return DAG.getBitcast(ShuffleVT,
39702                                 DAG.getNode(SrcOpcode, DL, OpVT,
39703                                             DAG.getBitcast(OpVT, LHS),
39704                                             DAG.getBitcast(OpVT, RHS)));
39705         }
39706       }
39707     }
39708     break;
39709   }
39710   // Binary and Binary+Permute Shuffles.
39711   case X86ISD::INSERTPS: {
39712     // Don't merge INSERTPS if it contains zeroed elements.
39713     unsigned InsertPSMask = N.getConstantOperandVal(2);
39714     unsigned ZeroMask = InsertPSMask & 0xF;
39715     if (ZeroMask != 0)
39716       break;
39717     [[fallthrough]];
39718   }
39719   case X86ISD::MOVSD:
39720   case X86ISD::MOVSS:
39721   case X86ISD::BLENDI:
39722   case X86ISD::SHUFP:
39723   case X86ISD::UNPCKH:
39724   case X86ISD::UNPCKL: {
39725     if (N->isOnlyUserOf(N.getOperand(0).getNode()) &&
39726         N->isOnlyUserOf(N.getOperand(1).getNode())) {
39727       SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
39728       SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
39729       unsigned SrcOpcode = N0.getOpcode();
39730       if (TLI.isBinOp(SrcOpcode) && N1.getOpcode() == SrcOpcode &&
39731           N0.getValueType() == N1.getValueType() &&
39732           IsSafeToMoveShuffle(N0, SrcOpcode) &&
39733           IsSafeToMoveShuffle(N1, SrcOpcode)) {
39734         SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39735         SDValue Op10 = peekThroughOneUseBitcasts(N1.getOperand(0));
39736         SDValue Op01 = peekThroughOneUseBitcasts(N0.getOperand(1));
39737         SDValue Op11 = peekThroughOneUseBitcasts(N1.getOperand(1));
39738         // Ensure the total number of shuffles doesn't increase by folding this
39739         // shuffle through to the source ops.
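        // i.e. either new shuffle (Op00+Op10 or Op01+Op11) has both operands
        // mergeable, or each of them has at least one mergeable operand.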
39740         if (((IsMergeableWithShuffle(Op00) && IsMergeableWithShuffle(Op10)) ||
39741              (IsMergeableWithShuffle(Op01) && IsMergeableWithShuffle(Op11))) ||
39742             ((IsMergeableWithShuffle(Op00) || IsMergeableWithShuffle(Op10)) &&
39743              (IsMergeableWithShuffle(Op01) || IsMergeableWithShuffle(Op11)))) {
39744           SDValue LHS, RHS;
39745           Op00 = DAG.getBitcast(ShuffleVT, Op00);
39746           Op10 = DAG.getBitcast(ShuffleVT, Op10);
39747           Op01 = DAG.getBitcast(ShuffleVT, Op01);
39748           Op11 = DAG.getBitcast(ShuffleVT, Op11);
39749           if (N.getNumOperands() == 3) {
39750             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10, N.getOperand(2));
39751             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11, N.getOperand(2));
39752           } else {
39753             LHS = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10);
39754             RHS = DAG.getNode(Opc, DL, ShuffleVT, Op01, Op11);
39755           }
39756           EVT OpVT = N0.getValueType();
39757           return DAG.getBitcast(ShuffleVT,
39758                                 DAG.getNode(SrcOpcode, DL, OpVT,
39759                                             DAG.getBitcast(OpVT, LHS),
39760                                             DAG.getBitcast(OpVT, RHS)));
39761         }
39762       }
39763       if (isUnaryOp(SrcOpcode) && N1.getOpcode() == SrcOpcode &&
39764           N0.getValueType() == N1.getValueType() &&
39765           IsSafeToMoveShuffle(N0, SrcOpcode) &&
39766           IsSafeToMoveShuffle(N1, SrcOpcode)) {
39767         SDValue Op00 = peekThroughOneUseBitcasts(N0.getOperand(0));
39768         SDValue Op10 = peekThroughOneUseBitcasts(N1.getOperand(0));
39769         SDValue Res;
39770         Op00 = DAG.getBitcast(ShuffleVT, Op00);
39771         Op10 = DAG.getBitcast(ShuffleVT, Op10);
39772         if (N.getNumOperands() == 3) {
39773           Res = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10, N.getOperand(2));
39774         } else {
39775           Res = DAG.getNode(Opc, DL, ShuffleVT, Op00, Op10);
39776         }
39777         EVT OpVT = N0.getValueType();
39778         return DAG.getBitcast(
39779             ShuffleVT,
39780             DAG.getNode(SrcOpcode, DL, OpVT, DAG.getBitcast(OpVT, Res)));
39781       }
39782     }
39783     break;
39784   }
39785   }
39786   return SDValue();
39787 }
39788 
39789 /// Attempt to fold vpermf128(op(),op()) -> op(vpermf128(),vpermf128()).
39790 static SDValue canonicalizeLaneShuffleWithRepeatedOps(SDValue V,
39791                                                       SelectionDAG &DAG,
39792                                                       const SDLoc &DL) {
39793   assert(V.getOpcode() == X86ISD::VPERM2X128 && "Unknown lane shuffle");
39794 
39795   MVT VT = V.getSimpleValueType();
39796   SDValue Src0 = peekThroughBitcasts(V.getOperand(0));
39797   SDValue Src1 = peekThroughBitcasts(V.getOperand(1));
39798   unsigned SrcOpc0 = Src0.getOpcode();
39799   unsigned SrcOpc1 = Src1.getOpcode();
39800   EVT SrcVT0 = Src0.getValueType();
39801   EVT SrcVT1 = Src1.getValueType();
39802 
39803   if (!Src1.isUndef() && (SrcVT0 != SrcVT1 || SrcOpc0 != SrcOpc1))
39804     return SDValue();
39805 
39806   switch (SrcOpc0) {
39807   case X86ISD::MOVDDUP: {
39808     SDValue LHS = Src0.getOperand(0);
39809     SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
39810     SDValue Res =
39811         DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS, V.getOperand(2));
39812     Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res);
39813     return DAG.getBitcast(VT, Res);
39814   }
39815   case X86ISD::VPERMILPI:
39816     // TODO: Handle v4f64 permutes with different low/high lane masks.
39817     if (SrcVT0 == MVT::v4f64) {
39818       uint64_t Mask = Src0.getConstantOperandVal(1);
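      // v4f64 VPERMILPI uses one mask bit per element (two per 128-bit lane),
      // so the fold is only safe if both lanes use the same in-lane permute.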
39819       if ((Mask & 0x3) != ((Mask >> 2) & 0x3))
39820         break;
39821     }
39822     [[fallthrough]];
39823   case X86ISD::VSHLI:
39824   case X86ISD::VSRLI:
39825   case X86ISD::VSRAI:
39826   case X86ISD::PSHUFD:
39827     if (Src1.isUndef() || Src0.getOperand(1) == Src1.getOperand(1)) {
39828       SDValue LHS = Src0.getOperand(0);
39829       SDValue RHS = Src1.isUndef() ? Src1 : Src1.getOperand(0);
39830       SDValue Res = DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT0, LHS, RHS,
39831                                 V.getOperand(2));
39832       Res = DAG.getNode(SrcOpc0, DL, SrcVT0, Res, Src0.getOperand(1));
39833       return DAG.getBitcast(VT, Res);
39834     }
39835     break;
39836   }
39837 
39838   return SDValue();
39839 }
39840 
39841 /// Try to combine x86 target specific shuffles.
39842 static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
39843                                     TargetLowering::DAGCombinerInfo &DCI,
39844                                     const X86Subtarget &Subtarget) {
39845   SDLoc DL(N);
39846   MVT VT = N.getSimpleValueType();
39847   SmallVector<int, 4> Mask;
39848   unsigned Opcode = N.getOpcode();
39849 
39850   if (SDValue R = combineCommutableSHUFP(N, VT, DL, DAG))
39851     return R;
39852 
39853   // Handle specific target shuffles.
39854   switch (Opcode) {
39855   case X86ISD::MOVDDUP: {
39856     SDValue Src = N.getOperand(0);
39857     // Turn a 128-bit MOVDDUP of a full vector load into movddup+vzload.
39858     if (VT == MVT::v2f64 && Src.hasOneUse() &&
39859         ISD::isNormalLoad(Src.getNode())) {
39860       LoadSDNode *LN = cast<LoadSDNode>(Src);
39861       if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::f64, MVT::v2f64, DAG)) {
39862         SDValue Movddup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, VZLoad);
39863         DCI.CombineTo(N.getNode(), Movddup);
39864         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
39865         DCI.recursivelyDeleteUnusedNodes(LN);
39866         return N; // Return N so it doesn't get rechecked!
39867       }
39868     }
39869 
39870     return SDValue();
39871   }
39872   case X86ISD::VBROADCAST: {
39873     SDValue Src = N.getOperand(0);
39874     SDValue BC = peekThroughBitcasts(Src);
39875     EVT SrcVT = Src.getValueType();
39876     EVT BCVT = BC.getValueType();
39877 
39878     // If broadcasting from another shuffle, attempt to simplify it.
39879     // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
39880     if (isTargetShuffle(BC.getOpcode()) &&
39881         VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
39882       unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
39883       SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
39884                                         SM_SentinelUndef);
39885       for (unsigned i = 0; i != Scale; ++i)
39886         DemandedMask[i] = i;
39887       if (SDValue Res = combineX86ShufflesRecursively(
39888               {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
39889               X86::MaxShuffleCombineDepth,
39890               /*HasVarMask*/ false, /*AllowCrossLaneVarMask*/ true,
39891               /*AllowPerLaneVarMask*/ true, DAG, Subtarget))
39892         return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
39893                            DAG.getBitcast(SrcVT, Res));
39894     }
39895 
39896     // broadcast(bitcast(src)) -> bitcast(broadcast(src))
39897     // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
39898     if (Src.getOpcode() == ISD::BITCAST &&
39899         SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits() &&
39900         DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
39901         FixedVectorType::isValidElementType(
39902             BCVT.getScalarType().getTypeForEVT(*DAG.getContext()))) {
39903       EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
39904                                    VT.getVectorNumElements());
39905       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
39906     }
39907 
39908     // vbroadcast(bitcast(vbroadcast(src))) -> bitcast(vbroadcast(src))
39909     // If we're re-broadcasting a smaller type then broadcast with that type and
39910     // bitcast.
39911     // TODO: Do this for any splat?
39912     if (Src.getOpcode() == ISD::BITCAST &&
39913         (BC.getOpcode() == X86ISD::VBROADCAST ||
39914          BC.getOpcode() == X86ISD::VBROADCAST_LOAD) &&
39915         (VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits()) == 0 &&
39916         (VT.getSizeInBits() % BCVT.getSizeInBits()) == 0) {
39917       MVT NewVT =
39918           MVT::getVectorVT(BCVT.getSimpleVT().getScalarType(),
39919                            VT.getSizeInBits() / BCVT.getScalarSizeInBits());
39920       return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
39921     }
39922 
39923     // Reduce broadcast source vector to lowest 128-bits.
39924     if (SrcVT.getSizeInBits() > 128)
39925       return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
39926                          extract128BitVector(Src, 0, DAG, DL));
39927 
39928     // broadcast(scalar_to_vector(x)) -> broadcast(x).
39929     if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR &&
39930         Src.getValueType().getScalarType() == Src.getOperand(0).getValueType())
39931       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
39932 
39933     // broadcast(extract_vector_elt(x, 0)) -> broadcast(x).
39934     if (Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
39935         isNullConstant(Src.getOperand(1)) &&
39936         Src.getValueType() ==
39937             Src.getOperand(0).getValueType().getScalarType() &&
39938         DAG.getTargetLoweringInfo().isTypeLegal(
39939             Src.getOperand(0).getValueType()))
39940       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));
39941 
39942     // Share broadcast with the longest vector and extract low subvector (free).
39943     // Ensure the same SDValue from the SDNode use is being used.
39944     for (SDNode *User : Src->uses())
39945       if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
39946           Src == User->getOperand(0) &&
39947           User->getValueSizeInBits(0).getFixedValue() >
39948               VT.getFixedSizeInBits()) {
39949         return extractSubVector(SDValue(User, 0), 0, DAG, DL,
39950                                 VT.getSizeInBits());
39951       }
39952 
39953     // vbroadcast(scalarload X) -> vbroadcast_load X
39954     // For float loads, extract other uses of the scalar from the broadcast.
39955     if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
39956         ISD::isNormalLoad(Src.getNode())) {
39957       LoadSDNode *LN = cast<LoadSDNode>(Src);
39958       SDVTList Tys = DAG.getVTList(VT, MVT::Other);
39959       SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
39960       SDValue BcastLd =
39961           DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
39962                                   LN->getMemoryVT(), LN->getMemOperand());
39963       // If the load value is used only by N, replace it via CombineTo N.
39964       bool NoReplaceExtract = Src.hasOneUse();
39965       DCI.CombineTo(N.getNode(), BcastLd);
39966       if (NoReplaceExtract) {
39967         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
39968         DCI.recursivelyDeleteUnusedNodes(LN);
39969       } else {
39970         SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
39971                                   DAG.getIntPtrConstant(0, DL));
39972         DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
39973       }
39974       return N; // Return N so it doesn't get rechecked!
39975     }
39976 
39977     // Due to isTypeDesirableForOp, we won't always shrink a load truncated to
39978     // i16. So shrink it ourselves if we can make a broadcast_load.
39979     if (SrcVT == MVT::i16 && Src.getOpcode() == ISD::TRUNCATE &&
39980         Src.hasOneUse() && Src.getOperand(0).hasOneUse()) {
39981       assert(Subtarget.hasAVX2() && "Expected AVX2");
39982       SDValue TruncIn = Src.getOperand(0);
39983 
39984       // If this is a truncate of a non extending load we can just narrow it to
39985       // use a broadcast_load.
39986       if (ISD::isNormalLoad(TruncIn.getNode())) {
39987         LoadSDNode *LN = cast<LoadSDNode>(TruncIn);
39988         // Unless it's volatile or atomic.
39989         if (LN->isSimple()) {
39990           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
39991           SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
39992           SDValue BcastLd = DAG.getMemIntrinsicNode(
39993               X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
39994               LN->getPointerInfo(), LN->getOriginalAlign(),
39995               LN->getMemOperand()->getFlags());
39996           DCI.CombineTo(N.getNode(), BcastLd);
39997           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
39998           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
39999           return N; // Return N so it doesn't get rechecked!
40000         }
40001       }
40002 
40003       // If this is a truncate of an i16 extload, we can directly replace it.
40004       if (ISD::isUNINDEXEDLoad(Src.getOperand(0).getNode()) &&
40005           ISD::isEXTLoad(Src.getOperand(0).getNode())) {
40006         LoadSDNode *LN = cast<LoadSDNode>(Src.getOperand(0));
40007         if (LN->getMemoryVT().getSizeInBits() == 16) {
40008           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40009           SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
40010           SDValue BcastLd =
40011               DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
40012                                       LN->getMemoryVT(), LN->getMemOperand());
40013           DCI.CombineTo(N.getNode(), BcastLd);
40014           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40015           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
40016           return N; // Return N so it doesn't get rechecked!
40017         }
40018       }
40019 
40020       // If this is a truncate of a load that has been shifted right, we can
40021       // offset the pointer and use a narrower load.
40022       if (TruncIn.getOpcode() == ISD::SRL &&
40023           TruncIn.getOperand(0).hasOneUse() &&
40024           isa<ConstantSDNode>(TruncIn.getOperand(1)) &&
40025           ISD::isNormalLoad(TruncIn.getOperand(0).getNode())) {
40026         LoadSDNode *LN = cast<LoadSDNode>(TruncIn.getOperand(0));
40027         unsigned ShiftAmt = TruncIn.getConstantOperandVal(1);
40028         // Make sure the shift amount and the load size are divisible by 16.
40029         // Don't do this if the load is volatile or atomic.
40030         if (ShiftAmt % 16 == 0 && TruncIn.getValueSizeInBits() % 16 == 0 &&
40031             LN->isSimple()) {
40032           unsigned Offset = ShiftAmt / 8;
40033           SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40034           SDValue Ptr = DAG.getMemBasePlusOffset(
40035               LN->getBasePtr(), TypeSize::getFixed(Offset), DL);
40036           SDValue Ops[] = { LN->getChain(), Ptr };
40037           SDValue BcastLd = DAG.getMemIntrinsicNode(
40038               X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
40039               LN->getPointerInfo().getWithOffset(Offset),
40040               LN->getOriginalAlign(),
40041               LN->getMemOperand()->getFlags());
40042           DCI.CombineTo(N.getNode(), BcastLd);
40043           DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40044           DCI.recursivelyDeleteUnusedNodes(Src.getNode());
40045           return N; // Return N so it doesn't get rechecked!
40046         }
40047       }
40048     }
40049 
40050     // vbroadcast(vzload X) -> vbroadcast_load X
40051     if (Src.getOpcode() == X86ISD::VZEXT_LOAD && Src.hasOneUse()) {
40052       MemSDNode *LN = cast<MemIntrinsicSDNode>(Src);
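      // Only fold if the zero-extending load's memory width matches the
      // broadcast element width.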
40053       if (LN->getMemoryVT().getSizeInBits() == VT.getScalarSizeInBits()) {
40054         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40055         SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
40056         SDValue BcastLd =
40057             DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
40058                                     LN->getMemoryVT(), LN->getMemOperand());
40059         DCI.CombineTo(N.getNode(), BcastLd);
40060         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40061         DCI.recursivelyDeleteUnusedNodes(LN);
40062         return N; // Return N so it doesn't get rechecked!
40063       }
40064     }
40065 
40066     // vbroadcast(vector load X) -> vbroadcast_load
40067     if ((SrcVT == MVT::v2f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v2i64 ||
40068          SrcVT == MVT::v4i32) &&
40069         Src.hasOneUse() && ISD::isNormalLoad(Src.getNode())) {
40070       LoadSDNode *LN = cast<LoadSDNode>(Src);
40071       // Unless the load is volatile or atomic.
40072       if (LN->isSimple()) {
40073         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40074         SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
40075         SDValue BcastLd = DAG.getMemIntrinsicNode(
40076             X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SrcVT.getScalarType(),
40077             LN->getPointerInfo(), LN->getOriginalAlign(),
40078             LN->getMemOperand()->getFlags());
40079         DCI.CombineTo(N.getNode(), BcastLd);
40080         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
40081         DCI.recursivelyDeleteUnusedNodes(LN);
40082         return N; // Return N so it doesn't get rechecked!
40083       }
40084     }
40085 
40086     return SDValue();
40087   }
40088   case X86ISD::VZEXT_MOVL: {
40089     SDValue N0 = N.getOperand(0);
40090 
40091     // If this is a vzmovl of a full vector load, replace it with a vzload,
40092     // unless the load is volatile.
40093     if (N0.hasOneUse() && ISD::isNormalLoad(N0.getNode())) {
40094       auto *LN = cast<LoadSDNode>(N0);
40095       if (SDValue VZLoad =
40096               narrowLoadToVZLoad(LN, VT.getVectorElementType(), VT, DAG)) {
40097         DCI.CombineTo(N.getNode(), VZLoad);
40098         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
40099         DCI.recursivelyDeleteUnusedNodes(LN);
40100         return N;
40101       }
40102     }
40103 
40104     // If this is a VZEXT_MOVL of a VBROADCAST_LOAD, we don't need the
40105     // broadcast and can just use a VZEXT_LOAD.
40106     // FIXME: Is there some way to do this with SimplifyDemandedVectorElts?
40107     if (N0.hasOneUse() && N0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
40108       auto *LN = cast<MemSDNode>(N0);
40109       if (VT.getScalarSizeInBits() == LN->getMemoryVT().getSizeInBits()) {
40110         SDVTList Tys = DAG.getVTList(VT, MVT::Other);
40111         SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
40112         SDValue VZLoad =
40113             DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
40114                                     LN->getMemoryVT(), LN->getMemOperand());
40115         DCI.CombineTo(N.getNode(), VZLoad);
40116         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
40117         DCI.recursivelyDeleteUnusedNodes(LN);
40118         return N;
40119       }
40120     }
40121 
40122     // Turn (v2i64 (vzext_movl (scalar_to_vector (i64 X)))) into
40123     // (v2i64 (bitcast (v4i32 (vzext_movl (scalar_to_vector (i32 (trunc X)))))))
40124     // if the upper bits of the i64 are zero.
40125     if (N0.hasOneUse() && N0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
40126         N0.getOperand(0).hasOneUse() &&
40127         N0.getOperand(0).getValueType() == MVT::i64) {
40128       SDValue In = N0.getOperand(0);
40129       APInt Mask = APInt::getHighBitsSet(64, 32);
40130       if (DAG.MaskedValueIsZero(In, Mask)) {
40131         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, In);
40132         MVT VecVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
40133         SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Trunc);
40134         SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, SclVec);
40135         return DAG.getBitcast(VT, Movl);
40136       }
40137     }
40138 
40139     // Load a scalar integer constant directly to XMM instead of transferring an
40140     // immediate value from GPR.
40141     // vzext_movl (scalar_to_vector C) --> load [C,0...]
40142     if (N0.getOpcode() == ISD::SCALAR_TO_VECTOR) {
40143       if (auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
40144         // Create a vector constant - scalar constant followed by zeros.
40145         EVT ScalarVT = N0.getOperand(0).getValueType();
40146         Type *ScalarTy = ScalarVT.getTypeForEVT(*DAG.getContext());
40147         unsigned NumElts = VT.getVectorNumElements();
40148         Constant *Zero = ConstantInt::getNullValue(ScalarTy);
40149         SmallVector<Constant *, 32> ConstantVec(NumElts, Zero);
40150         ConstantVec[0] = const_cast<ConstantInt *>(C->getConstantIntValue());
40151 
40152         // Load the vector constant from constant pool.
40153         MVT PVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
40154         SDValue CP = DAG.getConstantPool(ConstantVector::get(ConstantVec), PVT);
40155         MachinePointerInfo MPI =
40156             MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
40157         Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
40158         return DAG.getLoad(VT, DL, DAG.getEntryNode(), CP, MPI, Alignment,
40159                            MachineMemOperand::MOLoad);
40160       }
40161     }
40162 
40163     // Pull subvector inserts into undef through VZEXT_MOVL by making it an
40164     // insert into a zero vector. This helps get VZEXT_MOVL closer to
40165     // scalar_to_vectors where 256/512 are canonicalized to an insert and a
40166     // 128-bit scalar_to_vector. This reduces the number of isel patterns.
40167     if (!DCI.isBeforeLegalizeOps() && N0.hasOneUse()) {
40168       SDValue V = peekThroughOneUseBitcasts(N0);
40169 
40170       if (V.getOpcode() == ISD::INSERT_SUBVECTOR && V.getOperand(0).isUndef() &&
40171           isNullConstant(V.getOperand(2))) {
40172         SDValue In = V.getOperand(1);
40173         MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
40174                                      In.getValueSizeInBits() /
40175                                          VT.getScalarSizeInBits());
40176         In = DAG.getBitcast(SubVT, In);
40177         SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, SubVT, In);
40178         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
40179                            getZeroVector(VT, Subtarget, DAG, DL), Movl,
40180                            V.getOperand(2));
40181       }
40182     }
40183 
40184     return SDValue();
40185   }
40186   case X86ISD::BLENDI: {
40187     SDValue N0 = N.getOperand(0);
40188     SDValue N1 = N.getOperand(1);
40189 
40190     // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
40191     // TODO: Handle MVT::v16i16 repeated blend mask.
40192     if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
40193         N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
40194       MVT SrcVT = N0.getOperand(0).getSimpleValueType();
40195       if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
40196           SrcVT.getScalarSizeInBits() >= 32) {
40197         unsigned BlendMask = N.getConstantOperandVal(2);
40198         unsigned Size = VT.getVectorNumElements();
40199         unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
40200         BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
40201         return DAG.getBitcast(
40202             VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
40203                             N1.getOperand(0),
40204                             DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
40205       }
40206     }
40207     return SDValue();
40208   }
40209   case X86ISD::SHUFP: {
40210     // Fold shufps(shuffle(x),shuffle(y)) -> shufps(x,y).
40211     // This is a more relaxed shuffle combiner that can ignore oneuse limits.
40212     // TODO: Support types other than v4f32.
40213     if (VT == MVT::v4f32) {
40214       bool Updated = false;
40215       SmallVector<int> Mask;
40216       SmallVector<SDValue> Ops;
40217       if (getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask) &&
40218           Ops.size() == 2) {
40219         for (int i = 0; i != 2; ++i) {
40220           SmallVector<SDValue> SubOps;
40221           SmallVector<int> SubMask, SubScaledMask;
40222           SDValue Sub = peekThroughBitcasts(Ops[i]);
40223           // TODO: Scaling might be easier if we specify the demanded elts.
40224           if (getTargetShuffleInputs(Sub, SubOps, SubMask, DAG, 0, false) &&
40225               scaleShuffleElements(SubMask, 4, SubScaledMask) &&
40226               SubOps.size() == 1 && isUndefOrInRange(SubScaledMask, 0, 4)) {
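            // Remap this operand's two selectors through the inner shuffle's
            // mask, keeping them within operand i's index range (they are
            // rebased mod 4 below when the new immediate is built).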
40227             int Ofs = i * 2;
40228             Mask[Ofs + 0] = SubScaledMask[Mask[Ofs + 0] % 4] + (i * 4);
40229             Mask[Ofs + 1] = SubScaledMask[Mask[Ofs + 1] % 4] + (i * 4);
40230             Ops[i] = DAG.getBitcast(VT, SubOps[0]);
40231             Updated = true;
40232           }
40233         }
40234       }
40235       if (Updated) {
40236         for (int &M : Mask)
40237           M %= 4;
40238         Ops.push_back(getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
40239         return DAG.getNode(X86ISD::SHUFP, DL, VT, Ops);
40240       }
40241     }
40242     return SDValue();
40243   }
40244   case X86ISD::VPERMI: {
40245     // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
40246     // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
40247     SDValue N0 = N.getOperand(0);
40248     SDValue N1 = N.getOperand(1);
40249     unsigned EltSizeInBits = VT.getScalarSizeInBits();
40250     if (N0.getOpcode() == ISD::BITCAST &&
40251         N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
40252       SDValue Src = N0.getOperand(0);
40253       EVT SrcVT = Src.getValueType();
40254       SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
40255       return DAG.getBitcast(VT, Res);
40256     }
40257     return SDValue();
40258   }
40259   case X86ISD::SHUF128: {
40260     // If we're permuting the upper 256-bit subvectors of a concatenation, then
40261     // see if we can peek through and access the subvector directly.
40262     if (VT.is512BitVector()) {
40263       // 512-bit mask uses 4 x i2 indices - if the msb is always set then only
40264       // the upper subvector is used.
40265       SDValue LHS = N->getOperand(0);
40266       SDValue RHS = N->getOperand(1);
40267       uint64_t Mask = N->getConstantOperandVal(2);
40268       SmallVector<SDValue> LHSOps, RHSOps;
40269       SDValue NewLHS, NewRHS;
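      // Imm bits 1 and 3 (0x0A) are the msbs of the two indices sourcing from
      // LHS; bits 5 and 7 (0xA0) are the msbs of those sourcing from RHS. If
      // both msbs on one side are set, only that source's upper 256 bits are
      // used, so substitute the upper concat operand and clear those msbs.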
40270       if ((Mask & 0x0A) == 0x0A &&
40271           collectConcatOps(LHS.getNode(), LHSOps, DAG) && LHSOps.size() == 2) {
40272         NewLHS = widenSubVector(LHSOps[1], false, Subtarget, DAG, DL, 512);
40273         Mask &= ~0x0A;
40274       }
40275       if ((Mask & 0xA0) == 0xA0 &&
40276           collectConcatOps(RHS.getNode(), RHSOps, DAG) && RHSOps.size() == 2) {
40277         NewRHS = widenSubVector(RHSOps[1], false, Subtarget, DAG, DL, 512);
40278         Mask &= ~0xA0;
40279       }
40280       if (NewLHS || NewRHS)
40281         return DAG.getNode(X86ISD::SHUF128, DL, VT, NewLHS ? NewLHS : LHS,
40282                            NewRHS ? NewRHS : RHS,
40283                            DAG.getTargetConstant(Mask, DL, MVT::i8));
40284     }
40285     return SDValue();
40286   }
40287   case X86ISD::VPERM2X128: {
40288     // Fold vperm2x128(bitcast(x),bitcast(y),c) -> bitcast(vperm2x128(x,y,c)).
40289     SDValue LHS = N->getOperand(0);
40290     SDValue RHS = N->getOperand(1);
40291     if (LHS.getOpcode() == ISD::BITCAST &&
40292         (RHS.getOpcode() == ISD::BITCAST || RHS.isUndef())) {
40293       EVT SrcVT = LHS.getOperand(0).getValueType();
40294       if (RHS.isUndef() || SrcVT == RHS.getOperand(0).getValueType()) {
40295         return DAG.getBitcast(VT, DAG.getNode(X86ISD::VPERM2X128, DL, SrcVT,
40296                                               DAG.getBitcast(SrcVT, LHS),
40297                                               DAG.getBitcast(SrcVT, RHS),
40298                                               N->getOperand(2)));
40299       }
40300     }
40301 
40302     // Fold vperm2x128(op(),op()) -> op(vperm2x128(),vperm2x128()).
40303     if (SDValue Res = canonicalizeLaneShuffleWithRepeatedOps(N, DAG, DL))
40304       return Res;
40305 
40306     // Fold vperm2x128 subvector shuffle with an inner concat pattern.
40307     // vperm2x128(concat(X,Y),concat(Z,W)) --> concat X,Y etc.
40308     auto FindSubVector128 = [&](unsigned Idx) {
40309       if (Idx > 3)
40310         return SDValue();
40311       SDValue Src = peekThroughBitcasts(N.getOperand(Idx < 2 ? 0 : 1));
40312       SmallVector<SDValue> SubOps;
40313       if (collectConcatOps(Src.getNode(), SubOps, DAG) && SubOps.size() == 2)
40314         return SubOps[Idx & 1];
40315       unsigned NumElts = Src.getValueType().getVectorNumElements();
40316       if ((Idx & 1) == 1 && Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
40317           Src.getOperand(1).getValueSizeInBits() == 128 &&
40318           Src.getConstantOperandAPInt(2) == (NumElts / 2)) {
40319         return Src.getOperand(1);
40320       }
40321       return SDValue();
40322     };
40323     unsigned Imm = N.getConstantOperandVal(2);
40324     if (SDValue SubLo = FindSubVector128(Imm & 0x0F)) {
40325       if (SDValue SubHi = FindSubVector128((Imm & 0xF0) >> 4)) {
40326         MVT SubVT = VT.getHalfNumVectorElementsVT();
40327         SubLo = DAG.getBitcast(SubVT, SubLo);
40328         SubHi = DAG.getBitcast(SubVT, SubHi);
40329         return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, SubLo, SubHi);
40330       }
40331     }
40332     return SDValue();
40333   }
40334   case X86ISD::PSHUFD:
40335   case X86ISD::PSHUFLW:
40336   case X86ISD::PSHUFHW: {
40337     SDValue N0 = N.getOperand(0);
40338     SDValue N1 = N.getOperand(1);
40339     if (N0->hasOneUse()) {
40340       SDValue V = peekThroughOneUseBitcasts(N0);
40341       switch (V.getOpcode()) {
40342       case X86ISD::VSHL:
40343       case X86ISD::VSRL:
40344       case X86ISD::VSRA:
40345       case X86ISD::VSHLI:
40346       case X86ISD::VSRLI:
40347       case X86ISD::VSRAI:
40348       case X86ISD::VROTLI:
40349       case X86ISD::VROTRI: {
40350         MVT InnerVT = V.getSimpleValueType();
40351         if (InnerVT.getScalarSizeInBits() <= VT.getScalarSizeInBits()) {
40352           SDValue Res = DAG.getNode(Opcode, DL, VT,
40353                                     DAG.getBitcast(VT, V.getOperand(0)), N1);
40354           Res = DAG.getBitcast(InnerVT, Res);
40355           Res = DAG.getNode(V.getOpcode(), DL, InnerVT, Res, V.getOperand(1));
40356           return DAG.getBitcast(VT, Res);
40357         }
40358         break;
40359       }
40360       }
40361     }
40362 
40363     Mask = getPSHUFShuffleMask(N);
40364     assert(Mask.size() == 4);
40365     break;
40366   }
40367   case X86ISD::MOVSD:
40368   case X86ISD::MOVSH:
40369   case X86ISD::MOVSS: {
40370     SDValue N0 = N.getOperand(0);
40371     SDValue N1 = N.getOperand(1);
40372 
40373     // Canonicalize scalar FPOps:
40374     // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
40375     // If commutable, allow OP(N1[0], N0[0]).
40376     unsigned Opcode1 = N1.getOpcode();
40377     if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
40378         Opcode1 == ISD::FDIV) {
40379       SDValue N10 = N1.getOperand(0);
40380       SDValue N11 = N1.getOperand(1);
40381       if (N10 == N0 ||
40382           (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
40383         if (N10 != N0)
40384           std::swap(N10, N11);
40385         MVT SVT = VT.getVectorElementType();
40386         SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
40387         N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
40388         N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
40389         SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
40390         SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
40391         return DAG.getNode(Opcode, DL, VT, N0, SclVec);
40392       }
40393     }
40394 
40395     return SDValue();
40396   }
40397   case X86ISD::INSERTPS: {
40398     assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
40399     SDValue Op0 = N.getOperand(0);
40400     SDValue Op1 = N.getOperand(1);
40401     unsigned InsertPSMask = N.getConstantOperandVal(2);
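    // InsertPS immediate: bits [7:6] select the source element of Op1, bits
    // [5:4] the destination element in Op0, bits [3:0] are the zero mask.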
40402     unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
40403     unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
40404     unsigned ZeroMask = InsertPSMask & 0xF;
40405 
40406     // If we zero out all elements from Op0 then we don't need to reference it.
40407     if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
40408       return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
40409                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40410 
40411     // If we zero out the element from Op1 then we don't need to reference it.
40412     if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
40413       return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
40414                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40415 
40416     // Attempt to merge insertps Op1 with an inner target shuffle node.
40417     SmallVector<int, 8> TargetMask1;
40418     SmallVector<SDValue, 2> Ops1;
40419     APInt KnownUndef1, KnownZero1;
40420     if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
40421                                      KnownZero1)) {
40422       if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
40423         // Zero/UNDEF insertion - zero out element and remove dependency.
40424         InsertPSMask |= (1u << DstIdx);
40425         return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
40426                            DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40427       }
40428       // Update insertps mask srcidx and reference the source input directly.
40429       int M = TargetMask1[SrcIdx];
40430       assert(0 <= M && M < 8 && "Shuffle index out of range");
40431       InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
40432       Op1 = Ops1[M < 4 ? 0 : 1];
40433       return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
40434                          DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40435     }
40436 
40437     // Attempt to merge insertps Op0 with an inner target shuffle node.
40438     SmallVector<int, 8> TargetMask0;
40439     SmallVector<SDValue, 2> Ops0;
40440     APInt KnownUndef0, KnownZero0;
40441     if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
40442                                      KnownZero0)) {
40443       bool Updated = false;
40444       bool UseInput00 = false;
40445       bool UseInput01 = false;
40446       for (int i = 0; i != 4; ++i) {
40447         if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
40448           // No change if element is already zero or the inserted element.
40449           continue;
40450         }
40451 
40452         if (KnownUndef0[i] || KnownZero0[i]) {
40453           // If the target mask is undef/zero then we must zero the element.
40454           InsertPSMask |= (1u << i);
40455           Updated = true;
40456           continue;
40457         }
40458 
40459         // The input vector element must be in-place (index i or i + 4).
40460         int M = TargetMask0[i];
40461         if (M != i && M != (i + 4))
40462           return SDValue();
40463 
40464         // Determine which inputs of the target shuffle we're using.
40465         UseInput00 |= (0 <= M && M < 4);
40466         UseInput01 |= (4 <= M);
40467       }
40468 
40469       // If we're not using both inputs of the target shuffle then use the
40470       // referenced input directly.
40471       if (UseInput00 && !UseInput01) {
40472         Updated = true;
40473         Op0 = Ops0[0];
40474       } else if (!UseInput00 && UseInput01) {
40475         Updated = true;
40476         Op0 = Ops0[1];
40477       }
40478 
40479       if (Updated)
40480         return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
40481                            DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
40482     }
40483 
40484     // If we're inserting an element from a vbroadcast load, fold the load into
40485     // the X86insertps instruction as a scalar f32 load. The loaded value lands
40486     // in lane 0, so the source lane of the INSERTPS control is cleared.
40487     if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
40488       auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
40489       if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
40490         SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
40491                                    MemIntr->getBasePtr(),
40492                                    MemIntr->getMemOperand());
40493         SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
40494                            DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
40495                                        Load),
40496                            DAG.getTargetConstant(InsertPSMask & 0x3f, DL, MVT::i8));
40497         DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
40498         return Insert;
40499       }
40500     }
40501 
40502     return SDValue();
40503   }
40504   default:
40505     return SDValue();
40506   }
40507 
40508   // Nuke no-op shuffles that show up after combining.
40509   if (isNoopShuffleMask(Mask))
40510     return N.getOperand(0);
40511 
40512   // Look for simplifications involving one or two shuffle instructions.
40513   SDValue V = N.getOperand(0);
40514   switch (N.getOpcode()) {
40515   default:
40516     break;
40517   case X86ISD::PSHUFLW:
40518   case X86ISD::PSHUFHW:
40519     assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");
40520 
40521     // See if this reduces to a PSHUFD which is no more expensive and can
40522     // combine with more operations. Note that it has to at least flip the
40523     // dwords as otherwise it would have been removed as a no-op.
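    // E.g. a PSHUFLW with word mask <2,3,0,1> swaps the two dwords of the low
    // half, i.e. PSHUFD with dword mask <1,0,2,3> on the bitcast value
    // (PSHUFHW similarly maps to a swap of the high dwords).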
40524     if (ArrayRef<int>(Mask).equals({2, 3, 0, 1})) {
40525       int DMask[] = {0, 1, 2, 3};
40526       int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
40527       DMask[DOffset + 0] = DOffset + 1;
40528       DMask[DOffset + 1] = DOffset + 0;
40529       MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
40530       V = DAG.getBitcast(DVT, V);
40531       V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
40532                       getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
40533       return DAG.getBitcast(VT, V);
40534     }
40535 
40536     // Look for shuffle patterns which can be implemented as a single unpack.
40537     // FIXME: This doesn't handle the location of the PSHUFD generically, and
40538     // only works when we have a PSHUFD followed by two half-shuffles.
40539     if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
40540         (V.getOpcode() == X86ISD::PSHUFLW ||
40541          V.getOpcode() == X86ISD::PSHUFHW) &&
40542         V.getOpcode() != N.getOpcode() &&
40543         V.hasOneUse() && V.getOperand(0).hasOneUse()) {
40544       SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
40545       if (D.getOpcode() == X86ISD::PSHUFD) {
40546         SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
40547         SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
40548         int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
40549         int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
40550         int WordMask[8];
40551         for (int i = 0; i < 4; ++i) {
40552           WordMask[i + NOffset] = Mask[i] + NOffset;
40553           WordMask[i + VOffset] = VMask[i] + VOffset;
40554         }
40555         // Map the word mask through the DWord mask.
40556         int MappedMask[8];
40557         for (int i = 0; i < 8; ++i)
40558           MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
40559         if (ArrayRef<int>(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
40560             ArrayRef<int>(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
40561           // We can replace all three shuffles with an unpack.
40562           V = DAG.getBitcast(VT, D.getOperand(0));
40563           return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
40564                                                 : X86ISD::UNPCKH,
40565                              DL, VT, V, V);
40566         }
40567       }
40568     }
40569 
40570     break;
40571 
40572   case X86ISD::PSHUFD:
40573     if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
40574       return NewN;
40575 
40576     break;
40577   }
40578 
40579   return SDValue();
40580 }
40581 
40582 /// Checks if the shuffle mask takes consecutive elements alternately from
40583 /// two source vectors, with each element taken from its matching position.
40584 /// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
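/// In the second example the even result lanes all come from the second input
/// and the odd lanes from the first, so Op0Even is set to false; in the first
/// example it is set to true.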
40585 static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
40586 
40587   int ParitySrc[2] = {-1, -1};
40588   unsigned Size = Mask.size();
40589   for (unsigned i = 0; i != Size; ++i) {
40590     int M = Mask[i];
40591     if (M < 0)
40592       continue;
40593 
40594     // Make sure we are using the matching element from the input.
40595     if ((M % Size) != i)
40596       return false;
40597 
40598     // Make sure we use the same input for all elements of the same parity.
40599     int Src = M / Size;
40600     if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
40601       return false;
40602     ParitySrc[i % 2] = Src;
40603   }
40604 
40605   // Make sure each input is used.
40606   if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
40607     return false;
40608 
40609   Op0Even = ParitySrc[0] == 0;
40610   return true;
40611 }
40612 
40613 /// Returns true iff the shuffle node \p N can be replaced with ADDSUB(SUBADD)
40614 /// operation. If true is returned then the operands of ADDSUB(SUBADD) operation
40615 /// are written to the parameters \p Opnd0 and \p Opnd1.
40616 ///
40617 /// We combine shuffles to ADDSUB(SUBADD) directly on the abstract vector
40618 /// shuffle nodes so it is easier to generically match. We also insert dummy
40619 /// vector shuffle nodes for the operands which explicitly discard the lanes
40620 /// unused by this operation, so that the rest of the combiner can see that
40621 /// they're unused.
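///
/// E.g. shuffle <0, 5, 2, 7> of V1 = (fsub X, Y) and V2 = (fadd X, Y) takes
/// the subtraction in the even lanes and the addition in the odd lanes, which
/// is ADDSUB(X, Y); with V1 and V2 swapped the same mask yields SUBADD(X, Y).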
40622 static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
40623                              SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
40624                              bool &IsSubAdd) {
40625 
40626   EVT VT = N->getValueType(0);
40627   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40628   if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
40629       !VT.getSimpleVT().isFloatingPoint())
40630     return false;
40631 
40632   // We only handle target-independent shuffles.
40633   // FIXME: It would be easy and harmless to use the target shuffle mask
40634   // extraction tool to support more.
40635   if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
40636     return false;
40637 
40638   SDValue V1 = N->getOperand(0);
40639   SDValue V2 = N->getOperand(1);
40640 
40641   // Make sure we have an FADD and an FSUB.
40642   if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
40643       (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
40644       V1.getOpcode() == V2.getOpcode())
40645     return false;
40646 
40647   // If there are other uses of these operations we can't fold them.
40648   if (!V1->hasOneUse() || !V2->hasOneUse())
40649     return false;
40650 
40651   // Ensure that both operations have the same operands. Note that we can
40652   // commute the FADD operands.
40653   SDValue LHS, RHS;
40654   if (V1.getOpcode() == ISD::FSUB) {
40655     LHS = V1->getOperand(0); RHS = V1->getOperand(1);
40656     if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
40657         (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
40658       return false;
40659   } else {
40660     assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
40661     LHS = V2->getOperand(0); RHS = V2->getOperand(1);
40662     if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
40663         (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
40664       return false;
40665   }
40666 
40667   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
40668   bool Op0Even;
40669   if (!isAddSubOrSubAddMask(Mask, Op0Even))
40670     return false;
40671 
40672   // It's a subadd if the vector in the even parity is an FADD.
40673   IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
40674                      : V2->getOpcode() == ISD::FADD;
40675 
40676   Opnd0 = LHS;
40677   Opnd1 = RHS;
40678   return true;
40679 }
40680 
40681 /// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
40682 static SDValue combineShuffleToFMAddSub(SDNode *N,
40683                                         const X86Subtarget &Subtarget,
40684                                         SelectionDAG &DAG) {
40685   // We only handle target-independent shuffles.
40686   // FIXME: It would be easy and harmless to use the target shuffle mask
40687   // extraction tool to support more.
40688   if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
40689     return SDValue();
40690 
40691   MVT VT = N->getSimpleValueType(0);
40692   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40693   if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
40694     return SDValue();
40695 
40696   // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
40697   SDValue Op0 = N->getOperand(0);
40698   SDValue Op1 = N->getOperand(1);
40699   SDValue FMAdd = Op0, FMSub = Op1;
40700   if (FMSub.getOpcode() != X86ISD::FMSUB)
40701     std::swap(FMAdd, FMSub);
40702 
40703   if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
40704       FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
40705       FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
40706       FMAdd.getOperand(2) != FMSub.getOperand(2))
40707     return SDValue();
40708 
40709   // Check for correct shuffle mask.
40710   ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
40711   bool Op0Even;
40712   if (!isAddSubOrSubAddMask(Mask, Op0Even))
40713     return SDValue();
40714 
40715   // FMAddSub takes zeroth operand from FMSub node.
40716   SDLoc DL(N);
40717   bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
40718   unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
40719   return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
40720                      FMAdd.getOperand(2));
40721 }
40722 
40723 /// Try to combine a shuffle into a target-specific add-sub or
40724 /// mul-add-sub node.
40725 static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
40726                                                 const X86Subtarget &Subtarget,
40727                                                 SelectionDAG &DAG) {
40728   if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
40729     return V;
40730 
40731   SDValue Opnd0, Opnd1;
40732   bool IsSubAdd;
40733   if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
40734     return SDValue();
40735 
40736   MVT VT = N->getSimpleValueType(0);
40737   SDLoc DL(N);
40738 
40739   // Try to generate X86ISD::FMADDSUB node here.
40740   SDValue Opnd2;
40741   if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
40742     unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
40743     return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
40744   }
40745 
40746   if (IsSubAdd)
40747     return SDValue();
40748 
40749   // Do not generate X86ISD::ADDSUB node for 512-bit types even though
40750   // the ADDSUB idiom has been successfully recognized. There are no known
40751   // X86 targets with 512-bit ADDSUB instructions!
40752   if (VT.is512BitVector())
40753     return SDValue();
40754 
40755   // Do not generate X86ISD::ADDSUB node for FP16's vector types even though
40756   // the ADDSUB idiom has been successfully recognized. There are no known
40757   // X86 targets with FP16 ADDSUB instructions!
40758   if (VT.getVectorElementType() == MVT::f16)
40759     return SDValue();
40760 
40761   return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
40762 }
40763 
40764 // We are looking for a shuffle where both sources are concatenated with undef
40765 // and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
40766 // if we can express this as a single-source shuffle, that's preferable.
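// E.g. (v8i32 shuffle <0,1,8,9,2,3,10,11> (concat t1, undef),
//                                         (concat t2, undef))
// becomes (v8i32 shuffle <0,1,4,5,2,3,6,7> (concat t1, t2), undef),
// which a single VPERMD can lower.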
40767 static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
40768                                            const X86Subtarget &Subtarget) {
40769   if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
40770     return SDValue();
40771 
40772   EVT VT = N->getValueType(0);
40773 
40774   // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
40775   if (!VT.is128BitVector() && !VT.is256BitVector())
40776     return SDValue();
40777 
40778   if (VT.getVectorElementType() != MVT::i32 &&
40779       VT.getVectorElementType() != MVT::i64 &&
40780       VT.getVectorElementType() != MVT::f32 &&
40781       VT.getVectorElementType() != MVT::f64)
40782     return SDValue();
40783 
40784   SDValue N0 = N->getOperand(0);
40785   SDValue N1 = N->getOperand(1);
40786 
40787   // Check that both sources are concats with undef.
40788   if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
40789       N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
40790       N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
40791       !N1.getOperand(1).isUndef())
40792     return SDValue();
40793 
40794   // Construct the new shuffle mask. Elements from the first source retain their
40795   // index, but elements from the second source no longer need to skip an undef.
40796   SmallVector<int, 8> Mask;
40797   int NumElts = VT.getVectorNumElements();
40798 
40799   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
40800   for (int Elt : SVOp->getMask())
40801     Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));
40802 
40803   SDLoc DL(N);
40804   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
40805                                N1.getOperand(0));
40806   return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
40807 }
40808 
40809 /// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
40810 /// low half of each source vector and does not set any high half elements in
40811 /// the destination vector, narrow the shuffle to half its original size.
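///
/// E.g. a v8f32 shuffle with mask <0,8,1,9,u,u,u,u> only reads the low v4f32
/// half of each source and leaves the upper result half undef, so it can be
/// done as a v4f32 shuffle <0,4,1,5> whose result is (freely) reinserted into
/// a wide undef vector.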
40812 static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
40813   EVT VT = Shuf->getValueType(0);
40814   if (!DAG.getTargetLoweringInfo().isTypeLegal(Shuf->getValueType(0)))
40815     return SDValue();
40816   if (!VT.is256BitVector() && !VT.is512BitVector())
40817     return SDValue();
40818 
40819   // See if we can ignore all of the high elements of the shuffle.
40820   ArrayRef<int> Mask = Shuf->getMask();
40821   if (!isUndefUpperHalf(Mask))
40822     return SDValue();
40823 
40824   // Check if the shuffle mask accesses only the low half of each input vector
40825   // (half-index output is 0 or 2).
40826   int HalfIdx1, HalfIdx2;
40827   SmallVector<int, 8> HalfMask(Mask.size() / 2);
40828   if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
40829       (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
40830     return SDValue();
40831 
40832   // Create a half-width shuffle to replace the unnecessarily wide shuffle.
40833   // The trick is knowing that all of the insert/extract are actually free
40834   // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
40835   // of narrow inputs into a narrow output, and that is always cheaper than
40836   // the wide shuffle that we started with.
40837   return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
40838                                Shuf->getOperand(1), HalfMask, HalfIdx1,
40839                                HalfIdx2, false, DAG, /*UseConcat*/ true);
40840 }
40841 
40842 static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
40843                               TargetLowering::DAGCombinerInfo &DCI,
40844                               const X86Subtarget &Subtarget) {
40845   if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
40846     if (SDValue V = narrowShuffle(Shuf, DAG))
40847       return V;
40848 
40849   // If we have legalized the vector types, look for blends of FADD and FSUB
40850   // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
40851   SDLoc dl(N);
40852   EVT VT = N->getValueType(0);
40853   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
40854   if (TLI.isTypeLegal(VT) && !isSoftF16(VT, Subtarget))
40855     if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
40856       return AddSub;
40857 
40858   // Attempt to combine into a vector load/broadcast.
40859   if (SDValue LD = combineToConsecutiveLoads(
40860           VT, SDValue(N, 0), dl, DAG, Subtarget, /*IsAfterLegalize*/ true))
40861     return LD;
40862 
40863   // For AVX2, we sometimes want to combine
40864   // (vector_shuffle <mask> (concat_vectors t1, undef)
40865   //                        (concat_vectors t2, undef))
40866   // Into:
40867   // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
40868   // Since the latter can be efficiently lowered with VPERMD/VPERMQ
40869   if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
40870     return ShufConcat;
40871 
40872   if (isTargetShuffle(N->getOpcode())) {
40873     SDValue Op(N, 0);
40874     if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
40875       return Shuffle;
40876 
40877     // Try recursively combining arbitrary sequences of x86 shuffle
40878     // instructions into higher-order shuffles. We do this after combining
40879     // specific PSHUF instruction sequences into their minimal form so that we
40880     // can evaluate how many specialized shuffle instructions are involved in
40881     // a particular chain.
40882     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
40883       return Res;
40884 
40885     // Simplify source operands based on shuffle mask.
40886     // TODO - merge this into combineX86ShufflesRecursively.
40887     APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
40888     if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, DCI))
40889       return SDValue(N, 0);
40890 
40891     // Canonicalize SHUFFLE(UNARYOP(X)) -> UNARYOP(SHUFFLE(X)).
40892     // Canonicalize SHUFFLE(BINOP(X,Y)) -> BINOP(SHUFFLE(X),SHUFFLE(Y)).
40893     // Perform this after other shuffle combines to allow inner shuffles to be
40894     // combined away first.
40895     if (SDValue BinOp = canonicalizeShuffleWithOp(Op, DAG, dl))
40896       return BinOp;
40897   }
40898 
40899   return SDValue();
40900 }
40901 
40902 // Simplify variable target shuffle masks based on the demanded elements.
40903 // TODO: Handle DemandedBits in mask indices as well?
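// E.g. if only the low half of a PSHUFB result is demanded, the corresponding
// upper bytes of a constant-pool shuffle mask can be rewritten as undef, which
// may allow the mask load itself to be simplified further.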
40904 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetShuffle(
40905     SDValue Op, const APInt &DemandedElts, unsigned MaskIndex,
40906     TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {
40907   // If we're demanding all elements don't bother trying to simplify the mask.
40908   unsigned NumElts = DemandedElts.getBitWidth();
40909   if (DemandedElts.isAllOnes())
40910     return false;
40911 
40912   SDValue Mask = Op.getOperand(MaskIndex);
40913   if (!Mask.hasOneUse())
40914     return false;
40915 
40916   // Attempt to generically simplify the variable shuffle mask.
40917   APInt MaskUndef, MaskZero;
40918   if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
40919                                  Depth + 1))
40920     return true;
40921 
40922   // Attempt to extract+simplify a (constant pool load) shuffle mask.
40923   // TODO: Support other types from getTargetShuffleMaskIndices?
40924   SDValue BC = peekThroughOneUseBitcasts(Mask);
40925   EVT BCVT = BC.getValueType();
40926   auto *Load = dyn_cast<LoadSDNode>(BC);
40927   if (!Load || !Load->getBasePtr().hasOneUse())
40928     return false;
40929 
40930   const Constant *C = getTargetConstantFromNode(Load);
40931   if (!C)
40932     return false;
40933 
40934   Type *CTy = C->getType();
40935   if (!CTy->isVectorTy() ||
40936       CTy->getPrimitiveSizeInBits() != Mask.getValueSizeInBits())
40937     return false;
40938 
40939   // Handle scaling for i64 elements on 32-bit targets.
40940   unsigned NumCstElts = cast<FixedVectorType>(CTy)->getNumElements();
40941   if (NumCstElts != NumElts && NumCstElts != (NumElts * 2))
40942     return false;
40943   unsigned Scale = NumCstElts / NumElts;
40944 
40945   // Simplify mask if we have an undemanded element that is not undef.
40946   bool Simplified = false;
40947   SmallVector<Constant *, 32> ConstVecOps;
40948   for (unsigned i = 0; i != NumCstElts; ++i) {
40949     Constant *Elt = C->getAggregateElement(i);
40950     if (!DemandedElts[i / Scale] && !isa<UndefValue>(Elt)) {
40951       ConstVecOps.push_back(UndefValue::get(Elt->getType()));
40952       Simplified = true;
40953       continue;
40954     }
40955     ConstVecOps.push_back(Elt);
40956   }
40957   if (!Simplified)
40958     return false;
40959 
40960   // Generate new constant pool entry + legalize immediately for the load.
40961   SDLoc DL(Op);
40962   SDValue CV = TLO.DAG.getConstantPool(ConstantVector::get(ConstVecOps), BCVT);
40963   SDValue LegalCV = LowerConstantPool(CV, TLO.DAG);
40964   SDValue NewMask = TLO.DAG.getLoad(
40965       BCVT, DL, TLO.DAG.getEntryNode(), LegalCV,
40966       MachinePointerInfo::getConstantPool(TLO.DAG.getMachineFunction()),
40967       Load->getAlign());
40968   return TLO.CombineTo(Mask, TLO.DAG.getBitcast(Mask.getValueType(), NewMask));
40969 }
40970 
40971 bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
40972     SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
40973     TargetLoweringOpt &TLO, unsigned Depth) const {
40974   int NumElts = DemandedElts.getBitWidth();
40975   unsigned Opc = Op.getOpcode();
40976   EVT VT = Op.getValueType();
40977 
40978   // Handle special case opcodes.
40979   switch (Opc) {
40980   case X86ISD::PMULDQ:
40981   case X86ISD::PMULUDQ: {
40982     APInt LHSUndef, LHSZero;
40983     APInt RHSUndef, RHSZero;
40984     SDValue LHS = Op.getOperand(0);
40985     SDValue RHS = Op.getOperand(1);
40986     if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
40987                                    Depth + 1))
40988       return true;
40989     if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
40990                                    Depth + 1))
40991       return true;
40992     // Multiply by zero.
40993     KnownZero = LHSZero | RHSZero;
40994     break;
40995   }
40996   case X86ISD::VPMADDWD: {
40997     APInt LHSUndef, LHSZero;
40998     APInt RHSUndef, RHSZero;
40999     SDValue LHS = Op.getOperand(0);
41000     SDValue RHS = Op.getOperand(1);
41001     APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, 2 * NumElts);
41002 
41003     if (SimplifyDemandedVectorElts(LHS, DemandedSrcElts, LHSUndef, LHSZero, TLO,
41004                                    Depth + 1))
41005       return true;
41006     if (SimplifyDemandedVectorElts(RHS, DemandedSrcElts, RHSUndef, RHSZero, TLO,
41007                                    Depth + 1))
41008       return true;
41009 
41010     // TODO: Multiply by zero.
41011 
41012     // If RHS/LHS elements are known zero then we don't need the LHS/RHS equivalent.
41013     APInt DemandedLHSElts = DemandedSrcElts & ~RHSZero;
41014     if (SimplifyDemandedVectorElts(LHS, DemandedLHSElts, LHSUndef, LHSZero, TLO,
41015                                    Depth + 1))
41016       return true;
41017     APInt DemandedRHSElts = DemandedSrcElts & ~LHSZero;
41018     if (SimplifyDemandedVectorElts(RHS, DemandedRHSElts, RHSUndef, RHSZero, TLO,
41019                                    Depth + 1))
41020       return true;
41021     break;
41022   }
41023   case X86ISD::PSADBW: {
41024     SDValue LHS = Op.getOperand(0);
41025     SDValue RHS = Op.getOperand(1);
41026     assert(VT.getScalarType() == MVT::i64 &&
41027            LHS.getValueType() == RHS.getValueType() &&
41028            LHS.getValueType().getScalarType() == MVT::i8 &&
41029            "Unexpected PSADBW types");
41030 
41031     // Aggressively peek through ops to get at the demanded elts.
41032     if (!DemandedElts.isAllOnes()) {
41033       unsigned NumSrcElts = LHS.getValueType().getVectorNumElements();
41034       APInt DemandedSrcElts = APIntOps::ScaleBitMask(DemandedElts, NumSrcElts);
41035       SDValue NewLHS = SimplifyMultipleUseDemandedVectorElts(
41036           LHS, DemandedSrcElts, TLO.DAG, Depth + 1);
41037       SDValue NewRHS = SimplifyMultipleUseDemandedVectorElts(
41038           RHS, DemandedSrcElts, TLO.DAG, Depth + 1);
41039       if (NewLHS || NewRHS) {
41040         NewLHS = NewLHS ? NewLHS : LHS;
41041         NewRHS = NewRHS ? NewRHS : RHS;
41042         return TLO.CombineTo(
41043             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
41044       }
41045     }
41046     break;
41047   }
41048   case X86ISD::VSHL:
41049   case X86ISD::VSRL:
41050   case X86ISD::VSRA: {
41051     // We only need the bottom 64-bits of the (128-bit) shift amount.
41052     SDValue Amt = Op.getOperand(1);
41053     MVT AmtVT = Amt.getSimpleValueType();
41054     assert(AmtVT.is128BitVector() && "Unexpected value type");
41055 
41056     // If we reuse the shift amount just for sse shift amounts then we know
41057     // that only the bottom 64-bits are ever used.
41058     bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
41059       unsigned UseOpc = Use->getOpcode();
41060       return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
41061               UseOpc == X86ISD::VSRA) &&
41062              Use->getOperand(0) != Amt;
41063     });
41064 
41065     APInt AmtUndef, AmtZero;
41066     unsigned NumAmtElts = AmtVT.getVectorNumElements();
41067     APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
41068     if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
41069                                    Depth + 1, AssumeSingleUse))
41070       return true;
41071     [[fallthrough]];
41072   }
41073   case X86ISD::VSHLI:
41074   case X86ISD::VSRLI:
41075   case X86ISD::VSRAI: {
41076     SDValue Src = Op.getOperand(0);
41077     APInt SrcUndef;
41078     if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
41079                                    Depth + 1))
41080       return true;
41081 
41082     // Fold shift(0,x) -> 0
41083     if (DemandedElts.isSubsetOf(KnownZero))
41084       return TLO.CombineTo(
41085           Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
41086 
41087     // Aggressively peek through ops to get at the demanded elts.
41088     if (!DemandedElts.isAllOnes())
41089       if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
41090               Src, DemandedElts, TLO.DAG, Depth + 1))
41091         return TLO.CombineTo(
41092             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc, Op.getOperand(1)));
41093     break;
41094   }
41095   case X86ISD::VPSHA:
41096   case X86ISD::VPSHL:
41097   case X86ISD::VSHLV:
41098   case X86ISD::VSRLV:
41099   case X86ISD::VSRAV: {
41100     APInt LHSUndef, LHSZero;
41101     APInt RHSUndef, RHSZero;
41102     SDValue LHS = Op.getOperand(0);
41103     SDValue RHS = Op.getOperand(1);
41104     if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
41105                                    Depth + 1))
41106       return true;
41107 
41108     // Fold shift(0,x) -> 0
41109     if (DemandedElts.isSubsetOf(LHSZero))
41110       return TLO.CombineTo(
41111           Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
41112 
41113     if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
41114                                    Depth + 1))
41115       return true;
41116 
41117     KnownZero = LHSZero;
41118     break;
41119   }
41120   case X86ISD::KSHIFTL: {
41121     SDValue Src = Op.getOperand(0);
41122     auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
41123     assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
41124     unsigned ShiftAmt = Amt->getZExtValue();
41125 
41126     if (ShiftAmt == 0)
41127       return TLO.CombineTo(Op, Src);
41128 
41129     // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
41130     // single shift.  We can do this if the bottom bits (which are shifted
41131     // out) are never demanded.
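    // E.g. kshiftl(kshiftr(X, 4), 6) becomes kshiftl(X, 2) when none of the
    // low 6 result elements are demanded (Diff = 6 - 4 = 2).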
41132     if (Src.getOpcode() == X86ISD::KSHIFTR) {
41133       if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
41134         unsigned C1 = Src.getConstantOperandVal(1);
41135         unsigned NewOpc = X86ISD::KSHIFTL;
41136         int Diff = ShiftAmt - C1;
41137         if (Diff < 0) {
41138           Diff = -Diff;
41139           NewOpc = X86ISD::KSHIFTR;
41140         }
41141 
41142         SDLoc dl(Op);
41143         SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
41144         return TLO.CombineTo(
41145             Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
41146       }
41147     }
41148 
41149     APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
41150     if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
41151                                    Depth + 1))
41152       return true;
41153 
41154     KnownUndef <<= ShiftAmt;
41155     KnownZero <<= ShiftAmt;
41156     KnownZero.setLowBits(ShiftAmt);
41157     break;
41158   }
41159   case X86ISD::KSHIFTR: {
41160     SDValue Src = Op.getOperand(0);
41161     auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
41162     assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
41163     unsigned ShiftAmt = Amt->getZExtValue();
41164 
41165     if (ShiftAmt == 0)
41166       return TLO.CombineTo(Op, Src);
41167 
41168     // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
41169     // single shift.  We can do this if the top bits (which are shifted
41170     // out) are never demanded.
41171     if (Src.getOpcode() == X86ISD::KSHIFTL) {
41172       if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
41173         unsigned C1 = Src.getConstantOperandVal(1);
41174         unsigned NewOpc = X86ISD::KSHIFTR;
41175         int Diff = ShiftAmt - C1;
41176         if (Diff < 0) {
41177           Diff = -Diff;
41178           NewOpc = X86ISD::KSHIFTL;
41179         }
41180 
41181         SDLoc dl(Op);
41182         SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
41183         return TLO.CombineTo(
41184             Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
41185       }
41186     }
41187 
41188     APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
41189     if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
41190                                    Depth + 1))
41191       return true;
41192 
41193     KnownUndef.lshrInPlace(ShiftAmt);
41194     KnownZero.lshrInPlace(ShiftAmt);
41195     KnownZero.setHighBits(ShiftAmt);
41196     break;
41197   }
41198   case X86ISD::ANDNP: {
41199     // ANDNP = (~LHS & RHS);
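    // E.g. if an RHS constant element is zero, the matching LHS element is not
    // demanded; if an LHS constant element is all-ones, the matching RHS
    // element is not demanded - the ANDNP result lane is zero either way.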
41200     SDValue LHS = Op.getOperand(0);
41201     SDValue RHS = Op.getOperand(1);
41202 
41203     auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
41204       APInt UndefElts;
41205       SmallVector<APInt> EltBits;
41206       int NumElts = VT.getVectorNumElements();
41207       int EltSizeInBits = VT.getScalarSizeInBits();
41208       APInt OpBits = APInt::getAllOnes(EltSizeInBits);
41209       APInt OpElts = DemandedElts;
41210       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
41211                                         EltBits)) {
41212         OpBits.clearAllBits();
41213         OpElts.clearAllBits();
41214         for (int I = 0; I != NumElts; ++I) {
41215           if (!DemandedElts[I])
41216             continue;
41217           if (UndefElts[I]) {
41218             // We can't assume an undef src element gives an undef dst - the
41219             // other src might be zero.
41220             OpBits.setAllBits();
41221             OpElts.setBit(I);
41222           } else if ((Invert && !EltBits[I].isAllOnes()) ||
41223                      (!Invert && !EltBits[I].isZero())) {
41224             OpBits |= Invert ? ~EltBits[I] : EltBits[I];
41225             OpElts.setBit(I);
41226           }
41227         }
41228       }
41229       return std::make_pair(OpBits, OpElts);
41230     };
41231     APInt BitsLHS, EltsLHS;
41232     APInt BitsRHS, EltsRHS;
41233     std::tie(BitsLHS, EltsLHS) = GetDemandedMasks(RHS);
41234     std::tie(BitsRHS, EltsRHS) = GetDemandedMasks(LHS, true);
41235 
41236     APInt LHSUndef, LHSZero;
41237     APInt RHSUndef, RHSZero;
41238     if (SimplifyDemandedVectorElts(LHS, EltsLHS, LHSUndef, LHSZero, TLO,
41239                                    Depth + 1))
41240       return true;
41241     if (SimplifyDemandedVectorElts(RHS, EltsRHS, RHSUndef, RHSZero, TLO,
41242                                    Depth + 1))
41243       return true;
41244 
41245     if (!DemandedElts.isAllOnes()) {
41246       SDValue NewLHS = SimplifyMultipleUseDemandedBits(LHS, BitsLHS, EltsLHS,
41247                                                        TLO.DAG, Depth + 1);
41248       SDValue NewRHS = SimplifyMultipleUseDemandedBits(RHS, BitsRHS, EltsRHS,
41249                                                        TLO.DAG, Depth + 1);
41250       if (NewLHS || NewRHS) {
41251         NewLHS = NewLHS ? NewLHS : LHS;
41252         NewRHS = NewRHS ? NewRHS : RHS;
41253         return TLO.CombineTo(
41254             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewLHS, NewRHS));
41255       }
41256     }
41257     break;
41258   }
41259   case X86ISD::CVTSI2P:
41260   case X86ISD::CVTUI2P: {
41261     SDValue Src = Op.getOperand(0);
41262     MVT SrcVT = Src.getSimpleValueType();
41263     APInt SrcUndef, SrcZero;
41264     APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
41265     if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
41266                                    Depth + 1))
41267       return true;
41268     break;
41269   }
41270   case X86ISD::PACKSS:
41271   case X86ISD::PACKUS: {
41272     SDValue N0 = Op.getOperand(0);
41273     SDValue N1 = Op.getOperand(1);
41274 
41275     APInt DemandedLHS, DemandedRHS;
41276     getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
41277 
41278     APInt LHSUndef, LHSZero;
41279     if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
41280                                    Depth + 1))
41281       return true;
41282     APInt RHSUndef, RHSZero;
41283     if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
41284                                    Depth + 1))
41285       return true;
41286 
41287     // TODO - pass on known zero/undef.
41288 
41289     // Aggressively peek through ops to get at the demanded elts.
41290     // TODO - we should do this for all target/faux shuffles ops.
41291     if (!DemandedElts.isAllOnes()) {
41292       SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
41293                                                             TLO.DAG, Depth + 1);
41294       SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
41295                                                             TLO.DAG, Depth + 1);
41296       if (NewN0 || NewN1) {
41297         NewN0 = NewN0 ? NewN0 : N0;
41298         NewN1 = NewN1 ? NewN1 : N1;
41299         return TLO.CombineTo(Op,
41300                              TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
41301       }
41302     }
41303     break;
41304   }
41305   case X86ISD::HADD:
41306   case X86ISD::HSUB:
41307   case X86ISD::FHADD:
41308   case X86ISD::FHSUB: {
41309     SDValue N0 = Op.getOperand(0);
41310     SDValue N1 = Op.getOperand(1);
41311 
41312     APInt DemandedLHS, DemandedRHS;
41313     getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
41314 
41315     APInt LHSUndef, LHSZero;
41316     if (SimplifyDemandedVectorElts(N0, DemandedLHS, LHSUndef, LHSZero, TLO,
41317                                    Depth + 1))
41318       return true;
41319     APInt RHSUndef, RHSZero;
41320     if (SimplifyDemandedVectorElts(N1, DemandedRHS, RHSUndef, RHSZero, TLO,
41321                                    Depth + 1))
41322       return true;
41323 
41324     // TODO - pass on known zero/undef.
41325 
41326     // Aggressively peek through ops to get at the demanded elts.
41327     // TODO: Handle repeated operands.
41328     if (N0 != N1 && !DemandedElts.isAllOnes()) {
41329       SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
41330                                                             TLO.DAG, Depth + 1);
41331       SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
41332                                                             TLO.DAG, Depth + 1);
41333       if (NewN0 || NewN1) {
41334         NewN0 = NewN0 ? NewN0 : N0;
41335         NewN1 = NewN1 ? NewN1 : N1;
41336         return TLO.CombineTo(Op,
41337                              TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
41338       }
41339     }
41340     break;
41341   }
41342   case X86ISD::VTRUNC:
41343   case X86ISD::VTRUNCS:
41344   case X86ISD::VTRUNCUS: {
41345     SDValue Src = Op.getOperand(0);
41346     MVT SrcVT = Src.getSimpleValueType();
41347     APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
41348     APInt SrcUndef, SrcZero;
41349     if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
41350                                    Depth + 1))
41351       return true;
41352     KnownZero = SrcZero.zextOrTrunc(NumElts);
41353     KnownUndef = SrcUndef.zextOrTrunc(NumElts);
41354     break;
41355   }
41356   case X86ISD::BLENDV: {
41357     APInt SelUndef, SelZero;
41358     if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
41359                                    SelZero, TLO, Depth + 1))
41360       return true;
41361 
41362     // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
41363     APInt LHSUndef, LHSZero;
41364     if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
41365                                    LHSZero, TLO, Depth + 1))
41366       return true;
41367 
41368     APInt RHSUndef, RHSZero;
41369     if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
41370                                    RHSZero, TLO, Depth + 1))
41371       return true;
41372 
41373     KnownZero = LHSZero & RHSZero;
41374     KnownUndef = LHSUndef & RHSUndef;
41375     break;
41376   }
41377   case X86ISD::VZEXT_MOVL: {
41378     // If upper demanded elements are already zero then we have nothing to do.
41379     SDValue Src = Op.getOperand(0);
41380     APInt DemandedUpperElts = DemandedElts;
41381     DemandedUpperElts.clearLowBits(1);
41382     if (TLO.DAG.MaskedVectorIsZero(Src, DemandedUpperElts, Depth + 1))
41383       return TLO.CombineTo(Op, Src);
41384     break;
41385   }
41386   case X86ISD::VZEXT_LOAD: {
41387     // If upper demanded elements are not demanded then simplify to a
41388     // scalar_to_vector(load()).
41389     MVT SVT = VT.getSimpleVT().getVectorElementType();
41390     if (DemandedElts == 1 && Op.getValue(1).use_empty() && isTypeLegal(SVT)) {
41391       SDLoc DL(Op);
41392       auto *Mem = cast<MemSDNode>(Op);
41393       SDValue Elt = TLO.DAG.getLoad(SVT, DL, Mem->getChain(), Mem->getBasePtr(),
41394                                     Mem->getMemOperand());
41395       SDValue Vec = TLO.DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Elt);
41396       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Vec));
41397     }
41398     break;
41399   }
41400   case X86ISD::VBROADCAST: {
41401     SDValue Src = Op.getOperand(0);
41402     MVT SrcVT = Src.getSimpleValueType();
41403     if (!SrcVT.isVector())
41404       break;
41405     // Don't bother broadcasting if we just need the 0'th element.
41406     if (DemandedElts == 1) {
41407       if (Src.getValueType() != VT)
41408         Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
41409                              SDLoc(Op));
41410       return TLO.CombineTo(Op, Src);
41411     }
41412     APInt SrcUndef, SrcZero;
41413     APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
41414     if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
41415                                    Depth + 1))
41416       return true;
41417     // Aggressively peek through src to get at the demanded elt.
41418     // TODO - we should do this for all target/faux shuffles ops.
41419     if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
41420             Src, SrcElts, TLO.DAG, Depth + 1))
41421       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
41422     break;
41423   }
41424   case X86ISD::VPERMV:
41425     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 0, TLO,
41426                                                    Depth))
41427       return true;
41428     break;
41429   case X86ISD::PSHUFB:
41430   case X86ISD::VPERMV3:
41431   case X86ISD::VPERMILPV:
41432     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 1, TLO,
41433                                                    Depth))
41434       return true;
41435     break;
41436   case X86ISD::VPPERM:
41437   case X86ISD::VPERMIL2:
41438     if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 2, TLO,
41439                                                    Depth))
41440       return true;
41441     break;
41442   }
41443 
41444   // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
41445   // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
41446   // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
41447   if ((VT.is256BitVector() || VT.is512BitVector()) &&
41448       DemandedElts.lshr(NumElts / 2) == 0) {
41449     unsigned SizeInBits = VT.getSizeInBits();
41450     unsigned ExtSizeInBits = SizeInBits / 2;
41451 
41452     // See if 512-bit ops only use the bottom 128-bits.
41453     if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
41454       ExtSizeInBits = SizeInBits / 4;
41455 
41456     switch (Opc) {
41457       // Scalar broadcast.
41458     case X86ISD::VBROADCAST: {
41459       SDLoc DL(Op);
41460       SDValue Src = Op.getOperand(0);
41461       if (Src.getValueSizeInBits() > ExtSizeInBits)
41462         Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
41463       EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41464                                     ExtSizeInBits / VT.getScalarSizeInBits());
41465       SDValue Bcst = TLO.DAG.getNode(X86ISD::VBROADCAST, DL, BcstVT, Src);
41466       return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
41467                                                TLO.DAG, DL, ExtSizeInBits));
41468     }
41469     case X86ISD::VBROADCAST_LOAD: {
41470       SDLoc DL(Op);
41471       auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
41472       EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41473                                     ExtSizeInBits / VT.getScalarSizeInBits());
41474       SDVTList Tys = TLO.DAG.getVTList(BcstVT, MVT::Other);
41475       SDValue Ops[] = {MemIntr->getOperand(0), MemIntr->getOperand(1)};
41476       SDValue Bcst = TLO.DAG.getMemIntrinsicNode(
41477           X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MemIntr->getMemoryVT(),
41478           MemIntr->getMemOperand());
41479       TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
41480                                            Bcst.getValue(1));
41481       return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Bcst, 0,
41482                                                TLO.DAG, DL, ExtSizeInBits));
41483     }
41484       // Subvector broadcast.
41485     case X86ISD::SUBV_BROADCAST_LOAD: {
41486       auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
41487       EVT MemVT = MemIntr->getMemoryVT();
41488       if (ExtSizeInBits == MemVT.getStoreSizeInBits()) {
41489         SDLoc DL(Op);
41490         SDValue Ld =
41491             TLO.DAG.getLoad(MemVT, DL, MemIntr->getChain(),
41492                             MemIntr->getBasePtr(), MemIntr->getMemOperand());
41493         TLO.DAG.makeEquivalentMemoryOrdering(SDValue(MemIntr, 1),
41494                                              Ld.getValue(1));
41495         return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Ld, 0,
41496                                                  TLO.DAG, DL, ExtSizeInBits));
41497       } else if ((ExtSizeInBits % MemVT.getStoreSizeInBits()) == 0) {
41498         SDLoc DL(Op);
41499         EVT BcstVT = EVT::getVectorVT(*TLO.DAG.getContext(), VT.getScalarType(),
41500                                       ExtSizeInBits / VT.getScalarSizeInBits());
41501         if (SDValue BcstLd =
41502                 getBROADCAST_LOAD(Opc, DL, BcstVT, MemVT, MemIntr, 0, TLO.DAG))
41503           return TLO.CombineTo(Op,
41504                                insertSubVector(TLO.DAG.getUNDEF(VT), BcstLd, 0,
41505                                                TLO.DAG, DL, ExtSizeInBits));
41506       }
41507       break;
41508     }
41509       // Byte shifts by immediate.
41510     case X86ISD::VSHLDQ:
41511     case X86ISD::VSRLDQ:
41512       // Shift by uniform.
41513     case X86ISD::VSHL:
41514     case X86ISD::VSRL:
41515     case X86ISD::VSRA:
41516       // Shift by immediate.
41517     case X86ISD::VSHLI:
41518     case X86ISD::VSRLI:
41519     case X86ISD::VSRAI: {
41520       SDLoc DL(Op);
41521       SDValue Ext0 =
41522           extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
41523       SDValue ExtOp =
41524           TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
41525       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41526       SDValue Insert =
41527           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
41528       return TLO.CombineTo(Op, Insert);
41529     }
41530     case X86ISD::VPERMI: {
41531       // Simplify PERMPD/PERMQ to extract_subvector.
41532       // TODO: This should be done in shuffle combining.
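      // E.g. VPERMQ(X, <2,3,u,u>) only needs the upper 128 bits of X in the
      // low half of the result, i.e. extract_subvector(X, 2) reinserted at 0.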
41533       if (VT == MVT::v4f64 || VT == MVT::v4i64) {
41534         SmallVector<int, 4> Mask;
41535         DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
41536         if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
41537           SDLoc DL(Op);
41538           SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
41539           SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41540           SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
41541           return TLO.CombineTo(Op, Insert);
41542         }
41543       }
41544       break;
41545     }
41546     case X86ISD::VPERM2X128: {
41547       // Simplify VPERM2F128/VPERM2I128 to extract_subvector.
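      // The low nibble of the immediate selects the low 128-bit result lane:
      // bit 3 zeroes it, bit 1 picks the source operand and bit 0 picks which
      // half of that source. E.g. imm 0x3 takes the high half of the second
      // source.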
41548       SDLoc DL(Op);
41549       unsigned LoMask = Op.getConstantOperandVal(2) & 0xF;
41550       if (LoMask & 0x8)
41551         return TLO.CombineTo(
41552             Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, DL));
41553       unsigned EltIdx = (LoMask & 0x1) * (NumElts / 2);
41554       unsigned SrcIdx = (LoMask & 0x2) >> 1;
41555       SDValue ExtOp =
41556           extractSubVector(Op.getOperand(SrcIdx), EltIdx, TLO.DAG, DL, 128);
41557       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41558       SDValue Insert =
41559           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
41560       return TLO.CombineTo(Op, Insert);
41561     }
41562       // Zero upper elements.
41563     case X86ISD::VZEXT_MOVL:
41564       // Target unary shuffles by immediate:
41565     case X86ISD::PSHUFD:
41566     case X86ISD::PSHUFLW:
41567     case X86ISD::PSHUFHW:
41568     case X86ISD::VPERMILPI:
41569       // (Non-Lane Crossing) Target Shuffles.
41570     case X86ISD::VPERMILPV:
41571     case X86ISD::VPERMIL2:
41572     case X86ISD::PSHUFB:
41573     case X86ISD::UNPCKL:
41574     case X86ISD::UNPCKH:
41575     case X86ISD::BLENDI:
41576       // Integer ops.
41577     case X86ISD::PACKSS:
41578     case X86ISD::PACKUS:
41579     case X86ISD::PCMPEQ:
41580     case X86ISD::PCMPGT:
41581     case X86ISD::PMULUDQ:
41582     case X86ISD::PMULDQ:
41583     case X86ISD::VSHLV:
41584     case X86ISD::VSRLV:
41585     case X86ISD::VSRAV:
41586       // Float ops.
41587     case X86ISD::FMAX:
41588     case X86ISD::FMIN:
41589     case X86ISD::FMAXC:
41590     case X86ISD::FMINC:
41591       // Horizontal Ops.
41592     case X86ISD::HADD:
41593     case X86ISD::HSUB:
41594     case X86ISD::FHADD:
41595     case X86ISD::FHSUB: {
41596       SDLoc DL(Op);
41597       SmallVector<SDValue, 4> Ops;
41598       for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
41599         SDValue SrcOp = Op.getOperand(i);
41600         EVT SrcVT = SrcOp.getValueType();
41601         assert((!SrcVT.isVector() || SrcVT.getSizeInBits() == SizeInBits) &&
41602                "Unsupported vector size");
41603         Ops.push_back(SrcVT.isVector() ? extractSubVector(SrcOp, 0, TLO.DAG, DL,
41604                                                           ExtSizeInBits)
41605                                        : SrcOp);
41606       }
41607       MVT ExtVT = VT.getSimpleVT();
41608       ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
41609                                ExtSizeInBits / ExtVT.getScalarSizeInBits());
41610       SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ops);
41611       SDValue UndefVec = TLO.DAG.getUNDEF(VT);
41612       SDValue Insert =
41613           insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
41614       return TLO.CombineTo(Op, Insert);
41615     }
41616     }
41617   }
41618 
41619   // For splats, unless we *only* demand the 0'th element, stop attempts at
41620   // simplification here: we aren't going to improve things, and this is
41621   // better than any potential shuffle.
41622   if (!DemandedElts.isOne() && TLO.DAG.isSplatValue(Op, /*AllowUndefs*/false))
41623     return false;
41624 
41625   // Get target/faux shuffle mask.
41626   APInt OpUndef, OpZero;
41627   SmallVector<int, 64> OpMask;
41628   SmallVector<SDValue, 2> OpInputs;
41629   if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
41630                               OpZero, TLO.DAG, Depth, false))
41631     return false;
41632 
41633   // Shuffle inputs must be the same size as the result.
41634   if (OpMask.size() != (unsigned)NumElts ||
41635       llvm::any_of(OpInputs, [VT](SDValue V) {
41636         return VT.getSizeInBits() != V.getValueSizeInBits() ||
41637                !V.getValueType().isVector();
41638       }))
41639     return false;
41640 
41641   KnownZero = OpZero;
41642   KnownUndef = OpUndef;
41643 
41644   // Check if shuffle mask can be simplified to undef/zero/identity.
41645   int NumSrcs = OpInputs.size();
41646   for (int i = 0; i != NumElts; ++i)
41647     if (!DemandedElts[i])
41648       OpMask[i] = SM_SentinelUndef;
41649 
41650   if (isUndefInRange(OpMask, 0, NumElts)) {
41651     KnownUndef.setAllBits();
41652     return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
41653   }
41654   if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
41655     KnownZero.setAllBits();
41656     return TLO.CombineTo(
41657         Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
41658   }
41659   for (int Src = 0; Src != NumSrcs; ++Src)
41660     if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
41661       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));
41662 
41663   // Attempt to simplify inputs.
41664   for (int Src = 0; Src != NumSrcs; ++Src) {
41665     // TODO: Support inputs of different types.
41666     if (OpInputs[Src].getValueType() != VT)
41667       continue;
41668 
41669     int Lo = Src * NumElts;
41670     APInt SrcElts = APInt::getZero(NumElts);
41671     for (int i = 0; i != NumElts; ++i)
41672       if (DemandedElts[i]) {
41673         int M = OpMask[i] - Lo;
41674         if (0 <= M && M < NumElts)
41675           SrcElts.setBit(M);
41676       }
41677 
41678     // TODO - Propagate input undef/zero elts.
41679     APInt SrcUndef, SrcZero;
41680     if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
41681                                    TLO, Depth + 1))
41682       return true;
41683   }
41684 
41685   // If we don't demand all elements, then attempt to combine to a simpler
41686   // shuffle.
41687   // We need to convert the depth to something combineX86ShufflesRecursively
41688   // can handle - so pretend its Depth == 0 again, and reduce the max depth
41689   // to match. This prevents combineX86ShuffleChain from returning a
41690   // combined shuffle that's the same as the original root, causing an
41691   // infinite loop.
41692   if (!DemandedElts.isAllOnes()) {
41693     assert(Depth < X86::MaxShuffleCombineDepth && "Depth out of range");
41694 
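    // E.g. with NumElts == 8 and only the low 4 elements demanded, the mask
    // built below is <0,1,2,3,u,u,u,u> - an identity shuffle over just the
    // demanded lanes.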
41695     SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
41696     for (int i = 0; i != NumElts; ++i)
41697       if (DemandedElts[i])
41698         DemandedMask[i] = i;
41699 
41700     SDValue NewShuffle = combineX86ShufflesRecursively(
41701         {Op}, 0, Op, DemandedMask, {}, 0, X86::MaxShuffleCombineDepth - Depth,
41702         /*HasVarMask*/ false,
41703         /*AllowCrossLaneVarMask*/ true, /*AllowPerLaneVarMask*/ true, TLO.DAG,
41704         Subtarget);
41705     if (NewShuffle)
41706       return TLO.CombineTo(Op, NewShuffle);
41707   }
41708 
41709   return false;
41710 }
41711 
41712 bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
41713     SDValue Op, const APInt &OriginalDemandedBits,
41714     const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
41715     unsigned Depth) const {
41716   EVT VT = Op.getValueType();
41717   unsigned BitWidth = OriginalDemandedBits.getBitWidth();
41718   unsigned Opc = Op.getOpcode();
41719   switch(Opc) {
41720   case X86ISD::VTRUNC: {
41721     KnownBits KnownOp;
41722     SDValue Src = Op.getOperand(0);
41723     MVT SrcVT = Src.getSimpleValueType();
41724 
41725     // Simplify the input, using demanded bit information.
41726     APInt TruncMask = OriginalDemandedBits.zext(SrcVT.getScalarSizeInBits());
41727     APInt DemandedElts = OriginalDemandedElts.trunc(SrcVT.getVectorNumElements());
41728     if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, KnownOp, TLO, Depth + 1))
41729       return true;
41730     break;
41731   }
41732   case X86ISD::PMULDQ:
41733   case X86ISD::PMULUDQ: {
41734     // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
41735     KnownBits KnownLHS, KnownRHS;
41736     SDValue LHS = Op.getOperand(0);
41737     SDValue RHS = Op.getOperand(1);
41738 
41739     // Don't mask bits on 32-bit AVX512 targets which might lose a broadcast.
41740     // FIXME: Can we bound this better?
41741     APInt DemandedMask = APInt::getLowBitsSet(64, 32);
41742     APInt DemandedMaskLHS = APInt::getAllOnes(64);
41743     APInt DemandedMaskRHS = APInt::getAllOnes(64);
41744 
41745     bool Is32BitAVX512 = !Subtarget.is64Bit() && Subtarget.hasAVX512();
41746     if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(LHS))
41747       DemandedMaskLHS = DemandedMask;
41748     if (!Is32BitAVX512 || !TLO.DAG.isSplatValue(RHS))
41749       DemandedMaskRHS = DemandedMask;
41750 
41751     if (SimplifyDemandedBits(LHS, DemandedMaskLHS, OriginalDemandedElts,
41752                              KnownLHS, TLO, Depth + 1))
41753       return true;
41754     if (SimplifyDemandedBits(RHS, DemandedMaskRHS, OriginalDemandedElts,
41755                              KnownRHS, TLO, Depth + 1))
41756       return true;
41757 
41758     // PMULUDQ(X,1) -> AND(X,(1<<32)-1) 'getZeroExtendInReg'.
41759     KnownRHS = KnownRHS.trunc(32);
41760     if (Opc == X86ISD::PMULUDQ && KnownRHS.isConstant() &&
41761         KnownRHS.getConstant().isOne()) {
41762       SDLoc DL(Op);
41763       SDValue Mask = TLO.DAG.getConstant(DemandedMask, DL, VT);
41764       return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, DL, VT, LHS, Mask));
41765     }
41766 
41767     // Aggressively peek through ops to get at the demanded low bits.
41768     SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
41769         LHS, DemandedMaskLHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
41770     SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
41771         RHS, DemandedMaskRHS, OriginalDemandedElts, TLO.DAG, Depth + 1);
41772     if (DemandedLHS || DemandedRHS) {
41773       DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
41774       DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
41775       return TLO.CombineTo(
41776           Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
41777     }
41778     break;
41779   }
41780   case X86ISD::ANDNP: {
41781     KnownBits Known2;
41782     SDValue Op0 = Op.getOperand(0);
41783     SDValue Op1 = Op.getOperand(1);
41784 
41785     if (SimplifyDemandedBits(Op1, OriginalDemandedBits, OriginalDemandedElts,
41786                              Known, TLO, Depth + 1))
41787       return true;
41788     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41789 
41790     if (SimplifyDemandedBits(Op0, ~Known.Zero & OriginalDemandedBits,
41791                              OriginalDemandedElts, Known2, TLO, Depth + 1))
41792       return true;
41793     assert(!Known2.hasConflict() && "Bits known to be one AND zero?");
41794 
41795     // If the RHS is a constant, see if we can simplify it.
41796     if (ShrinkDemandedConstant(Op, ~Known2.One & OriginalDemandedBits,
41797                                OriginalDemandedElts, TLO))
41798       return true;
41799 
41800     // ANDNP = (~Op0 & Op1);
41801     Known.One &= Known2.Zero;
41802     Known.Zero |= Known2.One;
41803     break;
41804   }
41805   case X86ISD::VSHLI: {
41806     SDValue Op0 = Op.getOperand(0);
41807 
41808     unsigned ShAmt = Op.getConstantOperandVal(1);
41809     if (ShAmt >= BitWidth)
41810       break;
41811 
41812     APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);
41813 
41814     // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
41815     // single shift.  We can do this if the bottom bits (which are shifted
41816     // out) are never demanded.
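    // For illustration (assuming i32 elements and ShAmt == 5): (X >>u 2) << 5
    // folds to X << 3, and (X >>u 8) << 5 folds to X >>u 3; this is safe only
    // because the low ShAmt bits of the result are not demanded.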
41817     if (Op0.getOpcode() == X86ISD::VSRLI &&
41818         OriginalDemandedBits.countr_zero() >= ShAmt) {
41819       unsigned Shift2Amt = Op0.getConstantOperandVal(1);
41820       if (Shift2Amt < BitWidth) {
41821         int Diff = ShAmt - Shift2Amt;
41822         if (Diff == 0)
41823           return TLO.CombineTo(Op, Op0.getOperand(0));
41824 
41825         unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
41826         SDValue NewShift = TLO.DAG.getNode(
41827             NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
41828             TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
41829         return TLO.CombineTo(Op, NewShift);
41830       }
41831     }
41832 
41833     // If we are only demanding sign bits then we can use the shift source directly.
41834     unsigned NumSignBits =
41835         TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1);
41836     unsigned UpperDemandedBits = BitWidth - OriginalDemandedBits.countr_zero();
41837     if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
41838       return TLO.CombineTo(Op, Op0);
41839 
41840     if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
41841                              TLO, Depth + 1))
41842       return true;
41843 
41844     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41845     Known.Zero <<= ShAmt;
41846     Known.One <<= ShAmt;
41847 
41848     // Low bits known zero.
41849     Known.Zero.setLowBits(ShAmt);
41850     return false;
41851   }
41852   case X86ISD::VSRLI: {
41853     unsigned ShAmt = Op.getConstantOperandVal(1);
41854     if (ShAmt >= BitWidth)
41855       break;
41856 
41857     APInt DemandedMask = OriginalDemandedBits << ShAmt;
41858 
41859     if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
41860                              OriginalDemandedElts, Known, TLO, Depth + 1))
41861       return true;
41862 
41863     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41864     Known.Zero.lshrInPlace(ShAmt);
41865     Known.One.lshrInPlace(ShAmt);
41866 
41867     // High bits known zero.
41868     Known.Zero.setHighBits(ShAmt);
41869     return false;
41870   }
41871   case X86ISD::VSRAI: {
41872     SDValue Op0 = Op.getOperand(0);
41873     SDValue Op1 = Op.getOperand(1);
41874 
41875     unsigned ShAmt = Op1->getAsZExtVal();
41876     if (ShAmt >= BitWidth)
41877       break;
41878 
41879     APInt DemandedMask = OriginalDemandedBits << ShAmt;
41880 
41881     // If we just want the sign bit then we don't need to shift it.
41882     if (OriginalDemandedBits.isSignMask())
41883       return TLO.CombineTo(Op, Op0);
41884 
41885     // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
41886     if (Op0.getOpcode() == X86ISD::VSHLI &&
41887         Op.getOperand(1) == Op0.getOperand(1)) {
41888       SDValue Op00 = Op0.getOperand(0);
41889       unsigned NumSignBits =
41890           TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
41891       if (ShAmt < NumSignBits)
41892         return TLO.CombineTo(Op, Op00);
41893     }
41894 
41895     // If any of the demanded bits are produced by the sign extension, we also
41896     // demand the input sign bit.
41897     if (OriginalDemandedBits.countl_zero() < ShAmt)
41898       DemandedMask.setSignBit();
41899 
41900     if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
41901                              TLO, Depth + 1))
41902       return true;
41903 
41904     assert(!Known.hasConflict() && "Bits known to be one AND zero?");
41905     Known.Zero.lshrInPlace(ShAmt);
41906     Known.One.lshrInPlace(ShAmt);
41907 
41908     // If the input sign bit is known to be zero, or if none of the top bits
41909     // are demanded, turn this into an unsigned shift right.
41910     if (Known.Zero[BitWidth - ShAmt - 1] ||
41911         OriginalDemandedBits.countl_zero() >= ShAmt)
41912       return TLO.CombineTo(
41913           Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));
41914 
41915     // High bits are known one.
41916     if (Known.One[BitWidth - ShAmt - 1])
41917       Known.One.setHighBits(ShAmt);
41918     return false;
41919   }
41920   case X86ISD::BLENDV: {
41921     SDValue Sel = Op.getOperand(0);
41922     SDValue LHS = Op.getOperand(1);
41923     SDValue RHS = Op.getOperand(2);
41924 
41925     APInt SignMask = APInt::getSignMask(BitWidth);
41926     SDValue NewSel = SimplifyMultipleUseDemandedBits(
41927         Sel, SignMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
41928     SDValue NewLHS = SimplifyMultipleUseDemandedBits(
41929         LHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
41930     SDValue NewRHS = SimplifyMultipleUseDemandedBits(
41931         RHS, OriginalDemandedBits, OriginalDemandedElts, TLO.DAG, Depth + 1);
41932 
41933     if (NewSel || NewLHS || NewRHS) {
41934       NewSel = NewSel ? NewSel : Sel;
41935       NewLHS = NewLHS ? NewLHS : LHS;
41936       NewRHS = NewRHS ? NewRHS : RHS;
41937       return TLO.CombineTo(Op, TLO.DAG.getNode(X86ISD::BLENDV, SDLoc(Op), VT,
41938                                                NewSel, NewLHS, NewRHS));
41939     }
41940     break;
41941   }
41942   case X86ISD::PEXTRB:
41943   case X86ISD::PEXTRW: {
41944     SDValue Vec = Op.getOperand(0);
41945     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
41946     MVT VecVT = Vec.getSimpleValueType();
41947     unsigned NumVecElts = VecVT.getVectorNumElements();
41948 
41949     if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
41950       unsigned Idx = CIdx->getZExtValue();
41951       unsigned VecBitWidth = VecVT.getScalarSizeInBits();
41952 
41953       // If we demand no bits from the vector then we must have demanded
41954       // bits from the implicit zext - simplify to zero.
41955       APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
41956       if (DemandedVecBits == 0)
41957         return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
41958 
41959       APInt KnownUndef, KnownZero;
41960       APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
41961       if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
41962                                      KnownZero, TLO, Depth + 1))
41963         return true;
41964 
41965       KnownBits KnownVec;
41966       if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
41967                                KnownVec, TLO, Depth + 1))
41968         return true;
41969 
41970       if (SDValue V = SimplifyMultipleUseDemandedBits(
41971               Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
41972         return TLO.CombineTo(
41973             Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));
41974 
41975       Known = KnownVec.zext(BitWidth);
41976       return false;
41977     }
41978     break;
41979   }
41980   case X86ISD::PINSRB:
41981   case X86ISD::PINSRW: {
41982     SDValue Vec = Op.getOperand(0);
41983     SDValue Scl = Op.getOperand(1);
41984     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
41985     MVT VecVT = Vec.getSimpleValueType();
41986 
41987     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
41988       unsigned Idx = CIdx->getZExtValue();
41989       if (!OriginalDemandedElts[Idx])
41990         return TLO.CombineTo(Op, Vec);
41991 
41992       KnownBits KnownVec;
41993       APInt DemandedVecElts(OriginalDemandedElts);
41994       DemandedVecElts.clearBit(Idx);
41995       if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
41996                                KnownVec, TLO, Depth + 1))
41997         return true;
41998 
41999       KnownBits KnownScl;
42000       unsigned NumSclBits = Scl.getScalarValueSizeInBits();
42001       APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
42002       if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
42003         return true;
42004 
42005       KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
42006       Known = KnownVec.intersectWith(KnownScl);
42007       return false;
42008     }
42009     break;
42010   }
42011   case X86ISD::PACKSS:
42012     // PACKSS saturates to MIN/MAX integer values. So if we only want the
42013     // sign bit then we can just ask for the source operands' sign bits.
42014     // TODO - add known bits handling.
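    // For illustration: signed saturation preserves the sign bit, e.g. PACKSSDW
    // maps 0x80000001 -> 0x8000 and 0x7FFFFFFF -> 0x7FFF, so each result
    // element's sign bit equals the corresponding source element's sign bit.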
42015     if (OriginalDemandedBits.isSignMask()) {
42016       APInt DemandedLHS, DemandedRHS;
42017       getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);
42018 
42019       KnownBits KnownLHS, KnownRHS;
42020       APInt SignMask = APInt::getSignMask(BitWidth * 2);
42021       if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
42022                                KnownLHS, TLO, Depth + 1))
42023         return true;
42024       if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
42025                                KnownRHS, TLO, Depth + 1))
42026         return true;
42027 
42028       // Attempt to avoid multi-use ops if we don't need anything from them.
42029       SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
42030           Op.getOperand(0), SignMask, DemandedLHS, TLO.DAG, Depth + 1);
42031       SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
42032           Op.getOperand(1), SignMask, DemandedRHS, TLO.DAG, Depth + 1);
42033       if (DemandedOp0 || DemandedOp1) {
42034         SDValue Op0 = DemandedOp0 ? DemandedOp0 : Op.getOperand(0);
42035         SDValue Op1 = DemandedOp1 ? DemandedOp1 : Op.getOperand(1);
42036         return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, Op0, Op1));
42037       }
42038     }
42039     // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
42040     break;
42041   case X86ISD::VBROADCAST: {
42042     SDValue Src = Op.getOperand(0);
42043     MVT SrcVT = Src.getSimpleValueType();
42044     APInt DemandedElts = APInt::getOneBitSet(
42045         SrcVT.isVector() ? SrcVT.getVectorNumElements() : 1, 0);
42046     if (SimplifyDemandedBits(Src, OriginalDemandedBits, DemandedElts, Known,
42047                              TLO, Depth + 1))
42048       return true;
42049     // If we don't need the upper bits, attempt to narrow the broadcast source.
42050     // Don't attempt this on AVX512 as it might affect broadcast folding.
42051     // TODO: Should we attempt this for i32/i16 splats? They tend to be slower.
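    // For illustration: a v2i64 broadcast of an i64 GPR where only the low
    // 32 bits of each element are demanded is rebuilt below as a v4i32
    // broadcast of the truncated value and bitcast back to v2i64.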
42052     if ((BitWidth == 64) && SrcVT.isScalarInteger() && !Subtarget.hasAVX512() &&
42053         OriginalDemandedBits.countl_zero() >= (BitWidth / 2) &&
42054         Src->hasOneUse()) {
42055       MVT NewSrcVT = MVT::getIntegerVT(BitWidth / 2);
42056       SDValue NewSrc =
42057           TLO.DAG.getNode(ISD::TRUNCATE, SDLoc(Src), NewSrcVT, Src);
42058       MVT NewVT = MVT::getVectorVT(NewSrcVT, VT.getVectorNumElements() * 2);
42059       SDValue NewBcst =
42060           TLO.DAG.getNode(X86ISD::VBROADCAST, SDLoc(Op), NewVT, NewSrc);
42061       return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, NewBcst));
42062     }
42063     break;
42064   }
42065   case X86ISD::PCMPGT:
42066     // icmp sgt(0, R) == ashr(R, BitWidth-1).
42067     // iff we only need the sign bit then we can use R directly.
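    // For illustration with v4i32: pcmpgt(zero, R) is all-ones exactly in the
    // lanes where R is negative, i.e. it replicates R's sign bit, so R itself
    // already provides the only bit we demand.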
42068     if (OriginalDemandedBits.isSignMask() &&
42069         ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
42070       return TLO.CombineTo(Op, Op.getOperand(1));
42071     break;
42072   case X86ISD::MOVMSK: {
42073     SDValue Src = Op.getOperand(0);
42074     MVT SrcVT = Src.getSimpleValueType();
42075     unsigned SrcBits = SrcVT.getScalarSizeInBits();
42076     unsigned NumElts = SrcVT.getVectorNumElements();
42077 
42078     // If we don't need the sign bits at all just return zero.
42079     if (OriginalDemandedBits.countr_zero() >= NumElts)
42080       return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
42081 
42082     // See if we only demand bits from the lower 128-bit vector.
42083     if (SrcVT.is256BitVector() &&
42084         OriginalDemandedBits.getActiveBits() <= (NumElts / 2)) {
42085       SDValue NewSrc = extract128BitVector(Src, 0, TLO.DAG, SDLoc(Src));
42086       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
42087     }
42088 
42089     // Only demand the vector elements whose sign bits we need.
42090     APInt KnownUndef, KnownZero;
42091     APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
42092     if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
42093                                    TLO, Depth + 1))
42094       return true;
42095 
42096     Known.Zero = KnownZero.zext(BitWidth);
42097     Known.Zero.setHighBits(BitWidth - NumElts);
42098 
42099     // MOVMSK only uses the MSB from each vector element.
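    // For illustration: with a v4i32/v4f32 source, result bit i is the sign bit
    // of element i, bits 4-31 of the scalar result are zero, and no other
    // source bits affect the result.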
42100     KnownBits KnownSrc;
42101     APInt DemandedSrcBits = APInt::getSignMask(SrcBits);
42102     if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, KnownSrc, TLO,
42103                              Depth + 1))
42104       return true;
42105 
42106     if (KnownSrc.One[SrcBits - 1])
42107       Known.One.setLowBits(NumElts);
42108     else if (KnownSrc.Zero[SrcBits - 1])
42109       Known.Zero.setLowBits(NumElts);
42110 
42111     // Attempt to avoid a multi-use op if we don't need anything from it.
42112     if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
42113             Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
42114       return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
42115     return false;
42116   }
42117   case X86ISD::TESTP: {
42118     SDValue Op0 = Op.getOperand(0);
42119     SDValue Op1 = Op.getOperand(1);
42120     MVT OpVT = Op0.getSimpleValueType();
42121     assert((OpVT.getVectorElementType() == MVT::f32 ||
42122             OpVT.getVectorElementType() == MVT::f64) &&
42123            "Illegal vector type for X86ISD::TESTP");
42124 
42125     // TESTPS/TESTPD only demands the sign bits of ALL the elements.
42126     KnownBits KnownSrc;
42127     APInt SignMask = APInt::getSignMask(OpVT.getScalarSizeInBits());
42128     bool AssumeSingleUse = (Op0 == Op1) && Op->isOnlyUserOf(Op0.getNode());
42129     return SimplifyDemandedBits(Op0, SignMask, KnownSrc, TLO, Depth + 1,
42130                                 AssumeSingleUse) ||
42131            SimplifyDemandedBits(Op1, SignMask, KnownSrc, TLO, Depth + 1,
42132                                 AssumeSingleUse);
42133   }
42134   case X86ISD::BEXTR:
42135   case X86ISD::BEXTRI: {
42136     SDValue Op0 = Op.getOperand(0);
42137     SDValue Op1 = Op.getOperand(1);
42138 
42139     // Only bottom 16-bits of the control bits are required.
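    // For reference: the control encodes the start position in bits [7:0] and
    // the length in bits [15:8] (e.g. 0x0604 extracts 6 bits starting at bit 4);
    // the hardware ignores control bits above bit 15.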
42140     if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
42141       // NOTE: SimplifyDemandedBits won't do this for constants.
42142       uint64_t Val1 = Cst1->getZExtValue();
42143       uint64_t MaskedVal1 = Val1 & 0xFFFF;
42144       if (Opc == X86ISD::BEXTR && MaskedVal1 != Val1) {
42145         SDLoc DL(Op);
42146         return TLO.CombineTo(
42147             Op, TLO.DAG.getNode(X86ISD::BEXTR, DL, VT, Op0,
42148                                 TLO.DAG.getConstant(MaskedVal1, DL, VT)));
42149       }
42150 
42151       unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
42152       unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
42153 
42154       // If the length is 0, the result is 0.
42155       if (Length == 0) {
42156         Known.setAllZero();
42157         return false;
42158       }
42159 
42160       if ((Shift + Length) <= BitWidth) {
42161         APInt DemandedMask = APInt::getBitsSet(BitWidth, Shift, Shift + Length);
42162         if (SimplifyDemandedBits(Op0, DemandedMask, Known, TLO, Depth + 1))
42163           return true;
42164 
42165         Known = Known.extractBits(Length, Shift);
42166         Known = Known.zextOrTrunc(BitWidth);
42167         return false;
42168       }
42169     } else {
42170       assert(Opc == X86ISD::BEXTR && "Unexpected opcode!");
42171       KnownBits Known1;
42172       APInt DemandedMask(APInt::getLowBitsSet(BitWidth, 16));
42173       if (SimplifyDemandedBits(Op1, DemandedMask, Known1, TLO, Depth + 1))
42174         return true;
42175 
42176       // If the length is 0, replace with 0.
42177       KnownBits LengthBits = Known1.extractBits(8, 8);
42178       if (LengthBits.isZero())
42179         return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));
42180     }
42181 
42182     break;
42183   }
42184   case X86ISD::PDEP: {
42185     SDValue Op0 = Op.getOperand(0);
42186     SDValue Op1 = Op.getOperand(1);
42187 
42188     unsigned DemandedBitsLZ = OriginalDemandedBits.countl_zero();
42189     APInt LoMask = APInt::getLowBitsSet(BitWidth, BitWidth - DemandedBitsLZ);
42190 
42191     // If the demanded bits have leading zeroes, we don't demand those from the
42192     // mask.
42193     if (SimplifyDemandedBits(Op1, LoMask, Known, TLO, Depth + 1))
42194       return true;
42195 
42196     // The number of possible 1s in the mask determines the number of LSBs of
42197     // operand 0 used. Undemanded bits from the mask don't matter so filter
42198     // them before counting.
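    // For illustration: PDEP deposits the low popcount(mask) bits of the source
    // into the set-bit positions of the mask, e.g. PDEP(0b11, 0b1010) == 0b1010
    // and PDEP(0b01, 0b1010) == 0b0010.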
42199     KnownBits Known2;
42200     uint64_t Count = (~Known.Zero & LoMask).popcount();
42201     APInt DemandedMask(APInt::getLowBitsSet(BitWidth, Count));
42202     if (SimplifyDemandedBits(Op0, DemandedMask, Known2, TLO, Depth + 1))
42203       return true;
42204 
42205     // Zeroes are retained from the mask, but not ones.
42206     Known.One.clearAllBits();
42207     // The result will have at least as many trailing zeros as the non-mask
42208     // operand since bits can only map to the same or higher bit position.
42209     Known.Zero.setLowBits(Known2.countMinTrailingZeros());
42210     return false;
42211   }
42212   }
42213 
42214   return TargetLowering::SimplifyDemandedBitsForTargetNode(
42215       Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
42216 }
42217 
42218 SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
42219     SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
42220     SelectionDAG &DAG, unsigned Depth) const {
42221   int NumElts = DemandedElts.getBitWidth();
42222   unsigned Opc = Op.getOpcode();
42223   EVT VT = Op.getValueType();
42224 
42225   switch (Opc) {
42226   case X86ISD::PINSRB:
42227   case X86ISD::PINSRW: {
42228     // If we don't demand the inserted element, return the base vector.
42229     SDValue Vec = Op.getOperand(0);
42230     auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
42231     MVT VecVT = Vec.getSimpleValueType();
42232     if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
42233         !DemandedElts[CIdx->getZExtValue()])
42234       return Vec;
42235     break;
42236   }
42237   case X86ISD::VSHLI: {
42238     // If we are only demanding sign bits then we can use the shift source
42239     // directly.
42240     SDValue Op0 = Op.getOperand(0);
42241     unsigned ShAmt = Op.getConstantOperandVal(1);
42242     unsigned BitWidth = DemandedBits.getBitWidth();
42243     unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
42244     unsigned UpperDemandedBits = BitWidth - DemandedBits.countr_zero();
42245     if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
42246       return Op0;
42247     break;
42248   }
42249   case X86ISD::VSRAI:
42250     // iff we only need the sign bit then we can use the source directly.
42251     // TODO: generalize where we only demand extended signbits.
42252     if (DemandedBits.isSignMask())
42253       return Op.getOperand(0);
42254     break;
42255   case X86ISD::PCMPGT:
42256     // icmp sgt(0, R) == ashr(R, BitWidth-1).
42257     // iff we only need the sign bit then we can use R directly.
42258     if (DemandedBits.isSignMask() &&
42259         ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
42260       return Op.getOperand(1);
42261     break;
42262   case X86ISD::BLENDV: {
42263     // BLENDV: Cond (MSB) ? LHS : RHS
42264     SDValue Cond = Op.getOperand(0);
42265     SDValue LHS = Op.getOperand(1);
42266     SDValue RHS = Op.getOperand(2);
42267 
42268     KnownBits CondKnown = DAG.computeKnownBits(Cond, DemandedElts, Depth + 1);
42269     if (CondKnown.isNegative())
42270       return LHS;
42271     if (CondKnown.isNonNegative())
42272       return RHS;
42273     break;
42274   }
42275   case X86ISD::ANDNP: {
42276     // ANDNP = (~LHS & RHS);
42277     SDValue LHS = Op.getOperand(0);
42278     SDValue RHS = Op.getOperand(1);
42279 
42280     KnownBits LHSKnown = DAG.computeKnownBits(LHS, DemandedElts, Depth + 1);
42281     KnownBits RHSKnown = DAG.computeKnownBits(RHS, DemandedElts, Depth + 1);
42282 
42283     // If each of the demanded bits is known 0 on the LHS or known 0 on the
42284     // RHS, then the (inverted) LHS bits cannot contribute to the result of the
42285     // 'andn' in this context, so return RHS.
42286     if (DemandedBits.isSubsetOf(RHSKnown.Zero | LHSKnown.Zero))
42287       return RHS;
42288     break;
42289   }
42290   }
42291 
42292   APInt ShuffleUndef, ShuffleZero;
42293   SmallVector<int, 16> ShuffleMask;
42294   SmallVector<SDValue, 2> ShuffleOps;
42295   if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
42296                              ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
42297     // If all the demanded elts are from one operand and are inline,
42298     // then we can use the operand directly.
42299     int NumOps = ShuffleOps.size();
42300     if (ShuffleMask.size() == (unsigned)NumElts &&
42301         llvm::all_of(ShuffleOps, [VT](SDValue V) {
42302           return VT.getSizeInBits() == V.getValueSizeInBits();
42303         })) {
42304 
42305       if (DemandedElts.isSubsetOf(ShuffleUndef))
42306         return DAG.getUNDEF(VT);
42307       if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
42308         return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));
42309 
42310       // Bitmask that indicates which ops have only been accessed 'inline'.
42311       APInt IdentityOp = APInt::getAllOnes(NumOps);
42312       for (int i = 0; i != NumElts; ++i) {
42313         int M = ShuffleMask[i];
42314         if (!DemandedElts[i] || ShuffleUndef[i])
42315           continue;
42316         int OpIdx = M / NumElts;
42317         int EltIdx = M % NumElts;
42318         if (M < 0 || EltIdx != i) {
42319           IdentityOp.clearAllBits();
42320           break;
42321         }
42322         IdentityOp &= APInt::getOneBitSet(NumOps, OpIdx);
42323         if (IdentityOp == 0)
42324           break;
42325       }
42326       assert((IdentityOp == 0 || IdentityOp.popcount() == 1) &&
42327              "Multiple identity shuffles detected");
42328 
42329       if (IdentityOp != 0)
42330         return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countr_zero()]);
42331     }
42332   }
42333 
42334   return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
42335       Op, DemandedBits, DemandedElts, DAG, Depth);
42336 }
42337 
42338 bool X86TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
42339     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
42340     bool PoisonOnly, unsigned Depth) const {
42341   unsigned EltsBits = Op.getScalarValueSizeInBits();
42342   unsigned NumElts = DemandedElts.getBitWidth();
42343 
42344   // TODO: Add more target shuffles.
42345   switch (Op.getOpcode()) {
42346   case X86ISD::PSHUFD:
42347   case X86ISD::VPERMILPI: {
42348     SmallVector<int, 8> Mask;
42349     DecodePSHUFMask(NumElts, EltsBits, Op.getConstantOperandVal(1), Mask);
42350 
42351     APInt DemandedSrcElts = APInt::getZero(NumElts);
42352     for (unsigned I = 0; I != NumElts; ++I)
42353       if (DemandedElts[I])
42354         DemandedSrcElts.setBit(Mask[I]);
42355 
42356     return DAG.isGuaranteedNotToBeUndefOrPoison(
42357         Op.getOperand(0), DemandedSrcElts, PoisonOnly, Depth + 1);
42358   }
42359   }
42360   return TargetLowering::isGuaranteedNotToBeUndefOrPoisonForTargetNode(
42361       Op, DemandedElts, DAG, PoisonOnly, Depth);
42362 }
42363 
42364 bool X86TargetLowering::canCreateUndefOrPoisonForTargetNode(
42365     SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
42366     bool PoisonOnly, bool ConsiderFlags, unsigned Depth) const {
42367 
42368   // TODO: Add more target shuffles.
42369   switch (Op.getOpcode()) {
42370   case X86ISD::PSHUFD:
42371   case X86ISD::VPERMILPI:
42372     return false;
42373   }
42374   return TargetLowering::canCreateUndefOrPoisonForTargetNode(
42375       Op, DemandedElts, DAG, PoisonOnly, ConsiderFlags, Depth);
42376 }
42377 
42378 bool X86TargetLowering::isSplatValueForTargetNode(SDValue Op,
42379                                                   const APInt &DemandedElts,
42380                                                   APInt &UndefElts,
42381                                                   const SelectionDAG &DAG,
42382                                                   unsigned Depth) const {
42383   unsigned NumElts = DemandedElts.getBitWidth();
42384   unsigned Opc = Op.getOpcode();
42385 
42386   switch (Opc) {
42387   case X86ISD::VBROADCAST:
42388   case X86ISD::VBROADCAST_LOAD:
42389     UndefElts = APInt::getZero(NumElts);
42390     return true;
42391   }
42392 
42393   return TargetLowering::isSplatValueForTargetNode(Op, DemandedElts, UndefElts,
42394                                                    DAG, Depth);
42395 }
42396 
42397 // Helper to peek through bitops/trunc/setcc to determine size of source vector.
42398 // Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
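// For illustration: (v8i1 setcc (v8i32 X), (v8i32 Y), cc) reports a 256-bit
// source; the check also recurses through bitwise ops and vector selects of
// such compares, and all-zeros/all-ones build vectors match any size.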
42399 static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size,
42400                                       bool AllowTruncate) {
42401   switch (Src.getOpcode()) {
42402   case ISD::TRUNCATE:
42403     if (!AllowTruncate)
42404       return false;
42405     [[fallthrough]];
42406   case ISD::SETCC:
42407     return Src.getOperand(0).getValueSizeInBits() == Size;
42408   case ISD::AND:
42409   case ISD::XOR:
42410   case ISD::OR:
42411     return checkBitcastSrcVectorSize(Src.getOperand(0), Size, AllowTruncate) &&
42412            checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate);
42413   case ISD::SELECT:
42414   case ISD::VSELECT:
42415     return Src.getOperand(0).getScalarValueSizeInBits() == 1 &&
42416            checkBitcastSrcVectorSize(Src.getOperand(1), Size, AllowTruncate) &&
42417            checkBitcastSrcVectorSize(Src.getOperand(2), Size, AllowTruncate);
42418   case ISD::BUILD_VECTOR:
42419     return ISD::isBuildVectorAllZeros(Src.getNode()) ||
42420            ISD::isBuildVectorAllOnes(Src.getNode());
42421   }
42422   return false;
42423 }
42424 
42425 // Helper to flip between AND/OR/XOR opcodes and their X86ISD FP equivalents.
42426 static unsigned getAltBitOpcode(unsigned Opcode) {
42427   switch(Opcode) {
42428   case ISD::AND: return X86ISD::FAND;
42429   case ISD::OR: return X86ISD::FOR;
42430   case ISD::XOR: return X86ISD::FXOR;
42431   case X86ISD::ANDNP: return X86ISD::FANDN;
42432   }
42433   llvm_unreachable("Unknown bitwise opcode");
42434 }
42435 
42436 // Helper to adjust v4i32 MOVMSK expansion to work with SSE1-only targets.
42437 static SDValue adjustBitcastSrcVectorSSE1(SelectionDAG &DAG, SDValue Src,
42438                                           const SDLoc &DL) {
42439   EVT SrcVT = Src.getValueType();
42440   if (SrcVT != MVT::v4i1)
42441     return SDValue();
42442 
42443   switch (Src.getOpcode()) {
42444   case ISD::SETCC:
42445     if (Src.getOperand(0).getValueType() == MVT::v4i32 &&
42446         ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) &&
42447         cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT) {
42448       SDValue Op0 = Src.getOperand(0);
42449       if (ISD::isNormalLoad(Op0.getNode()))
42450         return DAG.getBitcast(MVT::v4f32, Op0);
42451       if (Op0.getOpcode() == ISD::BITCAST &&
42452           Op0.getOperand(0).getValueType() == MVT::v4f32)
42453         return Op0.getOperand(0);
42454     }
42455     break;
42456   case ISD::AND:
42457   case ISD::XOR:
42458   case ISD::OR: {
42459     SDValue Op0 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(0), DL);
42460     SDValue Op1 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(1), DL);
42461     if (Op0 && Op1)
42462       return DAG.getNode(getAltBitOpcode(Src.getOpcode()), DL, MVT::v4f32, Op0,
42463                          Op1);
42464     break;
42465   }
42466   }
42467   return SDValue();
42468 }
42469 
42470 // Helper to push sign extension of vXi1 SETCC result through bitops.
42471 static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
42472                                           SDValue Src, const SDLoc &DL) {
42473   switch (Src.getOpcode()) {
42474   case ISD::SETCC:
42475   case ISD::TRUNCATE:
42476   case ISD::BUILD_VECTOR:
42477     return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
42478   case ISD::AND:
42479   case ISD::XOR:
42480   case ISD::OR:
42481     return DAG.getNode(
42482         Src.getOpcode(), DL, SExtVT,
42483         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
42484         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
42485   case ISD::SELECT:
42486   case ISD::VSELECT:
42487     return DAG.getSelect(
42488         DL, SExtVT, Src.getOperand(0),
42489         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL),
42490         signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(2), DL));
42491   }
42492   llvm_unreachable("Unexpected node type for vXi1 sign extension");
42493 }
42494 
42495 // Try to match patterns such as
42496 // (i16 bitcast (v16i1 x))
42497 // ->
42498 // (i16 movmsk (16i8 sext (v16i1 x)))
42499 // before the illegal vector is scalarized on subtargets that don't have legal
42500 // vxi1 types.
42501 static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
42502                                   const SDLoc &DL,
42503                                   const X86Subtarget &Subtarget) {
42504   EVT SrcVT = Src.getValueType();
42505   if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
42506     return SDValue();
42507 
42508   // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
42509   // legalization destroys the v4i32 type.
42510   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) {
42511     if (SDValue V = adjustBitcastSrcVectorSSE1(DAG, Src, DL)) {
42512       V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32,
42513                       DAG.getBitcast(MVT::v4f32, V));
42514       return DAG.getZExtOrTrunc(V, DL, VT);
42515     }
42516   }
42517 
42518   // If the input is a truncate from v16i8, v32i8 or v64i8, go ahead and use a
42519   // movmskb even with avx512. This will be better than truncating to vXi1 and
42520   // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
42521   // vpcmpeqb/vpcmpgtb.
42522   bool PreferMovMsk = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
42523                       (Src.getOperand(0).getValueType() == MVT::v16i8 ||
42524                        Src.getOperand(0).getValueType() == MVT::v32i8 ||
42525                        Src.getOperand(0).getValueType() == MVT::v64i8);
42526 
42527   // Prefer movmsk for AVX512 for (bitcast (setlt X, 0)) which can be handled
42528   // directly with vpmovmskb/vmovmskps/vmovmskpd.
42529   if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() &&
42530       cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT &&
42531       ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
42532     EVT CmpVT = Src.getOperand(0).getValueType();
42533     EVT EltVT = CmpVT.getVectorElementType();
42534     if (CmpVT.getSizeInBits() <= 256 &&
42535         (EltVT == MVT::i8 || EltVT == MVT::i32 || EltVT == MVT::i64))
42536       PreferMovMsk = true;
42537   }
42538 
42539   // With AVX512 vxi1 types are legal and we prefer using k-regs.
42540   // MOVMSK is supported in SSE2 or later.
42541   if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !PreferMovMsk))
42542     return SDValue();
42543 
42544   // If the upper ops of a concatenation are undef, then try to bitcast the
42545   // lower op and extend.
42546   SmallVector<SDValue, 4> SubSrcOps;
42547   if (collectConcatOps(Src.getNode(), SubSrcOps, DAG) &&
42548       SubSrcOps.size() >= 2) {
42549     SDValue LowerOp = SubSrcOps[0];
42550     ArrayRef<SDValue> UpperOps(std::next(SubSrcOps.begin()), SubSrcOps.end());
42551     if (LowerOp.getOpcode() == ISD::SETCC &&
42552         all_of(UpperOps, [](SDValue Op) { return Op.isUndef(); })) {
42553       EVT SubVT = VT.getIntegerVT(
42554           *DAG.getContext(), LowerOp.getValueType().getVectorMinNumElements());
42555       if (SDValue V = combineBitcastvxi1(DAG, SubVT, LowerOp, DL, Subtarget)) {
42556         EVT IntVT = VT.getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
42557         return DAG.getBitcast(VT, DAG.getNode(ISD::ANY_EXTEND, DL, IntVT, V));
42558       }
42559     }
42560   }
42561 
42562   // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
42563   // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
42564   // v8i16 and v16i16.
42565   // For these two cases, we can shuffle the upper element bytes to a
42566   // consecutive sequence at the start of the vector and treat the results as
42567   // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
42568   // for v16i16 this is not the case, because the shuffle is expensive, so we
42569   // avoid sign-extending to this type entirely.
42570   // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
42571   // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
42572   MVT SExtVT;
42573   bool PropagateSExt = false;
42574   switch (SrcVT.getSimpleVT().SimpleTy) {
42575   default:
42576     return SDValue();
42577   case MVT::v2i1:
42578     SExtVT = MVT::v2i64;
42579     break;
42580   case MVT::v4i1:
42581     SExtVT = MVT::v4i32;
42582     // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
42583     // sign-extend to a 256-bit operation to avoid truncation.
42584     if (Subtarget.hasAVX() &&
42585         checkBitcastSrcVectorSize(Src, 256, Subtarget.hasAVX2())) {
42586       SExtVT = MVT::v4i64;
42587       PropagateSExt = true;
42588     }
42589     break;
42590   case MVT::v8i1:
42591     SExtVT = MVT::v8i16;
42592     // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
42593     // sign-extend to a 256-bit operation to match the compare.
42594     // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
42595     // 256-bit because the shuffle is cheaper than sign extending the result of
42596     // the compare.
42597     if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256, true) ||
42598                                checkBitcastSrcVectorSize(Src, 512, true))) {
42599       SExtVT = MVT::v8i32;
42600       PropagateSExt = true;
42601     }
42602     break;
42603   case MVT::v16i1:
42604     SExtVT = MVT::v16i8;
42605     // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
42606     // it is not profitable to sign-extend to 256-bit because this will
42607     // require an extra cross-lane shuffle which is more expensive than
42608     // truncating the result of the compare to 128-bits.
42609     break;
42610   case MVT::v32i1:
42611     SExtVT = MVT::v32i8;
42612     break;
42613   case MVT::v64i1:
42614     // If we have AVX512F but not AVX512BW, and the input is truncated from
42615     // v64i8 (checked earlier), then split the input and make two pmovmskbs.
42616     if (Subtarget.hasAVX512()) {
42617       if (Subtarget.hasBWI())
42618         return SDValue();
42619       SExtVT = MVT::v64i8;
42620       break;
42621     }
42622     // Split if this is a <64 x i8> comparison result.
42623     if (checkBitcastSrcVectorSize(Src, 512, false)) {
42624       SExtVT = MVT::v64i8;
42625       break;
42626     }
42627     return SDValue();
42628   };
42629 
42630   SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
42631                             : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
42632 
42633   if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
42634     V = getPMOVMSKB(DL, V, DAG, Subtarget);
42635   } else {
42636     if (SExtVT == MVT::v8i16) {
42637       V = widenSubVector(V, false, Subtarget, DAG, DL, 256);
42638       V = DAG.getNode(ISD::TRUNCATE, DL, MVT::v16i8, V);
42639     }
42640     V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
42641   }
42642 
42643   EVT IntVT =
42644       EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
42645   V = DAG.getZExtOrTrunc(V, DL, IntVT);
42646   return DAG.getBitcast(VT, V);
42647 }
42648 
42649 // Convert a vXi1 constant build vector to the same width scalar integer.
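// For illustration: <i1 1, i1 0, i1 1, i1 1> becomes the i4 constant 0b1101
// (element 0 maps to bit 0 of the scalar).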
42650 static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
42651   EVT SrcVT = Op.getValueType();
42652   assert(SrcVT.getVectorElementType() == MVT::i1 &&
42653          "Expected a vXi1 vector");
42654   assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
42655          "Expected a constant build vector");
42656 
42657   APInt Imm(SrcVT.getVectorNumElements(), 0);
42658   for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
42659     SDValue In = Op.getOperand(Idx);
42660     if (!In.isUndef() && (In->getAsZExtVal() & 0x1))
42661       Imm.setBit(Idx);
42662   }
42663   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
42664   return DAG.getConstant(Imm, SDLoc(Op), IntVT);
42665 }
42666 
42667 static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
42668                                            TargetLowering::DAGCombinerInfo &DCI,
42669                                            const X86Subtarget &Subtarget) {
42670   assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");
42671 
42672   if (!DCI.isBeforeLegalizeOps())
42673     return SDValue();
42674 
42675   // Only do this if we have k-registers.
42676   if (!Subtarget.hasAVX512())
42677     return SDValue();
42678 
42679   EVT DstVT = N->getValueType(0);
42680   SDValue Op = N->getOperand(0);
42681   EVT SrcVT = Op.getValueType();
42682 
42683   if (!Op.hasOneUse())
42684     return SDValue();
42685 
42686   // Look for logic ops.
42687   if (Op.getOpcode() != ISD::AND &&
42688       Op.getOpcode() != ISD::OR &&
42689       Op.getOpcode() != ISD::XOR)
42690     return SDValue();
42691 
42692   // Make sure we have a bitcast between mask registers and a scalar type.
42693   if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
42694         DstVT.isScalarInteger()) &&
42695       !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
42696         SrcVT.isScalarInteger()))
42697     return SDValue();
42698 
42699   SDValue LHS = Op.getOperand(0);
42700   SDValue RHS = Op.getOperand(1);
42701 
42702   if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
42703       LHS.getOperand(0).getValueType() == DstVT)
42704     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
42705                        DAG.getBitcast(DstVT, RHS));
42706 
42707   if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
42708       RHS.getOperand(0).getValueType() == DstVT)
42709     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
42710                        DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));
42711 
42712   // If the RHS is a vXi1 build vector, this is a good reason to flip too.
42713   // Most of these have to move a constant from the scalar domain anyway.
42714   if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
42715     RHS = combinevXi1ConstantToInteger(RHS, DAG);
42716     return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
42717                        DAG.getBitcast(DstVT, LHS), RHS);
42718   }
42719 
42720   return SDValue();
42721 }
42722 
42723 static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
42724                                     const X86Subtarget &Subtarget) {
42725   SDLoc DL(BV);
42726   unsigned NumElts = BV->getNumOperands();
42727   SDValue Splat = BV->getSplatValue();
42728 
42729   // Build MMX element from integer GPR or SSE float values.
42730   auto CreateMMXElement = [&](SDValue V) {
42731     if (V.isUndef())
42732       return DAG.getUNDEF(MVT::x86mmx);
42733     if (V.getValueType().isFloatingPoint()) {
42734       if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
42735         V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
42736         V = DAG.getBitcast(MVT::v2i64, V);
42737         return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
42738       }
42739       V = DAG.getBitcast(MVT::i32, V);
42740     } else {
42741       V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
42742     }
42743     return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
42744   };
42745 
42746   // Convert build vector ops to MMX data in the bottom elements.
42747   SmallVector<SDValue, 8> Ops;
42748 
42749   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42750 
42751   // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
42752   if (Splat) {
42753     if (Splat.isUndef())
42754       return DAG.getUNDEF(MVT::x86mmx);
42755 
42756     Splat = CreateMMXElement(Splat);
42757 
42758     if (Subtarget.hasSSE1()) {
42759       // Unpack v8i8 to splat i8 elements to lowest 16-bits.
42760       if (NumElts == 8)
42761         Splat = DAG.getNode(
42762             ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
42763             DAG.getTargetConstant(Intrinsic::x86_mmx_punpcklbw, DL,
42764                                   TLI.getPointerTy(DAG.getDataLayout())),
42765             Splat, Splat);
42766 
42767       // Use PSHUFW to repeat 16-bit elements.
42768       unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
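      // For illustration: mask 0x44 selects words <0,1,0,1>, repeating the low
      // 32-bit element, while mask 0 selects <0,0,0,0>, repeating the low
      // 16-bit word produced above.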
42769       return DAG.getNode(
42770           ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
42771           DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL,
42772                                 TLI.getPointerTy(DAG.getDataLayout())),
42773           Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
42774     }
42775     Ops.append(NumElts, Splat);
42776   } else {
42777     for (unsigned i = 0; i != NumElts; ++i)
42778       Ops.push_back(CreateMMXElement(BV->getOperand(i)));
42779   }
42780 
42781   // Use tree of PUNPCKLs to build up general MMX vector.
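  // For illustration with a v8i8 build vector: punpcklbw merges byte pairs
  // into words, punpcklwd merges those into dwords, and a final punpckldq
  // produces the full 64-bit MMX value, keeping the original element order.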
42782   while (Ops.size() > 1) {
42783     unsigned NumOps = Ops.size();
42784     unsigned IntrinOp =
42785         (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
42786                      : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
42787                                     : Intrinsic::x86_mmx_punpcklbw));
42788     SDValue Intrin = DAG.getTargetConstant(
42789         IntrinOp, DL, TLI.getPointerTy(DAG.getDataLayout()));
42790     for (unsigned i = 0; i != NumOps; i += 2)
42791       Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
42792                                Ops[i], Ops[i + 1]);
42793     Ops.resize(NumOps / 2);
42794   }
42795 
42796   return Ops[0];
42797 }
42798 
42799 // Recursive function that attempts to find if a bool vector node was originally
42800 // a vector/float/double that got truncated/extended/bitcast to/from a scalar
42801 // integer. If so, replace the scalar ops with bool vector equivalents back down
42802 // the chain.
42803 static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, const SDLoc &DL,
42804                                           SelectionDAG &DAG,
42805                                           const X86Subtarget &Subtarget) {
42806   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42807   unsigned Opc = V.getOpcode();
42808   switch (Opc) {
42809   case ISD::BITCAST: {
42810     // Bitcast from a vector/float/double, we can cheaply bitcast to VT.
42811     SDValue Src = V.getOperand(0);
42812     EVT SrcVT = Src.getValueType();
42813     if (SrcVT.isVector() || SrcVT.isFloatingPoint())
42814       return DAG.getBitcast(VT, Src);
42815     break;
42816   }
42817   case ISD::TRUNCATE: {
42818     // If we find a suitable source, a truncated scalar becomes a subvector.
42819     SDValue Src = V.getOperand(0);
42820     EVT NewSrcVT =
42821         EVT::getVectorVT(*DAG.getContext(), MVT::i1, Src.getValueSizeInBits());
42822     if (TLI.isTypeLegal(NewSrcVT))
42823       if (SDValue N0 =
42824               combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
42825         return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N0,
42826                            DAG.getIntPtrConstant(0, DL));
42827     break;
42828   }
42829   case ISD::ANY_EXTEND:
42830   case ISD::ZERO_EXTEND: {
42831     // If we find a suitable source, an extended scalar becomes a subvector.
42832     SDValue Src = V.getOperand(0);
42833     EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
42834                                     Src.getScalarValueSizeInBits());
42835     if (TLI.isTypeLegal(NewSrcVT))
42836       if (SDValue N0 =
42837               combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
42838         return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
42839                            Opc == ISD::ANY_EXTEND ? DAG.getUNDEF(VT)
42840                                                   : DAG.getConstant(0, DL, VT),
42841                            N0, DAG.getIntPtrConstant(0, DL));
42842     break;
42843   }
42844   case ISD::OR: {
42845     // If we find suitable sources, we can just move an OR to the vector domain.
42846     SDValue Src0 = V.getOperand(0);
42847     SDValue Src1 = V.getOperand(1);
42848     if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
42849       if (SDValue N1 = combineBitcastToBoolVector(VT, Src1, DL, DAG, Subtarget))
42850         return DAG.getNode(Opc, DL, VT, N0, N1);
42851     break;
42852   }
42853   case ISD::SHL: {
42854     // If we find a suitable source, a SHL becomes a KSHIFTL.
42855     SDValue Src0 = V.getOperand(0);
42856     if ((VT == MVT::v8i1 && !Subtarget.hasDQI()) ||
42857         ((VT == MVT::v32i1 || VT == MVT::v64i1) && !Subtarget.hasBWI()))
42858       break;
42859 
42860     if (auto *Amt = dyn_cast<ConstantSDNode>(V.getOperand(1)))
42861       if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
42862         return DAG.getNode(
42863             X86ISD::KSHIFTL, DL, VT, N0,
42864             DAG.getTargetConstant(Amt->getZExtValue(), DL, MVT::i8));
42865     break;
42866   }
42867   }
42868   return SDValue();
42869 }
42870 
42871 static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
42872                               TargetLowering::DAGCombinerInfo &DCI,
42873                               const X86Subtarget &Subtarget) {
42874   SDValue N0 = N->getOperand(0);
42875   EVT VT = N->getValueType(0);
42876   EVT SrcVT = N0.getValueType();
42877   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
42878 
42879   // Try to match patterns such as
42880   // (i16 bitcast (v16i1 x))
42881   // ->
42882   // (i16 movmsk (16i8 sext (v16i1 x)))
42883   // before the setcc result is scalarized on subtargets that don't have legal
42884   // vxi1 types.
42885   if (DCI.isBeforeLegalize()) {
42886     SDLoc dl(N);
42887     if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
42888       return V;
42889 
42890     // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
42891     // type, widen both sides to avoid a trip through memory.
42892     if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
42893         Subtarget.hasAVX512()) {
42894       N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
42895       N0 = DAG.getBitcast(MVT::v8i1, N0);
42896       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
42897                          DAG.getIntPtrConstant(0, dl));
42898     }
42899 
42900     // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
42901     // type, widen both sides to avoid a trip through memory.
42902     if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
42903         Subtarget.hasAVX512()) {
42904       // Use zeros for the widening if we already have some zeroes. This can
42905       // allow SimplifyDemandedBits to remove scalar ANDs that may be
42906       // downstream of this.
42907       // FIXME: It might make sense to detect a concat_vectors with a mix of
42908       // zeroes and undef and turn it into insert_subvector for i1 vectors as
42909       // a separate combine. What we can't do is canonicalize the operands of
42910       // such a concat or we'll get into a loop with SimplifyDemandedBits.
42911       if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
42912         SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
42913         if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
42914           SrcVT = LastOp.getValueType();
42915           unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
42916           SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
42917           Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
42918           N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
42919           N0 = DAG.getBitcast(MVT::i8, N0);
42920           return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
42921         }
42922       }
42923 
42924       unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
42925       SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
42926       Ops[0] = N0;
42927       N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
42928       N0 = DAG.getBitcast(MVT::i8, N0);
42929       return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
42930     }
42931   } else {
42932     // If we're bitcasting from iX to vXi1, see if the integer originally
42933     // began as a vXi1 and whether we can remove the bitcast entirely.
42934     if (VT.isVector() && VT.getScalarType() == MVT::i1 &&
42935         SrcVT.isScalarInteger() && TLI.isTypeLegal(VT)) {
42936       if (SDValue V =
42937               combineBitcastToBoolVector(VT, N0, SDLoc(N), DAG, Subtarget))
42938         return V;
42939     }
42940   }
42941 
42942   // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
42943   // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
42944   // due to insert_subvector legalization on KNL. By promoting the copy to i16
42945   // we can help with known bits propagation from the vXi1 domain to the
42946   // scalar domain.
42947   if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
42948       !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
42949       N0.getOperand(0).getValueType() == MVT::v16i1 &&
42950       isNullConstant(N0.getOperand(1)))
42951     return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
42952                        DAG.getBitcast(MVT::i16, N0.getOperand(0)));
42953 
42954   // Canonicalize (bitcast (vbroadcast_load)) so that the output of the bitcast
42955   // and the vbroadcast_load are both integer or both fp. In some cases this
42956   // will remove the bitcast entirely.
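  // For example (an illustrative sketch, not from a testcase):
  //   (v4f32 (bitcast (v4i32 (X86ISD::VBROADCAST_LOAD<i32> ptr))))
  //     -> (v4f32 (X86ISD::VBROADCAST_LOAD<f32> ptr))
  // provided the broadcast element is at least 32 bits wide.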
42957   if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
42958        VT.isFloatingPoint() != SrcVT.isFloatingPoint() && VT.isVector()) {
42959     auto *BCast = cast<MemIntrinsicSDNode>(N0);
42960     unsigned SrcVTSize = SrcVT.getScalarSizeInBits();
42961     unsigned MemSize = BCast->getMemoryVT().getScalarSizeInBits();
42962     // Don't swap i8/i16 since we don't have fp types of that size.
42963     if (MemSize >= 32) {
42964       MVT MemVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(MemSize)
42965                                        : MVT::getIntegerVT(MemSize);
42966       MVT LoadVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(SrcVTSize)
42967                                         : MVT::getIntegerVT(SrcVTSize);
42968       LoadVT = MVT::getVectorVT(LoadVT, SrcVT.getVectorNumElements());
42969 
42970       SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
42971       SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
42972       SDValue ResNode =
42973           DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
42974                                   MemVT, BCast->getMemOperand());
42975       DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
42976       return DAG.getBitcast(VT, ResNode);
42977     }
42978   }
42979 
42980   // Since MMX types are special and don't usually play with other vector types,
42981   // it's better to handle them early to be sure we emit efficient code by
42982   // avoiding store-load conversions.
42983   if (VT == MVT::x86mmx) {
42984     // Detect MMX constant vectors.
42985     APInt UndefElts;
42986     SmallVector<APInt, 1> EltBits;
42987     if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
42988       SDLoc DL(N0);
42989       // Handle zero-extension of i32 with MOVD.
42990       if (EltBits[0].countl_zero() >= 32)
42991         return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
42992                            DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
42993       // Else, bitcast to a double.
42994       // TODO - investigate supporting sext 32-bit immediates on x86_64.
42995       APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
42996       return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
42997     }
42998 
42999     // Detect bitcasts to x86mmx low word.
43000     if (N0.getOpcode() == ISD::BUILD_VECTOR &&
43001         (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
43002         N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
43003       bool LowUndef = true, AllUndefOrZero = true;
43004       for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
43005         SDValue Op = N0.getOperand(i);
43006         LowUndef &= Op.isUndef() || (i >= e/2);
43007         AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
43008       }
43009       if (AllUndefOrZero) {
43010         SDValue N00 = N0.getOperand(0);
43011         SDLoc dl(N00);
43012         N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
43013                        : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
43014         return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
43015       }
43016     }
43017 
43018     // Detect bitcasts of 64-bit build vectors and convert to a
43019     // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
43020     // lowest element.
43021     if (N0.getOpcode() == ISD::BUILD_VECTOR &&
43022         (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
43023          SrcVT == MVT::v8i8))
43024       return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);
43025 
43026     // Detect bitcasts of element or subvector extractions to x86mmx.
43027     if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
43028          N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
43029         isNullConstant(N0.getOperand(1))) {
43030       SDValue N00 = N0.getOperand(0);
43031       if (N00.getValueType().is128BitVector())
43032         return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
43033                            DAG.getBitcast(MVT::v2i64, N00));
43034     }
43035 
43036     // Detect bitcasts from FP_TO_SINT to x86mmx.
43037     if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
43038       SDLoc DL(N0);
43039       SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
43040                                 DAG.getUNDEF(MVT::v2i32));
43041       return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
43042                          DAG.getBitcast(MVT::v2i64, Res));
43043     }
43044   }
43045 
43046   // Try to remove a bitcast of constant vXi1 vector. We have to legalize
43047   // most of these to scalar anyway.
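  // A small worked example (illustrative only): with element 0 as bit 0,
  //   (i8 (bitcast (v8i1 <1,0,1,1,0,0,0,0>))) -> (i8 0b00001101) = 13.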
43048   if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
43049       SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
43050       ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
43051     return combinevXi1ConstantToInteger(N0, DAG);
43052   }
43053 
43054   if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
43055       VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
43056       isa<ConstantSDNode>(N0)) {
43057     auto *C = cast<ConstantSDNode>(N0);
43058     if (C->isAllOnes())
43059       return DAG.getConstant(1, SDLoc(N0), VT);
43060     if (C->isZero())
43061       return DAG.getConstant(0, SDLoc(N0), VT);
43062   }
43063 
43064   // Look for MOVMSK that is maybe truncated and then bitcasted to vXi1.
43065   // Turn it into a sign bit compare that produces a k-register. This avoids
43066   // a trip through a GPR.
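  // For example (a sketch, not from a testcase):
  //   (v4i1 (bitcast (i4 (trunc (i32 (X86ISD::MOVMSK (v4f32 X)))))))
  //     -> (v4i1 (setcc (v4i32 (bitcast X)), zero, setlt))
  // keeping the mask in a k-register instead of bouncing through a GPR.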
43067   if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
43068       VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
43069       isPowerOf2_32(VT.getVectorNumElements())) {
43070     unsigned NumElts = VT.getVectorNumElements();
43071     SDValue Src = N0;
43072 
43073     // Peek through truncate.
43074     if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
43075       Src = N0.getOperand(0);
43076 
43077     if (Src.getOpcode() == X86ISD::MOVMSK && Src.hasOneUse()) {
43078       SDValue MovmskIn = Src.getOperand(0);
43079       MVT MovmskVT = MovmskIn.getSimpleValueType();
43080       unsigned MovMskElts = MovmskVT.getVectorNumElements();
43081 
43082       // We allow extra bits of the movmsk to be used since they are known zero.
43083       // We can't convert a VPMOVMSKB without avx512bw.
43084       if (MovMskElts <= NumElts &&
43085           (Subtarget.hasBWI() || MovmskVT.getVectorElementType() != MVT::i8)) {
43086         EVT IntVT = EVT(MovmskVT).changeVectorElementTypeToInteger();
43087         MovmskIn = DAG.getBitcast(IntVT, MovmskIn);
43088         SDLoc dl(N);
43089         MVT CmpVT = MVT::getVectorVT(MVT::i1, MovMskElts);
43090         SDValue Cmp = DAG.getSetCC(dl, CmpVT, MovmskIn,
43091                                    DAG.getConstant(0, dl, IntVT), ISD::SETLT);
43092         if (EVT(CmpVT) == VT)
43093           return Cmp;
43094 
43095         // Pad with zeroes up to original VT to replace the zeroes that were
43096         // being used from the MOVMSK.
43097         unsigned NumConcats = NumElts / MovMskElts;
43098         SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, CmpVT));
43099         Ops[0] = Cmp;
43100         return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Ops);
43101       }
43102     }
43103   }
43104 
43105   // Try to remove bitcasts from input and output of mask arithmetic to
43106   // remove GPR<->K-register crossings.
43107   if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
43108     return V;
43109 
43110   // Convert a bitcasted integer logic operation that has one bitcasted
43111   // floating-point operand into a floating-point logic operation. This may
43112   // create a load of a constant, but that is cheaper than materializing the
43113   // constant in an integer register and transferring it to an SSE register or
43114   // transferring the SSE operand to an integer register and back.
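  // As a concrete sketch (illustrative), with X : f32 and constant C : i32:
  //   (f32 (bitcast (and (i32 (bitcast X)), C)))
  //     -> (X86ISD::FAND X, (f32 (bitcast C)))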
43115   unsigned FPOpcode;
43116   switch (N0.getOpcode()) {
43117     case ISD::AND: FPOpcode = X86ISD::FAND; break;
43118     case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
43119     case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
43120     default: return SDValue();
43121   }
43122 
43123   // Check if we have a bitcast from another integer type as well.
43124   if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
43125         (Subtarget.hasSSE2() && VT == MVT::f64) ||
43126         (Subtarget.hasFP16() && VT == MVT::f16) ||
43127         (Subtarget.hasSSE2() && VT.isInteger() && VT.isVector() &&
43128          TLI.isTypeLegal(VT))))
43129     return SDValue();
43130 
43131   SDValue LogicOp0 = N0.getOperand(0);
43132   SDValue LogicOp1 = N0.getOperand(1);
43133   SDLoc DL0(N0);
43134 
43135   // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
43136   if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
43137       LogicOp0.hasOneUse() && LogicOp0.getOperand(0).hasOneUse() &&
43138       LogicOp0.getOperand(0).getValueType() == VT &&
43139       !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
43140     SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
43141     unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
43142     return DAG.getNode(Opcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
43143   }
43144   // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
43145   if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
43146       LogicOp1.hasOneUse() && LogicOp1.getOperand(0).hasOneUse() &&
43147       LogicOp1.getOperand(0).getValueType() == VT &&
43148       !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
43149     SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
43150     unsigned Opcode = VT.isFloatingPoint() ? FPOpcode : N0.getOpcode();
43151     return DAG.getNode(Opcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
43152   }
43153 
43154   return SDValue();
43155 }
43156 
43157 // (mul (zext a), (sext b))
43158 static bool detectExtMul(SelectionDAG &DAG, const SDValue &Mul, SDValue &Op0,
43159                          SDValue &Op1) {
43160   Op0 = Mul.getOperand(0);
43161   Op1 = Mul.getOperand(1);
43162 
43163   // Canonicalize so that operand 1 is the sign-extended value.
43164   if (Op0.getOpcode() == ISD::SIGN_EXTEND)
43165     std::swap(Op0, Op1);
43166 
43167   auto IsFreeTruncation = [](SDValue &Op) -> bool {
43168     if ((Op.getOpcode() == ISD::ZERO_EXTEND ||
43169          Op.getOpcode() == ISD::SIGN_EXTEND) &&
43170         Op.getOperand(0).getScalarValueSizeInBits() <= 8)
43171       return true;
43172 
43173     auto *BV = dyn_cast<BuildVectorSDNode>(Op);
43174     return (BV && BV->isConstant());
43175   };
43176 
43177   // (dpbusd (zext a), (sext b)). Since the first operand must be an unsigned
43178   // value, we check that Op0 is zero-extended. Op1 must be a signed value, so
43179   // we just check that it has at most 8 significant bits.
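  // e.g. (a sketch) Op0 := (zext vXi8 A to vXi32), Op1 := (sext vXi8 B to
  // vXi32); every u8*s8 product fits in 16 bits, which matches what VPDPBUSD
  // computes per byte pair before accumulating into the i32 lanes.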
43180   if ((IsFreeTruncation(Op0) &&
43181        DAG.computeKnownBits(Op0).countMaxActiveBits() <= 8) &&
43182       (IsFreeTruncation(Op1) && DAG.ComputeMaxSignificantBits(Op1) <= 8))
43183     return true;
43184 
43185   return false;
43186 }
43187 
43188 // Given an ABS node, detect the following pattern:
43189 // (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
43190 // This is useful as it is the input into a SAD pattern.
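// A matching input, roughly (illustrative): with a, b : v16i8,
//   (abs (sub (zero_extend a to v16i32), (zero_extend b to v16i32)))
// computes the per-element |a - b| that a PSADBW-based SAD reduction consumes.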
43191 static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
43192   SDValue AbsOp1 = Abs->getOperand(0);
43193   if (AbsOp1.getOpcode() != ISD::SUB)
43194     return false;
43195 
43196   Op0 = AbsOp1.getOperand(0);
43197   Op1 = AbsOp1.getOperand(1);
43198 
43199   // Check if the operands of the sub are zero-extended from vectors of i8.
43200   if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
43201       Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
43202       Op1.getOpcode() != ISD::ZERO_EXTEND ||
43203       Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
43204     return false;
43205 
43206   return true;
43207 }
43208 
43209 static SDValue createVPDPBUSD(SelectionDAG &DAG, SDValue LHS, SDValue RHS,
43210                               unsigned &LogBias, const SDLoc &DL,
43211                               const X86Subtarget &Subtarget) {
43212   // Extend or truncate to MVT::i8 first.
43213   MVT Vi8VT =
43214       MVT::getVectorVT(MVT::i8, LHS.getValueType().getVectorElementCount());
43215   LHS = DAG.getZExtOrTrunc(LHS, DL, Vi8VT);
43216   RHS = DAG.getSExtOrTrunc(RHS, DL, Vi8VT);
43217 
43218   // VPDPBUSD(<16 x i32> C, <16 x i8> A, <16 x i8> B): for each dst element,
43219   // C[i] += A[4i]*B[4i] + A[4i+1]*B[4i+1] + A[4i+2]*B[4i+2] + A[4i+3]*B[4i+3].
43220   // The A, B source elements are i8, but the C destination elements are i32.
43221   // The reduction stage count below is computed from the vXi8 source type,
43222   // so a LogBias of 2 accounts for the 4:1 reduction VPDPBUSD already does.
43223   LogBias = 2;
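  // Worked example (illustrative): a reduction over 16 i32 products would need
  // log2(16) = 4 shuffle+add stages, but VPDPBUSD already sums groups of 4, so
  // only 4 - LogBias = 2 stages remain in the caller.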
43224 
43225   unsigned RegSize = std::max(128u, (unsigned)Vi8VT.getSizeInBits());
43226   if (Subtarget.hasVNNI() && !Subtarget.hasVLX())
43227     RegSize = std::max(512u, RegSize);
43228 
43229   // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
43230   // fill in the missing vector elements with 0.
43231   unsigned NumConcat = RegSize / Vi8VT.getSizeInBits();
43232   SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, Vi8VT));
43233   Ops[0] = LHS;
43234   MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
43235   SDValue DpOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43236   Ops[0] = RHS;
43237   SDValue DpOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43238 
43239   // Actually build the DotProduct, split as 256/512 bits for
43240   // AVXVNNI/AVX512VNNI.
43241   auto DpBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43242                        ArrayRef<SDValue> Ops) {
43243     MVT VT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
43244     return DAG.getNode(X86ISD::VPDPBUSD, DL, VT, Ops);
43245   };
43246   MVT DpVT = MVT::getVectorVT(MVT::i32, RegSize / 32);
43247   SDValue Zero = DAG.getConstant(0, DL, DpVT);
43248 
43249   return SplitOpsAndApply(DAG, Subtarget, DL, DpVT, {Zero, DpOp0, DpOp1},
43250                           DpBuilder, false);
43251 }
43252 
43253 // Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
43254 // to these zexts.
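// For reference (summarized, not a new constraint): PSADBW sums |a[i] - b[i]|
// over each group of 8 byte pairs, producing one i64 partial sum per 64-bit
// lane, which the callers below then reduce further.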
43255 static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
43256                             const SDValue &Zext1, const SDLoc &DL,
43257                             const X86Subtarget &Subtarget) {
43258   // Find the appropriate width for the PSADBW.
43259   EVT InVT = Zext0.getOperand(0).getValueType();
43260   unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());
43261 
43262   // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
43263   // fill in the missing vector elements with 0.
43264   unsigned NumConcat = RegSize / InVT.getSizeInBits();
43265   SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
43266   Ops[0] = Zext0.getOperand(0);
43267   MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
43268   SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43269   Ops[0] = Zext1.getOperand(0);
43270   SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
43271 
43272   // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
43273   auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
43274                           ArrayRef<SDValue> Ops) {
43275     MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
43276     return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
43277   };
43278   MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
43279   return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
43280                           PSADBWBuilder);
43281 }
43282 
43283 // Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
43284 // PHMINPOSUW.
43285 static SDValue combineMinMaxReduction(SDNode *Extract, SelectionDAG &DAG,
43286                                       const X86Subtarget &Subtarget) {
43287   // Bail without SSE41.
43288   if (!Subtarget.hasSSE41())
43289     return SDValue();
43290 
43291   EVT ExtractVT = Extract->getValueType(0);
43292   if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
43293     return SDValue();
43294 
43295   // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
43296   ISD::NodeType BinOp;
43297   SDValue Src = DAG.matchBinOpReduction(
43298       Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
43299   if (!Src)
43300     return SDValue();
43301 
43302   EVT SrcVT = Src.getValueType();
43303   EVT SrcSVT = SrcVT.getScalarType();
43304   if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
43305     return SDValue();
43306 
43307   SDLoc DL(Extract);
43308   SDValue MinPos = Src;
43309 
43310   // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
43311   while (SrcVT.getSizeInBits() > 128) {
43312     SDValue Lo, Hi;
43313     std::tie(Lo, Hi) = splitVector(MinPos, DAG, DL);
43314     SrcVT = Lo.getValueType();
43315     MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
43316   }
43317   assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
43318           (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
43319          "Unexpected value type");
43320 
43321   // PHMINPOSUW applies to UMIN(v8i16); for SMIN/SMAX/UMAX we must apply a mask
43322   // to flip the value accordingly.
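  // Illustration (reasoning sketch): for SMAX we XOR each element with 0x7FFF,
  // which maps signed descending order onto unsigned ascending order, so the
  // unsigned min found by PHMINPOSUW is the signed max; XORing the result with
  // the same mask afterwards recovers the original value.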
43323   SDValue Mask;
43324   unsigned MaskEltsBits = ExtractVT.getSizeInBits();
43325   if (BinOp == ISD::SMAX)
43326     Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
43327   else if (BinOp == ISD::SMIN)
43328     Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
43329   else if (BinOp == ISD::UMAX)
43330     Mask = DAG.getAllOnesConstant(DL, SrcVT);
43331 
43332   if (Mask)
43333     MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
43334 
43335   // For v16i8 cases we need to perform UMIN on pairs of byte elements,
43336   // shuffling each upper element down and inserting zeros. This means that the
43337   // v16i8 UMIN will leave the upper element as zero, performing zero-extension
43338   // ready for the PHMINPOS.
43339   if (ExtractVT == MVT::i8) {
43340     SDValue Upper = DAG.getVectorShuffle(
43341         SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
43342         {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
43343     MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
43344   }
43345 
43346   // Perform the PHMINPOS on a v8i16 vector.
43347   MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
43348   MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
43349   MinPos = DAG.getBitcast(SrcVT, MinPos);
43350 
43351   if (Mask)
43352     MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);
43353 
43354   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
43355                      DAG.getIntPtrConstant(0, DL));
43356 }
43357 
43358 // Attempt to replace an all_of/any_of/parity style horizontal reduction with a MOVMSK.
43359 static SDValue combinePredicateReduction(SDNode *Extract, SelectionDAG &DAG,
43360                                          const X86Subtarget &Subtarget) {
43361   // Bail without SSE2.
43362   if (!Subtarget.hasSSE2())
43363     return SDValue();
43364 
43365   EVT ExtractVT = Extract->getValueType(0);
43366   unsigned BitWidth = ExtractVT.getSizeInBits();
43367   if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
43368       ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
43369     return SDValue();
43370 
43371   // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
43372   ISD::NodeType BinOp;
43373   SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
43374   if (!Match && ExtractVT == MVT::i1)
43375     Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
43376   if (!Match)
43377     return SDValue();
43378 
43379   // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
43380   // which we can't support here for now.
43381   if (Match.getScalarValueSizeInBits() != BitWidth)
43382     return SDValue();
43383 
43384   SDValue Movmsk;
43385   SDLoc DL(Extract);
43386   EVT MatchVT = Match.getValueType();
43387   unsigned NumElts = MatchVT.getVectorNumElements();
43388   unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
43389   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
43390   LLVMContext &Ctx = *DAG.getContext();
43391 
43392   if (ExtractVT == MVT::i1) {
43393     // Special case for (pre-legalization) vXi1 reductions.
43394     if (NumElts > 64 || !isPowerOf2_32(NumElts))
43395       return SDValue();
43396     if (Match.getOpcode() == ISD::SETCC) {
43397       ISD::CondCode CC = cast<CondCodeSDNode>(Match.getOperand(2))->get();
43398       if ((BinOp == ISD::AND && CC == ISD::CondCode::SETEQ) ||
43399           (BinOp == ISD::OR && CC == ISD::CondCode::SETNE)) {
43400         // For all_of(setcc(x,y,eq)) - use (iX)x == (iX)y.
43401         // For any_of(setcc(x,y,ne)) - use (iX)x != (iX)y.
43402         X86::CondCode X86CC;
43403         SDValue LHS = DAG.getFreeze(Match.getOperand(0));
43404         SDValue RHS = DAG.getFreeze(Match.getOperand(1));
43405         APInt Mask = APInt::getAllOnes(LHS.getScalarValueSizeInBits());
43406         if (SDValue V = LowerVectorAllEqual(DL, LHS, RHS, CC, Mask, Subtarget,
43407                                             DAG, X86CC))
43408           return DAG.getNode(ISD::TRUNCATE, DL, ExtractVT,
43409                              getSETCC(X86CC, V, DL, DAG));
43410       }
43411     }
43412     if (TLI.isTypeLegal(MatchVT)) {
43413       // If this is a legal AVX512 predicate type then we can just bitcast.
43414       EVT MovmskVT = EVT::getIntegerVT(Ctx, NumElts);
43415       Movmsk = DAG.getBitcast(MovmskVT, Match);
43416     } else {
43417       // Use combineBitcastvxi1 to create the MOVMSK.
43418       while (NumElts > MaxElts) {
43419         SDValue Lo, Hi;
43420         std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
43421         Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
43422         NumElts /= 2;
43423       }
43424       EVT MovmskVT = EVT::getIntegerVT(Ctx, NumElts);
43425       Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
43426     }
43427     if (!Movmsk)
43428       return SDValue();
43429     Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
43430   } else {
43431     // FIXME: Better handling of k-registers or 512-bit vectors?
43432     unsigned MatchSizeInBits = Match.getValueSizeInBits();
43433     if (!(MatchSizeInBits == 128 ||
43434           (MatchSizeInBits == 256 && Subtarget.hasAVX())))
43435       return SDValue();
43436 
43437     // Make sure this isn't a vector of 1 element. The perf win from using
43438     // MOVMSK diminishes with fewer elements in the reduction, but it is
43439     // generally better to get the comparison over to the GPRs as soon as
43440     // possible to reduce the number of vector ops.
43441     if (Match.getValueType().getVectorNumElements() < 2)
43442       return SDValue();
43443 
43444     // Check that we are extracting a reduction of all sign bits.
43445     if (DAG.ComputeNumSignBits(Match) != BitWidth)
43446       return SDValue();
43447 
43448     if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
43449       SDValue Lo, Hi;
43450       std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
43451       Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
43452       MatchSizeInBits = Match.getValueSizeInBits();
43453     }
43454 
43455     // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
43456     MVT MaskSrcVT;
43457     if (64 == BitWidth || 32 == BitWidth)
43458       MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
43459                                    MatchSizeInBits / BitWidth);
43460     else
43461       MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);
43462 
43463     SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
43464     Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
43465     NumElts = MaskSrcVT.getVectorNumElements();
43466   }
43467   assert((NumElts <= 32 || NumElts == 64) &&
43468          "Not expecting more than 64 elements");
43469 
43470   MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
43471   if (BinOp == ISD::XOR) {
43472     // parity -> (PARITY(MOVMSK X))
43473     SDValue Result = DAG.getNode(ISD::PARITY, DL, CmpVT, Movmsk);
43474     return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
43475   }
43476 
43477   SDValue CmpC;
43478   ISD::CondCode CondCode;
43479   if (BinOp == ISD::OR) {
43480     // any_of -> MOVMSK != 0
43481     CmpC = DAG.getConstant(0, DL, CmpVT);
43482     CondCode = ISD::CondCode::SETNE;
43483   } else {
43484     // all_of -> MOVMSK == ((1 << NumElts) - 1)
43485     CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
43486                            DL, CmpVT);
43487     CondCode = ISD::CondCode::SETEQ;
43488   }
43489 
43490   // The setcc produces an i8 of 0/1, so extend that to the result width and
43491   // negate to get the final 0/-1 mask value.
43492   EVT SetccVT = TLI.getSetCCResultType(DAG.getDataLayout(), Ctx, CmpVT);
43493   SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
43494   SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
43495   SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
43496   return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
43497 }
43498 
43499 static SDValue combineVPDPBUSDPattern(SDNode *Extract, SelectionDAG &DAG,
43500                                       const X86Subtarget &Subtarget) {
43501   if (!Subtarget.hasVNNI() && !Subtarget.hasAVXVNNI())
43502     return SDValue();
43503 
43504   EVT ExtractVT = Extract->getValueType(0);
43505   // Verify the type we're extracting is i32, as the output element type of
43506   // vpdpbusd is i32.
43507   if (ExtractVT != MVT::i32)
43508     return SDValue();
43509 
43510   EVT VT = Extract->getOperand(0).getValueType();
43511   if (!isPowerOf2_32(VT.getVectorNumElements()))
43512     return SDValue();
43513 
43514   // Match shuffle + add pyramid.
43515   ISD::NodeType BinOp;
43516   SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
43517 
43518   // We can't combine to vpdpbusd for zext, because each of the 4 multiplies
43519   // done by vpdpbusd compute a signed 16-bit product that will be sign extended
43520   // before adding into the accumulator.
43521   // TODO:
43522   // We also need to verify that the multiply has at least 2x the number of bits
43523   // of the input. We shouldn't match
43524   // (sign_extend (mul (vXi9 (zext (vXi8 X))), (vXi9 (zext (vXi8 Y)))).
43525   // if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND))
43526   //   Root = Root.getOperand(0);
43527 
43528   // If there was a match, we want Root to be a mul.
43529   if (!Root || Root.getOpcode() != ISD::MUL)
43530     return SDValue();
43531 
43532   // Check whether we have an extend-and-mul pattern.
43533   SDValue LHS, RHS;
43534   if (!detectExtMul(DAG, Root, LHS, RHS))
43535     return SDValue();
43536 
43537   // Create the dot product instruction.
43538   SDLoc DL(Extract);
43539   unsigned StageBias;
43540   SDValue DP = createVPDPBUSD(DAG, LHS, RHS, StageBias, DL, Subtarget);
43541 
43542   // If the original vector was wider than 4 elements, sum over the results
43543   // in the DP vector.
43544   unsigned Stages = Log2_32(VT.getVectorNumElements());
43545   EVT DpVT = DP.getValueType();
43546 
43547   if (Stages > StageBias) {
43548     unsigned DpElems = DpVT.getVectorNumElements();
43549 
43550     for (unsigned i = Stages - StageBias; i > 0; --i) {
43551       SmallVector<int, 16> Mask(DpElems, -1);
43552       for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
43553         Mask[j] = MaskEnd + j;
43554 
43555       SDValue Shuffle =
43556           DAG.getVectorShuffle(DpVT, DL, DP, DAG.getUNDEF(DpVT), Mask);
43557       DP = DAG.getNode(ISD::ADD, DL, DpVT, DP, Shuffle);
43558     }
43559   }
43560 
43561   // Return the lowest ExtractSizeInBits bits.
43562   EVT ResVT =
43563       EVT::getVectorVT(*DAG.getContext(), ExtractVT,
43564                        DpVT.getSizeInBits() / ExtractVT.getSizeInBits());
43565   DP = DAG.getBitcast(ResVT, DP);
43566   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, DP,
43567                      Extract->getOperand(1));
43568 }
43569 
43570 static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
43571                                       const X86Subtarget &Subtarget) {
43572   // PSADBW is only supported on SSE2 and up.
43573   if (!Subtarget.hasSSE2())
43574     return SDValue();
43575 
43576   EVT ExtractVT = Extract->getValueType(0);
43577   // Verify the type we're extracting is either i32 or i64.
43578   // FIXME: Could support other types, but this is what we have coverage for.
43579   if (ExtractVT != MVT::i32 && ExtractVT != MVT::i64)
43580     return SDValue();
43581 
43582   EVT VT = Extract->getOperand(0).getValueType();
43583   if (!isPowerOf2_32(VT.getVectorNumElements()))
43584     return SDValue();
43585 
43586   // Match shuffle + add pyramid.
43587   ISD::NodeType BinOp;
43588   SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});
43589 
43590   // The operands are expected to be zero extended from i8
43591   // (verified in detectZextAbsDiff).
43592   // In order to convert to i64 and above, an additional any/zero/sign
43593   // extend is expected.
43594   // The zero extend from 32 bits has no mathematical effect on the result,
43595   // and the sign extend is effectively a zero extend
43596   // (it extends the sign bit, which is zero),
43597   // so it is correct to skip the sign/zero extend instruction.
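  // e.g. (sketch) when the reduced vector is (zero_extend (abs (sub ...)) to
  // vXi64), the extend can be looked through: the PSADBW partial sums are far
  // below 2^32, so the extra high bits are zero either way.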
43598   if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
43599                Root.getOpcode() == ISD::ZERO_EXTEND ||
43600                Root.getOpcode() == ISD::ANY_EXTEND))
43601     Root = Root.getOperand(0);
43602 
43603   // If there was a match, we want Root to be an ABS node that is the root of
43604   // an abs-diff pattern.
43605   if (!Root || Root.getOpcode() != ISD::ABS)
43606     return SDValue();
43607 
43608   // Check whether we have an abs-diff pattern feeding into the ABS node.
43609   SDValue Zext0, Zext1;
43610   if (!detectZextAbsDiff(Root, Zext0, Zext1))
43611     return SDValue();
43612 
43613   // Create the SAD instruction.
43614   SDLoc DL(Extract);
43615   SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);
43616 
43617   // If the original vector was wider than 8 elements, sum over the results
43618   // in the SAD vector.
43619   unsigned Stages = Log2_32(VT.getVectorNumElements());
43620   EVT SadVT = SAD.getValueType();
43621   if (Stages > 3) {
43622     unsigned SadElems = SadVT.getVectorNumElements();
43623 
43624     for(unsigned i = Stages - 3; i > 0; --i) {
43625       SmallVector<int, 16> Mask(SadElems, -1);
43626       for(unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
43627         Mask[j] = MaskEnd + j;
43628 
43629       SDValue Shuffle =
43630           DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
43631       SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
43632     }
43633   }
43634 
43635   unsigned ExtractSizeInBits = ExtractVT.getSizeInBits();
43636   // Return the lowest ExtractSizeInBits bits.
43637   EVT ResVT = EVT::getVectorVT(*DAG.getContext(), ExtractVT,
43638                                SadVT.getSizeInBits() / ExtractSizeInBits);
43639   SAD = DAG.getBitcast(ResVT, SAD);
43640   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, SAD,
43641                      Extract->getOperand(1));
43642 }
43643 
43644 // Attempt to peek through a target shuffle and extract the scalar from the
43645 // source.
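// For instance (an illustrative sketch):
//   (extract_vector_elt (shuffle X, Y, <0,5,2,7>), 1)
//     -> (extract_vector_elt Y, 1)
// since shuffle lane 1 reads element 5, i.e. element 1 of the second input.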
43646 static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
43647                                          TargetLowering::DAGCombinerInfo &DCI,
43648                                          const X86Subtarget &Subtarget) {
43649   if (DCI.isBeforeLegalizeOps())
43650     return SDValue();
43651 
43652   SDLoc dl(N);
43653   SDValue Src = N->getOperand(0);
43654   SDValue Idx = N->getOperand(1);
43655 
43656   EVT VT = N->getValueType(0);
43657   EVT SrcVT = Src.getValueType();
43658   EVT SrcSVT = SrcVT.getVectorElementType();
43659   unsigned SrcEltBits = SrcSVT.getSizeInBits();
43660   unsigned NumSrcElts = SrcVT.getVectorNumElements();
43661 
43662   // Don't attempt this for boolean mask vectors or unknown extraction indices.
43663   if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
43664     return SDValue();
43665 
43666   const APInt &IdxC = N->getConstantOperandAPInt(1);
43667   if (IdxC.uge(NumSrcElts))
43668     return SDValue();
43669 
43670   SDValue SrcBC = peekThroughBitcasts(Src);
43671 
43672   // Handle extract(bitcast(broadcast(scalar_value))).
43673   if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
43674     SDValue SrcOp = SrcBC.getOperand(0);
43675     EVT SrcOpVT = SrcOp.getValueType();
43676     if (SrcOpVT.isScalarInteger() && VT.isInteger() &&
43677         (SrcOpVT.getSizeInBits() % SrcEltBits) == 0) {
43678       unsigned Scale = SrcOpVT.getSizeInBits() / SrcEltBits;
43679       unsigned Offset = IdxC.urem(Scale) * SrcEltBits;
43680       // TODO support non-zero offsets.
43681       if (Offset == 0) {
43682         SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, SrcVT.getScalarType());
43683         SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, VT);
43684         return SrcOp;
43685       }
43686     }
43687   }
43688 
43689   // If we're extracting a single element from a broadcast load and there are
43690   // no other users, just create a single load.
43691   if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
43692     auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
43693     unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
43694     if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
43695         VT.getSizeInBits() == SrcBCWidth && SrcEltBits == SrcBCWidth) {
43696       SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
43697                                  MemIntr->getBasePtr(),
43698                                  MemIntr->getPointerInfo(),
43699                                  MemIntr->getOriginalAlign(),
43700                                  MemIntr->getMemOperand()->getFlags());
43701       DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
43702       return Load;
43703     }
43704   }
43705 
43706   // Handle extract(bitcast(scalar_to_vector(scalar_value))) for integers.
43707   // TODO: Move to DAGCombine?
43708   if (SrcBC.getOpcode() == ISD::SCALAR_TO_VECTOR && VT.isInteger() &&
43709       SrcBC.getValueType().isInteger() &&
43710       (SrcBC.getScalarValueSizeInBits() % SrcEltBits) == 0 &&
43711       SrcBC.getScalarValueSizeInBits() ==
43712           SrcBC.getOperand(0).getValueSizeInBits()) {
43713     unsigned Scale = SrcBC.getScalarValueSizeInBits() / SrcEltBits;
43714     if (IdxC.ult(Scale)) {
43715       unsigned Offset = IdxC.getZExtValue() * SrcVT.getScalarSizeInBits();
43716       SDValue Scl = SrcBC.getOperand(0);
43717       EVT SclVT = Scl.getValueType();
43718       if (Offset) {
43719         Scl = DAG.getNode(ISD::SRL, dl, SclVT, Scl,
43720                           DAG.getShiftAmountConstant(Offset, SclVT, dl));
43721       }
43722       Scl = DAG.getZExtOrTrunc(Scl, dl, SrcVT.getScalarType());
43723       Scl = DAG.getZExtOrTrunc(Scl, dl, VT);
43724       return Scl;
43725     }
43726   }
43727 
43728   // Handle extract(truncate(x)) for 0'th index.
43729   // TODO: Treat this as a faux shuffle?
43730   // TODO: When can we use this for general indices?
43731   if (ISD::TRUNCATE == Src.getOpcode() && IdxC == 0 &&
43732       (SrcVT.getSizeInBits() % 128) == 0) {
43733     Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
43734     MVT ExtractVT = MVT::getVectorVT(SrcSVT.getSimpleVT(), 128 / SrcEltBits);
43735     return DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(ExtractVT, Src),
43736                        Idx);
43737   }
43738 
43739   // We can only legally extract other elements from 128-bit vectors and in
43740   // certain circumstances, depending on SSE-level.
43741   // TODO: Investigate float/double extraction if it will be just stored.
43742   auto GetLegalExtract = [&Subtarget, &DAG, &dl](SDValue Vec, EVT VecVT,
43743                                                  unsigned Idx) {
43744     EVT VecSVT = VecVT.getScalarType();
43745     if ((VecVT.is256BitVector() || VecVT.is512BitVector()) &&
43746         (VecSVT == MVT::i8 || VecSVT == MVT::i16 || VecSVT == MVT::i32 ||
43747          VecSVT == MVT::i64)) {
43748       unsigned EltSizeInBits = VecSVT.getSizeInBits();
43749       unsigned NumEltsPerLane = 128 / EltSizeInBits;
43750       unsigned LaneOffset = (Idx & ~(NumEltsPerLane - 1)) * EltSizeInBits;
43751       unsigned LaneIdx = LaneOffset / Vec.getScalarValueSizeInBits();
43752       VecVT = EVT::getVectorVT(*DAG.getContext(), VecSVT, NumEltsPerLane);
43753       Vec = extract128BitVector(Vec, LaneIdx, DAG, dl);
43754       Idx &= (NumEltsPerLane - 1);
43755     }
43756     if ((VecVT == MVT::v4i32 || VecVT == MVT::v2i64) &&
43757         ((Idx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
43758       return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VecVT.getScalarType(),
43759                          DAG.getBitcast(VecVT, Vec),
43760                          DAG.getIntPtrConstant(Idx, dl));
43761     }
43762     if ((VecVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
43763         (VecVT == MVT::v16i8 && Subtarget.hasSSE41())) {
43764       unsigned OpCode = (VecVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
43765       return DAG.getNode(OpCode, dl, MVT::i32, DAG.getBitcast(VecVT, Vec),
43766                          DAG.getTargetConstant(Idx, dl, MVT::i8));
43767     }
43768     return SDValue();
43769   };
43770 
43771   // Resolve the target shuffle inputs and mask.
43772   SmallVector<int, 16> Mask;
43773   SmallVector<SDValue, 2> Ops;
43774   if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
43775     return SDValue();
43776 
43777   // Shuffle inputs must be the same size as the result.
43778   if (llvm::any_of(Ops, [SrcVT](SDValue Op) {
43779         return SrcVT.getSizeInBits() != Op.getValueSizeInBits();
43780       }))
43781     return SDValue();
43782 
43783   // Attempt to narrow/widen the shuffle mask to the correct size.
43784   if (Mask.size() != NumSrcElts) {
43785     if ((NumSrcElts % Mask.size()) == 0) {
43786       SmallVector<int, 16> ScaledMask;
43787       int Scale = NumSrcElts / Mask.size();
43788       narrowShuffleMaskElts(Scale, Mask, ScaledMask);
43789       Mask = std::move(ScaledMask);
43790     } else if ((Mask.size() % NumSrcElts) == 0) {
43791       // Simplify Mask based on demanded element.
43792       int ExtractIdx = (int)IdxC.getZExtValue();
43793       int Scale = Mask.size() / NumSrcElts;
43794       int Lo = Scale * ExtractIdx;
43795       int Hi = Scale * (ExtractIdx + 1);
43796       for (int i = 0, e = (int)Mask.size(); i != e; ++i)
43797         if (i < Lo || Hi <= i)
43798           Mask[i] = SM_SentinelUndef;
43799 
43800       SmallVector<int, 16> WidenedMask;
43801       while (Mask.size() > NumSrcElts &&
43802              canWidenShuffleElements(Mask, WidenedMask))
43803         Mask = std::move(WidenedMask);
43804     }
43805   }
43806 
43807   // If narrowing/widening failed, see if we can extract+zero-extend.
43808   int ExtractIdx;
43809   EVT ExtractVT;
43810   if (Mask.size() == NumSrcElts) {
43811     ExtractIdx = Mask[IdxC.getZExtValue()];
43812     ExtractVT = SrcVT;
43813   } else {
43814     unsigned Scale = Mask.size() / NumSrcElts;
43815     if ((Mask.size() % NumSrcElts) != 0 || SrcVT.isFloatingPoint())
43816       return SDValue();
43817     unsigned ScaledIdx = Scale * IdxC.getZExtValue();
43818     if (!isUndefOrZeroInRange(Mask, ScaledIdx + 1, Scale - 1))
43819       return SDValue();
43820     ExtractIdx = Mask[ScaledIdx];
43821     EVT ExtractSVT = EVT::getIntegerVT(*DAG.getContext(), SrcEltBits / Scale);
43822     ExtractVT = EVT::getVectorVT(*DAG.getContext(), ExtractSVT, Mask.size());
43823     assert(SrcVT.getSizeInBits() == ExtractVT.getSizeInBits() &&
43824            "Failed to widen vector type");
43825   }
43826 
43827   // If the shuffle source element is undef/zero then we can just accept it.
43828   if (ExtractIdx == SM_SentinelUndef)
43829     return DAG.getUNDEF(VT);
43830 
43831   if (ExtractIdx == SM_SentinelZero)
43832     return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
43833                                 : DAG.getConstant(0, dl, VT);
43834 
43835   SDValue SrcOp = Ops[ExtractIdx / Mask.size()];
43836   ExtractIdx = ExtractIdx % Mask.size();
43837   if (SDValue V = GetLegalExtract(SrcOp, ExtractVT, ExtractIdx))
43838     return DAG.getZExtOrTrunc(V, dl, VT);
43839 
43840   return SDValue();
43841 }
43842 
43843 /// Extracting a scalar FP value from vector element 0 is free, so extract each
43844 /// operand first, then perform the math as a scalar op.
43845 static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG,
43846                                  const X86Subtarget &Subtarget) {
43847   assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
43848   SDValue Vec = ExtElt->getOperand(0);
43849   SDValue Index = ExtElt->getOperand(1);
43850   EVT VT = ExtElt->getValueType(0);
43851   EVT VecVT = Vec.getValueType();
43852 
43853   // TODO: If this is a unary/expensive/expand op, allow extraction from a
43854   // non-zero element because the shuffle+scalar op will be cheaper?
43855   if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
43856     return SDValue();
43857 
43858   // Vector FP compares don't fit the pattern of FP math ops (propagate, not
43859   // extract, the condition code), so deal with those as a special-case.
43860   if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
43861     EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
43862     if (OpVT != MVT::f32 && OpVT != MVT::f64)
43863       return SDValue();
43864 
43865     // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
43866     SDLoc DL(ExtElt);
43867     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
43868                                Vec.getOperand(0), Index);
43869     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
43870                                Vec.getOperand(1), Index);
43871     return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
43872   }
43873 
43874   if (!(VT == MVT::f16 && Subtarget.hasFP16()) && VT != MVT::f32 &&
43875       VT != MVT::f64)
43876     return SDValue();
43877 
43878   // Vector FP selects don't fit the pattern of FP math ops (because the
43879   // condition has a different type and we have to change the opcode), so deal
43880   // with those here.
43881   // FIXME: This is restricted to pre type legalization by ensuring the setcc
43882   // has i1 elements. If we loosen this we need to convert vector bool to a
43883   // scalar bool.
43884   if (Vec.getOpcode() == ISD::VSELECT &&
43885       Vec.getOperand(0).getOpcode() == ISD::SETCC &&
43886       Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
43887       Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
43888     // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
43889     SDLoc DL(ExtElt);
43890     SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
43891                                Vec.getOperand(0).getValueType().getScalarType(),
43892                                Vec.getOperand(0), Index);
43893     SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
43894                                Vec.getOperand(1), Index);
43895     SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
43896                                Vec.getOperand(2), Index);
43897     return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
43898   }
43899 
43900   // TODO: This switch could include FNEG and the x86-specific FP logic ops
43901   // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
43902   // missed load folding and fma+fneg combining.
43903   switch (Vec.getOpcode()) {
43904   case ISD::FMA: // Begin 3 operands
43905   case ISD::FMAD:
43906   case ISD::FADD: // Begin 2 operands
43907   case ISD::FSUB:
43908   case ISD::FMUL:
43909   case ISD::FDIV:
43910   case ISD::FREM:
43911   case ISD::FCOPYSIGN:
43912   case ISD::FMINNUM:
43913   case ISD::FMAXNUM:
43914   case ISD::FMINNUM_IEEE:
43915   case ISD::FMAXNUM_IEEE:
43916   case ISD::FMAXIMUM:
43917   case ISD::FMINIMUM:
43918   case X86ISD::FMAX:
43919   case X86ISD::FMIN:
43920   case ISD::FABS: // Begin 1 operand
43921   case ISD::FSQRT:
43922   case ISD::FRINT:
43923   case ISD::FCEIL:
43924   case ISD::FTRUNC:
43925   case ISD::FNEARBYINT:
43926   case ISD::FROUNDEVEN:
43927   case ISD::FROUND:
43928   case ISD::FFLOOR:
43929   case X86ISD::FRCP:
43930   case X86ISD::FRSQRT: {
43931     // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
43932     SDLoc DL(ExtElt);
43933     SmallVector<SDValue, 4> ExtOps;
43934     for (SDValue Op : Vec->ops())
43935       ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
43936     return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
43937   }
43938   default:
43939     return SDValue();
43940   }
43941   llvm_unreachable("All opcodes should return within switch");
43942 }
43943 
43944 /// Try to convert a vector reduction sequence composed of binops and shuffles
43945 /// into horizontal ops.
43946 static SDValue combineArithReduction(SDNode *ExtElt, SelectionDAG &DAG,
43947                                      const X86Subtarget &Subtarget) {
43948   assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");
43949 
43950   // We need at least SSE2 to do anything here.
43951   if (!Subtarget.hasSSE2())
43952     return SDValue();
43953 
43954   ISD::NodeType Opc;
43955   SDValue Rdx = DAG.matchBinOpReduction(ExtElt, Opc,
43956                                         {ISD::ADD, ISD::MUL, ISD::FADD}, true);
43957   if (!Rdx)
43958     return SDValue();
43959 
43960   SDValue Index = ExtElt->getOperand(1);
43961   assert(isNullConstant(Index) &&
43962          "Reduction doesn't end in an extract from index 0");
43963 
43964   EVT VT = ExtElt->getValueType(0);
43965   EVT VecVT = Rdx.getValueType();
43966   if (VecVT.getScalarType() != VT)
43967     return SDValue();
43968 
43969   SDLoc DL(ExtElt);
43970   unsigned NumElts = VecVT.getVectorNumElements();
43971   unsigned EltSizeInBits = VecVT.getScalarSizeInBits();
43972 
43973   // Extend v4i8/v8i8 vector to v16i8, with undef upper 64-bits.
43974   auto WidenToV16I8 = [&](SDValue V, bool ZeroExtend) {
43975     if (V.getValueType() == MVT::v4i8) {
43976       if (ZeroExtend && Subtarget.hasSSE41()) {
43977         V = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
43978                         DAG.getConstant(0, DL, MVT::v4i32),
43979                         DAG.getBitcast(MVT::i32, V),
43980                         DAG.getIntPtrConstant(0, DL));
43981         return DAG.getBitcast(MVT::v16i8, V);
43982       }
43983       V = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, V,
43984                       ZeroExtend ? DAG.getConstant(0, DL, MVT::v4i8)
43985                                  : DAG.getUNDEF(MVT::v4i8));
43986     }
43987     return DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V,
43988                        DAG.getUNDEF(MVT::v8i8));
43989   };
43990 
43991   // vXi8 mul reduction - promote to vXi16 mul reduction.
43992   if (Opc == ISD::MUL) {
43993     if (VT != MVT::i8 || NumElts < 4 || !isPowerOf2_32(NumElts))
43994       return SDValue();
43995     if (VecVT.getSizeInBits() >= 128) {
43996       EVT WideVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts / 2);
43997       SDValue Lo = getUnpackl(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
43998       SDValue Hi = getUnpackh(DAG, DL, VecVT, Rdx, DAG.getUNDEF(VecVT));
43999       Lo = DAG.getBitcast(WideVT, Lo);
44000       Hi = DAG.getBitcast(WideVT, Hi);
44001       Rdx = DAG.getNode(Opc, DL, WideVT, Lo, Hi);
44002       while (Rdx.getValueSizeInBits() > 128) {
44003         std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
44004         Rdx = DAG.getNode(Opc, DL, Lo.getValueType(), Lo, Hi);
44005       }
44006     } else {
44007       Rdx = WidenToV16I8(Rdx, false);
44008       Rdx = getUnpackl(DAG, DL, MVT::v16i8, Rdx, DAG.getUNDEF(MVT::v16i8));
44009       Rdx = DAG.getBitcast(MVT::v8i16, Rdx);
44010     }
44011     if (NumElts >= 8)
44012       Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44013                         DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44014                                              {4, 5, 6, 7, -1, -1, -1, -1}));
44015     Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44016                       DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44017                                            {2, 3, -1, -1, -1, -1, -1, -1}));
44018     Rdx = DAG.getNode(Opc, DL, MVT::v8i16, Rdx,
44019                       DAG.getVectorShuffle(MVT::v8i16, DL, Rdx, Rdx,
44020                                            {1, -1, -1, -1, -1, -1, -1, -1}));
44021     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44022     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44023   }
44024 
44025   // vXi8 add reduction - sub-128-bit vector.
44026   if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
44027     Rdx = WidenToV16I8(Rdx, true);
44028     Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
44029                       DAG.getConstant(0, DL, MVT::v16i8));
44030     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44031     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44032   }
44033 
44034   // Must be a >=128-bit vector with pow2 elements.
44035   if ((VecVT.getSizeInBits() % 128) != 0 || !isPowerOf2_32(NumElts))
44036     return SDValue();
44037 
44038   // vXi8 add reduction - sum lo/hi halves then use PSADBW.
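  // e.g. (sketch) for v32i8: add the two 128-bit halves to get a v16i8, add
  // the upper 8 bytes onto the lower 8, then (PSADBW x, 0) sums those bytes
  // into an i64 lane whose low byte is the i8 result.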
44039   if (VT == MVT::i8) {
44040     while (Rdx.getValueSizeInBits() > 128) {
44041       SDValue Lo, Hi;
44042       std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
44043       VecVT = Lo.getValueType();
44044       Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
44045     }
44046     assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");
44047 
44048     SDValue Hi = DAG.getVectorShuffle(
44049         MVT::v16i8, DL, Rdx, Rdx,
44050         {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
44051     Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
44052     Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
44053                       getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
44054     Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
44055     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44056   }
44057 
44058   // See if we can use vXi8 PSADBW add reduction for larger zext types.
44059   // If the source vector values are 0-255, then we can use PSADBW to
44060   // sum+zext v8i8 subvectors to vXi64, then perform the reduction.
44061   // TODO: See if it's worth avoiding vXi16/i32 truncations?
44062   if (Opc == ISD::ADD && NumElts >= 4 && EltSizeInBits >= 16 &&
44063       DAG.computeKnownBits(Rdx).getMaxValue().ule(255) &&
44064       (EltSizeInBits == 16 || Rdx.getOpcode() == ISD::ZERO_EXTEND ||
44065        Subtarget.hasAVX512())) {
44066     if (Rdx.getValueType() == MVT::v8i16) {
44067       Rdx = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Rdx,
44068                         DAG.getUNDEF(MVT::v8i16));
44069     } else {
44070       EVT ByteVT = VecVT.changeVectorElementType(MVT::i8);
44071       Rdx = DAG.getNode(ISD::TRUNCATE, DL, ByteVT, Rdx);
44072       if (ByteVT.getSizeInBits() < 128)
44073         Rdx = WidenToV16I8(Rdx, true);
44074     }
44075 
44076     // Build the PSADBW, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
44077     auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44078                             ArrayRef<SDValue> Ops) {
44079       MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
44080       SDValue Zero = DAG.getConstant(0, DL, Ops[0].getValueType());
44081       return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops[0], Zero);
44082     };
44083     MVT SadVT = MVT::getVectorVT(MVT::i64, Rdx.getValueSizeInBits() / 64);
44084     Rdx = SplitOpsAndApply(DAG, Subtarget, DL, SadVT, {Rdx}, PSADBWBuilder);
44085 
44086     // TODO: We could truncate to vXi16/vXi32 before performing the reduction.
44087     while (Rdx.getValueSizeInBits() > 128) {
44088       SDValue Lo, Hi;
44089       std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
44090       VecVT = Lo.getValueType();
44091       Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
44092     }
44093     assert(Rdx.getValueType() == MVT::v2i64 && "v2i64 reduction expected");
44094 
44095     if (NumElts > 8) {
44096       SDValue RdxHi = DAG.getVectorShuffle(MVT::v2i64, DL, Rdx, Rdx, {1, -1});
44097       Rdx = DAG.getNode(ISD::ADD, DL, MVT::v2i64, Rdx, RdxHi);
44098     }
44099 
44100     VecVT = MVT::getVectorVT(VT.getSimpleVT(), 128 / VT.getSizeInBits());
44101     Rdx = DAG.getBitcast(VecVT, Rdx);
44102     return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44103   }
44104 
44105   // Only use (F)HADD opcodes if they aren't microcoded or we're minimizing code size.
44106   if (!shouldUseHorizontalOp(true, DAG, Subtarget))
44107     return SDValue();
44108 
44109   unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;
44110 
44111   // 256-bit horizontal instructions operate on 128-bit chunks rather than
44112   // across the whole vector, so we need an extract + hop preliminary stage.
44113   // This is the only step where the operands of the hop are not the same value.
44114   // TODO: We could extend this to handle 512-bit or even longer vectors.
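  // A sketch for a v8i32 add reduction (illustrative only):
  //   Lo  = extract low v4i32, Hi = extract high v4i32
  //   Rdx = phaddd Hi, Lo            ; cross-half combining stage
  //   Rdx = phaddd Rdx, Rdx          ; followed by log2(4) = 2 more
  //   Rdx = phaddd Rdx, Rdx          ; same-operand hop steps
  //   res = extract_vector_elt Rdx, 0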
44115   if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
44116       ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
44117     unsigned NumElts = VecVT.getVectorNumElements();
44118     SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
44119     SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
44120     Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
44121     VecVT = Rdx.getValueType();
44122   }
44123   if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
44124       !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
44125     return SDValue();
44126 
44127   // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
44128   unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
44129   for (unsigned i = 0; i != ReductionSteps; ++i)
44130     Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);
44131 
44132   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
44133 }
44134 
44135 /// Detect vector gather/scatter index generation and convert it from being a
44136 /// bunch of shuffles and extracts into a somewhat faster sequence.
44137 /// For i686, the best sequence is apparently storing the value and loading
44138 /// scalars back, while for x64 we should use 64-bit extracts and shifts.
44139 static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
44140                                        TargetLowering::DAGCombinerInfo &DCI,
44141                                        const X86Subtarget &Subtarget) {
44142   if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
44143     return NewOp;
44144 
44145   SDValue InputVector = N->getOperand(0);
44146   SDValue EltIdx = N->getOperand(1);
44147   auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);
44148 
44149   EVT SrcVT = InputVector.getValueType();
44150   EVT VT = N->getValueType(0);
44151   SDLoc dl(InputVector);
44152   bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
44153   unsigned NumSrcElts = SrcVT.getVectorNumElements();
44154   unsigned NumEltBits = VT.getScalarSizeInBits();
44155   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44156 
44157   if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
44158     return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
44159 
44160   // Integer Constant Folding.
44161   if (CIdx && VT.isInteger()) {
44162     APInt UndefVecElts;
44163     SmallVector<APInt, 16> EltBits;
44164     unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
44165     if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
44166                                       EltBits, true, false)) {
44167       uint64_t Idx = CIdx->getZExtValue();
44168       if (UndefVecElts[Idx])
44169         return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
44170       return DAG.getConstant(EltBits[Idx].zext(NumEltBits), dl, VT);
44171     }
44172 
44173     // Convert extract_element(bitcast(<X x i1>)) -> bitcast(extract_subvector()).
44174     // This improves lowering of bool masks in Rust, which splits them into a byte array.
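    // For example (illustrative): extracting byte 1 of a bitcast mask
    //   (i8 extract_vector_elt (v2i8 bitcast (v16i1 Mask)), 1)
    //   --> (i8 bitcast (v8i1 extract_subvector Mask, 8))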
44175     if (InputVector.getOpcode() == ISD::BITCAST && (NumEltBits % 8) == 0) {
44176       SDValue Src = peekThroughBitcasts(InputVector);
44177       if (Src.getValueType().getScalarType() == MVT::i1 &&
44178           TLI.isTypeLegal(Src.getValueType())) {
44179         MVT SubVT = MVT::getVectorVT(MVT::i1, NumEltBits);
44180         SDValue Sub = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, Src,
44181             DAG.getIntPtrConstant(CIdx->getZExtValue() * NumEltBits, dl));
44182         return DAG.getBitcast(VT, Sub);
44183       }
44184     }
44185   }
44186 
44187   if (IsPextr) {
44188     if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumEltBits),
44189                                  DCI))
44190       return SDValue(N, 0);
44191 
44192     // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
44193     if ((InputVector.getOpcode() == X86ISD::PINSRB ||
44194          InputVector.getOpcode() == X86ISD::PINSRW) &&
44195         InputVector.getOperand(2) == EltIdx) {
44196       assert(SrcVT == InputVector.getOperand(0).getValueType() &&
44197              "Vector type mismatch");
44198       SDValue Scl = InputVector.getOperand(1);
44199       Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
44200       return DAG.getZExtOrTrunc(Scl, dl, VT);
44201     }
44202 
44203     // TODO - Remove this once we can handle the implicit zero-extension of
44204     // X86ISD::PEXTRW/X86ISD::PEXTRB in combinePredicateReduction and
44205     // combineBasicSADPattern.
44206     return SDValue();
44207   }
44208 
44209   // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
44210   if (VT == MVT::i64 && SrcVT == MVT::v1i64 &&
44211       InputVector.getOpcode() == ISD::BITCAST &&
44212       InputVector.getOperand(0).getValueType() == MVT::x86mmx &&
44213       isNullConstant(EltIdx) && InputVector.hasOneUse())
44214     return DAG.getBitcast(VT, InputVector);
44215 
44216   // Detect mmx to i32 conversion through a v2i32 elt extract.
44217   if (VT == MVT::i32 && SrcVT == MVT::v2i32 &&
44218       InputVector.getOpcode() == ISD::BITCAST &&
44219       InputVector.getOperand(0).getValueType() == MVT::x86mmx &&
44220       isNullConstant(EltIdx) && InputVector.hasOneUse())
44221     return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32,
44222                        InputVector.getOperand(0));
44223 
44224   // Check whether this extract is the root of a sum of absolute differences
44225   // pattern. This has to be done here because we really want it to happen
44226   // pre-legalization.
44227   if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
44228     return SAD;
44229 
44230   if (SDValue VPDPBUSD = combineVPDPBUSDPattern(N, DAG, Subtarget))
44231     return VPDPBUSD;
44232 
44233   // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
44234   if (SDValue Cmp = combinePredicateReduction(N, DAG, Subtarget))
44235     return Cmp;
44236 
44237   // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
44238   if (SDValue MinMax = combineMinMaxReduction(N, DAG, Subtarget))
44239     return MinMax;
44240 
44241   // Attempt to optimize ADD/FADD/MUL reductions with HADD, promotion etc..
44242   if (SDValue V = combineArithReduction(N, DAG, Subtarget))
44243     return V;
44244 
44245   if (SDValue V = scalarizeExtEltFP(N, DAG, Subtarget))
44246     return V;
44247 
44248   // Attempt to extract an i1 element by using MOVMSK to extract the sign bits
44249   // and then testing the relevant element.
44250   //
44251   // Note that we only combine extracts on the *same* result number, i.e.
44252   //   t0 = merge_values a0, a1, a2, a3
44253   //   i1 = extract_vector_elt t0, Constant:i64<2>
44254   //   i1 = extract_vector_elt t0, Constant:i64<3>
44255   // but not
44256   //   i1 = extract_vector_elt t0:1, Constant:i64<2>
44257   // since the latter would need its own MOVMSK.
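  // Illustrative example (sketch): two constant extracts of a v4i1 mask can
  // share a single sign-bit extraction:
  //   i1 a = extract_vector_elt v4i1 X, 2
  //   i1 b = extract_vector_elt v4i1 X, 3
  //   --> bits = movmsk-style bitcast of X; a = (bits & 4) == 4; b = (bits & 8) == 8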
44258   if (SrcVT.getScalarType() == MVT::i1) {
44259     bool IsVar = !CIdx;
44260     SmallVector<SDNode *, 16> BoolExtracts;
44261     unsigned ResNo = InputVector.getResNo();
44262     auto IsBoolExtract = [&BoolExtracts, &ResNo, &IsVar](SDNode *Use) {
44263       if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
44264           Use->getOperand(0).getResNo() == ResNo &&
44265           Use->getValueType(0) == MVT::i1) {
44266         BoolExtracts.push_back(Use);
44267         IsVar |= !isa<ConstantSDNode>(Use->getOperand(1));
44268         return true;
44269       }
44270       return false;
44271     };
44272     // TODO: Can we drop the oneuse check for constant extracts?
44273     if (all_of(InputVector->uses(), IsBoolExtract) &&
44274         (IsVar || BoolExtracts.size() > 1)) {
44275       EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
44276       if (SDValue BC =
44277               combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
44278         for (SDNode *Use : BoolExtracts) {
44279           // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
44280           // Mask = 1 << MaskIdx
44281           SDValue MaskIdx = DAG.getZExtOrTrunc(Use->getOperand(1), dl, MVT::i8);
44282           SDValue MaskBit = DAG.getConstant(1, dl, BCVT);
44283           SDValue Mask = DAG.getNode(ISD::SHL, dl, BCVT, MaskBit, MaskIdx);
44284           SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
44285           Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
44286           DCI.CombineTo(Use, Res);
44287         }
44288         return SDValue(N, 0);
44289       }
44290     }
44291   }
44292 
44293   // If this extract is from a loaded vector value and will be used as an
44294   // integer, that requires a potentially expensive XMM -> GPR transfer.
44295   // Additionally, if we can convert to a scalar integer load, that will likely
44296   // be folded into a subsequent integer op.
44297   // Note: Unlike the related fold for this in DAGCombiner, this is not limited
44298   //       to a single-use of the loaded vector. For the reasons above, we
44299   //       expect this to be profitable even if it creates an extra load.
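  // E.g. (a sketch): (i32 extract_vector_elt (v4i32 load %p), 2)
  //   --> (i32 load %p + 8)
  // assuming the extracted element type matches and DAG legalization is done.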
44300   bool LikelyUsedAsVector = any_of(N->uses(), [](SDNode *Use) {
44301     return Use->getOpcode() == ISD::STORE ||
44302            Use->getOpcode() == ISD::INSERT_VECTOR_ELT ||
44303            Use->getOpcode() == ISD::SCALAR_TO_VECTOR;
44304   });
44305   auto *LoadVec = dyn_cast<LoadSDNode>(InputVector);
44306   if (LoadVec && CIdx && ISD::isNormalLoad(LoadVec) && VT.isInteger() &&
44307       SrcVT.getVectorElementType() == VT && DCI.isAfterLegalizeDAG() &&
44308       !LikelyUsedAsVector && LoadVec->isSimple()) {
44309     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44310     SDValue NewPtr =
44311         TLI.getVectorElementPointer(DAG, LoadVec->getBasePtr(), SrcVT, EltIdx);
44312     unsigned PtrOff = VT.getSizeInBits() * CIdx->getZExtValue() / 8;
44313     MachinePointerInfo MPI = LoadVec->getPointerInfo().getWithOffset(PtrOff);
44314     Align Alignment = commonAlignment(LoadVec->getAlign(), PtrOff);
44315     SDValue Load =
44316         DAG.getLoad(VT, dl, LoadVec->getChain(), NewPtr, MPI, Alignment,
44317                     LoadVec->getMemOperand()->getFlags(), LoadVec->getAAInfo());
44318     DAG.makeEquivalentMemoryOrdering(LoadVec, Load);
44319     return Load;
44320   }
44321 
44322   return SDValue();
44323 }
44324 
44325 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
44326 // This is more or less the reverse of combineBitcastvxi1.
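// A rough sketch of the expansion (assuming a v8i16 sext of (v8i1 bitcast (i8 X))):
//   Vec = broadcast X to all v8i16 elements
//   Vec = and Vec, <1, 2, 4, 8, 16, 32, 64, 128>    ; isolate one bit per lane
//   Vec = sext (setcc eq Vec, <1, 2, ..., 128>)     ; all-ones where the bit is set
//   (for ZERO/ANY_EXTEND the result is shifted right to give 0/1 per lane)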
44327 static SDValue combineToExtendBoolVectorInReg(
44328     unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N0, SelectionDAG &DAG,
44329     TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {
44330   if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
44331       Opcode != ISD::ANY_EXTEND)
44332     return SDValue();
44333   if (!DCI.isBeforeLegalizeOps())
44334     return SDValue();
44335   if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
44336     return SDValue();
44337 
44338   EVT SVT = VT.getScalarType();
44339   EVT InSVT = N0.getValueType().getScalarType();
44340   unsigned EltSizeInBits = SVT.getSizeInBits();
44341 
44342   // The input must be a bool vector (bitcast from a scalar integer), and the
44343   // extension must be to a vector with legal integer element types.
44344   if (!VT.isVector())
44345     return SDValue();
44346   if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
44347     return SDValue();
44348   if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
44349     return SDValue();
44350 
44351   SDValue N00 = N0.getOperand(0);
44352   EVT SclVT = N00.getValueType();
44353   if (!SclVT.isScalarInteger())
44354     return SDValue();
44355 
44356   SDValue Vec;
44357   SmallVector<int> ShuffleMask;
44358   unsigned NumElts = VT.getVectorNumElements();
44359   assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
44360 
44361   // Broadcast the scalar integer to the vector elements.
44362   if (NumElts > EltSizeInBits) {
44363     // If the scalar integer is greater than the vector element size, then we
44364     // must split it down into sub-sections for broadcasting. For example:
44365     //   i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
44366     //   i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
44367     assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
44368     unsigned Scale = NumElts / EltSizeInBits;
44369     EVT BroadcastVT = EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
44370     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
44371     Vec = DAG.getBitcast(VT, Vec);
44372 
44373     for (unsigned i = 0; i != Scale; ++i)
44374       ShuffleMask.append(EltSizeInBits, i);
44375     Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
44376   } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
44377              (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
44378     // If we have register broadcast instructions, use the scalar size as the
44379     // element type for the shuffle. Then cast to the wider element type. The
44380     // widened bits won't be used, and this might allow the use of a broadcast
44381     // load.
44382     assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
44383     unsigned Scale = EltSizeInBits / NumElts;
44384     EVT BroadcastVT =
44385         EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
44386     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
44387     ShuffleMask.append(NumElts * Scale, 0);
44388     Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
44389     Vec = DAG.getBitcast(VT, Vec);
44390   } else {
44391     // For a smaller scalar integer, we can simply any-extend it to the vector
44392     // element size (we don't care about the upper bits) and broadcast it to all
44393     // elements.
44394     SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
44395     Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
44396     ShuffleMask.append(NumElts, 0);
44397     Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
44398   }
44399 
44400   // Now, mask the relevant bit in each element.
44401   SmallVector<SDValue, 32> Bits;
44402   for (unsigned i = 0; i != NumElts; ++i) {
44403     int BitIdx = (i % EltSizeInBits);
44404     APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
44405     Bits.push_back(DAG.getConstant(Bit, DL, SVT));
44406   }
44407   SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
44408   Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
44409 
44410   // Compare against the bitmask and extend the result.
44411   EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
44412   Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
44413   Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
44414 
44415   // For SEXT, this is now done, otherwise shift the result down for
44416   // zero-extension.
44417   if (Opcode == ISD::SIGN_EXTEND)
44418     return Vec;
44419   return DAG.getNode(ISD::SRL, DL, VT, Vec,
44420                      DAG.getConstant(EltSizeInBits - 1, DL, VT));
44421 }
44422 
44423 /// If a vector select has an operand that is -1 or 0, try to simplify the
44424 /// select to a bitwise logic operation.
44425 /// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
44426 static SDValue
44427 combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
44428                                  TargetLowering::DAGCombinerInfo &DCI,
44429                                  const X86Subtarget &Subtarget) {
44430   SDValue Cond = N->getOperand(0);
44431   SDValue LHS = N->getOperand(1);
44432   SDValue RHS = N->getOperand(2);
44433   EVT VT = LHS.getValueType();
44434   EVT CondVT = Cond.getValueType();
44435   SDLoc DL(N);
44436   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44437 
44438   if (N->getOpcode() != ISD::VSELECT)
44439     return SDValue();
44440 
44441   assert(CondVT.isVector() && "Vector select expects a vector selector!");
44442 
44443   // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
44444   // TODO: Can we assert that both operands are not zeros (because that should
44445   //       get simplified at node creation time)?
44446   bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
44447   bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
44448 
44449   // If both inputs are 0/undef, create a complete zero vector.
44450   // FIXME: As noted above this should be handled by DAGCombiner/getNode.
44451   if (TValIsAllZeros && FValIsAllZeros) {
44452     if (VT.isFloatingPoint())
44453       return DAG.getConstantFP(0.0, DL, VT);
44454     return DAG.getConstant(0, DL, VT);
44455   }
44456 
44457   // To use the condition operand as a bitwise mask, it must have elements that
44458   // are the same size as the select elements. I.e., the condition operand must
44459   // have already been promoted from the IR select condition type <N x i1>.
44460   // Don't check if the types themselves are equal because that excludes
44461   // vector floating-point selects.
44462   if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
44463     return SDValue();
44464 
44465   // Try to invert the condition if true value is not all 1s and false value is
44466   // not all 0s. Only do this if the condition has one use.
44467   bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
44468   if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
44469       // Check if the selector will be produced by CMPP*/PCMP*.
44470       Cond.getOpcode() == ISD::SETCC &&
44471       // Check if SETCC has already been promoted.
44472       TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
44473           CondVT) {
44474     bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
44475 
44476     if (TValIsAllZeros || FValIsAllOnes) {
44477       SDValue CC = Cond.getOperand(2);
44478       ISD::CondCode NewCC = ISD::getSetCCInverse(
44479           cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
44480       Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
44481                           NewCC);
44482       std::swap(LHS, RHS);
44483       TValIsAllOnes = FValIsAllOnes;
44484       FValIsAllZeros = TValIsAllZeros;
44485     }
44486   }
44487 
44488   // Cond value must be 'sign splat' to be converted to a logical op.
44489   if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
44490     return SDValue();
44491 
44492   // vselect Cond, 111..., 000... -> Cond
44493   if (TValIsAllOnes && FValIsAllZeros)
44494     return DAG.getBitcast(VT, Cond);
44495 
44496   if (!TLI.isTypeLegal(CondVT))
44497     return SDValue();
44498 
44499   // vselect Cond, 111..., X -> or Cond, X
44500   if (TValIsAllOnes) {
44501     SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
44502     SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
44503     return DAG.getBitcast(VT, Or);
44504   }
44505 
44506   // vselect Cond, X, 000... -> and Cond, X
44507   if (FValIsAllZeros) {
44508     SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
44509     SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
44510     return DAG.getBitcast(VT, And);
44511   }
44512 
44513   // vselect Cond, 000..., X -> andn Cond, X
44514   if (TValIsAllZeros) {
44515     SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
44516     SDValue AndN;
44517     // The canonical form differs for i1 vectors - X86ISD::ANDNP is not used.
44518     if (CondVT.getScalarType() == MVT::i1)
44519       AndN = DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT),
44520                          CastRHS);
44521     else
44522       AndN = DAG.getNode(X86ISD::ANDNP, DL, CondVT, Cond, CastRHS);
44523     return DAG.getBitcast(VT, AndN);
44524   }
44525 
44526   return SDValue();
44527 }
44528 
44529 /// If both arms of a vector select are concatenated vectors, split the select,
44530 /// and concatenate the result to eliminate a wide (256-bit) vector instruction:
44531 ///   vselect Cond, (concat T0, T1), (concat F0, F1) -->
44532 ///   concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
44533 static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
44534                                   const X86Subtarget &Subtarget) {
44535   unsigned Opcode = N->getOpcode();
44536   if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
44537     return SDValue();
44538 
44539   // TODO: Split 512-bit vectors too?
44540   EVT VT = N->getValueType(0);
44541   if (!VT.is256BitVector())
44542     return SDValue();
44543 
44544   // TODO: Split as long as any 2 of the 3 operands are concatenated?
44545   SDValue Cond = N->getOperand(0);
44546   SDValue TVal = N->getOperand(1);
44547   SDValue FVal = N->getOperand(2);
44548   if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
44549       !isFreeToSplitVector(TVal.getNode(), DAG) ||
44550       !isFreeToSplitVector(FVal.getNode(), DAG))
44551     return SDValue();
44552 
44553   auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
44554                             ArrayRef<SDValue> Ops) {
44555     return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
44556   };
44557   return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
44558                           makeBlend, /*CheckBWI*/ false);
44559 }
44560 
44561 static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
44562   SDValue Cond = N->getOperand(0);
44563   SDValue LHS = N->getOperand(1);
44564   SDValue RHS = N->getOperand(2);
44565   SDLoc DL(N);
44566 
44567   auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
44568   auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
44569   if (!TrueC || !FalseC)
44570     return SDValue();
44571 
44572   // Don't do this for integer types that aren't legal on the target.
44573   EVT VT = N->getValueType(0);
44574   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
44575     return SDValue();
44576 
44577   // We're going to use the condition bit in math or logic ops. We could allow
44578   // this with a wider condition value (post-legalization it becomes an i8),
44579   // but if nothing is creating selects that late, it doesn't matter.
44580   if (Cond.getValueType() != MVT::i1)
44581     return SDValue();
44582 
44583   // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
44584   // 3, 5, or 9 with i32/i64, so those get transformed too.
44585   // TODO: For constants that overflow or do not differ by power-of-2 or small
44586   // multiplier, convert to 'and' + 'add'.
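  // A small worked example (illustrative): select i1 Cond, 11, 2
  //   diff = 11 - 2 = 9 (a cheap LEA multiplier for i32/i64), so this becomes
  //   (zext Cond) * 9 + 2, i.e. no branch and no CMOV.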
44587   const APInt &TrueVal = TrueC->getAPIntValue();
44588   const APInt &FalseVal = FalseC->getAPIntValue();
44589 
44590   // We have a more efficient lowering for "(X == 0) ? Y : -1" using SBB.
44591   if ((TrueVal.isAllOnes() || FalseVal.isAllOnes()) &&
44592       Cond.getOpcode() == ISD::SETCC && isNullConstant(Cond.getOperand(1))) {
44593     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
44594     if (CC == ISD::SETEQ || CC == ISD::SETNE)
44595       return SDValue();
44596   }
44597 
44598   bool OV;
44599   APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
44600   if (OV)
44601     return SDValue();
44602 
44603   APInt AbsDiff = Diff.abs();
44604   if (AbsDiff.isPowerOf2() ||
44605       ((VT == MVT::i32 || VT == MVT::i64) &&
44606        (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
44607 
44608     // We need a positive multiplier constant for shift/LEA codegen. The 'not'
44609     // of the condition can usually be folded into a compare predicate, but even
44610     // without that, the sequence should be cheaper than a CMOV alternative.
44611     if (TrueVal.slt(FalseVal)) {
44612       Cond = DAG.getNOT(DL, Cond, MVT::i1);
44613       std::swap(TrueC, FalseC);
44614     }
44615 
44616     // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
44617     SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);
44618 
44619     // Multiply condition by the difference if non-one.
44620     if (!AbsDiff.isOne())
44621       R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));
44622 
44623     // Add the base if non-zero.
44624     if (!FalseC->isZero())
44625       R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));
44626 
44627     return R;
44628   }
44629 
44630   return SDValue();
44631 }
44632 
44633 /// If this is a *dynamic* select (non-constant condition) and we can match
44634 /// this node with one of the variable blend instructions, restructure the
44635 /// condition so that blends can use the high (sign) bit of each element.
44636 /// This function will also call SimplifyDemandedBits on already created
44637 /// BLENDV to perform additional simplifications.
44638 static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
44639                                       TargetLowering::DAGCombinerInfo &DCI,
44640                                       const X86Subtarget &Subtarget) {
44641   SDValue Cond = N->getOperand(0);
44642   if ((N->getOpcode() != ISD::VSELECT &&
44643        N->getOpcode() != X86ISD::BLENDV) ||
44644       ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
44645     return SDValue();
44646 
44647   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44648   unsigned BitWidth = Cond.getScalarValueSizeInBits();
44649   EVT VT = N->getValueType(0);
44650 
44651   // We can only handle the cases where VSELECT is directly legal on the
44652   // subtarget. We custom lower VSELECT nodes with constant conditions and
44653   // this makes it hard to see whether a dynamic VSELECT will correctly
44654   // lower, so we both check the operation's status and explicitly handle the
44655   // cases where a *dynamic* blend will fail even though a constant-condition
44656   // blend could be custom lowered.
44657   // FIXME: We should find a better way to handle this class of problems.
44658   // Potentially, we should combine constant-condition vselect nodes
44659   // pre-legalization into shuffles and not mark as many types as custom
44660   // lowered.
44661   if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
44662     return SDValue();
44663   // FIXME: We don't support i16-element blends currently. We could and
44664   // should support them by making *all* the bits in the condition be set
44665   // rather than just the high bit and using an i8-element blend.
44666   if (VT.getVectorElementType() == MVT::i16)
44667     return SDValue();
44668   // Dynamic blending was only available from SSE4.1 onward.
44669   if (VT.is128BitVector() && !Subtarget.hasSSE41())
44670     return SDValue();
44671   // Byte blends are only available with AVX2.
44672   if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
44673     return SDValue();
44674   // There are no 512-bit blend instructions that use sign bits.
44675   if (VT.is512BitVector())
44676     return SDValue();
44677 
44678   // Don't optimize before the condition has been transformed to a legal type
44679   // and don't ever optimize vector selects that map to AVX512 mask-registers.
44680   if (BitWidth < 8 || BitWidth > 64)
44681     return SDValue();
44682 
44683   auto OnlyUsedAsSelectCond = [](SDValue Cond) {
44684     for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
44685          UI != UE; ++UI)
44686       if ((UI->getOpcode() != ISD::VSELECT &&
44687            UI->getOpcode() != X86ISD::BLENDV) ||
44688           UI.getOperandNo() != 0)
44689         return false;
44690 
44691     return true;
44692   };
44693 
44694   APInt DemandedBits(APInt::getSignMask(BitWidth));
44695 
44696   if (OnlyUsedAsSelectCond(Cond)) {
44697     KnownBits Known;
44698     TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
44699                                           !DCI.isBeforeLegalizeOps());
44700     if (!TLI.SimplifyDemandedBits(Cond, DemandedBits, Known, TLO, 0, true))
44701       return SDValue();
44702 
44703     // If we changed the computation somewhere in the DAG, this change will
44704     // affect all users of Cond. Update all the nodes so that we do not use
44705     // the generic VSELECT anymore. Otherwise, we may perform wrong
44706     // optimizations as we messed with the actual expectation for the vector
44707     // boolean values.
44708     for (SDNode *U : Cond->uses()) {
44709       if (U->getOpcode() == X86ISD::BLENDV)
44710         continue;
44711 
44712       SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
44713                                Cond, U->getOperand(1), U->getOperand(2));
44714       DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
44715       DCI.AddToWorklist(U);
44716     }
44717     DCI.CommitTargetLoweringOpt(TLO);
44718     return SDValue(N, 0);
44719   }
44720 
44721   // Otherwise we can still at least try to simplify multiple use bits.
44722   if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedBits, DAG))
44723     return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0), V,
44724                        N->getOperand(1), N->getOperand(2));
44725 
44726   return SDValue();
44727 }
44728 
44729 // Try to match:
44730 //   (or (and M, (sub 0, X)), (pandn M, X))
44731 // which is a special case of:
44732 //   (select M, (sub 0, X), X)
44733 // Per:
44734 // http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
44735 // We know that, if fNegate is 0 or 1:
44736 //   (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
44737 //
44738 // Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
44739 //   ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
44740 //   ( M      ? -X : X) == ((X ^   M     ) + (M & 1))
44741 // This lets us transform our vselect to:
44742 //   (add (xor X, M), (and M, 1))
44743 // And further to:
44744 //   (sub (xor X, M), M)
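// A quick sanity check of the identity with X = 5 (illustrative):
//   M == all-ones: (5 ^ -1) - (-1) = -6 + 1 = -5   (negated)
//   M == 0:        (5 ^  0) -   0  =  5            (unchanged)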
44745 static SDValue combineLogicBlendIntoConditionalNegate(
44746     EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
44747     SelectionDAG &DAG, const X86Subtarget &Subtarget) {
44748   EVT MaskVT = Mask.getValueType();
44749   assert(MaskVT.isInteger() &&
44750          DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
44751          "Mask must be zero/all-bits");
44752 
44753   if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
44754     return SDValue();
44755   if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
44756     return SDValue();
44757 
44758   auto IsNegV = [](SDNode *N, SDValue V) {
44759     return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
44760            ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
44761   };
44762 
44763   SDValue V;
44764   if (IsNegV(Y.getNode(), X))
44765     V = X;
44766   else if (IsNegV(X.getNode(), Y))
44767     V = Y;
44768   else
44769     return SDValue();
44770 
44771   SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
44772   SDValue SubOp2 = Mask;
44773 
44774   // If the negate was on the false side of the select, then
44775   // the operands of the SUB need to be swapped. PR 27251.
44776   // This is because the pattern being matched above is
44777   // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
44778   // but if the pattern matched was
44779   // (vselect M, X, (sub 0, X)), which is really the negation of the pattern
44780   // above, -(vselect M, (sub 0, X), X), and therefore the replacement
44781   // pattern also needs to be a negation of the replacement pattern above.
44782   // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
44783   // sub accomplishes the negation of the replacement pattern.
44784   if (V == Y)
44785     std::swap(SubOp1, SubOp2);
44786 
44787   SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
44788   return DAG.getBitcast(VT, Res);
44789 }
44790 
44791 static SDValue commuteSelect(SDNode *N, SelectionDAG &DAG,
44792                              const X86Subtarget &Subtarget) {
44793   if (!Subtarget.hasAVX512())
44794     return SDValue();
44795   if (N->getOpcode() != ISD::VSELECT)
44796     return SDValue();
44797 
44798   SDLoc DL(N);
44799   SDValue Cond = N->getOperand(0);
44800   SDValue LHS = N->getOperand(1);
44801   SDValue RHS = N->getOperand(2);
44802 
44803   if (canCombineAsMaskOperation(LHS, Subtarget))
44804     return SDValue();
44805 
44806   if (!canCombineAsMaskOperation(RHS, Subtarget))
44807     return SDValue();
44808 
44809   if (Cond.getOpcode() != ISD::SETCC || !Cond.hasOneUse())
44810     return SDValue();
44811 
44812   // Commute LHS and RHS to create opportunity to select mask instruction.
44813   // (vselect M, L, R) -> (vselect ~M, R, L)
44814   ISD::CondCode NewCC =
44815       ISD::getSetCCInverse(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
44816                            Cond.getOperand(0).getValueType());
44817   Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(), Cond.getOperand(0),
44818                       Cond.getOperand(1), NewCC);
44819   return DAG.getSelect(DL, LHS.getValueType(), Cond, RHS, LHS);
44820 }
44821 
44822 /// Do target-specific dag combines on SELECT and VSELECT nodes.
44823 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
44824                              TargetLowering::DAGCombinerInfo &DCI,
44825                              const X86Subtarget &Subtarget) {
44826   SDLoc DL(N);
44827   SDValue Cond = N->getOperand(0);
44828   SDValue LHS = N->getOperand(1);
44829   SDValue RHS = N->getOperand(2);
44830 
44831   // Try simplification again because we use this function to optimize
44832   // BLENDV nodes that are not handled by the generic combiner.
44833   if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
44834     return V;
44835 
44836   // When AVX512 is available, the LHS operand of a select instruction can be
44837   // folded with a mask instruction, while the RHS operand can't. Commute the
44838   // LHS and RHS of the select instruction to create the opportunity for
44839   // folding.
44840   if (SDValue V = commuteSelect(N, DAG, Subtarget))
44841     return V;
44842 
44843   EVT VT = LHS.getValueType();
44844   EVT CondVT = Cond.getValueType();
44845   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44846   bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());
44847 
44848   // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
44849   // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
44850   // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
44851   if (CondVT.isVector() && CondVT.isInteger() &&
44852       CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
44853       (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
44854       DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
44855     if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
44856                                                            DL, DAG, Subtarget))
44857       return V;
44858 
44859   // Convert vselects with constant condition into shuffles.
44860   if (CondConstantVector && DCI.isBeforeLegalizeOps() &&
44861       (N->getOpcode() == ISD::VSELECT || N->getOpcode() == X86ISD::BLENDV)) {
44862     SmallVector<int, 64> Mask;
44863     if (createShuffleMaskFromVSELECT(Mask, Cond,
44864                                      N->getOpcode() == X86ISD::BLENDV))
44865       return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
44866   }
44867 
44868   // fold vselect(cond, pshufb(x), pshufb(y)) -> or (pshufb(x), pshufb(y))
44869   // by forcing the unselected elements to zero.
44870   // TODO: Can we handle more shuffles with this?
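  // Sketch of the fold (illustrative), where lanes deselected by Cond are
  // forced to zero (0x80) in each PSHUFB mask so the final OR merges them:
  //   vselect Cond, (pshufb X, MX), (pshufb Y, MY)
  //   --> or (pshufb X, MX'), (pshufb Y, MY')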
44871   if (N->getOpcode() == ISD::VSELECT && CondVT.isVector() &&
44872       LHS.getOpcode() == X86ISD::PSHUFB && RHS.getOpcode() == X86ISD::PSHUFB &&
44873       LHS.hasOneUse() && RHS.hasOneUse()) {
44874     MVT SimpleVT = VT.getSimpleVT();
44875     SmallVector<SDValue, 1> LHSOps, RHSOps;
44876     SmallVector<int, 64> LHSMask, RHSMask, CondMask;
44877     if (createShuffleMaskFromVSELECT(CondMask, Cond) &&
44878         getTargetShuffleMask(LHS.getNode(), SimpleVT, true, LHSOps, LHSMask) &&
44879         getTargetShuffleMask(RHS.getNode(), SimpleVT, true, RHSOps, RHSMask)) {
44880       int NumElts = VT.getVectorNumElements();
44881       for (int i = 0; i != NumElts; ++i) {
44882         // getConstVector sets negative shuffle mask values as undef, so ensure
44883         // we hardcode SM_SentinelZero values to zero (0x80).
44884         if (CondMask[i] < NumElts) {
44885           LHSMask[i] = isUndefOrZero(LHSMask[i]) ? 0x80 : LHSMask[i];
44886           RHSMask[i] = 0x80;
44887         } else {
44888           LHSMask[i] = 0x80;
44889           RHSMask[i] = isUndefOrZero(RHSMask[i]) ? 0x80 : RHSMask[i];
44890         }
44891       }
44892       LHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, LHS.getOperand(0),
44893                         getConstVector(LHSMask, SimpleVT, DAG, DL, true));
44894       RHS = DAG.getNode(X86ISD::PSHUFB, DL, VT, RHS.getOperand(0),
44895                         getConstVector(RHSMask, SimpleVT, DAG, DL, true));
44896       return DAG.getNode(ISD::OR, DL, VT, LHS, RHS);
44897     }
44898   }
44899 
44900   // If we have SSE[12] support, try to form min/max nodes. SSE min/max
44901   // instructions match the semantics of the common C idiom x<y?x:y but not
44902   // x<=y?x:y, because of how they handle negative zero (which can be
44903   // ignored in unsafe-math mode).
44904   // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
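  // E.g. (illustrative) with x = +0.0, y = -0.0: the idiom "x <= y ? x : y"
  // yields +0.0 (the zeros compare equal), but a MINSS/MINPS of (x, y) returns
  // its second operand, -0.0, so only the strict "<" form maps directly.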
44905   if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
44906       VT != MVT::f80 && VT != MVT::f128 && !isSoftF16(VT, Subtarget) &&
44907       (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
44908       (Subtarget.hasSSE2() ||
44909        (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
44910     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
44911 
44912     unsigned Opcode = 0;
44913     // Check for x CC y ? x : y.
44914     if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
44915         DAG.isEqualTo(RHS, Cond.getOperand(1))) {
44916       switch (CC) {
44917       default: break;
44918       case ISD::SETULT:
44919         // Converting this to a min would handle NaNs incorrectly, and swapping
44920         // the operands would cause it to handle comparisons between positive
44921         // and negative zero incorrectly.
44922         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
44923           if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44924               !(DAG.isKnownNeverZeroFloat(LHS) ||
44925                 DAG.isKnownNeverZeroFloat(RHS)))
44926             break;
44927           std::swap(LHS, RHS);
44928         }
44929         Opcode = X86ISD::FMIN;
44930         break;
44931       case ISD::SETOLE:
44932         // Converting this to a min would handle comparisons between positive
44933         // and negative zero incorrectly.
44934         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44935             !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
44936           break;
44937         Opcode = X86ISD::FMIN;
44938         break;
44939       case ISD::SETULE:
44940         // Converting this to a min would handle both negative zeros and NaNs
44941         // incorrectly, but we can swap the operands to fix both.
44942         std::swap(LHS, RHS);
44943         [[fallthrough]];
44944       case ISD::SETOLT:
44945       case ISD::SETLT:
44946       case ISD::SETLE:
44947         Opcode = X86ISD::FMIN;
44948         break;
44949 
44950       case ISD::SETOGE:
44951         // Converting this to a max would handle comparisons between positive
44952         // and negative zero incorrectly.
44953         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44954             !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
44955           break;
44956         Opcode = X86ISD::FMAX;
44957         break;
44958       case ISD::SETUGT:
44959         // Converting this to a max would handle NaNs incorrectly, and swapping
44960         // the operands would cause it to handle comparisons between positive
44961         // and negative zero incorrectly.
44962         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
44963           if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44964               !(DAG.isKnownNeverZeroFloat(LHS) ||
44965                 DAG.isKnownNeverZeroFloat(RHS)))
44966             break;
44967           std::swap(LHS, RHS);
44968         }
44969         Opcode = X86ISD::FMAX;
44970         break;
44971       case ISD::SETUGE:
44972         // Converting this to a max would handle both negative zeros and NaNs
44973         // incorrectly, but we can swap the operands to fix both.
44974         std::swap(LHS, RHS);
44975         [[fallthrough]];
44976       case ISD::SETOGT:
44977       case ISD::SETGT:
44978       case ISD::SETGE:
44979         Opcode = X86ISD::FMAX;
44980         break;
44981       }
44982     // Check for x CC y ? y : x -- a min/max with reversed arms.
44983     } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
44984                DAG.isEqualTo(RHS, Cond.getOperand(0))) {
44985       switch (CC) {
44986       default: break;
44987       case ISD::SETOGE:
44988         // Converting this to a min would handle comparisons between positive
44989         // and negative zero incorrectly, and swapping the operands would
44990         // cause it to handle NaNs incorrectly.
44991         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
44992             !(DAG.isKnownNeverZeroFloat(LHS) ||
44993               DAG.isKnownNeverZeroFloat(RHS))) {
44994           if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
44995             break;
44996           std::swap(LHS, RHS);
44997         }
44998         Opcode = X86ISD::FMIN;
44999         break;
45000       case ISD::SETUGT:
45001         // Converting this to a min would handle NaNs incorrectly.
45002         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45003           break;
45004         Opcode = X86ISD::FMIN;
45005         break;
45006       case ISD::SETUGE:
45007         // Converting this to a min would handle both negative zeros and NaNs
45008         // incorrectly, but we can swap the operands to fix both.
45009         std::swap(LHS, RHS);
45010         [[fallthrough]];
45011       case ISD::SETOGT:
45012       case ISD::SETGT:
45013       case ISD::SETGE:
45014         Opcode = X86ISD::FMIN;
45015         break;
45016 
45017       case ISD::SETULT:
45018         // Converting this to a max would handle NaNs incorrectly.
45019         if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45020           break;
45021         Opcode = X86ISD::FMAX;
45022         break;
45023       case ISD::SETOLE:
45024         // Converting this to a max would handle comparisons between positive
45025         // and negative zero incorrectly, and swapping the operands would
45026         // cause it to handle NaNs incorrectly.
45027         if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
45028             !DAG.isKnownNeverZeroFloat(LHS) &&
45029             !DAG.isKnownNeverZeroFloat(RHS)) {
45030           if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
45031             break;
45032           std::swap(LHS, RHS);
45033         }
45034         Opcode = X86ISD::FMAX;
45035         break;
45036       case ISD::SETULE:
45037         // Converting this to a max would handle both negative zeros and NaNs
45038         // incorrectly, but we can swap the operands to fix both.
45039         std::swap(LHS, RHS);
45040         [[fallthrough]];
45041       case ISD::SETOLT:
45042       case ISD::SETLT:
45043       case ISD::SETLE:
45044         Opcode = X86ISD::FMAX;
45045         break;
45046       }
45047     }
45048 
45049     if (Opcode)
45050       return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
45051   }
45052 
45053   // Some mask scalar intrinsics rely on checking if only one bit is set
45054   // and implement it in C code like this:
45055   // A[0] = (U & 1) ? A[0] : W[0];
45056   // This creates some redundant instructions that break pattern matching.
45057   // fold (select (setcc (and X, 1), 0, seteq), Y, Z) -> (select (and X, 1), Z, Y)
45058   if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
45059       Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
45060     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45061     SDValue AndNode = Cond.getOperand(0);
45062     if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
45063         isNullConstant(Cond.getOperand(1)) &&
45064         isOneConstant(AndNode.getOperand(1))) {
45065       // LHS and RHS swapped due to
45066       // setcc outputting 1 when AND resulted in 0 and vice versa.
45067       AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
45068       return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
45069     }
45070   }
45071 
45072   // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
45073   // lowering on KNL. In this case we convert it to
45074   // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
45075   // The same situation applies to all vectors of i8 and i16 elements without BWI.
45076   // Make sure we extend these even before type legalization gets a chance to
45077   // split wide vectors.
45078   // Since SKX these selects have a proper lowering.
45079   if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
45080       CondVT.getVectorElementType() == MVT::i1 &&
45081       (VT.getVectorElementType() == MVT::i8 ||
45082        VT.getVectorElementType() == MVT::i16)) {
45083     Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
45084     return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
45085   }
45086 
45087   // AVX512 - Extend select with zero to merge with target shuffle.
45088   // select(mask, extract_subvector(shuffle(x)), zero) -->
45089   // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
45090   // TODO - support non target shuffles as well.
45091   if (Subtarget.hasAVX512() && CondVT.isVector() &&
45092       CondVT.getVectorElementType() == MVT::i1) {
45093     auto SelectableOp = [&TLI](SDValue Op) {
45094       return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
45095              isTargetShuffle(Op.getOperand(0).getOpcode()) &&
45096              isNullConstant(Op.getOperand(1)) &&
45097              TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
45098              Op.hasOneUse() && Op.getOperand(0).hasOneUse();
45099     };
45100 
45101     bool SelectableLHS = SelectableOp(LHS);
45102     bool SelectableRHS = SelectableOp(RHS);
45103     bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
45104     bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());
45105 
45106     if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
45107       EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
45108                                 : RHS.getOperand(0).getValueType();
45109       EVT SrcCondVT = SrcVT.changeVectorElementType(MVT::i1);
45110       LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
45111                             VT.getSizeInBits());
45112       RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
45113                             VT.getSizeInBits());
45114       Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
45115                          DAG.getUNDEF(SrcCondVT), Cond,
45116                          DAG.getIntPtrConstant(0, DL));
45117       SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
45118       return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
45119     }
45120   }
45121 
45122   if (SDValue V = combineSelectOfTwoConstants(N, DAG))
45123     return V;
45124 
45125   if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
45126       Cond.hasOneUse()) {
45127     EVT CondVT = Cond.getValueType();
45128     SDValue Cond0 = Cond.getOperand(0);
45129     SDValue Cond1 = Cond.getOperand(1);
45130     ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
45131 
45132     // Canonicalize min/max:
45133     // (x > 0) ? x : 0 -> (x >= 0) ? x : 0
45134     // (x < -1) ? x : -1 -> (x <= -1) ? x : -1
45135     // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
45136     // the need for an extra compare against zero. e.g.
45137     // (a - b) > 0 ? (a - b) : 0 -> (a - b) >= 0 ? (a - b) : 0
45138     // subl   %esi, %edi
45139     // testl  %edi, %edi
45140     // movl   $0, %eax
45141     // cmovgl %edi, %eax
45142     // =>
45143     // xorl   %eax, %eax
45144     // subl   %esi, %edi
45145     // cmovsl %eax, %edi
45146     //
45147     // We can also canonicalize
45148     //  (x s> 1) ? x : 1 -> (x s>= 1) ? x : 1 -> (x s> 0) ? x : 1
45149     //  (x u> 1) ? x : 1 -> (x u>= 1) ? x : 1 -> (x != 0) ? x : 1
45150     // This allows the use of a test instruction for the compare.
45151     if (LHS == Cond0 && RHS == Cond1) {
45152       if ((CC == ISD::SETGT && (isNullConstant(RHS) || isOneConstant(RHS))) ||
45153           (CC == ISD::SETLT && isAllOnesConstant(RHS))) {
45154         ISD::CondCode NewCC = CC == ISD::SETGT ? ISD::SETGE : ISD::SETLE;
45155         Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
45156         return DAG.getSelect(DL, VT, Cond, LHS, RHS);
45157       }
45158       if (CC == ISD::SETUGT && isOneConstant(RHS)) {
45159         ISD::CondCode NewCC = ISD::SETUGE;
45160         Cond = DAG.getSetCC(SDLoc(Cond), CondVT, Cond0, Cond1, NewCC);
45161         return DAG.getSelect(DL, VT, Cond, LHS, RHS);
45162       }
45163     }
45164 
45165     // Similar to DAGCombine's select(or(CC0,CC1),X,Y) fold but for legal types.
45166     // fold eq + gt/lt nested selects into ge/le selects
45167     // select (cmpeq Cond0, Cond1), LHS, (select (cmpugt Cond0, Cond1), LHS, Y)
45168     // --> (select (cmpuge Cond0, Cond1), LHS, Y)
45169     // select (cmpslt Cond0, Cond1), LHS, (select (cmpeq Cond0, Cond1), LHS, Y)
45170     // --> (select (cmpsle Cond0, Cond1), LHS, Y)
45171     // .. etc ..
45172     if (RHS.getOpcode() == ISD::SELECT && RHS.getOperand(1) == LHS &&
45173         RHS.getOperand(0).getOpcode() == ISD::SETCC) {
45174       SDValue InnerSetCC = RHS.getOperand(0);
45175       ISD::CondCode InnerCC =
45176           cast<CondCodeSDNode>(InnerSetCC.getOperand(2))->get();
45177       if ((CC == ISD::SETEQ || InnerCC == ISD::SETEQ) &&
45178           Cond0 == InnerSetCC.getOperand(0) &&
45179           Cond1 == InnerSetCC.getOperand(1)) {
45180         ISD::CondCode NewCC;
45181         switch (CC == ISD::SETEQ ? InnerCC : CC) {
45182         case ISD::SETGT:  NewCC = ISD::SETGE; break;
45183         case ISD::SETLT:  NewCC = ISD::SETLE; break;
45184         case ISD::SETUGT: NewCC = ISD::SETUGE; break;
45185         case ISD::SETULT: NewCC = ISD::SETULE; break;
45186         default: NewCC = ISD::SETCC_INVALID; break;
45187         }
45188         if (NewCC != ISD::SETCC_INVALID) {
45189           Cond = DAG.getSetCC(DL, CondVT, Cond0, Cond1, NewCC);
45190           return DAG.getSelect(DL, VT, Cond, LHS, RHS.getOperand(2));
45191         }
45192       }
45193     }
45194   }
45195 
45196   // Check if the first operand is all zeros and Cond type is vXi1.
45197   // If this is an AVX512 target, we can improve the use of zero masking by
45198   // swapping the operands and inverting the condition.
45199   if (N->getOpcode() == ISD::VSELECT && Cond.hasOneUse() &&
45200       Subtarget.hasAVX512() && CondVT.getVectorElementType() == MVT::i1 &&
45201       ISD::isBuildVectorAllZeros(LHS.getNode()) &&
45202       !ISD::isBuildVectorAllZeros(RHS.getNode())) {
45203     // Invert the cond to not(cond) : xor(op,allones)=not(op)
45204     SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
45205     // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
45206     return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
45207   }
45208 
45209   // Attempt to convert a (vXi1 bitcast(iX Cond)) selection mask before it might
45210   // get split by legalization.
45211   if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::BITCAST &&
45212       CondVT.getVectorElementType() == MVT::i1 &&
45213       TLI.isTypeLegal(VT.getScalarType())) {
45214     EVT ExtCondVT = VT.changeVectorElementTypeToInteger();
45215     if (SDValue ExtCond = combineToExtendBoolVectorInReg(
45216             ISD::SIGN_EXTEND, DL, ExtCondVT, Cond, DAG, DCI, Subtarget)) {
45217       ExtCond = DAG.getNode(ISD::TRUNCATE, DL, CondVT, ExtCond);
45218       return DAG.getSelect(DL, VT, ExtCond, LHS, RHS);
45219     }
45220   }
45221 
45222   // Early exit check
45223   if (!TLI.isTypeLegal(VT) || isSoftF16(VT, Subtarget))
45224     return SDValue();
45225 
45226   if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
45227     return V;
45228 
45229   if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
45230     return V;
45231 
45232   if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
45233     return V;
45234 
45235   // select(~Cond, X, Y) -> select(Cond, Y, X)
45236   if (CondVT.getScalarType() != MVT::i1) {
45237     if (SDValue CondNot = IsNOT(Cond, DAG))
45238       return DAG.getNode(N->getOpcode(), DL, VT,
45239                          DAG.getBitcast(CondVT, CondNot), RHS, LHS);
45240 
45241     // pcmpgt(X, -1) -> pcmpgt(0, X) to help select/blendv just use the
45242     // signbit.
45243     if (Cond.getOpcode() == X86ISD::PCMPGT &&
45244         ISD::isBuildVectorAllOnes(Cond.getOperand(1).getNode()) &&
45245         Cond.hasOneUse()) {
45246       Cond = DAG.getNode(X86ISD::PCMPGT, DL, CondVT,
45247                          DAG.getConstant(0, DL, CondVT), Cond.getOperand(0));
45248       return DAG.getNode(N->getOpcode(), DL, VT, Cond, RHS, LHS);
45249     }
45250   }
45251 
45252   // Try to optimize vXi1 selects if both operands are either all constants or
45253   // bitcasts from scalar integer type. In that case we can convert the operands
45254   // to integer and use an integer select which will be converted to a CMOV.
45255   // We need to take a little bit of care to avoid creating an i64 type after
45256   // type legalization.
45257   if (N->getOpcode() == ISD::SELECT && VT.isVector() &&
45258       VT.getVectorElementType() == MVT::i1 &&
45259       (DCI.isBeforeLegalize() || (VT != MVT::v64i1 || Subtarget.is64Bit()))) {
45260     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
45261     if (DCI.isBeforeLegalize() || TLI.isTypeLegal(IntVT)) {
45262       bool LHSIsConst = ISD::isBuildVectorOfConstantSDNodes(LHS.getNode());
45263       bool RHSIsConst = ISD::isBuildVectorOfConstantSDNodes(RHS.getNode());
45264 
45265       if ((LHSIsConst || (LHS.getOpcode() == ISD::BITCAST &&
45266                           LHS.getOperand(0).getValueType() == IntVT)) &&
45267           (RHSIsConst || (RHS.getOpcode() == ISD::BITCAST &&
45268                           RHS.getOperand(0).getValueType() == IntVT))) {
45269         if (LHSIsConst)
45270           LHS = combinevXi1ConstantToInteger(LHS, DAG);
45271         else
45272           LHS = LHS.getOperand(0);
45273 
45274         if (RHSIsConst)
45275           RHS = combinevXi1ConstantToInteger(RHS, DAG);
45276         else
45277           RHS = RHS.getOperand(0);
45278 
45279         SDValue Select = DAG.getSelect(DL, IntVT, Cond, LHS, RHS);
45280         return DAG.getBitcast(VT, Select);
45281       }
45282     }
45283   }
45284 
45285   // If this is "((X & C) == 0) ? Y : Z" and C is a constant mask vector of
45286   // single bits, then invert the predicate and swap the select operands.
45287   // This can lower using a vector shift bit-hack rather than mask and compare.
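  // For example (illustrative only): with a per-lane mask C = <1,2,4,8>,
  //   vselect ((X & C) == 0), Y, Z  -->  vselect ((X << C') < 0), Z, Y
  // where C' shifts each mask bit up into the sign bit, so the blend can key
  // off the sign bit directly (e.g. BLENDV) instead of a PAND+PCMPEQ sequence.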
45288   if (DCI.isBeforeLegalize() && !Subtarget.hasAVX512() &&
45289       N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
45290       Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1 &&
45291       Cond.getOperand(0).getOpcode() == ISD::AND &&
45292       isNullOrNullSplat(Cond.getOperand(1)) &&
45293       cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
45294       Cond.getOperand(0).getValueType() == VT) {
45295     // The 'and' mask must be composed of power-of-2 constants.
45296     SDValue And = Cond.getOperand(0);
45297     auto *C = isConstOrConstSplat(And.getOperand(1));
45298     if (C && C->getAPIntValue().isPowerOf2()) {
45299       // vselect (X & C == 0), LHS, RHS --> vselect (X & C != 0), RHS, LHS
45300       SDValue NotCond =
45301           DAG.getSetCC(DL, CondVT, And, Cond.getOperand(1), ISD::SETNE);
45302       return DAG.getSelect(DL, VT, NotCond, RHS, LHS);
45303     }
45304 
45305     // If we have a non-splat but still powers-of-2 mask, AVX1 can use pmulld
45306     // and AVX2 can use vpsllv{dq}. 8-bit lacks a proper shift or multiply.
45307     // 16-bit lacks a proper blendv.
45308     unsigned EltBitWidth = VT.getScalarSizeInBits();
45309     bool CanShiftBlend =
45310         TLI.isTypeLegal(VT) && ((Subtarget.hasAVX() && EltBitWidth == 32) ||
45311                                 (Subtarget.hasAVX2() && EltBitWidth == 64) ||
45312                                 (Subtarget.hasXOP()));
45313     if (CanShiftBlend &&
45314         ISD::matchUnaryPredicate(And.getOperand(1), [](ConstantSDNode *C) {
45315           return C->getAPIntValue().isPowerOf2();
45316         })) {
45317       // Create a left-shift constant to get the mask bits over to the sign-bit.
45318       SDValue Mask = And.getOperand(1);
45319       SmallVector<int, 32> ShlVals;
45320       for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
45321         auto *MaskVal = cast<ConstantSDNode>(Mask.getOperand(i));
45322         ShlVals.push_back(EltBitWidth - 1 -
45323                           MaskVal->getAPIntValue().exactLogBase2());
45324       }
45325       // vsel ((X & C) == 0), LHS, RHS --> vsel ((shl X, C') < 0), RHS, LHS
45326       SDValue ShlAmt = getConstVector(ShlVals, VT.getSimpleVT(), DAG, DL);
45327       SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And.getOperand(0), ShlAmt);
45328       SDValue NewCond =
45329           DAG.getSetCC(DL, CondVT, Shl, Cond.getOperand(1), ISD::SETLT);
45330       return DAG.getSelect(DL, VT, NewCond, RHS, LHS);
45331     }
45332   }
45333 
45334   return SDValue();
45335 }
45336 
45337 /// Combine:
45338 ///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
45339 /// to:
45340 ///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
45341 /// i.e., reusing the EFLAGS produced by the LOCKed instruction.
45342 /// Note that this is only legal for some op/cc combinations.
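///
/// Illustrative sketch (assumed lowering, not taken from a test): for
///   if (__atomic_fetch_add(&x, 1, __ATOMIC_SEQ_CST) < 0) ...
/// the branch can consume the EFLAGS produced by the locked RMW directly:
///   lock addl $1, (%rdi)
///   jle .Ltaken            # old value < 0  <=>  old value + 1 <= 0
/// instead of re-reading the result and issuing a separate compare.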
45343 static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
45344                                        SelectionDAG &DAG,
45345                                        const X86Subtarget &Subtarget) {
45346   // This combine only operates on CMP-like nodes.
45347   if (!(Cmp.getOpcode() == X86ISD::CMP ||
45348         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
45349     return SDValue();
45350 
45351   // Can't replace the cmp if it has more uses than the one we're looking at.
45352   // FIXME: We would like to be able to handle this, but would need to make sure
45353   // all uses were updated.
45354   if (!Cmp.hasOneUse())
45355     return SDValue();
45356 
45357   // This only applies to variations of the common case:
45358   //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
45359   //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
45360   //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
45361   //   (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
45362   // With the proper condition codes (see below), overflow is accounted for.
45363 
45364   // FIXME: We can generalize both constraints:
45365   // - XOR/OR/AND (if they were made to survive AtomicExpand)
45366   // - LHS != 1
45367   // if the result is compared.
45368 
45369   SDValue CmpLHS = Cmp.getOperand(0);
45370   SDValue CmpRHS = Cmp.getOperand(1);
45371   EVT CmpVT = CmpLHS.getValueType();
45372 
45373   if (!CmpLHS.hasOneUse())
45374     return SDValue();
45375 
45376   unsigned Opc = CmpLHS.getOpcode();
45377   if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
45378     return SDValue();
45379 
45380   SDValue OpRHS = CmpLHS.getOperand(2);
45381   auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
45382   if (!OpRHSC)
45383     return SDValue();
45384 
45385   APInt Addend = OpRHSC->getAPIntValue();
45386   if (Opc == ISD::ATOMIC_LOAD_SUB)
45387     Addend = -Addend;
45388 
45389   auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
45390   if (!CmpRHSC)
45391     return SDValue();
45392 
45393   APInt Comparison = CmpRHSC->getAPIntValue();
45394   APInt NegAddend = -Addend;
45395 
45396   // See if we can adjust the CC to make the comparison match the negated
45397   // addend.
45398   if (Comparison != NegAddend) {
45399     APInt IncComparison = Comparison + 1;
45400     if (IncComparison == NegAddend) {
45401       if (CC == X86::COND_A && !Comparison.isMaxValue()) {
45402         Comparison = IncComparison;
45403         CC = X86::COND_AE;
45404       } else if (CC == X86::COND_LE && !Comparison.isMaxSignedValue()) {
45405         Comparison = IncComparison;
45406         CC = X86::COND_L;
45407       }
45408     }
45409     APInt DecComparison = Comparison - 1;
45410     if (DecComparison == NegAddend) {
45411       if (CC == X86::COND_AE && !Comparison.isMinValue()) {
45412         Comparison = DecComparison;
45413         CC = X86::COND_A;
45414       } else if (CC == X86::COND_L && !Comparison.isMinSignedValue()) {
45415         Comparison = DecComparison;
45416         CC = X86::COND_LE;
45417       }
45418     }
45419   }
45420 
45421   // If the addend is the negation of the comparison value, then we can do
45422   // a full comparison by emitting the atomic arithmetic as a locked sub.
45423   if (Comparison == NegAddend) {
45424     // The CC is fine, but we need to rewrite the LHS of the comparison as an
45425     // atomic sub.
45426     auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
45427     auto AtomicSub = DAG.getAtomic(
45428         ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpVT,
45429         /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
45430         /*RHS*/ DAG.getConstant(NegAddend, SDLoc(CmpRHS), CmpVT),
45431         AN->getMemOperand());
45432     auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
45433     DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
45434     DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
45435     return LockOp;
45436   }
45437 
45438   // We can handle comparisons with zero in a number of cases by manipulating
45439   // the CC used.
45440   if (!Comparison.isZero())
45441     return SDValue();
45442 
45443   if (CC == X86::COND_S && Addend == 1)
45444     CC = X86::COND_LE;
45445   else if (CC == X86::COND_NS && Addend == 1)
45446     CC = X86::COND_G;
45447   else if (CC == X86::COND_G && Addend == -1)
45448     CC = X86::COND_GE;
45449   else if (CC == X86::COND_LE && Addend == -1)
45450     CC = X86::COND_L;
45451   else
45452     return SDValue();
45453 
45454   SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
45455   DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0), DAG.getUNDEF(CmpVT));
45456   DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
45457   return LockOp;
45458 }
45459 
45460 // Check whether a boolean test is testing a boolean value generated by
45461 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
45462 // code.
45463 //
45464 // Simplify the following patterns:
45465 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
45466 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
45467 // to (Op EFLAGS Cond)
45468 //
45469 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
45470 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
45471 // to (Op EFLAGS !Cond)
45472 //
45473 // where Op could be BRCOND or CMOV.
45474 //
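//
// Illustrative sketch (not from the source): for
//   %s = (X86setcc COND_E, EFLAGS)
//   (brcond (cmp %s, 0), COND_NE)
// the compare of the i8 setcc result against 0 is redundant, so the branch can
// test EFLAGS with COND_E directly; the "against 1" / COND_E forms take the
// opposite condition instead.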
45475 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
45476   // This combine only operates on CMP-like nodes.
45477   if (!(Cmp.getOpcode() == X86ISD::CMP ||
45478         (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
45479     return SDValue();
45480 
45481   // Quit if not used as a boolean value.
45482   if (CC != X86::COND_E && CC != X86::COND_NE)
45483     return SDValue();
45484 
45485   // Check the CMP operands. One of them should be 0 or 1 and the other should
45486   // be a SetCC or a value extended from it.
45487   SDValue Op1 = Cmp.getOperand(0);
45488   SDValue Op2 = Cmp.getOperand(1);
45489 
45490   SDValue SetCC;
45491   const ConstantSDNode *C = nullptr;
45492   bool needOppositeCond = (CC == X86::COND_E);
45493   bool checkAgainstTrue = false; // Is it a comparison against 1?
45494 
45495   if ((C = dyn_cast<ConstantSDNode>(Op1)))
45496     SetCC = Op2;
45497   else if ((C = dyn_cast<ConstantSDNode>(Op2)))
45498     SetCC = Op1;
45499   else // Quit if neither operand is a constant.
45500     return SDValue();
45501 
45502   if (C->getZExtValue() == 1) {
45503     needOppositeCond = !needOppositeCond;
45504     checkAgainstTrue = true;
45505   } else if (C->getZExtValue() != 0)
45506     // Quit if the constant is neither 0 nor 1.
45507     return SDValue();
45508 
45509   bool truncatedToBoolWithAnd = false;
45510   // Skip (zext $x), (trunc $x), or (and $x, 1) node.
45511   while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
45512          SetCC.getOpcode() == ISD::TRUNCATE ||
45513          SetCC.getOpcode() == ISD::AND) {
45514     if (SetCC.getOpcode() == ISD::AND) {
45515       int OpIdx = -1;
45516       if (isOneConstant(SetCC.getOperand(0)))
45517         OpIdx = 1;
45518       if (isOneConstant(SetCC.getOperand(1)))
45519         OpIdx = 0;
45520       if (OpIdx < 0)
45521         break;
45522       SetCC = SetCC.getOperand(OpIdx);
45523       truncatedToBoolWithAnd = true;
45524     } else
45525       SetCC = SetCC.getOperand(0);
45526   }
45527 
45528   switch (SetCC.getOpcode()) {
45529   case X86ISD::SETCC_CARRY:
45530     // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
45531     // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
45532     // i.e. it's a comparison against true but the result of SETCC_CARRY is not
45533     // truncated to i1 using 'and'.
45534     if (checkAgainstTrue && !truncatedToBoolWithAnd)
45535       break;
45536     assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
45537            "Invalid use of SETCC_CARRY!");
45538     [[fallthrough]];
45539   case X86ISD::SETCC:
45540     // Set the condition code or opposite one if necessary.
45541     CC = X86::CondCode(SetCC.getConstantOperandVal(0));
45542     if (needOppositeCond)
45543       CC = X86::GetOppositeBranchCondition(CC);
45544     return SetCC.getOperand(1);
45545   case X86ISD::CMOV: {
45546     // Check whether the false/true values are canonical, i.e. 0 or 1.
45547     ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
45548     ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
45549     // Quit if true value is not a constant.
45550     if (!TVal)
45551       return SDValue();
45552     // Quit if false value is not a constant.
45553     if (!FVal) {
45554       SDValue Op = SetCC.getOperand(0);
45555       // Skip 'zext' or 'trunc' node.
45556       if (Op.getOpcode() == ISD::ZERO_EXTEND ||
45557           Op.getOpcode() == ISD::TRUNCATE)
45558         Op = Op.getOperand(0);
45559       // A special case for rdrand/rdseed, which produce 0 when the false
45560       // condition occurs.
45561       if ((Op.getOpcode() != X86ISD::RDRAND &&
45562            Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
45563         return SDValue();
45564     }
45565     // Quit if false value is not the constant 0 or 1.
45566     bool FValIsFalse = true;
45567     if (FVal && FVal->getZExtValue() != 0) {
45568       if (FVal->getZExtValue() != 1)
45569         return SDValue();
45570       // If FVal is 1, opposite cond is needed.
45571       needOppositeCond = !needOppositeCond;
45572       FValIsFalse = false;
45573     }
45574     // Quit if TVal is not the constant opposite of FVal.
45575     if (FValIsFalse && TVal->getZExtValue() != 1)
45576       return SDValue();
45577     if (!FValIsFalse && TVal->getZExtValue() != 0)
45578       return SDValue();
45579     CC = X86::CondCode(SetCC.getConstantOperandVal(2));
45580     if (needOppositeCond)
45581       CC = X86::GetOppositeBranchCondition(CC);
45582     return SetCC.getOperand(3);
45583   }
45584   }
45585 
45586   return SDValue();
45587 }
45588 
45589 /// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
45590 /// Match:
45591 ///   (X86or (X86setcc) (X86setcc))
45592 ///   (X86cmp (and (X86setcc) (X86setcc)), 0)
45593 static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
45594                                            X86::CondCode &CC1, SDValue &Flags,
45595                                            bool &isAnd) {
45596   if (Cond->getOpcode() == X86ISD::CMP) {
45597     if (!isNullConstant(Cond->getOperand(1)))
45598       return false;
45599 
45600     Cond = Cond->getOperand(0);
45601   }
45602 
45603   isAnd = false;
45604 
45605   SDValue SetCC0, SetCC1;
45606   switch (Cond->getOpcode()) {
45607   default: return false;
45608   case ISD::AND:
45609   case X86ISD::AND:
45610     isAnd = true;
45611     [[fallthrough]];
45612   case ISD::OR:
45613   case X86ISD::OR:
45614     SetCC0 = Cond->getOperand(0);
45615     SetCC1 = Cond->getOperand(1);
45616     break;
45617   }
45618 
45619   // Make sure we have SETCC nodes, using the same flags value.
45620   if (SetCC0.getOpcode() != X86ISD::SETCC ||
45621       SetCC1.getOpcode() != X86ISD::SETCC ||
45622       SetCC0->getOperand(1) != SetCC1->getOperand(1))
45623     return false;
45624 
45625   CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
45626   CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
45627   Flags = SetCC0->getOperand(1);
45628   return true;
45629 }
45630 
45631 // When legalizing carry, we create carries via add X, -1.
45632 // If that comes from an actual carry (via setcc), we use the
45633 // carry directly.
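//
// Illustrative sketch (assumed pattern, not from a test): carry legalization
// may produce
//   (X86add Y, -1), Y = zext (X86setcc COND_B, EFLAGS)
// where the "add -1" only exists to turn the 0/1 setcc back into a carry; in
// that case the ADC/SBB user can consume the original EFLAGS value directly.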
45634 static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
45635   if (EFLAGS.getOpcode() == X86ISD::ADD) {
45636     if (isAllOnesConstant(EFLAGS.getOperand(1))) {
45637       bool FoundAndLSB = false;
45638       SDValue Carry = EFLAGS.getOperand(0);
45639       while (Carry.getOpcode() == ISD::TRUNCATE ||
45640              Carry.getOpcode() == ISD::ZERO_EXTEND ||
45641              (Carry.getOpcode() == ISD::AND &&
45642               isOneConstant(Carry.getOperand(1)))) {
45643         FoundAndLSB |= Carry.getOpcode() == ISD::AND;
45644         Carry = Carry.getOperand(0);
45645       }
45646       if (Carry.getOpcode() == X86ISD::SETCC ||
45647           Carry.getOpcode() == X86ISD::SETCC_CARRY) {
45648         // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
45649         uint64_t CarryCC = Carry.getConstantOperandVal(0);
45650         SDValue CarryOp1 = Carry.getOperand(1);
45651         if (CarryCC == X86::COND_B)
45652           return CarryOp1;
45653         if (CarryCC == X86::COND_A) {
45654           // Try to convert COND_A into COND_B in an attempt to facilitate
45655           // materializing "setb reg".
45656           //
45657           // Do not flip "e > c", where "c" is a constant, because Cmp
45658           // instruction cannot take an immediate as its first operand.
45659           //
45660           if (CarryOp1.getOpcode() == X86ISD::SUB &&
45661               CarryOp1.getNode()->hasOneUse() &&
45662               CarryOp1.getValueType().isInteger() &&
45663               !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
45664             SDValue SubCommute =
45665                 DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
45666                             CarryOp1.getOperand(1), CarryOp1.getOperand(0));
45667             return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
45668           }
45669         }
45670         // If this is a check of the z flag of an add with 1, switch to the
45671         // C flag.
45672         if (CarryCC == X86::COND_E &&
45673             CarryOp1.getOpcode() == X86ISD::ADD &&
45674             isOneConstant(CarryOp1.getOperand(1)))
45675           return CarryOp1;
45676       } else if (FoundAndLSB) {
45677         SDLoc DL(Carry);
45678         SDValue BitNo = DAG.getConstant(0, DL, Carry.getValueType());
45679         if (Carry.getOpcode() == ISD::SRL) {
45680           BitNo = Carry.getOperand(1);
45681           Carry = Carry.getOperand(0);
45682         }
45683         return getBT(Carry, BitNo, DL, DAG);
45684       }
45685     }
45686   }
45687 
45688   return SDValue();
45689 }
45690 
45691 /// If we are inverting a PTEST/TESTP operand, attempt to adjust the CC
45692 /// to avoid the inversion.
45693 static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
45694                               SelectionDAG &DAG,
45695                               const X86Subtarget &Subtarget) {
45696   // TODO: Handle X86ISD::KTEST/X86ISD::KORTEST.
45697   if (EFLAGS.getOpcode() != X86ISD::PTEST &&
45698       EFLAGS.getOpcode() != X86ISD::TESTP)
45699     return SDValue();
45700 
45701   // PTEST/TESTP sets EFLAGS as:
45702   // TESTZ: ZF = (Op0 & Op1) == 0
45703   // TESTC: CF = (~Op0 & Op1) == 0
45704   // TESTNZC: ZF == 0 && CF == 0
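  // Illustrative sketch (not from the source): from the table above,
  //   TESTZ(~X, Y): ZF = (~X & Y) == 0, which is exactly TESTC(X, Y)'s CF,
  // so when an operand is known to be a NOT we can strip the NOT and swap
  // between the ZF-based and CF-based condition codes below.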
45705   MVT VT = EFLAGS.getSimpleValueType();
45706   SDValue Op0 = EFLAGS.getOperand(0);
45707   SDValue Op1 = EFLAGS.getOperand(1);
45708   MVT OpVT = Op0.getSimpleValueType();
45709 
45710   // TEST*(~X,Y) == TEST*(X,Y)
45711   if (SDValue NotOp0 = IsNOT(Op0, DAG)) {
45712     X86::CondCode InvCC;
45713     switch (CC) {
45714     case X86::COND_B:
45715       // testc -> testz.
45716       InvCC = X86::COND_E;
45717       break;
45718     case X86::COND_AE:
45719       // !testc -> !testz.
45720       InvCC = X86::COND_NE;
45721       break;
45722     case X86::COND_E:
45723       // testz -> testc.
45724       InvCC = X86::COND_B;
45725       break;
45726     case X86::COND_NE:
45727       // !testz -> !testc.
45728       InvCC = X86::COND_AE;
45729       break;
45730     case X86::COND_A:
45731     case X86::COND_BE:
45732       // testnzc -> testnzc (no change).
45733       InvCC = CC;
45734       break;
45735     default:
45736       InvCC = X86::COND_INVALID;
45737       break;
45738     }
45739 
45740     if (InvCC != X86::COND_INVALID) {
45741       CC = InvCC;
45742       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45743                          DAG.getBitcast(OpVT, NotOp0), Op1);
45744     }
45745   }
45746 
45747   if (CC == X86::COND_B || CC == X86::COND_AE) {
45748     // TESTC(X,~X) == TESTC(X,-1)
45749     if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
45750       if (peekThroughBitcasts(NotOp1) == peekThroughBitcasts(Op0)) {
45751         SDLoc DL(EFLAGS);
45752         return DAG.getNode(
45753             EFLAGS.getOpcode(), DL, VT, DAG.getBitcast(OpVT, NotOp1),
45754             DAG.getBitcast(OpVT,
45755                            DAG.getAllOnesConstant(DL, NotOp1.getValueType())));
45756       }
45757     }
45758   }
45759 
45760   if (CC == X86::COND_E || CC == X86::COND_NE) {
45761     // TESTZ(X,~Y) == TESTC(Y,X)
45762     if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
45763       CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
45764       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45765                          DAG.getBitcast(OpVT, NotOp1), Op0);
45766     }
45767 
45768     if (Op0 == Op1) {
45769       SDValue BC = peekThroughBitcasts(Op0);
45770       EVT BCVT = BC.getValueType();
45771 
45772       // TESTZ(AND(X,Y),AND(X,Y)) == TESTZ(X,Y)
45773       if (BC.getOpcode() == ISD::AND || BC.getOpcode() == X86ISD::FAND) {
45774         return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45775                            DAG.getBitcast(OpVT, BC.getOperand(0)),
45776                            DAG.getBitcast(OpVT, BC.getOperand(1)));
45777       }
45778 
45779       // TESTZ(AND(~X,Y),AND(~X,Y)) == TESTC(X,Y)
45780       if (BC.getOpcode() == X86ISD::ANDNP || BC.getOpcode() == X86ISD::FANDN) {
45781         CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
45782         return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45783                            DAG.getBitcast(OpVT, BC.getOperand(0)),
45784                            DAG.getBitcast(OpVT, BC.getOperand(1)));
45785       }
45786 
45787       // If every element is an all-sign value, see if we can use TESTP/MOVMSK
45788       // to more efficiently extract the sign bits and compare that.
45789       // TODO: Handle TESTC with comparison inversion.
45790       // TODO: Can we remove SimplifyMultipleUseDemandedBits and rely on
45791       // TESTP/MOVMSK combines to make sure it's never worse than PTEST?
45792       if (BCVT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(BCVT)) {
45793         unsigned EltBits = BCVT.getScalarSizeInBits();
45794         if (DAG.ComputeNumSignBits(BC) == EltBits) {
45795           assert(VT == MVT::i32 && "Expected i32 EFLAGS comparison result");
45796           APInt SignMask = APInt::getSignMask(EltBits);
45797           const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45798           if (SDValue Res =
45799                   TLI.SimplifyMultipleUseDemandedBits(BC, SignMask, DAG)) {
45800             // For vXi16 cases we need to use pmovmskb and extract every other
45801             // sign bit.
45802             SDLoc DL(EFLAGS);
45803             if ((EltBits == 32 || EltBits == 64) && Subtarget.hasAVX()) {
45804               MVT FloatSVT = MVT::getFloatingPointVT(EltBits);
45805               MVT FloatVT =
45806                   MVT::getVectorVT(FloatSVT, OpVT.getSizeInBits() / EltBits);
45807               Res = DAG.getBitcast(FloatVT, Res);
45808               return DAG.getNode(X86ISD::TESTP, SDLoc(EFLAGS), VT, Res, Res);
45809             } else if (EltBits == 16) {
45810               MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
45811               Res = DAG.getBitcast(MovmskVT, Res);
45812               Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
45813               Res = DAG.getNode(ISD::AND, DL, MVT::i32, Res,
45814                                 DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
45815             } else {
45816               Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
45817             }
45818             return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Res,
45819                                DAG.getConstant(0, DL, MVT::i32));
45820           }
45821         }
45822       }
45823     }
45824 
45825     // TESTZ(-1,X) == TESTZ(X,X)
45826     if (ISD::isBuildVectorAllOnes(Op0.getNode()))
45827       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op1, Op1);
45828 
45829     // TESTZ(X,-1) == TESTZ(X,X)
45830     if (ISD::isBuildVectorAllOnes(Op1.getNode()))
45831       return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
45832 
45833     // TESTZ(OR(LO(X),HI(X)),OR(LO(Y),HI(Y))) -> TESTZ(X,Y)
45834     // TODO: Add COND_NE handling?
45835     if (CC == X86::COND_E && OpVT.is128BitVector() && Subtarget.hasAVX()) {
45836       SDValue Src0 = peekThroughBitcasts(Op0);
45837       SDValue Src1 = peekThroughBitcasts(Op1);
45838       if (Src0.getOpcode() == ISD::OR && Src1.getOpcode() == ISD::OR) {
45839         Src0 = getSplitVectorSrc(peekThroughBitcasts(Src0.getOperand(0)),
45840                                  peekThroughBitcasts(Src0.getOperand(1)), true);
45841         Src1 = getSplitVectorSrc(peekThroughBitcasts(Src1.getOperand(0)),
45842                                  peekThroughBitcasts(Src1.getOperand(1)), true);
45843         if (Src0 && Src1) {
45844           MVT OpVT2 = OpVT.getDoubleNumVectorElementsVT();
45845           return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
45846                              DAG.getBitcast(OpVT2, Src0),
45847                              DAG.getBitcast(OpVT2, Src1));
45848         }
45849       }
45850     }
45851   }
45852 
45853   return SDValue();
45854 }
45855 
45856 // Attempt to simplify the MOVMSK input based on the comparison type.
45857 static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
45858                                   SelectionDAG &DAG,
45859                                   const X86Subtarget &Subtarget) {
45860   // Handle eq/ne against zero (any_of).
45861   // Handle eq/ne against -1 (all_of).
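  // Illustrative sketch (not from the source):
  //   MOVMSK(V) == 0   <=>  no element of V has its sign bit set (any_of false)
  //   MOVMSK(V) == -1  <=>  every element has its sign bit set   (all_of true)
  // so the folds below rewrite the scalar compare in terms of the vector
  // source where that is cheaper.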
45862   if (!(CC == X86::COND_E || CC == X86::COND_NE))
45863     return SDValue();
45864   if (EFLAGS.getValueType() != MVT::i32)
45865     return SDValue();
45866   unsigned CmpOpcode = EFLAGS.getOpcode();
45867   if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
45868     return SDValue();
45869   auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
45870   if (!CmpConstant)
45871     return SDValue();
45872   const APInt &CmpVal = CmpConstant->getAPIntValue();
45873 
45874   SDValue CmpOp = EFLAGS.getOperand(0);
45875   unsigned CmpBits = CmpOp.getValueSizeInBits();
45876   assert(CmpBits == CmpVal.getBitWidth() && "Value size mismatch");
45877 
45878   // Peek through any truncate.
45879   if (CmpOp.getOpcode() == ISD::TRUNCATE)
45880     CmpOp = CmpOp.getOperand(0);
45881 
45882   // Bail if we don't find a MOVMSK.
45883   if (CmpOp.getOpcode() != X86ISD::MOVMSK)
45884     return SDValue();
45885 
45886   SDValue Vec = CmpOp.getOperand(0);
45887   MVT VecVT = Vec.getSimpleValueType();
45888   assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
45889          "Unexpected MOVMSK operand");
45890   unsigned NumElts = VecVT.getVectorNumElements();
45891   unsigned NumEltBits = VecVT.getScalarSizeInBits();
45892 
45893   bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isZero();
45894   bool IsAllOf = (CmpOpcode == X86ISD::SUB || CmpOpcode == X86ISD::CMP) &&
45895                  NumElts <= CmpBits && CmpVal.isMask(NumElts);
45896   if (!IsAnyOf && !IsAllOf)
45897     return SDValue();
45898 
45899   // TODO: Check more combining cases.
45900   // Here we check the number of uses of the cmp to decide whether to combine.
45901   // Currently only the "MOVMSK(CONCAT(..))" and "MOVMSK(PCMPEQ(..))" folds
45902   // below are restricted by this one-use constraint.
45903   bool IsOneUse = CmpOp.getNode()->hasOneUse();
45904 
45905   // See if we can peek through to a vector with a wider element type, if the
45906   // signbits extend down to all the sub-elements as well.
45907   // Calling MOVMSK with the wider type, avoiding the bitcast, helps expose
45908   // potential SimplifyDemandedBits/Elts cases.
45909   // If we looked through a truncate that discards bits, we can't do this
45910   // transform.
45911   // FIXME: We could do this transform for truncates that discarded bits by
45912   // inserting an AND mask between the new MOVMSK and the CMP.
45913   if (Vec.getOpcode() == ISD::BITCAST && NumElts <= CmpBits) {
45914     SDValue BC = peekThroughBitcasts(Vec);
45915     MVT BCVT = BC.getSimpleValueType();
45916     unsigned BCNumElts = BCVT.getVectorNumElements();
45917     unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
45918     if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
45919         BCNumEltBits > NumEltBits &&
45920         DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
45921       SDLoc DL(EFLAGS);
45922       APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : BCNumElts);
45923       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
45924                          DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
45925                          DAG.getConstant(CmpMask, DL, MVT::i32));
45926     }
45927   }
45928 
45929   // MOVMSK(CONCAT(X,Y)) == 0 ->  MOVMSK(OR(X,Y)).
45930   // MOVMSK(CONCAT(X,Y)) != 0 ->  MOVMSK(OR(X,Y)).
45931   // MOVMSK(CONCAT(X,Y)) == -1 ->  MOVMSK(AND(X,Y)).
45932   // MOVMSK(CONCAT(X,Y)) != -1 ->  MOVMSK(AND(X,Y)).
45933   if (VecVT.is256BitVector() && NumElts <= CmpBits && IsOneUse) {
45934     SmallVector<SDValue> Ops;
45935     if (collectConcatOps(peekThroughBitcasts(Vec).getNode(), Ops, DAG) &&
45936         Ops.size() == 2) {
45937       SDLoc DL(EFLAGS);
45938       EVT SubVT = Ops[0].getValueType().changeTypeToInteger();
45939       APInt CmpMask = APInt::getLowBitsSet(32, IsAnyOf ? 0 : NumElts / 2);
45940       SDValue V = DAG.getNode(IsAnyOf ? ISD::OR : ISD::AND, DL, SubVT,
45941                               DAG.getBitcast(SubVT, Ops[0]),
45942                               DAG.getBitcast(SubVT, Ops[1]));
45943       V = DAG.getBitcast(VecVT.getHalfNumVectorElementsVT(), V);
45944       return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
45945                          DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V),
45946                          DAG.getConstant(CmpMask, DL, MVT::i32));
45947     }
45948   }
45949 
45950   // MOVMSK(PCMPEQ(X,0)) == -1 -> PTESTZ(X,X).
45951   // MOVMSK(PCMPEQ(X,0)) != -1 -> !PTESTZ(X,X).
45952   // MOVMSK(PCMPEQ(X,Y)) == -1 -> PTESTZ(XOR(X,Y),XOR(X,Y)).
45953   // MOVMSK(PCMPEQ(X,Y)) != -1 -> !PTESTZ(XOR(X,Y),XOR(X,Y)).
45954   if (IsAllOf && Subtarget.hasSSE41() && IsOneUse) {
45955     MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
45956     SDValue BC = peekThroughBitcasts(Vec);
45957     // Ensure MOVMSK was testing every signbit of BC.
45958     if (BC.getValueType().getVectorNumElements() <= NumElts) {
45959       if (BC.getOpcode() == X86ISD::PCMPEQ) {
45960         SDValue V = DAG.getNode(ISD::XOR, SDLoc(BC), BC.getValueType(),
45961                                 BC.getOperand(0), BC.getOperand(1));
45962         V = DAG.getBitcast(TestVT, V);
45963         return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
45964       }
45965       // Check for 256-bit split vector cases.
45966       if (BC.getOpcode() == ISD::AND &&
45967           BC.getOperand(0).getOpcode() == X86ISD::PCMPEQ &&
45968           BC.getOperand(1).getOpcode() == X86ISD::PCMPEQ) {
45969         SDValue LHS = BC.getOperand(0);
45970         SDValue RHS = BC.getOperand(1);
45971         LHS = DAG.getNode(ISD::XOR, SDLoc(LHS), LHS.getValueType(),
45972                           LHS.getOperand(0), LHS.getOperand(1));
45973         RHS = DAG.getNode(ISD::XOR, SDLoc(RHS), RHS.getValueType(),
45974                           RHS.getOperand(0), RHS.getOperand(1));
45975         LHS = DAG.getBitcast(TestVT, LHS);
45976         RHS = DAG.getBitcast(TestVT, RHS);
45977         SDValue V = DAG.getNode(ISD::OR, SDLoc(EFLAGS), TestVT, LHS, RHS);
45978         return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
45979       }
45980     }
45981   }
45982 
45983   // See if we can avoid a PACKSS by calling MOVMSK on the sources.
45984   // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
45985   // sign bits prior to the comparison with zero unless we know that
45986   // the vXi16 splats the sign bit down to the lower i8 half.
45987   // TODO: Handle all_of patterns.
45988   if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
45989     SDValue VecOp0 = Vec.getOperand(0);
45990     SDValue VecOp1 = Vec.getOperand(1);
45991     bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
45992     bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
45993     // PMOVMSKB(PACKSSBW(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
45994     if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
45995       SDLoc DL(EFLAGS);
45996       SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
45997       Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
45998       Result = DAG.getZExtOrTrunc(Result, DL, MVT::i16);
45999       if (!SignExt0) {
46000         Result = DAG.getNode(ISD::AND, DL, MVT::i16, Result,
46001                              DAG.getConstant(0xAAAA, DL, MVT::i16));
46002       }
46003       return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46004                          DAG.getConstant(0, DL, MVT::i16));
46005     }
46006     // PMOVMSKB(PACKSSBW(LO(X), HI(X)))
46007     // -> PMOVMSKB(BITCAST_v32i8(X)) & 0xAAAAAAAA.
46008     if (CmpBits >= 16 && Subtarget.hasInt256() &&
46009         (IsAnyOf || (SignExt0 && SignExt1))) {
46010       if (SDValue Src = getSplitVectorSrc(VecOp0, VecOp1, true)) {
46011         SDLoc DL(EFLAGS);
46012         SDValue Result = peekThroughBitcasts(Src);
46013         if (IsAllOf && Result.getOpcode() == X86ISD::PCMPEQ &&
46014             Result.getValueType().getVectorNumElements() <= NumElts) {
46015           SDValue V = DAG.getNode(ISD::XOR, DL, Result.getValueType(),
46016                                   Result.getOperand(0), Result.getOperand(1));
46017           V = DAG.getBitcast(MVT::v4i64, V);
46018           return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
46019         }
46020         Result = DAG.getBitcast(MVT::v32i8, Result);
46021         Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
46022         unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
46023         if (!SignExt0 || !SignExt1) {
46024           assert(IsAnyOf &&
46025                  "Only perform v16i16 signmasks for any_of patterns");
46026           Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
46027                                DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
46028         }
46029         return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46030                            DAG.getConstant(CmpMask, DL, MVT::i32));
46031       }
46032     }
46033   }
46034 
46035   // MOVMSK(SHUFFLE(X,u)) -> MOVMSK(X) iff every element is referenced.
46036   // Since we peek through a bitcast, we need to be careful if the base vector
46037   // type has smaller elements than the MOVMSK type.  In that case, even if
46038   // all the elements are demanded by the shuffle mask, only the "high"
46039   // elements which have highbits that align with highbits in the MOVMSK vec
46040   // elements are actually demanded. Simplification of spurious operations
46041   // on the "low" elements takes place during other simplifications.
46042   //
46043   // For example:
46044   // MOVMSK64(BITCAST(SHUF32 X, (1,0,3,2))): even though all the elements are
46045   // demanded, swapping the elements around can change the result.
46046   //
46047   // To address this, we check that we can scale the shuffle mask to MOVMSK
46048   // element width (this will ensure "high" elements match). It's slightly
46049   // overly conservative, but fine for an edge case fold.
46050   SmallVector<int, 32> ShuffleMask, ScaledMaskUnused;
46051   SmallVector<SDValue, 2> ShuffleInputs;
46052   if (NumElts <= CmpBits &&
46053       getTargetShuffleInputs(peekThroughBitcasts(Vec), ShuffleInputs,
46054                              ShuffleMask, DAG) &&
46055       ShuffleInputs.size() == 1 && !isAnyZeroOrUndef(ShuffleMask) &&
46056       ShuffleInputs[0].getValueSizeInBits() == VecVT.getSizeInBits() &&
46057       scaleShuffleElements(ShuffleMask, NumElts, ScaledMaskUnused)) {
46058     unsigned NumShuffleElts = ShuffleMask.size();
46059     APInt DemandedElts = APInt::getZero(NumShuffleElts);
46060     for (int M : ShuffleMask) {
46061       assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
46062       DemandedElts.setBit(M);
46063     }
46064     if (DemandedElts.isAllOnes()) {
46065       SDLoc DL(EFLAGS);
46066       SDValue Result = DAG.getBitcast(VecVT, ShuffleInputs[0]);
46067       Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
46068       Result =
46069           DAG.getZExtOrTrunc(Result, DL, EFLAGS.getOperand(0).getValueType());
46070       return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
46071                          EFLAGS.getOperand(1));
46072     }
46073   }
46074 
46075   // MOVMSKPS(V) !=/== 0 -> TESTPS(V,V)
46076   // MOVMSKPD(V) !=/== 0 -> TESTPD(V,V)
46077   // MOVMSKPS(V) !=/== -1 -> TESTPS(V,V)
46078   // MOVMSKPD(V) !=/== -1 -> TESTPD(V,V)
46079   // iff every element is referenced.
46080   if (NumElts <= CmpBits && Subtarget.hasAVX() &&
46081       !Subtarget.preferMovmskOverVTest() && IsOneUse &&
46082       (NumEltBits == 32 || NumEltBits == 64)) {
46083     SDLoc DL(EFLAGS);
46084     MVT FloatSVT = MVT::getFloatingPointVT(NumEltBits);
46085     MVT FloatVT = MVT::getVectorVT(FloatSVT, NumElts);
46086     MVT IntVT = FloatVT.changeVectorElementTypeToInteger();
46087     SDValue LHS = Vec;
46088     SDValue RHS = IsAnyOf ? Vec : DAG.getAllOnesConstant(DL, IntVT);
46089     CC = IsAnyOf ? CC : (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
46090     return DAG.getNode(X86ISD::TESTP, DL, MVT::i32,
46091                        DAG.getBitcast(FloatVT, LHS),
46092                        DAG.getBitcast(FloatVT, RHS));
46093   }
46094 
46095   return SDValue();
46096 }
46097 
46098 /// Optimize an EFLAGS definition used according to the condition code \p CC
46099 /// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
46100 /// uses of chain values.
46101 static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
46102                                   SelectionDAG &DAG,
46103                                   const X86Subtarget &Subtarget) {
46104   if (CC == X86::COND_B)
46105     if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
46106       return Flags;
46107 
46108   if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
46109     return R;
46110 
46111   if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG, Subtarget))
46112     return R;
46113 
46114   if (SDValue R = combineSetCCMOVMSK(EFLAGS, CC, DAG, Subtarget))
46115     return R;
46116 
46117   return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
46118 }
46119 
46120 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
46121 static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
46122                            TargetLowering::DAGCombinerInfo &DCI,
46123                            const X86Subtarget &Subtarget) {
46124   SDLoc DL(N);
46125 
46126   SDValue FalseOp = N->getOperand(0);
46127   SDValue TrueOp = N->getOperand(1);
46128   X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
46129   SDValue Cond = N->getOperand(3);
46130 
46131   // cmov X, X, ?, ? --> X
46132   if (TrueOp == FalseOp)
46133     return TrueOp;
46134 
46135   // Try to simplify the EFLAGS and condition code operands.
46136   // We can't always do this as FCMOV only supports a subset of X86 cond.
46137   if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
46138     if (!(FalseOp.getValueType() == MVT::f80 ||
46139           (FalseOp.getValueType() == MVT::f64 && !Subtarget.hasSSE2()) ||
46140           (FalseOp.getValueType() == MVT::f32 && !Subtarget.hasSSE1())) ||
46141         !Subtarget.canUseCMOV() || hasFPCMov(CC)) {
46142       SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
46143                        Flags};
46144       return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46145     }
46146   }
46147 
46148   // If this is a select between two integer constants, try to do some
46149   // optimizations.  Note that the operands are ordered the opposite of SELECT
46150   // operands.
46151   if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
46152     if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
46153       // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
46154       // larger than FalseC (the false value).
46155       if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
46156         CC = X86::GetOppositeBranchCondition(CC);
46157         std::swap(TrueC, FalseC);
46158         std::swap(TrueOp, FalseOp);
46159       }
46160 
46161       // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3.  Likewise for any pow2/0.
46162       // This is efficient for any integer data type (including i8/i16) and
46163       // shift amount.
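      // Illustrative sketch (assumed codegen): "flag ? 8 : 0" becomes roughly
      //   sete %al; movzbl %al, %eax; shll $3, %eax
      // with no cmov or branch required.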
46164       if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
46165         Cond = getSETCC(CC, Cond, DL, DAG);
46166 
46167         // Zero extend the condition if needed.
46168         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
46169 
46170         unsigned ShAmt = TrueC->getAPIntValue().logBase2();
46171         Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
46172                            DAG.getConstant(ShAmt, DL, MVT::i8));
46173         return Cond;
46174       }
46175 
46176       // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.  This is efficient
46177       // for any integer data type, including i8/i16.
46178       if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
46179         Cond = getSETCC(CC, Cond, DL, DAG);
46180 
46181         // Zero extend the condition if needed.
46182         Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
46183                            FalseC->getValueType(0), Cond);
46184         Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
46185                            SDValue(FalseC, 0));
46186         return Cond;
46187       }
46188 
46189       // Optimize cases that will turn into an LEA instruction.  This requires
46190       // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
46191       if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
46192         APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
46193         assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
46194                "Implicit constant truncation");
46195 
46196         bool isFastMultiplier = false;
46197         if (Diff.ult(10)) {
46198           switch (Diff.getZExtValue()) {
46199           default: break;
46200           case 1:  // result = add base, cond
46201           case 2:  // result = lea base(    , cond*2)
46202           case 3:  // result = lea base(cond, cond*2)
46203           case 4:  // result = lea base(    , cond*4)
46204           case 5:  // result = lea base(cond, cond*4)
46205           case 8:  // result = lea base(    , cond*8)
46206           case 9:  // result = lea base(cond, cond*8)
46207             isFastMultiplier = true;
46208             break;
46209           }
46210         }
46211 
46212         if (isFastMultiplier) {
46213           Cond = getSETCC(CC, Cond, DL, DAG);
46214           // Zero extend the condition if needed.
46215           Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
46216                              Cond);
46217           // Scale the condition by the difference.
46218           if (Diff != 1)
46219             Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
46220                                DAG.getConstant(Diff, DL, Cond.getValueType()));
46221 
46222           // Add the base if non-zero.
46223           if (FalseC->getAPIntValue() != 0)
46224             Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
46225                                SDValue(FalseC, 0));
46226           return Cond;
46227         }
46228       }
46229     }
46230   }
46231 
46232   // Handle these cases:
46233   //   (select (x != c), e, c) -> (select (x != c), e, x),
46234   //   (select (x == c), c, e) -> (select (x == c), x, e)
46235   // where the c is an integer constant, and the "select" is the combination
46236   // of CMOV and CMP.
46237   //
46238   // The rationale for this change is that a conditional-move from a constant
46239   // needs two instructions; however, a conditional-move from a register needs
46240   // only one instruction.
46241   //
46242   // CAVEAT: By replacing a constant with a symbolic value, it may obscure
46243   //  some instruction-combining opportunities. This opt needs to be
46244   //  postponed as late as possible.
46245   //
46246   if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
46247     // the DCI.xxxx conditions are provided to postpone the optimization as
46248     // late as possible.
46249 
46250     ConstantSDNode *CmpAgainst = nullptr;
46251     if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
46252         (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
46253         !isa<ConstantSDNode>(Cond.getOperand(0))) {
46254 
46255       if (CC == X86::COND_NE &&
46256           CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
46257         CC = X86::GetOppositeBranchCondition(CC);
46258         std::swap(TrueOp, FalseOp);
46259       }
46260 
46261       if (CC == X86::COND_E &&
46262           CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
46263         SDValue Ops[] = {FalseOp, Cond.getOperand(0),
46264                          DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
46265         return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46266       }
46267     }
46268   }
46269 
46270   // Transform:
46271   //
46272   //   (cmov 1 T (uge T 2))
46273   //
46274   // to:
46275   //
46276   //   (adc T 0 (sub T 1))
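  // Worked instance (illustrative): "sub T, 1" sets CF only when T == 0, so
  // "adc T, 0" yields T + CF, i.e. 1 when T == 0 and T otherwise; this matches
  // "T >= 2 ? T : 1" (for T == 1 both forms give 1).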
46277   if (CC == X86::COND_AE && isOneConstant(FalseOp) &&
46278       Cond.getOpcode() == X86ISD::SUB && Cond->hasOneUse()) {
46279     SDValue Cond0 = Cond.getOperand(0);
46280     if (Cond0.getOpcode() == ISD::TRUNCATE)
46281       Cond0 = Cond0.getOperand(0);
46282     auto *Sub1C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
46283     if (Cond0 == TrueOp && Sub1C && Sub1C->getZExtValue() == 2) {
46284       EVT CondVT = Cond->getValueType(0);
46285       EVT OuterVT = N->getValueType(0);
46286       // Subtract 1 and generate a carry.
46287       SDValue NewSub =
46288           DAG.getNode(X86ISD::SUB, DL, Cond->getVTList(), Cond.getOperand(0),
46289                       DAG.getConstant(1, DL, CondVT));
46290       SDValue EFLAGS(NewSub.getNode(), 1);
46291       return DAG.getNode(X86ISD::ADC, DL, DAG.getVTList(OuterVT, MVT::i32),
46292                          TrueOp, DAG.getConstant(0, DL, OuterVT), EFLAGS);
46293     }
46294   }
46295 
46296   // Fold and/or of setcc's to double CMOV:
46297   //   (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
46298   //   (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
46299   //
46300   // This combine lets us generate:
46301   //   cmovcc1 (jcc1 if we don't have CMOV)
46302   //   cmovcc2 (same)
46303   // instead of:
46304   //   setcc1
46305   //   setcc2
46306   //   and/or
46307   //   cmovne (jne if we don't have CMOV)
46308   // When we can't use the CMOV instruction, it might increase branch
46309   // mispredicts.
46310   // When we can use CMOV, or when there is no mispredict, this improves
46311   // throughput and reduces register pressure.
46312   //
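  // Illustrative sketch (not from the source): for
  //   (CMOV F, T, ((setcc cc1, EFLAGS) | (setcc cc2, EFLAGS)) != 0)
  // we emit a CMOV on cc1 into a temporary and a second CMOV on cc2 on top of
  // it; both conditions read the same EFLAGS and no materialized i8 setcc
  // results are needed.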
46313   if (CC == X86::COND_NE) {
46314     SDValue Flags;
46315     X86::CondCode CC0, CC1;
46316     bool isAndSetCC;
46317     if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
46318       if (isAndSetCC) {
46319         std::swap(FalseOp, TrueOp);
46320         CC0 = X86::GetOppositeBranchCondition(CC0);
46321         CC1 = X86::GetOppositeBranchCondition(CC1);
46322       }
46323 
46324       SDValue LOps[] = {FalseOp, TrueOp,
46325                         DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
46326       SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
46327       SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
46328                        Flags};
46329       SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
46330       return CMOV;
46331     }
46332   }
46333 
46334   // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
46335   //      (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
46336   // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
46337   //    (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
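  // Worked instance (illustrative): for "X ? cttz(X) + 8 : 40" the input is
  //   (CMOV 40, (ADD (CTTZ X), 8), (X != 0))
  // and the fold produces
  //   (ADD (CMOV 32, (CTTZ X), (X != 0)), 8)
  // pulling the constant add out so the CMOV selects between 32 and cttz(X).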
46338   if ((CC == X86::COND_NE || CC == X86::COND_E) &&
46339       Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
46340     SDValue Add = TrueOp;
46341     SDValue Const = FalseOp;
46342     // Canonicalize the condition code for easier matching and output.
46343     if (CC == X86::COND_E)
46344       std::swap(Add, Const);
46345 
46346     // We might have replaced the constant in the cmov with the LHS of the
46347     // compare. If so change it to the RHS of the compare.
46348     if (Const == Cond.getOperand(0))
46349       Const = Cond.getOperand(1);
46350 
46351     // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
46352     if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
46353         Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
46354         (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
46355          Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
46356         Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
46357       EVT VT = N->getValueType(0);
46358       // This should constant fold.
46359       SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
46360       SDValue CMov =
46361           DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
46362                       DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
46363       return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
46364     }
46365   }
46366 
46367   return SDValue();
46368 }
46369 
46370 /// Different mul shrinking modes.
46371 enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
46372 
46373 static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
46374   EVT VT = N->getOperand(0).getValueType();
46375   if (VT.getScalarSizeInBits() != 32)
46376     return false;
46377 
46378   assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
46379   unsigned SignBits[2] = {1, 1};
46380   bool IsPositive[2] = {false, false};
46381   for (unsigned i = 0; i < 2; i++) {
46382     SDValue Opd = N->getOperand(i);
46383 
46384     SignBits[i] = DAG.ComputeNumSignBits(Opd);
46385     IsPositive[i] = DAG.SignBitIsZero(Opd);
46386   }
46387 
46388   bool AllPositive = IsPositive[0] && IsPositive[1];
46389   unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
46390   // When ranges are from -128 ~ 127, use MULS8 mode.
46391   if (MinSignBits >= 25)
46392     Mode = ShrinkMode::MULS8;
46393   // When ranges are from 0 ~ 255, use MULU8 mode.
46394   else if (AllPositive && MinSignBits >= 24)
46395     Mode = ShrinkMode::MULU8;
46396   // When ranges are from -32768 ~ 32767, use MULS16 mode.
46397   else if (MinSignBits >= 17)
46398     Mode = ShrinkMode::MULS16;
46399   // When ranges are from 0 ~ 65535, use MULU16 mode.
46400   else if (AllPositive && MinSignBits >= 16)
46401     Mode = ShrinkMode::MULU16;
46402   else
46403     return false;
46404   return true;
46405 }
46406 
46407 /// When the operands of vector mul are extended from smaller size values,
46408 /// like i8 and i16, the type of mul may be shrunk to generate more
46409 /// efficient code. Two typical patterns are handled:
46410 /// Pattern1:
46411 ///     %2 = sext/zext <N x i8> %1 to <N x i32>
46412 ///     %4 = sext/zext <N x i8> %3 to <N x i32>
46413 ///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
46414 ///     %5 = mul <N x i32> %2, %4
46415 ///
46416 /// Pattern2:
46417 ///     %2 = zext/sext <N x i16> %1 to <N x i32>
46418 ///     %4 = zext/sext <N x i16> %3 to <N x i32>
46419 ///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
46420 ///     %5 = mul <N x i32> %2, %4
46421 ///
46422 /// There are four mul shrinking modes:
46423 /// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
46424 /// -128 to 127, and the scalar value range of %4 is also -128 to 127,
46425 /// generate pmullw+sext32 for it (MULS8 mode).
46426 /// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
46427 /// 0 to 255, and the scalar value range of %4 is also 0 to 255,
46428 /// generate pmullw+zext32 for it (MULU8 mode).
46429 /// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
46430 /// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
46431 /// generate pmullw+pmulhw for it (MULS16 mode).
46432 /// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
46433 /// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
46434 /// generate pmullw+pmulhuw for it (MULU16 mode).
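///
/// Illustrative sketch (assumed codegen, not from a test) of MULU16 on v8i32
/// operands known to fit in 16 bits:
///   %lo = pmullw  a16, b16      ; low 16 bits of each 16x16 product
///   %hi = pmulhuw a16, b16      ; high 16 bits of each product
///   interleave %lo/%hi with punpcklwd/punpckhwd to rebuild the two v4i32
///   halves of the v8i32 result.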
46435 static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
46436                                const X86Subtarget &Subtarget) {
46437   // Check for legality.
46438   // pmullw/pmulhw on XMM vectors require SSE2 (they are not available with SSE1).
46439   if (!Subtarget.hasSSE2())
46440     return SDValue();
46441 
46442   // Check for profitability
46443   // pmulld is supported since SSE41. It is better to use pmulld
46444   // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
46445   // the expansion.
46446   bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
46447   if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
46448     return SDValue();
46449 
46450   ShrinkMode Mode;
46451   if (!canReduceVMulWidth(N, DAG, Mode))
46452     return SDValue();
46453 
46454   SDLoc DL(N);
46455   SDValue N0 = N->getOperand(0);
46456   SDValue N1 = N->getOperand(1);
46457   EVT VT = N->getOperand(0).getValueType();
46458   unsigned NumElts = VT.getVectorNumElements();
46459   if ((NumElts % 2) != 0)
46460     return SDValue();
46461 
46462   EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
46463 
46464   // Shrink the operands of mul.
46465   SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
46466   SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
46467 
46468   // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
46469   // lower part is needed.
46470   SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
46471   if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
46472     return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
46473                                                    : ISD::SIGN_EXTEND,
46474                        DL, VT, MulLo);
46475 
46476   EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts / 2);
46477   // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
46478   // the higher part is also needed.
46479   SDValue MulHi =
46480       DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
46481                   ReducedVT, NewN0, NewN1);
46482 
46483   // Repack the lower part and higher part result of mul into a wider
46484   // result.
46485   // Generate shuffle functioning as punpcklwd.
46486   SmallVector<int, 16> ShuffleMask(NumElts);
46487   for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
46488     ShuffleMask[2 * i] = i;
46489     ShuffleMask[2 * i + 1] = i + NumElts;
46490   }
46491   SDValue ResLo =
46492       DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
46493   ResLo = DAG.getBitcast(ResVT, ResLo);
46494   // Generate shuffle functioning as punpckhwd.
46495   for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
46496     ShuffleMask[2 * i] = i + NumElts / 2;
46497     ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
46498   }
46499   SDValue ResHi =
46500       DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
46501   ResHi = DAG.getBitcast(ResVT, ResHi);
46502   return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
46503 }
46504 
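      // Rewrite a multiply by selected constants as a short LEA/SHL/ADD (or SUB)
      // sequence. Illustrative sketch of the expected x86 output for mul x, 11
      // (assuming the MUL_IMM/SHL/ADD nodes are later selected as LEAs):
      //     lea  t, [x + x*4]      ; t = 5*x
      //     lea  r, [x + t*2]      ; r = x + 10*x = 11*x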
46505 static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
46506                                  EVT VT, const SDLoc &DL) {
46507 
46508   auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
46509     SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46510                                  DAG.getConstant(Mult, DL, VT));
46511     Result = DAG.getNode(ISD::SHL, DL, VT, Result,
46512                          DAG.getConstant(Shift, DL, MVT::i8));
46513     Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
46514                          N->getOperand(0));
46515     return Result;
46516   };
46517 
46518   auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
46519     SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46520                                  DAG.getConstant(Mul1, DL, VT));
46521     Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
46522                          DAG.getConstant(Mul2, DL, VT));
46523     Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
46524                          N->getOperand(0));
46525     return Result;
46526   };
46527 
46528   switch (MulAmt) {
46529   default:
46530     break;
46531   case 11:
46532     // mul x, 11 => add ((shl (mul x, 5), 1), x)
46533     return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
46534   case 21:
46535     // mul x, 21 => add ((shl (mul x, 5), 2), x)
46536     return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
46537   case 41:
46538     // mul x, 41 => add ((shl (mul x, 5), 3), x)
46539     return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
46540   case 22:
46541     // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
46542     return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
46543                        combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
46544   case 19:
46545     // mul x, 19 => add ((shl (mul x, 9), 1), x)
46546     return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
46547   case 37:
46548     // mul x, 37 => add ((shl (mul x, 9), 2), x)
46549     return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
46550   case 73:
46551     // mul x, 73 => add ((shl (mul x, 9), 3), x)
46552     return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
46553   case 13:
46554     // mul x, 13 => add ((shl (mul x, 3), 2), x)
46555     return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
46556   case 23:
46557     // mul x, 23 => sub ((shl (mul x, 3), 3), x)
46558     return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
46559   case 26:
46560     // mul x, 26 => add ((mul (mul x, 5), 5), x)
46561     return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
46562   case 28:
46563     // mul x, 28 => add ((mul (mul x, 9), 3), x)
46564     return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
46565   case 29:
46566     // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
46567     return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
46568                        combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
46569   }
46570 
46571   // Another trick. If the multiplier is a power of 2 plus 2, 4 or 8, we can
46572   // use a shift followed by a single LEA.
46573   // First check that it is a sum of two powers of 2, because that's easy. Then
46574   // count the trailing zeros to find the low bit.
46575   // TODO: We can do this even without LEA at a cost of two shifts and an add.
46576   if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
46577     unsigned ScaleShift = llvm::countr_zero(MulAmt);
46578     if (ScaleShift >= 1 && ScaleShift < 4) {
46579       unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
46580       SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46581                                    DAG.getConstant(ShiftAmt, DL, MVT::i8));
46582       SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46583                                    DAG.getConstant(ScaleShift, DL, MVT::i8));
46584       return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
46585     }
46586   }
46587 
46588   return SDValue();
46589 }
46590 
46591 // If the upper 17 bits of one multiply operand are zero and the upper bits of
46592 // the other operand are all zero/sign bits, then we can use PMADDWD, which is
46593 // always at least as quick as PMULLD, except on KNL.
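      // Illustrative reasoning: vpmaddwd treats each i32 lane as two i16 halves and
      // computes lo(a)*lo(b) + hi(a)*hi(b) per lane. If the hi half of at least one
      // operand is zero in every lane, the second term vanishes and the result is
      // the signed 16x16->32 product of the low halves, which equals the original
      // i32 multiply when both operands are sign-extensions of their low 16 bits.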
46594 static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
46595                                    const X86Subtarget &Subtarget) {
46596   if (!Subtarget.hasSSE2())
46597     return SDValue();
46598 
46599   if (Subtarget.isPMADDWDSlow())
46600     return SDValue();
46601 
46602   EVT VT = N->getValueType(0);
46603 
46604   // Only support vXi32 vectors.
46605   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
46606     return SDValue();
46607 
46608   // Make sure the type is legal or can split/widen to a legal type.
46609   // With AVX512 but without BWI, we would need to split v32i16.
46610   unsigned NumElts = VT.getVectorNumElements();
46611   if (NumElts == 1 || !isPowerOf2_32(NumElts))
46612     return SDValue();
46613 
46614   // With AVX512 but without BWI, we would need to split v32i16.
46615   if (32 <= (2 * NumElts) && Subtarget.hasAVX512() && !Subtarget.hasBWI())
46616     return SDValue();
46617 
46618   SDValue N0 = N->getOperand(0);
46619   SDValue N1 = N->getOperand(1);
46620 
46621   // If we are zero/sign extending in two steps (from i8 or narrower) without
46622   // SSE4.1, it's better to reduce the vmul width instead.
46623   if (!Subtarget.hasSSE41() &&
46624       (((N0.getOpcode() == ISD::ZERO_EXTEND &&
46625          N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
46626         (N1.getOpcode() == ISD::ZERO_EXTEND &&
46627          N1.getOperand(0).getScalarValueSizeInBits() <= 8)) ||
46628        ((N0.getOpcode() == ISD::SIGN_EXTEND &&
46629          N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
46630         (N1.getOpcode() == ISD::SIGN_EXTEND &&
46631          N1.getOperand(0).getScalarValueSizeInBits() <= 8))))
46632     return SDValue();
46633 
46634   // If we are sign extending a wide vector without SSE4.1, it's better to reduce
46635   // the vmul width instead.
46636   if (!Subtarget.hasSSE41() &&
46637       (N0.getOpcode() == ISD::SIGN_EXTEND &&
46638        N0.getOperand(0).getValueSizeInBits() > 128) &&
46639       (N1.getOpcode() == ISD::SIGN_EXTEND &&
46640        N1.getOperand(0).getValueSizeInBits() > 128))
46641     return SDValue();
46642 
46643   // Sign bits must extend down to the lowest i16.
46644   if (DAG.ComputeMaxSignificantBits(N1) > 16 ||
46645       DAG.ComputeMaxSignificantBits(N0) > 16)
46646     return SDValue();
46647 
46648   // At least one of the elements must be zero in the upper 17 bits, or can be
46649   // safely made zero without altering the final result.
46650   auto GetZeroableOp = [&](SDValue Op) {
46651     APInt Mask17 = APInt::getHighBitsSet(32, 17);
46652     if (DAG.MaskedValueIsZero(Op, Mask17))
46653       return Op;
46654     // Mask off upper 16-bits of sign-extended constants.
46655     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode()))
46656       return DAG.getNode(ISD::AND, SDLoc(N), VT, Op,
46657                          DAG.getConstant(0xFFFF, SDLoc(N), VT));
46658     if (Op.getOpcode() == ISD::SIGN_EXTEND && N->isOnlyUserOf(Op.getNode())) {
46659       SDValue Src = Op.getOperand(0);
46660       // Convert sext(vXi16) to zext(vXi16).
46661       if (Src.getScalarValueSizeInBits() == 16 && VT.getSizeInBits() <= 128)
46662         return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
46663       // Convert sext(vXi8) to zext(vXi16 sext(vXi8)) on pre-SSE41 targets
46664       // which will expand the extension.
46665       if (Src.getScalarValueSizeInBits() < 16 && !Subtarget.hasSSE41()) {
46666         EVT ExtVT = VT.changeVectorElementType(MVT::i16);
46667         Src = DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), ExtVT, Src);
46668         return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Src);
46669       }
46670     }
46671     // Convert SIGN_EXTEND_VECTOR_INREG to ZERO_EXTEND_VECTOR_INREG.
46672     if (Op.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG &&
46673         N->isOnlyUserOf(Op.getNode())) {
46674       SDValue Src = Op.getOperand(0);
46675       if (Src.getScalarValueSizeInBits() == 16)
46676         return DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(N), VT, Src);
46677     }
46678     // Convert VSRAI(Op, 16) to VSRLI(Op, 16).
46679     if (Op.getOpcode() == X86ISD::VSRAI && Op.getConstantOperandVal(1) == 16 &&
46680         N->isOnlyUserOf(Op.getNode())) {
46681       return DAG.getNode(X86ISD::VSRLI, SDLoc(N), VT, Op.getOperand(0),
46682                          Op.getOperand(1));
46683     }
46684     return SDValue();
46685   };
46686   SDValue ZeroN0 = GetZeroableOp(N0);
46687   SDValue ZeroN1 = GetZeroableOp(N1);
46688   if (!ZeroN0 && !ZeroN1)
46689     return SDValue();
46690   N0 = ZeroN0 ? ZeroN0 : N0;
46691   N1 = ZeroN1 ? ZeroN1 : N1;
46692 
46693   // Use SplitOpsAndApply to handle AVX splitting.
46694   auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
46695                            ArrayRef<SDValue> Ops) {
46696     MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
46697     MVT OpVT = MVT::getVectorVT(MVT::i16, Ops[0].getValueSizeInBits() / 16);
46698     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
46699                        DAG.getBitcast(OpVT, Ops[0]),
46700                        DAG.getBitcast(OpVT, Ops[1]));
46701   };
46702   return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
46703                           PMADDWDBuilder);
46704 }
46705 
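      // Lower a vXi64 multiply to PMULDQ/PMULUDQ when only the low 32 bits of each
      // operand are significant. Illustrative sketch: if both operands are known
      // zero in their upper 32 bits, a single
      //     pmuludq %xmm1, %xmm0    ; 64-bit products of the low 32-bit halves
      // reproduces the full 64-bit multiply result per element.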
46706 static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
46707                                   const X86Subtarget &Subtarget) {
46708   if (!Subtarget.hasSSE2())
46709     return SDValue();
46710 
46711   EVT VT = N->getValueType(0);
46712 
46713   // Only support vXi64 vectors.
46714   if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
46715       VT.getVectorNumElements() < 2 ||
46716       !isPowerOf2_32(VT.getVectorNumElements()))
46717     return SDValue();
46718 
46719   SDValue N0 = N->getOperand(0);
46720   SDValue N1 = N->getOperand(1);
46721 
46722   // MULDQ returns the 64-bit result of the signed multiplication of the lower
46723   // 32-bits. We can lower with this if the sign bits stretch that far.
46724   if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
46725       DAG.ComputeNumSignBits(N1) > 32) {
46726     auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
46727                             ArrayRef<SDValue> Ops) {
46728       return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
46729     };
46730     return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
46731                             PMULDQBuilder, /*CheckBWI*/false);
46732   }
46733 
46734   // If the upper bits are zero we can use a single pmuludq.
46735   APInt Mask = APInt::getHighBitsSet(64, 32);
46736   if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
46737     auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
46738                              ArrayRef<SDValue> Ops) {
46739       return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
46740     };
46741     return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
46742                             PMULUDQBuilder, /*CheckBWI*/false);
46743   }
46744 
46745   return SDValue();
46746 }
46747 
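      // Illustrative example of the constant decomposition performed below
      // (assuming the MUL_IMM nodes are later selected as LEAs):
      //     mul x, 45  ->  (mul (mul x, 9), 5)
      //                ->  lea t, [x + x*8] ; lea r, [t + t*4]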
46748 static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
46749                           TargetLowering::DAGCombinerInfo &DCI,
46750                           const X86Subtarget &Subtarget) {
46751   EVT VT = N->getValueType(0);
46752 
46753   if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
46754     return V;
46755 
46756   if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
46757     return V;
46758 
46759   if (DCI.isBeforeLegalize() && VT.isVector())
46760     return reduceVMULWidth(N, DAG, Subtarget);
46761 
46762   // Optimize a single multiply with constant into two operations in order to
46763   // implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
46764   if (!MulConstantOptimization)
46765     return SDValue();
46766 
46767   // An imul is usually smaller than the alternative sequence.
46768   if (DAG.getMachineFunction().getFunction().hasMinSize())
46769     return SDValue();
46770 
46771   if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
46772     return SDValue();
46773 
46774   if (VT != MVT::i64 && VT != MVT::i32 &&
46775       (!VT.isVector() || !VT.isSimple() || !VT.isInteger()))
46776     return SDValue();
46777 
46778   ConstantSDNode *CNode = isConstOrConstSplat(
46779       N->getOperand(1), /*AllowUndefs*/ true, /*AllowTrunc*/ false);
46780   const APInt *C = nullptr;
46781   if (!CNode) {
46782     if (VT.isVector())
46783       if (auto *RawC = getTargetConstantFromNode(N->getOperand(1)))
46784         if (auto *SplatC = RawC->getSplatValue())
46785           C = &(SplatC->getUniqueInteger());
46786 
46787     if (!C || C->getBitWidth() != VT.getScalarSizeInBits())
46788       return SDValue();
46789   } else {
46790     C = &(CNode->getAPIntValue());
46791   }
46792 
46793   if (isPowerOf2_64(C->getZExtValue()))
46794     return SDValue();
46795 
46796   int64_t SignMulAmt = C->getSExtValue();
46797   assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
46798   uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;
46799 
46800   SDLoc DL(N);
46801   SDValue NewMul = SDValue();
46802   if (VT == MVT::i64 || VT == MVT::i32) {
46803     if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
46804       NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46805                            DAG.getConstant(AbsMulAmt, DL, VT));
46806       if (SignMulAmt < 0)
46807         NewMul =
46808             DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
46809 
46810       return NewMul;
46811     }
46812 
46813     uint64_t MulAmt1 = 0;
46814     uint64_t MulAmt2 = 0;
46815     if ((AbsMulAmt % 9) == 0) {
46816       MulAmt1 = 9;
46817       MulAmt2 = AbsMulAmt / 9;
46818     } else if ((AbsMulAmt % 5) == 0) {
46819       MulAmt1 = 5;
46820       MulAmt2 = AbsMulAmt / 5;
46821     } else if ((AbsMulAmt % 3) == 0) {
46822       MulAmt1 = 3;
46823       MulAmt2 = AbsMulAmt / 3;
46824     }
46825 
46826     // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
46827     if (MulAmt2 &&
46828         (isPowerOf2_64(MulAmt2) ||
46829          (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {
46830 
46831       if (isPowerOf2_64(MulAmt2) && !(SignMulAmt >= 0 && N->hasOneUse() &&
46832                                       N->use_begin()->getOpcode() == ISD::ADD))
46833         // If the second multiplier is pow2, issue it first. We want the multiply
46834         // by 3, 5, or 9 to be folded into the addressing mode unless the lone
46835         // use is an add. Only do this for positive multiply amounts since the
46836         // negate would prevent it from being used as an address mode anyway.
46837         std::swap(MulAmt1, MulAmt2);
46838 
46839       if (isPowerOf2_64(MulAmt1))
46840         NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46841                              DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
46842       else
46843         NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
46844                              DAG.getConstant(MulAmt1, DL, VT));
46845 
46846       if (isPowerOf2_64(MulAmt2))
46847         NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
46848                              DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
46849       else
46850         NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
46851                              DAG.getConstant(MulAmt2, DL, VT));
46852 
46853       // Negate the result.
46854       if (SignMulAmt < 0)
46855         NewMul =
46856             DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
46857     } else if (!Subtarget.slowLEA())
46858       NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
46859   }
46860   if (!NewMul) {
46861     EVT ShiftVT = VT.isVector() ? VT : MVT::i8;
46862     assert(C->getZExtValue() != 0 &&
46863            C->getZExtValue() != maxUIntN(VT.getScalarSizeInBits()) &&
46864            "Both cases that could cause potential overflows should have "
46865            "already been handled.");
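          // Illustrative examples of the shift/add decompositions below:
          //     mul x, 17  -> (add (shl x, 4), x)
          //     mul x, -15 -> (sub x, (shl x, 4))   ; negation folded into the sub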
46866     if (isPowerOf2_64(AbsMulAmt - 1)) {
46867       // (mul x, 2^N + 1) => (add (shl x, N), x)
46868       NewMul = DAG.getNode(
46869           ISD::ADD, DL, VT, N->getOperand(0),
46870           DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46871                       DAG.getConstant(Log2_64(AbsMulAmt - 1), DL, ShiftVT)));
46872       // To negate, subtract the number from zero
46873       if (SignMulAmt < 0)
46874         NewMul =
46875             DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), NewMul);
46876     } else if (isPowerOf2_64(AbsMulAmt + 1)) {
46877       // (mul x, 2^N - 1) => (sub (shl x, N), x)
46878       NewMul =
46879           DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46880                       DAG.getConstant(Log2_64(AbsMulAmt + 1), DL, ShiftVT));
46881       // To negate, reverse the operands of the subtract.
46882       if (SignMulAmt < 0)
46883         NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
46884       else
46885         NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
46886     } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2) &&
46887                (!VT.isVector() || Subtarget.fastImmVectorShift())) {
46888       // (mul x, 2^N + 2) => (add (shl x, N), (add x, x))
46889       NewMul =
46890           DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46891                       DAG.getConstant(Log2_64(AbsMulAmt - 2), DL, ShiftVT));
46892       NewMul = DAG.getNode(
46893           ISD::ADD, DL, VT, NewMul,
46894           DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
46895     } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2) &&
46896                (!VT.isVector() || Subtarget.fastImmVectorShift())) {
46897       // (mul x, 2^N - 2) => (sub (shl x, N), (add x, x))
46898       NewMul =
46899           DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46900                       DAG.getConstant(Log2_64(AbsMulAmt + 2), DL, ShiftVT));
46901       NewMul = DAG.getNode(
46902           ISD::SUB, DL, VT, NewMul,
46903           DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0), N->getOperand(0)));
46904     } else if (SignMulAmt >= 0 && VT.isVector() &&
46905                Subtarget.fastImmVectorShift()) {
46906       uint64_t AbsMulAmtLowBit = AbsMulAmt & (-AbsMulAmt);
46907       uint64_t ShiftAmt1;
46908       std::optional<unsigned> Opc;
46909       if (isPowerOf2_64(AbsMulAmt - AbsMulAmtLowBit)) {
46910         ShiftAmt1 = AbsMulAmt - AbsMulAmtLowBit;
46911         Opc = ISD::ADD;
46912       } else if (isPowerOf2_64(AbsMulAmt + AbsMulAmtLowBit)) {
46913         ShiftAmt1 = AbsMulAmt + AbsMulAmtLowBit;
46914         Opc = ISD::SUB;
46915       }
46916 
46917       if (Opc) {
46918         SDValue Shift1 =
46919             DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46920                         DAG.getConstant(Log2_64(ShiftAmt1), DL, ShiftVT));
46921         SDValue Shift2 =
46922             DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
46923                         DAG.getConstant(Log2_64(AbsMulAmtLowBit), DL, ShiftVT));
46924         NewMul = DAG.getNode(*Opc, DL, VT, Shift1, Shift2);
46925       }
46926     }
46927   }
46928 
46929   return NewMul;
46930 }
46931 
46932 // Try to form a MULHU or MULHS node by looking for
46933 // (srl (mul ext, ext), 16)
46934 // TODO: This is X86 specific because we want to be able to handle wide types
46935 // before type legalization. But we can only do it if the vector will be
46936 // legalized via widening/splitting. Type legalization can't handle promotion
46937 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
46938 // combiner.
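      // Illustrative example of the fold:
      //     (srl (mul (zext v8i16 %a to v8i32), (zext v8i16 %b to v8i32)), 16)
      //       -> (zext (mulhu v8i16 %a, %b) to v8i32)
      // which can then select to a single pmulhuw plus a cheap zero-extension.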
46939 static SDValue combineShiftToPMULH(SDNode *N, SelectionDAG &DAG,
46940                                    const X86Subtarget &Subtarget) {
46941   assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
46942            "SRL or SRA node is required here!");
46943   SDLoc DL(N);
46944 
46945   if (!Subtarget.hasSSE2())
46946     return SDValue();
46947 
46948   // The operation feeding into the shift must be a multiply.
46949   SDValue ShiftOperand = N->getOperand(0);
46950   if (ShiftOperand.getOpcode() != ISD::MUL || !ShiftOperand.hasOneUse())
46951     return SDValue();
46952 
46953   // Input type should be at least vXi32.
46954   EVT VT = N->getValueType(0);
46955   if (!VT.isVector() || VT.getVectorElementType().getSizeInBits() < 32)
46956     return SDValue();
46957 
46958   // Need a shift by 16.
46959   APInt ShiftAmt;
46960   if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), ShiftAmt) ||
46961       ShiftAmt != 16)
46962     return SDValue();
46963 
46964   SDValue LHS = ShiftOperand.getOperand(0);
46965   SDValue RHS = ShiftOperand.getOperand(1);
46966 
46967   unsigned ExtOpc = LHS.getOpcode();
46968   if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
46969       RHS.getOpcode() != ExtOpc)
46970     return SDValue();
46971 
46972   // Peek through the extends.
46973   LHS = LHS.getOperand(0);
46974   RHS = RHS.getOperand(0);
46975 
46976   // Ensure the input types match.
46977   EVT MulVT = LHS.getValueType();
46978   if (MulVT.getVectorElementType() != MVT::i16 || RHS.getValueType() != MulVT)
46979     return SDValue();
46980 
46981   unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
46982   SDValue Mulh = DAG.getNode(Opc, DL, MulVT, LHS, RHS);
46983 
46984   ExtOpc = N->getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
46985   return DAG.getNode(ExtOpc, DL, VT, Mulh);
46986 }
46987 
46988 static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
46989   SDValue N0 = N->getOperand(0);
46990   SDValue N1 = N->getOperand(1);
46991   ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
46992   EVT VT = N0.getValueType();
46993 
46994   // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
46995   // since the result of setcc_c is all zero's or all ones.
46996   if (VT.isInteger() && !VT.isVector() &&
46997       N1C && N0.getOpcode() == ISD::AND &&
46998       N0.getOperand(1).getOpcode() == ISD::Constant) {
46999     SDValue N00 = N0.getOperand(0);
47000     APInt Mask = N0.getConstantOperandAPInt(1);
47001     Mask <<= N1C->getAPIntValue();
47002     bool MaskOK = false;
47003     // We can handle cases concerning bit-widening nodes containing setcc_c if
47004     // we carefully interrogate the mask to make sure the transform is
47005     // semantics-preserving.
47006     // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
47007     // of the underlying setcc_c operation if the setcc_c was zero extended.
47008     // Consider the following example:
47009     //   zext(setcc_c)                 -> i32 0x0000FFFF
47010     //   c1                            -> i32 0x0000FFFF
47011     //   c2                            -> i32 0x00000001
47012     //   (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
47013     //   (and setcc_c, (c1 << c2))     -> i32 0x0000FFFE
47014     if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
47015       MaskOK = true;
47016     } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
47017                N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
47018       MaskOK = true;
47019     } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
47020                 N00.getOpcode() == ISD::ANY_EXTEND) &&
47021                N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
47022       MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
47023     }
47024     if (MaskOK && Mask != 0) {
47025       SDLoc DL(N);
47026       return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
47027     }
47028   }
47029 
47030   return SDValue();
47031 }
47032 
47033 static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
47034                                            const X86Subtarget &Subtarget) {
47035   SDValue N0 = N->getOperand(0);
47036   SDValue N1 = N->getOperand(1);
47037   EVT VT = N0.getValueType();
47038   unsigned Size = VT.getSizeInBits();
47039 
47040   if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
47041     return V;
47042 
47043   // fold (SRA (SHL X, ShlConst), SraConst)
47044   // into (SHL (sext_in_reg X), ShlConst - SraConst)
47045   //   or (sext_in_reg X)
47046   //   or (SRA (sext_in_reg X), SraConst - ShlConst)
47047   // depending on relation between SraConst and ShlConst.
47048   // We only do this if (Size - ShlConst) is equal to 8, 16 or 32. That allows
47049   // us to do the sext_in_reg from the corresponding bit.
47050 
47051   // sexts on X86 are MOVSX instructions. The MOVs have the same code size
47052   // as the SHIFTs above (only a shift by 1 has lower code size).
47053   // However, the MOVs have two advantages over a SHIFT:
47054   // 1. MOVs can write to a register that differs from the source.
47055   // 2. MOVs accept memory operands.
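        // Illustrative example on i32: (sra (shl X, 24), 27) becomes
        // (sra (sext_in_reg X, i8), 3), i.e. a movsx followed by a sar by 3.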
47056 
47057   if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
47058       N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
47059       N0.getOperand(1).getOpcode() != ISD::Constant)
47060     return SDValue();
47061 
47062   SDValue N00 = N0.getOperand(0);
47063   SDValue N01 = N0.getOperand(1);
47064   APInt ShlConst = N01->getAsAPIntVal();
47065   APInt SraConst = N1->getAsAPIntVal();
47066   EVT CVT = N1.getValueType();
47067 
47068   if (CVT != N01.getValueType())
47069     return SDValue();
47070   if (SraConst.isNegative())
47071     return SDValue();
47072 
47073   for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
47074     unsigned ShiftSize = SVT.getSizeInBits();
47075     // Only deal with (Size - ShlConst) being equal to 8, 16 or 32.
47076     if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
47077       continue;
47078     SDLoc DL(N);
47079     SDValue NN =
47080         DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
47081     if (SraConst.eq(ShlConst))
47082       return NN;
47083     if (SraConst.ult(ShlConst))
47084       return DAG.getNode(ISD::SHL, DL, VT, NN,
47085                          DAG.getConstant(ShlConst - SraConst, DL, CVT));
47086     return DAG.getNode(ISD::SRA, DL, VT, NN,
47087                        DAG.getConstant(SraConst - ShlConst, DL, CVT));
47088   }
47089   return SDValue();
47090 }
47091 
47092 static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
47093                                         TargetLowering::DAGCombinerInfo &DCI,
47094                                         const X86Subtarget &Subtarget) {
47095   SDValue N0 = N->getOperand(0);
47096   SDValue N1 = N->getOperand(1);
47097   EVT VT = N0.getValueType();
47098 
47099   if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
47100     return V;
47101 
47102   // Only do this on the last DAG combine as it can interfere with other
47103   // combines.
47104   if (!DCI.isAfterLegalizeDAG())
47105     return SDValue();
47106 
47107   // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
47108   // TODO: This is a generic DAG combine that became an x86-only combine to
47109   // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
47110   // and-not ('andn').
47111   if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
47112     return SDValue();
47113 
47114   auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
47115   auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
47116   if (!ShiftC || !AndC)
47117     return SDValue();
47118 
47119   // If we can shrink the constant mask below 8-bits or 32-bits, then this
47120   // transform should reduce code size. It may also enable secondary transforms
47121   // from improved known-bits analysis or instruction selection.
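        // Illustrative example on i32: (srl (and X, 0xFF00), 8) becomes
        // (and (srl X, 8), 0xFF), shrinking the AND mask from 16 significant bits
        // to 8 so it can use a shorter immediate encoding.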
47122   APInt MaskVal = AndC->getAPIntValue();
47123 
47124   // If this can be matched by a zero extend, don't optimize.
47125   if (MaskVal.isMask()) {
47126     unsigned TO = MaskVal.countr_one();
47127     if (TO >= 8 && isPowerOf2_32(TO))
47128       return SDValue();
47129   }
47130 
47131   APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
47132   unsigned OldMaskSize = MaskVal.getSignificantBits();
47133   unsigned NewMaskSize = NewMaskVal.getSignificantBits();
47134   if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
47135       (OldMaskSize > 32 && NewMaskSize <= 32)) {
47136     // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
47137     SDLoc DL(N);
47138     SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
47139     SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
47140     return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
47141   }
47142   return SDValue();
47143 }
47144 
47145 static SDValue combineHorizOpWithShuffle(SDNode *N, SelectionDAG &DAG,
47146                                          const X86Subtarget &Subtarget) {
47147   unsigned Opcode = N->getOpcode();
47148   assert(isHorizOp(Opcode) && "Unexpected hadd/hsub/pack opcode");
47149 
47150   SDLoc DL(N);
47151   EVT VT = N->getValueType(0);
47152   SDValue N0 = N->getOperand(0);
47153   SDValue N1 = N->getOperand(1);
47154   EVT SrcVT = N0.getValueType();
47155 
47156   SDValue BC0 =
47157       N->isOnlyUserOf(N0.getNode()) ? peekThroughOneUseBitcasts(N0) : N0;
47158   SDValue BC1 =
47159       N->isOnlyUserOf(N1.getNode()) ? peekThroughOneUseBitcasts(N1) : N1;
47160 
47161   // Attempt to fold HOP(LOSUBVECTOR(SHUFFLE(X)),HISUBVECTOR(SHUFFLE(X)))
47162   // to SHUFFLE(HOP(LOSUBVECTOR(X),HISUBVECTOR(X))), this is mainly for
47163   // truncation trees that help us avoid lane crossing shuffles.
47164   // TODO: There's a lot more we can do for PACK/HADD style shuffle combines.
47165   // TODO: We don't handle vXf64 shuffles yet.
47166   if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
47167     if (SDValue BCSrc = getSplitVectorSrc(BC0, BC1, false)) {
47168       SmallVector<SDValue> ShuffleOps;
47169       SmallVector<int> ShuffleMask, ScaledMask;
47170       SDValue Vec = peekThroughBitcasts(BCSrc);
47171       if (getTargetShuffleInputs(Vec, ShuffleOps, ShuffleMask, DAG)) {
47172         resolveTargetShuffleInputsAndMask(ShuffleOps, ShuffleMask);
47173         // To keep the HOP LHS/RHS coherency, we must be able to scale the unary
47174         // shuffle to a v4X64 width - we can probably relax this in the future.
47175         if (!isAnyZero(ShuffleMask) && ShuffleOps.size() == 1 &&
47176             ShuffleOps[0].getValueType().is256BitVector() &&
47177             scaleShuffleElements(ShuffleMask, 4, ScaledMask)) {
47178           SDValue Lo, Hi;
47179           MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
47180           std::tie(Lo, Hi) = DAG.SplitVector(ShuffleOps[0], DL);
47181           Lo = DAG.getBitcast(SrcVT, Lo);
47182           Hi = DAG.getBitcast(SrcVT, Hi);
47183           SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
47184           Res = DAG.getBitcast(ShufVT, Res);
47185           Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ScaledMask);
47186           return DAG.getBitcast(VT, Res);
47187         }
47188       }
47189     }
47190   }
47191 
47192   // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(Z,W)) -> SHUFFLE(HOP()).
47193   if (VT.is128BitVector() && SrcVT.getScalarSizeInBits() <= 32) {
47194     // If either/both ops are a shuffle that can scale to v2x64,
47195     // then see if we can perform this as a v4x32 post shuffle.
47196     SmallVector<SDValue> Ops0, Ops1;
47197     SmallVector<int> Mask0, Mask1, ScaledMask0, ScaledMask1;
47198     bool IsShuf0 =
47199         getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
47200         scaleShuffleElements(Mask0, 2, ScaledMask0) &&
47201         all_of(Ops0, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
47202     bool IsShuf1 =
47203         getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
47204         scaleShuffleElements(Mask1, 2, ScaledMask1) &&
47205         all_of(Ops1, [](SDValue Op) { return Op.getValueSizeInBits() == 128; });
47206     if (IsShuf0 || IsShuf1) {
47207       if (!IsShuf0) {
47208         Ops0.assign({BC0});
47209         ScaledMask0.assign({0, 1});
47210       }
47211       if (!IsShuf1) {
47212         Ops1.assign({BC1});
47213         ScaledMask1.assign({0, 1});
47214       }
47215 
47216       SDValue LHS, RHS;
47217       int PostShuffle[4] = {-1, -1, -1, -1};
47218       auto FindShuffleOpAndIdx = [&](int M, int &Idx, ArrayRef<SDValue> Ops) {
47219         if (M < 0)
47220           return true;
47221         Idx = M % 2;
47222         SDValue Src = Ops[M / 2];
47223         if (!LHS || LHS == Src) {
47224           LHS = Src;
47225           return true;
47226         }
47227         if (!RHS || RHS == Src) {
47228           Idx += 2;
47229           RHS = Src;
47230           return true;
47231         }
47232         return false;
47233       };
47234       if (FindShuffleOpAndIdx(ScaledMask0[0], PostShuffle[0], Ops0) &&
47235           FindShuffleOpAndIdx(ScaledMask0[1], PostShuffle[1], Ops0) &&
47236           FindShuffleOpAndIdx(ScaledMask1[0], PostShuffle[2], Ops1) &&
47237           FindShuffleOpAndIdx(ScaledMask1[1], PostShuffle[3], Ops1)) {
47238         LHS = DAG.getBitcast(SrcVT, LHS);
47239         RHS = DAG.getBitcast(SrcVT, RHS ? RHS : LHS);
47240         MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f32 : MVT::v4i32;
47241         SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
47242         Res = DAG.getBitcast(ShufVT, Res);
47243         Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, PostShuffle);
47244         return DAG.getBitcast(VT, Res);
47245       }
47246     }
47247   }
47248 
47249   // Attempt to fold HOP(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(HOP(X,Y)).
47250   if (VT.is256BitVector() && Subtarget.hasInt256()) {
47251     SmallVector<int> Mask0, Mask1;
47252     SmallVector<SDValue> Ops0, Ops1;
47253     SmallVector<int, 2> ScaledMask0, ScaledMask1;
47254     if (getTargetShuffleInputs(BC0, Ops0, Mask0, DAG) && !isAnyZero(Mask0) &&
47255         getTargetShuffleInputs(BC1, Ops1, Mask1, DAG) && !isAnyZero(Mask1) &&
47256         !Ops0.empty() && !Ops1.empty() &&
47257         all_of(Ops0,
47258                [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
47259         all_of(Ops1,
47260                [](SDValue Op) { return Op.getValueType().is256BitVector(); }) &&
47261         scaleShuffleElements(Mask0, 2, ScaledMask0) &&
47262         scaleShuffleElements(Mask1, 2, ScaledMask1)) {
47263       SDValue Op00 = peekThroughBitcasts(Ops0.front());
47264       SDValue Op10 = peekThroughBitcasts(Ops1.front());
47265       SDValue Op01 = peekThroughBitcasts(Ops0.back());
47266       SDValue Op11 = peekThroughBitcasts(Ops1.back());
47267       if ((Op00 == Op11) && (Op01 == Op10)) {
47268         std::swap(Op10, Op11);
47269         ShuffleVectorSDNode::commuteMask(ScaledMask1);
47270       }
47271       if ((Op00 == Op10) && (Op01 == Op11)) {
47272         const int Map[4] = {0, 2, 1, 3};
47273         SmallVector<int, 4> ShuffleMask(
47274             {Map[ScaledMask0[0]], Map[ScaledMask1[0]], Map[ScaledMask0[1]],
47275              Map[ScaledMask1[1]]});
47276         MVT ShufVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
47277         SDValue Res = DAG.getNode(Opcode, DL, VT, DAG.getBitcast(SrcVT, Op00),
47278                                   DAG.getBitcast(SrcVT, Op01));
47279         Res = DAG.getBitcast(ShufVT, Res);
47280         Res = DAG.getVectorShuffle(ShufVT, DL, Res, Res, ShuffleMask);
47281         return DAG.getBitcast(VT, Res);
47282       }
47283     }
47284   }
47285 
47286   return SDValue();
47287 }
47288 
47289 static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
47290                                  TargetLowering::DAGCombinerInfo &DCI,
47291                                  const X86Subtarget &Subtarget) {
47292   unsigned Opcode = N->getOpcode();
47293   assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
47294          "Unexpected pack opcode");
47295 
47296   EVT VT = N->getValueType(0);
47297   SDValue N0 = N->getOperand(0);
47298   SDValue N1 = N->getOperand(1);
47299   unsigned NumDstElts = VT.getVectorNumElements();
47300   unsigned DstBitsPerElt = VT.getScalarSizeInBits();
47301   unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
47302   assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
47303          N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
47304          "Unexpected PACKSS/PACKUS input type");
47305 
47306   bool IsSigned = (X86ISD::PACKSS == Opcode);
47307 
47308   // Constant Folding.
47309   APInt UndefElts0, UndefElts1;
47310   SmallVector<APInt, 32> EltBits0, EltBits1;
47311   if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
47312       (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
47313       getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
47314       getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
47315     unsigned NumLanes = VT.getSizeInBits() / 128;
47316     unsigned NumSrcElts = NumDstElts / 2;
47317     unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
47318     unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;
47319 
47320     APInt Undefs(NumDstElts, 0);
47321     SmallVector<APInt, 32> Bits(NumDstElts, APInt::getZero(DstBitsPerElt));
47322     for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
47323       for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
47324         unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
47325         auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
47326         auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);
47327 
47328         if (UndefElts[SrcIdx]) {
47329           Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
47330           continue;
47331         }
47332 
47333         APInt &Val = EltBits[SrcIdx];
47334         if (IsSigned) {
47335           // PACKSS: Truncate signed value with signed saturation.
47336           // Source values less than dst minint are saturated to minint.
47337           // Source values greater than dst maxint are saturated to maxint.
47338           if (Val.isSignedIntN(DstBitsPerElt))
47339             Val = Val.trunc(DstBitsPerElt);
47340           else if (Val.isNegative())
47341             Val = APInt::getSignedMinValue(DstBitsPerElt);
47342           else
47343             Val = APInt::getSignedMaxValue(DstBitsPerElt);
47344         } else {
47345           // PACKUS: Truncate signed value with unsigned saturation.
47346           // Source values less than zero are saturated to zero.
47347           // Source values greater than dst maxuint are saturated to maxuint.
47348           if (Val.isIntN(DstBitsPerElt))
47349             Val = Val.trunc(DstBitsPerElt);
47350           else if (Val.isNegative())
47351             Val = APInt::getZero(DstBitsPerElt);
47352           else
47353             Val = APInt::getAllOnes(DstBitsPerElt);
47354         }
47355         Bits[Lane * NumDstEltsPerLane + Elt] = Val;
47356       }
47357     }
47358 
47359     return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
47360   }
47361 
47362   // Try to fold PACK(SHUFFLE(),SHUFFLE()) -> SHUFFLE(PACK()).
47363   if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
47364     return V;
47365 
47366   // Try to fold PACKSS(NOT(X),NOT(Y)) -> NOT(PACKSS(X,Y)).
47367   // Currently limit this to allsignbits cases only.
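        // Illustrative reasoning: with all-signbits inputs every element is 0 or -1,
        // PACKSS maps 0 -> 0 and -1 -> -1, and NOT swaps the two values, so the two
        // input NOTs can be merged into a single NOT of the packed result.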
47368   if (IsSigned &&
47369       (N0.isUndef() || DAG.ComputeNumSignBits(N0) == SrcBitsPerElt) &&
47370       (N1.isUndef() || DAG.ComputeNumSignBits(N1) == SrcBitsPerElt)) {
47371     SDValue Not0 = N0.isUndef() ? N0 : IsNOT(N0, DAG);
47372     SDValue Not1 = N1.isUndef() ? N1 : IsNOT(N1, DAG);
47373     if (Not0 && Not1) {
47374       SDLoc DL(N);
47375       MVT SrcVT = N0.getSimpleValueType();
47376       SDValue Pack =
47377           DAG.getNode(X86ISD::PACKSS, DL, VT, DAG.getBitcast(SrcVT, Not0),
47378                       DAG.getBitcast(SrcVT, Not1));
47379       return DAG.getNOT(DL, Pack, VT);
47380     }
47381   }
47382 
47383   // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
47384   // truncate to create a larger truncate.
47385   if (Subtarget.hasAVX512() &&
47386       N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
47387       N0.getOperand(0).getValueType() == MVT::v8i32) {
47388     if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
47389         (!IsSigned &&
47390          DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
47391       if (Subtarget.hasVLX())
47392         return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));
47393 
47394       // Widen input to v16i32 so we can truncate that.
47395       SDLoc dl(N);
47396       SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
47397                                    N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
47398       return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
47399     }
47400   }
47401 
47402   // Try to fold PACK(EXTEND(X),EXTEND(Y)) -> CONCAT(X,Y) subvectors.
47403   if (VT.is128BitVector()) {
47404     unsigned ExtOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
47405     SDValue Src0, Src1;
47406     if (N0.getOpcode() == ExtOpc &&
47407         N0.getOperand(0).getValueType().is64BitVector() &&
47408         N0.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
47409       Src0 = N0.getOperand(0);
47410     }
47411     if (N1.getOpcode() == ExtOpc &&
47412         N1.getOperand(0).getValueType().is64BitVector() &&
47413         N1.getOperand(0).getScalarValueSizeInBits() == DstBitsPerElt) {
47414       Src1 = N1.getOperand(0);
47415     }
47416     if ((Src0 || N0.isUndef()) && (Src1 || N1.isUndef())) {
47417       assert((Src0 || Src1) && "Found PACK(UNDEF,UNDEF)");
47418       Src0 = Src0 ? Src0 : DAG.getUNDEF(Src1.getValueType());
47419       Src1 = Src1 ? Src1 : DAG.getUNDEF(Src0.getValueType());
47420       return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT, Src0, Src1);
47421     }
47422 
47423     // Try again with pack(*_extend_vector_inreg, undef).
47424     unsigned VecInRegOpc = IsSigned ? ISD::SIGN_EXTEND_VECTOR_INREG
47425                                     : ISD::ZERO_EXTEND_VECTOR_INREG;
47426     if (N0.getOpcode() == VecInRegOpc && N1.isUndef() &&
47427         N0.getOperand(0).getScalarValueSizeInBits() < DstBitsPerElt)
47428       return getEXTEND_VECTOR_INREG(ExtOpc, SDLoc(N), VT, N0.getOperand(0),
47429                                     DAG);
47430   }
47431 
47432   // Attempt to combine as shuffle.
47433   SDValue Op(N, 0);
47434   if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
47435     return Res;
47436 
47437   return SDValue();
47438 }
47439 
47440 static SDValue combineVectorHADDSUB(SDNode *N, SelectionDAG &DAG,
47441                                     TargetLowering::DAGCombinerInfo &DCI,
47442                                     const X86Subtarget &Subtarget) {
47443   assert((X86ISD::HADD == N->getOpcode() || X86ISD::FHADD == N->getOpcode() ||
47444           X86ISD::HSUB == N->getOpcode() || X86ISD::FHSUB == N->getOpcode()) &&
47445          "Unexpected horizontal add/sub opcode");
47446 
47447   if (!shouldUseHorizontalOp(true, DAG, Subtarget)) {
47448     MVT VT = N->getSimpleValueType(0);
47449     SDValue LHS = N->getOperand(0);
47450     SDValue RHS = N->getOperand(1);
47451 
47452     // HOP(HOP'(X,X),HOP'(Y,Y)) -> HOP(PERMUTE(HOP'(X,Y)),PERMUTE(HOP'(X,Y)).
47453     if (LHS != RHS && LHS.getOpcode() == N->getOpcode() &&
47454         LHS.getOpcode() == RHS.getOpcode() &&
47455         LHS.getValueType() == RHS.getValueType() &&
47456         N->isOnlyUserOf(LHS.getNode()) && N->isOnlyUserOf(RHS.getNode())) {
47457       SDValue LHS0 = LHS.getOperand(0);
47458       SDValue LHS1 = LHS.getOperand(1);
47459       SDValue RHS0 = RHS.getOperand(0);
47460       SDValue RHS1 = RHS.getOperand(1);
47461       if ((LHS0 == LHS1 || LHS0.isUndef() || LHS1.isUndef()) &&
47462           (RHS0 == RHS1 || RHS0.isUndef() || RHS1.isUndef())) {
47463         SDLoc DL(N);
47464         SDValue Res = DAG.getNode(LHS.getOpcode(), DL, LHS.getValueType(),
47465                                   LHS0.isUndef() ? LHS1 : LHS0,
47466                                   RHS0.isUndef() ? RHS1 : RHS0);
47467         MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
47468         Res = DAG.getBitcast(ShufVT, Res);
47469         SDValue NewLHS =
47470             DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
47471                         getV4X86ShuffleImm8ForMask({0, 1, 0, 1}, DL, DAG));
47472         SDValue NewRHS =
47473             DAG.getNode(X86ISD::PSHUFD, DL, ShufVT, Res,
47474                         getV4X86ShuffleImm8ForMask({2, 3, 2, 3}, DL, DAG));
47475         return DAG.getNode(N->getOpcode(), DL, VT, DAG.getBitcast(VT, NewLHS),
47476                            DAG.getBitcast(VT, NewRHS));
47477       }
47478     }
47479   }
47480 
47481   // Try to fold HOP(SHUFFLE(),SHUFFLE()) -> SHUFFLE(HOP()).
47482   if (SDValue V = combineHorizOpWithShuffle(N, DAG, Subtarget))
47483     return V;
47484 
47485   return SDValue();
47486 }
47487 
47488 static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
47489                                      TargetLowering::DAGCombinerInfo &DCI,
47490                                      const X86Subtarget &Subtarget) {
47491   assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
47492           X86ISD::VSRL == N->getOpcode()) &&
47493          "Unexpected shift opcode");
47494   EVT VT = N->getValueType(0);
47495   SDValue N0 = N->getOperand(0);
47496   SDValue N1 = N->getOperand(1);
47497 
47498   // Shift zero -> zero.
47499   if (ISD::isBuildVectorAllZeros(N0.getNode()))
47500     return DAG.getConstant(0, SDLoc(N), VT);
47501 
47502   // Detect constant shift amounts.
47503   APInt UndefElts;
47504   SmallVector<APInt, 32> EltBits;
47505   if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
47506     unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
47507     return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
47508                                       EltBits[0].getZExtValue(), DAG);
47509   }
47510 
47511   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47512   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
47513   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
47514     return SDValue(N, 0);
47515 
47516   return SDValue();
47517 }
47518 
47519 static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
47520                                      TargetLowering::DAGCombinerInfo &DCI,
47521                                      const X86Subtarget &Subtarget) {
47522   unsigned Opcode = N->getOpcode();
47523   assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
47524           X86ISD::VSRLI == Opcode) &&
47525          "Unexpected shift opcode");
47526   bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
47527   EVT VT = N->getValueType(0);
47528   SDValue N0 = N->getOperand(0);
47529   SDValue N1 = N->getOperand(1);
47530   unsigned NumBitsPerElt = VT.getScalarSizeInBits();
47531   assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
47532          "Unexpected value type");
47533   assert(N1.getValueType() == MVT::i8 && "Unexpected shift amount type");
47534 
47535   // (shift undef, X) -> 0
47536   if (N0.isUndef())
47537     return DAG.getConstant(0, SDLoc(N), VT);
47538 
47539   // Out of range logical bit shifts are guaranteed to be zero.
47540   // Out of range arithmetic bit shifts splat the sign bit.
47541   unsigned ShiftVal = N->getConstantOperandVal(1);
47542   if (ShiftVal >= NumBitsPerElt) {
47543     if (LogicalShift)
47544       return DAG.getConstant(0, SDLoc(N), VT);
47545     ShiftVal = NumBitsPerElt - 1;
47546   }
47547 
47548   // (shift X, 0) -> X
47549   if (!ShiftVal)
47550     return N0;
47551 
47552   // (shift 0, C) -> 0
47553   if (ISD::isBuildVectorAllZeros(N0.getNode()))
47554     // N0 is all zeros or undef. We guarantee that the bits shifted into the
47555     // result are all zeros, not undef.
47556     return DAG.getConstant(0, SDLoc(N), VT);
47557 
47558   // (VSRAI -1, C) -> -1
47559   if (!LogicalShift && ISD::isBuildVectorAllOnes(N0.getNode()))
47560     // N0 is all ones or undef. We guarantee that the bits shifted into the
47561     // result are all ones, not undef.
47562     return DAG.getConstant(-1, SDLoc(N), VT);
47563 
47564   auto MergeShifts = [&](SDValue X, uint64_t Amt0, uint64_t Amt1) {
47565     unsigned NewShiftVal = Amt0 + Amt1;
47566     if (NewShiftVal >= NumBitsPerElt) {
47567       // Out of range logical bit shifts are guaranteed to be zero.
47568       // Out of range arithmetic bit shifts splat the sign bit.
47569       if (LogicalShift)
47570         return DAG.getConstant(0, SDLoc(N), VT);
47571       NewShiftVal = NumBitsPerElt - 1;
47572     }
47573     return DAG.getNode(Opcode, SDLoc(N), VT, N0.getOperand(0),
47574                        DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
47575   };
47576 
47577   // (shift (shift X, C2), C1) -> (shift X, (C1 + C2))
47578   if (Opcode == N0.getOpcode())
47579     return MergeShifts(N0.getOperand(0), ShiftVal, N0.getConstantOperandVal(1));
47580 
47581   // (shl (add X, X), C) -> (shl X, (C + 1))
47582   if (Opcode == X86ISD::VSHLI && N0.getOpcode() == ISD::ADD &&
47583       N0.getOperand(0) == N0.getOperand(1))
47584     return MergeShifts(N0.getOperand(0), ShiftVal, 1);
47585 
47586   // We can decode 'whole byte' logical bit shifts as shuffles.
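        // Illustrative example: (X86ISD::VSRLI v2i64 X, 32) is the byte shuffle
        // <4,5,6,7,Z,Z,Z,Z,12,13,14,15,Z,Z,Z,Z> where Z denotes a zeroed byte.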
47587   if (LogicalShift && (ShiftVal % 8) == 0) {
47588     SDValue Op(N, 0);
47589     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
47590       return Res;
47591   }
47592 
47593   // Attempt to detect an expanded vXi64 SIGN_EXTEND_INREG vXi1 pattern, and
47594   // convert to a splatted v2Xi32 SIGN_EXTEND_INREG pattern:
47595   // psrad(pshufd(psllq(X,63),1,1,3,3),31) ->
47596   // pshufd(psrad(pslld(X,31),31),0,0,2,2).
47597   if (Opcode == X86ISD::VSRAI && NumBitsPerElt == 32 && ShiftVal == 31 &&
47598       N0.getOpcode() == X86ISD::PSHUFD &&
47599       N0.getConstantOperandVal(1) == getV4X86ShuffleImm({1, 1, 3, 3}) &&
47600       N0->hasOneUse()) {
47601     SDValue BC = peekThroughOneUseBitcasts(N0.getOperand(0));
47602     if (BC.getOpcode() == X86ISD::VSHLI &&
47603         BC.getScalarValueSizeInBits() == 64 &&
47604         BC.getConstantOperandVal(1) == 63) {
47605       SDLoc DL(N);
47606       SDValue Src = BC.getOperand(0);
47607       Src = DAG.getBitcast(VT, Src);
47608       Src = DAG.getNode(X86ISD::PSHUFD, DL, VT, Src,
47609                         getV4X86ShuffleImm8ForMask({0, 0, 2, 2}, DL, DAG));
47610       Src = DAG.getNode(X86ISD::VSHLI, DL, VT, Src, N1);
47611       Src = DAG.getNode(X86ISD::VSRAI, DL, VT, Src, N1);
47612       return Src;
47613     }
47614   }
47615 
47616   auto TryConstantFold = [&](SDValue V) {
47617     APInt UndefElts;
47618     SmallVector<APInt, 32> EltBits;
47619     if (!getTargetConstantBitsFromNode(V, NumBitsPerElt, UndefElts, EltBits))
47620       return SDValue();
47621     assert(EltBits.size() == VT.getVectorNumElements() &&
47622            "Unexpected shift value type");
47623     // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
47624     // created an undef input due to no input bits being demanded, but user
47625     // still expects 0 in other bits.
47626     for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
47627       APInt &Elt = EltBits[i];
47628       if (UndefElts[i])
47629         Elt = 0;
47630       else if (X86ISD::VSHLI == Opcode)
47631         Elt <<= ShiftVal;
47632       else if (X86ISD::VSRAI == Opcode)
47633         Elt.ashrInPlace(ShiftVal);
47634       else
47635         Elt.lshrInPlace(ShiftVal);
47636     }
47637     // Reset undef elements since they were zeroed above.
47638     UndefElts = 0;
47639     return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
47640   };
47641 
47642   // Constant Folding.
47643   if (N->isOnlyUserOf(N0.getNode())) {
47644     if (SDValue C = TryConstantFold(N0))
47645       return C;
47646 
47647     // Fold (shift (logic X, C2), C1) -> (logic (shift X, C1), (shift C2, C1))
47648     // Don't break NOT patterns.
47649     SDValue BC = peekThroughOneUseBitcasts(N0);
47650     if (ISD::isBitwiseLogicOp(BC.getOpcode()) &&
47651         BC->isOnlyUserOf(BC.getOperand(1).getNode()) &&
47652         !ISD::isBuildVectorAllOnes(BC.getOperand(1).getNode())) {
47653       if (SDValue RHS = TryConstantFold(BC.getOperand(1))) {
47654         SDLoc DL(N);
47655         SDValue LHS = DAG.getNode(Opcode, DL, VT,
47656                                   DAG.getBitcast(VT, BC.getOperand(0)), N1);
47657         return DAG.getNode(BC.getOpcode(), DL, VT, LHS, RHS);
47658       }
47659     }
47660   }
47661 
47662   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47663   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBitsPerElt),
47664                                DCI))
47665     return SDValue(N, 0);
47666 
47667   return SDValue();
47668 }
47669 
47670 static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
47671                                    TargetLowering::DAGCombinerInfo &DCI,
47672                                    const X86Subtarget &Subtarget) {
47673   EVT VT = N->getValueType(0);
47674   unsigned Opcode = N->getOpcode();
47675   assert(((Opcode == X86ISD::PINSRB && VT == MVT::v16i8) ||
47676           (Opcode == X86ISD::PINSRW && VT == MVT::v8i16) ||
47677           Opcode == ISD::INSERT_VECTOR_ELT) &&
47678          "Unexpected vector insertion");
47679 
47680   SDValue Vec = N->getOperand(0);
47681   SDValue Scl = N->getOperand(1);
47682   SDValue Idx = N->getOperand(2);
47683 
47684   // Fold insert_vector_elt(undef, elt, 0) --> scalar_to_vector(elt).
47685   if (Opcode == ISD::INSERT_VECTOR_ELT && Vec.isUndef() && isNullConstant(Idx))
47686     return DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(N), VT, Scl);
47687 
47688   if (Opcode == X86ISD::PINSRB || Opcode == X86ISD::PINSRW) {
47689     unsigned NumBitsPerElt = VT.getScalarSizeInBits();
47690     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47691     if (TLI.SimplifyDemandedBits(SDValue(N, 0),
47692                                  APInt::getAllOnes(NumBitsPerElt), DCI))
47693       return SDValue(N, 0);
47694   }
47695 
47696   // Attempt to combine insertion patterns to a shuffle.
47697   if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
47698     SDValue Op(N, 0);
47699     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
47700       return Res;
47701   }
47702 
47703   return SDValue();
47704 }
47705 
47706 /// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
47707 /// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
47708 /// OR -> CMPNEQSS.
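/// For example, when both setccs read the same (FCMP X, Y), the condition pair
/// (COND_E, COND_NP) maps to SSE compare predicate 0 (CMPEQ) and the pair
/// (COND_NE, COND_P) maps to predicate 4 (CMPNEQ); the low bit of the FP
/// compare mask is then extracted as the boolean result.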
47709 static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
47710                                    TargetLowering::DAGCombinerInfo &DCI,
47711                                    const X86Subtarget &Subtarget) {
47712   unsigned opcode;
47713 
47714   // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
47715   // we're requiring SSE2 for both.
47716   if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
47717     SDValue N0 = N->getOperand(0);
47718     SDValue N1 = N->getOperand(1);
47719     SDValue CMP0 = N0.getOperand(1);
47720     SDValue CMP1 = N1.getOperand(1);
47721     SDLoc DL(N);
47722 
47723     // The SETCCs should both refer to the same CMP.
47724     if (CMP0.getOpcode() != X86ISD::FCMP || CMP0 != CMP1)
47725       return SDValue();
47726 
47727     SDValue CMP00 = CMP0->getOperand(0);
47728     SDValue CMP01 = CMP0->getOperand(1);
47729     EVT     VT    = CMP00.getValueType();
47730 
47731     if (VT == MVT::f32 || VT == MVT::f64 ||
47732         (VT == MVT::f16 && Subtarget.hasFP16())) {
47733       bool ExpectingFlags = false;
47734       // Check for any users that want flags:
47735       for (const SDNode *U : N->uses()) {
47736         if (ExpectingFlags)
47737           break;
47738 
47739         switch (U->getOpcode()) {
47740         default:
47741         case ISD::BR_CC:
47742         case ISD::BRCOND:
47743         case ISD::SELECT:
47744           ExpectingFlags = true;
47745           break;
47746         case ISD::CopyToReg:
47747         case ISD::SIGN_EXTEND:
47748         case ISD::ZERO_EXTEND:
47749         case ISD::ANY_EXTEND:
47750           break;
47751         }
47752       }
47753 
47754       if (!ExpectingFlags) {
47755         enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
47756         enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
47757 
47758         if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
47759           X86::CondCode tmp = cc0;
47760           cc0 = cc1;
47761           cc1 = tmp;
47762         }
47763 
47764         if ((cc0 == X86::COND_E  && cc1 == X86::COND_NP) ||
47765             (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
47766           // FIXME: need symbolic constants for these magic numbers.
47767           // See X86ATTInstPrinter.cpp:printSSECC().
47768           unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
47769           if (Subtarget.hasAVX512()) {
47770             SDValue FSetCC =
47771                 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
47772                             DAG.getTargetConstant(x86cc, DL, MVT::i8));
47773             // Need to fill with zeros to ensure the bitcast will produce zeroes
47774             // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
47775             SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
47776                                       DAG.getConstant(0, DL, MVT::v16i1),
47777                                       FSetCC, DAG.getIntPtrConstant(0, DL));
47778             return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
47779                                       N->getSimpleValueType(0));
47780           }
47781           SDValue OnesOrZeroesF =
47782               DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
47783                           CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));
47784 
47785           bool is64BitFP = (CMP00.getValueType() == MVT::f64);
47786           MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
47787 
47788           if (is64BitFP && !Subtarget.is64Bit()) {
47789             // On a 32-bit target, we cannot bitcast the 64-bit float to a
47790             // 64-bit integer, since that's not a legal type. Since
47791             // OnesOrZeroesF is all ones or all zeroes, we don't need all the
47792             // bits, but can do this little dance to extract the lowest 32 bits
47793             // and work with those going forward.
47794             SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
47795                                            OnesOrZeroesF);
47796             SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
47797             OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
47798                                         Vector32, DAG.getIntPtrConstant(0, DL));
47799             IntVT = MVT::i32;
47800           }
47801 
47802           SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
47803           SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
47804                                       DAG.getConstant(1, DL, IntVT));
47805           SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
47806                                               ANDed);
47807           return OneBitOfTruth;
47808         }
47809       }
47810     }
47811   }
47812   return SDValue();
47813 }
47814 
47815 /// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
47816 static SDValue combineAndNotIntoANDNP(SDNode *N, SelectionDAG &DAG) {
47817   assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");
47818 
47819   MVT VT = N->getSimpleValueType(0);
47820   if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
47821     return SDValue();
47822 
47823   SDValue X, Y;
47824   SDValue N0 = N->getOperand(0);
47825   SDValue N1 = N->getOperand(1);
47826 
47827   if (SDValue Not = IsNOT(N0, DAG)) {
47828     X = Not;
47829     Y = N1;
47830   } else if (SDValue Not = IsNOT(N1, DAG)) {
47831     X = Not;
47832     Y = N0;
47833   } else
47834     return SDValue();
47835 
47836   X = DAG.getBitcast(VT, X);
47837   Y = DAG.getBitcast(VT, Y);
47838   return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
47839 }
47840 
47841 /// Try to fold:
47842 ///   and (vector_shuffle<Z,...,Z>
47843 ///            (insert_vector_elt undef, (xor X, -1), Z), undef), Y
47844 ///   ->
47845 ///   andnp (vector_shuffle<Z,...,Z>
47846 ///              (insert_vector_elt undef, X, Z), undef), Y
47847 static SDValue combineAndShuffleNot(SDNode *N, SelectionDAG &DAG,
47848                                     const X86Subtarget &Subtarget) {
47849   assert(N->getOpcode() == ISD::AND && "Unexpected opcode combine into ANDNP");
47850 
47851   EVT VT = N->getValueType(0);
47852   // Do not split 256 and 512 bit vectors with SSE2 as they overwrite original
47853   // value and require extra moves.
47854   if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
47855         ((VT.is256BitVector() || VT.is512BitVector()) && Subtarget.hasAVX())))
47856     return SDValue();
47857 
47858   auto GetNot = [&DAG](SDValue V) {
47859     auto *SVN = dyn_cast<ShuffleVectorSDNode>(peekThroughOneUseBitcasts(V));
47860     // TODO: SVN->hasOneUse() is a strong condition. It can be relaxed if all
47861     // end-users are ISD::AND, including cases like
47862     // (and(extract_vector_element(SVN), Y)).
47863     if (!SVN || !SVN->hasOneUse() || !SVN->isSplat() ||
47864         !SVN->getOperand(1).isUndef()) {
47865       return SDValue();
47866     }
47867     SDValue IVEN = SVN->getOperand(0);
47868     if (IVEN.getOpcode() != ISD::INSERT_VECTOR_ELT ||
47869         !IVEN.getOperand(0).isUndef() || !IVEN.hasOneUse())
47870       return SDValue();
47871     if (!isa<ConstantSDNode>(IVEN.getOperand(2)) ||
47872         IVEN.getConstantOperandAPInt(2) != SVN->getSplatIndex())
47873       return SDValue();
47874     SDValue Src = IVEN.getOperand(1);
47875     if (SDValue Not = IsNOT(Src, DAG)) {
47876       SDValue NotSrc = DAG.getBitcast(Src.getValueType(), Not);
47877       SDValue NotIVEN =
47878           DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(IVEN), IVEN.getValueType(),
47879                       IVEN.getOperand(0), NotSrc, IVEN.getOperand(2));
47880       return DAG.getVectorShuffle(SVN->getValueType(0), SDLoc(SVN), NotIVEN,
47881                                   SVN->getOperand(1), SVN->getMask());
47882     }
47883     return SDValue();
47884   };
47885 
47886   SDValue X, Y;
47887   SDValue N0 = N->getOperand(0);
47888   SDValue N1 = N->getOperand(1);
47889   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47890 
47891   if (SDValue Not = GetNot(N0)) {
47892     X = Not;
47893     Y = N1;
47894   } else if (SDValue Not = GetNot(N1)) {
47895     X = Not;
47896     Y = N0;
47897   } else
47898     return SDValue();
47899 
47900   X = DAG.getBitcast(VT, X);
47901   Y = DAG.getBitcast(VT, Y);
47902   SDLoc DL(N);
47903 
47904   // We do not split for SSE at all, but we need to split vectors for AVX1 and
47905   // AVX2.
47906   if (!Subtarget.useAVX512Regs() && VT.is512BitVector() &&
47907       TLI.isTypeLegal(VT.getHalfNumVectorElementsVT(*DAG.getContext()))) {
47908     SDValue LoX, HiX;
47909     std::tie(LoX, HiX) = splitVector(X, DAG, DL);
47910     SDValue LoY, HiY;
47911     std::tie(LoY, HiY) = splitVector(Y, DAG, DL);
47912     EVT SplitVT = LoX.getValueType();
47913     SDValue LoV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {LoX, LoY});
47914     SDValue HiV = DAG.getNode(X86ISD::ANDNP, DL, SplitVT, {HiX, HiY});
47915     return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, {LoV, HiV});
47916   }
47917 
47918   if (TLI.isTypeLegal(VT))
47919     return DAG.getNode(X86ISD::ANDNP, DL, VT, {X, Y});
47920 
47921   return SDValue();
47922 }
47923 
47924 // Try to widen AND, OR and XOR nodes to VT in order to remove casts around
47925 // logical operations, like in the example below.
47926 //   or (and (truncate x, truncate y)),
47927 //      (xor (truncate z, build_vector (constants)))
47928 // Given a target type \p VT, we generate
47929 //   or (and x, y), (xor z, zext(build_vector (constants)))
47930 // given x, y and z are of type \p VT. We can do so, if operands are either
47931 // truncates from VT types, the second operand is a vector of constants or can
47932 // be recursively promoted.
47933 static SDValue PromoteMaskArithmetic(SDNode *N, EVT VT, SelectionDAG &DAG,
47934                                      unsigned Depth) {
47935   // Limit recursion to avoid excessive compile times.
47936   if (Depth >= SelectionDAG::MaxRecursionDepth)
47937     return SDValue();
47938 
47939   if (N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND &&
47940       N->getOpcode() != ISD::OR)
47941     return SDValue();
47942 
47943   SDValue N0 = N->getOperand(0);
47944   SDValue N1 = N->getOperand(1);
47945   SDLoc DL(N);
47946 
47947   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47948   if (!TLI.isOperationLegalOrPromote(N->getOpcode(), VT))
47949     return SDValue();
47950 
47951   if (SDValue NN0 = PromoteMaskArithmetic(N0.getNode(), VT, DAG, Depth + 1))
47952     N0 = NN0;
47953   else {
47954     // The left side has to be a trunc.
47955     if (N0.getOpcode() != ISD::TRUNCATE)
47956       return SDValue();
47957 
47958     // The type of the truncated inputs.
47959     if (N0.getOperand(0).getValueType() != VT)
47960       return SDValue();
47961 
47962     N0 = N0.getOperand(0);
47963   }
47964 
47965   if (SDValue NN1 = PromoteMaskArithmetic(N1.getNode(), VT, DAG, Depth + 1))
47966     N1 = NN1;
47967   else {
47968     // The right side has to be a 'trunc' or a constant vector.
47969     bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
47970                     N1.getOperand(0).getValueType() == VT;
47971     if (!RHSTrunc && !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
47972       return SDValue();
47973 
47974     if (RHSTrunc)
47975       N1 = N1.getOperand(0);
47976     else
47977       N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
47978   }
47979 
47980   return DAG.getNode(N->getOpcode(), DL, VT, N0, N1);
47981 }
47982 
47983 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
47984 // register. In most cases we actually compare or select YMM-sized registers
47985 // and mixing the two types creates horrible code. This method optimizes
47986 // some of the transition sequences.
47987 // Even with AVX-512 this is still useful for removing casts around logical
47988 // operations on vXi1 mask types.
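// e.g. (v8i32 zero_extend (v8i16 and (trunc X), (trunc Y))), with X and Y of
// type v8i32, becomes (zero_extend_inreg (v8i32 and X, Y), v8i16), so the
// logic operation itself stays at the wide type.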
47989 static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
47990                                      const X86Subtarget &Subtarget) {
47991   EVT VT = N->getValueType(0);
47992   assert(VT.isVector() && "Expected vector type");
47993 
47994   SDLoc DL(N);
47995   assert((N->getOpcode() == ISD::ANY_EXTEND ||
47996           N->getOpcode() == ISD::ZERO_EXTEND ||
47997           N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
47998 
47999   SDValue Narrow = N->getOperand(0);
48000   EVT NarrowVT = Narrow.getValueType();
48001 
48002   // Generate the wide operation.
48003   SDValue Op = PromoteMaskArithmetic(Narrow.getNode(), VT, DAG, 0);
48004   if (!Op)
48005     return SDValue();
48006   switch (N->getOpcode()) {
48007   default: llvm_unreachable("Unexpected opcode");
48008   case ISD::ANY_EXTEND:
48009     return Op;
48010   case ISD::ZERO_EXTEND:
48011     return DAG.getZeroExtendInReg(Op, DL, NarrowVT);
48012   case ISD::SIGN_EXTEND:
48013     return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
48014                        Op, DAG.getValueType(NarrowVT));
48015   }
48016 }
48017 
48018 static unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {
48019   unsigned FPOpcode;
48020   switch (Opcode) {
48021   default: llvm_unreachable("Unexpected input node for FP logic conversion");
48022   case ISD::AND: FPOpcode = X86ISD::FAND; break;
48023   case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
48024   case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
48025   }
48026   return FPOpcode;
48027 }
48028 
48029 /// If both input operands of a logic op are being cast from floating-point
48030 /// types or FP compares, try to convert this into a floating-point logic node
48031 /// to avoid unnecessary moves from SSE to integer registers.
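/// e.g. (i32 and (bitcast f32 X), (bitcast f32 Y)) --> (bitcast (FAND X, Y)),
/// and for i1 logic of scalar FP setccs the compares are widened to 128-bit
/// vector setccs so the compare and the logic stay in the SSE domain.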
48032 static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
48033                                         TargetLowering::DAGCombinerInfo &DCI,
48034                                         const X86Subtarget &Subtarget) {
48035   EVT VT = N->getValueType(0);
48036   SDValue N0 = N->getOperand(0);
48037   SDValue N1 = N->getOperand(1);
48038   SDLoc DL(N);
48039 
48040   if (!((N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST) ||
48041         (N0.getOpcode() == ISD::SETCC && N1.getOpcode() == ISD::SETCC)))
48042     return SDValue();
48043 
48044   SDValue N00 = N0.getOperand(0);
48045   SDValue N10 = N1.getOperand(0);
48046   EVT N00Type = N00.getValueType();
48047   EVT N10Type = N10.getValueType();
48048 
48049   // Ensure that both types are the same and are legal scalar fp types.
48050   if (N00Type != N10Type || !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
48051                               (Subtarget.hasSSE2() && N00Type == MVT::f64) ||
48052                               (Subtarget.hasFP16() && N00Type == MVT::f16)))
48053     return SDValue();
48054 
48055   if (N0.getOpcode() == ISD::BITCAST && !DCI.isBeforeLegalizeOps()) {
48056     unsigned FPOpcode = convertIntLogicToFPLogicOpcode(N->getOpcode());
48057     SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
48058     return DAG.getBitcast(VT, FPLogic);
48059   }
48060 
48061   if (VT != MVT::i1 || N0.getOpcode() != ISD::SETCC || !N0.hasOneUse() ||
48062       !N1.hasOneUse())
48063     return SDValue();
48064 
48065   ISD::CondCode CC0 = cast<CondCodeSDNode>(N0.getOperand(2))->get();
48066   ISD::CondCode CC1 = cast<CondCodeSDNode>(N1.getOperand(2))->get();
48067 
48068   // The vector ISA for FP predicates is incomplete before AVX, so converting
48069   // COMIS* to CMPS* may not be a win before AVX.
48070   if (!Subtarget.hasAVX() &&
48071       !(cheapX86FSETCC_SSE(CC0) && cheapX86FSETCC_SSE(CC1)))
48072     return SDValue();
48073 
48074   // Convert scalar FP compares and logic to vector compares (COMIS* to CMPS*)
48075   // and vector logic:
48076   // logic (setcc N00, N01), (setcc N10, N11) -->
48077   // extelt (logic (setcc (s2v N00), (s2v N01)), setcc (s2v N10), (s2v N11))), 0
48078   unsigned NumElts = 128 / N00Type.getSizeInBits();
48079   EVT VecVT = EVT::getVectorVT(*DAG.getContext(), N00Type, NumElts);
48080   EVT BoolVecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
48081   SDValue ZeroIndex = DAG.getVectorIdxConstant(0, DL);
48082   SDValue N01 = N0.getOperand(1);
48083   SDValue N11 = N1.getOperand(1);
48084   SDValue Vec00 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N00);
48085   SDValue Vec01 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N01);
48086   SDValue Vec10 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N10);
48087   SDValue Vec11 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, N11);
48088   SDValue Setcc0 = DAG.getSetCC(DL, BoolVecVT, Vec00, Vec01, CC0);
48089   SDValue Setcc1 = DAG.getSetCC(DL, BoolVecVT, Vec10, Vec11, CC1);
48090   SDValue Logic = DAG.getNode(N->getOpcode(), DL, BoolVecVT, Setcc0, Setcc1);
48091   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Logic, ZeroIndex);
48092 }
48093 
48094 // Attempt to fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
48095 // to reduce XMM->GPR traffic.
48096 static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
48097   unsigned Opc = N->getOpcode();
48098   assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48099          "Unexpected bit opcode");
48100 
48101   SDValue N0 = N->getOperand(0);
48102   SDValue N1 = N->getOperand(1);
48103 
48104   // Both operands must be single use MOVMSK.
48105   if (N0.getOpcode() != X86ISD::MOVMSK || !N0.hasOneUse() ||
48106       N1.getOpcode() != X86ISD::MOVMSK || !N1.hasOneUse())
48107     return SDValue();
48108 
48109   SDValue Vec0 = N0.getOperand(0);
48110   SDValue Vec1 = N1.getOperand(0);
48111   EVT VecVT0 = Vec0.getValueType();
48112   EVT VecVT1 = Vec1.getValueType();
48113 
48114   // Both MOVMSK operands must be from vectors of the same size and same element
48115   // size, but it's OK for them to differ between fp and int types.
48116   if (VecVT0.getSizeInBits() != VecVT1.getSizeInBits() ||
48117       VecVT0.getScalarSizeInBits() != VecVT1.getScalarSizeInBits())
48118     return SDValue();
48119 
48120   SDLoc DL(N);
48121   unsigned VecOpc =
48122       VecVT0.isFloatingPoint() ? convertIntLogicToFPLogicOpcode(Opc) : Opc;
48123   SDValue Result =
48124       DAG.getNode(VecOpc, DL, VecVT0, Vec0, DAG.getBitcast(VecVT0, Vec1));
48125   return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
48126 }
48127 
48128 // Attempt to fold BITOP(SHIFT(X,Z),SHIFT(Y,Z)) -> SHIFT(BITOP(X,Y),Z).
48129 // NOTE: This is a very limited case of what SimplifyUsingDistributiveLaws
48130 // handles in InstCombine.
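// e.g. (xor (vsrli X, 4), (vsrli Y, 4)) --> (vsrli (xor X, Y), 4), as long as
// both shifts use the same opcode and the same immediate amount.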
48131 static SDValue combineBitOpWithShift(SDNode *N, SelectionDAG &DAG) {
48132   unsigned Opc = N->getOpcode();
48133   assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48134          "Unexpected bit opcode");
48135 
48136   SDValue N0 = N->getOperand(0);
48137   SDValue N1 = N->getOperand(1);
48138   EVT VT = N->getValueType(0);
48139 
48140   // Both operands must be single use.
48141   if (!N0.hasOneUse() || !N1.hasOneUse())
48142     return SDValue();
48143 
48144   // Search for matching shifts.
48145   SDValue BC0 = peekThroughOneUseBitcasts(N0);
48146   SDValue BC1 = peekThroughOneUseBitcasts(N1);
48147 
48148   unsigned BCOpc = BC0.getOpcode();
48149   EVT BCVT = BC0.getValueType();
48150   if (BCOpc != BC1->getOpcode() || BCVT != BC1.getValueType())
48151     return SDValue();
48152 
48153   switch (BCOpc) {
48154   case X86ISD::VSHLI:
48155   case X86ISD::VSRLI:
48156   case X86ISD::VSRAI: {
48157     if (BC0.getOperand(1) != BC1.getOperand(1))
48158       return SDValue();
48159 
48160     SDLoc DL(N);
48161     SDValue BitOp =
48162         DAG.getNode(Opc, DL, BCVT, BC0.getOperand(0), BC1.getOperand(0));
48163     SDValue Shift = DAG.getNode(BCOpc, DL, BCVT, BitOp, BC0.getOperand(1));
48164     return DAG.getBitcast(VT, Shift);
48165   }
48166   }
48167 
48168   return SDValue();
48169 }
48170 
48171 // Attempt to fold:
48172 // BITOP(PACKSS(X,Z),PACKSS(Y,W)) --> PACKSS(BITOP(X,Y),BITOP(Z,W)).
48173 // TODO: Handle PACKUS as well.
48174 static SDValue combineBitOpWithPACK(SDNode *N, SelectionDAG &DAG) {
48175   unsigned Opc = N->getOpcode();
48176   assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
48177          "Unexpected bit opcode");
48178 
48179   SDValue N0 = N->getOperand(0);
48180   SDValue N1 = N->getOperand(1);
48181   EVT VT = N->getValueType(0);
48182 
48183   // Both operands must be single use.
48184   if (!N0.hasOneUse() || !N1.hasOneUse())
48185     return SDValue();
48186 
48187   // Search for matching packs.
48188   N0 = peekThroughOneUseBitcasts(N0);
48189   N1 = peekThroughOneUseBitcasts(N1);
48190 
48191   if (N0.getOpcode() != X86ISD::PACKSS || N1.getOpcode() != X86ISD::PACKSS)
48192     return SDValue();
48193 
48194   MVT DstVT = N0.getSimpleValueType();
48195   if (DstVT != N1.getSimpleValueType())
48196     return SDValue();
48197 
48198   MVT SrcVT = N0.getOperand(0).getSimpleValueType();
48199   unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
48200 
48201   // Limit to allsignbits packing.
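  // If every source element is known to be 0 or -1, PACKSS just truncates, so
  // performing the bit operation before the pack gives the same result.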
48202   if (DAG.ComputeNumSignBits(N0.getOperand(0)) != NumSrcBits ||
48203       DAG.ComputeNumSignBits(N0.getOperand(1)) != NumSrcBits ||
48204       DAG.ComputeNumSignBits(N1.getOperand(0)) != NumSrcBits ||
48205       DAG.ComputeNumSignBits(N1.getOperand(1)) != NumSrcBits)
48206     return SDValue();
48207 
48208   SDLoc DL(N);
48209   SDValue LHS = DAG.getNode(Opc, DL, SrcVT, N0.getOperand(0), N1.getOperand(0));
48210   SDValue RHS = DAG.getNode(Opc, DL, SrcVT, N0.getOperand(1), N1.getOperand(1));
48211   return DAG.getBitcast(VT, DAG.getNode(X86ISD::PACKSS, DL, DstVT, LHS, RHS));
48212 }
48213 
48214 /// If this is a zero/all-bits result that is bitwise-anded with a low-bits
48215 /// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
48216 /// with a shift-right to eliminate loading the vector constant mask value.
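/// e.g. if each 32-bit element of X is known to be all-zeros or all-ones,
/// (and X, splat(0x0000FFFF)) --> (vsrli X, 16).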
48217 static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
48218                                      const X86Subtarget &Subtarget) {
48219   SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
48220   SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
48221   EVT VT = Op0.getValueType();
48222   if (VT != Op1.getValueType() || !VT.isSimple() || !VT.isInteger())
48223     return SDValue();
48224 
48225   // Try to convert an "is positive" signbit masking operation into arithmetic
48226   // shift and "andn". This saves a materialization of a -1 vector constant.
48227   // The "is negative" variant should be handled more generally because it only
48228   // requires "and" rather than "andn":
48229   // and (pcmpgt X, -1), Y --> pandn (vsrai X, BitWidth - 1), Y
48230   //
48231   // This is limited to the original type to avoid producing even more bitcasts.
48232   // If the bitcasts can't be eliminated, then it is unlikely that this fold
48233   // will be profitable.
48234   if (N->getValueType(0) == VT &&
48235       supportedVectorShiftWithImm(VT, Subtarget, ISD::SRA)) {
48236     SDValue X, Y;
48237     if (Op1.getOpcode() == X86ISD::PCMPGT &&
48238         isAllOnesOrAllOnesSplat(Op1.getOperand(1)) && Op1.hasOneUse()) {
48239       X = Op1.getOperand(0);
48240       Y = Op0;
48241     } else if (Op0.getOpcode() == X86ISD::PCMPGT &&
48242                isAllOnesOrAllOnesSplat(Op0.getOperand(1)) && Op0.hasOneUse()) {
48243       X = Op0.getOperand(0);
48244       Y = Op1;
48245     }
48246     if (X && Y) {
48247       SDLoc DL(N);
48248       SDValue Sra =
48249           getTargetVShiftByConstNode(X86ISD::VSRAI, DL, VT.getSimpleVT(), X,
48250                                      VT.getScalarSizeInBits() - 1, DAG);
48251       return DAG.getNode(X86ISD::ANDNP, DL, VT, Sra, Y);
48252     }
48253   }
48254 
48255   APInt SplatVal;
48256   if (!X86::isConstantSplat(Op1, SplatVal, false) || !SplatVal.isMask())
48257     return SDValue();
48258 
48259   // Don't prevent creation of ANDN.
48260   if (isBitwiseNot(Op0))
48261     return SDValue();
48262 
48263   if (!supportedVectorShiftWithImm(VT, Subtarget, ISD::SRL))
48264     return SDValue();
48265 
48266   unsigned EltBitWidth = VT.getScalarSizeInBits();
48267   if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
48268     return SDValue();
48269 
48270   SDLoc DL(N);
48271   unsigned ShiftVal = SplatVal.countr_one();
48272   SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
48273   SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT, Op0, ShAmt);
48274   return DAG.getBitcast(N->getValueType(0), Shift);
48275 }
48276 
48277 // Get the index node from the lowered DAG of a GEP IR instruction with one
48278 // indexing dimension.
48279 static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
48280   if (Ld->isIndexed())
48281     return SDValue();
48282 
48283   SDValue Base = Ld->getBasePtr();
48284 
48285   if (Base.getOpcode() != ISD::ADD)
48286     return SDValue();
48287 
48288   SDValue ShiftedIndex = Base.getOperand(0);
48289 
48290   if (ShiftedIndex.getOpcode() != ISD::SHL)
48291     return SDValue();
48292 
48293   return ShiftedIndex.getOperand(0);
48294 
48295 }
48296 
48297 static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
48298   if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
48299     switch (VT.getSizeInBits()) {
48300     default: return false;
48301     case 64: return Subtarget.is64Bit();
48302     case 32: return true;
48303     }
48304   }
48305   return false;
48306 }
48307 
48308 // This function recognizes cases where the X86 bzhi instruction can replace an
48309 // 'and-load' sequence.
48310 // When an integer value is loaded from an array of constants defined as
48311 // follows:
48312 //
48313 //   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF, ..., 2^(SIZE-1) - 1}
48314 //
48315 // and a bitwise and is then applied to the loaded value and another input,
48316 // this is equivalent to performing bzhi (zero high bits) on the other input,
48317 // using the same index as the load.
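// e.g. with a 32-bit element type, array[idx] == (1u << idx) - 1, so
// (and (load array[idx]), x) == (bzhi x, idx).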
48318 static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
48319                                     const X86Subtarget &Subtarget) {
48320   MVT VT = Node->getSimpleValueType(0);
48321   SDLoc dl(Node);
48322 
48323   // Check if subtarget has BZHI instruction for the node's type
48324   if (!hasBZHI(Subtarget, VT))
48325     return SDValue();
48326 
48327   // Try matching the pattern for both operands.
48328   for (unsigned i = 0; i < 2; i++) {
48329     SDValue N = Node->getOperand(i);
48330     LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());
48331 
48332     // Bail out if the operand is not a load instruction.
48333     if (!Ld)
48334       return SDValue();
48335 
48336     const Value *MemOp = Ld->getMemOperand()->getValue();
48337 
48338     if (!MemOp)
48339       return SDValue();
48340 
48341     if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
48342       if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
48343         if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
48344 
48345           Constant *Init = GV->getInitializer();
48346           Type *Ty = Init->getType();
48347           if (!isa<ConstantDataArray>(Init) ||
48348               !Ty->getArrayElementType()->isIntegerTy() ||
48349               Ty->getArrayElementType()->getScalarSizeInBits() !=
48350                   VT.getSizeInBits() ||
48351               Ty->getArrayNumElements() >
48352                   Ty->getArrayElementType()->getScalarSizeInBits())
48353             continue;
48354 
48355           // Check if the array's constant elements are suitable to our case.
48356           uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
48357           bool ConstantsMatch = true;
48358           for (uint64_t j = 0; j < ArrayElementCount; j++) {
48359             auto *Elem = cast<ConstantInt>(Init->getAggregateElement(j));
48360             if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
48361               ConstantsMatch = false;
48362               break;
48363             }
48364           }
48365           if (!ConstantsMatch)
48366             continue;
48367 
48368           // Do the transformation (For 32-bit type):
48369           // -> (and (load arr[idx]), inp)
48370           // <- (and (srl 0xFFFFFFFF, (sub 32, idx)))
48371           //    that will be replaced with one bzhi instruction.
48372           SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
48373           SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);
48374 
48375           // Get the Node which indexes into the array.
48376           SDValue Index = getIndexFromUnindexedLoad(Ld);
48377           if (!Index)
48378             return SDValue();
48379           Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);
48380 
48381           SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
48382           Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);
48383 
48384           SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
48385           SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);
48386 
48387           return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
48388         }
48389       }
48390     }
48391   }
48392   return SDValue();
48393 }
48394 
48395 // Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef, ...))), C)
48396 // where C is a mask containing the same number of bits as the setcc and
48397 // where the setcc will freely zero the upper bits of the k-register. We can
48398 // replace the undef in the concat with 0s and remove the AND. This mainly
48399 // helps with v2i1/v4i1 setcc being cast to scalar.
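// e.g. for (and (i16 (bitcast (v16i1 (concat_vectors (v4i1 setcc), undef,
// undef, undef)))), 15), replacing the undef subvectors with zeroes makes the
// AND with 15 redundant.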
48400 static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
48401                                              const X86Subtarget &Subtarget) {
48402   assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");
48403 
48404   EVT VT = N->getValueType(0);
48405 
48406   // Make sure this is an AND with constant. We will check the value of the
48407   // constant later.
48408   auto *C1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
48409   if (!C1)
48410     return SDValue();
48411 
48412   // This is implied by the ConstantSDNode.
48413   assert(!VT.isVector() && "Expected scalar VT!");
48414 
48415   SDValue Src = N->getOperand(0);
48416   if (!Src.hasOneUse())
48417     return SDValue();
48418 
48419   // (Optionally) peek through any_extend().
48420   if (Src.getOpcode() == ISD::ANY_EXTEND) {
48421     if (!Src.getOperand(0).hasOneUse())
48422       return SDValue();
48423     Src = Src.getOperand(0);
48424   }
48425 
48426   if (Src.getOpcode() != ISD::BITCAST || !Src.getOperand(0).hasOneUse())
48427     return SDValue();
48428 
48429   Src = Src.getOperand(0);
48430   EVT SrcVT = Src.getValueType();
48431 
48432   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48433   if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
48434       !TLI.isTypeLegal(SrcVT))
48435     return SDValue();
48436 
48437   if (Src.getOpcode() != ISD::CONCAT_VECTORS)
48438     return SDValue();
48439 
48440   // We only care about the first subvector of the concat; we expect the
48441   // other subvectors to be ignored due to the AND if we make the change.
48442   SDValue SubVec = Src.getOperand(0);
48443   EVT SubVecVT = SubVec.getValueType();
48444 
48445   // The RHS of the AND should be a mask with as many bits as SubVec.
48446   if (!TLI.isTypeLegal(SubVecVT) ||
48447       !C1->getAPIntValue().isMask(SubVecVT.getVectorNumElements()))
48448     return SDValue();
48449 
48450   // The first subvector should be a setcc with a legal result type or an
48451   // AND containing at least one setcc with a legal result type.
48452   auto IsLegalSetCC = [&](SDValue V) {
48453     if (V.getOpcode() != ISD::SETCC)
48454       return false;
48455     EVT SetccVT = V.getOperand(0).getValueType();
48456     if (!TLI.isTypeLegal(SetccVT) ||
48457         !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
48458       return false;
48459     if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
48460       return false;
48461     return true;
48462   };
48463   if (!(IsLegalSetCC(SubVec) || (SubVec.getOpcode() == ISD::AND &&
48464                                  (IsLegalSetCC(SubVec.getOperand(0)) ||
48465                                   IsLegalSetCC(SubVec.getOperand(1))))))
48466     return SDValue();
48467 
48468   // We passed all the checks. Rebuild the concat_vectors with zeroes
48469   // and cast it back to VT.
48470   SDLoc dl(N);
48471   SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
48472                               DAG.getConstant(0, dl, SubVecVT));
48473   Ops[0] = SubVec;
48474   SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT,
48475                                Ops);
48476   EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), SrcVT.getSizeInBits());
48477   return DAG.getZExtOrTrunc(DAG.getBitcast(IntVT, Concat), dl, VT);
48478 }
48479 
48480 static SDValue getBMIMatchingOp(unsigned Opc, SelectionDAG &DAG,
48481                                 SDValue OpMustEq, SDValue Op, unsigned Depth) {
48482   // We don't want to go crazy with the recursion here. This isn't a super
48483   // important optimization.
48484   static constexpr unsigned kMaxDepth = 2;
48485 
48486   // Only do this re-ordering if op has one use.
48487   if (!Op.hasOneUse())
48488     return SDValue();
48489 
48490   SDLoc DL(Op);
48491   // If we hit another associative op, recurse further.
48492   if (Op.getOpcode() == Opc) {
48493     // Done recursing.
48494     if (Depth++ >= kMaxDepth)
48495       return SDValue();
48496 
48497     for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx)
48498       if (SDValue R =
48499               getBMIMatchingOp(Opc, DAG, OpMustEq, Op.getOperand(OpIdx), Depth))
48500         return DAG.getNode(Op.getOpcode(), DL, Op.getValueType(), R,
48501                            Op.getOperand(1 - OpIdx));
48502 
48503   } else if (Op.getOpcode() == ISD::SUB) {
48504     if (Opc == ISD::AND) {
48505       // BLSI: (and x, (sub 0, x))
48506       if (isNullConstant(Op.getOperand(0)) && Op.getOperand(1) == OpMustEq)
48507         return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
48508     }
48509     // Opc must be ISD::AND or ISD::XOR
48510     // BLSR: (and x, (sub x, 1))
48511     // BLSMSK: (xor x, (sub x, 1))
48512     if (isOneConstant(Op.getOperand(1)) && Op.getOperand(0) == OpMustEq)
48513       return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
48514 
48515   } else if (Op.getOpcode() == ISD::ADD) {
48516     // Opc must be ISD::AND or ISD::XOR
48517     // BLSR: (and x, (add x, -1))
48518     // BLSMSK: (xor x, (add x, -1))
48519     if (isAllOnesConstant(Op.getOperand(1)) && Op.getOperand(0) == OpMustEq)
48520       return DAG.getNode(Opc, DL, Op.getValueType(), OpMustEq, Op);
48521   }
48522   return SDValue();
48523 }
48524 
48525 static SDValue combineBMILogicOp(SDNode *N, SelectionDAG &DAG,
48526                                  const X86Subtarget &Subtarget) {
48527   EVT VT = N->getValueType(0);
48528   // Make sure this node is a candidate for BMI instructions.
48529   if (!Subtarget.hasBMI() || !VT.isScalarInteger() ||
48530       (VT != MVT::i32 && VT != MVT::i64))
48531     return SDValue();
48532 
48533   assert(N->getOpcode() == ISD::AND || N->getOpcode() == ISD::XOR);
48534 
48535   // Try and match LHS and RHS.
48536   for (unsigned OpIdx = 0; OpIdx < 2; ++OpIdx)
48537     if (SDValue OpMatch =
48538             getBMIMatchingOp(N->getOpcode(), DAG, N->getOperand(OpIdx),
48539                              N->getOperand(1 - OpIdx), 0))
48540       return OpMatch;
48541   return SDValue();
48542 }
48543 
48544 static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
48545                           TargetLowering::DAGCombinerInfo &DCI,
48546                           const X86Subtarget &Subtarget) {
48547   SDValue N0 = N->getOperand(0);
48548   SDValue N1 = N->getOperand(1);
48549   EVT VT = N->getValueType(0);
48550   SDLoc dl(N);
48551   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48552 
48553   // If this is SSE1 only convert to FAND to avoid scalarization.
48554   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
48555     return DAG.getBitcast(MVT::v4i32,
48556                           DAG.getNode(X86ISD::FAND, dl, MVT::v4f32,
48557                                       DAG.getBitcast(MVT::v4f32, N0),
48558                                       DAG.getBitcast(MVT::v4f32, N1)));
48559   }
48560 
48561   // Use a 32-bit and+zext if upper bits known zero.
48562   if (VT == MVT::i64 && Subtarget.is64Bit() && !isa<ConstantSDNode>(N1)) {
48563     APInt HiMask = APInt::getHighBitsSet(64, 32);
48564     if (DAG.MaskedValueIsZero(N1, HiMask) ||
48565         DAG.MaskedValueIsZero(N0, HiMask)) {
48566       SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N0);
48567       SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N1);
48568       return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
48569                          DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
48570     }
48571   }
48572 
48573   // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
48574   // TODO: Support multiple SrcOps.
48575   if (VT == MVT::i1) {
48576     SmallVector<SDValue, 2> SrcOps;
48577     SmallVector<APInt, 2> SrcPartials;
48578     if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps, &SrcPartials) &&
48579         SrcOps.size() == 1) {
48580       unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
48581       EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
48582       SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
48583       if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
48584         Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
48585       if (Mask) {
48586         assert(SrcPartials[0].getBitWidth() == NumElts &&
48587                "Unexpected partial reduction mask");
48588         SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
48589         Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
48590         return DAG.getSetCC(dl, MVT::i1, Mask, PartialBits, ISD::SETEQ);
48591       }
48592     }
48593   }
48594 
48595   // InstCombine converts:
48596   //    `(-x << C0) & C1`
48597   // to
48598   //    `(x * (Pow2_Ceil(C1) - (1 << C0))) & C1`
48599   // This saves an IR instruction but on x86 the neg/shift version is preferable
48600   // so undo the transform.
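  // e.g. for i8: (and (mul X, 0xFC), 0xF0) is turned back into
  // (and (shl (sub 0, X), 2), 0xF0).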
48601 
48602   if (N0.getOpcode() == ISD::MUL && N0.hasOneUse()) {
48603     // TODO: We don't actually need a splat for this, we just need the checks to
48604     // hold for each element.
48605     ConstantSDNode *N1C = isConstOrConstSplat(N1, /*AllowUndefs*/ true,
48606                                               /*AllowTruncation*/ false);
48607     ConstantSDNode *N01C =
48608         isConstOrConstSplat(N0.getOperand(1), /*AllowUndefs*/ true,
48609                             /*AllowTruncation*/ false);
48610     if (N1C && N01C) {
48611       const APInt &MulC = N01C->getAPIntValue();
48612       const APInt &AndC = N1C->getAPIntValue();
48613       APInt MulCLowBit = MulC & (-MulC);
48614       if (MulC.uge(AndC) && !MulC.isPowerOf2() &&
48615           (MulCLowBit + MulC).isPowerOf2()) {
48616         SDValue Neg = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT),
48617                                   N0.getOperand(0));
48618         int32_t MulCLowBitLog = MulCLowBit.exactLogBase2();
48619         assert(MulCLowBitLog != -1 &&
48620                "Isolated lowbit is somehow not a power of 2!");
48621         SDValue Shift = DAG.getNode(ISD::SHL, dl, VT, Neg,
48622                                     DAG.getConstant(MulCLowBitLog, dl, VT));
48623         return DAG.getNode(ISD::AND, dl, VT, Shift, N1);
48624       }
48625     }
48626   }
48627 
48628   if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
48629     return V;
48630 
48631   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
48632     return R;
48633 
48634   if (SDValue R = combineBitOpWithShift(N, DAG))
48635     return R;
48636 
48637   if (SDValue R = combineBitOpWithPACK(N, DAG))
48638     return R;
48639 
48640   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
48641     return FPLogic;
48642 
48643   if (SDValue R = combineAndShuffleNot(N, DAG, Subtarget))
48644     return R;
48645 
48646   if (DCI.isBeforeLegalizeOps())
48647     return SDValue();
48648 
48649   if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
48650     return R;
48651 
48652   if (SDValue R = combineAndNotIntoANDNP(N, DAG))
48653     return R;
48654 
48655   if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
48656     return ShiftRight;
48657 
48658   if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
48659     return R;
48660 
48661   // fold (and (mul x, c1), c2) -> (mul x, (and c1, c2))
48662   // iff c2 is all/no bits mask - i.e. a select-with-zero mask.
48663   // TODO: Handle PMULDQ/PMULUDQ/VPMADDWD/VPMADDUBSW?
48664   if (VT.isVector() && getTargetConstantFromNode(N1)) {
48665     unsigned Opc0 = N0.getOpcode();
48666     if ((Opc0 == ISD::MUL || Opc0 == ISD::MULHU || Opc0 == ISD::MULHS) &&
48667         getTargetConstantFromNode(N0.getOperand(1)) &&
48668         DAG.ComputeNumSignBits(N1) == VT.getScalarSizeInBits() &&
48669         N0->hasOneUse() && N0.getOperand(1)->hasOneUse()) {
48670       SDValue MaskMul = DAG.getNode(ISD::AND, dl, VT, N0.getOperand(1), N1);
48671       return DAG.getNode(Opc0, dl, VT, N0.getOperand(0), MaskMul);
48672     }
48673   }
48674 
48675   // Fold AND(SRL(X,Y),1) -> SETCC(BT(X,Y), COND_B) iff Y is not a constant;
48676   // this avoids a slow variable shift (moving the shift amount to ECX etc.).
48677   if (isOneConstant(N1) && N0->hasOneUse()) {
48678     SDValue Src = N0;
48679     while ((Src.getOpcode() == ISD::ZERO_EXTEND ||
48680             Src.getOpcode() == ISD::TRUNCATE) &&
48681            Src.getOperand(0)->hasOneUse())
48682       Src = Src.getOperand(0);
48683     bool ContainsNOT = false;
48684     X86::CondCode X86CC = X86::COND_B;
48685     // Peek through AND(NOT(SRL(X,Y)),1).
48686     if (isBitwiseNot(Src)) {
48687       Src = Src.getOperand(0);
48688       X86CC = X86::COND_AE;
48689       ContainsNOT = true;
48690     }
48691     if (Src.getOpcode() == ISD::SRL &&
48692         !isa<ConstantSDNode>(Src.getOperand(1))) {
48693       SDValue BitNo = Src.getOperand(1);
48694       Src = Src.getOperand(0);
48695       // Peek through AND(SRL(NOT(X),Y),1).
48696       if (isBitwiseNot(Src)) {
48697         Src = Src.getOperand(0);
48698         X86CC = X86CC == X86::COND_AE ? X86::COND_B : X86::COND_AE;
48699         ContainsNOT = true;
48700       }
48701       // If we have BMI2 then SHRX should be faster for i32/i64 cases.
48702       if (!(Subtarget.hasBMI2() && !ContainsNOT && VT.getSizeInBits() >= 32))
48703         if (SDValue BT = getBT(Src, BitNo, dl, DAG))
48704           return DAG.getZExtOrTrunc(getSETCC(X86CC, BT, dl, DAG), dl, VT);
48705     }
48706   }
48707 
48708   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
48709     // Attempt to recursively combine a bitmask AND with shuffles.
48710     SDValue Op(N, 0);
48711     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48712       return Res;
48713 
48714     // If either operand is a constant mask, then only the elements that aren't
48715     // zero are actually demanded by the other operand.
48716     auto GetDemandedMasks = [&](SDValue Op) {
48717       APInt UndefElts;
48718       SmallVector<APInt> EltBits;
48719       int NumElts = VT.getVectorNumElements();
48720       int EltSizeInBits = VT.getScalarSizeInBits();
48721       APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
48722       APInt DemandedElts = APInt::getAllOnes(NumElts);
48723       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
48724                                         EltBits)) {
48725         DemandedBits.clearAllBits();
48726         DemandedElts.clearAllBits();
48727         for (int I = 0; I != NumElts; ++I) {
48728           if (UndefElts[I]) {
48729             // We can't assume an undef src element gives an undef dst - the
48730             // other src might be zero.
48731             DemandedBits.setAllBits();
48732             DemandedElts.setBit(I);
48733           } else if (!EltBits[I].isZero()) {
48734             DemandedBits |= EltBits[I];
48735             DemandedElts.setBit(I);
48736           }
48737         }
48738       }
48739       return std::make_pair(DemandedBits, DemandedElts);
48740     };
48741     APInt Bits0, Elts0;
48742     APInt Bits1, Elts1;
48743     std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
48744     std::tie(Bits1, Elts1) = GetDemandedMasks(N0);
48745 
48746     if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
48747         TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
48748         TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
48749         TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
48750       if (N->getOpcode() != ISD::DELETED_NODE)
48751         DCI.AddToWorklist(N);
48752       return SDValue(N, 0);
48753     }
48754 
48755     SDValue NewN0 = TLI.SimplifyMultipleUseDemandedBits(N0, Bits0, Elts0, DAG);
48756     SDValue NewN1 = TLI.SimplifyMultipleUseDemandedBits(N1, Bits1, Elts1, DAG);
48757     if (NewN0 || NewN1)
48758       return DAG.getNode(ISD::AND, dl, VT, NewN0 ? NewN0 : N0,
48759                          NewN1 ? NewN1 : N1);
48760   }
48761 
48762   // Attempt to combine a scalar bitmask AND with an extracted shuffle.
48763   if ((VT.getScalarSizeInBits() % 8) == 0 &&
48764       N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
48765       isa<ConstantSDNode>(N0.getOperand(1)) && N0->hasOneUse()) {
48766     SDValue BitMask = N1;
48767     SDValue SrcVec = N0.getOperand(0);
48768     EVT SrcVecVT = SrcVec.getValueType();
48769 
48770     // Check that the constant bitmask masks whole bytes.
48771     APInt UndefElts;
48772     SmallVector<APInt, 64> EltBits;
48773     if (VT == SrcVecVT.getScalarType() && N0->isOnlyUserOf(SrcVec.getNode()) &&
48774         getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
48775         llvm::all_of(EltBits, [](const APInt &M) {
48776           return M.isZero() || M.isAllOnes();
48777         })) {
48778       unsigned NumElts = SrcVecVT.getVectorNumElements();
48779       unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
48780       unsigned Idx = N0.getConstantOperandVal(1);
48781 
48782       // Create a root shuffle mask from the byte mask and the extracted index.
48783       SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
48784       for (unsigned i = 0; i != Scale; ++i) {
48785         if (UndefElts[i])
48786           continue;
48787         int VecIdx = Scale * Idx + i;
48788         ShuffleMask[VecIdx] = EltBits[i].isZero() ? SM_SentinelZero : VecIdx;
48789       }
48790 
48791       if (SDValue Shuffle = combineX86ShufflesRecursively(
48792               {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
48793               X86::MaxShuffleCombineDepth,
48794               /*HasVarMask*/ false, /*AllowVarCrossLaneMask*/ true,
48795               /*AllowVarPerLaneMask*/ true, DAG, Subtarget))
48796         return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Shuffle,
48797                            N0.getOperand(1));
48798     }
48799   }
48800 
48801   if (SDValue R = combineBMILogicOp(N, DAG, Subtarget))
48802     return R;
48803 
48804   return SDValue();
48805 }
48806 
48807 // Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
48808 static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
48809                                      const X86Subtarget &Subtarget) {
48810   assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
48811 
48812   MVT VT = N->getSimpleValueType(0);
48813   unsigned EltSizeInBits = VT.getScalarSizeInBits();
48814   if (!VT.isVector() || (EltSizeInBits % 8) != 0)
48815     return SDValue();
48816 
48817   SDValue N0 = peekThroughBitcasts(N->getOperand(0));
48818   SDValue N1 = peekThroughBitcasts(N->getOperand(1));
48819   if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
48820     return SDValue();
48821 
48822   // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
48823   // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
48824   if (!(Subtarget.hasXOP() || useVPTERNLOG(Subtarget, VT) ||
48825         !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
48826     return SDValue();
48827 
48828   // Attempt to extract constant byte masks.
48829   APInt UndefElts0, UndefElts1;
48830   SmallVector<APInt, 32> EltBits0, EltBits1;
48831   if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
48832                                      false, false))
48833     return SDValue();
48834   if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
48835                                      false, false))
48836     return SDValue();
48837 
48838   for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
48839     // TODO - add UNDEF elts support.
48840     if (UndefElts0[i] || UndefElts1[i])
48841       return SDValue();
48842     if (EltBits0[i] != ~EltBits1[i])
48843       return SDValue();
48844   }
48845 
48846   SDLoc DL(N);
48847 
48848   if (useVPTERNLOG(Subtarget, VT)) {
48849     // Emit a VPTERNLOG node directly - 0xCA is the imm code for A?B:C.
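    // (Per bit: A ? B : C == (A & B) | (~A & C); reading that truth table over
    // (A,B,C) gives 0b11001010 == 0xCA.)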
48850     // VPTERNLOG is only available as vXi32/64-bit types.
48851     MVT OpSVT = EltSizeInBits <= 32 ? MVT::i32 : MVT::i64;
48852     MVT OpVT =
48853         MVT::getVectorVT(OpSVT, VT.getSizeInBits() / OpSVT.getSizeInBits());
48854     SDValue A = DAG.getBitcast(OpVT, N0.getOperand(1));
48855     SDValue B = DAG.getBitcast(OpVT, N0.getOperand(0));
48856     SDValue C = DAG.getBitcast(OpVT, N1.getOperand(0));
48857     SDValue Imm = DAG.getTargetConstant(0xCA, DL, MVT::i8);
48858     SDValue Res = getAVX512Node(X86ISD::VPTERNLOG, DL, OpVT, {A, B, C, Imm},
48859                                 DAG, Subtarget);
48860     return DAG.getBitcast(VT, Res);
48861   }
48862 
48863   SDValue X = N->getOperand(0);
48864   SDValue Y =
48865       DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
48866                   DAG.getBitcast(VT, N1.getOperand(0)));
48867   return DAG.getNode(ISD::OR, DL, VT, X, Y);
48868 }
48869 
48870 // Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
48871 static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
48872   if (N->getOpcode() != ISD::OR)
48873     return false;
48874 
48875   SDValue N0 = N->getOperand(0);
48876   SDValue N1 = N->getOperand(1);
48877 
48878   // Canonicalize AND to LHS.
48879   if (N1.getOpcode() == ISD::AND)
48880     std::swap(N0, N1);
48881 
48882   // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
48883   if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
48884     return false;
48885 
48886   Mask = N1.getOperand(0);
48887   X = N1.getOperand(1);
48888 
48889   // Check to see if the mask appeared in both the AND and ANDNP.
48890   if (N0.getOperand(0) == Mask)
48891     Y = N0.getOperand(1);
48892   else if (N0.getOperand(1) == Mask)
48893     Y = N0.getOperand(0);
48894   else
48895     return false;
48896 
48897   // TODO: Attempt to match against AND(XOR(-1,M),Y) as well; waiting for the
48898   // ANDNP combine allows other combines to happen first that prevent matching.
48899   return true;
48900 }
48901 
48902 // Try to fold:
48903 //   (or (and (m, y), (pandn m, x)))
48904 // into:
48905 //   (vselect m, x, y)
48906 // As a special case, try to fold:
48907 //   (or (and (m, (sub 0, x)), (pandn m, x)))
48908 // into:
48909 //   (sub (xor X, M), M)
48910 static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
48911                                             const X86Subtarget &Subtarget) {
48912   assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");
48913 
48914   EVT VT = N->getValueType(0);
48915   if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
48916         (VT.is256BitVector() && Subtarget.hasInt256())))
48917     return SDValue();
48918 
48919   SDValue X, Y, Mask;
48920   if (!matchLogicBlend(N, X, Y, Mask))
48921     return SDValue();
48922 
48923   // Validate that X, Y, and Mask are bitcasts, and see through them.
48924   Mask = peekThroughBitcasts(Mask);
48925   X = peekThroughBitcasts(X);
48926   Y = peekThroughBitcasts(Y);
48927 
48928   EVT MaskVT = Mask.getValueType();
48929   unsigned EltBits = MaskVT.getScalarSizeInBits();
48930 
48931   // TODO: Attempt to handle floating point cases as well?
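  // Requiring EltBits sign bits means every mask element is known to be either
  // all-zeros or all-ones, so the blend below selects whole elements.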
48932   if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
48933     return SDValue();
48934 
48935   SDLoc DL(N);
48936 
48937   // Attempt to combine to conditional negate: (sub (xor X, M), M)
48938   if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
48939                                                            DAG, Subtarget))
48940     return Res;
48941 
48942   // PBLENDVB is only available on SSE 4.1.
48943   if (!Subtarget.hasSSE41())
48944     return SDValue();
48945 
48946   // If we have VPTERNLOG we should prefer that since PBLENDVB is multiple uops.
48947   if (Subtarget.hasVLX())
48948     return SDValue();
48949 
48950   MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;
48951 
48952   X = DAG.getBitcast(BlendVT, X);
48953   Y = DAG.getBitcast(BlendVT, Y);
48954   Mask = DAG.getBitcast(BlendVT, Mask);
48955   Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
48956   return DAG.getBitcast(VT, Mask);
48957 }
48958 
48959 // Helper function for combineOrCmpEqZeroToCtlzSrl
48960 // Transforms:
48961 //   seteq(cmp x, 0)
48962 //   into:
48963 //   srl(ctlz x), log2(bitsize(x))
48964 // Input pattern is checked by caller.
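      // e.g. for an i32 input, ctlz(x) is 32 only when x == 0, so
      // srl(ctlz(x), 5) yields 1 for x == 0 and 0 otherwise.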
48965 static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, SelectionDAG &DAG) {
48966   SDValue Cmp = Op.getOperand(1);
48967   EVT VT = Cmp.getOperand(0).getValueType();
48968   unsigned Log2b = Log2_32(VT.getSizeInBits());
48969   SDLoc dl(Op);
48970   SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
48971   // The result of the shift is true or false, and on X86, the 32-bit
48972   // encoding of shr and lzcnt is more desirable.
48973   SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
48974   SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
48975                             DAG.getConstant(Log2b, dl, MVT::i8));
48976   return Scc;
48977 }
48978 
48979 // Try to transform:
48980 //   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
48981 //   into:
48982 //   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
48983 // Will also attempt to match more generic cases, eg:
48984 //   zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
48985 // Only applies if the target supports the FastLZCNT feature.
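      // e.g. for two i32 values this turns (x == 0) | (y == 0) into
      // (ctlz(x) | ctlz(y)) >> 5, since a logical shift right distributes over
      // the OR.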
48986 static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
48987                                            TargetLowering::DAGCombinerInfo &DCI,
48988                                            const X86Subtarget &Subtarget) {
48989   if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
48990     return SDValue();
48991 
48992   auto isORCandidate = [](SDValue N) {
48993     return (N->getOpcode() == ISD::OR && N->hasOneUse());
48994   };
48995 
48996   // Check that the zero extend is extending to 32 bits or more. The code
48997   // generated by srl(ctlz) for 16-bit or narrower variants of the pattern
48998   // would require extra instructions to clear the upper bits.
48999   if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
49000       !isORCandidate(N->getOperand(0)))
49001     return SDValue();
49002 
49003   // Check the node matches: setcc(eq, cmp 0)
49004   auto isSetCCCandidate = [](SDValue N) {
49005     return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
49006            X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
49007            N->getOperand(1).getOpcode() == X86ISD::CMP &&
49008            isNullConstant(N->getOperand(1).getOperand(1)) &&
49009            N->getOperand(1).getValueType().bitsGE(MVT::i32);
49010   };
49011 
49012   SDNode *OR = N->getOperand(0).getNode();
49013   SDValue LHS = OR->getOperand(0);
49014   SDValue RHS = OR->getOperand(1);
49015 
49016   // Save nodes matching or(or, setcc(eq, cmp 0)).
49017   SmallVector<SDNode *, 2> ORNodes;
49018   while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
49019           (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
49020     ORNodes.push_back(OR);
49021     OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
49022     LHS = OR->getOperand(0);
49023     RHS = OR->getOperand(1);
49024   }
49025 
49026   // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
49027   if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
49028       !isORCandidate(SDValue(OR, 0)))
49029     return SDValue();
49030 
49031   // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern, try to lower
49032   // it to
49033   // or(srl(ctlz),srl(ctlz)).
49034   // The dag combiner can then fold it into:
49035   // srl(or(ctlz, ctlz)).
49036   SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, DAG);
49037   SDValue Ret, NewRHS;
49038   if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG)))
49039     Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, NewLHS, NewRHS);
49040 
49041   if (!Ret)
49042     return SDValue();
49043 
49044   // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
49045   while (!ORNodes.empty()) {
49046     OR = ORNodes.pop_back_val();
49047     LHS = OR->getOperand(0);
49048     RHS = OR->getOperand(1);
49049     // Swap rhs with lhs to match or(setcc(eq, cmp 0), or).
49050     if (RHS->getOpcode() == ISD::OR)
49051       std::swap(LHS, RHS);
49052     NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, DAG);
49053     if (!NewRHS)
49054       return SDValue();
49055     Ret = DAG.getNode(ISD::OR, SDLoc(OR), MVT::i32, Ret, NewRHS);
49056   }
49057 
49058   return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);
49059 }
49060 
49061 static SDValue foldMaskedMergeImpl(SDValue And0_L, SDValue And0_R,
49062                                    SDValue And1_L, SDValue And1_R,
49063                                    const SDLoc &DL, SelectionDAG &DAG) {
49064   if (!isBitwiseNot(And0_L, true) || !And0_L->hasOneUse())
49065     return SDValue();
49066   SDValue NotOp = And0_L->getOperand(0);
49067   if (NotOp == And1_R)
49068     std::swap(And1_R, And1_L);
49069   if (NotOp != And1_L)
49070     return SDValue();
49071 
49072   // (~(NotOp) & And0_R) | (NotOp & And1_R)
49073   // --> ((And0_R ^ And1_R) & NotOp) ^ And0_R
49074   EVT VT = And1_L->getValueType(0);
49075   SDValue Freeze_And0_R = DAG.getNode(ISD::FREEZE, SDLoc(), VT, And0_R);
49076   SDValue Xor0 = DAG.getNode(ISD::XOR, DL, VT, And1_R, Freeze_And0_R);
49077   SDValue And = DAG.getNode(ISD::AND, DL, VT, Xor0, NotOp);
49078   SDValue Xor1 = DAG.getNode(ISD::XOR, DL, VT, And, Freeze_And0_R);
49079   return Xor1;
49080 }
49081 
49082 /// Fold "masked merge" expressions like `(m & x) | (~m & y)` into the
49083 /// equivalent `((x ^ y) & m) ^ y` pattern.
49084 /// This is typically a better representation for targets without a fused
49085 /// "and-not" operation. This function is intended to be called from a
49086 /// `TargetLowering::PerformDAGCombine` callback on `ISD::OR` nodes.
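      /// A quick check of the identity with m = 0xF0, x = 0xAB, y = 0xCD:
      /// (m & x) | (~m & y) == 0xA0 | 0x0D == 0xAD, and
      /// ((x ^ y) & m) ^ y == (0x66 & 0xF0) ^ 0xCD == 0x60 ^ 0xCD == 0xAD.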
49087 static SDValue foldMaskedMerge(SDNode *Node, SelectionDAG &DAG) {
49088   // Note that masked-merge variants using XOR or ADD expressions are
49089   // normalized to OR by InstCombine so we only check for OR.
49090   assert(Node->getOpcode() == ISD::OR && "Must be called with ISD::OR node");
49091   SDValue N0 = Node->getOperand(0);
49092   if (N0->getOpcode() != ISD::AND || !N0->hasOneUse())
49093     return SDValue();
49094   SDValue N1 = Node->getOperand(1);
49095   if (N1->getOpcode() != ISD::AND || !N1->hasOneUse())
49096     return SDValue();
49097 
49098   SDLoc DL(Node);
49099   SDValue N00 = N0->getOperand(0);
49100   SDValue N01 = N0->getOperand(1);
49101   SDValue N10 = N1->getOperand(0);
49102   SDValue N11 = N1->getOperand(1);
49103   if (SDValue Result = foldMaskedMergeImpl(N00, N01, N10, N11, DL, DAG))
49104     return Result;
49105   if (SDValue Result = foldMaskedMergeImpl(N01, N00, N10, N11, DL, DAG))
49106     return Result;
49107   if (SDValue Result = foldMaskedMergeImpl(N10, N11, N00, N01, DL, DAG))
49108     return Result;
49109   if (SDValue Result = foldMaskedMergeImpl(N11, N10, N00, N01, DL, DAG))
49110     return Result;
49111   return SDValue();
49112 }
49113 
49114 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
49115 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
49116 /// with CMP+{ADC, SBB}.
49117 /// Also try (ADD/SUB)+(AND(SRL,1)) bit extraction pattern with BT+{ADC, SBB}.
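      /// For example, (add X, (zext (seteq Z, 0))) becomes (adc X, 0, (cmp Z, 1)):
      /// the CMP against 1 sets the carry flag exactly when Z == 0, so the ADC
      /// adds the extra 1 in precisely that case.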
49118 static SDValue combineAddOrSubToADCOrSBB(bool IsSub, const SDLoc &DL, EVT VT,
49119                                          SDValue X, SDValue Y,
49120                                          SelectionDAG &DAG,
49121                                          bool ZeroSecondOpOnly = false) {
49122   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
49123     return SDValue();
49124 
49125   // Look through a one-use zext.
49126   if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse())
49127     Y = Y.getOperand(0);
49128 
49129   X86::CondCode CC;
49130   SDValue EFLAGS;
49131   if (Y.getOpcode() == X86ISD::SETCC && Y.hasOneUse()) {
49132     CC = (X86::CondCode)Y.getConstantOperandVal(0);
49133     EFLAGS = Y.getOperand(1);
49134   } else if (Y.getOpcode() == ISD::AND && isOneConstant(Y.getOperand(1)) &&
49135              Y.hasOneUse()) {
49136     EFLAGS = LowerAndToBT(Y, ISD::SETNE, DL, DAG, CC);
49137   }
49138 
49139   if (!EFLAGS)
49140     return SDValue();
49141 
49142   // If X is -1 or 0, then we have an opportunity to avoid constants required in
49143   // the general case below.
49144   auto *ConstantX = dyn_cast<ConstantSDNode>(X);
49145   if (ConstantX && !ZeroSecondOpOnly) {
49146     if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnes()) ||
49147         (IsSub && CC == X86::COND_B && ConstantX->isZero())) {
49148       // This is a complicated way to get -1 or 0 from the carry flag:
49149       // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
49150       //  0 - SETB  -->  0 -  (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
49151       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49152                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49153                          EFLAGS);
49154     }
49155 
49156     if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnes()) ||
49157         (IsSub && CC == X86::COND_A && ConstantX->isZero())) {
49158       if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
49159           EFLAGS.getValueType().isInteger() &&
49160           !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
49161         // Swap the operands of a SUB, and we have the same pattern as above.
49162         // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
49163         //  0 - SETA  (SUB A, B) -->  0 - SETB  (SUB B, A) --> SUB + SBB
49164         SDValue NewSub = DAG.getNode(
49165             X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
49166             EFLAGS.getOperand(1), EFLAGS.getOperand(0));
49167         SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
49168         return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49169                            DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49170                            NewEFLAGS);
49171       }
49172     }
49173   }
49174 
49175   if (CC == X86::COND_B) {
49176     // X + SETB Z --> adc X, 0
49177     // X - SETB Z --> sbb X, 0
49178     return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
49179                        DAG.getVTList(VT, MVT::i32), X,
49180                        DAG.getConstant(0, DL, VT), EFLAGS);
49181   }
49182 
49183   if (ZeroSecondOpOnly)
49184     return SDValue();
49185 
49186   if (CC == X86::COND_A) {
49187     // Try to convert COND_A into COND_B in an attempt to facilitate
49188     // materializing "setb reg".
49189     //
49190     // Do not flip "e > c", where "c" is a constant, because Cmp instruction
49191     // cannot take an immediate as its first operand.
49192     //
49193     if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
49194         EFLAGS.getValueType().isInteger() &&
49195         !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
49196       SDValue NewSub =
49197           DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
49198                       EFLAGS.getOperand(1), EFLAGS.getOperand(0));
49199       SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
49200       return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
49201                          DAG.getVTList(VT, MVT::i32), X,
49202                          DAG.getConstant(0, DL, VT), NewEFLAGS);
49203     }
49204   }
49205 
49206   if (CC == X86::COND_AE) {
49207     // X + SETAE --> sbb X, -1
49208     // X - SETAE --> adc X, -1
49209     return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
49210                        DAG.getVTList(VT, MVT::i32), X,
49211                        DAG.getConstant(-1, DL, VT), EFLAGS);
49212   }
49213 
49214   if (CC == X86::COND_BE) {
49215     // X + SETBE --> sbb X, -1
49216     // X - SETBE --> adc X, -1
49217     // Try to convert COND_BE into COND_AE in an attempt to facilitate
49218     // materializing "setae reg".
49219     //
49220     // Do not flip "e <= c", where "c" is a constant, because Cmp instruction
49221     // cannot take an immediate as its first operand.
49222     //
49223     if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
49224         EFLAGS.getValueType().isInteger() &&
49225         !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
49226       SDValue NewSub =
49227           DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
49228                       EFLAGS.getOperand(1), EFLAGS.getOperand(0));
49229       SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
49230       return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
49231                          DAG.getVTList(VT, MVT::i32), X,
49232                          DAG.getConstant(-1, DL, VT), NewEFLAGS);
49233     }
49234   }
49235 
49236   if (CC != X86::COND_E && CC != X86::COND_NE)
49237     return SDValue();
49238 
49239   if (EFLAGS.getOpcode() != X86ISD::CMP || !EFLAGS.hasOneUse() ||
49240       !X86::isZeroNode(EFLAGS.getOperand(1)) ||
49241       !EFLAGS.getOperand(0).getValueType().isInteger())
49242     return SDValue();
49243 
49244   SDValue Z = EFLAGS.getOperand(0);
49245   EVT ZVT = Z.getValueType();
49246 
49247   // If X is -1 or 0, then we have an opportunity to avoid constants required in
49248   // the general case below.
49249   if (ConstantX) {
49250     // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb' with
49251     // fake operands:
49252     //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
49253     // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
49254     if ((IsSub && CC == X86::COND_NE && ConstantX->isZero()) ||
49255         (!IsSub && CC == X86::COND_E && ConstantX->isAllOnes())) {
49256       SDValue Zero = DAG.getConstant(0, DL, ZVT);
49257       SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
49258       SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
49259       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49260                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49261                          SDValue(Neg.getNode(), 1));
49262     }
49263 
49264     // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
49265     // with fake operands:
49266     //  0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
49267     // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
49268     if ((IsSub && CC == X86::COND_E && ConstantX->isZero()) ||
49269         (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnes())) {
49270       SDValue One = DAG.getConstant(1, DL, ZVT);
49271       SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
49272       SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
49273       return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
49274                          DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
49275                          Cmp1.getValue(1));
49276     }
49277   }
49278 
49279   // (cmp Z, 1) sets the carry flag if Z is 0.
49280   SDValue One = DAG.getConstant(1, DL, ZVT);
49281   SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
49282   SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
49283 
49284   // Add the flags type for ADC/SBB nodes.
49285   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
49286 
49287   // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
49288   // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
49289   if (CC == X86::COND_NE)
49290     return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
49291                        DAG.getConstant(-1ULL, DL, VT), Cmp1.getValue(1));
49292 
49293   // X - (Z == 0) --> sub X, (zext(sete  Z, 0)) --> sbb X, 0, (cmp Z, 1)
49294   // X + (Z == 0) --> add X, (zext(sete  Z, 0)) --> adc X, 0, (cmp Z, 1)
49295   return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
49296                      DAG.getConstant(0, DL, VT), Cmp1.getValue(1));
49297 }
49298 
49299 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
49300 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
49301 /// with CMP+{ADC, SBB}.
49302 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
49303   bool IsSub = N->getOpcode() == ISD::SUB;
49304   SDValue X = N->getOperand(0);
49305   SDValue Y = N->getOperand(1);
49306   EVT VT = N->getValueType(0);
49307   SDLoc DL(N);
49308 
49309   if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, X, Y, DAG))
49310     return ADCOrSBB;
49311 
49312   // Commute and try again (negate the result for subtracts).
49313   if (SDValue ADCOrSBB = combineAddOrSubToADCOrSBB(IsSub, DL, VT, Y, X, DAG)) {
49314     if (IsSub)
49315       ADCOrSBB =
49316           DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), ADCOrSBB);
49317     return ADCOrSBB;
49318   }
49319 
49320   return SDValue();
49321 }
49322 
49323 static SDValue combineOrXorWithSETCC(SDNode *N, SDValue N0, SDValue N1,
49324                                      SelectionDAG &DAG) {
49325   assert((N->getOpcode() == ISD::XOR || N->getOpcode() == ISD::OR) &&
49326          "Unexpected opcode");
49327 
49328   // Delegate to combineAddOrSubToADCOrSBB if we have:
49329   //
49330   //   (xor/or (zero_extend (setcc)) imm)
49331   //
49332   // where imm is odd if and only if we have xor, in which case the XOR/OR are
49333   // equivalent to a SUB/ADD, respectively.
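        // For a 0/1 SETCC value b this holds because an odd imm and (imm - b)
        // differ only in the low bit, so (xor b, imm) == (sub imm, b), while for
        // an even imm (or b, imm) == (add imm, b).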
49334   if (N0.getOpcode() == ISD::ZERO_EXTEND &&
49335       N0.getOperand(0).getOpcode() == X86ISD::SETCC && N0.hasOneUse()) {
49336     if (auto *N1C = dyn_cast<ConstantSDNode>(N1)) {
49337       bool IsSub = N->getOpcode() == ISD::XOR;
49338       bool N1COdd = N1C->getZExtValue() & 1;
49339       if (IsSub ? N1COdd : !N1COdd) {
49340         SDLoc DL(N);
49341         EVT VT = N->getValueType(0);
49342         if (SDValue R = combineAddOrSubToADCOrSBB(IsSub, DL, VT, N1, N0, DAG))
49343           return R;
49344       }
49345     }
49346   }
49347 
49348   return SDValue();
49349 }
49350 
49351 static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
49352                          TargetLowering::DAGCombinerInfo &DCI,
49353                          const X86Subtarget &Subtarget) {
49354   SDValue N0 = N->getOperand(0);
49355   SDValue N1 = N->getOperand(1);
49356   EVT VT = N->getValueType(0);
49357   SDLoc dl(N);
49358   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49359 
49360   // If this is SSE1 only, convert to FOR to avoid scalarization.
49361   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
49362     return DAG.getBitcast(MVT::v4i32,
49363                           DAG.getNode(X86ISD::FOR, dl, MVT::v4f32,
49364                                       DAG.getBitcast(MVT::v4f32, N0),
49365                                       DAG.getBitcast(MVT::v4f32, N1)));
49366   }
49367 
49368   // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
49369   // TODO: Support multiple SrcOps.
49370   if (VT == MVT::i1) {
49371     SmallVector<SDValue, 2> SrcOps;
49372     SmallVector<APInt, 2> SrcPartials;
49373     if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps, &SrcPartials) &&
49374         SrcOps.size() == 1) {
49375       unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
49376       EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
49377       SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
49378       if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
49379         Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
49380       if (Mask) {
49381         assert(SrcPartials[0].getBitWidth() == NumElts &&
49382                "Unexpected partial reduction mask");
49383         SDValue ZeroBits = DAG.getConstant(0, dl, MaskVT);
49384         SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
49385         Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
49386         return DAG.getSetCC(dl, MVT::i1, Mask, ZeroBits, ISD::SETNE);
49387       }
49388     }
49389   }
49390 
49391   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
49392     return R;
49393 
49394   if (SDValue R = combineBitOpWithShift(N, DAG))
49395     return R;
49396 
49397   if (SDValue R = combineBitOpWithPACK(N, DAG))
49398     return R;
49399 
49400   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
49401     return FPLogic;
49402 
49403   if (DCI.isBeforeLegalizeOps())
49404     return SDValue();
49405 
49406   if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
49407     return R;
49408 
49409   if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
49410     return R;
49411 
49412   if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
49413     return R;
49414 
49415   // (0 - SetCC) | C -> (zext (not SetCC)) * (C + 1) - 1 if we can get a LEA out of it.
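        // e.g. with C == 3: SetCC == 1 gives (0 - 1) | 3 == -1 and 0 * 4 - 1 == -1,
        // while SetCC == 0 gives 0 | 3 == 3 and 1 * 4 - 1 == 3; the multiply by
        // C + 1 is then cheap to materialize with an LEA.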
49416   if ((VT == MVT::i32 || VT == MVT::i64) &&
49417       N0.getOpcode() == ISD::SUB && N0.hasOneUse() &&
49418       isNullConstant(N0.getOperand(0))) {
49419     SDValue Cond = N0.getOperand(1);
49420     if (Cond.getOpcode() == ISD::ZERO_EXTEND && Cond.hasOneUse())
49421       Cond = Cond.getOperand(0);
49422 
49423     if (Cond.getOpcode() == X86ISD::SETCC && Cond.hasOneUse()) {
49424       if (auto *CN = dyn_cast<ConstantSDNode>(N1)) {
49425         uint64_t Val = CN->getZExtValue();
49426         if (Val == 1 || Val == 2 || Val == 3 || Val == 4 || Val == 7 || Val == 8) {
49427           X86::CondCode CCode = (X86::CondCode)Cond.getConstantOperandVal(0);
49428           CCode = X86::GetOppositeBranchCondition(CCode);
49429           SDValue NotCond = getSETCC(CCode, Cond.getOperand(1), SDLoc(Cond), DAG);
49430 
49431           SDValue R = DAG.getZExtOrTrunc(NotCond, dl, VT);
49432           R = DAG.getNode(ISD::MUL, dl, VT, R, DAG.getConstant(Val + 1, dl, VT));
49433           R = DAG.getNode(ISD::SUB, dl, VT, R, DAG.getConstant(1, dl, VT));
49434           return R;
49435         }
49436       }
49437     }
49438   }
49439 
49440   // Combine OR(X,KSHIFTL(Y,Elts/2)) -> CONCAT_VECTORS(X,Y) == KUNPCK(X,Y).
49441   // Combine OR(KSHIFTL(X,Elts/2),Y) -> CONCAT_VECTORS(Y,X) == KUNPCK(Y,X).
49442   // iff the upper elements of the non-shifted arg are zero.
49443   // KUNPCK requires 16+ bool vector elements.
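        // e.g. for v32i1 masks, OR(X, KSHIFTL(Y, 16)) with the top 16 elements
        // of X known zero is X's low half concatenated with Y's low half.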
49444   if (N0.getOpcode() == X86ISD::KSHIFTL || N1.getOpcode() == X86ISD::KSHIFTL) {
49445     unsigned NumElts = VT.getVectorNumElements();
49446     unsigned HalfElts = NumElts / 2;
49447     APInt UpperElts = APInt::getHighBitsSet(NumElts, HalfElts);
49448     if (NumElts >= 16 && N1.getOpcode() == X86ISD::KSHIFTL &&
49449         N1.getConstantOperandAPInt(1) == HalfElts &&
49450         DAG.MaskedVectorIsZero(N0, UpperElts)) {
49451       return DAG.getNode(
49452           ISD::CONCAT_VECTORS, dl, VT,
49453           extractSubVector(N0, 0, DAG, dl, HalfElts),
49454           extractSubVector(N1.getOperand(0), 0, DAG, dl, HalfElts));
49455     }
49456     if (NumElts >= 16 && N0.getOpcode() == X86ISD::KSHIFTL &&
49457         N0.getConstantOperandAPInt(1) == HalfElts &&
49458         DAG.MaskedVectorIsZero(N1, UpperElts)) {
49459       return DAG.getNode(
49460           ISD::CONCAT_VECTORS, dl, VT,
49461           extractSubVector(N1, 0, DAG, dl, HalfElts),
49462           extractSubVector(N0.getOperand(0), 0, DAG, dl, HalfElts));
49463     }
49464   }
49465 
49466   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
49467     // Attempt to recursively combine an OR of shuffles.
49468     SDValue Op(N, 0);
49469     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
49470       return Res;
49471 
49472     // If either operand is a constant mask, then only the elements that aren't
49473     // allones are actually demanded by the other operand.
49474     auto SimplifyUndemandedElts = [&](SDValue Op, SDValue OtherOp) {
49475       APInt UndefElts;
49476       SmallVector<APInt> EltBits;
49477       int NumElts = VT.getVectorNumElements();
49478       int EltSizeInBits = VT.getScalarSizeInBits();
49479       if (!getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts, EltBits))
49480         return false;
49481 
49482       APInt DemandedElts = APInt::getZero(NumElts);
49483       for (int I = 0; I != NumElts; ++I)
49484         if (!EltBits[I].isAllOnes())
49485           DemandedElts.setBit(I);
49486 
49487       return TLI.SimplifyDemandedVectorElts(OtherOp, DemandedElts, DCI);
49488     };
49489     if (SimplifyUndemandedElts(N0, N1) || SimplifyUndemandedElts(N1, N0)) {
49490       if (N->getOpcode() != ISD::DELETED_NODE)
49491         DCI.AddToWorklist(N);
49492       return SDValue(N, 0);
49493     }
49494   }
49495 
49496   // We should fold "masked merge" patterns when `andn` is not available.
49497   if (!Subtarget.hasBMI() && VT.isScalarInteger() && VT != MVT::i1)
49498     if (SDValue R = foldMaskedMerge(N, DAG))
49499       return R;
49500 
49501   if (SDValue R = combineOrXorWithSETCC(N, N0, N1, DAG))
49502     return R;
49503 
49504   return SDValue();
49505 }
49506 
49507 /// Try to turn tests against the signbit in the form of:
49508 ///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
49509 /// into:
49510 ///   SETGT(X, -1)
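      /// e.g. for an i64 X, (trunc (srl X, 63)) is X's sign bit, so XORing it
      /// with 1 computes "X is non-negative", which is exactly SETGT(X, -1).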
49511 static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
49512   // This is only worth doing if the output type is i8 or i1.
49513   EVT ResultType = N->getValueType(0);
49514   if (ResultType != MVT::i8 && ResultType != MVT::i1)
49515     return SDValue();
49516 
49517   SDValue N0 = N->getOperand(0);
49518   SDValue N1 = N->getOperand(1);
49519 
49520   // We should be performing an xor against a truncated shift.
49521   if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
49522     return SDValue();
49523 
49524   // Make sure we are performing an xor against one.
49525   if (!isOneConstant(N1))
49526     return SDValue();
49527 
49528   // SetCC on x86 zero extends so only act on this if it's a logical shift.
49529   SDValue Shift = N0.getOperand(0);
49530   if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
49531     return SDValue();
49532 
49533   // Make sure we are truncating from one of i16, i32 or i64.
49534   EVT ShiftTy = Shift.getValueType();
49535   if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
49536     return SDValue();
49537 
49538   // Make sure the shift amount extracts the sign bit.
49539   if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
49540       Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
49541     return SDValue();
49542 
49543   // Create a greater-than comparison against -1.
49544   // N.B. Using SETGE against 0 works but we want a canonical looking
49545   // comparison, and using SETGT matches up with what TranslateX86CC expects.
49546   SDLoc DL(N);
49547   SDValue ShiftOp = Shift.getOperand(0);
49548   EVT ShiftOpTy = ShiftOp.getValueType();
49549   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49550   EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
49551                                                *DAG.getContext(), ResultType);
49552   SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
49553                               DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
49554   if (SetCCResultType != ResultType)
49555     Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
49556   return Cond;
49557 }
49558 
49559 /// Turn vector tests of the signbit in the form of:
49560 ///   xor (sra X, elt_size(X)-1), -1
49561 /// into:
49562 ///   pcmpgt X, -1
49563 ///
49564 /// This should be called before type legalization because the pattern may not
49565 /// persist after that.
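      /// e.g. for v4i32, (xor (sra X, 31), -1) makes each lane all-ones iff that
      /// lane is non-negative, which is what (pcmpgt X, -1) produces directly.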
49566 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
49567                                          const X86Subtarget &Subtarget) {
49568   EVT VT = N->getValueType(0);
49569   if (!VT.isSimple())
49570     return SDValue();
49571 
49572   switch (VT.getSimpleVT().SimpleTy) {
49573   default: return SDValue();
49574   case MVT::v16i8:
49575   case MVT::v8i16:
49576   case MVT::v4i32:
49577   case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
49578   case MVT::v32i8:
49579   case MVT::v16i16:
49580   case MVT::v8i32:
49581   case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
49582   }
49583 
49584   // There must be a shift right algebraic before the xor, and the xor must be a
49585   // 'not' operation.
49586   SDValue Shift = N->getOperand(0);
49587   SDValue Ones = N->getOperand(1);
49588   if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
49589       !ISD::isBuildVectorAllOnes(Ones.getNode()))
49590     return SDValue();
49591 
49592   // The shift should be smearing the sign bit across each vector element.
49593   auto *ShiftAmt =
49594       isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
49595   if (!ShiftAmt ||
49596       ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
49597     return SDValue();
49598 
49599   // Create a greater-than comparison against -1. We don't use the more obvious
49600   // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
49601   return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
49602 }
49603 
49604 /// Detect patterns of truncation with unsigned saturation:
49605 ///
49606 /// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
49607 ///   Return the source value x to be truncated or SDValue() if the pattern was
49608 ///   not matched.
49609 ///
49610 /// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
49611 ///   where C1 >= 0 and C2 is unsigned max of destination type.
49612 ///
49613 ///    (truncate (smax (smin (x, C2), C1)) to dest_type)
49614 ///   where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
49615 ///
49616 ///   These two patterns are equivalent to:
49617 ///   (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
49618 ///   So return the smax(x, C1) value to be truncated or SDValue() if the
49619 ///   pattern was not matched.
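      /// e.g. when truncating i32 elements to i8, (trunc (umin x, 255)) clamps
      /// each element to the unsigned i8 range, so x is the value to return.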
49620 static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
49621                                  const SDLoc &DL) {
49622   EVT InVT = In.getValueType();
49623 
49624   // Saturation with truncation. We truncate from InVT to VT.
49625   assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
49626          "Unexpected types for truncate operation");
49627 
49628   // Match min/max and return limit value as a parameter.
49629   auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
49630     if (V.getOpcode() == Opcode &&
49631         ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
49632       return V.getOperand(0);
49633     return SDValue();
49634   };
49635 
49636   APInt C1, C2;
49637   if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
49638     // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according
49639     // to the element size of the destination type.
49640     if (C2.isMask(VT.getScalarSizeInBits()))
49641       return UMin;
49642 
49643   if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
49644     if (MatchMinMax(SMin, ISD::SMAX, C1))
49645       if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
49646         return SMin;
49647 
49648   if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
49649     if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
49650       if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
49651           C2.uge(C1)) {
49652         return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));
49653       }
49654 
49655   return SDValue();
49656 }
49657 
49658 /// Detect patterns of truncation with signed saturation:
49659 /// (truncate (smin ((smax (x, signed_min_of_dest_type)),
49660 ///                  signed_max_of_dest_type)) to dest_type)
49661 /// or:
49662 /// (truncate (smax ((smin (x, signed_max_of_dest_type)),
49663 ///                  signed_min_of_dest_type)) to dest_type).
49664 /// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
49665 /// Return the source value to be truncated or SDValue() if the pattern was not
49666 /// matched.
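      /// e.g. an i32 -> i8 signed-saturating truncate matches
      /// (smin (smax x, -128), 127) or the commuted form; with MatchPackUS the
      /// clamp range is [0, 255] instead.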
49667 static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
49668   unsigned NumDstBits = VT.getScalarSizeInBits();
49669   unsigned NumSrcBits = In.getScalarValueSizeInBits();
49670   assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");
49671 
49672   auto MatchMinMax = [](SDValue V, unsigned Opcode,
49673                         const APInt &Limit) -> SDValue {
49674     APInt C;
49675     if (V.getOpcode() == Opcode &&
49676         ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
49677       return V.getOperand(0);
49678     return SDValue();
49679   };
49680 
49681   APInt SignedMax, SignedMin;
49682   if (MatchPackUS) {
49683     SignedMax = APInt::getAllOnes(NumDstBits).zext(NumSrcBits);
49684     SignedMin = APInt(NumSrcBits, 0);
49685   } else {
49686     SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
49687     SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
49688   }
49689 
49690   if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
49691     if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
49692       return SMax;
49693 
49694   if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
49695     if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
49696       return SMin;
49697 
49698   return SDValue();
49699 }
49700 
49701 static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
49702                                       SelectionDAG &DAG,
49703                                       const X86Subtarget &Subtarget) {
49704   if (!Subtarget.hasSSE2() || !VT.isVector())
49705     return SDValue();
49706 
49707   EVT SVT = VT.getVectorElementType();
49708   EVT InVT = In.getValueType();
49709   EVT InSVT = InVT.getVectorElementType();
49710 
49711   // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
49712   // split across two registers, we can use a packusdw+perm to clamp to 0-65535
49713   // and concatenate at the same time. Then we can use a final vpmovuswb to
49714   // clip to 0-255.
49715   if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
49716       InVT == MVT::v16i32 && VT == MVT::v16i8) {
49717     if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
49718       // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
49719       SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
49720                                            DL, DAG, Subtarget);
49721       assert(Mid && "Failed to pack!");
49722       return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
49723     }
49724   }
49725 
49726   // vXi32 truncate instructions are available with AVX512F.
49727   // vXi16 truncate instructions are only available with AVX512BW.
49728   // For 256-bit or smaller vectors, we require VLX.
49729   // FIXME: We could widen truncates to 512 to remove the VLX restriction.
49730   // If the result type is 256 bits or larger and we have disabled 512-bit
49731   // registers, we should go ahead and use the pack instructions if possible.
49732   bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
49733                        (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
49734                       (InVT.getSizeInBits() > 128) &&
49735                       (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
49736                       !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);
49737 
49738   if (!PreferAVX512 && VT.getVectorNumElements() > 1 &&
49739       isPowerOf2_32(VT.getVectorNumElements()) &&
49740       (SVT == MVT::i8 || SVT == MVT::i16) &&
49741       (InSVT == MVT::i16 || InSVT == MVT::i32)) {
49742     if (SDValue USatVal = detectSSatPattern(In, VT, true)) {
49743       // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
49744       if (SVT == MVT::i8 && InSVT == MVT::i32) {
49745         EVT MidVT = VT.changeVectorElementType(MVT::i16);
49746         SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
49747                                              DAG, Subtarget);
49748         assert(Mid && "Failed to pack!");
49749         SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
49750                                            Subtarget);
49751         assert(V && "Failed to pack!");
49752         return V;
49753       } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
49754         return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
49755                                       Subtarget);
49756     }
49757     if (SDValue SSatVal = detectSSatPattern(In, VT))
49758       return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
49759                                     Subtarget);
49760   }
49761 
49762   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49763   if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
49764       Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI()) &&
49765       (SVT == MVT::i32 || SVT == MVT::i16 || SVT == MVT::i8)) {
49766     unsigned TruncOpc = 0;
49767     SDValue SatVal;
49768     if (SDValue SSatVal = detectSSatPattern(In, VT)) {
49769       SatVal = SSatVal;
49770       TruncOpc = X86ISD::VTRUNCS;
49771     } else if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL)) {
49772       SatVal = USatVal;
49773       TruncOpc = X86ISD::VTRUNCUS;
49774     }
49775     if (SatVal) {
49776       unsigned ResElts = VT.getVectorNumElements();
49777       // If the input type is less than 512 bits and we don't have VLX, we need
49778       // to widen to 512 bits.
49779       if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
49780         unsigned NumConcats = 512 / InVT.getSizeInBits();
49781         ResElts *= NumConcats;
49782         SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
49783         ConcatOps[0] = SatVal;
49784         InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
49785                                 NumConcats * InVT.getVectorNumElements());
49786         SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
49787       }
49788       // Widen the result if it's narrower than 128 bits.
49789       if (ResElts * SVT.getSizeInBits() < 128)
49790         ResElts = 128 / SVT.getSizeInBits();
49791       EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
49792       SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
49793       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
49794                          DAG.getIntPtrConstant(0, DL));
49795     }
49796   }
49797 
49798   return SDValue();
49799 }
49800 
49801 /// This function detects the AVG pattern between vectors of unsigned i8/i16,
49802 /// which is c = (a + b + 1) / 2, and replaces this operation with the efficient
49803 /// ISD::AVGCEILU (AVG) instruction.
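      /// e.g. for unsigned i8 elements, (a + b + 1) >> 1 evaluated in a wider
      /// type is exactly the rounded average that PAVGB computes.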
49804 static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
49805                                 const X86Subtarget &Subtarget,
49806                                 const SDLoc &DL) {
49807   if (!VT.isVector())
49808     return SDValue();
49809   EVT InVT = In.getValueType();
49810   unsigned NumElems = VT.getVectorNumElements();
49811 
49812   EVT ScalarVT = VT.getVectorElementType();
49813   if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) && NumElems >= 2))
49814     return SDValue();
49815 
49816   // InScalarVT is the intermediate type in the AVG pattern and it should be
49817   // greater than the original input type (i8/i16).
49818   EVT InScalarVT = InVT.getVectorElementType();
49819   if (InScalarVT.getFixedSizeInBits() <= ScalarVT.getFixedSizeInBits())
49820     return SDValue();
49821 
49822   if (!Subtarget.hasSSE2())
49823     return SDValue();
49824 
49825   // Detect the following pattern:
49826   //
49827   //   %1 = zext <N x i8> %a to <N x i32>
49828   //   %2 = zext <N x i8> %b to <N x i32>
49829   //   %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
49830   //   %4 = add nuw nsw <N x i32> %3, %2
49831   //   %5 = lshr <N x i32> %4, <i32 1 x N>
49832   //   %6 = trunc <N x i32> %5 to <N x i8>
49833   //
49834   // In AVX512, the last instruction can also be a trunc store.
49835   if (In.getOpcode() != ISD::SRL)
49836     return SDValue();
49837 
49838   // A lambda checking the given SDValue is a constant vector and each element
49839   // is in the range [Min, Max].
49840   auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
49841     return ISD::matchUnaryPredicate(V, [Min, Max](ConstantSDNode *C) {
49842       return !(C->getAPIntValue().ult(Min) || C->getAPIntValue().ugt(Max));
49843     });
49844   };
49845 
49846   auto IsZExtLike = [DAG = &DAG, ScalarVT](SDValue V) {
49847     unsigned MaxActiveBits = DAG->computeKnownBits(V).countMaxActiveBits();
49848     return MaxActiveBits <= ScalarVT.getSizeInBits();
49849   };
49850 
49851   // Check if each element of the vector is right-shifted by one.
49852   SDValue LHS = In.getOperand(0);
49853   SDValue RHS = In.getOperand(1);
49854   if (!IsConstVectorInRange(RHS, 1, 1))
49855     return SDValue();
49856   if (LHS.getOpcode() != ISD::ADD)
49857     return SDValue();
49858 
49859   // Detect a pattern of a + b + 1 where the order doesn't matter.
49860   SDValue Operands[3];
49861   Operands[0] = LHS.getOperand(0);
49862   Operands[1] = LHS.getOperand(1);
49863 
49864   auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
49865                        ArrayRef<SDValue> Ops) {
49866     return DAG.getNode(ISD::AVGCEILU, DL, Ops[0].getValueType(), Ops);
49867   };
49868 
49869   auto AVGSplitter = [&](std::array<SDValue, 2> Ops) {
49870     for (SDValue &Op : Ops)
49871       if (Op.getValueType() != VT)
49872         Op = DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
49873     // Pad to a power-of-2 vector, split+apply and extract the original vector.
49874     unsigned NumElemsPow2 = PowerOf2Ceil(NumElems);
49875     EVT Pow2VT = EVT::getVectorVT(*DAG.getContext(), ScalarVT, NumElemsPow2);
49876     if (NumElemsPow2 != NumElems) {
49877       for (SDValue &Op : Ops) {
49878         SmallVector<SDValue, 32> EltsOfOp(NumElemsPow2, DAG.getUNDEF(ScalarVT));
49879         for (unsigned i = 0; i != NumElems; ++i) {
49880           SDValue Idx = DAG.getIntPtrConstant(i, DL);
49881           EltsOfOp[i] =
49882               DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarVT, Op, Idx);
49883         }
49884         Op = DAG.getBuildVector(Pow2VT, DL, EltsOfOp);
49885       }
49886     }
49887     SDValue Res = SplitOpsAndApply(DAG, Subtarget, DL, Pow2VT, Ops, AVGBuilder);
49888     if (NumElemsPow2 == NumElems)
49889       return Res;
49890     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
49891                        DAG.getIntPtrConstant(0, DL));
49892   };
49893 
49894   // Take care of the case when one of the operands is a constant vector whose
49895   // element is in the range [1, 256].
49896   if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
49897       IsZExtLike(Operands[0])) {
49898     // The pattern is detected. Subtract one from the constant vector, then
49899     // demote it and emit the ISD::AVGCEILU instruction.
49900     SDValue VecOnes = DAG.getConstant(1, DL, InVT);
49901     Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
49902     return AVGSplitter({Operands[0], Operands[1]});
49903   }
49904 
49905   // Matches 'add like' patterns: add(Op0,Op1) or zext(or(Op0,Op1)).
49906   // Match the or case only if it's 'add-like' - i.e. can be replaced by an add.
49907   auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
49908     if (ISD::ADD == V.getOpcode()) {
49909       Op0 = V.getOperand(0);
49910       Op1 = V.getOperand(1);
49911       return true;
49912     }
49913     if (ISD::ZERO_EXTEND != V.getOpcode())
49914       return false;
49915     V = V.getOperand(0);
49916     if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
49917         !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
49918       return false;
49919     Op0 = V.getOperand(0);
49920     Op1 = V.getOperand(1);
49921     return true;
49922   };
49923 
49924   SDValue Op0, Op1;
49925   if (FindAddLike(Operands[0], Op0, Op1))
49926     std::swap(Operands[0], Operands[1]);
49927   else if (!FindAddLike(Operands[1], Op0, Op1))
49928     return SDValue();
49929   Operands[2] = Op0;
49930   Operands[1] = Op1;
49931 
49932   // Now we have three operands of two additions. Check that one of them is a
49933   // constant vector with ones, and the other two can be promoted from i8/i16.
49934   for (SDValue &Op : Operands) {
49935     if (!IsConstVectorInRange(Op, 1, 1))
49936       continue;
49937     std::swap(Op, Operands[2]);
49938 
49939     // Check if Operands[0] and Operands[1] are results of type promotion.
49940     for (int j = 0; j < 2; ++j)
49941       if (Operands[j].getValueType() != VT)
49942         if (!IsZExtLike(Operands[j]))
49943           return SDValue();
49944 
49945     // The pattern is detected, emit ISD::AVGCEILU instruction(s).
49946     return AVGSplitter({Operands[0], Operands[1]});
49947   }
49948 
49949   return SDValue();
49950 }
49951 
49952 static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
49953                            TargetLowering::DAGCombinerInfo &DCI,
49954                            const X86Subtarget &Subtarget) {
49955   LoadSDNode *Ld = cast<LoadSDNode>(N);
49956   EVT RegVT = Ld->getValueType(0);
49957   EVT MemVT = Ld->getMemoryVT();
49958   SDLoc dl(Ld);
49959   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
49960 
49961   // For chips with slow 32-byte unaligned loads, break the 32-byte operation
49962   // into two 16-byte operations. Also split non-temporal aligned loads on
49963   // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
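        // e.g. a 32-byte v8f32 load on such targets becomes two 16-byte v4f32
        // loads at offsets 0 and 16, rejoined below with CONCAT_VECTORS.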
49964   ISD::LoadExtType Ext = Ld->getExtensionType();
49965   unsigned Fast;
49966   if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
49967       Ext == ISD::NON_EXTLOAD &&
49968       ((Ld->isNonTemporal() && !Subtarget.hasInt256() &&
49969         Ld->getAlign() >= Align(16)) ||
49970        (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
49971                                *Ld->getMemOperand(), &Fast) &&
49972         !Fast))) {
49973     unsigned NumElems = RegVT.getVectorNumElements();
49974     if (NumElems < 2)
49975       return SDValue();
49976 
49977     unsigned HalfOffset = 16;
49978     SDValue Ptr1 = Ld->getBasePtr();
49979     SDValue Ptr2 =
49980         DAG.getMemBasePlusOffset(Ptr1, TypeSize::getFixed(HalfOffset), dl);
49981     EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
49982                                   NumElems / 2);
49983     SDValue Load1 =
49984         DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
49985                     Ld->getOriginalAlign(),
49986                     Ld->getMemOperand()->getFlags());
49987     SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
49988                                 Ld->getPointerInfo().getWithOffset(HalfOffset),
49989                                 Ld->getOriginalAlign(),
49990                                 Ld->getMemOperand()->getFlags());
49991     SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
49992                              Load1.getValue(1), Load2.getValue(1));
49993 
49994     SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
49995     return DCI.CombineTo(N, NewVec, TF, true);
49996   }
49997 
49998   // Bool vector load - attempt to cast to an integer, as we have good
49999   // (vXiY *ext(vXi1 bitcast(iX))) handling.
50000   if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
50001       RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
50002     unsigned NumElts = RegVT.getVectorNumElements();
50003     EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
50004     if (TLI.isTypeLegal(IntVT)) {
50005       SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
50006                                     Ld->getPointerInfo(),
50007                                     Ld->getOriginalAlign(),
50008                                     Ld->getMemOperand()->getFlags());
50009       SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
50010       return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
50011     }
50012   }
50013 
50014   // If we also load/broadcast this to a wider type, then just extract the
50015   // lowest subvector.
50016   if (Ext == ISD::NON_EXTLOAD && Subtarget.hasAVX() && Ld->isSimple() &&
50017       (RegVT.is128BitVector() || RegVT.is256BitVector())) {
50018     SDValue Ptr = Ld->getBasePtr();
50019     SDValue Chain = Ld->getChain();
50020     for (SDNode *User : Chain->uses()) {
50021       auto *UserLd = dyn_cast<MemSDNode>(User);
50022       if (User != N && UserLd &&
50023           (User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD ||
50024            User->getOpcode() == X86ISD::VBROADCAST_LOAD ||
50025            ISD::isNormalLoad(User)) &&
50026           UserLd->getChain() == Chain && !User->hasAnyUseOfValue(1) &&
50027           User->getValueSizeInBits(0).getFixedValue() >
50028               RegVT.getFixedSizeInBits()) {
50029         if (User->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
50030             UserLd->getBasePtr() == Ptr &&
50031             UserLd->getMemoryVT().getSizeInBits() == MemVT.getSizeInBits()) {
50032           SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
50033                                              RegVT.getSizeInBits());
50034           Extract = DAG.getBitcast(RegVT, Extract);
50035           return DCI.CombineTo(N, Extract, SDValue(User, 1));
50036         }
50037         auto MatchingBits = [](const APInt &Undefs, const APInt &UserUndefs,
50038                                ArrayRef<APInt> Bits, ArrayRef<APInt> UserBits) {
50039           for (unsigned I = 0, E = Undefs.getBitWidth(); I != E; ++I) {
50040             if (Undefs[I])
50041               continue;
50042             if (UserUndefs[I] || Bits[I] != UserBits[I])
50043               return false;
50044           }
50045           return true;
50046         };
50047         // See if we are loading a constant that matches in the lower
50048         // bits of a longer constant (but from a different constant pool ptr).
50049         EVT UserVT = User->getValueType(0);
50050         SDValue UserPtr = UserLd->getBasePtr();
50051         const Constant *LdC = getTargetConstantFromBasePtr(Ptr);
50052         const Constant *UserC = getTargetConstantFromBasePtr(UserPtr);
50053         if (LdC && UserC && UserPtr != Ptr) {
50054           unsigned LdSize = LdC->getType()->getPrimitiveSizeInBits();
50055           unsigned UserSize = UserC->getType()->getPrimitiveSizeInBits();
50056           if (LdSize < UserSize || !ISD::isNormalLoad(User)) {
50057             APInt Undefs, UserUndefs;
50058             SmallVector<APInt> Bits, UserBits;
50059             unsigned NumBits = std::min(RegVT.getScalarSizeInBits(),
50060                                         UserVT.getScalarSizeInBits());
50061             if (getTargetConstantBitsFromNode(SDValue(N, 0), NumBits, Undefs,
50062                                               Bits) &&
50063                 getTargetConstantBitsFromNode(SDValue(User, 0), NumBits,
50064                                               UserUndefs, UserBits)) {
50065               if (MatchingBits(Undefs, UserUndefs, Bits, UserBits)) {
50066                 SDValue Extract = extractSubVector(
50067                     SDValue(User, 0), 0, DAG, SDLoc(N), RegVT.getSizeInBits());
50068                 Extract = DAG.getBitcast(RegVT, Extract);
50069                 return DCI.CombineTo(N, Extract, SDValue(User, 1));
50070               }
50071             }
50072           }
50073         }
50074       }
50075     }
50076   }
50077 
50078   // Cast ptr32 and ptr64 pointers to the default address space before a load.
50079   unsigned AddrSpace = Ld->getAddressSpace();
50080   if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
50081       AddrSpace == X86AS::PTR32_UPTR) {
50082     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
50083     if (PtrVT != Ld->getBasePtr().getSimpleValueType()) {
50084       SDValue Cast =
50085           DAG.getAddrSpaceCast(dl, PtrVT, Ld->getBasePtr(), AddrSpace, 0);
50086       return DAG.getExtLoad(Ext, dl, RegVT, Ld->getChain(), Cast,
50087                             Ld->getPointerInfo(), MemVT, Ld->getOriginalAlign(),
50088                             Ld->getMemOperand()->getFlags());
50089     }
50090   }
50091 
50092   return SDValue();
50093 }
50094 
50095 /// If V is a build vector of boolean constants and exactly one of those
50096 /// constants is true, return the operand index of that true element.
50097 /// Otherwise, return -1.
50098 static int getOneTrueElt(SDValue V) {
50099   // This needs to be a build vector of booleans.
50100   // TODO: Checking for the i1 type matches the IR definition for the mask,
50101   // but the mask check could be loosened to i8 or other types. That might
50102   // also require checking more than 'allOnesValue'; e.g., the x86 HW
50103   // instructions only require that the MSB is set for each mask element.
50104   // The ISD::MSTORE comments/definition do not specify how the mask operand
50105   // is formatted.
50106   auto *BV = dyn_cast<BuildVectorSDNode>(V);
50107   if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
50108     return -1;
50109 
50110   int TrueIndex = -1;
50111   unsigned NumElts = BV->getValueType(0).getVectorNumElements();
50112   for (unsigned i = 0; i < NumElts; ++i) {
50113     const SDValue &Op = BV->getOperand(i);
50114     if (Op.isUndef())
50115       continue;
50116     auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
50117     if (!ConstNode)
50118       return -1;
50119     if (ConstNode->getAPIntValue().countr_one() >= 1) {
50120       // If we already found a one, this is too many.
50121       if (TrueIndex >= 0)
50122         return -1;
50123       TrueIndex = i;
50124     }
50125   }
50126   return TrueIndex;
50127 }
50128 
50129 /// Given a masked memory load/store operation, return true if it has one mask
50130 /// bit set. If it has one mask bit set, then also return the memory address of
50131 /// the scalar element to load/store, the vector index to insert/extract that
50132 /// scalar element, and the alignment for the scalar memory access.
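// For example (illustrative values): for a v4f32 masked load/store with mask
// (0, 0, 1, 0) and base pointer Ptr, this would report Addr = Ptr + 8,
// Index = 2, Offset = 8, and Alignment = commonAlignment(original align, 4).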
static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
50134                                          SelectionDAG &DAG, SDValue &Addr,
50135                                          SDValue &Index, Align &Alignment,
50136                                          unsigned &Offset) {
50137   int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
50138   if (TrueMaskElt < 0)
50139     return false;
50140 
50141   // Get the address of the one scalar element that is specified by the mask
50142   // using the appropriate offset from the base pointer.
50143   EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
50144   Offset = 0;
50145   Addr = MaskedOp->getBasePtr();
50146   if (TrueMaskElt != 0) {
50147     Offset = TrueMaskElt * EltVT.getStoreSize();
50148     Addr = DAG.getMemBasePlusOffset(Addr, TypeSize::getFixed(Offset),
50149                                     SDLoc(MaskedOp));
50150   }
50151 
50152   Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
50153   Alignment = commonAlignment(MaskedOp->getOriginalAlign(),
50154                               EltVT.getStoreSize());
50155   return true;
50156 }
50157 
50158 /// If exactly one element of the mask is set for a non-extending masked load,
50159 /// it is a scalar load and vector insert.
50160 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
50161 /// mask have already been optimized in IR, so we don't bother with those here.
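// For example (rough sketch): a v4f32 masked load with mask (0, 0, 1, 0)
// would become something like
//   t1: f32,ch = load<(load 4 from Ptr + 8)>
//   t2: v4f32 = insert_vector_elt PassThru, t1, 2
// with users of the original chain result rewired to t1's chain.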
50162 static SDValue
reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
50164                              TargetLowering::DAGCombinerInfo &DCI,
50165                              const X86Subtarget &Subtarget) {
50166   assert(ML->isUnindexed() && "Unexpected indexed masked load!");
50167   // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
50168   // However, some target hooks may need to be added to know when the transform
50169   // is profitable. Endianness would also have to be considered.
50170 
50171   SDValue Addr, VecIndex;
50172   Align Alignment;
50173   unsigned Offset;
50174   if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment, Offset))
50175     return SDValue();
50176 
50177   // Load the one scalar element that is specified by the mask using the
50178   // appropriate offset from the base pointer.
50179   SDLoc DL(ML);
50180   EVT VT = ML->getValueType(0);
50181   EVT EltVT = VT.getVectorElementType();
50182 
50183   EVT CastVT = VT;
50184   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
50185     EltVT = MVT::f64;
50186     CastVT = VT.changeVectorElementType(EltVT);
50187   }
50188 
50189   SDValue Load =
50190       DAG.getLoad(EltVT, DL, ML->getChain(), Addr,
50191                   ML->getPointerInfo().getWithOffset(Offset),
50192                   Alignment, ML->getMemOperand()->getFlags());
50193 
50194   SDValue PassThru = DAG.getBitcast(CastVT, ML->getPassThru());
50195 
50196   // Insert the loaded element into the appropriate place in the vector.
50197   SDValue Insert =
50198       DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, CastVT, PassThru, Load, VecIndex);
50199   Insert = DAG.getBitcast(VT, Insert);
50200   return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
50201 }
50202 
50203 static SDValue
combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
50205                               TargetLowering::DAGCombinerInfo &DCI) {
50206   assert(ML->isUnindexed() && "Unexpected indexed masked load!");
50207   if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
50208     return SDValue();
50209 
50210   SDLoc DL(ML);
50211   EVT VT = ML->getValueType(0);
50212 
50213   // If we are loading the first and last elements of a vector, it is safe and
50214   // always faster to load the whole vector. Replace the masked load with a
50215   // vector load and select.
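  // For example (illustrative): a v4f32 masked load with mask (1, ?, ?, 1)
  // would become a plain v4f32 load followed by a vselect between the loaded
  // vector and the pass-through operand.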
50216   unsigned NumElts = VT.getVectorNumElements();
50217   BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
50218   bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
50219   bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
50220   if (LoadFirstElt && LoadLastElt) {
50221     SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
50222                                 ML->getMemOperand());
50223     SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
50224                                   ML->getPassThru());
50225     return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
50226   }
50227 
50228   // Convert a masked load with a constant mask into a masked load and a select.
50229   // This allows the select operation to use a faster kind of select instruction
50230   // (for example, vblendvps -> vblendps).
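  // Roughly: masked_load(Ptr, M, PassThru) with constant mask M becomes
  //   vselect M, masked_load(Ptr, M, undef), PassThru
  // so the blend can typically be selected as an immediate-controlled blend
  // (e.g. vblendps) instead of a variable blend (e.g. vblendvps).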
50231 
50232   // Don't try this if the pass-through operand is already undefined. That would
50233   // cause an infinite loop because that's what we're about to create.
50234   if (ML->getPassThru().isUndef())
50235     return SDValue();
50236 
50237   if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
50238     return SDValue();
50239 
50240   // The new masked load has an undef pass-through operand. The select uses the
50241   // original pass-through operand.
50242   SDValue NewML = DAG.getMaskedLoad(
50243       VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
50244       DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
50245       ML->getAddressingMode(), ML->getExtensionType());
50246   SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
50247                                 ML->getPassThru());
50248 
50249   return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
50250 }
50251 
static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
50253                                  TargetLowering::DAGCombinerInfo &DCI,
50254                                  const X86Subtarget &Subtarget) {
50255   auto *Mld = cast<MaskedLoadSDNode>(N);
50256 
50257   // TODO: Expanding load with constant mask may be optimized as well.
50258   if (Mld->isExpandingLoad())
50259     return SDValue();
50260 
50261   if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
50262     if (SDValue ScalarLoad =
50263             reduceMaskedLoadToScalarLoad(Mld, DAG, DCI, Subtarget))
50264       return ScalarLoad;
50265 
50266     // TODO: Do some AVX512 subsets benefit from this transform?
50267     if (!Subtarget.hasAVX512())
50268       if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
50269         return Blend;
50270   }
50271 
50272   // If the mask value has been legalized to a non-boolean vector, try to
50273   // simplify ops leading up to it. We only demand the MSB of each lane.
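  // E.g. for a v4i32 mask, only bit 31 of each lane matters, matching the
  // VMASKMOV-style semantics of testing just the sign bit of each element.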
50274   SDValue Mask = Mld->getMask();
50275   if (Mask.getScalarValueSizeInBits() != 1) {
50276     EVT VT = Mld->getValueType(0);
50277     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50278     APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
50279     if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
50280       if (N->getOpcode() != ISD::DELETED_NODE)
50281         DCI.AddToWorklist(N);
50282       return SDValue(N, 0);
50283     }
50284     if (SDValue NewMask =
50285             TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
50286       return DAG.getMaskedLoad(
50287           VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
50288           NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
50289           Mld->getAddressingMode(), Mld->getExtensionType());
50290   }
50291 
50292   return SDValue();
50293 }
50294 
50295 /// If exactly one element of the mask is set for a non-truncating masked store,
50296 /// it is a vector extract and scalar store.
50297 /// Note: It is expected that the degenerate cases of an all-zeros or all-ones
50298 /// mask have already been optimized in IR, so we don't bother with those here.
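// For example (rough sketch): a v4f32 masked store with mask (0, 0, 1, 0)
// would become an extract_vector_elt of element 2 followed by an ordinary
// f32 store at Ptr + 8.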
static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
50300                                               SelectionDAG &DAG,
50301                                               const X86Subtarget &Subtarget) {
50302   // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
50303   // However, some target hooks may need to be added to know when the transform
50304   // is profitable. Endianness would also have to be considered.
50305 
50306   SDValue Addr, VecIndex;
50307   Align Alignment;
50308   unsigned Offset;
50309   if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment, Offset))
50310     return SDValue();
50311 
50312   // Extract the one scalar element that is actually being stored.
50313   SDLoc DL(MS);
50314   SDValue Value = MS->getValue();
50315   EVT VT = Value.getValueType();
50316   EVT EltVT = VT.getVectorElementType();
50317   if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
50318     EltVT = MVT::f64;
50319     EVT CastVT = VT.changeVectorElementType(EltVT);
50320     Value = DAG.getBitcast(CastVT, Value);
50321   }
50322   SDValue Extract =
50323       DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Value, VecIndex);
50324 
50325   // Store that element at the appropriate offset from the base pointer.
50326   return DAG.getStore(MS->getChain(), DL, Extract, Addr,
50327                       MS->getPointerInfo().getWithOffset(Offset),
50328                       Alignment, MS->getMemOperand()->getFlags());
50329 }
50330 
static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
50332                                   TargetLowering::DAGCombinerInfo &DCI,
50333                                   const X86Subtarget &Subtarget) {
50334   MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
50335   if (Mst->isCompressingStore())
50336     return SDValue();
50337 
50338   EVT VT = Mst->getValue().getValueType();
50339   SDLoc dl(Mst);
50340   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50341 
50342   if (Mst->isTruncatingStore())
50343     return SDValue();
50344 
50345   if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG, Subtarget))
50346     return ScalarStore;
50347 
50348   // If the mask value has been legalized to a non-boolean vector, try to
50349   // simplify ops leading up to it. We only demand the MSB of each lane.
50350   SDValue Mask = Mst->getMask();
50351   if (Mask.getScalarValueSizeInBits() != 1) {
50352     APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
50353     if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
50354       if (N->getOpcode() != ISD::DELETED_NODE)
50355         DCI.AddToWorklist(N);
50356       return SDValue(N, 0);
50357     }
50358     if (SDValue NewMask =
50359             TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
50360       return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Mst->getValue(),
50361                                 Mst->getBasePtr(), Mst->getOffset(), NewMask,
50362                                 Mst->getMemoryVT(), Mst->getMemOperand(),
50363                                 Mst->getAddressingMode());
50364   }
50365 
50366   SDValue Value = Mst->getValue();
50367   if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
50368       TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
50369                             Mst->getMemoryVT())) {
50370     return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
50371                               Mst->getBasePtr(), Mst->getOffset(), Mask,
50372                               Mst->getMemoryVT(), Mst->getMemOperand(),
50373                               Mst->getAddressingMode(), true);
50374   }
50375 
50376   return SDValue();
50377 }
50378 
static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
50380                             TargetLowering::DAGCombinerInfo &DCI,
50381                             const X86Subtarget &Subtarget) {
50382   StoreSDNode *St = cast<StoreSDNode>(N);
50383   EVT StVT = St->getMemoryVT();
50384   SDLoc dl(St);
50385   SDValue StoredVal = St->getValue();
50386   EVT VT = StoredVal.getValueType();
50387   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50388 
50389   // Convert a store of vXi1 into a store of iX and a bitcast.
50390   if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
50391       VT.getVectorElementType() == MVT::i1) {
50392 
50393     EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
50394     StoredVal = DAG.getBitcast(NewVT, StoredVal);
50395 
50396     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50397                         St->getPointerInfo(), St->getOriginalAlign(),
50398                         St->getMemOperand()->getFlags());
50399   }
50400 
50401   // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
50402   // This will avoid a copy to k-register.
50403   if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
50404       StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
50405       StoredVal.getOperand(0).getValueType() == MVT::i8) {
50406     SDValue Val = StoredVal.getOperand(0);
50407     // We must store zeros to the unused bits.
50408     Val = DAG.getZeroExtendInReg(Val, dl, MVT::i1);
50409     return DAG.getStore(St->getChain(), dl, Val,
50410                         St->getBasePtr(), St->getPointerInfo(),
50411                         St->getOriginalAlign(),
50412                         St->getMemOperand()->getFlags());
50413   }
50414 
50415   // Widen v2i1/v4i1 stores to v8i1.
50416   if ((VT == MVT::v1i1 || VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
50417       Subtarget.hasAVX512()) {
50418     unsigned NumConcats = 8 / VT.getVectorNumElements();
50419     // We must store zeros to the unused bits.
50420     SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, VT));
50421     Ops[0] = StoredVal;
50422     StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
50423     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50424                         St->getPointerInfo(), St->getOriginalAlign(),
50425                         St->getMemOperand()->getFlags());
50426   }
50427 
50428   // Turn vXi1 stores of constants into a scalar store.
50429   if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
50430        VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
50431       ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
    // If it's a v64i1 store without 64-bit support, we need two stores.
50433     if (!DCI.isBeforeLegalize() && VT == MVT::v64i1 && !Subtarget.is64Bit()) {
50434       SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
50435                                       StoredVal->ops().slice(0, 32));
50436       Lo = combinevXi1ConstantToInteger(Lo, DAG);
50437       SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
50438                                       StoredVal->ops().slice(32, 32));
50439       Hi = combinevXi1ConstantToInteger(Hi, DAG);
50440 
50441       SDValue Ptr0 = St->getBasePtr();
50442       SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, TypeSize::getFixed(4), dl);
50443 
50444       SDValue Ch0 =
50445           DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
50446                        St->getOriginalAlign(),
50447                        St->getMemOperand()->getFlags());
50448       SDValue Ch1 =
50449           DAG.getStore(St->getChain(), dl, Hi, Ptr1,
50450                        St->getPointerInfo().getWithOffset(4),
50451                        St->getOriginalAlign(),
50452                        St->getMemOperand()->getFlags());
50453       return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
50454     }
50455 
50456     StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
50457     return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
50458                         St->getPointerInfo(), St->getOriginalAlign(),
50459                         St->getMemOperand()->getFlags());
50460   }
50461 
50462   // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
50463   // Sandy Bridge, perform two 16-byte stores.
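  // E.g. a v8f32 store would be split into two v4f32 stores, one at the
  // original address and one at offset 16.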
50464   unsigned Fast;
50465   if (VT.is256BitVector() && StVT == VT &&
50466       TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
50467                              *St->getMemOperand(), &Fast) &&
50468       !Fast) {
50469     unsigned NumElems = VT.getVectorNumElements();
50470     if (NumElems < 2)
50471       return SDValue();
50472 
50473     return splitVectorStore(St, DAG);
50474   }
50475 
50476   // Split under-aligned vector non-temporal stores.
50477   if (St->isNonTemporal() && StVT == VT &&
50478       St->getAlign().value() < VT.getStoreSize()) {
50479     // ZMM/YMM nt-stores - either it can be stored as a series of shorter
50480     // vectors or the legalizer can scalarize it to use MOVNTI.
50481     if (VT.is256BitVector() || VT.is512BitVector()) {
50482       unsigned NumElems = VT.getVectorNumElements();
50483       if (NumElems < 2)
50484         return SDValue();
50485       return splitVectorStore(St, DAG);
50486     }
50487 
50488     // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
50489     // to use MOVNTI.
50490     if (VT.is128BitVector() && Subtarget.hasSSE2()) {
50491       MVT NTVT = Subtarget.hasSSE4A()
50492                      ? MVT::v2f64
50493                      : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
50494       return scalarizeVectorStore(St, NTVT, DAG);
50495     }
50496   }
50497 
  // Try to optimize v16i16->v16i8 truncating stores when BWI is not
  // supported, but AVX512F is, by extending to v16i32 and truncating.
50500   if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
50501       St->getValue().getOpcode() == ISD::TRUNCATE &&
50502       St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
50503       TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
50504       St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
50505     SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32,
50506                               St->getValue().getOperand(0));
50507     return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
50508                              MVT::v16i8, St->getMemOperand());
50509   }
50510 
50511   // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
50512   if (!St->isTruncatingStore() &&
50513       (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
50514        StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
50515       StoredVal.hasOneUse() &&
50516       TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
50517     bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
50518     return EmitTruncSStore(IsSigned, St->getChain(),
50519                            dl, StoredVal.getOperand(0), St->getBasePtr(),
50520                            VT, St->getMemOperand(), DAG);
50521   }
50522 
  // Try to fold an extract_element(VTRUNC) pattern into a truncating store.
50524   if (!St->isTruncatingStore()) {
50525     auto IsExtractedElement = [](SDValue V) {
50526       if (V.getOpcode() == ISD::TRUNCATE && V.hasOneUse())
50527         V = V.getOperand(0);
50528       unsigned Opc = V.getOpcode();
50529       if ((Opc == ISD::EXTRACT_VECTOR_ELT || Opc == X86ISD::PEXTRW) &&
50530           isNullConstant(V.getOperand(1)) && V.hasOneUse() &&
50531           V.getOperand(0).hasOneUse())
50532         return V.getOperand(0);
50533       return SDValue();
50534     };
50535     if (SDValue Extract = IsExtractedElement(StoredVal)) {
50536       SDValue Trunc = peekThroughOneUseBitcasts(Extract);
50537       if (Trunc.getOpcode() == X86ISD::VTRUNC) {
50538         SDValue Src = Trunc.getOperand(0);
50539         MVT DstVT = Trunc.getSimpleValueType();
50540         MVT SrcVT = Src.getSimpleValueType();
50541         unsigned NumSrcElts = SrcVT.getVectorNumElements();
50542         unsigned NumTruncBits = DstVT.getScalarSizeInBits() * NumSrcElts;
50543         MVT TruncVT = MVT::getVectorVT(DstVT.getScalarType(), NumSrcElts);
50544         if (NumTruncBits == VT.getSizeInBits() &&
50545             TLI.isTruncStoreLegal(SrcVT, TruncVT)) {
50546           return DAG.getTruncStore(St->getChain(), dl, Src, St->getBasePtr(),
50547                                    TruncVT, St->getMemOperand());
50548         }
50549       }
50550     }
50551   }
50552 
50553   // Optimize trunc store (of multiple scalars) to shuffle and store.
50554   // First, pack all of the elements in one place. Next, store to memory
50555   // in fewer chunks.
50556   if (St->isTruncatingStore() && VT.isVector()) {
50557     // Check if we can detect an AVG pattern from the truncation. If yes,
50558     // replace the trunc store by a normal store with the result of X86ISD::AVG
50559     // instruction.
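    // E.g. a truncating store to vXi8/vXi16 of
    //   srl (add (add (zext a), (zext b)), 1), 1
    // matches the rounding-average idiom (a + b + 1) >> 1 and can typically
    // be stored as the result of X86ISD::AVG (pavgb/pavgw).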
50560     if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
50561       if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
50562                                          Subtarget, dl))
50563         return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
50564                             St->getPointerInfo(), St->getOriginalAlign(),
50565                             St->getMemOperand()->getFlags());
50566 
50567     if (TLI.isTruncStoreLegal(VT, StVT)) {
50568       if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
50569         return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
50570                                dl, Val, St->getBasePtr(),
50571                                St->getMemoryVT(), St->getMemOperand(), DAG);
50572       if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
50573                                           DAG, dl))
50574         return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
50575                                dl, Val, St->getBasePtr(),
50576                                St->getMemoryVT(), St->getMemOperand(), DAG);
50577     }
50578 
50579     return SDValue();
50580   }
50581 
50582   // Cast ptr32 and ptr64 pointers to the default address space before a store.
50583   unsigned AddrSpace = St->getAddressSpace();
50584   if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
50585       AddrSpace == X86AS::PTR32_UPTR) {
50586     MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
50587     if (PtrVT != St->getBasePtr().getSimpleValueType()) {
50588       SDValue Cast =
50589           DAG.getAddrSpaceCast(dl, PtrVT, St->getBasePtr(), AddrSpace, 0);
50590       return DAG.getTruncStore(
50591           St->getChain(), dl, StoredVal, Cast, St->getPointerInfo(), StVT,
50592           St->getOriginalAlign(), St->getMemOperand()->getFlags(),
50593           St->getAAInfo());
50594     }
50595   }
50596 
50597   // Turn load->store of MMX types into GPR load/stores.  This avoids clobbering
50598   // the FP state in cases where an emms may be missing.
50599   // A preferable solution to the general problem is to figure out the right
50600   // places to insert EMMS.  This qualifies as a quick hack.
50601 
50602   // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
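  // Roughly: on a 32-bit target with SSE2, an (i64 store (i64 load Ptr)) pair
  // is rewritten as an f64 load/store so it can lower to a single 64-bit
  // SSE move instead of a pair of 32-bit GPR moves.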
50603   if (VT.getSizeInBits() != 64)
50604     return SDValue();
50605 
50606   const Function &F = DAG.getMachineFunction().getFunction();
50607   bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
50608   bool F64IsLegal =
50609       !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
50610 
50611   if (!F64IsLegal || Subtarget.is64Bit())
50612     return SDValue();
50613 
50614   if (VT == MVT::i64 && isa<LoadSDNode>(St->getValue()) &&
50615       cast<LoadSDNode>(St->getValue())->isSimple() &&
50616       St->getChain().hasOneUse() && St->isSimple()) {
50617     auto *Ld = cast<LoadSDNode>(St->getValue());
50618 
50619     if (!ISD::isNormalLoad(Ld))
50620       return SDValue();
50621 
50622     // Avoid the transformation if there are multiple uses of the loaded value.
50623     if (!Ld->hasNUsesOfValue(1, 0))
50624       return SDValue();
50625 
50626     SDLoc LdDL(Ld);
50627     SDLoc StDL(N);
50628     // Lower to a single movq load/store pair.
50629     SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
50630                                 Ld->getBasePtr(), Ld->getMemOperand());
50631 
50632     // Make sure new load is placed in same chain order.
50633     DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
50634     return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
50635                         St->getMemOperand());
50636   }
50637 
50638   // This is similar to the above case, but here we handle a scalar 64-bit
50639   // integer store that is extracted from a vector on a 32-bit target.
50640   // If we have SSE2, then we can treat it like a floating-point double
50641   // to get past legalization. The execution dependencies fixup pass will
50642   // choose the optimal machine instruction for the store if this really is
50643   // an integer or v2f32 rather than an f64.
50644   if (VT == MVT::i64 &&
50645       St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
50646     SDValue OldExtract = St->getOperand(1);
50647     SDValue ExtOp0 = OldExtract.getOperand(0);
50648     unsigned VecSize = ExtOp0.getValueSizeInBits();
50649     EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
50650     SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
50651     SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
50652                                      BitCast, OldExtract.getOperand(1));
50653     return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
50654                         St->getPointerInfo(), St->getOriginalAlign(),
50655                         St->getMemOperand()->getFlags());
50656   }
50657 
50658   return SDValue();
50659 }
50660 
static SDValue combineVEXTRACT_STORE(SDNode *N, SelectionDAG &DAG,
50662                                      TargetLowering::DAGCombinerInfo &DCI,
50663                                      const X86Subtarget &Subtarget) {
50664   auto *St = cast<MemIntrinsicSDNode>(N);
50665 
50666   SDValue StoredVal = N->getOperand(1);
50667   MVT VT = StoredVal.getSimpleValueType();
50668   EVT MemVT = St->getMemoryVT();
50669 
50670   // Figure out which elements we demand.
50671   unsigned StElts = MemVT.getSizeInBits() / VT.getScalarSizeInBits();
50672   APInt DemandedElts = APInt::getLowBitsSet(VT.getVectorNumElements(), StElts);
50673 
50674   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
50675   if (TLI.SimplifyDemandedVectorElts(StoredVal, DemandedElts, DCI)) {
50676     if (N->getOpcode() != ISD::DELETED_NODE)
50677       DCI.AddToWorklist(N);
50678     return SDValue(N, 0);
50679   }
50680 
50681   return SDValue();
50682 }
50683 
50684 /// Return 'true' if this vector operation is "horizontal"
50685 /// and return the operands for the horizontal operation in LHS and RHS.  A
50686 /// horizontal operation performs the binary operation on successive elements
50687 /// of its first operand, then on successive elements of its second operand,
50688 /// returning the resulting values in a vector.  For example, if
50689 ///   A = < float a0, float a1, float a2, float a3 >
50690 /// and
50691 ///   B = < float b0, float b1, float b2, float b3 >
50692 /// then the result of doing a horizontal operation on A and B is
50693 ///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
50694 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
50695 /// A horizontal-op B, for some already available A and B, and if so then LHS is
50696 /// set to A, RHS to B, and the routine returns 'true'.
static bool isHorizontalBinOp(unsigned HOpcode, SDValue &LHS, SDValue &RHS,
50698                               SelectionDAG &DAG, const X86Subtarget &Subtarget,
50699                               bool IsCommutative,
50700                               SmallVectorImpl<int> &PostShuffleMask) {
50701   // If either operand is undef, bail out. The binop should be simplified.
50702   if (LHS.isUndef() || RHS.isUndef())
50703     return false;
50704 
50705   // Look for the following pattern:
50706   //   A = < float a0, float a1, float a2, float a3 >
50707   //   B = < float b0, float b1, float b2, float b3 >
50708   // and
50709   //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
50710   //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
50711   // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
50712   // which is A horizontal-op B.
50713 
50714   MVT VT = LHS.getSimpleValueType();
50715   assert((VT.is128BitVector() || VT.is256BitVector()) &&
50716          "Unsupported vector type for horizontal add/sub");
50717   unsigned NumElts = VT.getVectorNumElements();
50718 
50719   auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
50720                         SmallVectorImpl<int> &ShuffleMask) {
50721     bool UseSubVector = false;
50722     if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
50723         Op.getOperand(0).getValueType().is256BitVector() &&
50724         llvm::isNullConstant(Op.getOperand(1))) {
50725       Op = Op.getOperand(0);
50726       UseSubVector = true;
50727     }
50728     SmallVector<SDValue, 2> SrcOps;
50729     SmallVector<int, 16> SrcMask, ScaledMask;
50730     SDValue BC = peekThroughBitcasts(Op);
50731     if (getTargetShuffleInputs(BC, SrcOps, SrcMask, DAG) &&
50732         !isAnyZero(SrcMask) && all_of(SrcOps, [BC](SDValue Op) {
50733           return Op.getValueSizeInBits() == BC.getValueSizeInBits();
50734         })) {
50735       resolveTargetShuffleInputsAndMask(SrcOps, SrcMask);
50736       if (!UseSubVector && SrcOps.size() <= 2 &&
50737           scaleShuffleElements(SrcMask, NumElts, ScaledMask)) {
50738         N0 = !SrcOps.empty() ? SrcOps[0] : SDValue();
50739         N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
50740         ShuffleMask.assign(ScaledMask.begin(), ScaledMask.end());
50741       }
50742       if (UseSubVector && SrcOps.size() == 1 &&
50743           scaleShuffleElements(SrcMask, 2 * NumElts, ScaledMask)) {
50744         std::tie(N0, N1) = DAG.SplitVector(SrcOps[0], SDLoc(Op));
50745         ArrayRef<int> Mask = ArrayRef<int>(ScaledMask).slice(0, NumElts);
50746         ShuffleMask.assign(Mask.begin(), Mask.end());
50747       }
50748     }
50749   };
50750 
50751   // View LHS in the form
50752   //   LHS = VECTOR_SHUFFLE A, B, LMask
50753   // If LHS is not a shuffle, then pretend it is the identity shuffle:
50754   //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
50755   // NOTE: A default initialized SDValue represents an UNDEF of type VT.
50756   SDValue A, B;
50757   SmallVector<int, 16> LMask;
50758   GetShuffle(LHS, A, B, LMask);
50759 
50760   // Likewise, view RHS in the form
50761   //   RHS = VECTOR_SHUFFLE C, D, RMask
50762   SDValue C, D;
50763   SmallVector<int, 16> RMask;
50764   GetShuffle(RHS, C, D, RMask);
50765 
50766   // At least one of the operands should be a vector shuffle.
50767   unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
50768   if (NumShuffles == 0)
50769     return false;
50770 
50771   if (LMask.empty()) {
50772     A = LHS;
50773     for (unsigned i = 0; i != NumElts; ++i)
50774       LMask.push_back(i);
50775   }
50776 
50777   if (RMask.empty()) {
50778     C = RHS;
50779     for (unsigned i = 0; i != NumElts; ++i)
50780       RMask.push_back(i);
50781   }
50782 
  // If we have a unary mask, ensure the other op is set to null.
50784   if (isUndefOrInRange(LMask, 0, NumElts))
50785     B = SDValue();
50786   else if (isUndefOrInRange(LMask, NumElts, NumElts * 2))
50787     A = SDValue();
50788 
50789   if (isUndefOrInRange(RMask, 0, NumElts))
50790     D = SDValue();
50791   else if (isUndefOrInRange(RMask, NumElts, NumElts * 2))
50792     C = SDValue();
50793 
50794   // If A and B occur in reverse order in RHS, then canonicalize by commuting
50795   // RHS operands and shuffle mask.
50796   if (A != C) {
50797     std::swap(C, D);
50798     ShuffleVectorSDNode::commuteMask(RMask);
50799   }
50800   // Check that the shuffles are both shuffling the same vectors.
50801   if (!(A == C && B == D))
50802     return false;
50803 
50804   PostShuffleMask.clear();
50805   PostShuffleMask.append(NumElts, SM_SentinelUndef);
50806 
50807   // LHS and RHS are now:
50808   //   LHS = shuffle A, B, LMask
50809   //   RHS = shuffle A, B, RMask
50810   // Check that the masks correspond to performing a horizontal operation.
50811   // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
50812   // so we just repeat the inner loop if this is a 256-bit op.
50813   unsigned Num128BitChunks = VT.getSizeInBits() / 128;
50814   unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
50815   unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
50816   assert((NumEltsPer128BitChunk % 2 == 0) &&
50817          "Vector type should have an even number of elements in each lane");
50818   for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
50819     for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
50820       // Ignore undefined components.
50821       int LIdx = LMask[i + j], RIdx = RMask[i + j];
50822       if (LIdx < 0 || RIdx < 0 ||
50823           (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
50824           (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
50825         continue;
50826 
50827       // Check that successive odd/even elements are being operated on. If not,
50828       // this is not a horizontal operation.
50829       if (!((RIdx & 1) == 1 && (LIdx + 1) == RIdx) &&
50830           !((LIdx & 1) == 1 && (RIdx + 1) == LIdx && IsCommutative))
50831         return false;
50832 
50833       // Compute the post-shuffle mask index based on where the element
50834       // is stored in the HOP result, and where it needs to be moved to.
50835       int Base = LIdx & ~1u;
50836       int Index = ((Base % NumEltsPer128BitChunk) / 2) +
50837                   ((Base % NumElts) & ~(NumEltsPer128BitChunk - 1));
50838 
50839       // The  low half of the 128-bit result must choose from A.
50840       // The high half of the 128-bit result must choose from B,
50841       // unless B is undef. In that case, we are always choosing from A.
50842       if ((B && Base >= (int)NumElts) || (!B && i >= NumEltsPer64BitChunk))
50843         Index += NumEltsPer64BitChunk;
50844       PostShuffleMask[i + j] = Index;
50845     }
50846   }
50847 
50848   SDValue NewLHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
50849   SDValue NewRHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
50850 
50851   bool IsIdentityPostShuffle =
50852       isSequentialOrUndefInRange(PostShuffleMask, 0, NumElts, 0);
50853   if (IsIdentityPostShuffle)
50854     PostShuffleMask.clear();
50855 
50856   // Avoid 128-bit multi lane shuffles if pre-AVX2 and FP (integer will split).
50857   if (!IsIdentityPostShuffle && !Subtarget.hasAVX2() && VT.isFloatingPoint() &&
50858       isMultiLaneShuffleMask(128, VT.getScalarSizeInBits(), PostShuffleMask))
50859     return false;
50860 
50861   // If the source nodes are already used in HorizOps then always accept this.
50862   // Shuffle folding should merge these back together.
50863   bool FoundHorizLHS = llvm::any_of(NewLHS->uses(), [&](SDNode *User) {
50864     return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
50865   });
50866   bool FoundHorizRHS = llvm::any_of(NewRHS->uses(), [&](SDNode *User) {
50867     return User->getOpcode() == HOpcode && User->getValueType(0) == VT;
50868   });
50869   bool ForceHorizOp = FoundHorizLHS && FoundHorizRHS;
50870 
50871   // Assume a SingleSource HOP if we only shuffle one input and don't need to
50872   // shuffle the result.
50873   if (!ForceHorizOp &&
50874       !shouldUseHorizontalOp(NewLHS == NewRHS &&
50875                                  (NumShuffles < 2 || !IsIdentityPostShuffle),
50876                              DAG, Subtarget))
50877     return false;
50878 
50879   LHS = DAG.getBitcast(VT, NewLHS);
50880   RHS = DAG.getBitcast(VT, NewRHS);
50881   return true;
50882 }
50883 
50884 // Try to synthesize horizontal (f)hadd/hsub from (f)adds/subs of shuffles.
static SDValue combineToHorizontalAddSub(SDNode *N, SelectionDAG &DAG,
50886                                          const X86Subtarget &Subtarget) {
50887   EVT VT = N->getValueType(0);
50888   unsigned Opcode = N->getOpcode();
50889   bool IsAdd = (Opcode == ISD::FADD) || (Opcode == ISD::ADD);
50890   SmallVector<int, 8> PostShuffleMask;
50891 
50892   switch (Opcode) {
50893   case ISD::FADD:
50894   case ISD::FSUB:
50895     if ((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
50896         (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
50897       SDValue LHS = N->getOperand(0);
50898       SDValue RHS = N->getOperand(1);
50899       auto HorizOpcode = IsAdd ? X86ISD::FHADD : X86ISD::FHSUB;
50900       if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
50901                             PostShuffleMask)) {
50902         SDValue HorizBinOp = DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
50903         if (!PostShuffleMask.empty())
50904           HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
50905                                             DAG.getUNDEF(VT), PostShuffleMask);
50906         return HorizBinOp;
50907       }
50908     }
50909     break;
50910   case ISD::ADD:
50911   case ISD::SUB:
50912     if (Subtarget.hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
50913                                  VT == MVT::v16i16 || VT == MVT::v8i32)) {
50914       SDValue LHS = N->getOperand(0);
50915       SDValue RHS = N->getOperand(1);
50916       auto HorizOpcode = IsAdd ? X86ISD::HADD : X86ISD::HSUB;
50917       if (isHorizontalBinOp(HorizOpcode, LHS, RHS, DAG, Subtarget, IsAdd,
50918                             PostShuffleMask)) {
50919         auto HOpBuilder = [HorizOpcode](SelectionDAG &DAG, const SDLoc &DL,
50920                                         ArrayRef<SDValue> Ops) {
50921           return DAG.getNode(HorizOpcode, DL, Ops[0].getValueType(), Ops);
50922         };
50923         SDValue HorizBinOp = SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
50924                                               {LHS, RHS}, HOpBuilder);
50925         if (!PostShuffleMask.empty())
50926           HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
50927                                             DAG.getUNDEF(VT), PostShuffleMask);
50928         return HorizBinOp;
50929       }
50930     }
50931     break;
50932   }
50933 
50934   return SDValue();
50935 }
50936 
50937 //  Try to combine the following nodes
50938 //  t29: i64 = X86ISD::Wrapper TargetConstantPool:i64
50939 //    <i32 -2147483648[float -0.000000e+00]> 0
50940 //  t27: v16i32[v16f32],ch = X86ISD::VBROADCAST_LOAD
50941 //    <(load 4 from constant-pool)> t0, t29
50942 //  [t30: v16i32 = bitcast t27]
50943 //  t6: v16i32 = xor t7, t27[t30]
50944 //  t11: v16f32 = bitcast t6
50945 //  t21: v16f32 = X86ISD::VFMULC[X86ISD::VCFMULC] t11, t8
50946 //  into X86ISD::VFCMULC[X86ISD::VFMULC] if possible:
50947 //  t22: v16f32 = bitcast t7
50948 //  t23: v16f32 = X86ISD::VFCMULC[X86ISD::VFMULC] t8, t22
50949 //  t24: v32f16 = bitcast t23
static SDValue combineFMulcFCMulc(SDNode *N, SelectionDAG &DAG,
50951                                   const X86Subtarget &Subtarget) {
50952   EVT VT = N->getValueType(0);
50953   SDValue LHS = N->getOperand(0);
50954   SDValue RHS = N->getOperand(1);
50955   int CombineOpcode =
50956       N->getOpcode() == X86ISD::VFCMULC ? X86ISD::VFMULC : X86ISD::VFCMULC;
50957   auto combineConjugation = [&](SDValue &r) {
50958     if (LHS->getOpcode() == ISD::BITCAST && RHS.hasOneUse()) {
50959       SDValue XOR = LHS.getOperand(0);
50960       if (XOR->getOpcode() == ISD::XOR && XOR.hasOneUse()) {
50961         KnownBits XORRHS = DAG.computeKnownBits(XOR.getOperand(1));
50962         if (XORRHS.isConstant()) {
50963           APInt ConjugationInt32 = APInt(32, 0x80000000, true);
50964           APInt ConjugationInt64 = APInt(64, 0x8000000080000000ULL, true);
50965           if ((XORRHS.getBitWidth() == 32 &&
50966                XORRHS.getConstant() == ConjugationInt32) ||
50967               (XORRHS.getBitWidth() == 64 &&
50968                XORRHS.getConstant() == ConjugationInt64)) {
50969             SelectionDAG::FlagInserter FlagsInserter(DAG, N);
50970             SDValue I2F = DAG.getBitcast(VT, LHS.getOperand(0).getOperand(0));
50971             SDValue FCMulC = DAG.getNode(CombineOpcode, SDLoc(N), VT, RHS, I2F);
50972             r = DAG.getBitcast(VT, FCMulC);
50973             return true;
50974           }
50975         }
50976       }
50977     }
50978     return false;
50979   };
50980   SDValue Res;
50981   if (combineConjugation(Res))
50982     return Res;
50983   std::swap(LHS, RHS);
50984   if (combineConjugation(Res))
50985     return Res;
50986   return Res;
50987 }
50988 
50989 //  Try to combine the following nodes:
50990 //  FADD(A, FMA(B, C, 0)) and FADD(A, FMUL(B, C)) to FMA(B, C, A)
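//  Roughly (with FP16 complex ops):
//    t1: v16f32 = X86ISD::VFMULC B, C
//    t2: v32f16 = bitcast t1
//    t3: v32f16 = fadd A, t2
//  would become
//    t4: v16f32 = X86ISD::VFMADDC B, C, (bitcast A)
//    t5: v32f16 = bitcast t4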
static SDValue combineFaddCFmul(SDNode *N, SelectionDAG &DAG,
50992                                 const X86Subtarget &Subtarget) {
50993   auto AllowContract = [&DAG](const SDNodeFlags &Flags) {
50994     return DAG.getTarget().Options.AllowFPOpFusion == FPOpFusion::Fast ||
50995            Flags.hasAllowContract();
50996   };
50997 
50998   auto HasNoSignedZero = [&DAG](const SDNodeFlags &Flags) {
50999     return DAG.getTarget().Options.NoSignedZerosFPMath ||
51000            Flags.hasNoSignedZeros();
51001   };
51002   auto IsVectorAllNegativeZero = [&DAG](SDValue Op) {
51003     APInt AI = APInt(32, 0x80008000, true);
51004     KnownBits Bits = DAG.computeKnownBits(Op);
51005     return Bits.getBitWidth() == 32 && Bits.isConstant() &&
51006            Bits.getConstant() == AI;
51007   };
51008 
51009   if (N->getOpcode() != ISD::FADD || !Subtarget.hasFP16() ||
51010       !AllowContract(N->getFlags()))
51011     return SDValue();
51012 
51013   EVT VT = N->getValueType(0);
51014   if (VT != MVT::v8f16 && VT != MVT::v16f16 && VT != MVT::v32f16)
51015     return SDValue();
51016 
51017   SDValue LHS = N->getOperand(0);
51018   SDValue RHS = N->getOperand(1);
51019   bool IsConj;
51020   SDValue FAddOp1, MulOp0, MulOp1;
51021   auto GetCFmulFrom = [&MulOp0, &MulOp1, &IsConj, &AllowContract,
51022                        &IsVectorAllNegativeZero,
51023                        &HasNoSignedZero](SDValue N) -> bool {
51024     if (!N.hasOneUse() || N.getOpcode() != ISD::BITCAST)
51025       return false;
51026     SDValue Op0 = N.getOperand(0);
51027     unsigned Opcode = Op0.getOpcode();
51028     if (Op0.hasOneUse() && AllowContract(Op0->getFlags())) {
51029       if ((Opcode == X86ISD::VFMULC || Opcode == X86ISD::VFCMULC)) {
51030         MulOp0 = Op0.getOperand(0);
51031         MulOp1 = Op0.getOperand(1);
51032         IsConj = Opcode == X86ISD::VFCMULC;
51033         return true;
51034       }
51035       if ((Opcode == X86ISD::VFMADDC || Opcode == X86ISD::VFCMADDC) &&
51036           ((ISD::isBuildVectorAllZeros(Op0->getOperand(2).getNode()) &&
51037             HasNoSignedZero(Op0->getFlags())) ||
51038            IsVectorAllNegativeZero(Op0->getOperand(2)))) {
51039         MulOp0 = Op0.getOperand(0);
51040         MulOp1 = Op0.getOperand(1);
51041         IsConj = Opcode == X86ISD::VFCMADDC;
51042         return true;
51043       }
51044     }
51045     return false;
51046   };
51047 
51048   if (GetCFmulFrom(LHS))
51049     FAddOp1 = RHS;
51050   else if (GetCFmulFrom(RHS))
51051     FAddOp1 = LHS;
51052   else
51053     return SDValue();
51054 
51055   MVT CVT = MVT::getVectorVT(MVT::f32, VT.getVectorNumElements() / 2);
51056   FAddOp1 = DAG.getBitcast(CVT, FAddOp1);
51057   unsigned NewOp = IsConj ? X86ISD::VFCMADDC : X86ISD::VFMADDC;
51058   // FIXME: How do we handle when fast math flags of FADD are different from
51059   // CFMUL's?
51060   SDValue CFmul =
51061       DAG.getNode(NewOp, SDLoc(N), CVT, MulOp0, MulOp1, FAddOp1, N->getFlags());
51062   return DAG.getBitcast(VT, CFmul);
51063 }
51064 
51065 /// Do target-specific dag combines on floating-point adds/subs.
static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
51067                                const X86Subtarget &Subtarget) {
51068   if (SDValue HOp = combineToHorizontalAddSub(N, DAG, Subtarget))
51069     return HOp;
51070 
51071   if (SDValue COp = combineFaddCFmul(N, DAG, Subtarget))
51072     return COp;
51073 
51074   return SDValue();
51075 }
51076 
51077 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
51078 /// the codegen.
51079 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
51080 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
51081 ///       anything that is guaranteed to be transformed by DAGCombiner.
static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
51083                                           const X86Subtarget &Subtarget,
51084                                           const SDLoc &DL) {
51085   assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
51086   SDValue Src = N->getOperand(0);
51087   unsigned SrcOpcode = Src.getOpcode();
51088   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51089 
51090   EVT VT = N->getValueType(0);
51091   EVT SrcVT = Src.getValueType();
51092 
51093   auto IsFreeTruncation = [VT](SDValue Op) {
51094     unsigned TruncSizeInBits = VT.getScalarSizeInBits();
51095 
51096     // See if this has been extended from a smaller/equal size to
51097     // the truncation size, allowing a truncation to combine with the extend.
51098     unsigned Opcode = Op.getOpcode();
51099     if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
51100          Opcode == ISD::ZERO_EXTEND) &&
51101         Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
51102       return true;
51103 
51104     // See if this is a single use constant which can be constant folded.
    // NOTE: We don't peek through bitcasts here because there is currently
    // no support for constant folding truncate+bitcast+vector_of_constants. So
    // we'll just end up with a truncate on both operands which will
51108     // get turned back into (truncate (binop)) causing an infinite loop.
51109     return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
51110   };
51111 
51112   auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
51113     SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
51114     SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
51115     return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
51116   };
51117 
51118   // Don't combine if the operation has other uses.
51119   if (!Src.hasOneUse())
51120     return SDValue();
51121 
51122   // Only support vector truncation for now.
51123   // TODO: i64 scalar math would benefit as well.
51124   if (!VT.isVector())
51125     return SDValue();
51126 
  // In most cases it's only worth pre-truncating if we're only facing the cost
51128   // of one truncation.
51129   // i.e. if one of the inputs will constant fold or the input is repeated.
51130   switch (SrcOpcode) {
51131   case ISD::MUL:
    // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - it's
51133     // better to truncate if we have the chance.
51134     if (SrcVT.getScalarType() == MVT::i64 &&
51135         TLI.isOperationLegal(SrcOpcode, VT) &&
51136         !TLI.isOperationLegal(SrcOpcode, SrcVT))
51137       return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
51138     [[fallthrough]];
51139   case ISD::AND:
51140   case ISD::XOR:
51141   case ISD::OR:
51142   case ISD::ADD:
51143   case ISD::SUB: {
51144     SDValue Op0 = Src.getOperand(0);
51145     SDValue Op1 = Src.getOperand(1);
51146     if (TLI.isOperationLegal(SrcOpcode, VT) &&
51147         (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
51148       return TruncateArithmetic(Op0, Op1);
51149     break;
51150   }
51151   }
51152 
51153   return SDValue();
51154 }
51155 
51156 // Try to form a MULHU or MULHS node by looking for
51157 // (trunc (srl (mul ext, ext), 16))
51158 // TODO: This is X86 specific because we want to be able to handle wide types
51159 // before type legalization. But we can only do it if the vector will be
51160 // legalized via widening/splitting. Type legalization can't handle promotion
51161 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
51162 // combiner.
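// E.g. (v8i16 trunc (v8i32 srl (mul (zext a), (zext b)), 16)) would become
// (v8i16 mulhu a, b), which typically selects to pmulhuw (pmulhw for the
// signed case).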
static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
51164                             SelectionDAG &DAG, const X86Subtarget &Subtarget) {
51165   // First instruction should be a right shift of a multiply.
51166   if (Src.getOpcode() != ISD::SRL ||
51167       Src.getOperand(0).getOpcode() != ISD::MUL)
51168     return SDValue();
51169 
51170   if (!Subtarget.hasSSE2())
51171     return SDValue();
51172 
51173   // Only handle vXi16 types that are at least 128-bits unless they will be
51174   // widened.
51175   if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
51176     return SDValue();
51177 
51178   // Input type should be at least vXi32.
51179   EVT InVT = Src.getValueType();
51180   if (InVT.getVectorElementType().getSizeInBits() < 32)
51181     return SDValue();
51182 
51183   // Need a shift by 16.
51184   APInt ShiftAmt;
51185   if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
51186       ShiftAmt != 16)
51187     return SDValue();
51188 
51189   SDValue LHS = Src.getOperand(0).getOperand(0);
51190   SDValue RHS = Src.getOperand(0).getOperand(1);
51191 
51192   // Count leading sign/zero bits on both inputs - if there are enough then
51193   // truncation back to vXi16 will be cheap - either as a pack/shuffle
51194   // sequence or using AVX512 truncations. If the inputs are sext/zext then the
51195   // truncations may actually be free by peeking through to the ext source.
51196   auto IsSext = [&DAG](SDValue V) {
51197     return DAG.ComputeMaxSignificantBits(V) <= 16;
51198   };
51199   auto IsZext = [&DAG](SDValue V) {
51200     return DAG.computeKnownBits(V).countMaxActiveBits() <= 16;
51201   };
51202 
51203   bool IsSigned = IsSext(LHS) && IsSext(RHS);
51204   bool IsUnsigned = IsZext(LHS) && IsZext(RHS);
51205   if (!IsSigned && !IsUnsigned)
51206     return SDValue();
51207 
51208   // Check if both inputs are extensions, which will be removed by truncation.
51209   bool IsTruncateFree = (LHS.getOpcode() == ISD::SIGN_EXTEND ||
51210                          LHS.getOpcode() == ISD::ZERO_EXTEND) &&
51211                         (RHS.getOpcode() == ISD::SIGN_EXTEND ||
51212                          RHS.getOpcode() == ISD::ZERO_EXTEND) &&
51213                         LHS.getOperand(0).getScalarValueSizeInBits() <= 16 &&
51214                         RHS.getOperand(0).getScalarValueSizeInBits() <= 16;
51215 
51216   // For AVX2+ targets, with the upper bits known zero, we can perform MULHU on
51217   // the (bitcasted) inputs directly, and then cheaply pack/truncate the result
51218   // (upper elts will be zero). Don't attempt this with just AVX512F as MULHU
51219   // will have to split anyway.
51220   unsigned InSizeInBits = InVT.getSizeInBits();
51221   if (IsUnsigned && !IsTruncateFree && Subtarget.hasInt256() &&
51222       !(Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.is256BitVector()) &&
51223       (InSizeInBits % 16) == 0) {
51224     EVT BCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
51225                                 InVT.getSizeInBits() / 16);
51226     SDValue Res = DAG.getNode(ISD::MULHU, DL, BCVT, DAG.getBitcast(BCVT, LHS),
51227                               DAG.getBitcast(BCVT, RHS));
51228     return DAG.getNode(ISD::TRUNCATE, DL, VT, DAG.getBitcast(InVT, Res));
51229   }
51230 
51231   // Truncate back to source type.
51232   LHS = DAG.getNode(ISD::TRUNCATE, DL, VT, LHS);
51233   RHS = DAG.getNode(ISD::TRUNCATE, DL, VT, RHS);
51234 
51235   unsigned Opc = IsSigned ? ISD::MULHS : ISD::MULHU;
51236   return DAG.getNode(Opc, DL, VT, LHS, RHS);
51237 }
51238 
51239 // Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
51240 // from one vector with signed bytes from another vector, adds together
51241 // adjacent pairs of 16-bit products, and saturates the result before
51242 // truncating to 16-bits.
51243 //
51244 // Which looks something like this:
51245 // (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
51246 //                 (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
51248                                const X86Subtarget &Subtarget,
51249                                const SDLoc &DL) {
51250   if (!VT.isVector() || !Subtarget.hasSSSE3())
51251     return SDValue();
51252 
51253   unsigned NumElems = VT.getVectorNumElements();
51254   EVT ScalarVT = VT.getVectorElementType();
51255   if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
51256     return SDValue();
51257 
51258   SDValue SSatVal = detectSSatPattern(In, VT);
51259   if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
51260     return SDValue();
51261 
51262   // Ok this is a signed saturation of an ADD. See if this ADD is adding pairs
51263   // of multiplies from even/odd elements.
51264   SDValue N0 = SSatVal.getOperand(0);
51265   SDValue N1 = SSatVal.getOperand(1);
51266 
51267   if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
51268     return SDValue();
51269 
51270   SDValue N00 = N0.getOperand(0);
51271   SDValue N01 = N0.getOperand(1);
51272   SDValue N10 = N1.getOperand(0);
51273   SDValue N11 = N1.getOperand(1);
51274 
51275   // TODO: Handle constant vectors and use knownbits/computenumsignbits?
51276   // Canonicalize zero_extend to LHS.
51277   if (N01.getOpcode() == ISD::ZERO_EXTEND)
51278     std::swap(N00, N01);
51279   if (N11.getOpcode() == ISD::ZERO_EXTEND)
51280     std::swap(N10, N11);
51281 
51282   // Ensure we have a zero_extend and a sign_extend.
51283   if (N00.getOpcode() != ISD::ZERO_EXTEND ||
51284       N01.getOpcode() != ISD::SIGN_EXTEND ||
51285       N10.getOpcode() != ISD::ZERO_EXTEND ||
51286       N11.getOpcode() != ISD::SIGN_EXTEND)
51287     return SDValue();
51288 
51289   // Peek through the extends.
51290   N00 = N00.getOperand(0);
51291   N01 = N01.getOperand(0);
51292   N10 = N10.getOperand(0);
51293   N11 = N11.getOperand(0);
51294 
51295   // Ensure the extend is from vXi8.
51296   if (N00.getValueType().getVectorElementType() != MVT::i8 ||
51297       N01.getValueType().getVectorElementType() != MVT::i8 ||
51298       N10.getValueType().getVectorElementType() != MVT::i8 ||
51299       N11.getValueType().getVectorElementType() != MVT::i8)
51300     return SDValue();
51301 
51302   // All inputs should be build_vectors.
51303   if (N00.getOpcode() != ISD::BUILD_VECTOR ||
51304       N01.getOpcode() != ISD::BUILD_VECTOR ||
51305       N10.getOpcode() != ISD::BUILD_VECTOR ||
51306       N11.getOpcode() != ISD::BUILD_VECTOR)
51307     return SDValue();
51308 
51309   // N00/N10 are zero extended. N01/N11 are sign extended.
51310 
51311   // For each element, we need to ensure we have an odd element from one vector
51312   // multiplied by the odd element of another vector and the even element from
51313   // one of the same vectors being multiplied by the even element from the
51314   // other vector. So we need to make sure that for each element i, the
51315   // following operation is being performed:
51316   //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
51317   SDValue ZExtIn, SExtIn;
51318   for (unsigned i = 0; i != NumElems; ++i) {
51319     SDValue N00Elt = N00.getOperand(i);
51320     SDValue N01Elt = N01.getOperand(i);
51321     SDValue N10Elt = N10.getOperand(i);
51322     SDValue N11Elt = N11.getOperand(i);
51323     // TODO: Be more tolerant to undefs.
51324     if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51325         N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51326         N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
51327         N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
51328       return SDValue();
51329     auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
51330     auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
51331     auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
51332     auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
51333     if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
51334       return SDValue();
51335     unsigned IdxN00 = ConstN00Elt->getZExtValue();
51336     unsigned IdxN01 = ConstN01Elt->getZExtValue();
51337     unsigned IdxN10 = ConstN10Elt->getZExtValue();
51338     unsigned IdxN11 = ConstN11Elt->getZExtValue();
51339     // Add is commutative so indices can be reordered.
51340     if (IdxN00 > IdxN10) {
51341       std::swap(IdxN00, IdxN10);
51342       std::swap(IdxN01, IdxN11);
51343     }
51344     // N0 indices must be even. N1 indices must be the next odd element.
51345     if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
51346         IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
51347       return SDValue();
51348     SDValue N00In = N00Elt.getOperand(0);
51349     SDValue N01In = N01Elt.getOperand(0);
51350     SDValue N10In = N10Elt.getOperand(0);
51351     SDValue N11In = N11Elt.getOperand(0);
51352     // First time we find an input capture it.
51353     if (!ZExtIn) {
51354       ZExtIn = N00In;
51355       SExtIn = N01In;
51356     }
51357     if (ZExtIn != N00In || SExtIn != N01In ||
51358         ZExtIn != N10In || SExtIn != N11In)
51359       return SDValue();
51360   }
51361 
51362   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
51363                          ArrayRef<SDValue> Ops) {
51364     // Shrink by adding truncate nodes and let DAGCombine fold with the
51365     // sources.
51366     EVT InVT = Ops[0].getValueType();
51367     assert(InVT.getScalarType() == MVT::i8 &&
51368            "Unexpected scalar element type");
51369     assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
51370     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
51371                                  InVT.getVectorNumElements() / 2);
51372     return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
51373   };
51374   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
51375                           PMADDBuilder);
51376 }
51377 
51378 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
51379                                const X86Subtarget &Subtarget) {
51380   EVT VT = N->getValueType(0);
51381   SDValue Src = N->getOperand(0);
51382   SDLoc DL(N);
51383 
51384   // Attempt to pre-truncate inputs to arithmetic ops instead.
51385   if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
51386     return V;
51387 
51388   // Try to detect AVG pattern first.
51389   if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
51390     return Avg;
51391 
51392   // Try to detect PMADD
51393   if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
51394     return PMAdd;
51395 
51396   // Try to combine truncation with signed/unsigned saturation.
51397   if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
51398     return Val;
51399 
51400   // Try to combine PMULHUW/PMULHW for vXi16.
51401   if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
51402     return V;
51403 
51404   // Detect a truncation to i32 of a bitcast from x86mmx: the bitcast source
51405   // is a direct MMX result, so use MMX_MOVD2W to grab its low 32 bits.
51406   if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
51407     SDValue BCSrc = Src.getOperand(0);
51408     if (BCSrc.getValueType() == MVT::x86mmx)
51409       return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
51410   }
51411 
51412   return SDValue();
51413 }
51414 
51415 static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
51416                              TargetLowering::DAGCombinerInfo &DCI) {
51417   EVT VT = N->getValueType(0);
51418   SDValue In = N->getOperand(0);
51419   SDLoc DL(N);
51420 
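  // If the input is already a signed/unsigned saturation pattern, emit the
  // corresponding saturating-truncate node directly.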
51421   if (SDValue SSatVal = detectSSatPattern(In, VT))
51422     return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
51423   if (SDValue USatVal = detectUSatPattern(In, VT, DAG, DL))
51424     return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
51425 
51426   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51427   APInt DemandedMask(APInt::getAllOnes(VT.getScalarSizeInBits()));
51428   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
51429     return SDValue(N, 0);
51430 
51431   return SDValue();
51432 }
51433 
51434 /// Returns the negated value if the node \p N flips sign of FP value.
51435 ///
51436 /// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
51437 /// or FSUB(0, x)
51438 /// AVX512F does not have FXOR, so FNEG is lowered as
51439 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
51440 /// In this case we go through all bitcasts.
51441 /// This also recognizes splat of a negated value and returns the splat of that
51442 /// value.
51443 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
51444   if (N->getOpcode() == ISD::FNEG)
51445     return N->getOperand(0);
51446 
51447   // Don't recurse exponentially.
51448   if (Depth > SelectionDAG::MaxRecursionDepth)
51449     return SDValue();
51450 
51451   unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
51452 
51453   SDValue Op = peekThroughBitcasts(SDValue(N, 0));
51454   EVT VT = Op->getValueType(0);
51455 
51456   // Make sure the element size doesn't change.
51457   if (VT.getScalarSizeInBits() != ScalarSize)
51458     return SDValue();
51459 
51460   unsigned Opc = Op.getOpcode();
51461   switch (Opc) {
51462   case ISD::VECTOR_SHUFFLE: {
51463     // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
51464     // of this is VECTOR_SHUFFLE(-VEC1, UNDEF).  The mask can be anything here.
51465     if (!Op.getOperand(1).isUndef())
51466       return SDValue();
51467     if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
51468       if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
51469         return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
51470                                     cast<ShuffleVectorSDNode>(Op)->getMask());
51471     break;
51472   }
51473   case ISD::INSERT_VECTOR_ELT: {
51474     // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
51475     // -V, INDEX).
51476     SDValue InsVector = Op.getOperand(0);
51477     SDValue InsVal = Op.getOperand(1);
51478     if (!InsVector.isUndef())
51479       return SDValue();
51480     if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
51481       if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
51482         return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
51483                            NegInsVal, Op.getOperand(2));
51484     break;
51485   }
51486   case ISD::FSUB:
51487   case ISD::XOR:
51488   case X86ISD::FXOR: {
51489     SDValue Op1 = Op.getOperand(1);
51490     SDValue Op0 = Op.getOperand(0);
51491 
51492     // For XOR and FXOR, we want to check if constant
51493     // bits of Op1 are sign bit masks. For FSUB, we
51494     // have to check if constant bits of Op0 are sign
51495     // bit masks and hence we swap the operands.
51496     if (Opc == ISD::FSUB)
51497       std::swap(Op0, Op1);
51498 
51499     APInt UndefElts;
51500     SmallVector<APInt, 16> EltBits;
51501     // Extract constant bits and see if they are all
51502     // sign bit masks. Ignore the undef elements.
51503     if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
51504                                       /* AllowWholeUndefs */ true,
51505                                       /* AllowPartialUndefs */ false)) {
51506       for (unsigned I = 0, E = EltBits.size(); I < E; I++)
51507         if (!UndefElts[I] && !EltBits[I].isSignMask())
51508           return SDValue();
51509 
51510       // Only allow bitcast from correctly-sized constant.
51511       Op0 = peekThroughBitcasts(Op0);
51512       if (Op0.getScalarValueSizeInBits() == ScalarSize)
51513         return Op0;
51514     }
51515     break;
51516   } // case
51517   } // switch
51518 
51519   return SDValue();
51520 }
51521 
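/// Return the FMA-family opcode produced by negating the multiply operand
/// (\p NegMul), the accumulator operand (\p NegAcc), and/or the final result
/// (\p NegRes) of \p Opcode. Unsupported combinations are llvm_unreachable.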
51522 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
51523                                 bool NegRes) {
51524   if (NegMul) {
51525     switch (Opcode) {
51526     default: llvm_unreachable("Unexpected opcode");
51527     case ISD::FMA:              Opcode = X86ISD::FNMADD;        break;
51528     case ISD::STRICT_FMA:       Opcode = X86ISD::STRICT_FNMADD; break;
51529     case X86ISD::FMADD_RND:     Opcode = X86ISD::FNMADD_RND;    break;
51530     case X86ISD::FMSUB:         Opcode = X86ISD::FNMSUB;        break;
51531     case X86ISD::STRICT_FMSUB:  Opcode = X86ISD::STRICT_FNMSUB; break;
51532     case X86ISD::FMSUB_RND:     Opcode = X86ISD::FNMSUB_RND;    break;
51533     case X86ISD::FNMADD:        Opcode = ISD::FMA;              break;
51534     case X86ISD::STRICT_FNMADD: Opcode = ISD::STRICT_FMA;       break;
51535     case X86ISD::FNMADD_RND:    Opcode = X86ISD::FMADD_RND;     break;
51536     case X86ISD::FNMSUB:        Opcode = X86ISD::FMSUB;         break;
51537     case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FMSUB;  break;
51538     case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FMSUB_RND;     break;
51539     }
51540   }
51541 
51542   if (NegAcc) {
51543     switch (Opcode) {
51544     default: llvm_unreachable("Unexpected opcode");
51545     case ISD::FMA:              Opcode = X86ISD::FMSUB;         break;
51546     case ISD::STRICT_FMA:       Opcode = X86ISD::STRICT_FMSUB;  break;
51547     case X86ISD::FMADD_RND:     Opcode = X86ISD::FMSUB_RND;     break;
51548     case X86ISD::FMSUB:         Opcode = ISD::FMA;              break;
51549     case X86ISD::STRICT_FMSUB:  Opcode = ISD::STRICT_FMA;       break;
51550     case X86ISD::FMSUB_RND:     Opcode = X86ISD::FMADD_RND;     break;
51551     case X86ISD::FNMADD:        Opcode = X86ISD::FNMSUB;        break;
51552     case X86ISD::STRICT_FNMADD: Opcode = X86ISD::STRICT_FNMSUB; break;
51553     case X86ISD::FNMADD_RND:    Opcode = X86ISD::FNMSUB_RND;    break;
51554     case X86ISD::FNMSUB:        Opcode = X86ISD::FNMADD;        break;
51555     case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FNMADD; break;
51556     case X86ISD::FNMSUB_RND:    Opcode = X86ISD::FNMADD_RND;    break;
51557     case X86ISD::FMADDSUB:      Opcode = X86ISD::FMSUBADD;      break;
51558     case X86ISD::FMADDSUB_RND:  Opcode = X86ISD::FMSUBADD_RND;  break;
51559     case X86ISD::FMSUBADD:      Opcode = X86ISD::FMADDSUB;      break;
51560     case X86ISD::FMSUBADD_RND:  Opcode = X86ISD::FMADDSUB_RND;  break;
51561     }
51562   }
51563 
51564   if (NegRes) {
51565     switch (Opcode) {
51566     // For accuracy reasons, we never combine fneg and fma under strict FP.
51567     default: llvm_unreachable("Unexpected opcode");
51568     case ISD::FMA:             Opcode = X86ISD::FNMSUB;       break;
51569     case X86ISD::FMADD_RND:    Opcode = X86ISD::FNMSUB_RND;   break;
51570     case X86ISD::FMSUB:        Opcode = X86ISD::FNMADD;       break;
51571     case X86ISD::FMSUB_RND:    Opcode = X86ISD::FNMADD_RND;   break;
51572     case X86ISD::FNMADD:       Opcode = X86ISD::FMSUB;        break;
51573     case X86ISD::FNMADD_RND:   Opcode = X86ISD::FMSUB_RND;    break;
51574     case X86ISD::FNMSUB:       Opcode = ISD::FMA;             break;
51575     case X86ISD::FNMSUB_RND:   Opcode = X86ISD::FMADD_RND;    break;
51576     }
51577   }
51578 
51579   return Opcode;
51580 }
51581 
51582 /// Do target-specific dag combines on floating point negations.
51583 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
51584                            TargetLowering::DAGCombinerInfo &DCI,
51585                            const X86Subtarget &Subtarget) {
51586   EVT OrigVT = N->getValueType(0);
51587   SDValue Arg = isFNEG(DAG, N);
51588   if (!Arg)
51589     return SDValue();
51590 
51591   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51592   EVT VT = Arg.getValueType();
51593   EVT SVT = VT.getScalarType();
51594   SDLoc DL(N);
51595 
51596   // Let legalize expand this if it isn't a legal type yet.
51597   if (!TLI.isTypeLegal(VT))
51598     return SDValue();
51599 
51600   // If we're negating a FMUL node on a target with FMA, then we can avoid the
51601   // use of a constant by performing (-0 - A*B) instead.
51602   // FIXME: Check rounding control flags as well once it becomes available.
51603   if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
51604       Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
51605     SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
51606     SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
51607                                   Arg.getOperand(1), Zero);
51608     return DAG.getBitcast(OrigVT, NewNode);
51609   }
51610 
51611   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
51612   bool LegalOperations = !DCI.isBeforeLegalizeOps();
51613   if (SDValue NegArg =
51614           TLI.getNegatedExpression(Arg, DAG, LegalOperations, CodeSize))
51615     return DAG.getBitcast(OrigVT, NegArg);
51616 
51617   return SDValue();
51618 }
51619 
51620 SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
51621                                                 bool LegalOperations,
51622                                                 bool ForCodeSize,
51623                                                 NegatibleCost &Cost,
51624                                                 unsigned Depth) const {
51625   // fneg patterns are removable even if they have multiple uses.
51626   if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth)) {
51627     Cost = NegatibleCost::Cheaper;
51628     return DAG.getBitcast(Op.getValueType(), Arg);
51629   }
51630 
51631   EVT VT = Op.getValueType();
51632   EVT SVT = VT.getScalarType();
51633   unsigned Opc = Op.getOpcode();
51634   SDNodeFlags Flags = Op.getNode()->getFlags();
51635   switch (Opc) {
51636   case ISD::FMA:
51637   case X86ISD::FMSUB:
51638   case X86ISD::FNMADD:
51639   case X86ISD::FNMSUB:
51640   case X86ISD::FMADD_RND:
51641   case X86ISD::FMSUB_RND:
51642   case X86ISD::FNMADD_RND:
51643   case X86ISD::FNMSUB_RND: {
51644     if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
51645         !(SVT == MVT::f32 || SVT == MVT::f64) ||
51646         !isOperationLegal(ISD::FMA, VT))
51647       break;
51648 
51649     // Don't fold (fneg (fma (fneg x), y, (fneg z))) to (fma x, y, z)
51650     // if it may have signed zeros.
51651     if (!Flags.hasNoSignedZeros())
51652       break;
51653 
51654     // This is always negatible for free but we might be able to remove some
51655     // extra operand negations as well.
51656     SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
51657     for (int i = 0; i != 3; ++i)
51658       NewOps[i] = getCheaperNegatedExpression(
51659           Op.getOperand(i), DAG, LegalOperations, ForCodeSize, Depth + 1);
51660 
51661     bool NegA = !!NewOps[0];
51662     bool NegB = !!NewOps[1];
51663     bool NegC = !!NewOps[2];
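    // Negations of the two multiplicands cancel out, so the multiply is only
    // treated as negated when exactly one of A/B was negated; the result is
    // always negated (NegRes) since we are building the negated expression.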
51664     unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
51665 
51666     Cost = (NegA || NegB || NegC) ? NegatibleCost::Cheaper
51667                                   : NegatibleCost::Neutral;
51668 
51669     // Fill in the non-negated ops with the original values.
51670     for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
51671       if (!NewOps[i])
51672         NewOps[i] = Op.getOperand(i);
51673     return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
51674   }
51675   case X86ISD::FRCP:
51676     if (SDValue NegOp0 =
51677             getNegatedExpression(Op.getOperand(0), DAG, LegalOperations,
51678                                  ForCodeSize, Cost, Depth + 1))
51679       return DAG.getNode(Opc, SDLoc(Op), VT, NegOp0);
51680     break;
51681   }
51682 
51683   return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
51684                                               ForCodeSize, Cost, Depth);
51685 }
51686 
51687 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
51688                                  const X86Subtarget &Subtarget) {
51689   MVT VT = N->getSimpleValueType(0);
51690   // If we have integer vector types available, use the integer opcodes.
51691   if (!VT.isVector() || !Subtarget.hasSSE2())
51692     return SDValue();
51693 
51694   SDLoc dl(N);
51695 
51696   unsigned IntBits = VT.getScalarSizeInBits();
51697   MVT IntSVT = MVT::getIntegerVT(IntBits);
51698   MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
51699 
51700   SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
51701   SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
51702   unsigned IntOpcode;
51703   switch (N->getOpcode()) {
51704   default: llvm_unreachable("Unexpected FP logic op");
51705   case X86ISD::FOR:   IntOpcode = ISD::OR; break;
51706   case X86ISD::FXOR:  IntOpcode = ISD::XOR; break;
51707   case X86ISD::FAND:  IntOpcode = ISD::AND; break;
51708   case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
51709   }
51710   SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
51711   return DAG.getBitcast(VT, IntOp);
51712 }
51713 
51714 
51715 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
51716 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
51717   if (N->getOpcode() != ISD::XOR)
51718     return SDValue();
51719 
51720   SDValue LHS = N->getOperand(0);
51721   if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
51722     return SDValue();
51723 
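  // Flipping the low bit of a SETCC result is equivalent to inverting the
  // condition code.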
51724   X86::CondCode NewCC = X86::GetOppositeBranchCondition(
51725       X86::CondCode(LHS->getConstantOperandVal(0)));
51726   SDLoc DL(N);
51727   return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
51728 }
51729 
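// Fold (xor (ctlz_zero_undef X), BitWidth - 1) and
// (sub BitWidth - 1, (ctlz_zero_undef X)) into X86ISD::BSR, since BSR already
// computes BitWidth - 1 - ctlz(X) (the index of the highest set bit), e.g.
//   (xor (i32 (ctlz_zero_undef X)), 31) --> (X86ISD::BSR X)
// Skipped when the target has fast LZCNT, where plain CTLZ is preferable.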
51730 static SDValue combineXorSubCTLZ(SDNode *N, SelectionDAG &DAG,
51731                                  const X86Subtarget &Subtarget) {
51732   assert((N->getOpcode() == ISD::XOR || N->getOpcode() == ISD::SUB) &&
51733          "Invalid opcode for combining with CTLZ");
51734   if (Subtarget.hasFastLZCNT())
51735     return SDValue();
51736 
51737   EVT VT = N->getValueType(0);
51738   if (VT != MVT::i8 && VT != MVT::i16 && VT != MVT::i32 &&
51739       (VT != MVT::i64 || !Subtarget.is64Bit()))
51740     return SDValue();
51741 
51742   SDValue N0 = N->getOperand(0);
51743   SDValue N1 = N->getOperand(1);
51744 
51745   if (N0.getOpcode() != ISD::CTLZ_ZERO_UNDEF &&
51746       N1.getOpcode() != ISD::CTLZ_ZERO_UNDEF)
51747     return SDValue();
51748 
51749   SDValue OpCTLZ;
51750   SDValue OpSizeTM1;
51751 
51752   if (N1.getOpcode() == ISD::CTLZ_ZERO_UNDEF) {
51753     OpCTLZ = N1;
51754     OpSizeTM1 = N0;
51755   } else if (N->getOpcode() == ISD::SUB) {
51756     return SDValue();
51757   } else {
51758     OpCTLZ = N0;
51759     OpSizeTM1 = N1;
51760   }
51761 
51762   if (!OpCTLZ.hasOneUse())
51763     return SDValue();
51764   auto *C = dyn_cast<ConstantSDNode>(OpSizeTM1);
51765   if (!C)
51766     return SDValue();
51767 
51768   if (C->getZExtValue() != uint64_t(OpCTLZ.getValueSizeInBits() - 1))
51769     return SDValue();
51770   SDLoc DL(N);
51771   EVT OpVT = VT;
51772   SDValue Op = OpCTLZ.getOperand(0);
51773   if (VT == MVT::i8) {
51774     // Zero extend to i32 since there is no i8 BSR instruction.
51775     OpVT = MVT::i32;
51776     Op = DAG.getNode(ISD::ZERO_EXTEND, DL, OpVT, Op);
51777   }
51778 
51779   SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
51780   Op = DAG.getNode(X86ISD::BSR, DL, VTs, Op);
51781   if (VT == MVT::i8)
51782     Op = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Op);
51783 
51784   return Op;
51785 }
51786 
51787 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
51788                           TargetLowering::DAGCombinerInfo &DCI,
51789                           const X86Subtarget &Subtarget) {
51790   SDValue N0 = N->getOperand(0);
51791   SDValue N1 = N->getOperand(1);
51792   EVT VT = N->getValueType(0);
51793 
51794   // If this is SSE1 only convert to FXOR to avoid scalarization.
51795   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
51796     return DAG.getBitcast(MVT::v4i32,
51797                           DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
51798                                       DAG.getBitcast(MVT::v4f32, N0),
51799                                       DAG.getBitcast(MVT::v4f32, N1)));
51800   }
51801 
51802   if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
51803     return Cmp;
51804 
51805   if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
51806     return R;
51807 
51808   if (SDValue R = combineBitOpWithShift(N, DAG))
51809     return R;
51810 
51811   if (SDValue R = combineBitOpWithPACK(N, DAG))
51812     return R;
51813 
51814   if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, DCI, Subtarget))
51815     return FPLogic;
51816 
51817   if (SDValue R = combineXorSubCTLZ(N, DAG, Subtarget))
51818     return R;
51819 
51820   if (DCI.isBeforeLegalizeOps())
51821     return SDValue();
51822 
51823   if (SDValue SetCC = foldXor1SetCC(N, DAG))
51824     return SetCC;
51825 
51826   if (SDValue R = combineOrXorWithSETCC(N, N0, N1, DAG))
51827     return R;
51828 
51829   if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
51830     return RV;
51831 
51832   // Fold not(iX bitcast(vXi1)) -> (iX bitcast(not(vec))) for legal boolvecs.
51833   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51834   if (llvm::isAllOnesConstant(N1) && N0.getOpcode() == ISD::BITCAST &&
51835       N0.getOperand(0).getValueType().isVector() &&
51836       N0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
51837       TLI.isTypeLegal(N0.getOperand(0).getValueType()) && N0.hasOneUse()) {
51838     return DAG.getBitcast(VT, DAG.getNOT(SDLoc(N), N0.getOperand(0),
51839                                          N0.getOperand(0).getValueType()));
51840   }
51841 
51842   // Handle AVX512 mask widening.
51843   // Fold not(insert_subvector(undef,sub)) -> insert_subvector(undef,not(sub))
51844   if (ISD::isBuildVectorAllOnes(N1.getNode()) && VT.isVector() &&
51845       VT.getVectorElementType() == MVT::i1 &&
51846       N0.getOpcode() == ISD::INSERT_SUBVECTOR && N0.getOperand(0).isUndef() &&
51847       TLI.isTypeLegal(N0.getOperand(1).getValueType())) {
51848     return DAG.getNode(
51849         ISD::INSERT_SUBVECTOR, SDLoc(N), VT, N0.getOperand(0),
51850         DAG.getNOT(SDLoc(N), N0.getOperand(1), N0.getOperand(1).getValueType()),
51851         N0.getOperand(2));
51852   }
51853 
51854   // Fold xor(zext(xor(x,c1)),c2) -> xor(zext(x),xor(zext(c1),c2))
51855   // Fold xor(truncate(xor(x,c1)),c2) -> xor(truncate(x),xor(truncate(c1),c2))
51856   // TODO: Under what circumstances could this be performed in DAGCombine?
51857   if ((N0.getOpcode() == ISD::TRUNCATE || N0.getOpcode() == ISD::ZERO_EXTEND) &&
51858       N0.getOperand(0).getOpcode() == N->getOpcode()) {
51859     SDValue TruncExtSrc = N0.getOperand(0);
51860     auto *N1C = dyn_cast<ConstantSDNode>(N1);
51861     auto *N001C = dyn_cast<ConstantSDNode>(TruncExtSrc.getOperand(1));
51862     if (N1C && !N1C->isOpaque() && N001C && !N001C->isOpaque()) {
51863       SDLoc DL(N);
51864       SDValue LHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(0), DL, VT);
51865       SDValue RHS = DAG.getZExtOrTrunc(TruncExtSrc.getOperand(1), DL, VT);
51866       return DAG.getNode(ISD::XOR, DL, VT, LHS,
51867                          DAG.getNode(ISD::XOR, DL, VT, RHS, N1));
51868     }
51869   }
51870 
51871   if (SDValue R = combineBMILogicOp(N, DAG, Subtarget))
51872     return R;
51873 
51874   return combineFneg(N, DAG, DCI, Subtarget);
51875 }
51876 
51877 static SDValue combineBITREVERSE(SDNode *N, SelectionDAG &DAG,
51878                                  TargetLowering::DAGCombinerInfo &DCI,
51879                                  const X86Subtarget &Subtarget) {
51880   SDValue N0 = N->getOperand(0);
51881   EVT VT = N->getValueType(0);
51882 
51883   // Convert a (iX bitreverse(bitcast(vXi1 X))) -> (iX bitcast(shuffle(X)))
51884   if (VT.isInteger() && N0.getOpcode() == ISD::BITCAST && N0.hasOneUse()) {
51885     SDValue Src = N0.getOperand(0);
51886     EVT SrcVT = Src.getValueType();
51887     if (SrcVT.isVector() && SrcVT.getScalarType() == MVT::i1 &&
51888         (DCI.isBeforeLegalize() ||
51889          DAG.getTargetLoweringInfo().isTypeLegal(SrcVT)) &&
51890         Subtarget.hasSSSE3()) {
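      // Each vXi1 element maps to a single bit of the iX scalar, so reversing
      // the vector elements reverses the bit order of the bitcasted result.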
51891       unsigned NumElts = SrcVT.getVectorNumElements();
51892       SmallVector<int, 32> ReverseMask(NumElts);
51893       for (unsigned I = 0; I != NumElts; ++I)
51894         ReverseMask[I] = (NumElts - 1) - I;
51895       SDValue Rev =
51896           DAG.getVectorShuffle(SrcVT, SDLoc(N), Src, Src, ReverseMask);
51897       return DAG.getBitcast(VT, Rev);
51898     }
51899   }
51900 
51901   return SDValue();
51902 }
51903 
51904 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
51905                             TargetLowering::DAGCombinerInfo &DCI,
51906                             const X86Subtarget &Subtarget) {
51907   EVT VT = N->getValueType(0);
51908   unsigned NumBits = VT.getSizeInBits();
51909 
51910   // TODO - Constant Folding.
51911 
51912   // Simplify the inputs.
51913   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
51914   APInt DemandedMask(APInt::getAllOnes(NumBits));
51915   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
51916     return SDValue(N, 0);
51917 
51918   return SDValue();
51919 }
51920 
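/// Return true if \p V is a scalar FP constant of positive zero or a
/// build_vector of all-zero constants (undef elements allowed).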
51921 static bool isNullFPScalarOrVectorConst(SDValue V) {
51922   return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
51923 }
51924 
51925 /// If a value is a scalar FP zero or a vector FP zero (potentially including
51926 /// undefined elements), return a zero constant that may be used to fold away
51927 /// that value. In the case of a vector, the returned constant will not contain
51928 /// undefined elements even if the input parameter does. This makes it suitable
51929 /// to be used as a replacement operand with operations (eg, bitwise-and) where
51930 /// an undef should not propagate.
51931 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
51932                                         const X86Subtarget &Subtarget) {
51933   if (!isNullFPScalarOrVectorConst(V))
51934     return SDValue();
51935 
51936   if (V.getValueType().isVector())
51937     return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
51938 
51939   return V;
51940 }
51941 
51942 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
51943                                       const X86Subtarget &Subtarget) {
51944   SDValue N0 = N->getOperand(0);
51945   SDValue N1 = N->getOperand(1);
51946   EVT VT = N->getValueType(0);
51947   SDLoc DL(N);
51948 
51949   // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
51950   if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
51951         (VT == MVT::f64 && Subtarget.hasSSE2()) ||
51952         (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
51953     return SDValue();
51954 
51955   auto isAllOnesConstantFP = [](SDValue V) {
51956     if (V.getSimpleValueType().isVector())
51957       return ISD::isBuildVectorAllOnes(V.getNode());
51958     auto *C = dyn_cast<ConstantFPSDNode>(V);
51959     return C && C->getConstantFPValue()->isAllOnesValue();
51960   };
51961 
51962   // fand (fxor X, -1), Y --> fandn X, Y
51963   if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
51964     return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
51965 
51966   // fand X, (fxor Y, -1) --> fandn Y, X
51967   if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
51968     return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
51969 
51970   return SDValue();
51971 }
51972 
51973 /// Do target-specific dag combines on X86ISD::FAND nodes.
51974 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
51975                            const X86Subtarget &Subtarget) {
51976   // FAND(0.0, x) -> 0.0
51977   if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
51978     return V;
51979 
51980   // FAND(x, 0.0) -> 0.0
51981   if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
51982     return V;
51983 
51984   if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
51985     return V;
51986 
51987   return lowerX86FPLogicOp(N, DAG, Subtarget);
51988 }
51989 
51990 /// Do target-specific dag combines on X86ISD::FANDN nodes.
51991 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
51992                             const X86Subtarget &Subtarget) {
51993   // FANDN(0.0, x) -> x
51994   if (isNullFPScalarOrVectorConst(N->getOperand(0)))
51995     return N->getOperand(1);
51996 
51997   // FANDN(x, 0.0) -> 0.0
51998   if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
51999     return V;
52000 
52001   return lowerX86FPLogicOp(N, DAG, Subtarget);
52002 }
52003 
52004 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
52005 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
52006                           TargetLowering::DAGCombinerInfo &DCI,
52007                           const X86Subtarget &Subtarget) {
52008   assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
52009 
52010   // F[X]OR(0.0, x) -> x
52011   if (isNullFPScalarOrVectorConst(N->getOperand(0)))
52012     return N->getOperand(1);
52013 
52014   // F[X]OR(x, 0.0) -> x
52015   if (isNullFPScalarOrVectorConst(N->getOperand(1)))
52016     return N->getOperand(0);
52017 
52018   if (SDValue NewVal = combineFneg(N, DAG, DCI, Subtarget))
52019     return NewVal;
52020 
52021   return lowerX86FPLogicOp(N, DAG, Subtarget);
52022 }
52023 
52024 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
52025 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
52026   assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
52027 
52028   // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
52029   if (!DAG.getTarget().Options.NoNaNsFPMath ||
52030       !DAG.getTarget().Options.NoSignedZerosFPMath)
52031     return SDValue();
52032 
52033   // With no NaNs and no signed zeros allowed, convert the FMAX and FMIN
52034   // nodes into FMINC and FMAXC, which are commutative operations.
52035   unsigned NewOp = 0;
52036   switch (N->getOpcode()) {
52037     default: llvm_unreachable("unknown opcode");
52038     case X86ISD::FMIN:  NewOp = X86ISD::FMINC; break;
52039     case X86ISD::FMAX:  NewOp = X86ISD::FMAXC; break;
52040   }
52041 
52042   return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
52043                      N->getOperand(0), N->getOperand(1));
52044 }
52045 
52046 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
52047                                      const X86Subtarget &Subtarget) {
52048   EVT VT = N->getValueType(0);
52049   if (Subtarget.useSoftFloat() || isSoftF16(VT, Subtarget))
52050     return SDValue();
52051 
52052   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52053 
52054   if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
52055         (Subtarget.hasSSE2() && VT == MVT::f64) ||
52056         (Subtarget.hasFP16() && VT == MVT::f16) ||
52057         (VT.isVector() && TLI.isTypeLegal(VT))))
52058     return SDValue();
52059 
52060   SDValue Op0 = N->getOperand(0);
52061   SDValue Op1 = N->getOperand(1);
52062   SDLoc DL(N);
52063   auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
52064 
52065   // If we don't have to respect NaN inputs, this is a direct translation to x86
52066   // min/max instructions.
52067   if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
52068     return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
52069 
52070   // If one of the operands is known non-NaN use the native min/max instructions
52071   // with the non-NaN input as second operand.
52072   if (DAG.isKnownNeverNaN(Op1))
52073     return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
52074   if (DAG.isKnownNeverNaN(Op0))
52075     return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
52076 
52077   // If we have to respect NaN inputs, this takes at least 3 instructions.
52078   // Favor a library call when operating on a scalar and minimizing code size.
52079   if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
52080     return SDValue();
52081 
52082   EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
52083                                          VT);
52084 
52085   // There are 4 possibilities involving NaN inputs, and these are the required
52086   // outputs:
52087   //                   Op1
52088   //               Num     NaN
52089   //            ----------------
52090   //       Num  |  Max  |  Op0 |
52091   // Op0        ----------------
52092   //       NaN  |  Op1  |  NaN |
52093   //            ----------------
52094   //
52095   // The SSE FP max/min instructions were not designed for this case, but rather
52096   // to implement:
52097   //   Min = Op1 < Op0 ? Op1 : Op0
52098   //   Max = Op1 > Op0 ? Op1 : Op0
52099   //
52100   // So they always return Op0 if either input is a NaN. However, we can still
52101   // use those instructions for fmaxnum by selecting away a NaN input.
52102 
52103   // If either operand is NaN, the 2nd source operand (Op0) is passed through.
52104   SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
52105   SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
52106 
52107   // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
52108   // are NaN, the NaN value of Op1 is the result.
52109   return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
52110 }
52111 
52112 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
52113                                    TargetLowering::DAGCombinerInfo &DCI) {
52114   EVT VT = N->getValueType(0);
52115   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52116 
52117   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
52118   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
52119     return SDValue(N, 0);
52120 
52121   // Convert a full vector load into vzload when not all bits are needed.
52122   SDValue In = N->getOperand(0);
52123   MVT InVT = In.getSimpleValueType();
52124   if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
52125       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
52126     assert(InVT.is128BitVector() && "Expected 128-bit input vector");
52127     LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
52128     unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
52129     MVT MemVT = MVT::getIntegerVT(NumBits);
52130     MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
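    // A VZEXT_LOAD reads only the low NumBits actually consumed by the
    // conversion and zero-fills the remainder of the 128-bit vector.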
52131     if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
52132       SDLoc dl(N);
52133       SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
52134                                     DAG.getBitcast(InVT, VZLoad));
52135       DCI.CombineTo(N, Convert);
52136       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52137       DCI.recursivelyDeleteUnusedNodes(LN);
52138       return SDValue(N, 0);
52139     }
52140   }
52141 
52142   return SDValue();
52143 }
52144 
52145 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
52146                                      TargetLowering::DAGCombinerInfo &DCI) {
52147   bool IsStrict = N->isTargetStrictFPOpcode();
52148   EVT VT = N->getValueType(0);
52149 
52150   // Convert a full vector load into vzload when not all bits are needed.
52151   SDValue In = N->getOperand(IsStrict ? 1 : 0);
52152   MVT InVT = In.getSimpleValueType();
52153   if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
52154       ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
52155     assert(InVT.is128BitVector() && "Expected 128-bit input vector");
52156     LoadSDNode *LN = cast<LoadSDNode>(In);
52157     unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
52158     MVT MemVT = MVT::getFloatingPointVT(NumBits);
52159     MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
52160     if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
52161       SDLoc dl(N);
52162       if (IsStrict) {
52163         SDValue Convert =
52164             DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
52165                         {N->getOperand(0), DAG.getBitcast(InVT, VZLoad)});
52166         DCI.CombineTo(N, Convert, Convert.getValue(1));
52167       } else {
52168         SDValue Convert =
52169             DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(InVT, VZLoad));
52170         DCI.CombineTo(N, Convert);
52171       }
52172       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52173       DCI.recursivelyDeleteUnusedNodes(LN);
52174       return SDValue(N, 0);
52175     }
52176   }
52177 
52178   return SDValue();
52179 }
52180 
52181 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
52182 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
52183                             TargetLowering::DAGCombinerInfo &DCI,
52184                             const X86Subtarget &Subtarget) {
52185   SDValue N0 = N->getOperand(0);
52186   SDValue N1 = N->getOperand(1);
52187   MVT VT = N->getSimpleValueType(0);
52188   int NumElts = VT.getVectorNumElements();
52189   unsigned EltSizeInBits = VT.getScalarSizeInBits();
52190   SDLoc DL(N);
52191 
52192   // ANDNP(undef, x) -> 0
52193   // ANDNP(x, undef) -> 0
52194   if (N0.isUndef() || N1.isUndef())
52195     return DAG.getConstant(0, DL, VT);
52196 
52197   // ANDNP(0, x) -> x
52198   if (ISD::isBuildVectorAllZeros(N0.getNode()))
52199     return N1;
52200 
52201   // ANDNP(x, 0) -> 0
52202   if (ISD::isBuildVectorAllZeros(N1.getNode()))
52203     return DAG.getConstant(0, DL, VT);
52204 
52205   // ANDNP(x, -1) -> NOT(x) -> XOR(x, -1)
52206   if (ISD::isBuildVectorAllOnes(N1.getNode()))
52207     return DAG.getNOT(DL, N0, VT);
52208 
52209   // Turn ANDNP back to AND if input is inverted.
52210   if (SDValue Not = IsNOT(N0, DAG))
52211     return DAG.getNode(ISD::AND, DL, VT, DAG.getBitcast(VT, Not), N1);
52212 
52213   // Fold for better commutativity:
52214   // ANDNP(x,NOT(y)) -> AND(NOT(x),NOT(y)) -> NOT(OR(X,Y)).
52215   if (N1->hasOneUse())
52216     if (SDValue Not = IsNOT(N1, DAG))
52217       return DAG.getNOT(
52218           DL, DAG.getNode(ISD::OR, DL, VT, N0, DAG.getBitcast(VT, Not)), VT);
52219 
52220   // Constant Folding
52221   APInt Undefs0, Undefs1;
52222   SmallVector<APInt> EltBits0, EltBits1;
52223   if (getTargetConstantBitsFromNode(N0, EltSizeInBits, Undefs0, EltBits0)) {
52224     if (getTargetConstantBitsFromNode(N1, EltSizeInBits, Undefs1, EltBits1)) {
52225       SmallVector<APInt> ResultBits;
52226       for (int I = 0; I != NumElts; ++I)
52227         ResultBits.push_back(~EltBits0[I] & EltBits1[I]);
52228       return getConstVector(ResultBits, VT, DAG, DL);
52229     }
52230 
52231     // Constant fold NOT(N0) to allow us to use AND.
52232     // Ensure this is only performed if we can confirm that the bitcasted source
52233     // has one use, to prevent an infinite loop with canonicalizeBitSelect.
52234     if (N0->hasOneUse()) {
52235       SDValue BC0 = peekThroughOneUseBitcasts(N0);
52236       if (BC0.getOpcode() != ISD::BITCAST) {
52237         for (APInt &Elt : EltBits0)
52238           Elt = ~Elt;
52239         SDValue Not = getConstVector(EltBits0, VT, DAG, DL);
52240         return DAG.getNode(ISD::AND, DL, VT, Not, N1);
52241       }
52242     }
52243   }
52244 
52245   // Attempt to recursively combine a bitmask ANDNP with shuffles.
52246   if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
52247     SDValue Op(N, 0);
52248     if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
52249       return Res;
52250 
52251     // If either operand is a constant mask, then only the elements that aren't
52252     // zero are actually demanded by the other operand.
52253     auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
52254       APInt UndefElts;
52255       SmallVector<APInt> EltBits;
52256       APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
52257       APInt DemandedElts = APInt::getAllOnes(NumElts);
52258       if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
52259                                         EltBits)) {
52260         DemandedBits.clearAllBits();
52261         DemandedElts.clearAllBits();
52262         for (int I = 0; I != NumElts; ++I) {
52263           if (UndefElts[I]) {
52264             // We can't assume an undef src element gives an undef dst - the
52265             // other src might be zero.
52266             DemandedBits.setAllBits();
52267             DemandedElts.setBit(I);
52268           } else if ((Invert && !EltBits[I].isAllOnes()) ||
52269                      (!Invert && !EltBits[I].isZero())) {
52270             DemandedBits |= Invert ? ~EltBits[I] : EltBits[I];
52271             DemandedElts.setBit(I);
52272           }
52273         }
52274       }
52275       return std::make_pair(DemandedBits, DemandedElts);
52276     };
52277     APInt Bits0, Elts0;
52278     APInt Bits1, Elts1;
52279     std::tie(Bits0, Elts0) = GetDemandedMasks(N1);
52280     std::tie(Bits1, Elts1) = GetDemandedMasks(N0, true);
52281 
52282     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52283     if (TLI.SimplifyDemandedVectorElts(N0, Elts0, DCI) ||
52284         TLI.SimplifyDemandedVectorElts(N1, Elts1, DCI) ||
52285         TLI.SimplifyDemandedBits(N0, Bits0, Elts0, DCI) ||
52286         TLI.SimplifyDemandedBits(N1, Bits1, Elts1, DCI)) {
52287       if (N->getOpcode() != ISD::DELETED_NODE)
52288         DCI.AddToWorklist(N);
52289       return SDValue(N, 0);
52290     }
52291   }
52292 
52293   return SDValue();
52294 }
52295 
52296 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
52297                          TargetLowering::DAGCombinerInfo &DCI) {
52298   SDValue N1 = N->getOperand(1);
52299 
52300   // BT ignores high bits in the bit index operand.
52301   unsigned BitWidth = N1.getValueSizeInBits();
52302   APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
52303   if (DAG.getTargetLoweringInfo().SimplifyDemandedBits(N1, DemandedMask, DCI)) {
52304     if (N->getOpcode() != ISD::DELETED_NODE)
52305       DCI.AddToWorklist(N);
52306     return SDValue(N, 0);
52307   }
52308 
52309   return SDValue();
52310 }
52311 
52312 static SDValue combineCVTPH2PS(SDNode *N, SelectionDAG &DAG,
52313                                TargetLowering::DAGCombinerInfo &DCI) {
52314   bool IsStrict = N->getOpcode() == X86ISD::STRICT_CVTPH2PS;
52315   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
52316 
52317   if (N->getValueType(0) == MVT::v4f32 && Src.getValueType() == MVT::v8i16) {
52318     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
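    // CVTPH2PS to v4f32 only reads the low 4 of the 8 source i16 elements.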
52319     APInt DemandedElts = APInt::getLowBitsSet(8, 4);
52320     if (TLI.SimplifyDemandedVectorElts(Src, DemandedElts, DCI)) {
52321       if (N->getOpcode() != ISD::DELETED_NODE)
52322         DCI.AddToWorklist(N);
52323       return SDValue(N, 0);
52324     }
52325 
52326     // Convert a full vector load into vzload when not all bits are needed.
52327     if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
52328       LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(IsStrict ? 1 : 0));
52329       if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::i64, MVT::v2i64, DAG)) {
52330         SDLoc dl(N);
52331         if (IsStrict) {
52332           SDValue Convert = DAG.getNode(
52333               N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
52334               {N->getOperand(0), DAG.getBitcast(MVT::v8i16, VZLoad)});
52335           DCI.CombineTo(N, Convert, Convert.getValue(1));
52336         } else {
52337           SDValue Convert = DAG.getNode(N->getOpcode(), dl, MVT::v4f32,
52338                                         DAG.getBitcast(MVT::v8i16, VZLoad));
52339           DCI.CombineTo(N, Convert);
52340         }
52341 
52342         DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
52343         DCI.recursivelyDeleteUnusedNodes(LN);
52344         return SDValue(N, 0);
52345       }
52346     }
52347   }
52348 
52349   return SDValue();
52350 }
52351 
52352 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
52353 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
52354   assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
52355 
52356   EVT DstVT = N->getValueType(0);
52357 
52358   SDValue N0 = N->getOperand(0);
52359   SDValue N1 = N->getOperand(1);
52360   EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
52361 
52362   if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
52363     return SDValue();
52364 
52365   // Look through single use any_extends / truncs.
52366   SDValue IntermediateBitwidthOp;
52367   if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
52368       N0.hasOneUse()) {
52369     IntermediateBitwidthOp = N0;
52370     N0 = N0.getOperand(0);
52371   }
52372 
52373   // See if we have a single use cmov.
52374   if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
52375     return SDValue();
52376 
52377   SDValue CMovOp0 = N0.getOperand(0);
52378   SDValue CMovOp1 = N0.getOperand(1);
52379 
52380   // Make sure both operands are constants.
52381   if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
52382       !isa<ConstantSDNode>(CMovOp1.getNode()))
52383     return SDValue();
52384 
52385   SDLoc DL(N);
52386 
52387   // If we looked through an any_extend/trunc above, re-apply it to the constants.
52388   if (IntermediateBitwidthOp) {
52389     unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
52390     CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
52391     CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
52392   }
52393 
52394   CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
52395   CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
52396 
52397   EVT CMovVT = DstVT;
52398   // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
52399   if (DstVT == MVT::i16) {
52400     CMovVT = MVT::i32;
52401     CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
52402     CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
52403   }
52404 
52405   SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
52406                              N0.getOperand(2), N0.getOperand(3));
52407 
52408   if (CMovVT != DstVT)
52409     CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
52410 
52411   return CMov;
52412 }
52413 
52414 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
52415                                       const X86Subtarget &Subtarget) {
52416   assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
52417 
52418   if (SDValue V = combineSextInRegCmov(N, DAG))
52419     return V;
52420 
52421   EVT VT = N->getValueType(0);
52422   SDValue N0 = N->getOperand(0);
52423   SDValue N1 = N->getOperand(1);
52424   EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
52425   SDLoc dl(N);
52426 
52427   // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and
52428   // AVX2 since there is no sign-extended shift-right operation on a vector
52429   // with 64-bit elements.
52430   // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
52431   //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
52432   if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
52433                            N0.getOpcode() == ISD::SIGN_EXTEND)) {
52434     SDValue N00 = N0.getOperand(0);
52435 
52436     // EXTLOAD has a better solution on AVX2,
52437     // it may be replaced with X86ISD::VSEXT node.
52438     if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
52439       if (!ISD::isNormalLoad(N00.getNode()))
52440         return SDValue();
52441 
52442     // Attempt to promote any comparison mask ops before moving the
52443     // SIGN_EXTEND_INREG in the way.
52444     if (SDValue Promote = PromoteMaskArithmetic(N0.getNode(), DAG, Subtarget))
52445       return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Promote, N1);
52446 
52447     if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
52448       SDValue Tmp =
52449           DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1);
52450       return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
52451     }
52452   }
52453   return SDValue();
52454 }
52455 
52456 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
52457 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
52458 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
52459 /// opportunities to combine math ops, use an LEA, or use a complex addressing
52460 /// mode. This can eliminate extend, add, and shift instructions.
52461 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
52462                                    const X86Subtarget &Subtarget) {
52463   if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
52464       Ext->getOpcode() != ISD::ZERO_EXTEND)
52465     return SDValue();
52466 
52467   // TODO: This should be valid for other integer types.
52468   EVT VT = Ext->getValueType(0);
52469   if (VT != MVT::i64)
52470     return SDValue();
52471 
52472   SDValue Add = Ext->getOperand(0);
52473   if (Add.getOpcode() != ISD::ADD)
52474     return SDValue();
52475 
52476   SDValue AddOp0 = Add.getOperand(0);
52477   SDValue AddOp1 = Add.getOperand(1);
52478   bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
52479   bool NSW = Add->getFlags().hasNoSignedWrap();
52480   bool NUW = Add->getFlags().hasNoUnsignedWrap();
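  // Even without explicit nsw/nuw flags, we may be able to prove that the
  // narrow add cannot overflow.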
52481   NSW = NSW || (Sext && DAG.willNotOverflowAdd(true, AddOp0, AddOp1));
52482   NUW = NUW || (!Sext && DAG.willNotOverflowAdd(false, AddOp0, AddOp1));
52483 
52484   // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
52485   // into the 'zext'
52486   if ((Sext && !NSW) || (!Sext && !NUW))
52487     return SDValue();
52488 
52489   // Having a constant operand to the 'add' ensures that we are not increasing
52490   // the instruction count because the constant is extended for free below.
52491   // A constant operand can also become the displacement field of an LEA.
52492   auto *AddOp1C = dyn_cast<ConstantSDNode>(AddOp1);
52493   if (!AddOp1C)
52494     return SDValue();
52495 
52496   // Don't make the 'add' bigger if there's no hope of combining it with some
52497   // other 'add' or 'shl' instruction.
52498   // TODO: It may be profitable to generate simpler LEA instructions in place
52499   // of single 'add' instructions, but the cost model for selecting an LEA
52500   // currently has a high threshold.
52501   bool HasLEAPotential = false;
52502   for (auto *User : Ext->uses()) {
52503     if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
52504       HasLEAPotential = true;
52505       break;
52506     }
52507   }
52508   if (!HasLEAPotential)
52509     return SDValue();
52510 
52511   // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
52512   int64_t AddC = Sext ? AddOp1C->getSExtValue() : AddOp1C->getZExtValue();
52513   SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
52514   SDValue NewConstant = DAG.getConstant(AddC, SDLoc(Add), VT);
52515 
52516   // The wider add is guaranteed not to wrap because both operands are
52517   // sign-extended (or zero-extended, in the 'zext' case).
52518   SDNodeFlags Flags;
52519   Flags.setNoSignedWrap(NSW);
52520   Flags.setNoUnsignedWrap(NUW);
52521   return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
52522 }
52523 
52524 // If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
52525 // operands and the result of CMOV is not used anywhere else - promote CMOV
52526 // itself instead of promoting its result. This could be beneficial, because:
52527 //     1) X86TargetLowering::EmitLoweredSelect later can do merging of two
52528 //        (or more) pseudo-CMOVs only when they go one-after-another and
52529 //        getting rid of result extension code after CMOV will help that.
52530 //     2) Promotion of constant CMOV arguments is free, hence the
52531 //        {ANY,SIGN,ZERO}_EXTEND will just be deleted.
52532 //     3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3 bytes, so this
52533 //        promotion is also good in terms of code-size.
52534 //        (64-bit CMOV is 4 bytes, that's why we don't do 32-bit => 64-bit
52535 //         promotion).
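// For example (illustrative), with constant i16 CMOV operands:
//   (i32 zext (i16 X86ISD::CMOV C1, C2, cc, eflags))
//     --> (i32 X86ISD::CMOV (i32 zext C1), (i32 zext C2), cc, eflags)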
52536 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
52537   SDValue CMovN = Extend->getOperand(0);
52538   if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
52539     return SDValue();
52540 
52541   EVT TargetVT = Extend->getValueType(0);
52542   unsigned ExtendOpcode = Extend->getOpcode();
52543   SDLoc DL(Extend);
52544 
52545   EVT VT = CMovN.getValueType();
52546   SDValue CMovOp0 = CMovN.getOperand(0);
52547   SDValue CMovOp1 = CMovN.getOperand(1);
52548 
52549   if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
52550       !isa<ConstantSDNode>(CMovOp1.getNode()))
52551     return SDValue();
52552 
52553   // Only extend to i32 or i64.
52554   if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
52555     return SDValue();
52556 
52557   // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from i32
52558   // are free.
52559   if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
52560     return SDValue();
52561 
52562   // If this is a zero extend to i64, we should only extend to i32 and use a
52563   // free zero extend to finish.
52564   EVT ExtendVT = TargetVT;
52565   if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
52566     ExtendVT = MVT::i32;
52567 
52568   CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
52569   CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
52570 
52571   SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
52572                             CMovN.getOperand(2), CMovN.getOperand(3));
52573 
52574   // Finish extending if needed.
52575   if (ExtendVT != TargetVT)
52576     Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
52577 
52578   return Res;
52579 }
52580 
52581 // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
52582 // result type.
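// For example (illustrative), on AVX512 with 256-bit vectors:
//   (v8i32 sext (v8i1 setcc (v8i32 A, v8i32 B, setgt)))
//     --> (v8i32 setcc (v8i32 A, v8i32 B, setgt))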
52583 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
52584                                const X86Subtarget &Subtarget) {
52585   SDValue N0 = N->getOperand(0);
52586   EVT VT = N->getValueType(0);
52587   SDLoc dl(N);
52588 
52589   // Only do this combine with AVX512 for vector extends.
52590   if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
52591     return SDValue();
52592 
52593   // Only combine legal element types.
52594   EVT SVT = VT.getVectorElementType();
52595   if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
52596       SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
52597     return SDValue();
52598 
52599   // We don't have a CMPP instruction for vXf16.
52600   if (N0.getOperand(0).getValueType().getVectorElementType() == MVT::f16)
52601     return SDValue();
52602   // We can only do this if the vector size is 256 bits or less.
52603   unsigned Size = VT.getSizeInBits();
52604   if (Size > 256 && Subtarget.useAVX512Regs())
52605     return SDValue();
52606 
52607   // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
52608   // those are the only integer compares we have.
52609   ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
52610   if (ISD::isUnsignedIntSetCC(CC))
52611     return SDValue();
52612 
52613   // Only do this combine if the extension will be fully consumed by the setcc.
52614   EVT N00VT = N0.getOperand(0).getValueType();
52615   EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
52616   if (Size != MatchingVecType.getSizeInBits())
52617     return SDValue();
52618 
52619   SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
52620 
52621   if (N->getOpcode() == ISD::ZERO_EXTEND)
52622     Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType());
52623 
52624   return Res;
52625 }
52626 
52627 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
52628                            TargetLowering::DAGCombinerInfo &DCI,
52629                            const X86Subtarget &Subtarget) {
52630   SDValue N0 = N->getOperand(0);
52631   EVT VT = N->getValueType(0);
52632   SDLoc DL(N);
52633 
52634   // (i32 (sext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
52635   if (!DCI.isBeforeLegalizeOps() &&
52636       N0.getOpcode() == X86ISD::SETCC_CARRY) {
52637     SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, N0->getOperand(0),
52638                                  N0->getOperand(1));
52639     bool ReplaceOtherUses = !N0.hasOneUse();
52640     DCI.CombineTo(N, Setcc);
52641     // Replace other uses with a truncate of the widened setcc_carry.
52642     if (ReplaceOtherUses) {
52643       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
52644                                   N0.getValueType(), Setcc);
52645       DCI.CombineTo(N0.getNode(), Trunc);
52646     }
52647 
52648     return SDValue(N, 0);
52649   }
52650 
52651   if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
52652     return NewCMov;
52653 
52654   if (!DCI.isBeforeLegalizeOps())
52655     return SDValue();
52656 
52657   if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
52658     return V;
52659 
52660   if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), DL, VT, N0,
52661                                                  DAG, DCI, Subtarget))
52662     return V;
52663 
52664   if (VT.isVector()) {
52665     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
52666       return R;
52667 
52668     if (N0.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG)
52669       return DAG.getNode(N0.getOpcode(), DL, VT, N0.getOperand(0));
52670   }
52671 
52672   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
52673     return NewAdd;
52674 
52675   return SDValue();
52676 }
52677 
52678 // Inverting a constant vector is profitable if it can be eliminated and the
52679 // inverted vector is already present in DAG. Otherwise, it will be loaded
52680 // anyway.
52681 //
52682 // We determine which of the values can be completely eliminated and invert it.
52683 // If both are eliminable, select a vector with the first negative element.
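// For example (illustrative): if both <2.0, 2.0> and <-2.0, -2.0> already
// exist in the DAG and each is only used by FMAs, the FMA can be rewritten in
// terms of the negated vector so that only one constant needs materializing.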
52684 static SDValue getInvertedVectorForFMA(SDValue V, SelectionDAG &DAG) {
52685   assert(ISD::isBuildVectorOfConstantFPSDNodes(V.getNode()) &&
52686          "ConstantFP build vector expected");
52687   // Check if we can eliminate V. We assume that if a value is only used in
52688   // FMAs, we can eliminate it, since this function is invoked for each FMA
52689   // with this vector.
52690   auto IsNotFMA = [](SDNode *Use) {
52691     return Use->getOpcode() != ISD::FMA && Use->getOpcode() != ISD::STRICT_FMA;
52692   };
52693   if (llvm::any_of(V->uses(), IsNotFMA))
52694     return SDValue();
52695 
52696   SmallVector<SDValue, 8> Ops;
52697   EVT VT = V.getValueType();
52698   EVT EltVT = VT.getVectorElementType();
52699   for (auto Op : V->op_values()) {
52700     if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
52701       Ops.push_back(DAG.getConstantFP(-Cst->getValueAPF(), SDLoc(Op), EltVT));
52702     } else {
52703       assert(Op.isUndef());
52704       Ops.push_back(DAG.getUNDEF(EltVT));
52705     }
52706   }
52707 
52708   SDNode *NV = DAG.getNodeIfExists(ISD::BUILD_VECTOR, DAG.getVTList(VT), Ops);
52709   if (!NV)
52710     return SDValue();
52711 
52712   // If an inverted version cannot be eliminated, choose it instead of the
52713   // original version.
52714   if (llvm::any_of(NV->uses(), IsNotFMA))
52715     return SDValue(NV, 0);
52716 
52717   // If the inverted version also can be eliminated, we have to consistently
52718   // prefer one of the two values; we prefer the one whose first element is
52719   // negative.
52720   // N.B. We need to skip undefs that may precede a value.
52721   for (auto op : V->op_values()) {
52722     if (auto *Cst = dyn_cast<ConstantFPSDNode>(op)) {
52723       if (Cst->isNegative())
52724         return SDValue();
52725       break;
52726     }
52727   }
52728   return SDValue(NV, 0);
52729 }
52730 
52731 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
52732                           TargetLowering::DAGCombinerInfo &DCI,
52733                           const X86Subtarget &Subtarget) {
52734   SDLoc dl(N);
52735   EVT VT = N->getValueType(0);
52736   bool IsStrict = N->isStrictFPOpcode() || N->isTargetStrictFPOpcode();
52737 
52738   // Let legalize expand this if it isn't a legal type yet.
52739   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52740   if (!TLI.isTypeLegal(VT))
52741     return SDValue();
52742 
52743   SDValue A = N->getOperand(IsStrict ? 1 : 0);
52744   SDValue B = N->getOperand(IsStrict ? 2 : 1);
52745   SDValue C = N->getOperand(IsStrict ? 3 : 2);
52746 
52747   // If the operation allows fast-math and the target does not support FMA,
52748   // split this into mul+add to avoid libcall(s).
52749   SDNodeFlags Flags = N->getFlags();
52750   if (!IsStrict && Flags.hasAllowReassociation() &&
52751       TLI.isOperationExpand(ISD::FMA, VT)) {
52752     SDValue Fmul = DAG.getNode(ISD::FMUL, dl, VT, A, B, Flags);
52753     return DAG.getNode(ISD::FADD, dl, VT, Fmul, C, Flags);
52754   }
52755 
52756   EVT ScalarVT = VT.getScalarType();
52757   if (((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
52758        !Subtarget.hasAnyFMA()) &&
52759       !(ScalarVT == MVT::f16 && Subtarget.hasFP16()))
52760     return SDValue();
52761 
52762   auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
52763     bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
52764     bool LegalOperations = !DCI.isBeforeLegalizeOps();
52765     if (SDValue NegV = TLI.getCheaperNegatedExpression(V, DAG, LegalOperations,
52766                                                        CodeSize)) {
52767       V = NegV;
52768       return true;
52769     }
52770     // Look through extract_vector_elts. If it comes from an FNEG, create a
52771     // new extract from the FNEG input.
52772     if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
52773         isNullConstant(V.getOperand(1))) {
52774       SDValue Vec = V.getOperand(0);
52775       if (SDValue NegV = TLI.getCheaperNegatedExpression(
52776               Vec, DAG, LegalOperations, CodeSize)) {
52777         V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
52778                         NegV, V.getOperand(1));
52779         return true;
52780       }
52781     }
52782     // Lookup if there is an inverted version of constant vector V in DAG.
52783     if (ISD::isBuildVectorOfConstantFPSDNodes(V.getNode())) {
52784       if (SDValue NegV = getInvertedVectorForFMA(V, DAG)) {
52785         V = NegV;
52786         return true;
52787       }
52788     }
52789     return false;
52790   };
52791 
52792   // Do not convert the passthru input of scalar intrinsics.
52793   // FIXME: We could allow negations of the lower element only.
52794   bool NegA = invertIfNegative(A);
52795   bool NegB = invertIfNegative(B);
52796   bool NegC = invertIfNegative(C);
52797 
52798   if (!NegA && !NegB && !NegC)
52799     return SDValue();
52800 
52801   unsigned NewOpcode =
52802       negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
52803 
52804   // Propagate fast-math-flags to new FMA node.
52805   SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
52806   if (IsStrict) {
52807     assert(N->getNumOperands() == 4 && "Shouldn't be greater than 4");
52808     return DAG.getNode(NewOpcode, dl, {VT, MVT::Other},
52809                        {N->getOperand(0), A, B, C});
52810   } else {
52811     if (N->getNumOperands() == 4)
52812       return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
52813     return DAG.getNode(NewOpcode, dl, VT, A, B, C);
52814   }
52815 }
52816 
52817 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
52818 // Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
52819 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
52820                                TargetLowering::DAGCombinerInfo &DCI) {
52821   SDLoc dl(N);
52822   EVT VT = N->getValueType(0);
52823   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52824   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
52825   bool LegalOperations = !DCI.isBeforeLegalizeOps();
52826 
52827   SDValue N2 = N->getOperand(2);
52828 
52829   SDValue NegN2 =
52830       TLI.getCheaperNegatedExpression(N2, DAG, LegalOperations, CodeSize);
52831   if (!NegN2)
52832     return SDValue();
52833   unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);
52834 
52835   if (N->getNumOperands() == 4)
52836     return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
52837                        NegN2, N->getOperand(3));
52838   return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
52839                      NegN2);
52840 }
52841 
52842 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
52843                            TargetLowering::DAGCombinerInfo &DCI,
52844                            const X86Subtarget &Subtarget) {
52845   SDLoc dl(N);
52846   SDValue N0 = N->getOperand(0);
52847   EVT VT = N->getValueType(0);
52848 
52849   // (i32 (aext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
52850   // FIXME: Is this needed? We don't seem to have any tests for it.
52851   if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ANY_EXTEND &&
52852       N0.getOpcode() == X86ISD::SETCC_CARRY) {
52853     SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, N0->getOperand(0),
52854                                  N0->getOperand(1));
52855     bool ReplaceOtherUses = !N0.hasOneUse();
52856     DCI.CombineTo(N, Setcc);
52857     // Replace other uses with a truncate of the widened setcc_carry.
52858     if (ReplaceOtherUses) {
52859       SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
52860                                   N0.getValueType(), Setcc);
52861       DCI.CombineTo(N0.getNode(), Trunc);
52862     }
52863 
52864     return SDValue(N, 0);
52865   }
52866 
52867   if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
52868     return NewCMov;
52869 
52870   if (DCI.isBeforeLegalizeOps())
52871     if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
52872       return V;
52873 
52874   if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), dl, VT, N0,
52875                                                  DAG, DCI, Subtarget))
52876     return V;
52877 
52878   if (VT.isVector())
52879     if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
52880       return R;
52881 
52882   if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
52883     return NewAdd;
52884 
52885   if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
52886     return R;
52887 
52888   // TODO: Combine with any target/faux shuffle.
52889   if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
52890       VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
52891     SDValue N00 = N0.getOperand(0);
52892     SDValue N01 = N0.getOperand(1);
52893     unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
52894     APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
52895     if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
52896         (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
52897       return concatSubVectors(N00, N01, DAG, dl);
52898     }
52899   }
52900 
52901   return SDValue();
52902 }
52903 
52904 /// If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
52905 /// pre-promote its result type since vXi1 vectors don't get promoted
52906 /// during type legalization.
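/// For example (illustrative), on AVX512F without BWI:
///   (v16i1 setcc (v16i8 A, v16i8 B, cc))
///     --> (v16i1 trunc (v16i8 setcc (v16i8 A, v16i8 B, cc)))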
52907 static SDValue truncateAVX512SetCCNoBWI(EVT VT, EVT OpVT, SDValue LHS,
52908                                         SDValue RHS, ISD::CondCode CC,
52909                                         const SDLoc &DL, SelectionDAG &DAG,
52910                                         const X86Subtarget &Subtarget) {
52911   if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
52912       VT.getVectorElementType() == MVT::i1 &&
52913       (OpVT.getVectorElementType() == MVT::i8 ||
52914        OpVT.getVectorElementType() == MVT::i16)) {
52915     SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
52916     return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
52917   }
52918   return SDValue();
52919 }
52920 
52921 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
52922                             TargetLowering::DAGCombinerInfo &DCI,
52923                             const X86Subtarget &Subtarget) {
52924   const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
52925   const SDValue LHS = N->getOperand(0);
52926   const SDValue RHS = N->getOperand(1);
52927   EVT VT = N->getValueType(0);
52928   EVT OpVT = LHS.getValueType();
52929   SDLoc DL(N);
52930 
52931   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
52932     if (SDValue V = combineVectorSizedSetCCEquality(VT, LHS, RHS, CC, DL, DAG,
52933                                                     Subtarget))
52934       return V;
52935 
52936     if (VT == MVT::i1) {
52937       X86::CondCode X86CC;
52938       if (SDValue V =
52939               MatchVectorAllEqualTest(LHS, RHS, CC, DL, Subtarget, DAG, X86CC))
52940         return DAG.getNode(ISD::TRUNCATE, DL, VT, getSETCC(X86CC, V, DL, DAG));
52941     }
52942 
52943     if (OpVT.isScalarInteger()) {
52944       // cmpeq(or(X,Y),X) --> cmpeq(and(~X,Y),0)
52945       // cmpne(or(X,Y),X) --> cmpne(and(~X,Y),0)
52946       auto MatchOrCmpEq = [&](SDValue N0, SDValue N1) {
52947         if (N0.getOpcode() == ISD::OR && N0->hasOneUse()) {
52948           if (N0.getOperand(0) == N1)
52949             return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
52950                                N0.getOperand(1));
52951           if (N0.getOperand(1) == N1)
52952             return DAG.getNode(ISD::AND, DL, OpVT, DAG.getNOT(DL, N1, OpVT),
52953                                N0.getOperand(0));
52954         }
52955         return SDValue();
52956       };
52957       if (SDValue AndN = MatchOrCmpEq(LHS, RHS))
52958         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52959       if (SDValue AndN = MatchOrCmpEq(RHS, LHS))
52960         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52961 
52962       // cmpeq(and(X,Y),Y) --> cmpeq(and(~X,Y),0)
52963       // cmpne(and(X,Y),Y) --> cmpne(and(~X,Y),0)
52964       auto MatchAndCmpEq = [&](SDValue N0, SDValue N1) {
52965         if (N0.getOpcode() == ISD::AND && N0->hasOneUse()) {
52966           if (N0.getOperand(0) == N1)
52967             return DAG.getNode(ISD::AND, DL, OpVT, N1,
52968                                DAG.getNOT(DL, N0.getOperand(1), OpVT));
52969           if (N0.getOperand(1) == N1)
52970             return DAG.getNode(ISD::AND, DL, OpVT, N1,
52971                                DAG.getNOT(DL, N0.getOperand(0), OpVT));
52972         }
52973         return SDValue();
52974       };
52975       if (SDValue AndN = MatchAndCmpEq(LHS, RHS))
52976         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52977       if (SDValue AndN = MatchAndCmpEq(RHS, LHS))
52978         return DAG.getSetCC(DL, VT, AndN, DAG.getConstant(0, DL, OpVT), CC);
52979 
52980       // cmpeq(trunc(x),C) --> cmpeq(x,C)
52981       // cmpne(trunc(x),C) --> cmpne(x,C)
52982       // iff x upper bits are zero.
52983       if (LHS.getOpcode() == ISD::TRUNCATE &&
52984           LHS.getOperand(0).getScalarValueSizeInBits() >= 32 &&
52985           isa<ConstantSDNode>(RHS) && !DCI.isBeforeLegalize()) {
52986         EVT SrcVT = LHS.getOperand(0).getValueType();
52987         APInt UpperBits = APInt::getBitsSetFrom(SrcVT.getScalarSizeInBits(),
52988                                                 OpVT.getScalarSizeInBits());
52989         const TargetLowering &TLI = DAG.getTargetLoweringInfo();
52990         auto *C = cast<ConstantSDNode>(RHS);
52991         if (DAG.MaskedValueIsZero(LHS.getOperand(0), UpperBits) &&
52992             TLI.isTypeLegal(LHS.getOperand(0).getValueType()))
52993           return DAG.getSetCC(DL, VT, LHS.getOperand(0),
52994                               DAG.getConstant(C->getAPIntValue().zextOrTrunc(
52995                                                   SrcVT.getScalarSizeInBits()),
52996                                               DL, SrcVT),
52997                               CC);
52998       }
52999 
53000       // With C as a power of 2 and C != 0 and C != INT_MIN:
53001       //    icmp eq Abs(X) C ->
53002       //        (icmp eq X, C) | (icmp eq X, -C)
53003       //    icmp ne Abs(X) C ->
53004       //        (icmp ne X, C) & (icmp ne X, -C)
53005       // Both of these patterns can be better optimized in
53006       // DAGCombiner::foldAndOrOfSETCC. Note this only applies for scalar
53007       // integers which is checked above.
53008       if (LHS.getOpcode() == ISD::ABS && LHS.hasOneUse()) {
53009         if (auto *C = dyn_cast<ConstantSDNode>(RHS)) {
53010           const APInt &CInt = C->getAPIntValue();
53011           // We can better optimize this case in DAGCombiner::foldAndOrOfSETCC.
53012           if (CInt.isPowerOf2() && !CInt.isMinSignedValue()) {
53013             SDValue BaseOp = LHS.getOperand(0);
53014             SDValue SETCC0 = DAG.getSetCC(DL, VT, BaseOp, RHS, CC);
53015             SDValue SETCC1 = DAG.getSetCC(
53016                 DL, VT, BaseOp, DAG.getConstant(-CInt, DL, OpVT), CC);
53017             return DAG.getNode(CC == ISD::SETEQ ? ISD::OR : ISD::AND, DL, VT,
53018                                SETCC0, SETCC1);
53019           }
53020         }
53021       }
53022     }
53023   }
53024 
53025   if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
53026       (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
53027     // Using temporaries to avoid messing up operand ordering for later
53028     // transformations if this doesn't work.
53029     SDValue Op0 = LHS;
53030     SDValue Op1 = RHS;
53031     ISD::CondCode TmpCC = CC;
53032     // Put build_vector on the right.
53033     if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
53034       std::swap(Op0, Op1);
53035       TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
53036     }
53037 
53038     bool IsSEXT0 =
53039         (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
53040         (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
53041     bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
53042 
53043     if (IsSEXT0 && IsVZero1) {
53044       assert(VT == Op0.getOperand(0).getValueType() &&
53045              "Unexpected operand type");
53046       if (TmpCC == ISD::SETGT)
53047         return DAG.getConstant(0, DL, VT);
53048       if (TmpCC == ISD::SETLE)
53049         return DAG.getConstant(1, DL, VT);
53050       if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
53051         return DAG.getNOT(DL, Op0.getOperand(0), VT);
53052 
53053       assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
53054              "Unexpected condition code!");
53055       return Op0.getOperand(0);
53056     }
53057   }
53058 
53059   // Try and make an unsigned vector comparison signed. On pre-AVX512 targets
53060   // there are only signed comparisons (`PCMPGT`), and on AVX512 it's often
53061   // better to use `PCMPGT` if the result is meant to stay in a vector (and if
53062   // it's going to a mask, there are signed AVX512 comparisons).
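  // For example (illustrative): if both operands are known non-negative,
  //   (setugt X, Y) --> (setgt X, Y)
  // and (setuge X, C) can become (setgt X, C - 1) when the constant can be
  // safely decremented.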
53063   if (VT.isVector() && OpVT.isVector() && OpVT.isInteger()) {
53064     bool CanMakeSigned = false;
53065     if (ISD::isUnsignedIntSetCC(CC)) {
53066       KnownBits CmpKnown =
53067           DAG.computeKnownBits(LHS).intersectWith(DAG.computeKnownBits(RHS));
53068       // If we know LHS/RHS share the same sign bit at each element we can
53069       // make this signed.
53070       // NOTE: `computeKnownBits` on a vector type aggregates common bits
53071       // across all lanes. So a pattern where the sign varies from lane to
53072       // lane, but at each lane Sign(LHS) is known to equal Sign(RHS), will be
53073       // missed. We could get around this by demanding each lane
53074       // independently, but this isn't the most important optimization and
53075       // that may eat into compile time.
53076       CanMakeSigned =
53077           CmpKnown.Zero.isSignBitSet() || CmpKnown.One.isSignBitSet();
53078     }
53079     if (CanMakeSigned || ISD::isSignedIntSetCC(CC)) {
53080       SDValue LHSOut = LHS;
53081       SDValue RHSOut = RHS;
53082       ISD::CondCode NewCC = CC;
53083       switch (CC) {
53084       case ISD::SETGE:
53085       case ISD::SETUGE:
53086         if (SDValue NewLHS = incDecVectorConstant(LHS, DAG, /*IsInc*/ true,
53087                                                   /*NSW*/ true))
53088           LHSOut = NewLHS;
53089         else if (SDValue NewRHS = incDecVectorConstant(
53090                      RHS, DAG, /*IsInc*/ false, /*NSW*/ true))
53091           RHSOut = NewRHS;
53092         else
53093           break;
53094 
53095         [[fallthrough]];
53096       case ISD::SETUGT:
53097         NewCC = ISD::SETGT;
53098         break;
53099 
53100       case ISD::SETLE:
53101       case ISD::SETULE:
53102         if (SDValue NewLHS = incDecVectorConstant(LHS, DAG, /*IsInc*/ false,
53103                                                   /*NSW*/ true))
53104           LHSOut = NewLHS;
53105         else if (SDValue NewRHS = incDecVectorConstant(RHS, DAG, /*IsInc*/ true,
53106                                                        /*NSW*/ true))
53107           RHSOut = NewRHS;
53108         else
53109           break;
53110 
53111         [[fallthrough]];
53112       case ISD::SETULT:
53113         // Will be swapped to SETGT in LowerVSETCC*.
53114         NewCC = ISD::SETLT;
53115         break;
53116       default:
53117         break;
53118       }
53119       if (NewCC != CC) {
53120         if (SDValue R = truncateAVX512SetCCNoBWI(VT, OpVT, LHSOut, RHSOut,
53121                                                  NewCC, DL, DAG, Subtarget))
53122           return R;
53123         return DAG.getSetCC(DL, VT, LHSOut, RHSOut, NewCC);
53124       }
53125     }
53126   }
53127 
53128   if (SDValue R =
53129           truncateAVX512SetCCNoBWI(VT, OpVT, LHS, RHS, CC, DL, DAG, Subtarget))
53130     return R;
53131 
53132   // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
53133   // to avoid scalarization via legalization because v4i32 is not a legal type.
53134   if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
53135       LHS.getValueType() == MVT::v4f32)
53136     return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);
53137 
53138   // X pred 0.0 --> X pred -X
53139   // If the negation of X already exists, use it in the comparison. This removes
53140   // the need to materialize 0.0 and allows matching to SSE's MIN/MAX
53141   // instructions in patterns with a 'select' node.
53142   if (isNullFPScalarOrVectorConst(RHS)) {
53143     SDVTList FNegVT = DAG.getVTList(OpVT);
53144     if (SDNode *FNeg = DAG.getNodeIfExists(ISD::FNEG, FNegVT, {LHS}))
53145       return DAG.getSetCC(DL, VT, LHS, SDValue(FNeg, 0), CC);
53146   }
53147 
53148   return SDValue();
53149 }
53150 
53151 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
53152                              TargetLowering::DAGCombinerInfo &DCI,
53153                              const X86Subtarget &Subtarget) {
53154   SDValue Src = N->getOperand(0);
53155   MVT SrcVT = Src.getSimpleValueType();
53156   MVT VT = N->getSimpleValueType(0);
53157   unsigned NumBits = VT.getScalarSizeInBits();
53158   unsigned NumElts = SrcVT.getVectorNumElements();
53159   unsigned NumBitsPerElt = SrcVT.getScalarSizeInBits();
53160   assert(VT == MVT::i32 && NumElts <= NumBits && "Unexpected MOVMSK types");
53161 
53162   // Perform constant folding.
53163   APInt UndefElts;
53164   SmallVector<APInt, 32> EltBits;
53165   if (getTargetConstantBitsFromNode(Src, NumBitsPerElt, UndefElts, EltBits)) {
53166     APInt Imm(32, 0);
53167     for (unsigned Idx = 0; Idx != NumElts; ++Idx)
53168       if (!UndefElts[Idx] && EltBits[Idx].isNegative())
53169         Imm.setBit(Idx);
53170 
53171     return DAG.getConstant(Imm, SDLoc(N), VT);
53172   }
53173 
53174   // Look through int->fp bitcasts that don't change the element width.
53175   unsigned EltWidth = SrcVT.getScalarSizeInBits();
53176   if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
53177       Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
53178     return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
53179 
53180   // Fold movmsk(not(x)) -> not(movmsk(x)) to improve folding of movmsk results
53181   // with scalar comparisons.
53182   if (SDValue NotSrc = IsNOT(Src, DAG)) {
53183     SDLoc DL(N);
53184     APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
53185     NotSrc = DAG.getBitcast(SrcVT, NotSrc);
53186     return DAG.getNode(ISD::XOR, DL, VT,
53187                        DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
53188                        DAG.getConstant(NotMask, DL, VT));
53189   }
53190 
53191   // Fold movmsk(icmp_sgt(x,-1)) -> not(movmsk(x)) to improve folding of movmsk
53192   // results with scalar comparisons.
53193   if (Src.getOpcode() == X86ISD::PCMPGT &&
53194       ISD::isBuildVectorAllOnes(Src.getOperand(1).getNode())) {
53195     SDLoc DL(N);
53196     APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
53197     return DAG.getNode(ISD::XOR, DL, VT,
53198                        DAG.getNode(X86ISD::MOVMSK, DL, VT, Src.getOperand(0)),
53199                        DAG.getConstant(NotMask, DL, VT));
53200   }
53201 
53202   // Fold movmsk(icmp_eq(and(x,c1),c1)) -> movmsk(shl(x,c2))
53203   // Fold movmsk(icmp_eq(and(x,c1),0)) -> movmsk(not(shl(x,c2)))
53204   // iff pow2splat(c1).
53205   // Use KnownBits to determine if only a single bit is non-zero
53206   // in each element (pow2 or zero), and shift that bit to the msb.
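  // For example (illustrative), for v4i32 with c1 = splat(4):
  //   movmsk(pcmpeq(and(x, 4), 4)) --> movmsk(shl(x, 29))
  //   movmsk(pcmpeq(and(x, 4), 0)) --> movmsk(not(shl(x, 29)))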
53207   if (Src.getOpcode() == X86ISD::PCMPEQ) {
53208     KnownBits KnownLHS = DAG.computeKnownBits(Src.getOperand(0));
53209     KnownBits KnownRHS = DAG.computeKnownBits(Src.getOperand(1));
53210     unsigned ShiftAmt = KnownLHS.countMinLeadingZeros();
53211     if (KnownLHS.countMaxPopulation() == 1 &&
53212         (KnownRHS.isZero() || (KnownRHS.countMaxPopulation() == 1 &&
53213                                ShiftAmt == KnownRHS.countMinLeadingZeros()))) {
53214       SDLoc DL(N);
53215       MVT ShiftVT = SrcVT;
53216       SDValue ShiftLHS = Src.getOperand(0);
53217       SDValue ShiftRHS = Src.getOperand(1);
53218       if (ShiftVT.getScalarType() == MVT::i8) {
53219         // vXi8 shifts - we only care about the signbit so can use PSLLW.
53220         ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
53221         ShiftLHS = DAG.getBitcast(ShiftVT, ShiftLHS);
53222         ShiftRHS = DAG.getBitcast(ShiftVT, ShiftRHS);
53223       }
53224       ShiftLHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT,
53225                                             ShiftLHS, ShiftAmt, DAG);
53226       ShiftRHS = getTargetVShiftByConstNode(X86ISD::VSHLI, DL, ShiftVT,
53227                                             ShiftRHS, ShiftAmt, DAG);
53228       ShiftLHS = DAG.getBitcast(SrcVT, ShiftLHS);
53229       ShiftRHS = DAG.getBitcast(SrcVT, ShiftRHS);
53230       SDValue Res = DAG.getNode(ISD::XOR, DL, SrcVT, ShiftLHS, ShiftRHS);
53231       return DAG.getNode(X86ISD::MOVMSK, DL, VT, DAG.getNOT(DL, Res, SrcVT));
53232     }
53233   }
53234 
53235   // Fold movmsk(logic(X,C)) -> logic(movmsk(X),C)
53236   if (N->isOnlyUserOf(Src.getNode())) {
53237     SDValue SrcBC = peekThroughOneUseBitcasts(Src);
53238     if (ISD::isBitwiseLogicOp(SrcBC.getOpcode())) {
53239       APInt UndefElts;
53240       SmallVector<APInt, 32> EltBits;
53241       if (getTargetConstantBitsFromNode(SrcBC.getOperand(1), NumBitsPerElt,
53242                                         UndefElts, EltBits)) {
53243         APInt Mask = APInt::getZero(NumBits);
53244         for (unsigned Idx = 0; Idx != NumElts; ++Idx) {
53245           if (!UndefElts[Idx] && EltBits[Idx].isNegative())
53246             Mask.setBit(Idx);
53247         }
53248         SDLoc DL(N);
53249         SDValue NewSrc = DAG.getBitcast(SrcVT, SrcBC.getOperand(0));
53250         SDValue NewMovMsk = DAG.getNode(X86ISD::MOVMSK, DL, VT, NewSrc);
53251         return DAG.getNode(SrcBC.getOpcode(), DL, VT, NewMovMsk,
53252                            DAG.getConstant(Mask, DL, VT));
53253       }
53254     }
53255   }
53256 
53257   // Simplify the inputs.
53258   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53259   APInt DemandedMask(APInt::getAllOnes(NumBits));
53260   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
53261     return SDValue(N, 0);
53262 
53263   return SDValue();
53264 }
53265 
53266 static SDValue combineTESTP(SDNode *N, SelectionDAG &DAG,
53267                             TargetLowering::DAGCombinerInfo &DCI,
53268                             const X86Subtarget &Subtarget) {
53269   MVT VT = N->getSimpleValueType(0);
53270   unsigned NumBits = VT.getScalarSizeInBits();
53271 
53272   // Simplify the inputs.
53273   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53274   APInt DemandedMask(APInt::getAllOnes(NumBits));
53275   if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
53276     return SDValue(N, 0);
53277 
53278   return SDValue();
53279 }
53280 
53281 static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
53282                                        TargetLowering::DAGCombinerInfo &DCI) {
53283   auto *MemOp = cast<X86MaskedGatherScatterSDNode>(N);
53284   SDValue Mask = MemOp->getMask();
53285 
53286   // With vector masks we only demand the upper bit of the mask.
53287   if (Mask.getScalarValueSizeInBits() != 1) {
53288     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53289     APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
53290     if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
53291       if (N->getOpcode() != ISD::DELETED_NODE)
53292         DCI.AddToWorklist(N);
53293       return SDValue(N, 0);
53294     }
53295   }
53296 
53297   return SDValue();
53298 }
53299 
53300 static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
53301                                     SDValue Index, SDValue Base, SDValue Scale,
53302                                     SelectionDAG &DAG) {
53303   SDLoc DL(GorS);
53304 
53305   if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
53306     SDValue Ops[] = { Gather->getChain(), Gather->getPassThru(),
53307                       Gather->getMask(), Base, Index, Scale } ;
53308     return DAG.getMaskedGather(Gather->getVTList(),
53309                                Gather->getMemoryVT(), DL, Ops,
53310                                Gather->getMemOperand(),
53311                                Gather->getIndexType(),
53312                                Gather->getExtensionType());
53313   }
53314   auto *Scatter = cast<MaskedScatterSDNode>(GorS);
53315   SDValue Ops[] = { Scatter->getChain(), Scatter->getValue(),
53316                     Scatter->getMask(), Base, Index, Scale };
53317   return DAG.getMaskedScatter(Scatter->getVTList(),
53318                               Scatter->getMemoryVT(), DL,
53319                               Ops, Scatter->getMemOperand(),
53320                               Scatter->getIndexType(),
53321                               Scatter->isTruncatingStore());
53322 }
53323 
53324 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
53325                                     TargetLowering::DAGCombinerInfo &DCI) {
53326   SDLoc DL(N);
53327   auto *GorS = cast<MaskedGatherScatterSDNode>(N);
53328   SDValue Index = GorS->getIndex();
53329   SDValue Base = GorS->getBasePtr();
53330   SDValue Scale = GorS->getScale();
53331 
53332   if (DCI.isBeforeLegalize()) {
53333     unsigned IndexWidth = Index.getScalarValueSizeInBits();
53334 
53335     // Shrink constant indices if they are larger than 32-bits.
53336     // Only do this before legalize types since v2i64 could become v2i32.
53337     // FIXME: We could check that the type is legal if we're after legalize
53338     // types, but then we would need to construct test cases where that happens.
53339     // FIXME: We could support more than just constant vectors, but we need to
53340     // be careful with costing. A truncate that can be optimized out would be fine.
53341     // Otherwise we might only want to create a truncate if it avoids a split.
53342     if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
53343       if (BV->isConstant() && IndexWidth > 32 &&
53344           DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
53345         EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
53346         Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
53347         return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53348       }
53349     }
53350 
53351     // Shrink any sign/zero extends from 32 or smaller to larger than 32 if
53352     // there are sufficient sign bits. Only do this before legalize types to
53353     // avoid creating illegal types in truncate.
53354     if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
53355          Index.getOpcode() == ISD::ZERO_EXTEND) &&
53356         IndexWidth > 32 &&
53357         Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
53358         DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
53359       EVT NewVT = Index.getValueType().changeVectorElementType(MVT::i32);
53360       Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
53361       return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53362     }
53363   }
53364 
53365   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53366   EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
53367   // Try to move splat constant adders from the index operand to the base
53368   // pointer operand, taking care to multiply by the scale. We can only do
53369   // this when index element type is the same as the pointer type.
53370   // Otherwise we need to be sure the math doesn't wrap before the scale.
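  // For example (illustrative), with a pointer-sized index and scale 4:
  //   gather(base, add(X, splat(3)), 4) --> gather(base + 12, X, 4)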
53371   if (Index.getOpcode() == ISD::ADD &&
53372       Index.getValueType().getVectorElementType() == PtrVT &&
53373       isa<ConstantSDNode>(Scale)) {
53374     uint64_t ScaleAmt = Scale->getAsZExtVal();
53375     if (auto *BV = dyn_cast<BuildVectorSDNode>(Index.getOperand(1))) {
53376       BitVector UndefElts;
53377       if (ConstantSDNode *C = BV->getConstantSplatNode(&UndefElts)) {
53378         // FIXME: Allow non-constant?
53379         if (UndefElts.none()) {
53380           // Apply the scale.
53381           APInt Adder = C->getAPIntValue() * ScaleAmt;
53382           // Add it to the existing base.
53383           Base = DAG.getNode(ISD::ADD, DL, PtrVT, Base,
53384                              DAG.getConstant(Adder, DL, PtrVT));
53385           Index = Index.getOperand(0);
53386           return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53387         }
53388       }
53389 
53390       // It's also possible base is just a constant. In that case, just
53391       // replace it with 0 and move the displacement into the index.
53392       if (BV->isConstant() && isa<ConstantSDNode>(Base) &&
53393           isOneConstant(Scale)) {
53394         SDValue Splat = DAG.getSplatBuildVector(Index.getValueType(), DL, Base);
53395         // Combine the constant build_vector and the constant base.
53396         Splat = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
53397                             Index.getOperand(1), Splat);
53398         // Add to the LHS of the original Index add.
53399         Index = DAG.getNode(ISD::ADD, DL, Index.getValueType(),
53400                             Index.getOperand(0), Splat);
53401         Base = DAG.getConstant(0, DL, Base.getValueType());
53402         return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53403       }
53404     }
53405   }
53406 
53407   if (DCI.isBeforeLegalizeOps()) {
53408     unsigned IndexWidth = Index.getScalarValueSizeInBits();
53409 
53410     // Make sure the index is either i32 or i64
53411     if (IndexWidth != 32 && IndexWidth != 64) {
53412       MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
53413       EVT IndexVT = Index.getValueType().changeVectorElementType(EltVT);
53414       Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
53415       return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
53416     }
53417   }
53418 
53419   // With vector masks we only demand the upper bit of the mask.
53420   SDValue Mask = GorS->getMask();
53421   if (Mask.getScalarValueSizeInBits() != 1) {
53422     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53423     APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
53424     if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
53425       if (N->getOpcode() != ISD::DELETED_NODE)
53426         DCI.AddToWorklist(N);
53427       return SDValue(N, 0);
53428     }
53429   }
53430 
53431   return SDValue();
53432 }
53433 
53434 // Optimize  RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
53435 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
53436                                const X86Subtarget &Subtarget) {
53437   SDLoc DL(N);
53438   X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
53439   SDValue EFLAGS = N->getOperand(1);
53440 
53441   // Try to simplify the EFLAGS and condition code operands.
53442   if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
53443     return getSETCC(CC, Flags, DL, DAG);
53444 
53445   return SDValue();
53446 }
53447 
53448 /// Optimize branch condition evaluation.
53449 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
53450                              const X86Subtarget &Subtarget) {
53451   SDLoc DL(N);
53452   SDValue EFLAGS = N->getOperand(3);
53453   X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
53454 
53455   // Try to simplify the EFLAGS and condition code operands.
53456   // Make sure to not keep references to operands, as combineSetCCEFLAGS can
53457   // RAUW them under us.
53458   if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
53459     SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
53460     return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
53461                        N->getOperand(1), Cond, Flags);
53462   }
53463 
53464   return SDValue();
53465 }
53466 
53467 // TODO: Could we move this to DAGCombine?
53468 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
53469                                                   SelectionDAG &DAG) {
53470   // Take advantage of vector comparisons (etc.) producing 0 or -1 in each lane
53471   // to optimize away the operation when it's from a constant.
53472   //
53473   // The general transformation is:
53474   //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
53475   //       AND(VECTOR_CMP(x,y), constant2)
53476   //    constant2 = UNARYOP(constant)
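  // For example (illustrative):
  //   (v4f32 sint_to_fp (and (vsetcc X, Y), <1, 1, 1, 1>))
  //     --> (v4f32 bitcast (and (vsetcc X, Y), (bitcast <1.0, 1.0, 1.0, 1.0>)))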
53477 
53478   // Early exit if this isn't a vector operation, the operand of the
53479   // unary operation isn't a bitwise AND, or if the sizes of the operations
53480   // aren't the same.
53481   EVT VT = N->getValueType(0);
53482   bool IsStrict = N->isStrictFPOpcode();
53483   unsigned NumEltBits = VT.getScalarSizeInBits();
53484   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53485   if (!VT.isVector() || Op0.getOpcode() != ISD::AND ||
53486       DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits ||
53487       VT.getSizeInBits() != Op0.getValueSizeInBits())
53488     return SDValue();
53489 
53490   // Now check that the other operand of the AND is a constant. We could
53491   // make the transformation for non-constant splats as well, but it's unclear
53492   // that would be a benefit as it would not eliminate any operations, just
53493   // perform one more step in scalar code before moving to the vector unit.
53494   if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
53495     // Bail out if the vector isn't a constant.
53496     if (!BV->isConstant())
53497       return SDValue();
53498 
53499     // Everything checks out. Build up the new and improved node.
53500     SDLoc DL(N);
53501     EVT IntVT = BV->getValueType(0);
53502     // Create a new constant of the appropriate type for the transformed
53503     // DAG.
53504     SDValue SourceConst;
53505     if (IsStrict)
53506       SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
53507                                 {N->getOperand(0), SDValue(BV, 0)});
53508     else
53509       SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
53510     // The AND node needs bitcasts to/from an integer vector type around it.
53511     SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
53512     SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
53513                                  MaskConst);
53514     SDValue Res = DAG.getBitcast(VT, NewAnd);
53515     if (IsStrict)
53516       return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
53517     return Res;
53518   }
53519 
53520   return SDValue();
53521 }
53522 
53523 /// If we are converting a value to floating-point, try to replace scalar
53524 /// truncate of an extracted vector element with a bitcast. This tries to keep
53525 /// the sequence on XMM registers rather than moving between vector and GPRs.
53526 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
53527   // TODO: This is currently only used by combineSIntToFP, but it is generalized
53528   //       to allow being called by any similar cast opcode.
53529   // TODO: Consider merging this into lowering: vectorizeExtractedCast().
53530   SDValue Trunc = N->getOperand(0);
53531   if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
53532     return SDValue();
53533 
53534   SDValue ExtElt = Trunc.getOperand(0);
53535   if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
53536       !isNullConstant(ExtElt.getOperand(1)))
53537     return SDValue();
53538 
53539   EVT TruncVT = Trunc.getValueType();
53540   EVT SrcVT = ExtElt.getValueType();
53541   unsigned DestWidth = TruncVT.getSizeInBits();
53542   unsigned SrcWidth = SrcVT.getSizeInBits();
53543   if (SrcWidth % DestWidth != 0)
53544     return SDValue();
53545 
53546   // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
53547   EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
53548   unsigned VecWidth = SrcVecVT.getSizeInBits();
53549   unsigned NumElts = VecWidth / DestWidth;
53550   EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
53551   SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
53552   SDLoc DL(N);
53553   SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
53554                                   BitcastVec, ExtElt.getOperand(1));
53555   return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
53556 }
53557 
53558 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
53559                                const X86Subtarget &Subtarget) {
53560   bool IsStrict = N->isStrictFPOpcode();
53561   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53562   EVT VT = N->getValueType(0);
53563   EVT InVT = Op0.getValueType();
53564 
53565   // Using i16 as an intermediate type is a bad idea, unless we have HW support
53566   // for it. Therefore, for type sizes of 32 bits or smaller, just go with i32.
53567   // if hasFP16 support:
53568   //   UINT_TO_FP(vXi1~15)  -> SINT_TO_FP(ZEXT(vXi1~15  to vXi16))
53569   //   UINT_TO_FP(vXi17~31) -> SINT_TO_FP(ZEXT(vXi17~31 to vXi32))
53570   // else
53571   //   UINT_TO_FP(vXi1~31) -> SINT_TO_FP(ZEXT(vXi1~31 to vXi32))
53572   // UINT_TO_FP(vXi33~63) -> SINT_TO_FP(ZEXT(vXi33~63 to vXi64))
53573   if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
53574     unsigned ScalarSize = InVT.getScalarSizeInBits();
53575     if ((ScalarSize == 16 && Subtarget.hasFP16()) || ScalarSize == 32 ||
53576         ScalarSize >= 64)
53577       return SDValue();
53578     SDLoc dl(N);
53579     EVT DstVT =
53580         EVT::getVectorVT(*DAG.getContext(),
53581                          (Subtarget.hasFP16() && ScalarSize < 16) ? MVT::i16
53582                          : ScalarSize < 32                        ? MVT::i32
53583                                                                   : MVT::i64,
53584                          InVT.getVectorNumElements());
53585     SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
53586     if (IsStrict)
53587       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53588                          {N->getOperand(0), P});
53589     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53590   }
53591 
53592   // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
53593   // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
53594   // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
53595   if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
53596       VT.getScalarType() != MVT::f16) {
53597     SDLoc dl(N);
53598     EVT DstVT = InVT.changeVectorElementType(MVT::i32);
53599     SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);
53600 
53601     // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
53602     if (IsStrict)
53603       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53604                          {N->getOperand(0), P});
53605     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53606   }
53607 
53608   // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
53609   // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
53610   // the optimization here.
53611   if (DAG.SignBitIsZero(Op0)) {
53612     if (IsStrict)
53613       return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
53614                          {N->getOperand(0), Op0});
53615     return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
53616   }
53617 
53618   return SDValue();
53619 }
53620 
53621 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
53622                                TargetLowering::DAGCombinerInfo &DCI,
53623                                const X86Subtarget &Subtarget) {
53624   // First try to optimize away the conversion entirely when it's
53625   // conditionally from a constant. Vectors only.
53626   bool IsStrict = N->isStrictFPOpcode();
53627   if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
53628     return Res;
53629 
53630   // Now move on to more general possibilities.
53631   SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
53632   EVT VT = N->getValueType(0);
53633   EVT InVT = Op0.getValueType();
53634 
53635   // Using i16 as an intermediate type is a bad idea, unless we have HW support
53636   // for it. Therefore, for type sizes of 32 bits or smaller, just go with i32.
53637   // if hasFP16 support:
53638   //   SINT_TO_FP(vXi1~15)  -> SINT_TO_FP(SEXT(vXi1~15  to vXi16))
53639   //   SINT_TO_FP(vXi17~31) -> SINT_TO_FP(SEXT(vXi17~31 to vXi32))
53640   // else
53641   //   SINT_TO_FP(vXi1~31) -> SINT_TO_FP(ZEXT(vXi1~31 to vXi32))
53642   // SINT_TO_FP(vXi33~63) -> SINT_TO_FP(SEXT(vXi33~63 to vXi64))
53643   if (InVT.isVector() && VT.getVectorElementType() == MVT::f16) {
53644     unsigned ScalarSize = InVT.getScalarSizeInBits();
53645     if ((ScalarSize == 16 && Subtarget.hasFP16()) || ScalarSize == 32 ||
53646         ScalarSize >= 64)
53647       return SDValue();
53648     SDLoc dl(N);
53649     EVT DstVT =
53650         EVT::getVectorVT(*DAG.getContext(),
53651                          (Subtarget.hasFP16() && ScalarSize < 16) ? MVT::i16
53652                          : ScalarSize < 32                        ? MVT::i32
53653                                                                   : MVT::i64,
53654                          InVT.getVectorNumElements());
53655     SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
53656     if (IsStrict)
53657       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53658                          {N->getOperand(0), P});
53659     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53660   }
53661 
53662   // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
53663   // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
53664   // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
53665   if (InVT.isVector() && InVT.getScalarSizeInBits() < 32 &&
53666       VT.getScalarType() != MVT::f16) {
53667     SDLoc dl(N);
53668     EVT DstVT = InVT.changeVectorElementType(MVT::i32);
53669     SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
53670     if (IsStrict)
53671       return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53672                          {N->getOperand(0), P});
53673     return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
53674   }
53675 
53676   // Without AVX512DQ we only support i64 to float scalar conversion. For both
53677   // vectors and scalars, see if we know that the upper bits are all the sign
53678   // bit, in which case we can truncate the input to i32 and convert from that.
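  // For example (illustrative), without AVX512DQ:
  //   (f64 sint_to_fp (i64 X)) --> (f64 sint_to_fp (i32 trunc X))
  // when X is known to have at least 33 sign bits.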
53679   if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
53680     unsigned BitWidth = InVT.getScalarSizeInBits();
53681     unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
53682     if (NumSignBits >= (BitWidth - 31)) {
53683       EVT TruncVT = MVT::i32;
53684       if (InVT.isVector())
53685         TruncVT = InVT.changeVectorElementType(TruncVT);
53686       SDLoc dl(N);
53687       if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
53688         SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
53689         if (IsStrict)
53690           return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
53691                              {N->getOperand(0), Trunc});
53692         return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
53693       }
53694       // If we're after legalize and the type is v2i32 we need to shuffle and
53695       // use CVTSI2P.
53696       assert(InVT == MVT::v2i64 && "Unexpected VT!");
53697       SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
53698       SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
53699                                           { 0, 2, -1, -1 });
53700       if (IsStrict)
53701         return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
53702                            {N->getOperand(0), Shuf});
53703       return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
53704     }
53705   }
53706 
53707   // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
53708   // a 32-bit target where SSE doesn't support i64->FP operations.
53709   if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
53710       Op0.getOpcode() == ISD::LOAD) {
53711     LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
53712 
53713     // This transformation is not supported if the result type is f16 or f128.
53714     if (VT == MVT::f16 || VT == MVT::f128)
53715       return SDValue();
53716 
53717     // If we have AVX512DQ we can use packed conversion instructions unless
53718     // the VT is f80.
53719     if (Subtarget.hasDQI() && VT != MVT::f80)
53720       return SDValue();
53721 
53722     if (Ld->isSimple() && !VT.isVector() && ISD::isNormalLoad(Op0.getNode()) &&
53723         Op0.hasOneUse() && !Subtarget.is64Bit() && InVT == MVT::i64) {
53724       std::pair<SDValue, SDValue> Tmp =
53725           Subtarget.getTargetLowering()->BuildFILD(
53726               VT, InVT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(),
53727               Ld->getPointerInfo(), Ld->getOriginalAlign(), DAG);
53728       DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
53729       return Tmp.first;
53730     }
53731   }
53732 
53733   if (IsStrict)
53734     return SDValue();
53735 
53736   if (SDValue V = combineToFPTruncExtElt(N, DAG))
53737     return V;
53738 
53739   return SDValue();
53740 }
53741 
53742 static bool needCarryOrOverflowFlag(SDValue Flags) {
53743   assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
53744 
53745   for (const SDNode *User : Flags->uses()) {
53746     X86::CondCode CC;
53747     switch (User->getOpcode()) {
53748     default:
53749       // Be conservative.
53750       return true;
53751     case X86ISD::SETCC:
53752     case X86ISD::SETCC_CARRY:
53753       CC = (X86::CondCode)User->getConstantOperandVal(0);
53754       break;
53755     case X86ISD::BRCOND:
53756     case X86ISD::CMOV:
53757       CC = (X86::CondCode)User->getConstantOperandVal(2);
53758       break;
53759     }
53760 
53761     switch (CC) {
53762     default: break;
53763     case X86::COND_A: case X86::COND_AE:
53764     case X86::COND_B: case X86::COND_BE:
53765     case X86::COND_O: case X86::COND_NO:
53766     case X86::COND_G: case X86::COND_GE:
53767     case X86::COND_L: case X86::COND_LE:
53768       return true;
53769     }
53770   }
53771 
53772   return false;
53773 }
53774 
53775 static bool onlyZeroFlagUsed(SDValue Flags) {
53776   assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
53777 
53778   for (const SDNode *User : Flags->uses()) {
53779     unsigned CCOpNo;
53780     switch (User->getOpcode()) {
53781     default:
53782       // Be conservative.
53783       return false;
53784     case X86ISD::SETCC:
53785     case X86ISD::SETCC_CARRY:
53786       CCOpNo = 0;
53787       break;
53788     case X86ISD::BRCOND:
53789     case X86ISD::CMOV:
53790       CCOpNo = 2;
53791       break;
53792     }
53793 
53794     X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
53795     if (CC != X86::COND_E && CC != X86::COND_NE)
53796       return false;
53797   }
53798 
53799   return true;
53800 }
53801 
53802 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG,
53803                           const X86Subtarget &Subtarget) {
53804   // Only handle test patterns.
53805   if (!isNullConstant(N->getOperand(1)))
53806     return SDValue();
53807 
53808   // If we have a CMP of a truncated binop, see if we can make a smaller binop
53809   // and use its flags directly.
53810   // TODO: Maybe we should try promoting compares that only use the zero flag
53811   // first if we can prove the upper bits with computeKnownBits?
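  // An illustrative sketch of the narrowing performed at the end of this
  // function (types chosen arbitrarily):
  //   (X86ISD::CMP (i32 trunc (i64 xor A, B)), 0)
  //     -> flags result of (i32 X86ISD::XOR (trunc A), (trunc B))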
53812   SDLoc dl(N);
53813   SDValue Op = N->getOperand(0);
53814   EVT VT = Op.getValueType();
53815   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
53816 
53817   // If we have a constant logical shift that's only used in a comparison
53818   // against zero, turn it into an equivalent AND. This allows turning it into
53819   // a TEST instruction later.
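  // An illustrative sketch for a 32-bit value (shift amount chosen
  // arbitrarily):
  //   (cmp (srl X, 5), 0) -> (cmp (and X, 0xFFFFFFE0), 0)
  //   (cmp (shl X, 5), 0) -> (cmp (and X, 0x07FFFFFF), 0)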
53820   if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
53821       Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
53822       onlyZeroFlagUsed(SDValue(N, 0))) {
53823     unsigned BitWidth = VT.getSizeInBits();
53824     const APInt &ShAmt = Op.getConstantOperandAPInt(1);
53825     if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
53826       unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
53827       APInt Mask = Op.getOpcode() == ISD::SRL
53828                        ? APInt::getHighBitsSet(BitWidth, MaskBits)
53829                        : APInt::getLowBitsSet(BitWidth, MaskBits);
53830       if (Mask.isSignedIntN(32)) {
53831         Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
53832                          DAG.getConstant(Mask, dl, VT));
53833         return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53834                            DAG.getConstant(0, dl, VT));
53835       }
53836     }
53837   }
53838 
53839   // If we're extracting from an avx512 bool vector and comparing against zero,
53840   // then try to just bitcast the vector to an integer to use TEST/BT directly.
53841   // (and (extract_elt (kshiftr vXi1, C), 0), 1) -> (and (bc vXi1), 1<<C)
53842   if (Op.getOpcode() == ISD::AND && isOneConstant(Op.getOperand(1)) &&
53843       Op.hasOneUse() && onlyZeroFlagUsed(SDValue(N, 0))) {
53844     SDValue Src = Op.getOperand(0);
53845     if (Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
53846         isNullConstant(Src.getOperand(1)) &&
53847         Src.getOperand(0).getValueType().getScalarType() == MVT::i1) {
53848       SDValue BoolVec = Src.getOperand(0);
53849       unsigned ShAmt = 0;
53850       if (BoolVec.getOpcode() == X86ISD::KSHIFTR) {
53851         ShAmt = BoolVec.getConstantOperandVal(1);
53852         BoolVec = BoolVec.getOperand(0);
53853       }
53854       BoolVec = widenMaskVector(BoolVec, false, Subtarget, DAG, dl);
53855       EVT VecVT = BoolVec.getValueType();
53856       unsigned BitWidth = VecVT.getVectorNumElements();
53857       EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), BitWidth);
53858       if (TLI.isTypeLegal(VecVT) && TLI.isTypeLegal(BCVT)) {
53859         APInt Mask = APInt::getOneBitSet(BitWidth, ShAmt);
53860         Op = DAG.getBitcast(BCVT, BoolVec);
53861         Op = DAG.getNode(ISD::AND, dl, BCVT, Op,
53862                          DAG.getConstant(Mask, dl, BCVT));
53863         return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53864                            DAG.getConstant(0, dl, BCVT));
53865       }
53866     }
53867   }
53868 
53869   // Peek through any zero-extend if we're only testing for a zero result.
53870   if (Op.getOpcode() == ISD::ZERO_EXTEND && onlyZeroFlagUsed(SDValue(N, 0))) {
53871     SDValue Src = Op.getOperand(0);
53872     EVT SrcVT = Src.getValueType();
53873     if (SrcVT.getScalarSizeInBits() >= 8 && TLI.isTypeLegal(SrcVT))
53874       return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Src,
53875                          DAG.getConstant(0, dl, SrcVT));
53876   }
53877 
53878   // Look for a truncate.
53879   if (Op.getOpcode() != ISD::TRUNCATE)
53880     return SDValue();
53881 
53882   SDValue Trunc = Op;
53883   Op = Op.getOperand(0);
53884 
53885   // See if we can compare with zero against the truncation source,
53886   // which should help using the Z flag from many ops. Only do this for
53887   // i32 truncated op to prevent partial-reg compares of promoted ops.
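  // An illustrative sketch (types chosen arbitrarily): for
  //   (cmp (i8 trunc (i32 X)), 0)
  // where bits 8..31 of X are known zero, we can compare the source directly:
  //   (cmp (i32 X), 0)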
53888   EVT OpVT = Op.getValueType();
53889   APInt UpperBits =
53890       APInt::getBitsSetFrom(OpVT.getSizeInBits(), VT.getSizeInBits());
53891   if (OpVT == MVT::i32 && DAG.MaskedValueIsZero(Op, UpperBits) &&
53892       onlyZeroFlagUsed(SDValue(N, 0))) {
53893     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53894                        DAG.getConstant(0, dl, OpVT));
53895   }
53896 
53897   // After this the truncate and arithmetic op must have a single use.
53898   if (!Trunc.hasOneUse() || !Op.hasOneUse())
53899     return SDValue();
53900 
53901   unsigned NewOpc;
53902   switch (Op.getOpcode()) {
53903   default: return SDValue();
53904   case ISD::AND:
53905     // Skip AND with a constant. We have special handling for AND with an
53906     // immediate during isel to generate test instructions.
53907     if (isa<ConstantSDNode>(Op.getOperand(1)))
53908       return SDValue();
53909     NewOpc = X86ISD::AND;
53910     break;
53911   case ISD::OR:  NewOpc = X86ISD::OR;  break;
53912   case ISD::XOR: NewOpc = X86ISD::XOR; break;
53913   case ISD::ADD:
53914     // If the carry or overflow flag is used, we can't truncate.
53915     if (needCarryOrOverflowFlag(SDValue(N, 0)))
53916       return SDValue();
53917     NewOpc = X86ISD::ADD;
53918     break;
53919   case ISD::SUB:
53920     // If the carry or overflow flag is used, we can't truncate.
53921     if (needCarryOrOverflowFlag(SDValue(N, 0)))
53922       return SDValue();
53923     NewOpc = X86ISD::SUB;
53924     break;
53925   }
53926 
53927   // We found an op we can narrow. Truncate its inputs.
53928   SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
53929   SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
53930 
53931   // Use an X86-specific opcode to avoid DAG combine messing with it.
53932   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
53933   Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
53934 
53935   // For AND, keep a CMP so that we can match the test pattern.
53936   if (NewOpc == X86ISD::AND)
53937     return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
53938                        DAG.getConstant(0, dl, VT));
53939 
53940   // Return the flags.
53941   return Op.getValue(1);
53942 }
53943 
53944 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
53945                                 TargetLowering::DAGCombinerInfo &DCI) {
53946   assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
53947          "Expected X86ISD::ADD or X86ISD::SUB");
53948 
53949   SDLoc DL(N);
53950   SDValue LHS = N->getOperand(0);
53951   SDValue RHS = N->getOperand(1);
53952   MVT VT = LHS.getSimpleValueType();
53953   bool IsSub = X86ISD::SUB == N->getOpcode();
53954   unsigned GenericOpc = IsSub ? ISD::SUB : ISD::ADD;
53955 
53956   // If we don't use the flag result, simplify back to a generic ADD/SUB.
53957   if (!N->hasAnyUseOfValue(1)) {
53958     SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
53959     return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
53960   }
53961 
53962   // Fold any similar generic ADD/SUB opcodes to reuse this node.
53963   auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
53964     SDValue Ops[] = {N0, N1};
53965     SDVTList VTs = DAG.getVTList(N->getValueType(0));
53966     if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
53967       SDValue Op(N, 0);
53968       if (Negate)
53969         Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
53970       DCI.CombineTo(GenericAddSub, Op);
53971     }
53972   };
53973   MatchGeneric(LHS, RHS, false);
53974   MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());
53975 
53976   // TODO: Can we drop the ZeroSecondOpOnly limit? This is to guarantee that the
53977   // EFLAGS result doesn't change.
53978   return combineAddOrSubToADCOrSBB(IsSub, DL, VT, LHS, RHS, DAG,
53979                                    /*ZeroSecondOpOnly*/ true);
53980 }
53981 
53982 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
53983   SDValue LHS = N->getOperand(0);
53984   SDValue RHS = N->getOperand(1);
53985   SDValue BorrowIn = N->getOperand(2);
53986 
53987   if (SDValue Flags = combineCarryThroughADD(BorrowIn, DAG)) {
53988     MVT VT = N->getSimpleValueType(0);
53989     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
53990     return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs, LHS, RHS, Flags);
53991   }
53992 
53993   // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
53994   // iff the flag result is dead.
53995   if (LHS.getOpcode() == ISD::SUB && isNullConstant(RHS) &&
53996       !N->hasAnyUseOfValue(1))
53997     return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(), LHS.getOperand(0),
53998                        LHS.getOperand(1), BorrowIn);
53999 
54000   return SDValue();
54001 }
54002 
54003 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
54004 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
54005                           TargetLowering::DAGCombinerInfo &DCI) {
54006   SDValue LHS = N->getOperand(0);
54007   SDValue RHS = N->getOperand(1);
54008   SDValue CarryIn = N->getOperand(2);
54009   auto *LHSC = dyn_cast<ConstantSDNode>(LHS);
54010   auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
54011 
54012   // Canonicalize constant to RHS.
54013   if (LHSC && !RHSC)
54014     return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), RHS, LHS,
54015                        CarryIn);
54016 
54017   // If the LHS and RHS of the ADC node are zero, then it can't overflow and
54018   // the result is either zero or one (depending on the input carry bit).
54019   // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
54020   if (LHSC && RHSC && LHSC->isZero() && RHSC->isZero() &&
54021       // We don't have a good way to replace an EFLAGS use, so only do this when
54022       // dead right now.
54023       SDValue(N, 1).use_empty()) {
54024     SDLoc DL(N);
54025     EVT VT = N->getValueType(0);
54026     SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
54027     SDValue Res1 = DAG.getNode(
54028         ISD::AND, DL, VT,
54029         DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
54030                     DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), CarryIn),
54031         DAG.getConstant(1, DL, VT));
54032     return DCI.CombineTo(N, Res1, CarryOut);
54033   }
54034 
54035   // Fold ADC(C1,C2,Carry) -> ADC(0,C1+C2,Carry)
54036   // iff the flag result is dead.
54037   // TODO: Allow flag result if C1+C2 doesn't signed/unsigned overflow.
54038   if (LHSC && RHSC && !LHSC->isZero() && !N->hasAnyUseOfValue(1)) {
54039     SDLoc DL(N);
54040     APInt Sum = LHSC->getAPIntValue() + RHSC->getAPIntValue();
54041     return DAG.getNode(X86ISD::ADC, DL, N->getVTList(),
54042                        DAG.getConstant(0, DL, LHS.getValueType()),
54043                        DAG.getConstant(Sum, DL, LHS.getValueType()), CarryIn);
54044   }
54045 
54046   if (SDValue Flags = combineCarryThroughADD(CarryIn, DAG)) {
54047     MVT VT = N->getSimpleValueType(0);
54048     SDVTList VTs = DAG.getVTList(VT, MVT::i32);
54049     return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs, LHS, RHS, Flags);
54050   }
54051 
54052   // Fold ADC(ADD(X,Y),0,Carry) -> ADC(X,Y,Carry)
54053   // iff the flag result is dead.
54054   if (LHS.getOpcode() == ISD::ADD && RHSC && RHSC->isZero() &&
54055       !N->hasAnyUseOfValue(1))
54056     return DAG.getNode(X86ISD::ADC, SDLoc(N), N->getVTList(), LHS.getOperand(0),
54057                        LHS.getOperand(1), CarryIn);
54058 
54059   return SDValue();
54060 }
54061 
54062 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
54063                             const SDLoc &DL, EVT VT,
54064                             const X86Subtarget &Subtarget) {
54065   // Example of pattern we try to detect:
54066   // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
54067   //(add (build_vector (extract_elt t, 0),
54068   //                   (extract_elt t, 2),
54069   //                   (extract_elt t, 4),
54070   //                   (extract_elt t, 6)),
54071   //     (build_vector (extract_elt t, 1),
54072   //                   (extract_elt t, 3),
54073   //                   (extract_elt t, 5),
54074   //                   (extract_elt t, 7)))
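  // If the pattern matches, the add of the two BUILD_VECTORs computes, per i32
  // result lane i,
  //   x0[2*i]*x1[2*i] + x0[2*i+1]*x1[2*i+1]
  // which is exactly the implicit pairwise multiply-add of VPMADDWD on the
  // narrowed i16 inputs (a sketch of the intent; the checks below define what
  // is actually accepted).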
54075 
54076   if (!Subtarget.hasSSE2())
54077     return SDValue();
54078 
54079   if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
54080       Op1.getOpcode() != ISD::BUILD_VECTOR)
54081     return SDValue();
54082 
54083   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
54084       VT.getVectorNumElements() < 4 ||
54085       !isPowerOf2_32(VT.getVectorNumElements()))
54086     return SDValue();
54087 
54088   // Check if one of Op0,Op1 is of the form:
54089   // (build_vector (extract_elt Mul, 0),
54090   //               (extract_elt Mul, 2),
54091   //               (extract_elt Mul, 4),
54092   //                   ...
54093   // the other is of the form:
54094   // (build_vector (extract_elt Mul, 1),
54095   //               (extract_elt Mul, 3),
54096   //               (extract_elt Mul, 5),
54097   //                   ...
54098   // and identify Mul.
54099   SDValue Mul;
54100   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
54101     SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
54102             Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
54103     // TODO: Be more tolerant to undefs.
54104     if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54105         Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54106         Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54107         Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
54108       return SDValue();
54109     auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
54110     auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
54111     auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
54112     auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
54113     if (!Const0L || !Const1L || !Const0H || !Const1H)
54114       return SDValue();
54115     unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
54116              Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
54117     // Commutativity of mul allows factors of a product to reorder.
54118     if (Idx0L > Idx1L)
54119       std::swap(Idx0L, Idx1L);
54120     if (Idx0H > Idx1H)
54121       std::swap(Idx0H, Idx1H);
54122     // Commutativity of add allows pairs of factors to reorder.
54123     if (Idx0L > Idx0H) {
54124       std::swap(Idx0L, Idx0H);
54125       std::swap(Idx1L, Idx1H);
54126     }
54127     if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
54128         Idx1H != 2 * i + 3)
54129       return SDValue();
54130     if (!Mul) {
54131       // First time an extract_elt's source vector is visited. Must be a MUL
54132       // with twice the number of vector elements of the BUILD_VECTOR.
54133       // Both extracts must be from same MUL.
54134       Mul = Op0L->getOperand(0);
54135       if (Mul->getOpcode() != ISD::MUL ||
54136           Mul.getValueType().getVectorNumElements() != 2 * e)
54137         return SDValue();
54138     }
54139     // Check that the extract is from the same MUL previously seen.
54140     if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
54141         Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
54142       return SDValue();
54143   }
54144 
54145   // Check if the Mul source can be safely shrunk.
54146   ShrinkMode Mode;
54147   if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
54148       Mode == ShrinkMode::MULU16)
54149     return SDValue();
54150 
54151   EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54152                                  VT.getVectorNumElements() * 2);
54153   SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(0));
54154   SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(1));
54155 
54156   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
54157                          ArrayRef<SDValue> Ops) {
54158     EVT InVT = Ops[0].getValueType();
54159     assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
54160     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
54161                                  InVT.getVectorNumElements() / 2);
54162     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
54163   };
54164   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { N0, N1 }, PMADDBuilder);
54165 }
54166 
54167 // Attempt to turn this pattern into PMADDWD.
54168 // (add (mul (sext (build_vector)), (sext (build_vector))),
54169 //      (mul (sext (build_vector)), (sext (build_vector)))
54170 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
54171                               const SDLoc &DL, EVT VT,
54172                               const X86Subtarget &Subtarget) {
54173   if (!Subtarget.hasSSE2())
54174     return SDValue();
54175 
54176   if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
54177     return SDValue();
54178 
54179   if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
54180       VT.getVectorNumElements() < 4 ||
54181       !isPowerOf2_32(VT.getVectorNumElements()))
54182     return SDValue();
54183 
54184   SDValue N00 = N0.getOperand(0);
54185   SDValue N01 = N0.getOperand(1);
54186   SDValue N10 = N1.getOperand(0);
54187   SDValue N11 = N1.getOperand(1);
54188 
54189   // All inputs need to be sign extends.
54190   // TODO: Support ZERO_EXTEND from known positive?
54191   if (N00.getOpcode() != ISD::SIGN_EXTEND ||
54192       N01.getOpcode() != ISD::SIGN_EXTEND ||
54193       N10.getOpcode() != ISD::SIGN_EXTEND ||
54194       N11.getOpcode() != ISD::SIGN_EXTEND)
54195     return SDValue();
54196 
54197   // Peek through the extends.
54198   N00 = N00.getOperand(0);
54199   N01 = N01.getOperand(0);
54200   N10 = N10.getOperand(0);
54201   N11 = N11.getOperand(0);
54202 
54203   // Must be extending from vXi16.
54204   EVT InVT = N00.getValueType();
54205   if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
54206       N10.getValueType() != InVT || N11.getValueType() != InVT)
54207     return SDValue();
54208 
54209   // All inputs should be build_vectors.
54210   if (N00.getOpcode() != ISD::BUILD_VECTOR ||
54211       N01.getOpcode() != ISD::BUILD_VECTOR ||
54212       N10.getOpcode() != ISD::BUILD_VECTOR ||
54213       N11.getOpcode() != ISD::BUILD_VECTOR)
54214     return SDValue();
54215 
54216   // For each result element, the even element of one input vector must be
54217   // multiplied by the even element of the other input vector, and the odd
54218   // element of the first vector must be multiplied by the odd element of the
54219   // other vector. That is, for each element i the following must be computed:
54221   //  A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
54222   SDValue In0, In1;
54223   for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
54224     SDValue N00Elt = N00.getOperand(i);
54225     SDValue N01Elt = N01.getOperand(i);
54226     SDValue N10Elt = N10.getOperand(i);
54227     SDValue N11Elt = N11.getOperand(i);
54228     // TODO: Be more tolerant to undefs.
54229     if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54230         N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54231         N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
54232         N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
54233       return SDValue();
54234     auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
54235     auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
54236     auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
54237     auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
54238     if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
54239       return SDValue();
54240     unsigned IdxN00 = ConstN00Elt->getZExtValue();
54241     unsigned IdxN01 = ConstN01Elt->getZExtValue();
54242     unsigned IdxN10 = ConstN10Elt->getZExtValue();
54243     unsigned IdxN11 = ConstN11Elt->getZExtValue();
54244     // Add is commutative so indices can be reordered.
54245     if (IdxN00 > IdxN10) {
54246       std::swap(IdxN00, IdxN10);
54247       std::swap(IdxN01, IdxN11);
54248     }
54249     // N0 indices must be the even elements; N1 indices the next odd elements.
54250     if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
54251         IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
54252       return SDValue();
54253     SDValue N00In = N00Elt.getOperand(0);
54254     SDValue N01In = N01Elt.getOperand(0);
54255     SDValue N10In = N10Elt.getOperand(0);
54256     SDValue N11In = N11Elt.getOperand(0);
54257 
54258     // First time we find an input capture it.
54259     if (!In0) {
54260       In0 = N00In;
54261       In1 = N01In;
54262 
54263       // The input vectors must be at least as wide as the output.
54264       // If they are larger than the output, we extract subvector below.
54265       if (In0.getValueSizeInBits() < VT.getSizeInBits() ||
54266           In1.getValueSizeInBits() < VT.getSizeInBits())
54267         return SDValue();
54268     }
54269     // Mul is commutative so the input vectors can be in any order.
54270     // Canonicalize to make the compares easier.
54271     if (In0 != N00In)
54272       std::swap(N00In, N01In);
54273     if (In0 != N10In)
54274       std::swap(N10In, N11In);
54275     if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
54276       return SDValue();
54277   }
54278 
54279   auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
54280                          ArrayRef<SDValue> Ops) {
54281     EVT OpVT = Ops[0].getValueType();
54282     assert(OpVT.getScalarType() == MVT::i16 &&
54283            "Unexpected scalar element type");
54284     assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
54285     EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
54286                                  OpVT.getVectorNumElements() / 2);
54287     return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
54288   };
54289 
54290   // If the output is narrower than an input, extract the low part of the input
54291   // vector.
54292   EVT OutVT16 = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
54293                                VT.getVectorNumElements() * 2);
54294   if (OutVT16.bitsLT(In0.getValueType())) {
54295     In0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In0,
54296                       DAG.getIntPtrConstant(0, DL));
54297   }
54298   if (OutVT16.bitsLT(In1.getValueType())) {
54299     In1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OutVT16, In1,
54300                       DAG.getIntPtrConstant(0, DL));
54301   }
54302   return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
54303                           PMADDBuilder);
54304 }
54305 
54306 // ADD(VPMADDWD(X,Y),VPMADDWD(Z,W)) -> VPMADDWD(SHUFFLE(X,Z), SHUFFLE(Y,W))
54307 // If the upper i16 element in each pair of both VPMADDWD operands is zero, we
54308 // can merge the operand elements and use the implicit add of VPMADDWD.
54309 // TODO: Add support for VPMADDUBSW (which isn't commutable).
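// An illustrative sketch, assuming VT = v4i32 (so v8i16 operands): the
// interleaving mask built below is { 0, 8, 2, 10, 4, 12, 6, 14 }, packing the
// lower i16 element of each pair (the elements that may be nonzero) from both
// VPMADDWD nodes into the two operands of the merged VPMADDWD.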
54310 static SDValue combineAddOfPMADDWD(SelectionDAG &DAG, SDValue N0, SDValue N1,
54311                                    const SDLoc &DL, EVT VT) {
54312   if (N0.getOpcode() != N1.getOpcode() || N0.getOpcode() != X86ISD::VPMADDWD)
54313     return SDValue();
54314 
54315   // TODO: Add 256/512-bit support once VPMADDWD combines with shuffles.
54316   if (VT.getSizeInBits() > 128)
54317     return SDValue();
54318 
54319   unsigned NumElts = VT.getVectorNumElements();
54320   MVT OpVT = N0.getOperand(0).getSimpleValueType();
54321   APInt DemandedBits = APInt::getAllOnes(OpVT.getScalarSizeInBits());
54322   APInt DemandedHiElts = APInt::getSplat(2 * NumElts, APInt(2, 2));
54323 
54324   bool Op0HiZero =
54325       DAG.MaskedValueIsZero(N0.getOperand(0), DemandedBits, DemandedHiElts) ||
54326       DAG.MaskedValueIsZero(N0.getOperand(1), DemandedBits, DemandedHiElts);
54327   bool Op1HiZero =
54328       DAG.MaskedValueIsZero(N1.getOperand(0), DemandedBits, DemandedHiElts) ||
54329       DAG.MaskedValueIsZero(N1.getOperand(1), DemandedBits, DemandedHiElts);
54330 
54331   // TODO: Check for zero lower elements once we have actual codegen that
54332   // creates them.
54333   if (!Op0HiZero || !Op1HiZero)
54334     return SDValue();
54335 
54336   // Create a shuffle mask packing the lower elements from each VPMADDWD.
54337   SmallVector<int> Mask;
54338   for (int i = 0; i != (int)NumElts; ++i) {
54339     Mask.push_back(2 * i);
54340     Mask.push_back(2 * (i + NumElts));
54341   }
54342 
54343   SDValue LHS =
54344       DAG.getVectorShuffle(OpVT, DL, N0.getOperand(0), N1.getOperand(0), Mask);
54345   SDValue RHS =
54346       DAG.getVectorShuffle(OpVT, DL, N0.getOperand(1), N1.getOperand(1), Mask);
54347   return DAG.getNode(X86ISD::VPMADDWD, DL, VT, LHS, RHS);
54348 }
54349 
54350 /// CMOV of constants requires materializing constant operands in registers.
54351 /// Try to fold those constants into an 'add' instruction to reduce instruction
54352 /// count. We do this with CMOV rather than the generic 'select' because
54353 /// earlier folds may be used to turn select-of-constants into logic hacks.
54354 static SDValue pushAddIntoCmovOfConsts(SDNode *N, SelectionDAG &DAG,
54355                                        const X86Subtarget &Subtarget) {
54356   // If an operand is zero, add-of-0 gets simplified away, so that's clearly
54357   // better because we eliminate 1-2 instructions. This transform is still
54358   // an improvement without zero operands because we trade 2 move constants and
54359   // 1 add for 2 adds (LEA) as long as the constants can be represented as
54360   // immediate asm operands (fit in 32-bits).
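  // An illustrative sketch (constants chosen arbitrarily):
  //   (add (cmov 0, 42), X) --> (cmov X, (add X, 42))
  // the 0 disappears entirely and the 42 becomes an add/LEA immediate, so no
  // constant needs to be materialized in a register.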
54361   auto isSuitableCmov = [](SDValue V) {
54362     if (V.getOpcode() != X86ISD::CMOV || !V.hasOneUse())
54363       return false;
54364     if (!isa<ConstantSDNode>(V.getOperand(0)) ||
54365         !isa<ConstantSDNode>(V.getOperand(1)))
54366       return false;
54367     return isNullConstant(V.getOperand(0)) || isNullConstant(V.getOperand(1)) ||
54368            (V.getConstantOperandAPInt(0).isSignedIntN(32) &&
54369             V.getConstantOperandAPInt(1).isSignedIntN(32));
54370   };
54371 
54372   // Match an appropriate CMOV as the first operand of the add.
54373   SDValue Cmov = N->getOperand(0);
54374   SDValue OtherOp = N->getOperand(1);
54375   if (!isSuitableCmov(Cmov))
54376     std::swap(Cmov, OtherOp);
54377   if (!isSuitableCmov(Cmov))
54378     return SDValue();
54379 
54380   // Don't remove a load folding opportunity for the add. That would neutralize
54381   // any improvements from removing constant materializations.
54382   if (X86::mayFoldLoad(OtherOp, Subtarget))
54383     return SDValue();
54384 
54385   EVT VT = N->getValueType(0);
54386   SDLoc DL(N);
54387   SDValue FalseOp = Cmov.getOperand(0);
54388   SDValue TrueOp = Cmov.getOperand(1);
54389 
54390   // We will push the add through the select, but we can potentially do better
54391   // if we know there is another add in the sequence and this is pointer math.
54392   // In that case, we can absorb an add into the trailing memory op and avoid
54393   // a 3-operand LEA which is likely slower than a 2-operand LEA.
54394   // TODO: If target has "slow3OpsLEA", do this even without the trailing memop?
54395   if (OtherOp.getOpcode() == ISD::ADD && OtherOp.hasOneUse() &&
54396       !isa<ConstantSDNode>(OtherOp.getOperand(0)) &&
54397       all_of(N->uses(), [&](SDNode *Use) {
54398         auto *MemNode = dyn_cast<MemSDNode>(Use);
54399         return MemNode && MemNode->getBasePtr().getNode() == N;
54400       })) {
54401     // add (cmov C1, C2), add (X, Y) --> add (cmov (add X, C1), (add X, C2)), Y
54402     // TODO: We are arbitrarily choosing op0 as the 1st piece of the sum, but
54403     //       it is possible that choosing op1 might be better.
54404     SDValue X = OtherOp.getOperand(0), Y = OtherOp.getOperand(1);
54405     FalseOp = DAG.getNode(ISD::ADD, DL, VT, X, FalseOp);
54406     TrueOp = DAG.getNode(ISD::ADD, DL, VT, X, TrueOp);
54407     Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp,
54408                        Cmov.getOperand(2), Cmov.getOperand(3));
54409     return DAG.getNode(ISD::ADD, DL, VT, Cmov, Y);
54410   }
54411 
54412   // add (cmov C1, C2), OtherOp --> cmov (add OtherOp, C1), (add OtherOp, C2)
54413   FalseOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, FalseOp);
54414   TrueOp = DAG.getNode(ISD::ADD, DL, VT, OtherOp, TrueOp);
54415   return DAG.getNode(X86ISD::CMOV, DL, VT, FalseOp, TrueOp, Cmov.getOperand(2),
54416                      Cmov.getOperand(3));
54417 }
54418 
54419 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
54420                           TargetLowering::DAGCombinerInfo &DCI,
54421                           const X86Subtarget &Subtarget) {
54422   EVT VT = N->getValueType(0);
54423   SDValue Op0 = N->getOperand(0);
54424   SDValue Op1 = N->getOperand(1);
54425   SDLoc DL(N);
54426 
54427   if (SDValue Select = pushAddIntoCmovOfConsts(N, DAG, Subtarget))
54428     return Select;
54429 
54430   if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, DL, VT, Subtarget))
54431     return MAdd;
54432   if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, DL, VT, Subtarget))
54433     return MAdd;
54434   if (SDValue MAdd = combineAddOfPMADDWD(DAG, Op0, Op1, DL, VT))
54435     return MAdd;
54436 
54437   // Try to synthesize horizontal adds from adds of shuffles.
54438   if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
54439     return V;
54440 
54441   // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
54442   // (sub Y, (sext (vXi1 X))).
54443   // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
54444   // generic DAG combine without a legal type check, but adding this there
54445   // caused regressions.
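  // (A zero-extended vXi1 element is 0 or +1 while a sign-extended one is 0 or
  // -1, so adding the zext is equivalent to subtracting the sext.)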
54446   if (VT.isVector()) {
54447     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54448     if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
54449         Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
54450         TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
54451       SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
54452       return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
54453     }
54454 
54455     if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
54456         Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
54457         TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
54458       SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
54459       return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
54460     }
54461   }
54462 
54463   // Fold ADD(ADC(Y,0,W),X) -> ADC(X,Y,W)
54464   if (Op0.getOpcode() == X86ISD::ADC && Op0->hasOneUse() &&
54465       X86::isZeroNode(Op0.getOperand(1))) {
54466     assert(!Op0->hasAnyUseOfValue(1) && "Overflow bit in use");
54467     return DAG.getNode(X86ISD::ADC, SDLoc(Op0), Op0->getVTList(), Op1,
54468                        Op0.getOperand(0), Op0.getOperand(2));
54469   }
54470 
54471   return combineAddOrSubToADCOrSBB(N, DAG);
54472 }
54473 
54474 // Try to fold (sub Y, cmovns X, -X) -> (add Y, cmovns -X, X) if the cmov
54475 // condition comes from the subtract node that produced -X. This matches the
54476 // cmov expansion for absolute value. By swapping the operands we convert abs
54477 // to nabs.
54478 static SDValue combineSubABS(SDNode *N, SelectionDAG &DAG) {
54479   SDValue N0 = N->getOperand(0);
54480   SDValue N1 = N->getOperand(1);
54481 
54482   if (N1.getOpcode() != X86ISD::CMOV || !N1.hasOneUse())
54483     return SDValue();
54484 
54485   X86::CondCode CC = (X86::CondCode)N1.getConstantOperandVal(2);
54486   if (CC != X86::COND_S && CC != X86::COND_NS)
54487     return SDValue();
54488 
54489   // Condition should come from a negate operation.
54490   SDValue Cond = N1.getOperand(3);
54491   if (Cond.getOpcode() != X86ISD::SUB || !isNullConstant(Cond.getOperand(0)))
54492     return SDValue();
54493   assert(Cond.getResNo() == 1 && "Unexpected result number");
54494 
54495   // Get the X and -X from the negate.
54496   SDValue NegX = Cond.getValue(0);
54497   SDValue X = Cond.getOperand(1);
54498 
54499   SDValue FalseOp = N1.getOperand(0);
54500   SDValue TrueOp = N1.getOperand(1);
54501 
54502   // Cmov operands should be X and NegX. Order doesn't matter.
54503   if (!(TrueOp == X && FalseOp == NegX) && !(TrueOp == NegX && FalseOp == X))
54504     return SDValue();
54505 
54506   // Build a new CMOV with the operands swapped.
54507   SDLoc DL(N);
54508   MVT VT = N->getSimpleValueType(0);
54509   SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VT, TrueOp, FalseOp,
54510                              N1.getOperand(2), Cond);
54511   // Convert sub to add.
54512   return DAG.getNode(ISD::ADD, DL, VT, N0, Cmov);
54513 }
54514 
54515 static SDValue combineSubSetcc(SDNode *N, SelectionDAG &DAG) {
54516   SDValue Op0 = N->getOperand(0);
54517   SDValue Op1 = N->getOperand(1);
54518 
54519   // (sub C (zero_extend (setcc)))
54520   // =>
54521   // (add (zero_extend (setcc inverted)) C-1)   if C is a nonzero immediate
54522   // Don't disturb (sub 0 setcc), which is easily done with neg.
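  // An illustrative sketch (constant chosen arbitrarily):
  //   (sub 5, (zero_extend (setcc e, EFLAGS)))
  //     -> (add (zero_extend (setcc ne, EFLAGS)), 4)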
54523   EVT VT = N->getValueType(0);
54524   auto *Op0C = dyn_cast<ConstantSDNode>(Op0);
54525   if (Op1.getOpcode() == ISD::ZERO_EXTEND && Op1.hasOneUse() && Op0C &&
54526       !Op0C->isZero() && Op1.getOperand(0).getOpcode() == X86ISD::SETCC &&
54527       Op1.getOperand(0).hasOneUse()) {
54528     SDValue SetCC = Op1.getOperand(0);
54529     X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
54530     X86::CondCode NewCC = X86::GetOppositeBranchCondition(CC);
54531     APInt NewImm = Op0C->getAPIntValue() - 1;
54532     SDLoc DL(Op1);
54533     SDValue NewSetCC = getSETCC(NewCC, SetCC.getOperand(1), DL, DAG);
54534     NewSetCC = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, NewSetCC);
54535     return DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(VT, VT), NewSetCC,
54536                        DAG.getConstant(NewImm, DL, VT));
54537   }
54538 
54539   return SDValue();
54540 }
54541 
54542 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
54543                           TargetLowering::DAGCombinerInfo &DCI,
54544                           const X86Subtarget &Subtarget) {
54545   SDValue Op0 = N->getOperand(0);
54546   SDValue Op1 = N->getOperand(1);
54547 
54548   // TODO: Add NoOpaque handling to isConstantIntBuildVectorOrConstantInt.
54549   auto IsNonOpaqueConstant = [&](SDValue Op) {
54550     if (SDNode *C = DAG.isConstantIntBuildVectorOrConstantInt(Op)) {
54551       if (auto *Cst = dyn_cast<ConstantSDNode>(C))
54552         return !Cst->isOpaque();
54553       return true;
54554     }
54555     return false;
54556   };
54557 
54558   // X86 can't encode an immediate LHS of a sub. See if we can push the
54559   // negation into a preceding instruction. If the RHS of the sub is a XOR with
54560   // one use and a constant, invert the immediate, saving one register.
54561   // However, ignore cases where C1 is 0, as those will become a NEG.
54562   // sub(C1, xor(X, C2)) -> add(xor(X, ~C2), C1+1)
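  // (This follows from -V == ~V + 1 and ~(X ^ C2) == X ^ ~C2, so
  //  C1 - (X ^ C2) == (X ^ ~C2) + (C1 + 1).)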
54563   if (Op1.getOpcode() == ISD::XOR && IsNonOpaqueConstant(Op0) &&
54564       !isNullConstant(Op0) && IsNonOpaqueConstant(Op1.getOperand(1)) &&
54565       Op1->hasOneUse()) {
54566     SDLoc DL(N);
54567     EVT VT = Op0.getValueType();
54568     SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT, Op1.getOperand(0),
54569                                  DAG.getNOT(SDLoc(Op1), Op1.getOperand(1), VT));
54570     SDValue NewAdd =
54571         DAG.getNode(ISD::ADD, DL, VT, Op0, DAG.getConstant(1, DL, VT));
54572     return DAG.getNode(ISD::ADD, DL, VT, NewXor, NewAdd);
54573   }
54574 
54575   if (SDValue V = combineSubABS(N, DAG))
54576     return V;
54577 
54578   // Try to synthesize horizontal subs from subs of shuffles.
54579   if (SDValue V = combineToHorizontalAddSub(N, DAG, Subtarget))
54580     return V;
54581 
54582   // Fold SUB(X,ADC(Y,0,W)) -> SBB(X,Y,W)
54583   if (Op1.getOpcode() == X86ISD::ADC && Op1->hasOneUse() &&
54584       X86::isZeroNode(Op1.getOperand(1))) {
54585     assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
54586     return DAG.getNode(X86ISD::SBB, SDLoc(Op1), Op1->getVTList(), Op0,
54587                        Op1.getOperand(0), Op1.getOperand(2));
54588   }
54589 
54590   // Fold SUB(X,SBB(Y,Z,W)) -> SUB(ADC(X,Z,W),Y)
54591   // Don't fold to the ADC(0,0,W)/SETCC_CARRY pattern as it prevents more folds.
54592   if (Op1.getOpcode() == X86ISD::SBB && Op1->hasOneUse() &&
54593       !(X86::isZeroNode(Op0) && X86::isZeroNode(Op1.getOperand(1)))) {
54594     assert(!Op1->hasAnyUseOfValue(1) && "Overflow bit in use");
54595     SDValue ADC = DAG.getNode(X86ISD::ADC, SDLoc(Op1), Op1->getVTList(), Op0,
54596                               Op1.getOperand(1), Op1.getOperand(2));
54597     return DAG.getNode(ISD::SUB, SDLoc(N), Op0.getValueType(), ADC.getValue(0),
54598                        Op1.getOperand(0));
54599   }
54600 
54601   if (SDValue V = combineXorSubCTLZ(N, DAG, Subtarget))
54602     return V;
54603 
54604   if (SDValue V = combineAddOrSubToADCOrSBB(N, DAG))
54605     return V;
54606 
54607   return combineSubSetcc(N, DAG);
54608 }
54609 
54610 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
54611                                     const X86Subtarget &Subtarget) {
54612   MVT VT = N->getSimpleValueType(0);
54613   SDLoc DL(N);
54614 
54615   if (N->getOperand(0) == N->getOperand(1)) {
54616     if (N->getOpcode() == X86ISD::PCMPEQ)
54617       return DAG.getConstant(-1, DL, VT);
54618     if (N->getOpcode() == X86ISD::PCMPGT)
54619       return DAG.getConstant(0, DL, VT);
54620   }
54621 
54622   return SDValue();
54623 }
54624 
54625 /// Helper that combines an array of subvector ops as if they were the operands
54626 /// of an ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
54627 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
54628 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
54629                                       ArrayRef<SDValue> Ops, SelectionDAG &DAG,
54630                                       TargetLowering::DAGCombinerInfo &DCI,
54631                                       const X86Subtarget &Subtarget) {
54632   assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
54633   unsigned EltSizeInBits = VT.getScalarSizeInBits();
54634 
54635   if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
54636     return DAG.getUNDEF(VT);
54637 
54638   if (llvm::all_of(Ops, [](SDValue Op) {
54639         return ISD::isBuildVectorAllZeros(Op.getNode());
54640       }))
54641     return getZeroVector(VT, Subtarget, DAG, DL);
54642 
54643   SDValue Op0 = Ops[0];
54644   bool IsSplat = llvm::all_equal(Ops);
54645   unsigned NumOps = Ops.size();
54646   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
54647   LLVMContext &Ctx = *DAG.getContext();
54648 
54649   // Repeated subvectors.
54650   if (IsSplat &&
54651       (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
54652     // If this broadcast is inserted into both halves, use a larger broadcast.
54653     if (Op0.getOpcode() == X86ISD::VBROADCAST)
54654       return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
54655 
54656     // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
54657     if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
54658         (Subtarget.hasAVX2() ||
54659          X86::mayFoldLoadIntoBroadcastFromMem(Op0.getOperand(0),
54660                                               VT.getScalarType(), Subtarget)))
54661       return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
54662                          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
54663                                      Op0.getOperand(0),
54664                                      DAG.getIntPtrConstant(0, DL)));
54665 
54666     // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
54667     if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
54668         (Subtarget.hasAVX2() ||
54669          (EltSizeInBits >= 32 &&
54670           X86::mayFoldLoad(Op0.getOperand(0), Subtarget))) &&
54671         Op0.getOperand(0).getValueType() == VT.getScalarType())
54672       return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
54673 
54674     // concat_vectors(extract_subvector(broadcast(x)),
54675     //                extract_subvector(broadcast(x))) -> broadcast(x)
54676     // concat_vectors(extract_subvector(subv_broadcast(x)),
54677     //                extract_subvector(subv_broadcast(x))) -> subv_broadcast(x)
54678     if (Op0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
54679         Op0.getOperand(0).getValueType() == VT) {
54680       SDValue SrcVec = Op0.getOperand(0);
54681       if (SrcVec.getOpcode() == X86ISD::VBROADCAST ||
54682           SrcVec.getOpcode() == X86ISD::VBROADCAST_LOAD)
54683         return Op0.getOperand(0);
54684       if (SrcVec.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
54685           Op0.getValueType() == cast<MemSDNode>(SrcVec)->getMemoryVT())
54686         return Op0.getOperand(0);
54687     }
54688 
54689     // concat_vectors(permq(x),permq(x)) -> permq(concat_vectors(x,x))
54690     if (Op0.getOpcode() == X86ISD::VPERMI && Subtarget.useAVX512Regs() &&
54691         !X86::mayFoldLoad(Op0.getOperand(0), Subtarget))
54692       return DAG.getNode(Op0.getOpcode(), DL, VT,
54693                          DAG.getNode(ISD::CONCAT_VECTORS, DL, VT,
54694                                      Op0.getOperand(0), Op0.getOperand(0)),
54695                          Op0.getOperand(1));
54696   }
54697 
54698   // concat(extract_subvector(v0,c0), extract_subvector(v1,c1)) -> vperm2x128.
54699   // Only concat subvector high halves, which vperm2x128 is best at.
54700   // TODO: This should go in combineX86ShufflesRecursively eventually.
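  // An illustrative sketch (v8f32 chosen arbitrarily):
  //   concat(extract_subvector(V0, 4), extract_subvector(V1, 4))
  //     -> (vperm2f128 V0, V1, 0x31)
  // i.e. the upper 128-bit halves of V0 and V1.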
54701   if (VT.is256BitVector() && NumOps == 2) {
54702     SDValue Src0 = peekThroughBitcasts(Ops[0]);
54703     SDValue Src1 = peekThroughBitcasts(Ops[1]);
54704     if (Src0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
54705         Src1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
54706       EVT SrcVT0 = Src0.getOperand(0).getValueType();
54707       EVT SrcVT1 = Src1.getOperand(0).getValueType();
54708       unsigned NumSrcElts0 = SrcVT0.getVectorNumElements();
54709       unsigned NumSrcElts1 = SrcVT1.getVectorNumElements();
54710       if (SrcVT0.is256BitVector() && SrcVT1.is256BitVector() &&
54711           Src0.getConstantOperandAPInt(1) == (NumSrcElts0 / 2) &&
54712           Src1.getConstantOperandAPInt(1) == (NumSrcElts1 / 2)) {
54713         return DAG.getNode(X86ISD::VPERM2X128, DL, VT,
54714                            DAG.getBitcast(VT, Src0.getOperand(0)),
54715                            DAG.getBitcast(VT, Src1.getOperand(0)),
54716                            DAG.getTargetConstant(0x31, DL, MVT::i8));
54717       }
54718     }
54719   }
54720 
54721   // Repeated opcode.
54722   // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
54723   // but it currently struggles with different vector widths.
54724   if (llvm::all_of(Ops, [Op0](SDValue Op) {
54725         return Op.getOpcode() == Op0.getOpcode() && Op.hasOneUse();
54726       })) {
54727     auto ConcatSubOperand = [&](EVT VT, ArrayRef<SDValue> SubOps, unsigned I) {
54728       SmallVector<SDValue> Subs;
54729       for (SDValue SubOp : SubOps)
54730         Subs.push_back(SubOp.getOperand(I));
54731       return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
54732     };
54733     auto IsConcatFree = [](MVT VT, ArrayRef<SDValue> SubOps, unsigned Op) {
54734       bool AllConstants = true;
54735       bool AllSubVectors = true;
54736       for (unsigned I = 0, E = SubOps.size(); I != E; ++I) {
54737         SDValue Sub = SubOps[I].getOperand(Op);
54738         unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
54739         SDValue BC = peekThroughBitcasts(Sub);
54740         AllConstants &= ISD::isBuildVectorOfConstantSDNodes(BC.getNode()) ||
54741                         ISD::isBuildVectorOfConstantFPSDNodes(BC.getNode());
54742         AllSubVectors &= Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
54743                          Sub.getOperand(0).getValueType() == VT &&
54744                          Sub.getConstantOperandAPInt(1) == (I * NumSubElts);
54745       }
54746       return AllConstants || AllSubVectors;
54747     };
54748 
54749     switch (Op0.getOpcode()) {
54750     case X86ISD::VBROADCAST: {
54751       if (!IsSplat && llvm::all_of(Ops, [](SDValue Op) {
54752             return Op.getOperand(0).getValueType().is128BitVector();
54753           })) {
54754         if (VT == MVT::v4f64 || VT == MVT::v4i64)
54755           return DAG.getNode(X86ISD::UNPCKL, DL, VT,
54756                              ConcatSubOperand(VT, Ops, 0),
54757                              ConcatSubOperand(VT, Ops, 0));
54758         // TODO: Add pseudo v8i32 PSHUFD handling to AVX1Only targets.
54759         if (VT == MVT::v8f32 || (VT == MVT::v8i32 && Subtarget.hasInt256()))
54760           return DAG.getNode(VT == MVT::v8f32 ? X86ISD::VPERMILPI
54761                                               : X86ISD::PSHUFD,
54762                              DL, VT, ConcatSubOperand(VT, Ops, 0),
54763                              getV4X86ShuffleImm8ForMask({0, 0, 0, 0}, DL, DAG));
54764       }
54765       break;
54766     }
54767     case X86ISD::MOVDDUP:
54768     case X86ISD::MOVSHDUP:
54769     case X86ISD::MOVSLDUP: {
54770       if (!IsSplat)
54771         return DAG.getNode(Op0.getOpcode(), DL, VT,
54772                            ConcatSubOperand(VT, Ops, 0));
54773       break;
54774     }
54775     case X86ISD::SHUFP: {
54776       // Add SHUFPD support if/when necessary.
54777       if (!IsSplat && VT.getScalarType() == MVT::f32 &&
54778           llvm::all_of(Ops, [Op0](SDValue Op) {
54779             return Op.getOperand(2) == Op0.getOperand(2);
54780           })) {
54781         return DAG.getNode(Op0.getOpcode(), DL, VT,
54782                            ConcatSubOperand(VT, Ops, 0),
54783                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
54784       }
54785       break;
54786     }
54787     case X86ISD::UNPCKH:
54788     case X86ISD::UNPCKL: {
54789       // Don't concatenate build_vector patterns.
54790       if (!IsSplat && EltSizeInBits >= 32 &&
54791           ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54792            (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
54793           none_of(Ops, [](SDValue Op) {
54794             return peekThroughBitcasts(Op.getOperand(0)).getOpcode() ==
54795                        ISD::SCALAR_TO_VECTOR ||
54796                    peekThroughBitcasts(Op.getOperand(1)).getOpcode() ==
54797                        ISD::SCALAR_TO_VECTOR;
54798           })) {
54799         return DAG.getNode(Op0.getOpcode(), DL, VT,
54800                            ConcatSubOperand(VT, Ops, 0),
54801                            ConcatSubOperand(VT, Ops, 1));
54802       }
54803       break;
54804     }
54805     case X86ISD::PSHUFHW:
54806     case X86ISD::PSHUFLW:
54807     case X86ISD::PSHUFD:
54808       if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
54809           Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
54810         return DAG.getNode(Op0.getOpcode(), DL, VT,
54811                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
54812       }
54813       [[fallthrough]];
54814     case X86ISD::VPERMILPI:
54815       if (!IsSplat && EltSizeInBits == 32 &&
54816           (VT.is256BitVector() ||
54817            (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
54818           all_of(Ops, [&Op0](SDValue Op) {
54819             return Op0.getOperand(1) == Op.getOperand(1);
54820           })) {
54821         MVT FloatVT = VT.changeVectorElementType(MVT::f32);
54822         SDValue Res = DAG.getBitcast(FloatVT, ConcatSubOperand(VT, Ops, 0));
54823         Res =
54824             DAG.getNode(X86ISD::VPERMILPI, DL, FloatVT, Res, Op0.getOperand(1));
54825         return DAG.getBitcast(VT, Res);
54826       }
54827       if (!IsSplat && NumOps == 2 && VT == MVT::v4f64) {
54828         uint64_t Idx0 = Ops[0].getConstantOperandVal(1);
54829         uint64_t Idx1 = Ops[1].getConstantOperandVal(1);
54830         uint64_t Idx = ((Idx1 & 3) << 2) | (Idx0 & 3);
54831         return DAG.getNode(Op0.getOpcode(), DL, VT,
54832                            ConcatSubOperand(VT, Ops, 0),
54833                            DAG.getTargetConstant(Idx, DL, MVT::i8));
54834       }
54835       break;
54836     case X86ISD::PSHUFB:
54837     case X86ISD::PSADBW:
54838       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
54839                        (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
54840         MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
54841         SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
54842                                  NumOps * SrcVT.getVectorNumElements());
54843         return DAG.getNode(Op0.getOpcode(), DL, VT,
54844                            ConcatSubOperand(SrcVT, Ops, 0),
54845                            ConcatSubOperand(SrcVT, Ops, 1));
54846       }
54847       break;
54848     case X86ISD::VPERMV:
54849       if (!IsSplat && NumOps == 2 &&
54850           (VT.is512BitVector() && Subtarget.useAVX512Regs())) {
54851         MVT OpVT = Op0.getSimpleValueType();
54852         int NumSrcElts = OpVT.getVectorNumElements();
54853         SmallVector<int, 64> ConcatMask;
54854         for (unsigned i = 0; i != NumOps; ++i) {
54855           SmallVector<int, 64> SubMask;
54856           SmallVector<SDValue, 2> SubOps;
54857           if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
54858                                     SubMask))
54859             break;
54860           for (int M : SubMask) {
54861             if (0 <= M)
54862               M += i * NumSrcElts;
54863             ConcatMask.push_back(M);
54864           }
54865         }
54866         if (ConcatMask.size() == (NumOps * NumSrcElts)) {
54867           SDValue Src = concatSubVectors(Ops[0].getOperand(1),
54868                                          Ops[1].getOperand(1), DAG, DL);
54869           MVT IntMaskSVT = MVT::getIntegerVT(EltSizeInBits);
54870           MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
54871           SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
54872           return DAG.getNode(X86ISD::VPERMV, DL, VT, Mask, Src);
54873         }
54874       }
54875       break;
54876     case X86ISD::VPERMV3:
54877       if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
54878         MVT OpVT = Op0.getSimpleValueType();
54879         int NumSrcElts = OpVT.getVectorNumElements();
54880         SmallVector<int, 64> ConcatMask;
54881         for (unsigned i = 0; i != NumOps; ++i) {
54882           SmallVector<int, 64> SubMask;
54883           SmallVector<SDValue, 2> SubOps;
54884           if (!getTargetShuffleMask(Ops[i].getNode(), OpVT, false, SubOps,
54885                                     SubMask))
54886             break;
54887           for (int M : SubMask) {
54888             if (0 <= M) {
54889               M += M < NumSrcElts ? 0 : NumSrcElts;
54890               M += i * NumSrcElts;
54891             }
54892             ConcatMask.push_back(M);
54893           }
54894         }
54895         if (ConcatMask.size() == (NumOps * NumSrcElts)) {
54896           SDValue Src0 = concatSubVectors(Ops[0].getOperand(0),
54897                                           Ops[1].getOperand(0), DAG, DL);
54898           SDValue Src1 = concatSubVectors(Ops[0].getOperand(2),
54899                                           Ops[1].getOperand(2), DAG, DL);
54900           MVT IntMaskSVT = MVT::getIntegerVT(EltSizeInBits);
54901           MVT IntMaskVT = MVT::getVectorVT(IntMaskSVT, NumOps * NumSrcElts);
54902           SDValue Mask = getConstVector(ConcatMask, IntMaskVT, DAG, DL, true);
54903           return DAG.getNode(X86ISD::VPERMV3, DL, VT, Src0, Mask, Src1);
54904         }
54905       }
54906       break;
54907     case X86ISD::VPERM2X128: {
54908       if (!IsSplat && VT.is512BitVector() && Subtarget.useAVX512Regs()) {
54909         assert(NumOps == 2 && "Bad concat_vectors operands");
54910         unsigned Imm0 = Ops[0].getConstantOperandVal(2);
54911         unsigned Imm1 = Ops[1].getConstantOperandVal(2);
54912         // TODO: Handle zero'd subvectors.
54913         if ((Imm0 & 0x88) == 0 && (Imm1 & 0x88) == 0) {
54914           int Mask[4] = {(int)(Imm0 & 0x03), (int)((Imm0 >> 4) & 0x3),
54915                          (int)(Imm1 & 0x03), (int)((Imm1 >> 4) & 0x3)};
54916           MVT ShuffleVT = VT.isFloatingPoint() ? MVT::v8f64 : MVT::v8i64;
54917           SDValue LHS = concatSubVectors(Ops[0].getOperand(0),
54918                                          Ops[0].getOperand(1), DAG, DL);
54919           SDValue RHS = concatSubVectors(Ops[1].getOperand(0),
54920                                          Ops[1].getOperand(1), DAG, DL);
54921           SDValue Res = DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
54922                                     DAG.getBitcast(ShuffleVT, LHS),
54923                                     DAG.getBitcast(ShuffleVT, RHS),
54924                                     getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
54925           return DAG.getBitcast(VT, Res);
54926         }
54927       }
54928       break;
54929     }
54930     case X86ISD::SHUF128: {
54931       if (!IsSplat && NumOps == 2 && VT.is512BitVector()) {
54932         unsigned Imm0 = Ops[0].getConstantOperandVal(2);
54933         unsigned Imm1 = Ops[1].getConstantOperandVal(2);
54934         unsigned Imm = ((Imm0 & 1) << 0) | ((Imm0 & 2) << 1) | 0x08 |
54935                        ((Imm1 & 1) << 4) | ((Imm1 & 2) << 5) | 0x80;
54936         SDValue LHS = concatSubVectors(Ops[0].getOperand(0),
54937                                        Ops[0].getOperand(1), DAG, DL);
54938         SDValue RHS = concatSubVectors(Ops[1].getOperand(0),
54939                                        Ops[1].getOperand(1), DAG, DL);
54940         return DAG.getNode(X86ISD::SHUF128, DL, VT, LHS, RHS,
54941                            DAG.getTargetConstant(Imm, DL, MVT::i8));
54942       }
54943       break;
54944     }
54945     case ISD::TRUNCATE:
54946       if (!IsSplat && NumOps == 2 && VT.is256BitVector()) {
54947         EVT SrcVT = Ops[0].getOperand(0).getValueType();
54948         if (SrcVT.is256BitVector() && SrcVT.isSimple() &&
54949             SrcVT == Ops[1].getOperand(0).getValueType() &&
54950             Subtarget.useAVX512Regs() &&
54951             Subtarget.getPreferVectorWidth() >= 512 &&
54952             (SrcVT.getScalarSizeInBits() > 16 || Subtarget.useBWIRegs())) {
54953           EVT NewSrcVT = SrcVT.getDoubleNumVectorElementsVT(Ctx);
54954           return DAG.getNode(ISD::TRUNCATE, DL, VT,
54955                              ConcatSubOperand(NewSrcVT, Ops, 0));
54956         }
54957       }
54958       break;
54959     case X86ISD::VSHLI:
54960     case X86ISD::VSRLI:
54961       // Special case: AVX1 SHL/SRL v4i64 by 32 bits can lower as a shuffle.
54962       // TODO: Move this to LowerShiftByScalarImmediate?
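      // Illustrative sketch: with Y = the concatenated shift input,
      // (shl by 32) -> shuffle(bitcast<v8i32> Y, zero, {8,0,8,2,8,4,8,6})
      // (srl by 32) -> shuffle(bitcast<v8i32> Y, zero, {1,8,3,8,5,8,7,8})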
54963       if (VT == MVT::v4i64 && !Subtarget.hasInt256() &&
54964           llvm::all_of(Ops, [](SDValue Op) {
54965             return Op.getConstantOperandAPInt(1) == 32;
54966           })) {
54967         SDValue Res = DAG.getBitcast(MVT::v8i32, ConcatSubOperand(VT, Ops, 0));
54968         SDValue Zero = getZeroVector(MVT::v8i32, Subtarget, DAG, DL);
54969         if (Op0.getOpcode() == X86ISD::VSHLI) {
54970           Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
54971                                      {8, 0, 8, 2, 8, 4, 8, 6});
54972         } else {
54973           Res = DAG.getVectorShuffle(MVT::v8i32, DL, Res, Zero,
54974                                      {1, 8, 3, 8, 5, 8, 7, 8});
54975         }
54976         return DAG.getBitcast(VT, Res);
54977       }
54978       [[fallthrough]];
54979     case X86ISD::VSRAI:
54980     case X86ISD::VSHL:
54981     case X86ISD::VSRL:
54982     case X86ISD::VSRA:
54983       if (((VT.is256BitVector() && Subtarget.hasInt256()) ||
54984            (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
54985             (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
54986           llvm::all_of(Ops, [Op0](SDValue Op) {
54987             return Op0.getOperand(1) == Op.getOperand(1);
54988           })) {
54989         return DAG.getNode(Op0.getOpcode(), DL, VT,
54990                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
54991       }
54992       break;
54993     case X86ISD::VPERMI:
54994     case X86ISD::VROTLI:
54995     case X86ISD::VROTRI:
54996       if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
54997           llvm::all_of(Ops, [Op0](SDValue Op) {
54998             return Op0.getOperand(1) == Op.getOperand(1);
54999           })) {
55000         return DAG.getNode(Op0.getOpcode(), DL, VT,
55001                            ConcatSubOperand(VT, Ops, 0), Op0.getOperand(1));
55002       }
55003       break;
55004     case ISD::AND:
55005     case ISD::OR:
55006     case ISD::XOR:
55007     case X86ISD::ANDNP:
55008       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55009                        (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
55010         return DAG.getNode(Op0.getOpcode(), DL, VT,
55011                            ConcatSubOperand(VT, Ops, 0),
55012                            ConcatSubOperand(VT, Ops, 1));
55013       }
55014       break;
55015     case X86ISD::PCMPEQ:
55016     case X86ISD::PCMPGT:
55017       if (!IsSplat && VT.is256BitVector() && Subtarget.hasInt256() &&
55018           (IsConcatFree(VT, Ops, 0) || IsConcatFree(VT, Ops, 1))) {
55019         return DAG.getNode(Op0.getOpcode(), DL, VT,
55020                            ConcatSubOperand(VT, Ops, 0),
55021                            ConcatSubOperand(VT, Ops, 1));
55022       }
55023       break;
55024     case ISD::CTPOP:
55025     case ISD::CTTZ:
55026     case ISD::CTLZ:
55027     case ISD::CTTZ_ZERO_UNDEF:
55028     case ISD::CTLZ_ZERO_UNDEF:
55029       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55030                        (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
55031         return DAG.getNode(Op0.getOpcode(), DL, VT,
55032                            ConcatSubOperand(VT, Ops, 0));
55033       }
55034       break;
55035     case X86ISD::GF2P8AFFINEQB:
55036       if (!IsSplat &&
55037           (VT.is256BitVector() ||
55038            (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
55039           llvm::all_of(Ops, [Op0](SDValue Op) {
55040             return Op0.getOperand(2) == Op.getOperand(2);
55041           })) {
55042         return DAG.getNode(Op0.getOpcode(), DL, VT,
55043                            ConcatSubOperand(VT, Ops, 0),
55044                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
55045       }
55046       break;
55047     case ISD::ADD:
55048     case ISD::SUB:
55049     case ISD::MUL:
55050       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55051                        (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
55052                         (EltSizeInBits >= 32 || Subtarget.useBWIRegs())))) {
55053         return DAG.getNode(Op0.getOpcode(), DL, VT,
55054                            ConcatSubOperand(VT, Ops, 0),
55055                            ConcatSubOperand(VT, Ops, 1));
55056       }
55057       break;
55058     // VADD, VSUB and VMUL can execute on more ports than VINSERT and their
55059     // latencies are short, so we don't replace them here.
55060     case ISD::FDIV:
55061       if (!IsSplat && (VT.is256BitVector() ||
55062                        (VT.is512BitVector() && Subtarget.useAVX512Regs()))) {
55063         return DAG.getNode(Op0.getOpcode(), DL, VT,
55064                            ConcatSubOperand(VT, Ops, 0),
55065                            ConcatSubOperand(VT, Ops, 1));
55066       }
55067       break;
55068     case X86ISD::HADD:
55069     case X86ISD::HSUB:
55070     case X86ISD::FHADD:
55071     case X86ISD::FHSUB:
55072       if (!IsSplat && VT.is256BitVector() &&
55073           (VT.isFloatingPoint() || Subtarget.hasInt256())) {
55074         return DAG.getNode(Op0.getOpcode(), DL, VT,
55075                            ConcatSubOperand(VT, Ops, 0),
55076                            ConcatSubOperand(VT, Ops, 1));
55077       }
55078       break;
55079     case X86ISD::PACKSS:
55080     case X86ISD::PACKUS:
55081       if (!IsSplat && ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55082                        (VT.is512BitVector() && Subtarget.useBWIRegs()))) {
55083         MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
55084         SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
55085                                  NumOps * SrcVT.getVectorNumElements());
55086         return DAG.getNode(Op0.getOpcode(), DL, VT,
55087                            ConcatSubOperand(SrcVT, Ops, 0),
55088                            ConcatSubOperand(SrcVT, Ops, 1));
55089       }
55090       break;
55091     case X86ISD::PALIGNR:
55092       if (!IsSplat &&
55093           ((VT.is256BitVector() && Subtarget.hasInt256()) ||
55094            (VT.is512BitVector() && Subtarget.useBWIRegs())) &&
55095           llvm::all_of(Ops, [Op0](SDValue Op) {
55096             return Op0.getOperand(2) == Op.getOperand(2);
55097           })) {
55098         return DAG.getNode(Op0.getOpcode(), DL, VT,
55099                            ConcatSubOperand(VT, Ops, 0),
55100                            ConcatSubOperand(VT, Ops, 1), Op0.getOperand(2));
55101       }
55102       break;
55103     case X86ISD::BLENDI:
55104       if (NumOps == 2 && VT.is512BitVector() && Subtarget.useBWIRegs()) {
55105         uint64_t Mask0 = Ops[0].getConstantOperandVal(2);
55106         uint64_t Mask1 = Ops[1].getConstantOperandVal(2);
55107         uint64_t Mask = (Mask1 << (VT.getVectorNumElements() / 2)) | Mask0;
55108         MVT MaskSVT = MVT::getIntegerVT(VT.getVectorNumElements());
55109         MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
55110         SDValue Sel =
55111             DAG.getBitcast(MaskVT, DAG.getConstant(Mask, DL, MaskSVT));
55112         return DAG.getSelect(DL, VT, Sel, ConcatSubOperand(VT, Ops, 1),
55113                              ConcatSubOperand(VT, Ops, 0));
55114       }
55115       break;
55116     case ISD::VSELECT:
55117       if (!IsSplat && Subtarget.hasAVX512() &&
55118           (VT.is256BitVector() ||
55119            (VT.is512BitVector() && Subtarget.useAVX512Regs())) &&
55120           (EltSizeInBits >= 32 || Subtarget.hasBWI())) {
55121         EVT SelVT = Ops[0].getOperand(0).getValueType();
55122         if (SelVT.getVectorElementType() == MVT::i1) {
55123           SelVT = EVT::getVectorVT(Ctx, MVT::i1,
55124                                    NumOps * SelVT.getVectorNumElements());
55125           if (TLI.isTypeLegal(SelVT))
55126             return DAG.getNode(Op0.getOpcode(), DL, VT,
55127                                ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
55128                                ConcatSubOperand(VT, Ops, 1),
55129                                ConcatSubOperand(VT, Ops, 2));
55130         }
55131       }
55132       [[fallthrough]];
55133     case X86ISD::BLENDV:
55134       if (!IsSplat && VT.is256BitVector() && NumOps == 2 &&
55135           (EltSizeInBits >= 32 || Subtarget.hasInt256()) &&
55136           IsConcatFree(VT, Ops, 1) && IsConcatFree(VT, Ops, 2)) {
55137         EVT SelVT = Ops[0].getOperand(0).getValueType();
55138         SelVT = SelVT.getDoubleNumVectorElementsVT(Ctx);
55139         if (TLI.isTypeLegal(SelVT))
55140           return DAG.getNode(Op0.getOpcode(), DL, VT,
55141                              ConcatSubOperand(SelVT.getSimpleVT(), Ops, 0),
55142                              ConcatSubOperand(VT, Ops, 1),
55143                              ConcatSubOperand(VT, Ops, 2));
55144       }
55145       break;
55146     }
55147   }
55148 
55149   // Fold subvector loads into one.
55150   // If needed, look through bitcasts to get to the load.
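  // e.g. (illustrative) concat(load p, load p+16) can become one wider load
  // when the loads are consecutive and a fast unaligned access of the full
  // width is allowed.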
55151   if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
55152     unsigned Fast;
55153     const X86TargetLowering *TLI = Subtarget.getTargetLowering();
55154     if (TLI->allowsMemoryAccess(Ctx, DAG.getDataLayout(), VT,
55155                                 *FirstLd->getMemOperand(), &Fast) &&
55156         Fast) {
55157       if (SDValue Ld =
55158               EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
55159         return Ld;
55160     }
55161   }
55162 
55163   // Attempt to fold target constant loads.
55164   if (all_of(Ops, [](SDValue Op) { return getTargetConstantFromNode(Op); })) {
55165     SmallVector<APInt> EltBits;
55166     APInt UndefElts = APInt::getZero(VT.getVectorNumElements());
55167     for (unsigned I = 0; I != NumOps; ++I) {
55168       APInt OpUndefElts;
55169       SmallVector<APInt> OpEltBits;
55170       if (!getTargetConstantBitsFromNode(Ops[I], EltSizeInBits, OpUndefElts,
55171                                          OpEltBits, true, false))
55172         break;
55173       EltBits.append(OpEltBits);
55174       UndefElts.insertBits(OpUndefElts, I * OpUndefElts.getBitWidth());
55175     }
55176     if (EltBits.size() == VT.getVectorNumElements()) {
55177       Constant *C = getConstantVector(VT, EltBits, UndefElts, Ctx);
55178       MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
55179       SDValue CV = DAG.getConstantPool(C, PVT);
55180       MachineFunction &MF = DAG.getMachineFunction();
55181       MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
55182       SDValue Ld = DAG.getLoad(VT, DL, DAG.getEntryNode(), CV, MPI);
55183       SDValue Sub = extractSubVector(Ld, 0, DAG, DL, Op0.getValueSizeInBits());
55184       DAG.ReplaceAllUsesOfValueWith(Op0, Sub);
55185       return Ld;
55186     }
55187   }
55188 
55189   // If this simple subvector or scalar/subvector broadcast_load is inserted
55190   // into both halves, use a larger broadcast_load. Update other uses to use
55191   // an extracted subvector.
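  // e.g. (illustrative) concat(x, x) with x = (v2f64 load p) can become a
  // (v4f64 subv_broadcast_load p), and other users of x are rewritten to an
  // extract_subvector of the broadcast result.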
55192   if (IsSplat &&
55193       (VT.is256BitVector() || (VT.is512BitVector() && Subtarget.hasAVX512()))) {
55194     if (ISD::isNormalLoad(Op0.getNode()) ||
55195         Op0.getOpcode() == X86ISD::VBROADCAST_LOAD ||
55196         Op0.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) {
55197       auto *Mem = cast<MemSDNode>(Op0);
55198       unsigned Opc = Op0.getOpcode() == X86ISD::VBROADCAST_LOAD
55199                          ? X86ISD::VBROADCAST_LOAD
55200                          : X86ISD::SUBV_BROADCAST_LOAD;
55201       if (SDValue BcastLd =
55202               getBROADCAST_LOAD(Opc, DL, VT, Mem->getMemoryVT(), Mem, 0, DAG)) {
55203         SDValue BcastSrc =
55204             extractSubVector(BcastLd, 0, DAG, DL, Op0.getValueSizeInBits());
55205         DAG.ReplaceAllUsesOfValueWith(Op0, BcastSrc);
55206         return BcastLd;
55207       }
55208     }
55209   }
55210 
55211   // If we're splatting a 128-bit subvector to 512-bits, use SHUF128 directly.
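  // e.g. (illustrative) concat(x, x, x, x) widens the 128-bit x to 512 bits
  // and replicates lane 0 via SHUF128 with a {0,0,0,0} lane mask.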
55212   if (IsSplat && NumOps == 4 && VT.is512BitVector() &&
55213       Subtarget.useAVX512Regs()) {
55214     MVT ShuffleVT = VT.isFloatingPoint() ? MVT::v8f64 : MVT::v8i64;
55215     SDValue Res = widenSubVector(Op0, false, Subtarget, DAG, DL, 512);
55216     Res = DAG.getBitcast(ShuffleVT, Res);
55217     Res = DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT, Res, Res,
55218                       getV4X86ShuffleImm8ForMask({0, 0, 0, 0}, DL, DAG));
55219     return DAG.getBitcast(VT, Res);
55220   }
55221 
55222   return SDValue();
55223 }
55224 
55225 static SDValue combineCONCAT_VECTORS(SDNode *N, SelectionDAG &DAG,
55226                                      TargetLowering::DAGCombinerInfo &DCI,
55227                                      const X86Subtarget &Subtarget) {
55228   EVT VT = N->getValueType(0);
55229   EVT SrcVT = N->getOperand(0).getValueType();
55230   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55231   SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
55232 
55233   if (VT.getVectorElementType() == MVT::i1) {
55234     // Attempt to constant fold.
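    // e.g. (illustrative) concat(v8i1 bitcast(i8 C0), v8i1 bitcast(i8 C1))
    // folds to bitcast(i16 ((C1 << 8) | C0)) when i16 is a legal type.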
55235     unsigned SubSizeInBits = SrcVT.getSizeInBits();
55236     APInt Constant = APInt::getZero(VT.getSizeInBits());
55237     for (unsigned I = 0, E = Ops.size(); I != E; ++I) {
55238       auto *C = dyn_cast<ConstantSDNode>(peekThroughBitcasts(Ops[I]));
55239       if (!C) break;
55240       Constant.insertBits(C->getAPIntValue(), I * SubSizeInBits);
55241       if (I == (E - 1)) {
55242         EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
55243         if (TLI.isTypeLegal(IntVT))
55244           return DAG.getBitcast(VT, DAG.getConstant(Constant, SDLoc(N), IntVT));
55245       }
55246     }
55247 
55248     // Don't do anything else for i1 vectors.
55249     return SDValue();
55250   }
55251 
55252   if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
55253     if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
55254                                            DCI, Subtarget))
55255       return R;
55256   }
55257 
55258   return SDValue();
55259 }
55260 
55261 static SDValue combineINSERT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
55262                                        TargetLowering::DAGCombinerInfo &DCI,
55263                                        const X86Subtarget &Subtarget) {
55264   if (DCI.isBeforeLegalizeOps())
55265     return SDValue();
55266 
55267   MVT OpVT = N->getSimpleValueType(0);
55268 
55269   bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
55270 
55271   SDLoc dl(N);
55272   SDValue Vec = N->getOperand(0);
55273   SDValue SubVec = N->getOperand(1);
55274 
55275   uint64_t IdxVal = N->getConstantOperandVal(2);
55276   MVT SubVecVT = SubVec.getSimpleValueType();
55277 
55278   if (Vec.isUndef() && SubVec.isUndef())
55279     return DAG.getUNDEF(OpVT);
55280 
55281   // Inserting undefs/zeros into zeros/undefs is a zero vector.
55282   if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
55283       (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
55284     return getZeroVector(OpVT, Subtarget, DAG, dl);
55285 
55286   if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
55287     // If we're inserting into a zero vector and then into a larger zero vector,
55288     // just insert into the larger zero vector directly.
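    // e.g. (illustrative)
    //   insert_subvector(zero, insert_subvector(zero, X, 2), 4)
    //     --> insert_subvector(zero, X, 6)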
55289     if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
55290         ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
55291       uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
55292       return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55293                          getZeroVector(OpVT, Subtarget, DAG, dl),
55294                          SubVec.getOperand(1),
55295                          DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
55296     }
55297 
55298     // If we're inserting into a zero vector and our input was extracted from an
55299     // insert into a zero vector of the same type, and the extraction was at
55300     // least as large as the original insertion, just insert the original
55301     // subvector into a zero vector.
55302     if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
55303         isNullConstant(SubVec.getOperand(1)) &&
55304         SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
55305       SDValue Ins = SubVec.getOperand(0);
55306       if (isNullConstant(Ins.getOperand(2)) &&
55307           ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
55308           Ins.getOperand(1).getValueSizeInBits().getFixedValue() <=
55309               SubVecVT.getFixedSizeInBits())
55310         return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55311                            getZeroVector(OpVT, Subtarget, DAG, dl),
55312                            Ins.getOperand(1), N->getOperand(2));
55313     }
55314   }
55315 
55316   // Stop here if this is an i1 vector.
55317   if (IsI1Vector)
55318     return SDValue();
55319 
55320   // Eliminate an intermediate vector widening:
55321   // insert_subvector X, (insert_subvector undef, Y, 0), Idx -->
55322   // insert_subvector X, Y, Idx
55323   // TODO: This is a more general version of a DAGCombiner fold, can we move it
55324   // there?
55325   if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
55326       SubVec.getOperand(0).isUndef() && isNullConstant(SubVec.getOperand(2)))
55327     return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, Vec,
55328                        SubVec.getOperand(1), N->getOperand(2));
55329 
55330   // If this is an insert of an extract, combine to a shuffle. Don't do this
55331   // if the insert or extract can be represented with a subregister operation.
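  // e.g. (illustrative, v8i32)
  //   insert_subvector(V, extract_subvector(W, 4), 0)
  //     --> shuffle(V, W, {12,13,14,15,4,5,6,7})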
55332   if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
55333       SubVec.getOperand(0).getSimpleValueType() == OpVT &&
55334       (IdxVal != 0 ||
55335        !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
55336     int ExtIdxVal = SubVec.getConstantOperandVal(1);
55337     if (ExtIdxVal != 0) {
55338       int VecNumElts = OpVT.getVectorNumElements();
55339       int SubVecNumElts = SubVecVT.getVectorNumElements();
55340       SmallVector<int, 64> Mask(VecNumElts);
55341       // First create an identity shuffle mask.
55342       for (int i = 0; i != VecNumElts; ++i)
55343         Mask[i] = i;
55344       // Now insert the extracted portion.
55345       for (int i = 0; i != SubVecNumElts; ++i)
55346         Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
55347 
55348       return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
55349     }
55350   }
55351 
55352   // Match concat_vector style patterns.
55353   SmallVector<SDValue, 2> SubVectorOps;
55354   if (collectConcatOps(N, SubVectorOps, DAG)) {
55355     if (SDValue Fold =
55356             combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
55357       return Fold;
55358 
55359     // If we're inserting all zeros into the upper half, change this to
55360     // a concat with zero. We will match this to a move
55361     // with implicit upper bit zeroing during isel.
55362     // We do this here because we don't want combineConcatVectorOps to
55363     // create INSERT_SUBVECTOR from CONCAT_VECTORS.
55364     if (SubVectorOps.size() == 2 &&
55365         ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
55366       return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
55367                          getZeroVector(OpVT, Subtarget, DAG, dl),
55368                          SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
55369 
55370     // Attempt to recursively combine to a shuffle.
55371     if (all_of(SubVectorOps, [](SDValue SubOp) {
55372           return isTargetShuffle(SubOp.getOpcode());
55373         })) {
55374       SDValue Op(N, 0);
55375       if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
55376         return Res;
55377     }
55378   }
55379 
55380   // If this is a broadcast insert into an upper undef, use a larger broadcast.
55381   if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
55382     return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
55383 
55384   // If this is a broadcast load inserted into an upper undef, use a larger
55385   // broadcast load.
55386   if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
55387       SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
55388     auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
55389     SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
55390     SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
55391     SDValue BcastLd =
55392         DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
55393                                 MemIntr->getMemoryVT(),
55394                                 MemIntr->getMemOperand());
55395     DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
55396     return BcastLd;
55397   }
55398 
55399   // If we're splatting the lower half subvector of a full vector load into the
55400   // upper half, attempt to create a subvector broadcast.
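  // e.g. (illustrative) with V = (v4f64 load p) and S = (v2f64 load p):
  //   insert_subvector(V, S, 2) --> (v4f64 subv_broadcast_load p)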
55401   if (IdxVal == (OpVT.getVectorNumElements() / 2) && SubVec.hasOneUse() &&
55402       Vec.getValueSizeInBits() == (2 * SubVec.getValueSizeInBits())) {
55403     auto *VecLd = dyn_cast<LoadSDNode>(Vec);
55404     auto *SubLd = dyn_cast<LoadSDNode>(SubVec);
55405     if (VecLd && SubLd &&
55406         DAG.areNonVolatileConsecutiveLoads(SubLd, VecLd,
55407                                            SubVec.getValueSizeInBits() / 8, 0))
55408       return getBROADCAST_LOAD(X86ISD::SUBV_BROADCAST_LOAD, dl, OpVT, SubVecVT,
55409                                SubLd, 0, DAG);
55410   }
55411 
55412   return SDValue();
55413 }
55414 
55415 /// If we are extracting a subvector of a vector select and the select condition
55416 /// is composed of concatenated vectors, try to narrow the select width. This
55417 /// is a common pattern for AVX1 integer code because 256-bit selects may be
55418 /// legal, but there is almost no integer math/logic available for 256-bit.
55419 /// This function should only be called with legal types (otherwise, the calls
55420 /// to get simple value types will assert).
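/// e.g. (illustrative)
///   extract_subvector(vselect(concat(C0,C1), X, Y), 0)
///     --> vselect(C0, extract_subvector(X, 0), extract_subvector(Y, 0))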
55421 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
55422   SDValue Sel = Ext->getOperand(0);
55423   if (Sel.getOpcode() != ISD::VSELECT ||
55424       !isFreeToSplitVector(Sel.getOperand(0).getNode(), DAG))
55425     return SDValue();
55426 
55427   // Note: We assume simple value types because this should only be called with
55428   //       legal operations/types.
55429   // TODO: This can be extended to handle extraction to 256-bits.
55430   MVT VT = Ext->getSimpleValueType(0);
55431   if (!VT.is128BitVector())
55432     return SDValue();
55433 
55434   MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
55435   if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
55436     return SDValue();
55437 
55438   MVT WideVT = Ext->getOperand(0).getSimpleValueType();
55439   MVT SelVT = Sel.getSimpleValueType();
55440   assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
55441          "Unexpected vector type with legal operations");
55442 
55443   unsigned SelElts = SelVT.getVectorNumElements();
55444   unsigned CastedElts = WideVT.getVectorNumElements();
55445   unsigned ExtIdx = Ext->getConstantOperandVal(1);
55446   if (SelElts % CastedElts == 0) {
55447     // The select has the same or more (narrower) elements than the extract
55448     // operand. The extraction index gets scaled by that factor.
55449     ExtIdx *= (SelElts / CastedElts);
55450   } else if (CastedElts % SelElts == 0) {
55451     // The select has fewer (wider) elements than the extract operand. Make sure
55452     // that the extraction index can be divided evenly.
55453     unsigned IndexDivisor = CastedElts / SelElts;
55454     if (ExtIdx % IndexDivisor != 0)
55455       return SDValue();
55456     ExtIdx /= IndexDivisor;
55457   } else {
55458     llvm_unreachable("Element count of simple vector types are not divisible?");
55459   }
55460 
55461   unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
55462   unsigned NarrowElts = SelElts / NarrowingFactor;
55463   MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
55464   SDLoc DL(Ext);
55465   SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
55466   SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
55467   SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
55468   SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
55469   return DAG.getBitcast(VT, NarrowSel);
55470 }
55471 
55472 static SDValue combineEXTRACT_SUBVECTOR(SDNode *N, SelectionDAG &DAG,
55473                                         TargetLowering::DAGCombinerInfo &DCI,
55474                                         const X86Subtarget &Subtarget) {
55475   // For AVX1 only, if we are extracting from a 256-bit and+not (which will
55476   // eventually get combined/lowered into ANDNP) with a concatenated operand,
55477   // split the 'and' into 128-bit ops to avoid the concatenate and extract.
55478   // We let generic combining take over from there to simplify the
55479   // insert/extract and 'not'.
55480   // This pattern emerges during AVX1 legalization. We handle it before lowering
55481   // to avoid complications like splitting constant vector loads.
55482 
55483   // Capture the original wide type in the likely case that we need to bitcast
55484   // back to this type.
55485   if (!N->getValueType(0).isSimple())
55486     return SDValue();
55487 
55488   MVT VT = N->getSimpleValueType(0);
55489   SDValue InVec = N->getOperand(0);
55490   unsigned IdxVal = N->getConstantOperandVal(1);
55491   SDValue InVecBC = peekThroughBitcasts(InVec);
55492   EVT InVecVT = InVec.getValueType();
55493   unsigned SizeInBits = VT.getSizeInBits();
55494   unsigned InSizeInBits = InVecVT.getSizeInBits();
55495   unsigned NumSubElts = VT.getVectorNumElements();
55496   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55497 
55498   if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
55499       TLI.isTypeLegal(InVecVT) &&
55500       InSizeInBits == 256 && InVecBC.getOpcode() == ISD::AND) {
55501     auto isConcatenatedNot = [](SDValue V) {
55502       V = peekThroughBitcasts(V);
55503       if (!isBitwiseNot(V))
55504         return false;
55505       SDValue NotOp = V->getOperand(0);
55506       return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
55507     };
55508     if (isConcatenatedNot(InVecBC.getOperand(0)) ||
55509         isConcatenatedNot(InVecBC.getOperand(1))) {
55510       // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
55511       SDValue Concat = splitVectorIntBinary(InVecBC, DAG);
55512       return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
55513                          DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
55514     }
55515   }
55516 
55517   if (DCI.isBeforeLegalizeOps())
55518     return SDValue();
55519 
55520   if (SDValue V = narrowExtractedVectorSelect(N, DAG))
55521     return V;
55522 
55523   if (ISD::isBuildVectorAllZeros(InVec.getNode()))
55524     return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
55525 
55526   if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
55527     if (VT.getScalarType() == MVT::i1)
55528       return DAG.getConstant(1, SDLoc(N), VT);
55529     return getOnesVector(VT, DAG, SDLoc(N));
55530   }
55531 
55532   if (InVec.getOpcode() == ISD::BUILD_VECTOR)
55533     return DAG.getBuildVector(VT, SDLoc(N),
55534                               InVec->ops().slice(IdxVal, NumSubElts));
55535 
55536   // If we are extracting from an insert into a larger vector, replace with a
55537   // smaller insert if the extraction is at least as large as the original
55538   // inserted subvector. Don't do this for i1 vectors.
55539   // TODO: Relax the matching indices requirement?
55540   if (VT.getVectorElementType() != MVT::i1 &&
55541       InVec.getOpcode() == ISD::INSERT_SUBVECTOR && InVec.hasOneUse() &&
55542       IdxVal == InVec.getConstantOperandVal(2) &&
55543       InVec.getOperand(1).getValueSizeInBits() <= SizeInBits) {
55544     SDLoc DL(N);
55545     SDValue NewExt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT,
55546                                  InVec.getOperand(0), N->getOperand(1));
55547     unsigned NewIdxVal = InVec.getConstantOperandVal(2) - IdxVal;
55548     return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, NewExt,
55549                        InVec.getOperand(1),
55550                        DAG.getVectorIdxConstant(NewIdxVal, DL));
55551   }
55552 
55553   // If we're extracting an upper subvector from a broadcast, just extract the
55554   // lowest subvector instead, which should allow SimplifyDemandedVectorElts to
55555   // do more simplifications.
55556   if (IdxVal != 0 && (InVec.getOpcode() == X86ISD::VBROADCAST ||
55557                       InVec.getOpcode() == X86ISD::VBROADCAST_LOAD ||
55558                       DAG.isSplatValue(InVec, /*AllowUndefs*/ false)))
55559     return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
55560 
55561   // If we're extracting a broadcasted subvector, just use the lowest subvector.
55562   if (IdxVal != 0 && InVec.getOpcode() == X86ISD::SUBV_BROADCAST_LOAD &&
55563       cast<MemIntrinsicSDNode>(InVec)->getMemoryVT() == VT)
55564     return extractSubVector(InVec, 0, DAG, SDLoc(N), SizeInBits);
55565 
55566   // Attempt to extract from the source of a shuffle vector.
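  // e.g. (illustrative) if the scaled mask says the requested 128-bit lane is
  // taken whole from one lane of a shuffle input, extract that lane directly.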
55567   if ((InSizeInBits % SizeInBits) == 0 && (IdxVal % NumSubElts) == 0) {
55568     SmallVector<int, 32> ShuffleMask;
55569     SmallVector<int, 32> ScaledMask;
55570     SmallVector<SDValue, 2> ShuffleInputs;
55571     unsigned NumSubVecs = InSizeInBits / SizeInBits;
55572     // Decode the shuffle mask and scale it so it shuffles whole subvectors.
55573     if (getTargetShuffleInputs(InVecBC, ShuffleInputs, ShuffleMask, DAG) &&
55574         scaleShuffleElements(ShuffleMask, NumSubVecs, ScaledMask)) {
55575       unsigned SubVecIdx = IdxVal / NumSubElts;
55576       if (ScaledMask[SubVecIdx] == SM_SentinelUndef)
55577         return DAG.getUNDEF(VT);
55578       if (ScaledMask[SubVecIdx] == SM_SentinelZero)
55579         return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
55580       SDValue Src = ShuffleInputs[ScaledMask[SubVecIdx] / NumSubVecs];
55581       if (Src.getValueSizeInBits() == InSizeInBits) {
55582         unsigned SrcSubVecIdx = ScaledMask[SubVecIdx] % NumSubVecs;
55583         unsigned SrcEltIdx = SrcSubVecIdx * NumSubElts;
55584         return extractSubVector(DAG.getBitcast(InVecVT, Src), SrcEltIdx, DAG,
55585                                 SDLoc(N), SizeInBits);
55586       }
55587     }
55588   }
55589 
55590   // If we're extracting the lowest subvector and we're the only user,
55591   // we may be able to perform this with a smaller vector width.
55592   unsigned InOpcode = InVec.getOpcode();
55593   if (InVec.hasOneUse()) {
55594     if (IdxVal == 0 && VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
55595       // v2f64 CVTDQ2PD(v4i32).
55596       if (InOpcode == ISD::SINT_TO_FP &&
55597           InVec.getOperand(0).getValueType() == MVT::v4i32) {
55598         return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
55599       }
55600       // v2f64 CVTUDQ2PD(v4i32).
55601       if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
55602           InVec.getOperand(0).getValueType() == MVT::v4i32) {
55603         return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
55604       }
55605       // v2f64 CVTPS2PD(v4f32).
55606       if (InOpcode == ISD::FP_EXTEND &&
55607           InVec.getOperand(0).getValueType() == MVT::v4f32) {
55608         return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
55609       }
55610     }
55611     if (IdxVal == 0 &&
55612         (ISD::isExtOpcode(InOpcode) || ISD::isExtVecInRegOpcode(InOpcode)) &&
55613         (SizeInBits == 128 || SizeInBits == 256) &&
55614         InVec.getOperand(0).getValueSizeInBits() >= SizeInBits) {
55615       SDLoc DL(N);
55616       SDValue Ext = InVec.getOperand(0);
55617       if (Ext.getValueSizeInBits() > SizeInBits)
55618         Ext = extractSubVector(Ext, 0, DAG, DL, SizeInBits);
55619       unsigned ExtOp = DAG.getOpcode_EXTEND_VECTOR_INREG(InOpcode);
55620       return DAG.getNode(ExtOp, DL, VT, Ext);
55621     }
55622     if (IdxVal == 0 && InOpcode == ISD::VSELECT &&
55623         InVec.getOperand(0).getValueType().is256BitVector() &&
55624         InVec.getOperand(1).getValueType().is256BitVector() &&
55625         InVec.getOperand(2).getValueType().is256BitVector()) {
55626       SDLoc DL(N);
55627       SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
55628       SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
55629       SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
55630       return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
55631     }
55632     if (IdxVal == 0 && InOpcode == ISD::TRUNCATE && Subtarget.hasVLX() &&
55633         (VT.is128BitVector() || VT.is256BitVector())) {
55634       SDLoc DL(N);
55635       SDValue InVecSrc = InVec.getOperand(0);
55636       unsigned Scale = InVecSrc.getValueSizeInBits() / InSizeInBits;
55637       SDValue Ext = extractSubVector(InVecSrc, 0, DAG, DL, Scale * SizeInBits);
55638       return DAG.getNode(InOpcode, DL, VT, Ext);
55639     }
55640     if (InOpcode == X86ISD::MOVDDUP &&
55641         (VT.is128BitVector() || VT.is256BitVector())) {
55642       SDLoc DL(N);
55643       SDValue Ext0 =
55644           extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
55645       return DAG.getNode(InOpcode, DL, VT, Ext0);
55646     }
55647   }
55648 
55649   // Always split vXi64 logical shifts where we're extracting the upper 32 bits
55650   // as this is very likely to fold into a shuffle/truncation.
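  // e.g. (illustrative) extract_subvector((v4i64 srl X, 32), 2)
  //        --> (v2i64 srl (extract_subvector X, 2), 32)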
55651   if ((InOpcode == X86ISD::VSHLI || InOpcode == X86ISD::VSRLI) &&
55652       InVecVT.getScalarSizeInBits() == 64 &&
55653       InVec.getConstantOperandAPInt(1) == 32) {
55654     SDLoc DL(N);
55655     SDValue Ext =
55656         extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
55657     return DAG.getNode(InOpcode, DL, VT, Ext, InVec.getOperand(1));
55658   }
55659 
55660   return SDValue();
55661 }
55662 
55663 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
55664   EVT VT = N->getValueType(0);
55665   SDValue Src = N->getOperand(0);
55666   SDLoc DL(N);
55667 
55668   // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
55669   // This occurs frequently in our masked scalar intrinsic code and our
55670   // floating point select lowering with AVX512.
55671   // TODO: SimplifyDemandedBits instead?
55672   if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse() &&
55673       isOneConstant(Src.getOperand(1)))
55674     return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Src.getOperand(0));
55675 
55676   // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
55677   if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
55678       Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
55679       Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
55680       isNullConstant(Src.getOperand(1)))
55681     return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
55682                        Src.getOperand(1));
55683 
55684   // Reduce v2i64 to v4i32 if the upper bits are unused or known zero.
55685   // TODO: Move to DAGCombine/SimplifyDemandedBits?
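  // e.g. (illustrative) (v2i64 scalar_to_vector (i64 zext i32:X)) becomes
  // bitcast(vzext_movl(v4i32 scalar_to_vector X)); the any_extend form drops
  // the vzext_movl.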
55686   if ((VT == MVT::v2i64 || VT == MVT::v2f64) && Src.hasOneUse()) {
55687     auto IsExt64 = [&DAG](SDValue Op, bool IsZeroExt) {
55688       if (Op.getValueType() != MVT::i64)
55689         return SDValue();
55690       unsigned Opc = IsZeroExt ? ISD::ZERO_EXTEND : ISD::ANY_EXTEND;
55691       if (Op.getOpcode() == Opc &&
55692           Op.getOperand(0).getScalarValueSizeInBits() <= 32)
55693         return Op.getOperand(0);
55694       unsigned Ext = IsZeroExt ? ISD::ZEXTLOAD : ISD::EXTLOAD;
55695       if (auto *Ld = dyn_cast<LoadSDNode>(Op))
55696         if (Ld->getExtensionType() == Ext &&
55697             Ld->getMemoryVT().getScalarSizeInBits() <= 32)
55698           return Op;
55699       if (IsZeroExt) {
55700         KnownBits Known = DAG.computeKnownBits(Op);
55701         if (!Known.isConstant() && Known.countMinLeadingZeros() >= 32)
55702           return Op;
55703       }
55704       return SDValue();
55705     };
55706 
55707     if (SDValue AnyExt = IsExt64(peekThroughOneUseBitcasts(Src), false))
55708       return DAG.getBitcast(
55709           VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
55710                           DAG.getAnyExtOrTrunc(AnyExt, DL, MVT::i32)));
55711 
55712     if (SDValue ZeroExt = IsExt64(peekThroughOneUseBitcasts(Src), true))
55713       return DAG.getBitcast(
55714           VT,
55715           DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v4i32,
55716                       DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
55717                                   DAG.getZExtOrTrunc(ZeroExt, DL, MVT::i32))));
55718   }
55719 
55720   // Combine (v2i64 (scalar_to_vector (i64 (bitconvert (mmx))))) to MOVQ2DQ.
55721   if (VT == MVT::v2i64 && Src.getOpcode() == ISD::BITCAST &&
55722       Src.getOperand(0).getValueType() == MVT::x86mmx)
55723     return DAG.getNode(X86ISD::MOVQ2DQ, DL, VT, Src.getOperand(0));
55724 
55725   // See if we're broadcasting the scalar value, in which case just reuse that.
55726   // Ensure the same SDValue from the SDNode use is being used.
55727   if (VT.getScalarType() == Src.getValueType())
55728     for (SDNode *User : Src->uses())
55729       if (User->getOpcode() == X86ISD::VBROADCAST &&
55730           Src == User->getOperand(0)) {
55731         unsigned SizeInBits = VT.getFixedSizeInBits();
55732         unsigned BroadcastSizeInBits =
55733             User->getValueSizeInBits(0).getFixedValue();
55734         if (BroadcastSizeInBits == SizeInBits)
55735           return SDValue(User, 0);
55736         if (BroadcastSizeInBits > SizeInBits)
55737           return extractSubVector(SDValue(User, 0), 0, DAG, DL, SizeInBits);
55738         // TODO: Handle BroadcastSizeInBits < SizeInBits when we have test
55739         // coverage.
55740       }
55741 
55742   return SDValue();
55743 }
55744 
55745 // Simplify PMULDQ and PMULUDQ operations.
55746 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
55747                              TargetLowering::DAGCombinerInfo &DCI,
55748                              const X86Subtarget &Subtarget) {
55749   SDValue LHS = N->getOperand(0);
55750   SDValue RHS = N->getOperand(1);
55751 
55752   // Canonicalize constant to RHS.
55753   if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
55754       !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
55755     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
55756 
55757   // Multiply by zero.
55758   // Don't return RHS as it may contain UNDEFs.
55759   if (ISD::isBuildVectorAllZeros(RHS.getNode()))
55760     return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
55761 
55762   // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
55763   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55764   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(64), DCI))
55765     return SDValue(N, 0);
55766 
55767   // If the input is an extend_invec and the SimplifyDemandedBits call didn't
55768   // convert it to any_extend_invec, due to the LegalOperations check, do the
55769   // conversion directly to a vector shuffle manually. This exposes combine
55770   // opportunities missed by combineEXTEND_VECTOR_INREG not calling
55771   // combineX86ShufflesRecursively on SSE4.1 targets.
55772   // FIXME: This is basically a hack around several other issues related to
55773   // ANY_EXTEND_VECTOR_INREG.
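  // e.g. (illustrative) for PMULUDQ, (zext_invec v4i32:X) can be rewritten as
  // bitcast<v2i64>(shuffle(X, X, {0,-1,1,-1})) since only the low 32 bits of
  // each 64-bit element are demanded.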
55774   if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
55775       (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
55776        LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
55777       LHS.getOperand(0).getValueType() == MVT::v4i32) {
55778     SDLoc dl(N);
55779     LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
55780                                LHS.getOperand(0), { 0, -1, 1, -1 });
55781     LHS = DAG.getBitcast(MVT::v2i64, LHS);
55782     return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
55783   }
55784   if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
55785       (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
55786        RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
55787       RHS.getOperand(0).getValueType() == MVT::v4i32) {
55788     SDLoc dl(N);
55789     RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
55790                                RHS.getOperand(0), { 0, -1, 1, -1 });
55791     RHS = DAG.getBitcast(MVT::v2i64, RHS);
55792     return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
55793   }
55794 
55795   return SDValue();
55796 }
55797 
55798 // Simplify VPMADDUBSW/VPMADDWD operations.
55799 static SDValue combineVPMADD(SDNode *N, SelectionDAG &DAG,
55800                              TargetLowering::DAGCombinerInfo &DCI) {
55801   EVT VT = N->getValueType(0);
55802   SDValue LHS = N->getOperand(0);
55803   SDValue RHS = N->getOperand(1);
55804 
55805   // Multiply by zero.
55806   // Don't return LHS/RHS as it may contain UNDEFs.
55807   if (ISD::isBuildVectorAllZeros(LHS.getNode()) ||
55808       ISD::isBuildVectorAllZeros(RHS.getNode()))
55809     return DAG.getConstant(0, SDLoc(N), VT);
55810 
55811   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55812   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
55813   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
55814     return SDValue(N, 0);
55815 
55816   return SDValue();
55817 }
55818 
55819 static SDValue combineEXTEND_VECTOR_INREG(SDNode *N, SelectionDAG &DAG,
55820                                           TargetLowering::DAGCombinerInfo &DCI,
55821                                           const X86Subtarget &Subtarget) {
55822   EVT VT = N->getValueType(0);
55823   SDValue In = N->getOperand(0);
55824   unsigned Opcode = N->getOpcode();
55825   unsigned InOpcode = In.getOpcode();
55826   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55827   SDLoc DL(N);
55828 
55829   // Try to merge vector loads and extend_inreg to an extload.
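  // e.g. (illustrative) (v4i32 zext_invec (v8i16 load p)) can become a
  // (v4i32 zextload p) with memory type v4i16 when that extload is legal.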
55830   if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
55831       In.hasOneUse()) {
55832     auto *Ld = cast<LoadSDNode>(In);
55833     if (Ld->isSimple()) {
55834       MVT SVT = In.getSimpleValueType().getVectorElementType();
55835       ISD::LoadExtType Ext = Opcode == ISD::SIGN_EXTEND_VECTOR_INREG
55836                                  ? ISD::SEXTLOAD
55837                                  : ISD::ZEXTLOAD;
55838       EVT MemVT = VT.changeVectorElementType(SVT);
55839       if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
55840         SDValue Load = DAG.getExtLoad(
55841             Ext, DL, VT, Ld->getChain(), Ld->getBasePtr(), Ld->getPointerInfo(),
55842             MemVT, Ld->getOriginalAlign(), Ld->getMemOperand()->getFlags());
55843         DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
55844         return Load;
55845       }
55846     }
55847   }
55848 
55849   // Fold EXTEND_VECTOR_INREG(EXTEND_VECTOR_INREG(X)) -> EXTEND_VECTOR_INREG(X).
55850   if (Opcode == InOpcode)
55851     return DAG.getNode(Opcode, DL, VT, In.getOperand(0));
55852 
55853   // Fold EXTEND_VECTOR_INREG(EXTRACT_SUBVECTOR(EXTEND(X),0))
55854   // -> EXTEND_VECTOR_INREG(X).
55855   // TODO: Handle non-zero subvector indices.
55856   if (InOpcode == ISD::EXTRACT_SUBVECTOR && In.getConstantOperandVal(1) == 0 &&
55857       In.getOperand(0).getOpcode() == DAG.getOpcode_EXTEND(Opcode) &&
55858       In.getOperand(0).getOperand(0).getValueSizeInBits() ==
55859           In.getValueSizeInBits())
55860     return DAG.getNode(Opcode, DL, VT, In.getOperand(0).getOperand(0));
55861 
55862   // Fold EXTEND_VECTOR_INREG(BUILD_VECTOR(X,Y,?,?)) -> BUILD_VECTOR(X,0,Y,0).
55863   // TODO: Move to DAGCombine?
55864   if (!DCI.isBeforeLegalizeOps() && Opcode == ISD::ZERO_EXTEND_VECTOR_INREG &&
55865       In.getOpcode() == ISD::BUILD_VECTOR && In.hasOneUse() &&
55866       In.getValueSizeInBits() == VT.getSizeInBits()) {
55867     unsigned NumElts = VT.getVectorNumElements();
55868     unsigned Scale = VT.getScalarSizeInBits() / In.getScalarValueSizeInBits();
55869     EVT EltVT = In.getOperand(0).getValueType();
55870     SmallVector<SDValue> Elts(Scale * NumElts, DAG.getConstant(0, DL, EltVT));
55871     for (unsigned I = 0; I != NumElts; ++I)
55872       Elts[I * Scale] = In.getOperand(I);
55873     return DAG.getBitcast(VT, DAG.getBuildVector(In.getValueType(), DL, Elts));
55874   }
55875 
55876   // Attempt to combine as a shuffle on SSE41+ targets.
55877   if (Subtarget.hasSSE41()) {
55878     SDValue Op(N, 0);
55879     if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
55880       if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
55881         return Res;
55882   }
55883 
55884   return SDValue();
55885 }
55886 
55887 static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
55888                              TargetLowering::DAGCombinerInfo &DCI) {
55889   EVT VT = N->getValueType(0);
55890 
55891   if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
55892     return DAG.getConstant(0, SDLoc(N), VT);
55893 
55894   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
55895   APInt DemandedElts = APInt::getAllOnes(VT.getVectorNumElements());
55896   if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, DCI))
55897     return SDValue(N, 0);
55898 
55899   return SDValue();
55900 }
55901 
55902 // Optimize (fp16_to_fp (fp_to_fp16 X)) to VCVTPS2PH followed by VCVTPH2PS.
55903 // Done as a combine because the lowerings of fp16_to_fp and fp_to_fp16 produce
55904 // extra instructions between the conversions due to going to scalar and back.
55905 static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
55906                                  const X86Subtarget &Subtarget) {
55907   if (Subtarget.useSoftFloat() || !Subtarget.hasF16C())
55908     return SDValue();
55909 
55910   if (N->getOperand(0).getOpcode() != ISD::FP_TO_FP16)
55911     return SDValue();
55912 
55913   if (N->getValueType(0) != MVT::f32 ||
55914       N->getOperand(0).getOperand(0).getValueType() != MVT::f32)
55915     return SDValue();
55916 
55917   SDLoc dl(N);
55918   SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
55919                             N->getOperand(0).getOperand(0));
55920   Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
55921                     DAG.getTargetConstant(4, dl, MVT::i32));
55922   Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
55923   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
55924                      DAG.getIntPtrConstant(0, dl));
55925 }
55926 
55927 static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
55928                                 const X86Subtarget &Subtarget) {
55929   if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
55930     return SDValue();
55931 
55932   if (Subtarget.hasFP16())
55933     return SDValue();
55934 
55935   bool IsStrict = N->isStrictFPOpcode();
55936   EVT VT = N->getValueType(0);
55937   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
55938   EVT SrcVT = Src.getValueType();
55939 
55940   if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::f16)
55941     return SDValue();
55942 
55943   if (VT.getVectorElementType() != MVT::f32 &&
55944       VT.getVectorElementType() != MVT::f64)
55945     return SDValue();
55946 
55947   unsigned NumElts = VT.getVectorNumElements();
55948   if (NumElts == 1 || !isPowerOf2_32(NumElts))
55949     return SDValue();
55950 
55951   SDLoc dl(N);
55952 
55953   // Convert the input to vXi16.
55954   EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
55955   Src = DAG.getBitcast(IntVT, Src);
55956 
55957   // Widen to at least 8 input elements.
55958   if (NumElts < 8) {
55959     unsigned NumConcats = 8 / NumElts;
55960     SDValue Fill = NumElts == 4 ? DAG.getUNDEF(IntVT)
55961                                 : DAG.getConstant(0, dl, IntVT);
55962     SmallVector<SDValue, 4> Ops(NumConcats, Fill);
55963     Ops[0] = Src;
55964     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, Ops);
55965   }
55966 
55967   // Destination is vXf32 with at least 4 elements.
55968   EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32,
55969                                std::max(4U, NumElts));
55970   SDValue Cvt, Chain;
55971   if (IsStrict) {
55972     Cvt = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {CvtVT, MVT::Other},
55973                       {N->getOperand(0), Src});
55974     Chain = Cvt.getValue(1);
55975   } else {
55976     Cvt = DAG.getNode(X86ISD::CVTPH2PS, dl, CvtVT, Src);
55977   }
55978 
55979   if (NumElts < 4) {
55980     assert(NumElts == 2 && "Unexpected size");
55981     Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Cvt,
55982                       DAG.getIntPtrConstant(0, dl));
55983   }
55984 
55985   if (IsStrict) {
55986     // Extend to the original VT if necessary.
55987     if (Cvt.getValueType() != VT) {
55988       Cvt = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {VT, MVT::Other},
55989                         {Chain, Cvt});
55990       Chain = Cvt.getValue(1);
55991     }
55992     return DAG.getMergeValues({Cvt, Chain}, dl);
55993   }
55994 
55995   // Extend to the original VT if necessary.
55996   return DAG.getNode(ISD::FP_EXTEND, dl, VT, Cvt);
55997 }
55998 
55999 // Try to find a larger VBROADCAST_LOAD/SUBV_BROADCAST_LOAD that we can extract
56000 // from. Limit this to cases where the loads have the same input chain and the
56001 // output chains are unused. This avoids any memory ordering issues.
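// e.g. (illustrative) a (v4f32 vbroadcast_load p) can reuse an existing
// (v8f32 vbroadcast_load p) on the same chain by extracting its low 128 bits,
// provided neither node's chain result is otherwise used.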
56002 static SDValue combineBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG,
56003                                      TargetLowering::DAGCombinerInfo &DCI) {
56004   assert((N->getOpcode() == X86ISD::VBROADCAST_LOAD ||
56005           N->getOpcode() == X86ISD::SUBV_BROADCAST_LOAD) &&
56006          "Unknown broadcast load type");
56007 
56008   // Only do this if the chain result is unused.
56009   if (N->hasAnyUseOfValue(1))
56010     return SDValue();
56011 
56012   auto *MemIntrin = cast<MemIntrinsicSDNode>(N);
56013 
56014   SDValue Ptr = MemIntrin->getBasePtr();
56015   SDValue Chain = MemIntrin->getChain();
56016   EVT VT = N->getSimpleValueType(0);
56017   EVT MemVT = MemIntrin->getMemoryVT();
56018 
56019   // Look at other users of our base pointer and try to find a wider broadcast.
56020   // The input chain and the size of the memory VT must match.
56021   for (SDNode *User : Ptr->uses())
56022     if (User != N && User->getOpcode() == N->getOpcode() &&
56023         cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
56024         cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
56025         cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
56026             MemVT.getSizeInBits() &&
56027         !User->hasAnyUseOfValue(1) &&
56028         User->getValueSizeInBits(0).getFixedValue() > VT.getFixedSizeInBits()) {
56029       SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
56030                                          VT.getSizeInBits());
56031       Extract = DAG.getBitcast(VT, Extract);
56032       return DCI.CombineTo(N, Extract, SDValue(User, 1));
56033     }
56034 
56035   return SDValue();
56036 }
56037 
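// Lower vXf32 -> vXf16 FP_ROUND. With AVX512-FP16 this only handles the
// concat_vectors(xint_to_fp) pattern documented below; with F16C only, the
// source is widened to at least v4f32, converted with (STRICT_)CVTPS2PH, and
// the result is extracted and bitcast back to the requested vXf16 type.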
56038 static SDValue combineFP_ROUND(SDNode *N, SelectionDAG &DAG,
56039                                const X86Subtarget &Subtarget) {
56040   if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
56041     return SDValue();
56042 
56043   bool IsStrict = N->isStrictFPOpcode();
56044   EVT VT = N->getValueType(0);
56045   SDValue Src = N->getOperand(IsStrict ? 1 : 0);
56046   EVT SrcVT = Src.getValueType();
56047 
56048   if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
56049       SrcVT.getVectorElementType() != MVT::f32)
56050     return SDValue();
56051 
56052   SDLoc dl(N);
56053 
56054   SDValue Cvt, Chain;
56055   unsigned NumElts = VT.getVectorNumElements();
56056   if (Subtarget.hasFP16()) {
56057     // Combine (v8f16 fp_round(concat_vectors(v4f32 (xint_to_fp v4i64), ..)))
56058     // into (v8f16 vector_shuffle(v8f16 (CVTXI2P v4i64), ..))
56059     if (NumElts == 8 && Src.getOpcode() == ISD::CONCAT_VECTORS) {
56060       SDValue Cvt0, Cvt1;
56061       SDValue Op0 = Src.getOperand(0);
56062       SDValue Op1 = Src.getOperand(1);
56063       bool IsOp0Strict = Op0->isStrictFPOpcode();
56064       if (Op0.getOpcode() != Op1.getOpcode() ||
56065           Op0.getOperand(IsOp0Strict ? 1 : 0).getValueType() != MVT::v4i64 ||
56066           Op1.getOperand(IsOp0Strict ? 1 : 0).getValueType() != MVT::v4i64) {
56067         return SDValue();
56068       }
56069       int Mask[8] = {0, 1, 2, 3, 8, 9, 10, 11};
56070       if (IsStrict) {
56071         assert(IsOp0Strict && "Op0 must be strict node");
56072         unsigned Opc = Op0.getOpcode() == ISD::STRICT_SINT_TO_FP
56073                            ? X86ISD::STRICT_CVTSI2P
56074                            : X86ISD::STRICT_CVTUI2P;
56075         Cvt0 = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
56076                            {Op0.getOperand(0), Op0.getOperand(1)});
56077         Cvt1 = DAG.getNode(Opc, dl, {MVT::v8f16, MVT::Other},
56078                            {Op1.getOperand(0), Op1.getOperand(1)});
56079         Cvt = DAG.getVectorShuffle(MVT::v8f16, dl, Cvt0, Cvt1, Mask);
56080         return DAG.getMergeValues({Cvt, Cvt0.getValue(1)}, dl);
56081       }
56082       unsigned Opc = Op0.getOpcode() == ISD::SINT_TO_FP ? X86ISD::CVTSI2P
56083                                                         : X86ISD::CVTUI2P;
56084       Cvt0 = DAG.getNode(Opc, dl, MVT::v8f16, Op0.getOperand(0));
56085       Cvt1 = DAG.getNode(Opc, dl, MVT::v8f16, Op1.getOperand(0));
56086       return Cvt = DAG.getVectorShuffle(MVT::v8f16, dl, Cvt0, Cvt1, Mask);
56087     }
56088     return SDValue();
56089   }
56090 
56091   if (NumElts == 1 || !isPowerOf2_32(NumElts))
56092     return SDValue();
56093 
56094   // Widen to at least 4 input elements.
56095   if (NumElts < 4)
56096     Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
56097                       DAG.getConstantFP(0.0, dl, SrcVT));
56098 
56099   // Destination is v8i16 with at least 8 elements.
56100   EVT CvtVT =
56101       EVT::getVectorVT(*DAG.getContext(), MVT::i16, std::max(8U, NumElts));
56102   SDValue Rnd = DAG.getTargetConstant(4, dl, MVT::i32);
56103   if (IsStrict) {
56104     Cvt = DAG.getNode(X86ISD::STRICT_CVTPS2PH, dl, {CvtVT, MVT::Other},
56105                       {N->getOperand(0), Src, Rnd});
56106     Chain = Cvt.getValue(1);
56107   } else {
56108     Cvt = DAG.getNode(X86ISD::CVTPS2PH, dl, CvtVT, Src, Rnd);
56109   }
56110 
56111   // Extract down to real number of elements.
56112   if (NumElts < 8) {
56113     EVT IntVT = VT.changeVectorElementTypeToInteger();
56114     Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, IntVT, Cvt,
56115                       DAG.getIntPtrConstant(0, dl));
56116   }
56117 
56118   Cvt = DAG.getBitcast(VT, Cvt);
56119 
56120   if (IsStrict)
56121     return DAG.getMergeValues({Cvt, Chain}, dl);
56122 
56123   return Cvt;
56124 }
56125 
56126 static SDValue combineMOVDQ2Q(SDNode *N, SelectionDAG &DAG) {
56127   SDValue Src = N->getOperand(0);
56128 
56129   // Turn MOVDQ2Q+simple_load into an mmx load.
56130   if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
56131     LoadSDNode *LN = cast<LoadSDNode>(Src.getNode());
56132 
56133     if (LN->isSimple()) {
56134       SDValue NewLd = DAG.getLoad(MVT::x86mmx, SDLoc(N), LN->getChain(),
56135                                   LN->getBasePtr(),
56136                                   LN->getPointerInfo(),
56137                                   LN->getOriginalAlign(),
56138                                   LN->getMemOperand()->getFlags());
56139       DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), NewLd.getValue(1));
56140       return NewLd;
56141     }
56142   }
56143 
56144   return SDValue();
56145 }
56146 
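// For PDEP, let SimplifyDemandedBits strip operand bits that cannot influence
// the result; returning the node signals the combiner to revisit it.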
56147 static SDValue combinePDEP(SDNode *N, SelectionDAG &DAG,
56148                            TargetLowering::DAGCombinerInfo &DCI) {
56149   unsigned NumBits = N->getSimpleValueType(0).getSizeInBits();
56150   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
56151   if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnes(NumBits), DCI))
56152     return SDValue(N, 0);
56153 
56154   return SDValue();
56155 }
56156 
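// Central dispatch for X86-specific DAG combines: route each (target or
// generic) opcode to its dedicated combine helper above.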
56157 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
56158                                              DAGCombinerInfo &DCI) const {
56159   SelectionDAG &DAG = DCI.DAG;
56160   switch (N->getOpcode()) {
56161   default: break;
56162   case ISD::SCALAR_TO_VECTOR:
56163     return combineScalarToVector(N, DAG);
56164   case ISD::EXTRACT_VECTOR_ELT:
56165   case X86ISD::PEXTRW:
56166   case X86ISD::PEXTRB:
56167     return combineExtractVectorElt(N, DAG, DCI, Subtarget);
56168   case ISD::CONCAT_VECTORS:
56169     return combineCONCAT_VECTORS(N, DAG, DCI, Subtarget);
56170   case ISD::INSERT_SUBVECTOR:
56171     return combineINSERT_SUBVECTOR(N, DAG, DCI, Subtarget);
56172   case ISD::EXTRACT_SUBVECTOR:
56173     return combineEXTRACT_SUBVECTOR(N, DAG, DCI, Subtarget);
56174   case ISD::VSELECT:
56175   case ISD::SELECT:
56176   case X86ISD::BLENDV:      return combineSelect(N, DAG, DCI, Subtarget);
56177   case ISD::BITCAST:        return combineBitcast(N, DAG, DCI, Subtarget);
56178   case X86ISD::CMOV:        return combineCMov(N, DAG, DCI, Subtarget);
56179   case X86ISD::CMP:         return combineCMP(N, DAG, Subtarget);
56180   case ISD::ADD:            return combineAdd(N, DAG, DCI, Subtarget);
56181   case ISD::SUB:            return combineSub(N, DAG, DCI, Subtarget);
56182   case X86ISD::ADD:
56183   case X86ISD::SUB:         return combineX86AddSub(N, DAG, DCI);
56184   case X86ISD::SBB:         return combineSBB(N, DAG);
56185   case X86ISD::ADC:         return combineADC(N, DAG, DCI);
56186   case ISD::MUL:            return combineMul(N, DAG, DCI, Subtarget);
56187   case ISD::SHL:            return combineShiftLeft(N, DAG);
56188   case ISD::SRA:            return combineShiftRightArithmetic(N, DAG, Subtarget);
56189   case ISD::SRL:            return combineShiftRightLogical(N, DAG, DCI, Subtarget);
56190   case ISD::AND:            return combineAnd(N, DAG, DCI, Subtarget);
56191   case ISD::OR:             return combineOr(N, DAG, DCI, Subtarget);
56192   case ISD::XOR:            return combineXor(N, DAG, DCI, Subtarget);
56193   case ISD::BITREVERSE:     return combineBITREVERSE(N, DAG, DCI, Subtarget);
56194   case X86ISD::BEXTR:
56195   case X86ISD::BEXTRI:      return combineBEXTR(N, DAG, DCI, Subtarget);
56196   case ISD::LOAD:           return combineLoad(N, DAG, DCI, Subtarget);
56197   case ISD::MLOAD:          return combineMaskedLoad(N, DAG, DCI, Subtarget);
56198   case ISD::STORE:          return combineStore(N, DAG, DCI, Subtarget);
56199   case ISD::MSTORE:         return combineMaskedStore(N, DAG, DCI, Subtarget);
56200   case X86ISD::VEXTRACT_STORE:
56201     return combineVEXTRACT_STORE(N, DAG, DCI, Subtarget);
56202   case ISD::SINT_TO_FP:
56203   case ISD::STRICT_SINT_TO_FP:
56204     return combineSIntToFP(N, DAG, DCI, Subtarget);
56205   case ISD::UINT_TO_FP:
56206   case ISD::STRICT_UINT_TO_FP:
56207     return combineUIntToFP(N, DAG, Subtarget);
56208   case ISD::FADD:
56209   case ISD::FSUB:           return combineFaddFsub(N, DAG, Subtarget);
56210   case X86ISD::VFCMULC:
56211   case X86ISD::VFMULC:      return combineFMulcFCMulc(N, DAG, Subtarget);
56212   case ISD::FNEG:           return combineFneg(N, DAG, DCI, Subtarget);
56213   case ISD::TRUNCATE:       return combineTruncate(N, DAG, Subtarget);
56214   case X86ISD::VTRUNC:      return combineVTRUNC(N, DAG, DCI);
56215   case X86ISD::ANDNP:       return combineAndnp(N, DAG, DCI, Subtarget);
56216   case X86ISD::FAND:        return combineFAnd(N, DAG, Subtarget);
56217   case X86ISD::FANDN:       return combineFAndn(N, DAG, Subtarget);
56218   case X86ISD::FXOR:
56219   case X86ISD::FOR:         return combineFOr(N, DAG, DCI, Subtarget);
56220   case X86ISD::FMIN:
56221   case X86ISD::FMAX:        return combineFMinFMax(N, DAG);
56222   case ISD::FMINNUM:
56223   case ISD::FMAXNUM:        return combineFMinNumFMaxNum(N, DAG, Subtarget);
56224   case X86ISD::CVTSI2P:
56225   case X86ISD::CVTUI2P:     return combineX86INT_TO_FP(N, DAG, DCI);
56226   case X86ISD::CVTP2SI:
56227   case X86ISD::CVTP2UI:
56228   case X86ISD::STRICT_CVTTP2SI:
56229   case X86ISD::CVTTP2SI:
56230   case X86ISD::STRICT_CVTTP2UI:
56231   case X86ISD::CVTTP2UI:
56232                             return combineCVTP2I_CVTTP2I(N, DAG, DCI);
56233   case X86ISD::STRICT_CVTPH2PS:
56234   case X86ISD::CVTPH2PS:    return combineCVTPH2PS(N, DAG, DCI);
56235   case X86ISD::BT:          return combineBT(N, DAG, DCI);
56236   case ISD::ANY_EXTEND:
56237   case ISD::ZERO_EXTEND:    return combineZext(N, DAG, DCI, Subtarget);
56238   case ISD::SIGN_EXTEND:    return combineSext(N, DAG, DCI, Subtarget);
56239   case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
56240   case ISD::ANY_EXTEND_VECTOR_INREG:
56241   case ISD::SIGN_EXTEND_VECTOR_INREG:
56242   case ISD::ZERO_EXTEND_VECTOR_INREG:
56243     return combineEXTEND_VECTOR_INREG(N, DAG, DCI, Subtarget);
56244   case ISD::SETCC:          return combineSetCC(N, DAG, DCI, Subtarget);
56245   case X86ISD::SETCC:       return combineX86SetCC(N, DAG, Subtarget);
56246   case X86ISD::BRCOND:      return combineBrCond(N, DAG, Subtarget);
56247   case X86ISD::PACKSS:
56248   case X86ISD::PACKUS:      return combineVectorPack(N, DAG, DCI, Subtarget);
56249   case X86ISD::HADD:
56250   case X86ISD::HSUB:
56251   case X86ISD::FHADD:
56252   case X86ISD::FHSUB:       return combineVectorHADDSUB(N, DAG, DCI, Subtarget);
56253   case X86ISD::VSHL:
56254   case X86ISD::VSRA:
56255   case X86ISD::VSRL:
56256     return combineVectorShiftVar(N, DAG, DCI, Subtarget);
56257   case X86ISD::VSHLI:
56258   case X86ISD::VSRAI:
56259   case X86ISD::VSRLI:
56260     return combineVectorShiftImm(N, DAG, DCI, Subtarget);
56261   case ISD::INSERT_VECTOR_ELT:
56262   case X86ISD::PINSRB:
56263   case X86ISD::PINSRW:      return combineVectorInsert(N, DAG, DCI, Subtarget);
56264   case X86ISD::SHUFP:       // Handle all target specific shuffles
56265   case X86ISD::INSERTPS:
56266   case X86ISD::EXTRQI:
56267   case X86ISD::INSERTQI:
56268   case X86ISD::VALIGN:
56269   case X86ISD::PALIGNR:
56270   case X86ISD::VSHLDQ:
56271   case X86ISD::VSRLDQ:
56272   case X86ISD::BLENDI:
56273   case X86ISD::UNPCKH:
56274   case X86ISD::UNPCKL:
56275   case X86ISD::MOVHLPS:
56276   case X86ISD::MOVLHPS:
56277   case X86ISD::PSHUFB:
56278   case X86ISD::PSHUFD:
56279   case X86ISD::PSHUFHW:
56280   case X86ISD::PSHUFLW:
56281   case X86ISD::MOVSHDUP:
56282   case X86ISD::MOVSLDUP:
56283   case X86ISD::MOVDDUP:
56284   case X86ISD::MOVSS:
56285   case X86ISD::MOVSD:
56286   case X86ISD::MOVSH:
56287   case X86ISD::VBROADCAST:
56288   case X86ISD::VPPERM:
56289   case X86ISD::VPERMI:
56290   case X86ISD::VPERMV:
56291   case X86ISD::VPERMV3:
56292   case X86ISD::VPERMIL2:
56293   case X86ISD::VPERMILPI:
56294   case X86ISD::VPERMILPV:
56295   case X86ISD::VPERM2X128:
56296   case X86ISD::SHUF128:
56297   case X86ISD::VZEXT_MOVL:
56298   case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
56299   case X86ISD::FMADD_RND:
56300   case X86ISD::FMSUB:
56301   case X86ISD::STRICT_FMSUB:
56302   case X86ISD::FMSUB_RND:
56303   case X86ISD::FNMADD:
56304   case X86ISD::STRICT_FNMADD:
56305   case X86ISD::FNMADD_RND:
56306   case X86ISD::FNMSUB:
56307   case X86ISD::STRICT_FNMSUB:
56308   case X86ISD::FNMSUB_RND:
56309   case ISD::FMA:
56310   case ISD::STRICT_FMA:     return combineFMA(N, DAG, DCI, Subtarget);
56311   case X86ISD::FMADDSUB_RND:
56312   case X86ISD::FMSUBADD_RND:
56313   case X86ISD::FMADDSUB:
56314   case X86ISD::FMSUBADD:    return combineFMADDSUB(N, DAG, DCI);
56315   case X86ISD::MOVMSK:      return combineMOVMSK(N, DAG, DCI, Subtarget);
56316   case X86ISD::TESTP:       return combineTESTP(N, DAG, DCI, Subtarget);
56317   case X86ISD::MGATHER:
56318   case X86ISD::MSCATTER:    return combineX86GatherScatter(N, DAG, DCI);
56319   case ISD::MGATHER:
56320   case ISD::MSCATTER:       return combineGatherScatter(N, DAG, DCI);
56321   case X86ISD::PCMPEQ:
56322   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
56323   case X86ISD::PMULDQ:
56324   case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
56325   case X86ISD::VPMADDUBSW:
56326   case X86ISD::VPMADDWD:    return combineVPMADD(N, DAG, DCI);
56327   case X86ISD::KSHIFTL:
56328   case X86ISD::KSHIFTR:     return combineKSHIFT(N, DAG, DCI);
56329   case ISD::FP16_TO_FP:     return combineFP16_TO_FP(N, DAG, Subtarget);
56330   case ISD::STRICT_FP_EXTEND:
56331   case ISD::FP_EXTEND:      return combineFP_EXTEND(N, DAG, Subtarget);
56332   case ISD::STRICT_FP_ROUND:
56333   case ISD::FP_ROUND:       return combineFP_ROUND(N, DAG, Subtarget);
56334   case X86ISD::VBROADCAST_LOAD:
56335   case X86ISD::SUBV_BROADCAST_LOAD: return combineBROADCAST_LOAD(N, DAG, DCI);
56336   case X86ISD::MOVDQ2Q:     return combineMOVDQ2Q(N, DAG);
56337   case X86ISD::PDEP:        return combinePDEP(N, DAG, DCI);
56338   }
56339 
56340   return SDValue();
56341 }
56342 
56343 bool X86TargetLowering::preferABDSToABSWithNSW(EVT VT) const {
56344   return false;
56345 }
56346 
56347 // Prefer (non-AVX512) vector TRUNCATE(SIGN_EXTEND_INREG(X)) to use of PACKSS.
56348 bool X86TargetLowering::preferSextInRegOfTruncate(EVT TruncVT, EVT VT,
56349                                                   EVT ExtVT) const {
56350   return Subtarget.hasAVX512() || !VT.isVector();
56351 }
56352 
56353 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
56354   if (!isTypeLegal(VT))
56355     return false;
56356 
56357   // There are no vXi8 shifts.
56358   if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
56359     return false;
56360 
56361   // TODO: Almost no 8-bit ops are desirable because they have no actual
56362   //       size/speed advantages vs. 32-bit ops, but they do have a major
56363   //       potential disadvantage by causing partial register stalls.
56364   //
56365   // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
56366   // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
56367   // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
56368   // check for a constant operand to the multiply.
56369   if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
56370     return false;
56371 
56372   // i16 instruction encodings are longer and some i16 instructions are slow,
56373   // so those are not desirable.
56374   if (VT == MVT::i16) {
56375     switch (Opc) {
56376     default:
56377       break;
56378     case ISD::LOAD:
56379     case ISD::SIGN_EXTEND:
56380     case ISD::ZERO_EXTEND:
56381     case ISD::ANY_EXTEND:
56382     case ISD::SHL:
56383     case ISD::SRA:
56384     case ISD::SRL:
56385     case ISD::SUB:
56386     case ISD::ADD:
56387     case ISD::MUL:
56388     case ISD::AND:
56389     case ISD::OR:
56390     case ISD::XOR:
56391       return false;
56392     }
56393   }
56394 
56395   // Any legal type not explicitly accounted for above here is desirable.
56396   return true;
56397 }
56398 
56399 SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc &dl,
56400                                                   SDValue Value, SDValue Addr,
56401                                                   int JTI,
56402                                                   SelectionDAG &DAG) const {
56403   const Module *M = DAG.getMachineFunction().getMMI().getModule();
56404   Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
56405   if (IsCFProtectionSupported) {
56406     // If control-flow branch protection is enabled, we need to add the
56407     // notrack prefix to the indirect branch. To do that we create an
56408     // NT_BRIND SDNode; upon ISel, the pattern converts it to a jmp with the
56409     // NoTrack prefix.
56410     SDValue JTInfo = DAG.getJumpTableDebugInfo(JTI, Value, dl);
56411     return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, JTInfo, Addr);
56412   }
56413 
56414   return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, JTI, DAG);
56415 }
56416 
56417 TargetLowering::AndOrSETCCFoldKind
56418 X86TargetLowering::isDesirableToCombineLogicOpOfSETCC(
56419     const SDNode *LogicOp, const SDNode *SETCC0, const SDNode *SETCC1) const {
56420   using AndOrSETCCFoldKind = TargetLowering::AndOrSETCCFoldKind;
56421   EVT VT = LogicOp->getValueType(0);
56422   EVT OpVT = SETCC0->getOperand(0).getValueType();
56423   if (!VT.isInteger())
56424     return AndOrSETCCFoldKind::None;
56425 
56426   if (VT.isVector())
56427     return AndOrSETCCFoldKind(AndOrSETCCFoldKind::NotAnd |
56428                               (isOperationLegal(ISD::ABS, OpVT)
56429                                    ? AndOrSETCCFoldKind::ABS
56430                                    : AndOrSETCCFoldKind::None));
56431 
56432   // Don't use `NotAnd`: even though `not` generally has shorter code size than
56433   // `add`, `add` can lower to LEA, which can save moves / spills. Any case where
56434   // `NotAnd` applies, `AddAnd` does as well.
56435   // TODO: Currently we lower (icmp eq/ne (and ~X, Y), 0) -> `test (not X), Y`;
56436   // if we change that to `andn Y, X`, it may be worth preferring `NotAnd` here.
56437   return AndOrSETCCFoldKind::AddAnd;
56438 }
56439 
56440 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
56441   EVT VT = Op.getValueType();
56442   bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
56443                              isa<ConstantSDNode>(Op.getOperand(1));
56444 
56445   // i16 is legal, but undesirable since i16 instruction encodings are longer
56446   // and some i16 instructions are slow.
56447   // 8-bit multiply-by-constant can usually be expanded to something cheaper
56448   // using LEA and/or other ALU ops.
56449   if (VT != MVT::i16 && !Is8BitMulByConstant)
56450     return false;
56451 
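  // Helper: true if Op's single use is a plain store to the same base pointer
  // as Load, i.e. the load/op/store sequence could fold into one
  // read-modify-write memory instruction. Promotion to i32 would break that
  // fold, so such cases are rejected below.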
56452   auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
56453     if (!Op.hasOneUse())
56454       return false;
56455     SDNode *User = *Op->use_begin();
56456     if (!ISD::isNormalStore(User))
56457       return false;
56458     auto *Ld = cast<LoadSDNode>(Load);
56459     auto *St = cast<StoreSDNode>(User);
56460     return Ld->getBasePtr() == St->getBasePtr();
56461   };
56462 
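  // Same idea for atomics: an atomic load whose only user is this op, feeding
  // an atomic store to the same address, may also be selected as a memory RMW.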
56463   auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
56464     if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
56465       return false;
56466     if (!Op.hasOneUse())
56467       return false;
56468     SDNode *User = *Op->use_begin();
56469     if (User->getOpcode() != ISD::ATOMIC_STORE)
56470       return false;
56471     auto *Ld = cast<AtomicSDNode>(Load);
56472     auto *St = cast<AtomicSDNode>(User);
56473     return Ld->getBasePtr() == St->getBasePtr();
56474   };
56475 
56476   bool Commute = false;
56477   switch (Op.getOpcode()) {
56478   default: return false;
56479   case ISD::SIGN_EXTEND:
56480   case ISD::ZERO_EXTEND:
56481   case ISD::ANY_EXTEND:
56482     break;
56483   case ISD::SHL:
56484   case ISD::SRA:
56485   case ISD::SRL: {
56486     SDValue N0 = Op.getOperand(0);
56487     // Look out for (store (shl (load), x)).
56488     if (X86::mayFoldLoad(N0, Subtarget) && IsFoldableRMW(N0, Op))
56489       return false;
56490     break;
56491   }
56492   case ISD::ADD:
56493   case ISD::MUL:
56494   case ISD::AND:
56495   case ISD::OR:
56496   case ISD::XOR:
56497     Commute = true;
56498     [[fallthrough]];
56499   case ISD::SUB: {
56500     SDValue N0 = Op.getOperand(0);
56501     SDValue N1 = Op.getOperand(1);
56502     // Avoid disabling potential load folding opportunities.
56503     if (X86::mayFoldLoad(N1, Subtarget) &&
56504         (!Commute || !isa<ConstantSDNode>(N0) ||
56505          (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
56506       return false;
56507     if (X86::mayFoldLoad(N0, Subtarget) &&
56508         ((Commute && !isa<ConstantSDNode>(N1)) ||
56509          (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
56510       return false;
56511     if (IsFoldableAtomicRMW(N0, Op) ||
56512         (Commute && IsFoldableAtomicRMW(N1, Op)))
56513       return false;
56514   }
56515   }
56516 
56517   PVT = MVT::i32;
56518   return true;
56519 }
56520 
56521 //===----------------------------------------------------------------------===//
56522 //                           X86 Inline Assembly Support
56523 //===----------------------------------------------------------------------===//
56524 
56525 // Helper to match a string separated by whitespace.
56526 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
56527   S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
56528 
56529   for (StringRef Piece : Pieces) {
56530     if (!S.starts_with(Piece)) // Check if the piece matches.
56531       return false;
56532 
56533     S = S.substr(Piece.size());
56534     StringRef::size_type Pos = S.find_first_not_of(" \t");
56535     if (Pos == 0) // We matched a prefix.
56536       return false;
56537 
56538     S = S.substr(Pos);
56539   }
56540 
56541   return S.empty();
56542 }
56543 
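// Returns true if the split constraint list contains the standard flag
// clobbers ("~{cc}", "~{flags}", "~{fpsr}", plus "~{dirflag}" for the
// four-entry form), so rewriting the asm as an intrinsic loses no other
// side effects.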
56544 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
56545 
56546   if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
56547     if (llvm::is_contained(AsmPieces, "~{cc}") &&
56548         llvm::is_contained(AsmPieces, "~{flags}") &&
56549         llvm::is_contained(AsmPieces, "~{fpsr}")) {
56550 
56551       if (AsmPieces.size() == 3)
56552         return true;
56553       else if (llvm::is_contained(AsmPieces, "~{dirflag}"))
56554         return true;
56555     }
56556   }
56557   return false;
56558 }
56559 
56560 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
56561   InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
56562 
56563   const std::string &AsmStr = IA->getAsmString();
56564 
56565   IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
56566   if (!Ty || Ty->getBitWidth() % 16 != 0)
56567     return false;
56568 
56569   // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
56570   SmallVector<StringRef, 4> AsmPieces;
56571   SplitString(AsmStr, AsmPieces, ";\n");
56572 
56573   switch (AsmPieces.size()) {
56574   default: return false;
56575   case 1:
56576     // FIXME: this should verify that we are targeting a 486 or better.  If not,
56577     // we will turn this bswap into something that will be lowered to logical
56578     // ops instead of emitting the bswap asm.  For now, we don't support 486 or
56579     // lower so don't worry about this.
56580     // bswap $0
56581     if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
56582         matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
56583         matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
56584         matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
56585         matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
56586         matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
56587       // No need to check constraints; nothing other than the equivalent of
56588       // "=r,0" would be valid here.
56589       return IntrinsicLowering::LowerToByteSwap(CI);
56590     }
56591 
56592     // rorw $$8, ${0:w}  -->  llvm.bswap.i16
56593     if (CI->getType()->isIntegerTy(16) &&
56594         IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
56595         (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
56596          matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
56597       AsmPieces.clear();
56598       StringRef ConstraintsStr = IA->getConstraintString();
56599       SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
56600       array_pod_sort(AsmPieces.begin(), AsmPieces.end());
56601       if (clobbersFlagRegisters(AsmPieces))
56602         return IntrinsicLowering::LowerToByteSwap(CI);
56603     }
56604     break;
56605   case 3:
56606     if (CI->getType()->isIntegerTy(32) &&
56607         IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
56608         matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
56609         matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
56610         matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
56611       AsmPieces.clear();
56612       StringRef ConstraintsStr = IA->getConstraintString();
56613       SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
56614       array_pod_sort(AsmPieces.begin(), AsmPieces.end());
56615       if (clobbersFlagRegisters(AsmPieces))
56616         return IntrinsicLowering::LowerToByteSwap(CI);
56617     }
56618 
56619     if (CI->getType()->isIntegerTy(64)) {
56620       InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
56621       if (Constraints.size() >= 2 &&
56622           Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
56623           Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
56624         // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
56625         if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
56626             matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
56627             matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
56628           return IntrinsicLowering::LowerToByteSwap(CI);
56629       }
56630     }
56631     break;
56632   }
56633   return false;
56634 }
56635 
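// Map an "{@cc<cond>}" flag-output constraint string to the corresponding X86
// condition code, or COND_INVALID if it is not such a constraint.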
56636 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
56637   X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
56638                            .Case("{@cca}", X86::COND_A)
56639                            .Case("{@ccae}", X86::COND_AE)
56640                            .Case("{@ccb}", X86::COND_B)
56641                            .Case("{@ccbe}", X86::COND_BE)
56642                            .Case("{@ccc}", X86::COND_B)
56643                            .Case("{@cce}", X86::COND_E)
56644                            .Case("{@ccz}", X86::COND_E)
56645                            .Case("{@ccg}", X86::COND_G)
56646                            .Case("{@ccge}", X86::COND_GE)
56647                            .Case("{@ccl}", X86::COND_L)
56648                            .Case("{@ccle}", X86::COND_LE)
56649                            .Case("{@ccna}", X86::COND_BE)
56650                            .Case("{@ccnae}", X86::COND_B)
56651                            .Case("{@ccnb}", X86::COND_AE)
56652                            .Case("{@ccnbe}", X86::COND_A)
56653                            .Case("{@ccnc}", X86::COND_AE)
56654                            .Case("{@ccne}", X86::COND_NE)
56655                            .Case("{@ccnz}", X86::COND_NE)
56656                            .Case("{@ccng}", X86::COND_LE)
56657                            .Case("{@ccnge}", X86::COND_L)
56658                            .Case("{@ccnl}", X86::COND_GE)
56659                            .Case("{@ccnle}", X86::COND_G)
56660                            .Case("{@ccno}", X86::COND_NO)
56661                            .Case("{@ccnp}", X86::COND_NP)
56662                            .Case("{@ccns}", X86::COND_NS)
56663                            .Case("{@cco}", X86::COND_O)
56664                            .Case("{@ccp}", X86::COND_P)
56665                            .Case("{@ccs}", X86::COND_S)
56666                            .Default(X86::COND_INVALID);
56667   return Cond;
56668 }
56669 
56670 /// Given a constraint letter, return the type of constraint for this target.
56671 X86TargetLowering::ConstraintType
56672 X86TargetLowering::getConstraintType(StringRef Constraint) const {
56673   if (Constraint.size() == 1) {
56674     switch (Constraint[0]) {
56675     case 'R':
56676     case 'q':
56677     case 'Q':
56678     case 'f':
56679     case 't':
56680     case 'u':
56681     case 'y':
56682     case 'x':
56683     case 'v':
56684     case 'l':
56685     case 'k': // AVX512 masking registers.
56686       return C_RegisterClass;
56687     case 'a':
56688     case 'b':
56689     case 'c':
56690     case 'd':
56691     case 'S':
56692     case 'D':
56693     case 'A':
56694       return C_Register;
56695     case 'I':
56696     case 'J':
56697     case 'K':
56698     case 'N':
56699     case 'G':
56700     case 'L':
56701     case 'M':
56702       return C_Immediate;
56703     case 'C':
56704     case 'e':
56705     case 'Z':
56706       return C_Other;
56707     default:
56708       break;
56709     }
56710   }
56711   else if (Constraint.size() == 2) {
56712     switch (Constraint[0]) {
56713     default:
56714       break;
56715     case 'W':
56716       if (Constraint[1] != 's')
56717         break;
56718       return C_Other;
56719     case 'Y':
56720       switch (Constraint[1]) {
56721       default:
56722         break;
56723       case 'z':
56724         return C_Register;
56725       case 'i':
56726       case 'm':
56727       case 'k':
56728       case 't':
56729       case '2':
56730         return C_RegisterClass;
56731       }
56732     }
56733   } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
56734     return C_Other;
56735   return TargetLowering::getConstraintType(Constraint);
56736 }
56737 
56738 /// Examine constraint type and operand type and determine a weight value.
56739 /// This object must already have been set up with the operand type
56740 /// and the current alternative constraint selected.
56741 TargetLowering::ConstraintWeight
56742 X86TargetLowering::getSingleConstraintMatchWeight(
56743     AsmOperandInfo &Info, const char *Constraint) const {
56744   ConstraintWeight Wt = CW_Invalid;
56745   Value *CallOperandVal = Info.CallOperandVal;
56746   // If we don't have a value, we can't do a match,
56747   // but allow it at the lowest weight.
56748   if (!CallOperandVal)
56749     return CW_Default;
56750   Type *Ty = CallOperandVal->getType();
56751   // Look at the constraint type.
56752   switch (*Constraint) {
56753   default:
56754     Wt = TargetLowering::getSingleConstraintMatchWeight(Info, Constraint);
56755     [[fallthrough]];
56756   case 'R':
56757   case 'q':
56758   case 'Q':
56759   case 'a':
56760   case 'b':
56761   case 'c':
56762   case 'd':
56763   case 'S':
56764   case 'D':
56765   case 'A':
56766     if (CallOperandVal->getType()->isIntegerTy())
56767       Wt = CW_SpecificReg;
56768     break;
56769   case 'f':
56770   case 't':
56771   case 'u':
56772     if (Ty->isFloatingPointTy())
56773       Wt = CW_SpecificReg;
56774     break;
56775   case 'y':
56776     if (Ty->isX86_MMXTy() && Subtarget.hasMMX())
56777       Wt = CW_SpecificReg;
56778     break;
56779   case 'Y':
56780     if (StringRef(Constraint).size() != 2)
56781       break;
56782     switch (Constraint[1]) {
56783     default:
56784       return CW_Invalid;
56785     // XMM0
56786     case 'z':
56787       if (((Ty->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
56788           ((Ty->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()) ||
56789           ((Ty->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()))
56790         return CW_SpecificReg;
56791       return CW_Invalid;
56792     // Conditional OpMask regs (AVX512)
56793     case 'k':
56794       if ((Ty->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
56795         return CW_Register;
56796       return CW_Invalid;
56797     // Any MMX reg
56798     case 'm':
56799       if (Ty->isX86_MMXTy() && Subtarget.hasMMX())
56800         return Wt;
56801       return CW_Invalid;
56802     // Any SSE reg when ISA >= SSE2, same as 'x'
56803     case 'i':
56804     case 't':
56805     case '2':
56806       if (!Subtarget.hasSSE2())
56807         return CW_Invalid;
56808       break;
56809     }
56810     break;
56811   case 'v':
56812     if ((Ty->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
56813       Wt = CW_Register;
56814     [[fallthrough]];
56815   case 'x':
56816     if (((Ty->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
56817         ((Ty->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
56818       Wt = CW_Register;
56819     break;
56820   case 'k':
56821     // Enable conditional vector operations using %k<#> registers.
56822     if ((Ty->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
56823       Wt = CW_Register;
56824     break;
56825   case 'I':
56826     if (auto *C = dyn_cast<ConstantInt>(Info.CallOperandVal))
56827       if (C->getZExtValue() <= 31)
56828         Wt = CW_Constant;
56829     break;
56830   case 'J':
56831     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56832       if (C->getZExtValue() <= 63)
56833         Wt = CW_Constant;
56834     break;
56835   case 'K':
56836     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56837       if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
56838         Wt = CW_Constant;
56839     break;
56840   case 'L':
56841     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56842       if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
56843         Wt = CW_Constant;
56844     break;
56845   case 'M':
56846     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56847       if (C->getZExtValue() <= 3)
56848         Wt = CW_Constant;
56849     break;
56850   case 'N':
56851     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56852       if (C->getZExtValue() <= 0xff)
56853         Wt = CW_Constant;
56854     break;
56855   case 'G':
56856   case 'C':
56857     if (isa<ConstantFP>(CallOperandVal))
56858       Wt = CW_Constant;
56859     break;
56860   case 'e':
56861     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56862       if ((C->getSExtValue() >= -0x80000000LL) &&
56863           (C->getSExtValue() <= 0x7fffffffLL))
56864         Wt = CW_Constant;
56865     break;
56866   case 'Z':
56867     if (auto *C = dyn_cast<ConstantInt>(CallOperandVal))
56868       if (C->getZExtValue() <= 0xffffffff)
56869         Wt = CW_Constant;
56870     break;
56871   }
56872   return Wt;
56873 }
56874 
56875 /// Try to replace an X constraint, which matches anything, with another that
56876 /// has more specific requirements based on the type of the corresponding
56877 /// operand.
56878 const char *X86TargetLowering::
56879 LowerXConstraint(EVT ConstraintVT) const {
56880   // FP X constraints get lowered to SSE1/2 registers if available, otherwise
56881   // 'f' like normal targets.
56882   if (ConstraintVT.isFloatingPoint()) {
56883     if (Subtarget.hasSSE1())
56884       return "x";
56885   }
56886 
56887   return TargetLowering::LowerXConstraint(ConstraintVT);
56888 }
56889 
56890 // Lower @cc targets via setcc.
56891 SDValue X86TargetLowering::LowerAsmOutputForConstraint(
56892     SDValue &Chain, SDValue &Glue, const SDLoc &DL,
56893     const AsmOperandInfo &OpInfo, SelectionDAG &DAG) const {
56894   X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
56895   if (Cond == X86::COND_INVALID)
56896     return SDValue();
56897   // Check that return type is valid.
56898   if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
56899       OpInfo.ConstraintVT.getSizeInBits() < 8)
56900     report_fatal_error("Glue output operand is of invalid type");
56901 
56902   // Get EFLAGS register. Only update chain when copyfrom is glued.
56903   if (Glue.getNode()) {
56904     Glue = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Glue);
56905     Chain = Glue.getValue(1);
56906   } else
56907     Glue = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
56908   // Extract CC code.
56909   SDValue CC = getSETCC(Cond, Glue, DL, DAG);
56910   // Extend to 32-bits
56911   SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
56912 
56913   return Result;
56914 }
56915 
56916 /// Lower the specified operand into the Ops vector.
56917 /// If it is invalid, don't add anything to Ops.
56918 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
56919                                                      StringRef Constraint,
56920                                                      std::vector<SDValue> &Ops,
56921                                                      SelectionDAG &DAG) const {
56922   SDValue Result;
56923   char ConstraintLetter = Constraint[0];
56924   switch (ConstraintLetter) {
56925   default: break;
56926   case 'I':
56927     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56928       if (C->getZExtValue() <= 31) {
56929         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56930                                        Op.getValueType());
56931         break;
56932       }
56933     }
56934     return;
56935   case 'J':
56936     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56937       if (C->getZExtValue() <= 63) {
56938         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56939                                        Op.getValueType());
56940         break;
56941       }
56942     }
56943     return;
56944   case 'K':
56945     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56946       if (isInt<8>(C->getSExtValue())) {
56947         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56948                                        Op.getValueType());
56949         break;
56950       }
56951     }
56952     return;
56953   case 'L':
56954     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56955       if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
56956           (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
56957         Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
56958                                        Op.getValueType());
56959         break;
56960       }
56961     }
56962     return;
56963   case 'M':
56964     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56965       if (C->getZExtValue() <= 3) {
56966         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56967                                        Op.getValueType());
56968         break;
56969       }
56970     }
56971     return;
56972   case 'N':
56973     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56974       if (C->getZExtValue() <= 255) {
56975         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56976                                        Op.getValueType());
56977         break;
56978       }
56979     }
56980     return;
56981   case 'O':
56982     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56983       if (C->getZExtValue() <= 127) {
56984         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
56985                                        Op.getValueType());
56986         break;
56987       }
56988     }
56989     return;
56990   case 'e': {
56991     // 32-bit signed value
56992     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
56993       if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
56994                                            C->getSExtValue())) {
56995         // Widen to 64 bits here to get it sign extended.
56996         Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
56997         break;
56998       }
56999     // FIXME gcc accepts some relocatable values here too, but only in certain
57000     // memory models; it's complicated.
57001     }
57002     return;
57003   }
57004   case 'W': {
57005     assert(Constraint[1] == 's');
57006     // Op is a BlockAddressSDNode or a GlobalAddressSDNode with an optional
57007     // offset.
57008     if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
57009       Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
57010                                               BA->getValueType(0)));
57011     } else {
57012       int64_t Offset = 0;
57013       if (Op->getOpcode() == ISD::ADD &&
57014           isa<ConstantSDNode>(Op->getOperand(1))) {
57015         Offset = cast<ConstantSDNode>(Op->getOperand(1))->getSExtValue();
57016         Op = Op->getOperand(0);
57017       }
57018       if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
57019         Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
57020                                                  GA->getValueType(0), Offset));
57021     }
57022     return;
57023   }
57024   case 'Z': {
57025     // 32-bit unsigned value
57026     if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
57027       if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
57028                                            C->getZExtValue())) {
57029         Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
57030                                        Op.getValueType());
57031         break;
57032       }
57033     }
57034     // FIXME gcc accepts some relocatable values here too, but only in certain
57035     // memory models; it's complicated.
57036     return;
57037   }
57038   case 'i': {
57039     // Literal immediates are always ok.
57040     if (auto *CST = dyn_cast<ConstantSDNode>(Op)) {
57041       bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
57042       BooleanContent BCont = getBooleanContents(MVT::i64);
57043       ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
57044                                     : ISD::SIGN_EXTEND;
57045       int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
57046                                                   : CST->getSExtValue();
57047       Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
57048       break;
57049     }
57050 
57051     // In any sort of PIC mode addresses need to be computed at runtime by
57052     // adding in a register or some sort of table lookup.  These can't
57053     // be used as immediates. BlockAddresses and BasicBlocks are fine though.
57054     if ((Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC()) &&
57055         !(isa<BlockAddressSDNode>(Op) || isa<BasicBlockSDNode>(Op)))
57056       return;
57057 
57058     // If we are in non-pic codegen mode, we allow the address of a global (with
57059     // an optional displacement) to be used with 'i'.
57060     if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
57061       // If we require an extra load to get this address, as in PIC mode, we
57062       // can't accept it.
57063       if (isGlobalStubReference(
57064               Subtarget.classifyGlobalReference(GA->getGlobal())))
57065         return;
57066     break;
57067   }
57068   }
57069 
57070   if (Result.getNode()) {
57071     Ops.push_back(Result);
57072     return;
57073   }
57074   return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
57075 }
57076 
57077 /// Check if \p RC is a general purpose register class.
57078 /// I.e., GR* or one of their variant.
57079 static bool isGRClass(const TargetRegisterClass &RC) {
57080   return RC.hasSuperClassEq(&X86::GR8RegClass) ||
57081          RC.hasSuperClassEq(&X86::GR16RegClass) ||
57082          RC.hasSuperClassEq(&X86::GR32RegClass) ||
57083          RC.hasSuperClassEq(&X86::GR64RegClass) ||
57084          RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
57085 }
57086 
57087 /// Check if \p RC is a vector register class.
57088 /// I.e., FR* / VR* or one of their variant.
57089 static bool isFRClass(const TargetRegisterClass &RC) {
57090   return RC.hasSuperClassEq(&X86::FR16XRegClass) ||
57091          RC.hasSuperClassEq(&X86::FR32XRegClass) ||
57092          RC.hasSuperClassEq(&X86::FR64XRegClass) ||
57093          RC.hasSuperClassEq(&X86::VR128XRegClass) ||
57094          RC.hasSuperClassEq(&X86::VR256XRegClass) ||
57095          RC.hasSuperClassEq(&X86::VR512RegClass);
57096 }
57097 
57098 /// Check if \p RC is a mask register class.
57099 /// I.e., VK* or one of their variant.
57100 static bool isVKClass(const TargetRegisterClass &RC) {
57101   return RC.hasSuperClassEq(&X86::VK1RegClass) ||
57102          RC.hasSuperClassEq(&X86::VK2RegClass) ||
57103          RC.hasSuperClassEq(&X86::VK4RegClass) ||
57104          RC.hasSuperClassEq(&X86::VK8RegClass) ||
57105          RC.hasSuperClassEq(&X86::VK16RegClass) ||
57106          RC.hasSuperClassEq(&X86::VK32RegClass) ||
57107          RC.hasSuperClassEq(&X86::VK64RegClass);
57108 }
57109 
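// Resolve a register constraint (single letter or "Y<c>") to a specific
// register and/or register class appropriate for the requested value type and
// the available subtarget features.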
57110 std::pair<unsigned, const TargetRegisterClass *>
57111 X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
57112                                                 StringRef Constraint,
57113                                                 MVT VT) const {
57114   // First, see if this is a constraint that directly corresponds to an LLVM
57115   // register class.
57116   if (Constraint.size() == 1) {
57117     // GCC Constraint Letters
57118     switch (Constraint[0]) {
57119     default: break;
57120     // 'A' means [ER]AX + [ER]DX.
57121     case 'A':
57122       if (Subtarget.is64Bit())
57123         return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
57124       assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
57125              "Expecting 64, 32 or 16 bit subtarget");
57126       return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
57127 
57128       // TODO: Slight differences here in allocation order and leaving
57129       // RIP in the class. Do they matter any more here than they do
57130       // in the normal allocation?
57131     case 'k':
57132       if (Subtarget.hasAVX512()) {
57133         if (VT == MVT::v1i1 || VT == MVT::i1)
57134           return std::make_pair(0U, &X86::VK1RegClass);
57135         if (VT == MVT::v8i1 || VT == MVT::i8)
57136           return std::make_pair(0U, &X86::VK8RegClass);
57137         if (VT == MVT::v16i1 || VT == MVT::i16)
57138           return std::make_pair(0U, &X86::VK16RegClass);
57139       }
57140       if (Subtarget.hasBWI()) {
57141         if (VT == MVT::v32i1 || VT == MVT::i32)
57142           return std::make_pair(0U, &X86::VK32RegClass);
57143         if (VT == MVT::v64i1 || VT == MVT::i64)
57144           return std::make_pair(0U, &X86::VK64RegClass);
57145       }
57146       break;
57147     case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
57148       if (Subtarget.is64Bit()) {
57149         if (VT == MVT::i8 || VT == MVT::i1)
57150           return std::make_pair(0U, &X86::GR8_NOREX2RegClass);
57151         if (VT == MVT::i16)
57152           return std::make_pair(0U, &X86::GR16_NOREX2RegClass);
57153         if (VT == MVT::i32 || VT == MVT::f32)
57154           return std::make_pair(0U, &X86::GR32_NOREX2RegClass);
57155         if (VT != MVT::f80 && !VT.isVector())
57156           return std::make_pair(0U, &X86::GR64_NOREX2RegClass);
57157         break;
57158       }
57159       [[fallthrough]];
57160       // 32-bit fallthrough
57161     case 'Q':   // Q_REGS
57162       if (VT == MVT::i8 || VT == MVT::i1)
57163         return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
57164       if (VT == MVT::i16)
57165         return std::make_pair(0U, &X86::GR16_ABCDRegClass);
57166       if (VT == MVT::i32 || VT == MVT::f32 ||
57167           (!VT.isVector() && !Subtarget.is64Bit()))
57168         return std::make_pair(0U, &X86::GR32_ABCDRegClass);
57169       if (VT != MVT::f80 && !VT.isVector())
57170         return std::make_pair(0U, &X86::GR64_ABCDRegClass);
57171       break;
57172     case 'r':   // GENERAL_REGS
57173     case 'l':   // INDEX_REGS
57174       if (VT == MVT::i8 || VT == MVT::i1)
57175         return std::make_pair(0U, &X86::GR8_NOREX2RegClass);
57176       if (VT == MVT::i16)
57177         return std::make_pair(0U, &X86::GR16_NOREX2RegClass);
57178       if (VT == MVT::i32 || VT == MVT::f32 ||
57179           (!VT.isVector() && !Subtarget.is64Bit()))
57180         return std::make_pair(0U, &X86::GR32_NOREX2RegClass);
57181       if (VT != MVT::f80 && !VT.isVector())
57182         return std::make_pair(0U, &X86::GR64_NOREX2RegClass);
57183       break;
57184     case 'R':   // LEGACY_REGS
57185       if (VT == MVT::i8 || VT == MVT::i1)
57186         return std::make_pair(0U, &X86::GR8_NOREXRegClass);
57187       if (VT == MVT::i16)
57188         return std::make_pair(0U, &X86::GR16_NOREXRegClass);
57189       if (VT == MVT::i32 || VT == MVT::f32 ||
57190           (!VT.isVector() && !Subtarget.is64Bit()))
57191         return std::make_pair(0U, &X86::GR32_NOREXRegClass);
57192       if (VT != MVT::f80 && !VT.isVector())
57193         return std::make_pair(0U, &X86::GR64_NOREXRegClass);
57194       break;
57195     case 'f':  // FP Stack registers.
57196       // If SSE is enabled for this VT, use f80 to ensure the isel moves the
57197       // value to the correct fpstack register class.
57198       if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
57199         return std::make_pair(0U, &X86::RFP32RegClass);
57200       if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
57201         return std::make_pair(0U, &X86::RFP64RegClass);
57202       if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80)
57203         return std::make_pair(0U, &X86::RFP80RegClass);
57204       break;
57205     case 'y':   // MMX_REGS if MMX allowed.
57206       if (!Subtarget.hasMMX()) break;
57207       return std::make_pair(0U, &X86::VR64RegClass);
57208     case 'v':
57209     case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
57210       if (!Subtarget.hasSSE1()) break;
57211       bool VConstraint = (Constraint[0] == 'v');
57212 
57213       switch (VT.SimpleTy) {
57214       default: break;
57215       // Scalar SSE types.
57216       case MVT::f16:
57217         if (VConstraint && Subtarget.hasFP16())
57218           return std::make_pair(0U, &X86::FR16XRegClass);
57219         break;
57220       case MVT::f32:
57221       case MVT::i32:
57222         if (VConstraint && Subtarget.hasVLX())
57223           return std::make_pair(0U, &X86::FR32XRegClass);
57224         return std::make_pair(0U, &X86::FR32RegClass);
57225       case MVT::f64:
57226       case MVT::i64:
57227         if (VConstraint && Subtarget.hasVLX())
57228           return std::make_pair(0U, &X86::FR64XRegClass);
57229         return std::make_pair(0U, &X86::FR64RegClass);
57230       case MVT::i128:
57231         if (Subtarget.is64Bit()) {
57232           if (VConstraint && Subtarget.hasVLX())
57233             return std::make_pair(0U, &X86::VR128XRegClass);
57234           return std::make_pair(0U, &X86::VR128RegClass);
57235         }
57236         break;
57237       // Vector types and fp128.
57238       case MVT::v8f16:
57239         if (!Subtarget.hasFP16())
57240           break;
57241         if (VConstraint)
57242           return std::make_pair(0U, &X86::VR128XRegClass);
57243         return std::make_pair(0U, &X86::VR128RegClass);
57244       case MVT::v8bf16:
57245         if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57246           break;
57247         if (VConstraint)
57248           return std::make_pair(0U, &X86::VR128XRegClass);
57249         return std::make_pair(0U, &X86::VR128RegClass);
57250       case MVT::f128:
57251       case MVT::v16i8:
57252       case MVT::v8i16:
57253       case MVT::v4i32:
57254       case MVT::v2i64:
57255       case MVT::v4f32:
57256       case MVT::v2f64:
57257         if (VConstraint && Subtarget.hasVLX())
57258           return std::make_pair(0U, &X86::VR128XRegClass);
57259         return std::make_pair(0U, &X86::VR128RegClass);
57260       // AVX types.
57261       case MVT::v16f16:
57262         if (!Subtarget.hasFP16())
57263           break;
57264         if (VConstraint)
57265           return std::make_pair(0U, &X86::VR256XRegClass);
57266         return std::make_pair(0U, &X86::VR256RegClass);
57267       case MVT::v16bf16:
57268         if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57269           break;
57270         if (VConstraint)
57271           return std::make_pair(0U, &X86::VR256XRegClass);
57272         return std::make_pair(0U, &X86::VR256RegClass);
57273       case MVT::v32i8:
57274       case MVT::v16i16:
57275       case MVT::v8i32:
57276       case MVT::v4i64:
57277       case MVT::v8f32:
57278       case MVT::v4f64:
57279         if (VConstraint && Subtarget.hasVLX())
57280           return std::make_pair(0U, &X86::VR256XRegClass);
57281         if (Subtarget.hasAVX())
57282           return std::make_pair(0U, &X86::VR256RegClass);
57283         break;
57284       case MVT::v32f16:
57285         if (!Subtarget.hasFP16())
57286           break;
57287         if (VConstraint)
57288           return std::make_pair(0U, &X86::VR512RegClass);
57289         return std::make_pair(0U, &X86::VR512_0_15RegClass);
57290       case MVT::v32bf16:
57291         if (!Subtarget.hasBF16())
57292           break;
57293         if (VConstraint)
57294           return std::make_pair(0U, &X86::VR512RegClass);
57295         return std::make_pair(0U, &X86::VR512_0_15RegClass);
57296       case MVT::v64i8:
57297       case MVT::v32i16:
57298       case MVT::v8f64:
57299       case MVT::v16f32:
57300       case MVT::v16i32:
57301       case MVT::v8i64:
57302         if (!Subtarget.hasAVX512()) break;
57303         if (VConstraint)
57304           return std::make_pair(0U, &X86::VR512RegClass);
57305         return std::make_pair(0U, &X86::VR512_0_15RegClass);
57306       }
57307       break;
57308     }
57309   } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
57310     switch (Constraint[1]) {
57311     default:
57312       break;
57313     case 'i':
57314     case 't':
57315     case '2':
57316       return getRegForInlineAsmConstraint(TRI, "x", VT);
57317     case 'm':
57318       if (!Subtarget.hasMMX()) break;
57319       return std::make_pair(0U, &X86::VR64RegClass);
57320     case 'z':
57321       if (!Subtarget.hasSSE1()) break;
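      // "Yz" pins the operand to xmm0/ymm0/zmm0. Illustrative use (an
      // assumption, not taken from this file): the implicit-XMM0 mask operand
      // of non-VEX BLENDVPS, e.g.
      //   asm("blendvps %2, %1, %0" : "+x"(dst) : "x"(src), "Yz"(mask));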
57322       switch (VT.SimpleTy) {
57323       default: break;
57324       // Scalar SSE types.
57325       case MVT::f16:
57326         if (!Subtarget.hasFP16())
57327           break;
57328         return std::make_pair(X86::XMM0, &X86::FR16XRegClass);
57329       case MVT::f32:
57330       case MVT::i32:
57331         return std::make_pair(X86::XMM0, &X86::FR32RegClass);
57332       case MVT::f64:
57333       case MVT::i64:
57334         return std::make_pair(X86::XMM0, &X86::FR64RegClass);
57335       case MVT::v8f16:
57336         if (!Subtarget.hasFP16())
57337           break;
57338         return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57339       case MVT::v8bf16:
57340         if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57341           break;
57342         return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57343       case MVT::f128:
57344       case MVT::v16i8:
57345       case MVT::v8i16:
57346       case MVT::v4i32:
57347       case MVT::v2i64:
57348       case MVT::v4f32:
57349       case MVT::v2f64:
57350         return std::make_pair(X86::XMM0, &X86::VR128RegClass);
57351       // AVX types.
57352       case MVT::v16f16:
57353         if (!Subtarget.hasFP16())
57354           break;
57355         return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57356       case MVT::v16bf16:
57357         if (!Subtarget.hasBF16() || !Subtarget.hasVLX())
57358           break;
57359         return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57360       case MVT::v32i8:
57361       case MVT::v16i16:
57362       case MVT::v8i32:
57363       case MVT::v4i64:
57364       case MVT::v8f32:
57365       case MVT::v4f64:
57366         if (Subtarget.hasAVX())
57367           return std::make_pair(X86::YMM0, &X86::VR256RegClass);
57368         break;
57369       case MVT::v32f16:
57370         if (!Subtarget.hasFP16())
57371           break;
57372         return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57373       case MVT::v32bf16:
57374         if (!Subtarget.hasBF16())
57375           break;
57376         return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57377       case MVT::v64i8:
57378       case MVT::v32i16:
57379       case MVT::v8f64:
57380       case MVT::v16f32:
57381       case MVT::v16i32:
57382       case MVT::v8i64:
57383         if (Subtarget.hasAVX512())
57384           return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
57385         break;
57386       }
57387       break;
57388     case 'k':
57389       // This register class doesn't allocate k0 for masked vector operations.
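      // For example (illustrative, an assumption): an i16 operand with the
      // "Yk" constraint is placed in VK16WM, i.e. any of k1-k7 but never k0,
      // because k0 used as a writemask means "no masking" in EVEX encodings.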
57390       if (Subtarget.hasAVX512()) {
57391         if (VT == MVT::v1i1 || VT == MVT::i1)
57392           return std::make_pair(0U, &X86::VK1WMRegClass);
57393         if (VT == MVT::v8i1 || VT == MVT::i8)
57394           return std::make_pair(0U, &X86::VK8WMRegClass);
57395         if (VT == MVT::v16i1 || VT == MVT::i16)
57396           return std::make_pair(0U, &X86::VK16WMRegClass);
57397       }
57398       if (Subtarget.hasBWI()) {
57399         if (VT == MVT::v32i1 || VT == MVT::i32)
57400           return std::make_pair(0U, &X86::VK32WMRegClass);
57401         if (VT == MVT::v64i1 || VT == MVT::i64)
57402           return std::make_pair(0U, &X86::VK64WMRegClass);
57403       }
57404       break;
57405     }
57406   }
57407 
57408   if (parseConstraintCode(Constraint) != X86::COND_INVALID)
57409     return std::make_pair(0U, &X86::GR32RegClass);
57410 
57411   // Use the default implementation in TargetLowering to convert the register
57412   // constraint into a member of a register class.
57413   std::pair<Register, const TargetRegisterClass*> Res;
57414   Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
57415 
57416   // Not found as a standard register?
57417   if (!Res.second) {
57418     // Only match x87 registers if the VT is one SelectionDAGBuilder can convert
57419     // to/from f80.
57420     if (VT == MVT::Other || VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80) {
57421       // Map the constraints "st(0)" through "st(7)" to the corresponding FP register.
57422       if (Constraint.size() == 7 && Constraint[0] == '{' &&
57423           tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
57424           Constraint[3] == '(' &&
57425           (Constraint[4] >= '0' && Constraint[4] <= '7') &&
57426           Constraint[5] == ')' && Constraint[6] == '}') {
57427         // st(7) is not allocatable and thus not a member of RFP80. Return a
57428         // singleton class for the cases where we have a reference to it.
57429         if (Constraint[4] == '7')
57430           return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
57431         return std::make_pair(X86::FP0 + Constraint[4] - '0',
57432                               &X86::RFP80RegClass);
57433       }
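      // Illustrative example (an assumption, not from this file): the clobber
      // in  asm volatile("fptan" ::: "st(1)")  arrives here as
      // Constraint == "{st(1)}" with VT == MVT::Other and maps to X86::FP1.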
57434 
57435       // GCC allows "st(0)" to be called just plain "st".
57436       if (StringRef("{st}").equals_insensitive(Constraint))
57437         return std::make_pair(X86::FP0, &X86::RFP80RegClass);
57438     }
57439 
57440     // flags -> EFLAGS
57441     if (StringRef("{flags}").equals_insensitive(Constraint))
57442       return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);
57443 
57444     // dirflag -> DF
57445     // Only allow for clobber.
57446     if (StringRef("{dirflag}").equals_insensitive(Constraint) &&
57447         VT == MVT::Other)
57448       return std::make_pair(X86::DF, &X86::DFCCRRegClass);
57449 
57450     // fpsr -> FPSW
57451     // Only allow for clobber.
57452     if (StringRef("{fpsr}").equals_insensitive(Constraint) && VT == MVT::Other)
57453       return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);
57454 
57455     return Res;
57456   }
57457 
57458   // Make sure it isn't a register that requires 64-bit mode.
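  // (Illustrative, an assumption: a constraint such as "{r8d}" has an encoding
  // value >= 8 and is rejected here when compiling for 32-bit x86.)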
57459   if (!Subtarget.is64Bit() &&
57460       (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
57461       TRI->getEncodingValue(Res.first) >= 8) {
57462     // Register requires REX prefix, but we're in 32-bit mode.
57463     return std::make_pair(0, nullptr);
57464   }
57465 
57466   // Make sure it isn't a register that requires AVX512.
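  // (Illustrative, an assumption: "{xmm16}" has bit 4 set in its encoding and
  // is rejected here unless AVX-512 is available.)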
57467   if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
57468       TRI->getEncodingValue(Res.first) & 0x10) {
57469     // Register requires EVEX prefix.
57470     return std::make_pair(0, nullptr);
57471   }
57472 
57473   // Otherwise, check to see if this is a register class of the wrong value
57474   // type.  For example, we want to map "{ax},i32" -> {eax}; we don't want it
57475   // to turn into {ax},{dx}.
57476   // MVT::Other is used to specify clobber names.
57477   if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
57478     return Res;   // Correct type already, nothing to do.
57479 
57480   // Get a matching integer register of the correct size, e.g. "ax" with
57481   // MVT::i32 should return "eax". This should even work for things like
57482   // getting 64-bit integer registers when given an f64 type.
57483   const TargetRegisterClass *Class = Res.second;
57484   // The generic code will match the first register class that contains the
57485   // given register. Thus, based on the ordering of the TableGen-generated
57486   // file, the "plain" GR classes might not come first.
57487   // Therefore, use a helper method to identify GR classes.
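  // Illustrative example (an assumption, not from this file): for
  //   asm("bswap %0" : "+a"(v))   // 'v' of type int
  // the generic lookup may resolve "{ax}" to AX in a 16-bit class; the code
  // below resizes that to EAX so the register matches the i32 operand.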
57488   if (isGRClass(*Class)) {
57489     unsigned Size = VT.getSizeInBits();
57490     if (Size == 1) Size = 8;
57491     if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
57492       return std::make_pair(0, nullptr);
57493     Register DestReg = getX86SubSuperRegister(Res.first, Size);
57494     if (DestReg.isValid()) {
57495       bool is64Bit = Subtarget.is64Bit();
57496       const TargetRegisterClass *RC =
57497           Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
57498         : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
57499         : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
57500         : /*Size == 64*/ (is64Bit ? &X86::GR64RegClass : nullptr);
57501       if (Size == 64 && !is64Bit) {
57502         // Model GCC's behavior here and select a fixed pair of 32-bit
57503         // registers.
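        // Illustrative example (an assumption): requesting {rax} for a 64-bit
        // operand while targeting i386 returns EAX with class GR32_AD, so the
        // value ends up split across the fixed EAX/EDX pair, as GCC does.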
57504         switch (DestReg) {
57505         case X86::RAX:
57506           return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
57507         case X86::RDX:
57508           return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
57509         case X86::RCX:
57510           return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
57511         case X86::RBX:
57512           return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
57513         case X86::RSI:
57514           return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
57515         case X86::RDI:
57516           return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
57517         case X86::RBP:
57518           return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
57519         default:
57520           return std::make_pair(0, nullptr);
57521         }
57522       }
57523       if (RC && RC->contains(DestReg))
57524         return std::make_pair(DestReg, RC);
57525       return Res;
57526     }
57527     // No register found/type mismatch.
57528     return std::make_pair(0, nullptr);
57529   } else if (isFRClass(*Class)) {
57530     // Handle references to XMM physical registers that got mapped into the
57531     // wrong class.  This can happen with constraints like {xmm0} where the
57532     // target independent register mapper will just pick the first match it can
57533     // find, ignoring the required type.
57534 
57535     // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
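    // Illustrative example (an assumption, not from this file): "{xmm0}" used
    // with a v4f32 operand may first match XMM0 in a scalar FP class; the
    // remapping below switches the class to VR128X to fit the vector type.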
57536     if (VT == MVT::f16)
57537       Res.second = &X86::FR16XRegClass;
57538     else if (VT == MVT::f32 || VT == MVT::i32)
57539       Res.second = &X86::FR32XRegClass;
57540     else if (VT == MVT::f64 || VT == MVT::i64)
57541       Res.second = &X86::FR64XRegClass;
57542     else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
57543       Res.second = &X86::VR128XRegClass;
57544     else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
57545       Res.second = &X86::VR256XRegClass;
57546     else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
57547       Res.second = &X86::VR512RegClass;
57548     else {
57549       // Type mismatch and not a clobber: Return an error.
57550       Res.first = 0;
57551       Res.second = nullptr;
57552     }
57553   } else if (isVKClass(*Class)) {
57554     if (VT == MVT::v1i1 || VT == MVT::i1)
57555       Res.second = &X86::VK1RegClass;
57556     else if (VT == MVT::v8i1 || VT == MVT::i8)
57557       Res.second = &X86::VK8RegClass;
57558     else if (VT == MVT::v16i1 || VT == MVT::i16)
57559       Res.second = &X86::VK16RegClass;
57560     else if (VT == MVT::v32i1 || VT == MVT::i32)
57561       Res.second = &X86::VK32RegClass;
57562     else if (VT == MVT::v64i1 || VT == MVT::i64)
57563       Res.second = &X86::VK64RegClass;
57564     else {
57565       // Type mismatch and not a clobber: Return an error.
57566       Res.first = 0;
57567       Res.second = nullptr;
57568     }
57569   }
57570 
57571   return Res;
57572 }
57573 
57574 bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
57575   // Integer division on x86 is expensive. However, when aggressively optimizing
57576   // for code size, we prefer to use a div instruction, as it is usually smaller
57577   // than the alternative sequence.
57578   // The exception to this is vector division. Since x86 doesn't have vector
57579   // integer division, leaving the division as-is is a loss even in terms of
57580   // size, because it will have to be scalarized, while the alternative code
57581   // sequence can be performed in vector form.
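  // Illustrative consequence (an assumption, not from this file): with minsize,
  // a scalar 'x / 10' keeps the div instruction instead of the magic-number
  // multiply, while a <4 x i32> divide by 10 is still expanded, because the
  // expansion stays in vector form whereas the div would be scalarized.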
57582   bool OptSize = Attr.hasFnAttr(Attribute::MinSize);
57583   return OptSize && !VT.isVector();
57584 }
57585 
57586 void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
57587   if (!Subtarget.is64Bit())
57588     return;
57589 
57590   // Update IsSplitCSR in X86MachineFunctionInfo.
57591   X86MachineFunctionInfo *AFI =
57592       Entry->getParent()->getInfo<X86MachineFunctionInfo>();
57593   AFI->setIsSplitCSR(true);
57594 }
57595 
57596 void X86TargetLowering::insertCopiesSplitCSR(
57597     MachineBasicBlock *Entry,
57598     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
57599   const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
57600   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
57601   if (!IStart)
57602     return;
57603 
57604   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
57605   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
57606   MachineBasicBlock::iterator MBBI = Entry->begin();
57607   for (const MCPhysReg *I = IStart; *I; ++I) {
57608     const TargetRegisterClass *RC = nullptr;
57609     if (X86::GR64RegClass.contains(*I))
57610       RC = &X86::GR64RegClass;
57611     else
57612       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
57613 
57614     Register NewVR = MRI->createVirtualRegister(RC);
57615     // Create copy from CSR to a virtual register.
57616     // FIXME: this currently does not emit CFI pseudo-instructions, it works
57617     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
57618     // nounwind. If we want to generalize this later, we may need to emit
57619     // CFI pseudo-instructions.
57620     assert(
57621         Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
57622         "Function should be nounwind in insertCopiesSplitCSR!");
57623     Entry->addLiveIn(*I);
57624     BuildMI(*Entry, MBBI, MIMetadata(), TII->get(TargetOpcode::COPY), NewVR)
57625         .addReg(*I);
57626 
57627     // Insert the copy-back instructions right before the terminator.
57628     for (auto *Exit : Exits)
57629       BuildMI(*Exit, Exit->getFirstTerminator(), MIMetadata(),
57630               TII->get(TargetOpcode::COPY), *I)
57631           .addReg(NewVR);
57632   }
57633 }
57634 
57635 bool X86TargetLowering::supportSwiftError() const {
57636   return Subtarget.is64Bit();
57637 }
57638 
57639 MachineInstr *
57640 X86TargetLowering::EmitKCFICheck(MachineBasicBlock &MBB,
57641                                  MachineBasicBlock::instr_iterator &MBBI,
57642                                  const TargetInstrInfo *TII) const {
57643   assert(MBBI->isCall() && MBBI->getCFIType() &&
57644          "Invalid call instruction for a KCFI check");
57645 
57646   MachineFunction &MF = *MBB.getParent();
57647   // If the call target is a memory operand, unfold it and use R11 for the
57648   // call, so KCFI_CHECK won't have to recompute the address.
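  // Illustrative transformation (an assumption, not from this file): a
  //   CALL64m <mem>
  // is rewritten into roughly
  //   R11 = MOV64rm <mem>
  //   CALL64r R11
  // so the KCFI_CHECK emitted below receives R11 as its target register.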
57649   switch (MBBI->getOpcode()) {
57650   case X86::CALL64m:
57651   case X86::CALL64m_NT:
57652   case X86::TAILJMPm64:
57653   case X86::TAILJMPm64_REX: {
57654     MachineBasicBlock::instr_iterator OrigCall = MBBI;
57655     SmallVector<MachineInstr *, 2> NewMIs;
57656     if (!TII->unfoldMemoryOperand(MF, *OrigCall, X86::R11, /*UnfoldLoad=*/true,
57657                                   /*UnfoldStore=*/false, NewMIs))
57658       report_fatal_error("Failed to unfold memory operand for a KCFI check");
57659     for (auto *NewMI : NewMIs)
57660       MBBI = MBB.insert(OrigCall, NewMI);
57661     assert(MBBI->isCall() &&
57662            "Unexpected instruction after memory operand unfolding");
57663     if (OrigCall->shouldUpdateCallSiteInfo())
57664       MF.moveCallSiteInfo(&*OrigCall, &*MBBI);
57665     MBBI->setCFIType(MF, OrigCall->getCFIType());
57666     OrigCall->eraseFromParent();
57667     break;
57668   }
57669   default:
57670     break;
57671   }
57672 
57673   MachineOperand &Target = MBBI->getOperand(0);
57674   Register TargetReg;
57675   switch (MBBI->getOpcode()) {
57676   case X86::CALL64r:
57677   case X86::CALL64r_NT:
57678   case X86::TAILJMPr64:
57679   case X86::TAILJMPr64_REX:
57680     assert(Target.isReg() && "Unexpected target operand for an indirect call");
57681     Target.setIsRenamable(false);
57682     TargetReg = Target.getReg();
57683     break;
57684   case X86::CALL64pcrel32:
57685   case X86::TAILJMPd64:
57686     assert(Target.isSymbol() && "Unexpected target operand for a direct call");
57687     // X86TargetLowering::EmitLoweredIndirectThunk always uses r11 for
57688     // 64-bit indirect thunk calls.
57689     assert(StringRef(Target.getSymbolName()).ends_with("_r11") &&
57690            "Unexpected register for an indirect thunk call");
57691     TargetReg = X86::R11;
57692     break;
57693   default:
57694     llvm_unreachable("Unexpected CFI call opcode");
57695     break;
57696   }
57697 
57698   return BuildMI(MBB, MBBI, MIMetadata(*MBBI), TII->get(X86::KCFI_CHECK))
57699       .addReg(TargetReg)
57700       .addImm(MBBI->getCFIType())
57701       .getInstr();
57702 }
57703 
57704 /// Returns true if stack probing through a function call is requested.
57705 bool X86TargetLowering::hasStackProbeSymbol(const MachineFunction &MF) const {
57706   return !getStackProbeSymbolName(MF).empty();
57707 }
57708 
57709 /// Returns true if stack probing through inline assembly is requested.
57710 bool X86TargetLowering::hasInlineStackProbe(const MachineFunction &MF) const {
57711 
57712   // No inline stack probes on Windows; it has its own mechanism.
57713   if (Subtarget.isOSWindows() ||
57714       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
57715     return false;
57716 
57717   // If the function specifically requests inline stack probes, emit them.
57718   if (MF.getFunction().hasFnAttribute("probe-stack"))
57719     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
57720            "inline-asm";
57721 
57722   return false;
57723 }
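// Illustrative IR (an assumption, not from this file): a function annotated
// with  attributes #0 = { "probe-stack"="inline-asm" }  makes this return
// true, selecting the inline probing sequence over a call to a probe function.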
57724 
57725 /// Returns the name of the symbol used to emit stack probes or the empty
57726 /// string if not applicable.
57727 StringRef
57728 X86TargetLowering::getStackProbeSymbolName(const MachineFunction &MF) const {
57729   // Inline stack probes disable the stack probe call.
57730   if (hasInlineStackProbe(MF))
57731     return "";
57732 
57733   // If the function specifically requests stack probes, emit them.
57734   if (MF.getFunction().hasFnAttribute("probe-stack"))
57735     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();
57736 
57737   // Generally, if we aren't on Windows, the platform ABI does not include
57738   // support for stack probes, so don't emit them.
57739   if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
57740       MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
57741     return "";
57742 
57743   // We need a stack probe to conform to the Windows ABI. Choose the right
57744   // symbol.
57745   if (Subtarget.is64Bit())
57746     return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
57747   return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
57748 }
57749 
57750 unsigned
57751 X86TargetLowering::getStackProbeSize(const MachineFunction &MF) const {
57752   // The default stack probe size is 4096 if the function has no
57753   // "stack-probe-size" attribute.
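  // Illustrative IR (an assumption): "stack-probe-size"="8192" raises the
  // probing interval to two 4 KiB pages; without the attribute, 4096 is used.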
57754   return MF.getFunction().getFnAttributeAsParsedInteger("stack-probe-size",
57755                                                         4096);
57756 }
57757 
57758 Align X86TargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
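  // Illustrative (an assumption): -x86-experimental-pref-innermost-loop-alignment=5
  // aligns innermost loops to 1 << 5 = 32 bytes; other loops keep the default.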
57759   if (ML && ML->isInnermost() &&
57760       ExperimentalPrefInnermostLoopAlignment.getNumOccurrences())
57761     return Align(1ULL << ExperimentalPrefInnermostLoopAlignment);
57762   return TargetLowering::getPrefLoopAlignment();
57763 }
57764