//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

#include "PPCISelLowering.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCCCState.h"
#include "PPCCallingConv.h"
#include "PPCFrameLowering.h"
#include "PPCInstrInfo.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCPerfectShuffle.h"
#include "PPCRegisterInfo.h"
#include "PPCSubtarget.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSymbolXCOFF.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <list>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
cl::desc("use absolute jump tables on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocations probed");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to the customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
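  // For example, lwzu loads a word and updates its base register in the same
  // instruction, folding the pointer increment into the memory access.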
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
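  // For example, on 32-bit targets an i64 add is legalized into an ADDC of
  // the low halves followed by an ADDE of the high halves.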
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9.
  // On P9 we may use a hardware instruction to compute the remainder.
  // When the result of both the remainder and the division is required, it is
  // more efficient to compute the remainder from the result of the division
  // rather than use the remainder instruction. The instructions are legalized
  // directly because the DivRemPairs pass performs the transformation at the
  // IR level.
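  // For example, when both X / Y and X % Y are needed, the remainder is
  // rewritten at the IR level as X - (X / Y) * Y so only one divide remains.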
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // Handle constrained floating-point operations on scalar types.
  // TODO: Handle SPE-specific operations.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  if (Subtarget.hasVSX())
    setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f64, Legal);

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA  , MVT::f64, Expand);
    setOperationAction(ISD::FMA  , MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA  , MVT::f64, Legal);
    setOperationAction(ISD::FMA  , MVT::f32, Legal);
  }

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
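  // (Roughly, the custom i64 lowering moves the scalar into a vector
  // register, byte-swaps it with xxbrd, and moves it back out; a sketch of
  // the intent, not the exact node sequence.)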
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  if (Subtarget.hasP9Vector())
    setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
    setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

  // PowerPC does not have BRCOND, which requires a SetCC.
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling; it is a light-weight setjmp/longjmp replacement
  // used to support continuations, user-level threading, and so on. As a
  // result, no other SjLj exception interfaces are implemented, so please
  // don't build your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY            , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
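  // (i.e. the decrementing-CTR hardware-loop intrinsics, whose i1 result
  // feeds the loop's back-edge branch.)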
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  if (Subtarget.has64BitSupport()) {
    // Targets with 64-bit support also have instructions for converting
    // between i64 and fp.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64())
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    } else
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
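    // (The *_PARTS nodes take the value split across two registers plus a
    // shift amount and produce the shifted halves, avoiding a libcall.)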
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      }
      else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      }
      else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL,  VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT,  VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    for (auto VT : {MVT::v2i64, MVT::v4i32, MVT::v8i16, MVT::v16i8})
      setOperationAction(ISD::ABS, VT, Custom);

    // We can custom expand all VECTOR_SHUFFLEs to VPERM; others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integers that fit in an Altivec/VSX
    // register are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Without hasP8Altivec set, v2i64 SMAX isn't available.
    // But ABS custom lowering requires SMAX support.
    if (!Subtarget.hasP8Altivec())
      setOperationAction(ISD::ABS, MVT::v2i64, Expand);

    // Custom lower ROTL of v1i128 to VECTOR_SHUFFLE of v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (TM.Options.UnsafeFPMath || Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact
      // exception, so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128 bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      }
      else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

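      // Promote v2i64 loads/stores to v2f64 so they reuse the same
      // underlying 16-byte VSX memory accesses.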
      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so that case isn't needed here.
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      // Handle constrained floating-point operations on vectors.
      // The predicate is `hasVSX` because Altivec instructions raise no
      // floating-point exceptions, while VSX vector instructions do.
      setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128 bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);
      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
      // No extending loads to f128 on PPC.
      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);
      setOperationAction(ISD::FMA, MVT::f128, Legal);
      setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      setOperationAction(ISD::SELECT, MVT::f128, Expand);
      setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);
      setOperationAction(ISD::BITCAST, MVT::i128, Custom);
      // PowerPC has no implementation of these ops.
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);

      // Handle constrained floating-point operations on fp128.
      setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    }

    if (Subtarget.hasP9Altivec()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
      setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
    }
  }

  if (Subtarget.hasQPX()) {
    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FREM, MVT::v4f64, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f64, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f64, Expand);

    setOperationAction(ISD::LOAD  , MVT::v4f64, Custom);
    setOperationAction(ISD::STORE , MVT::v4f64, Custom);

    setTruncStoreAction(MVT::v4f64, MVT::v4f32, Custom);
    setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f64, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f64, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f64, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f64, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f64, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f64, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f64, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f64, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f64, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f64, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f64, Expand);

    setOperationAction(ISD::FP_ROUND , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f64, Legal);

    setOperationAction(ISD::FNEG , MVT::v4f64, Legal);
    setOperationAction(ISD::FABS , MVT::v4f64, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f64, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f64, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f64, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f64, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f64, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f64, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f64, Legal);

    addRegisterClass(MVT::v4f64, &PPC::QFRCRegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FREM, MVT::v4f32, Expand);

    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
    setOperationAction(ISD::FGETSIGN, MVT::v4f32, Expand);

    setOperationAction(ISD::LOAD  , MVT::v4f32, Custom);
    setOperationAction(ISD::STORE , MVT::v4f32, Custom);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4f32, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Legal);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4f32, Legal);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4f32, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4f32, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4f32, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    setOperationAction(ISD::FP_TO_SINT , MVT::v4f32, Legal);
    setOperationAction(ISD::FP_TO_UINT , MVT::v4f32, Expand);

    setOperationAction(ISD::FNEG , MVT::v4f32, Legal);
    setOperationAction(ISD::FABS , MVT::v4f32, Legal);
    setOperationAction(ISD::FSIN , MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS , MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2 , MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10 , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP , MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2 , MVT::v4f32, Expand);

    setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);

    setIndexedLoadAction(ISD::PRE_INC, MVT::v4f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::v4f32, Legal);

    addRegisterClass(MVT::v4f32, &PPC::QSRCRegClass);

    setOperationAction(ISD::AND , MVT::v4i1, Legal);
    setOperationAction(ISD::OR , MVT::v4i1, Legal);
    setOperationAction(ISD::XOR , MVT::v4i1, Legal);

    if (!Subtarget.useCRBits())
      setOperationAction(ISD::SELECT, MVT::v4i1, Expand);
    setOperationAction(ISD::VSELECT, MVT::v4i1, Legal);

    setOperationAction(ISD::LOAD  , MVT::v4i1, Custom);
    setOperationAction(ISD::STORE , MVT::v4i1, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT , MVT::v4i1, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT , MVT::v4i1, Expand);
    setOperationAction(ISD::CONCAT_VECTORS , MVT::v4i1, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR , MVT::v4i1, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE , MVT::v4i1, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i1, Expand);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i1, Custom);

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i1, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i1, Custom);

    addRegisterClass(MVT::v4i1, &PPC::QBRCRegClass);

    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);

    // These need to set FE_INEXACT, and so cannot be vectorized here.
    setOperationAction(ISD::FRINT, MVT::v4f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);

    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    } else {
      setOperationAction(ISD::FDIV, MVT::v4f64, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f64, Expand);

      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    }

1234     // TODO: Handle constrained floating-point operations of v4f64
1235   }
1236 
1237   if (Subtarget.has64BitSupport())
1238     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1239 
1240   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1241 
1242   if (!isPPC64) {
1243     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1244     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1245   }
1246 
1247   setBooleanContents(ZeroOrOneBooleanContent);
1248 
1249   if (Subtarget.hasAltivec()) {
1250     // Altivec instructions set fields to all zeros or all ones.
1251     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1252   }
1253 
1254   if (!isPPC64) {
1255     // These libcalls are not available in 32-bit.
1256     setLibcallName(RTLIB::SHL_I128, nullptr);
1257     setLibcallName(RTLIB::SRL_I128, nullptr);
1258     setLibcallName(RTLIB::SRA_I128, nullptr);
1259   }
1260 
1261   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1262 
1263   // We have target-specific dag combine patterns for the following nodes:
1264   setTargetDAGCombine(ISD::ADD);
1265   setTargetDAGCombine(ISD::SHL);
1266   setTargetDAGCombine(ISD::SRA);
1267   setTargetDAGCombine(ISD::SRL);
1268   setTargetDAGCombine(ISD::MUL);
1269   setTargetDAGCombine(ISD::FMA);
1270   setTargetDAGCombine(ISD::SINT_TO_FP);
1271   setTargetDAGCombine(ISD::BUILD_VECTOR);
1272   if (Subtarget.hasFPCVT())
1273     setTargetDAGCombine(ISD::UINT_TO_FP);
1274   setTargetDAGCombine(ISD::LOAD);
1275   setTargetDAGCombine(ISD::STORE);
1276   setTargetDAGCombine(ISD::BR_CC);
1277   if (Subtarget.useCRBits())
1278     setTargetDAGCombine(ISD::BRCOND);
1279   setTargetDAGCombine(ISD::BSWAP);
1280   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1281   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1282   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1283 
1284   setTargetDAGCombine(ISD::SIGN_EXTEND);
1285   setTargetDAGCombine(ISD::ZERO_EXTEND);
1286   setTargetDAGCombine(ISD::ANY_EXTEND);
1287 
1288   setTargetDAGCombine(ISD::TRUNCATE);
1289   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1290 
1291 
1292   if (Subtarget.useCRBits()) {
1293     setTargetDAGCombine(ISD::TRUNCATE);
1294     setTargetDAGCombine(ISD::SETCC);
1295     setTargetDAGCombine(ISD::SELECT_CC);
1296   }
1297 
1298   // Use reciprocal estimates.
1299   if (TM.Options.UnsafeFPMath) {
1300     setTargetDAGCombine(ISD::FDIV);
1301     setTargetDAGCombine(ISD::FSQRT);
1302   }
1303 
1304   if (Subtarget.hasP9Altivec()) {
1305     setTargetDAGCombine(ISD::ABS);
1306     setTargetDAGCombine(ISD::VSELECT);
1307   }
1308 
1309   setLibcallName(RTLIB::LOG_F128, "logf128");
1310   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1311   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1312   setLibcallName(RTLIB::EXP_F128, "expf128");
1313   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1314   setLibcallName(RTLIB::SIN_F128, "sinf128");
1315   setLibcallName(RTLIB::COS_F128, "cosf128");
1316   setLibcallName(RTLIB::POW_F128, "powf128");
1317   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1318   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1319   setLibcallName(RTLIB::POWI_F128, "__powikf2");
1320   setLibcallName(RTLIB::REM_F128, "fmodf128");
1321 
1322   // With 32 condition bits, we don't need to sink (and duplicate) compares
1323   // aggressively in CodeGenPrep.
1324   if (Subtarget.useCRBits()) {
1325     setHasMultipleConditionRegisters();
1326     setJumpIsExpensive();
1327   }
1328 
1329   setMinFunctionAlignment(Align(4));
1330 
1331   switch (Subtarget.getCPUDirective()) {
1332   default: break;
1333   case PPC::DIR_970:
1334   case PPC::DIR_A2:
1335   case PPC::DIR_E500:
1336   case PPC::DIR_E500mc:
1337   case PPC::DIR_E5500:
1338   case PPC::DIR_PWR4:
1339   case PPC::DIR_PWR5:
1340   case PPC::DIR_PWR5X:
1341   case PPC::DIR_PWR6:
1342   case PPC::DIR_PWR6X:
1343   case PPC::DIR_PWR7:
1344   case PPC::DIR_PWR8:
1345   case PPC::DIR_PWR9:
1346   case PPC::DIR_PWR10:
1347   case PPC::DIR_PWR_FUTURE:
1348     setPrefLoopAlignment(Align(16));
1349     setPrefFunctionAlignment(Align(16));
1350     break;
1351   }
1352 
1353   if (Subtarget.enableMachineScheduler())
1354     setSchedulingPreference(Sched::Source);
1355   else
1356     setSchedulingPreference(Sched::Hybrid);
1357 
1358   computeRegisterProperties(STI.getRegisterInfo());
1359 
  // The Freescale cores do better with aggressive inlining of memcpy and
  // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
  if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
      Subtarget.getCPUDirective() == PPC::DIR_E5500) {
    MaxStoresPerMemset = 32;
    MaxStoresPerMemsetOptSize = 16;
    MaxStoresPerMemcpy = 32;
    MaxStoresPerMemcpyOptSize = 8;
    MaxStoresPerMemmove = 32;
    MaxStoresPerMemmoveOptSize = 8;
  } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
    // The A2 also benefits from (very) aggressive inlining of memcpy and
    // friends. The overhead of the function call, even when warm, can be
    // over one hundred cycles.
    MaxStoresPerMemset = 128;
    MaxStoresPerMemcpy = 128;
    MaxStoresPerMemmove = 128;
    MaxLoadsPerMemcmp = 128;
  } else {
    MaxLoadsPerMemcmp = 8;
    MaxLoadsPerMemcmpOptSize = 4;
  }

  // Let the subtarget (CPU) decide if a predictable select is more expensive
  // than the corresponding branch. This information is used in CGP to decide
  // when to convert selects into branches.
  PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
}

/// getMaxByValAlign - Helper for getByValTypeAlignment to determine
/// the desired ByVal argument alignment.
static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
  if (MaxAlign == MaxMaxAlign)
    return;
  if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    if (MaxMaxAlign >= 32 &&
        VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
      MaxAlign = Align(32);
    else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
             MaxAlign < 16)
      MaxAlign = Align(16);
  } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Align EltAlign;
    getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
    if (EltAlign > MaxAlign)
      MaxAlign = EltAlign;
  } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto *EltTy : STy->elements()) {
      Align EltAlign;
      getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
      if (EltAlign > MaxAlign)
        MaxAlign = EltAlign;
      if (MaxAlign == MaxMaxAlign)
        break;
    }
  }
}

/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area.
unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
                                                  const DataLayout &DL) const {
  // 16-byte and wider vectors are passed on a 16-byte boundary.
  // The rest are aligned to 8 bytes on PPC64 and to 4 bytes on PPC32.
  Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
  if (Subtarget.hasAltivec() || Subtarget.hasQPX())
    getMaxByValAlign(Ty, Alignment, Subtarget.hasQPX() ? Align(32) : Align(16));
  return Alignment.value();
}
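
// Illustrative example, derived from the logic above: with Altivec
// (MaxMaxAlign == 16), a ByVal struct containing a 128-bit vector member is
// aligned to 16 bytes, while an aggregate of plain scalars keeps the default
// of 8 bytes on PPC64 (4 on PPC32).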

bool PPCTargetLowering::useSoftFloat() const {
  return Subtarget.useSoftFloat();
}

bool PPCTargetLowering::hasSPE() const {
  return Subtarget.hasSPE();
}

bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
  return VT.isScalarInteger();
}

/// isMulhCheaperThanMulShift - Return true if a mulh[s|u] node for a specific
/// type is cheaper than a multiply followed by a shift.
/// This is true for words and doublewords on 64-bit PowerPC.
bool PPCTargetLowering::isMulhCheaperThanMulShift(EVT Type) const {
  if (Subtarget.isPPC64() && (isOperationLegal(ISD::MULHS, Type) ||
                              isOperationLegal(ISD::MULHU, Type)))
    return true;
  return TargetLowering::isMulhCheaperThanMulShift(Type);
}
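
// For example, on PPC64 an i64 MULHS can be selected as a single mulhd, which
// is expected to be cheaper than forming the full 128-bit product and shifting
// it right by 64.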

const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((PPCISD::NodeType)Opcode) {
  case PPCISD::FIRST_NUMBER:    break;
  case PPCISD::FSEL:            return "PPCISD::FSEL";
  case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
  case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
  case PPCISD::FCFID:           return "PPCISD::FCFID";
  case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
  case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
  case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
  case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
  case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
  case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
  case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
  case PPCISD::FP_TO_UINT_IN_VSR:
                                return "PPCISD::FP_TO_UINT_IN_VSR";
  case PPCISD::FP_TO_SINT_IN_VSR:
                                return "PPCISD::FP_TO_SINT_IN_VSR";
  case PPCISD::FRE:             return "PPCISD::FRE";
  case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
  case PPCISD::STFIWX:          return "PPCISD::STFIWX";
  case PPCISD::VPERM:           return "PPCISD::VPERM";
  case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
  case PPCISD::XXSPLTI_SP_TO_DP:
    return "PPCISD::XXSPLTI_SP_TO_DP";
  case PPCISD::XXSPLTI32DX:
    return "PPCISD::XXSPLTI32DX";
  case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
  case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
  case PPCISD::VECSHL:          return "PPCISD::VECSHL";
  case PPCISD::CMPB:            return "PPCISD::CMPB";
  case PPCISD::Hi:              return "PPCISD::Hi";
  case PPCISD::Lo:              return "PPCISD::Lo";
  case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
  case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
  case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
  case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
  case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
  case PPCISD::SRL:             return "PPCISD::SRL";
  case PPCISD::SRA:             return "PPCISD::SRA";
  case PPCISD::SHL:             return "PPCISD::SHL";
  case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
  case PPCISD::CALL:            return "PPCISD::CALL";
  case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
  case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
  case PPCISD::MTCTR:           return "PPCISD::MTCTR";
  case PPCISD::BCTRL:           return "PPCISD::BCTRL";
  case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
  case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
  case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
  case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
  case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
  case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
  case PPCISD::MFVSR:           return "PPCISD::MFVSR";
  case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
  case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
  case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
  case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
  case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
    return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
  case PPCISD::ANDI_rec_1_EQ_BIT:
    return "PPCISD::ANDI_rec_1_EQ_BIT";
  case PPCISD::ANDI_rec_1_GT_BIT:
    return "PPCISD::ANDI_rec_1_GT_BIT";
  case PPCISD::VCMP:            return "PPCISD::VCMP";
  case PPCISD::VCMPo:           return "PPCISD::VCMPo";
  case PPCISD::LBRX:            return "PPCISD::LBRX";
  case PPCISD::STBRX:           return "PPCISD::STBRX";
  case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
  case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
  case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
  case PPCISD::STXSIX:          return "PPCISD::STXSIX";
  case PPCISD::VEXTS:           return "PPCISD::VEXTS";
  case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
  case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
  case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
  case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
  case PPCISD::ST_VSR_SCAL_INT:
                                return "PPCISD::ST_VSR_SCAL_INT";
  case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
  case PPCISD::BDNZ:            return "PPCISD::BDNZ";
  case PPCISD::BDZ:             return "PPCISD::BDZ";
  case PPCISD::MFFS:            return "PPCISD::MFFS";
  case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
  case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
  case PPCISD::CR6SET:          return "PPCISD::CR6SET";
  case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
  case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
  case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
  case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
  case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
  case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
  case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
  case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
  case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
  case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
  case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
  case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
  case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
  case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
  case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
  case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
  case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
  case PPCISD::SC:              return "PPCISD::SC";
  case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
  case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
  case PPCISD::RFEBB:           return "PPCISD::RFEBB";
  case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
  case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
  case PPCISD::VABSD:           return "PPCISD::VABSD";
  case PPCISD::QVFPERM:         return "PPCISD::QVFPERM";
  case PPCISD::QVGPCI:          return "PPCISD::QVGPCI";
  case PPCISD::QVALIGNI:        return "PPCISD::QVALIGNI";
  case PPCISD::QVESPLATI:       return "PPCISD::QVESPLATI";
  case PPCISD::QBFLT:           return "PPCISD::QBFLT";
  case PPCISD::QVLFSb:          return "PPCISD::QVLFSb";
  case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
  case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
  case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
  case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
  case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
  case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
  case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
  case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
  case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
  }
  return nullptr;
}

EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
                                          EVT VT) const {
  if (!VT.isVector())
    return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;

  if (Subtarget.hasQPX())
    return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());

  return VT.changeVectorElementTypeToInteger();
}

bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
  assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
  return true;
}

//===----------------------------------------------------------------------===//
// Node matching predicates, for use by the tblgen matching code.
//===----------------------------------------------------------------------===//

/// isFloatingPointZero - Return true if this is 0.0 or -0.0.
static bool isFloatingPointZero(SDValue Op) {
  if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
    return CFP->getValueAPF().isZero();
  else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
    // Maybe this has already been legalized into the constant pool?
    if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
      if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
        return CFP->getValueAPF().isZero();
  }
  return false;
}

/// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
/// true if Op is undef or if it matches the specified value.
static bool isConstantOrUndef(int Op, int Val) {
  return Op < 0 || Op == Val;
}

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i), i*2))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 1;
    for (unsigned i = 0; i != 8; ++i)
      if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
        return false;
  }
  return true;
}
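
// Illustrative example, derived from the checks above: the big-endian
// two-input form (ShuffleKind == 0) accepts the mask
// <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31> (undef is tolerated in any
// position), i.e. the odd-numbered bytes - the low byte of each halfword -
// of the two concatenated inputs.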

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 2;
    for (unsigned i = 0; i != 8; i += 2)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
        return false;
  }
  return true;
}
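
// Likewise, the big-endian two-input VPKUWUM form (ShuffleKind == 0) accepts
// the mask <2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31>: the low halfword
// of each word of the concatenated inputs.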

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
/// current subtarget.
///
/// The ShuffleKind distinguishes between big-endian operations with
/// two different inputs (0), either-endian operations with two identical
/// inputs (1), and little-endian operations with two different inputs (2).
/// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                               SelectionDAG &DAG) {
  const PPCSubtarget& Subtarget =
      static_cast<const PPCSubtarget&>(DAG.getSubtarget());
  if (!Subtarget.hasP8Vector())
    return false;

  bool IsLE = DAG.getDataLayout().isLittleEndian();
  if (ShuffleKind == 0) {
    if (IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
        return false;
  } else if (ShuffleKind == 2) {
    if (!IsLE)
      return false;
    for (unsigned i = 0; i != 16; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
        return false;
  } else if (ShuffleKind == 1) {
    unsigned j = IsLE ? 0 : 4;
    for (unsigned i = 0; i != 8; i += 4)
      if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
          !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
          !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
          !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
          !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
        return false;
  }
  return true;
}
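
// As an example, the big-endian two-input VPKUDUM form (ShuffleKind == 0)
// accepts the mask <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31>: the low
// word of each doubleword of the concatenated inputs.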

/// isVMerge - Common function, used to match vmrg* shuffles.
///
static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
                     unsigned LHSStart, unsigned RHSStart) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;
  assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
         "Unsupported merge size!");

  for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
    for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
      if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
                             LHSStart+j+i*UnitSize) ||
          !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
                             RHSStart+j+i*UnitSize))
        return false;
    }
  return true;
}
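
// A worked example of the check above: isVMerge(N, 4, 8, 24) - the big-endian
// two-input vmrglw pattern - accepts the mask <8,9,10,11, 24,25,26,27,
// 12,13,14,15, 28,29,30,31>, i.e. words 2 and 3 of the two inputs,
// interleaved.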

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2).  For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  }
}

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
/// The ShuffleKind distinguishes between big-endian merges with two
/// different inputs (0), either-endian merges with two identical inputs (1),
/// and little-endian merges with two different inputs (2).  For the latter,
/// the input operands are swapped (see PPCInstrAltivec.td).
bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                             unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 8, 8);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, UnitSize, 8, 24);
    else
      return false;
  } else {
    if (ShuffleKind == 1) // unary
      return isVMerge(N, UnitSize, 0, 0);
    else if (ShuffleKind == 0) // normal
      return isVMerge(N, UnitSize, 0, 16);
    else
      return false;
  }
}

/**
 * Common function used to match vmrgew and vmrgow shuffles.
 *
 * The IndexOffset determines whether to look for even or odd words in
 * the shuffle mask. This is based on the endianness of the target machine.
 *   - Little Endian:
 *     - Use offset of 0 to check for odd elements
 *     - Use offset of 4 to check for even elements
 *   - Big Endian:
 *     - Use offset of 0 to check for even elements
 *     - Use offset of 4 to check for odd elements
 * A detailed description of the vector element ordering for little-endian
 * and big-endian targets can be found in "Targeting your applications - what
 * little endian and big endian IBM XL C/C++ compiler differences mean to you"
 * at
 * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
 *
 * The mask to the shuffle vector instruction specifies the indices of the
 * elements from the two input vectors to place in the result. The elements
 * are numbered in array-access order, starting with the first vector. These
 * vectors are always of type v16i8, so each vector contains 16 byte-sized
 * elements. More information on the shufflevector instruction can be found
 * in the Language Reference:
 * http://llvm.org/docs/LangRef.html#shufflevector-instruction
 *
 * The RHSStartValue indicates whether the same input vectors are used (unary)
 * or two different input vectors are used, based on the following:
 *   - If the instruction uses the same vector for both inputs, the range of
 *     the indices will be 0 to 15. In this case, the RHSStartValue passed
 *     should be 0.
 *   - If the instruction has two different vectors then the range of the
 *     indices will be 0 to 31. In this case, the RHSStartValue passed should
 *     be 16 (indices 0-15 specify elements in the first vector while indices
 *     16 to 31 specify elements in the second vector).
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] IndexOffset Specifies whether to look for even or odd elements
 * \param[in] RHSStartValue Specifies the starting index for the righthand
 * input vector to the shuffle_vector instruction
 * \return true iff this shuffle mask represents an even or odd word merge
 */
static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
                     unsigned RHSStartValue) {
  if (N->getValueType(0) != MVT::v16i8)
    return false;

  for (unsigned i = 0; i < 2; ++i)
    for (unsigned j = 0; j < 4; ++j)
      if (!isConstantOrUndef(N->getMaskElt(i*4+j),
                             i*RHSStartValue+j+IndexOffset) ||
          !isConstantOrUndef(N->getMaskElt(i*4+j+8),
                             i*RHSStartValue+j+IndexOffset+8))
        return false;
  return true;
}
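
// A worked example: with IndexOffset == 0 and RHSStartValue == 16 (the
// big-endian two-input vmrgew pattern), the accepted mask is
// <0,1,2,3, 16,17,18,19, 8,9,10,11, 24,25,26,27>: the even words of the two
// inputs, interleaved.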

/**
 * Determine if the specified shuffle mask is suitable for the vmrgew or
 * vmrgow instructions.
 *
 * \param[in] N The shuffle vector SD Node to analyze
 * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
 * \param[in] ShuffleKind Identify the type of merge:
 *   - 0 = big-endian merge with two different inputs;
 *   - 1 = either-endian merge with two identical inputs;
 *   - 2 = little-endian merge with two different inputs (inputs are swapped
 *     for little-endian merges).
 * \param[in] DAG The current SelectionDAG
 * \return true iff this shuffle mask represents the specified even or odd
 * word merge
 */
bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
  if (DAG.getDataLayout().isLittleEndian()) {
    unsigned indexOffset = CheckEven ? 4 : 0;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 2) // swapped
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  } else {
    unsigned indexOffset = CheckEven ? 0 : 4;
    if (ShuffleKind == 1) // Unary
      return isVMerge(N, indexOffset, 0);
    else if (ShuffleKind == 0) // Normal
      return isVMerge(N, indexOffset, 16);
    else
      return false;
  }
}

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
/// amount, otherwise return -1.
/// The ShuffleKind distinguishes between big-endian operations with two
/// different inputs (0), either-endian operations with two identical inputs
/// (1), and little-endian operations with two different inputs (2).  For the
/// latter, the input operands are swapped (see PPCInstrAltivec.td).
int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                             SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::v16i8)
    return -1;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);

  // Find the first non-undef value in the shuffle mask.
  unsigned i;
  for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
    /*search*/;

  if (i == 16) return -1;  // all undef.

  // Otherwise, check to see if the rest of the elements are consecutively
  // numbered from this value.
  unsigned ShiftAmt = SVOp->getMaskElt(i);
  if (ShiftAmt < i) return -1;

  ShiftAmt -= i;
  bool isLE = DAG.getDataLayout().isLittleEndian();

  if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
        return -1;
  } else if (ShuffleKind == 1) {
    // Check the rest of the elements to see if they are consecutive.
    for (++i; i != 16; ++i)
      if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
        return -1;
  } else
    return -1;

  if (isLE)
    ShiftAmt = 16 - ShiftAmt;

  return ShiftAmt;
}
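
// For example, the fully consecutive mask <3,4,5,...,18> yields a shift
// amount of 3 for a big-endian two-input shuffle (ShuffleKind == 0) and
// 13 (i.e. 16 - 3) for the corresponding little-endian form
// (ShuffleKind == 2).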

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
  assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
         EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");

  // The consecutive indices need to specify an element, not part of two
  // different elements.  So abandon ship early if this isn't the case.
  if (N->getMaskElt(0) % EltSize != 0)
    return false;

  // This is a splat operation if each element of the permute is the same, and
  // if the value doesn't reference the second vector.
  unsigned ElementBase = N->getMaskElt(0);

  // FIXME: Handle UNDEF elements too!
  if (ElementBase >= 16)
    return false;

  // Check that the indices are consecutive, in the case of a multi-byte
  // element splatted with a v16i8 mask.
  for (unsigned i = 1; i != EltSize; ++i)
    if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
      return false;

  for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
    if (N->getMaskElt(i) < 0) continue;
    for (unsigned j = 0; j != EltSize; ++j)
      if (N->getMaskElt(i+j) != N->getMaskElt(j))
        return false;
  }
  return true;
}
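
// For example, with EltSize == 4 the mask <4,5,6,7, 4,5,6,7, 4,5,6,7,
// 4,5,6,7> is accepted: it splats word 1 of the first input.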

/// Check that the mask is shuffling N byte elements. Within each N byte
/// element of the mask, the indices could be either in increasing or
/// decreasing order as long as they are consecutive.
/// \param[in] N the shuffle vector SD Node to analyze
/// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
/// Word/DoubleWord/QuadWord).
/// \param[in] StepLen the delta between adjacent indices within an N byte
/// element: 1 if the mask is in increasing order, -1 if decreasing.
/// \return true iff the mask is shuffling N byte elements.
static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
                                   int StepLen) {
  assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
         "Unexpected element width.");
  assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");

  unsigned NumOfElem = 16 / Width;
  unsigned MaskVal[16]; // Width is never greater than 16.
  for (unsigned i = 0; i < NumOfElem; ++i) {
    MaskVal[0] = N->getMaskElt(i * Width);
    if ((StepLen == 1) && (MaskVal[0] % Width)) {
      return false;
    } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
      return false;
    }

    for (unsigned int j = 1; j < Width; ++j) {
      MaskVal[j] = N->getMaskElt(i * Width + j);
      if (MaskVal[j] != MaskVal[j-1] + StepLen) {
        return false;
      }
    }
  }

  return true;
}
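
// Two illustrative masks for the predicate above: with Width == 4 and
// StepLen == 1, the word-swap mask <4,5,6,7, 0,1,2,3, 12,13,14,15, 8,9,10,11>
// is accepted; with StepLen == -1, the byte-reversed-word mask
// <3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12> is.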

bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                          unsigned &InsertAtByte, bool &Swap, bool IsLE) {
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;
  unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
  unsigned BigEndianShifts[] = { 3, 0, 1, 2 };

  // Below, let H and L be arbitrary elements of the shuffle mask
  // where H is in the range [4,7] and L is in the range [0,3].
  // H, 1, 2, 3 or L, 5, 6, 7
  if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
      (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
    InsertAtByte = IsLE ? 12 : 0;
    Swap = M0 < 4;
    return true;
  }
  // 0, H, 2, 3 or 4, L, 6, 7
  if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
      (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
    InsertAtByte = IsLE ? 8 : 4;
    Swap = M1 < 4;
    return true;
  }
  // 0, 1, H, 3 or 4, 5, L, 7
  if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
      (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
    ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
    InsertAtByte = IsLE ? 4 : 8;
    Swap = M2 < 4;
    return true;
  }
  // 0, 1, 2, H or 4, 5, 6, L
  if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
      (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
    ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
    InsertAtByte = IsLE ? 0 : 12;
    Swap = M3 < 4;
    return true;
  }

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will
  // be undef.
  if (N->getOperand(1).isUndef()) {
    ShiftElts = 0;
    Swap = true;
    unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
    if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 12 : 0;
      return true;
    }
    if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
      InsertAtByte = IsLE ? 8 : 4;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
      InsertAtByte = IsLE ? 4 : 8;
      return true;
    }
    if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
      InsertAtByte = IsLE ? 0 : 12;
      return true;
    }
  }

  return false;
}
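
// A worked example on little-endian: the mask <0,1,2,3, 4,5,6,7, 16,17,18,19,
// 12,13,14,15> (word pattern 0, 1, H, 3 with H == 4) is accepted with
// ShiftElts == 2, InsertAtByte == 4 and Swap == false.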

bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                               bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
  // Ensure each byte index of the word is consecutive.
  if (!isNByteElemShuffleMask(N, 4, 1))
    return false;

  // Now we look at mask elements 0,4,8,12, which are the beginning of words.
  unsigned M0 = N->getMaskElt(0) / 4;
  unsigned M1 = N->getMaskElt(4) / 4;
  unsigned M2 = N->getMaskElt(8) / 4;
  unsigned M3 = N->getMaskElt(12) / 4;

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will
  // be undef.
  if (N->getOperand(1).isUndef()) {
    assert(M0 < 4 && "Indexing into an undef vector?");
    if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
      return false;

    ShiftElts = IsLE ? (4 - M0) % 4 : M0;
    Swap = false;
    return true;
  }

  // Ensure each word index of the ShuffleVector Mask is consecutive.
  if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
    return false;

  if (IsLE) {
    if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 3 left elements of the second vector
      // (or if there is no shift to be done at all).
      Swap = false;
      ShiftElts = (8 - M0) % 8;
    } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 3 left elements of the first vector
      // (or if we're shifting by 4 - thereby simply swapping the vectors).
      Swap = true;
      ShiftElts = (4 - M0) % 4;
    }

    return true;
  } else { // BE
    if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
      // Input vectors don't need to be swapped if the leading element
      // of the result is one of the 4 elements of the first vector.
      Swap = false;
      ShiftElts = M0;
    } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
      // Input vectors need to be swapped if the leading element
      // of the result is one of the 4 elements of the right vector.
      Swap = true;
      ShiftElts = M0 - 4;
    }

    return true;
  }
}
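
// For example, for a unary shuffle whose word elements are <1,2,3,0>,
// ShiftElts is 1 on big-endian and 3 (i.e. (4 - 1) % 4) on little-endian,
// with Swap == false in both cases.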

static bool isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  if (!isNByteElemShuffleMask(N, Width, -1))
    return false;

  for (int i = 0; i < 16; i += Width)
    if (N->getMaskElt(i) != i + Width - 1)
      return false;

  return true;
}
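
// For example, isXXBRShuffleMaskHelper(N, 2) - the halfword form used by
// isXXBRHShuffleMask below - accepts <1,0, 3,2, 5,4, 7,6, 9,8, 11,10, 13,12,
// 15,14>: each halfword stays in place with its bytes reversed.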

bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 2);
}

bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 4);
}

bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 8);
}

bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
  return isXXBRShuffleMaskHelper(N, 16);
}

/// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
/// if the inputs to the instruction should be swapped and set \p DM to the
/// value for the immediate.
/// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
/// AND element 0 of the result comes from the first input (LE) or second input
/// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
/// \return true iff the given mask of shuffle node \p N is an XXPERMDI shuffle
/// mask.
bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
                                bool &Swap, bool IsLE) {
  assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");

  // Ensure each byte index of the double word is consecutive.
  if (!isNByteElemShuffleMask(N, 8, 1))
    return false;

  unsigned M0 = N->getMaskElt(0) / 8;
  unsigned M1 = N->getMaskElt(8) / 8;
  assert(((M0 | M1) < 4) && "A mask element out of bounds?");

  // If both vector operands for the shuffle are the same vector, the mask
  // will contain only elements from the first one and the second one will
  // be undef.
  if (N->getOperand(1).isUndef()) {
    if ((M0 | M1) < 2) {
      DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
      Swap = false;
      return true;
    } else
      return false;
  }

  if (IsLE) {
    if (M0 > 1 && M1 < 2) {
      Swap = false;
    } else if (M0 < 2 && M1 > 1) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (((~M1) & 1) << 1) + ((~M0) & 1);
    return true;
  } else { // BE
    if (M0 < 2 && M1 > 1) {
      Swap = false;
    } else if (M0 > 1 && M1 < 2) {
      M0 = (M0 + 2) % 4;
      M1 = (M1 + 2) % 4;
      Swap = true;
    } else
      return false;

    // Note: if control flow comes here that means Swap is already set above
    DM = (M0 << 1) + (M1 & 1);
    return true;
  }
}
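
// A worked big-endian example: a mask selecting doubleword 0 of the first
// input followed by doubleword 1 of the second (bytes <0..7, 24..31>) is
// accepted with DM == 1 and Swap == false.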

/// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
/// appropriate for PPC mnemonics (which have a big endian bias - namely
/// elements are counted from the left of the vector register).
unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
                                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  assert(isSplatShuffleMask(SVOp, EltSize));
  if (DAG.getDataLayout().isLittleEndian())
    return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
  else
    return SVOp->getMaskElt(0) / EltSize;
}
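
// For example, splatting array element 1 of a v4i32 (mask element 0 == 4 with
// EltSize == 4) yields index 1 on big-endian but 2 on little-endian, since
// the mnemonics count elements from the left of the register.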

/// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
/// by using a vspltis[bhw] instruction of the specified element size, return
/// the constant being splatted.  The ByteSize field indicates the number of
/// bytes of each element [124] -> [bhw].
SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
  SDValue OpVal(nullptr, 0);

  // If ByteSize of the splat is bigger than the element size of the
  // build_vector, then we have a case where we are checking for a splat where
  // multiple elements of the buildvector are folded together into a single
  // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
  unsigned EltSize = 16/N->getNumOperands();
  if (EltSize < ByteSize) {
    unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
    SDValue UniquedVals[4];
    assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");

    // See if all of the elements in the buildvector agree across.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
      if (N->getOperand(i).isUndef()) continue;
      // If the element isn't a constant, bail fully out.
      if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();

      if (!UniquedVals[i&(Multiple-1)].getNode())
        UniquedVals[i&(Multiple-1)] = N->getOperand(i);
      else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
        return SDValue();  // no match.
    }

    // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
    // either constant or undef values that are identical for each chunk.  See
    // if these chunks can form into a larger vspltis*.

    // Check to see if all of the leading entries are either 0 or -1.  If
    // neither, then this won't fit into the immediate field.
    bool LeadingZero = true;
    bool LeadingOnes = true;
    for (unsigned i = 0; i != Multiple-1; ++i) {
      if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.

      LeadingZero &= isNullConstant(UniquedVals[i]);
      LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
    }
    // Finally, check the least significant entry.
    if (LeadingZero) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
      if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }
    if (LeadingOnes) {
      if (!UniquedVals[Multiple-1].getNode())
        return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
      int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
      if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
        return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
    }

    return SDValue();
  }

  // Check to see if this buildvec has a single non-undef value in its elements.
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    if (N->getOperand(i).isUndef()) continue;
    if (!OpVal.getNode())
      OpVal = N->getOperand(i);
    else if (OpVal != N->getOperand(i))
      return SDValue();
  }

  if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.

  unsigned ValSizeInBytes = EltSize;
  uint64_t Value = 0;
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
    Value = CN->getZExtValue();
  } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
    assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
    Value = FloatToBits(CN->getValueAPF().convertToFloat());
  }

  // If the splat value is larger than the element value, then we can never do
  // this splat.  The only case where the replicated bits could fit into our
  // immediate field would be zero, and we prefer to use vxor for it.
  if (ValSizeInBytes < ByteSize) return SDValue();

  // If the element value is larger than the splat value, check if it consists
  // of a repeated bit pattern of size ByteSize.
  if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
    return SDValue();

  // Properly sign extend the value.
  int MaskVal = SignExtend32(Value, ByteSize * 8);

  // If this is zero, don't match; zero matches ISD::isBuildVectorAllZeros.
  if (MaskVal == 0) return SDValue();

  // Finally, if this value fits in a 5 bit sext field, return it.
  if (SignExtend32<5>(MaskVal) == MaskVal)
    return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
  return SDValue();
}
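
// For example, a v16i8 build_vector whose elements are all 0xFE is matched
// with ByteSize == 1 and yields the constant -2, i.e. "vspltisb -2".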
2363 
2364 /// isQVALIGNIShuffleMask - If this is a qvaligni shuffle mask, return the shift
2365 /// amount, otherwise return -1.
isQVALIGNIShuffleMask(SDNode * N)2366 int PPC::isQVALIGNIShuffleMask(SDNode *N) {
2367   EVT VT = N->getValueType(0);
2368   if (VT != MVT::v4f64 && VT != MVT::v4f32 && VT != MVT::v4i1)
2369     return -1;
2370 
2371   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2372 
2373   // Find the first non-undef value in the shuffle mask.
2374   unsigned i;
2375   for (i = 0; i != 4 && SVOp->getMaskElt(i) < 0; ++i)
2376     /*search*/;
2377 
2378   if (i == 4) return -1;  // all undef.
2379 
2380   // Otherwise, check to see if the rest of the elements are consecutively
2381   // numbered from this value.
2382   unsigned ShiftAmt = SVOp->getMaskElt(i);
2383   if (ShiftAmt < i) return -1;
2384   ShiftAmt -= i;
2385 
2386   // Check the rest of the elements to see if they are consecutive.
2387   for (++i; i != 4; ++i)
2388     if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2389       return -1;
2390 
2391   return ShiftAmt;
2392 }
2393 
2394 //===----------------------------------------------------------------------===//
2395 //  Addressing Mode Selection
2396 //===----------------------------------------------------------------------===//
2397 
2398 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2399 /// or 64-bit immediate, and if the value can be accurately represented as a
2400 /// sign extension from a 16-bit value.  If so, this returns true and the
2401 /// immediate.
2402 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2403   if (!isa<ConstantSDNode>(N))
2404     return false;
2405 
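  // Truncate to 16 bits and check that sign extension reproduces the whole
  // value: e.g. 0xFFFF8000 (-32768) passes, but 0x8000 (32768) truncates to
  // -32768 and fails.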
2406   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2407   if (N->getValueType(0) == MVT::i32)
2408     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2409   else
2410     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2411 }
2412 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2413   return isIntS16Immediate(Op.getNode(), Imm);
2414 }
2415 
2416 
2417 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2418 /// be represented as an indexed [r+r] operation.
2419 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2420                                                SDValue &Index,
2421                                                SelectionDAG &DAG) const {
2422   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2423       UI != E; ++UI) {
2424     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2425       if (Memop->getMemoryVT() == MVT::f64) {
2426           Base = N.getOperand(0);
2427           Index = N.getOperand(1);
2428           return true;
2429       }
2430     }
2431   }
2432   return false;
2433 }
2434 
2435 /// SelectAddressRegReg - Given the specified address, check to see if it
2436 /// can be represented as an indexed [r+r] operation.  Returns false if it
2437 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2438 /// non-zero and N can be represented by a base register plus a signed 16-bit
2439 /// displacement, make a more precise judgement by checking (displacement % \p
2440 /// EncodingAlignment).
2441 bool PPCTargetLowering::SelectAddressRegReg(
2442     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2443     MaybeAlign EncodingAlignment) const {
2444   // If we have a PC-Relative target flag, don't select as [reg+reg]; it will
2445   // be a [pc+imm].
2446   if (SelectAddressPCRel(N, Base))
2447     return false;
2448 
2449   int16_t Imm = 0;
2450   if (N.getOpcode() == ISD::ADD) {
2451     // Check for an SPE f64 load/store first: SPE load/store instructions can
2452     // only handle 8-bit offsets, not the usual 16-bit ones.
2453     if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2454         return true;
2455     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2456         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2457       return false; // r+i
2458     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2459       return false;    // r+i
2460 
2461     Base = N.getOperand(0);
2462     Index = N.getOperand(1);
2463     return true;
2464   } else if (N.getOpcode() == ISD::OR) {
2465     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2466         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2467       return false; // r+i can fold the immediate; prefer that form.
2468 
2469     // If this is an or of disjoint bitfields, we can codegen this as an add
2470     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2471     // disjoint.
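    // For example, (X | 12) computes the same value as (X + 12) when the low
    // bits of X are known to be zero, because no carry can be produced.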
2472     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2473 
2474     if (LHSKnown.Zero.getBoolValue()) {
2475       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2476       // If all of the bits are known zero on the LHS or RHS, the add won't
2477       // carry.
2478       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2479         Base = N.getOperand(0);
2480         Index = N.getOperand(1);
2481         return true;
2482       }
2483     }
2484   }
2485 
2486   return false;
2487 }
2488 
2489 // If we happen to be doing an i64 load or store into a stack slot that has
2490 // less than a 4-byte alignment, then the frame-index elimination may need to
2491 // use an indexed load or store instruction (because the offset may not be a
2492 // multiple of 4). The extra register needed to hold the offset comes from the
2493 // register scavenger, and it is possible that the scavenger will need to use
2494 // an emergency spill slot. As a result, we need to make sure that a spill slot
2495 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2496 // stack slot.
2497 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2498   // FIXME: This does not handle the LWA case.
2499   if (VT != MVT::i64)
2500     return;
2501 
2502   // NOTE: We'll exclude negative FIs here, which come from argument
2503   // lowering, because there are no known test cases triggering this problem
2504   // using packed structures (or similar). We can remove this exclusion if
2505   // we find such a test case. The reason why this is so test-case driven is
2506   // because this entire 'fixup' is only to prevent crashes (from the
2507   // register scavenger) on not-really-valid inputs. For example, if we have:
2508   //   %a = alloca i1
2509   //   %b = bitcast i1* %a to i64*
2510   //   store i64 0, i64* %b
2511   // then the store should really be marked as 'align 1', but is not. If it
2512   // were marked as 'align 1' then the indexed form would have been
2513   // instruction-selected initially, and the problem this 'fixup' is preventing
2514   // won't happen regardless.
2515   if (FrameIdx < 0)
2516     return;
2517 
2518   MachineFunction &MF = DAG.getMachineFunction();
2519   MachineFrameInfo &MFI = MF.getFrameInfo();
2520 
2521   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2522     return;
2523 
2524   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2525   FuncInfo->setHasNonRISpills();
2526 }
2527 
2528 /// Returns true if the address N can be represented by a base register plus
2529 /// a signed 16-bit displacement [r+imm], and if it is not better
2530 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2531 /// displacements that are multiples of that value.
2532 bool PPCTargetLowering::SelectAddressRegImm(
2533     SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2534     MaybeAlign EncodingAlignment) const {
2535   // FIXME dl should come from parent load or store, not from address
2536   SDLoc dl(N);
2537 
2538   // If we have a PC-Relative target flag, don't select as [reg+imm]; it will
2539   // be a [pc+imm].
2540   if (SelectAddressPCRel(N, Base))
2541     return false;
2542 
2543   // If this can be more profitably realized as r+r, fail.
2544   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2545     return false;
2546 
2547   if (N.getOpcode() == ISD::ADD) {
2548     int16_t imm = 0;
2549     if (isIntS16Immediate(N.getOperand(1), imm) &&
2550         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2551       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2552       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2553         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2554         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2555       } else {
2556         Base = N.getOperand(0);
2557       }
2558       return true; // [r+i]
2559     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2560       // Match LOAD (ADD (X, Lo(G))).
2561       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2562              && "Cannot handle constant offsets yet!");
2563       Disp = N.getOperand(1).getOperand(0);  // The global address.
2564       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2565              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2566              Disp.getOpcode() == ISD::TargetConstantPool ||
2567              Disp.getOpcode() == ISD::TargetJumpTable);
2568       Base = N.getOperand(0);
2569       return true;  // [&g+r]
2570     }
2571   } else if (N.getOpcode() == ISD::OR) {
2572     int16_t imm = 0;
2573     if (isIntS16Immediate(N.getOperand(1), imm) &&
2574         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2575       // If this is an or of disjoint bitfields, we can codegen this as an add
2576       // (for better address arithmetic) if the LHS and RHS of the OR are
2577       // provably disjoint.
2578       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2579 
2580       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2581         // If all of the bits are known zero on the LHS or RHS, the add won't
2582         // carry.
2583         if (FrameIndexSDNode *FI =
2584               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2585           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2586           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2587         } else {
2588           Base = N.getOperand(0);
2589         }
2590         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2591         return true;
2592       }
2593     }
2594   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2595     // Loading from a constant address.
2596 
2597     // If this address fits entirely in a 16-bit sext immediate field, codegen
2598     // this as "d, 0"
2599     int16_t Imm;
2600     if (isIntS16Immediate(CN, Imm) &&
2601         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2602       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2603       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2604                              CN->getValueType(0));
2605       return true;
2606     }
2607 
2608     // Handle 32-bit sext immediates with LIS + addr mode.
2609     if ((CN->getValueType(0) == MVT::i32 ||
2610          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2611         (!EncodingAlignment ||
2612          isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2613       int Addr = (int)CN->getZExtValue();
2614 
2615       // Otherwise, break this down into an LIS + disp.
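      // For example, 0x12348000 becomes Disp = -32768 and Base = lis 0x1235,
      // since 0x12350000 + (-32768) == 0x12348000.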
2616       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2617 
2618       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2619                                    MVT::i32);
2620       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2621       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2622       return true;
2623     }
2624   }
2625 
2626   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2627   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2628     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2629     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2630   } else
2631     Base = N;
2632   return true;      // [r+0]
2633 }
2634 
2635 /// SelectAddressRegRegOnly - Given the specified address, force it to be
2636 /// represented as an indexed [r+r] operation.
2637 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2638                                                 SDValue &Index,
2639                                                 SelectionDAG &DAG) const {
2640   // Check to see if we can easily represent this as an [r+r] address.  This
2641   // will fail if it thinks that the address is more profitably represented as
2642   // reg+imm, e.g. where imm = 0.
2643   if (SelectAddressRegReg(N, Base, Index, DAG))
2644     return true;
2645 
2646   // If the address is the result of an add, we will utilize the fact that the
2647   // address calculation includes an implicit add.  However, we can reduce
2648   // register pressure if we do not materialize a constant just for use as the
2649   // index register.  We only get rid of the add if it is not an add of a
2650   // value and a 16-bit signed constant and both have a single use.
2651   int16_t imm = 0;
2652   if (N.getOpcode() == ISD::ADD &&
2653       (!isIntS16Immediate(N.getOperand(1), imm) ||
2654        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2655     Base = N.getOperand(0);
2656     Index = N.getOperand(1);
2657     return true;
2658   }
2659 
2660   // Otherwise, do it the hard way, using R0 as the base register.
2661   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2662                          N.getValueType());
2663   Index = N;
2664   return true;
2665 }
2666 
2667 template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2668   Ty *PCRelCand = dyn_cast<Ty>(N);
2669   return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2670 }
2671 
2672 /// Returns true if this address is a PC Relative address.
2673 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2674 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2675 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2676   // This is a materialize PC Relative node. Always select this as PC Relative.
2677   Base = N;
2678   if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2679     return true;
2680   if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2681       isValidPCRelNode<GlobalAddressSDNode>(N) ||
2682       isValidPCRelNode<JumpTableSDNode>(N) ||
2683       isValidPCRelNode<BlockAddressSDNode>(N))
2684     return true;
2685   return false;
2686 }
2687 
2688 /// Returns true if we should use a direct load into vector instruction
2689 /// (such as lxsd or lfd) instead of a load into gpr + direct move sequence.
2690 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2691 
2692   // If there are any uses other than scalar to vector, then we should
2693   // keep it as a scalar load -> direct move pattern to prevent multiple
2694   // loads.
2695   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2696   if (!LD)
2697     return false;
2698 
2699   EVT MemVT = LD->getMemoryVT();
2700   if (!MemVT.isSimple())
2701     return false;
2702   switch(MemVT.getSimpleVT().SimpleTy) {
2703   case MVT::i64:
2704     break;
2705   case MVT::i32:
2706     if (!ST.hasP8Vector())
2707       return false;
2708     break;
2709   case MVT::i16:
2710   case MVT::i8:
2711     if (!ST.hasP9Vector())
2712       return false;
2713     break;
2714   default:
2715     return false;
2716   }
2717 
2718   SDValue LoadedVal(N, 0);
2719   if (!LoadedVal.hasOneUse())
2720     return false;
2721 
2722   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2723        UI != UE; ++UI)
2724     if (UI.getUse().get().getResNo() == 0 &&
2725         UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2726         UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2727       return false;
2728 
2729   return true;
2730 }
2731 
2732 /// getPreIndexedAddressParts - Returns true, and sets the base pointer, the
2733 /// offset pointer, and the addressing mode by reference, if the node's
2734 /// address can be legally represented as a pre-indexed load / store address.
2735 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2736                                                   SDValue &Offset,
2737                                                   ISD::MemIndexedMode &AM,
2738                                                   SelectionDAG &DAG) const {
2739   if (DisablePPCPreinc) return false;
2740 
2741   bool isLoad = true;
2742   SDValue Ptr;
2743   EVT VT;
2744   unsigned Alignment;
2745   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2746     Ptr = LD->getBasePtr();
2747     VT = LD->getMemoryVT();
2748     Alignment = LD->getAlignment();
2749   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2750     Ptr = ST->getBasePtr();
2751     VT  = ST->getMemoryVT();
2752     Alignment = ST->getAlignment();
2753     isLoad = false;
2754   } else
2755     return false;
2756 
2757   // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2758   // instructions because we can fold these into a more efficient instruction
2759   // instead (such as LXSD).
2760   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2761     return false;
2762   }
2763 
2764   // PowerPC doesn't have preinc load/store instructions for vectors (except
2765   // for QPX, which does have preinc r+r forms).
2766   if (VT.isVector()) {
2767     if (!Subtarget.hasQPX() || (VT != MVT::v4f64 && VT != MVT::v4f32)) {
2768       return false;
2769     } else if (SelectAddressRegRegOnly(Ptr, Offset, Base, DAG)) {
2770       AM = ISD::PRE_INC;
2771       return true;
2772     }
2773   }
2774 
2775   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2776     // Common code will reject creating a pre-inc form if the base pointer
2777     // is a frame index, or if N is a store and the base pointer is either
2778     // the same as or a predecessor of the value being stored.  Check for
2779     // those situations here, and try with swapped Base/Offset instead.
2780     bool Swap = false;
2781 
2782     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2783       Swap = true;
2784     else if (!isLoad) {
2785       SDValue Val = cast<StoreSDNode>(N)->getValue();
2786       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2787         Swap = true;
2788     }
2789 
2790     if (Swap)
2791       std::swap(Base, Offset);
2792 
2793     AM = ISD::PRE_INC;
2794     return true;
2795   }
2796 
2797   // LDU/STU can only handle immediates that are a multiple of 4.
2798   if (VT != MVT::i64) {
2799     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2800       return false;
2801   } else {
2802     // LDU/STU need an address with at least 4-byte alignment.
2803     if (Alignment < 4)
2804       return false;
2805 
2806     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2807       return false;
2808   }
2809 
2810   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2811     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2812     // sext i32 to i64 when addr mode is r+i.
2813     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2814         LD->getExtensionType() == ISD::SEXTLOAD &&
2815         isa<ConstantSDNode>(Offset))
2816       return false;
2817   }
2818 
2819   AM = ISD::PRE_INC;
2820   return true;
2821 }
2822 
2823 //===----------------------------------------------------------------------===//
2824 //  LowerOperation implementation
2825 //===----------------------------------------------------------------------===//
2826 
2827 /// Set HiOpFlags and LoOpFlags to the target MO flags for a label reference,
2828 /// adding the PIC flag when compiling position-independent code.
2829 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
2830                                unsigned &HiOpFlags, unsigned &LoOpFlags,
2831                                const GlobalValue *GV = nullptr) {
2832   HiOpFlags = PPCII::MO_HA;
2833   LoOpFlags = PPCII::MO_LO;
2834 
2835   // Don't use the PIC base if not in the PIC relocation model.
2836   if (IsPIC) {
2837     HiOpFlags |= PPCII::MO_PIC_FLAG;
2838     LoOpFlags |= PPCII::MO_PIC_FLAG;
2839   }
2840 }
2841 
2842 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
2843                              SelectionDAG &DAG) {
2844   SDLoc DL(HiPart);
2845   EVT PtrVT = HiPart.getValueType();
2846   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
2847 
2848   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
2849   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
2850 
2851   // With PIC, the first instruction is actually "GR+hi(&G)".
2852   if (isPIC)
2853     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
2854                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
2855 
2856   // Generate non-pic code that has direct accesses to the constant pool.
2857   // The address of the global is just (hi(&g)+lo(&g)).
2858   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2859 }
2860 
2861 static void setUsesTOCBasePtr(MachineFunction &MF) {
2862   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2863   FuncInfo->setUsesTOCBasePtr();
2864 }
2865 
2866 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
2867   setUsesTOCBasePtr(DAG.getMachineFunction());
2868 }
2869 
2870 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
2871                                        SDValue GA) const {
2872   const bool Is64Bit = Subtarget.isPPC64();
2873   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
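  // The TOC base pointer is X2 for 64-bit targets and R2 for 32-bit AIX; for
  // 32-bit ELF, the GOT pointer is materialized via GlobalBaseReg instead.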
2874   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
2875                         : Subtarget.isAIXABI()
2876                               ? DAG.getRegister(PPC::R2, VT)
2877                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
2878   SDValue Ops[] = { GA, Reg };
2879   return DAG.getMemIntrinsicNode(
2880       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
2881       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
2882       MachineMemOperand::MOLoad);
2883 }
2884 
2885 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
2886                                              SelectionDAG &DAG) const {
2887   EVT PtrVT = Op.getValueType();
2888   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2889   const Constant *C = CP->getConstVal();
2890 
2891   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2892   // The actual address of the GlobalValue is stored in the TOC.
2893   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2894     if (Subtarget.isUsingPCRelativeCalls()) {
2895       SDLoc DL(CP);
2896       EVT Ty = getPointerTy(DAG.getDataLayout());
2897       SDValue ConstPool = DAG.getTargetConstantPool(
2898           C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
2899       return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
2900     }
2901     setUsesTOCBasePtr(DAG);
2902     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
2903     return getTOCEntry(DAG, SDLoc(CP), GA);
2904   }
2905 
2906   unsigned MOHiFlag, MOLoFlag;
2907   bool IsPIC = isPositionIndependent();
2908   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2909 
2910   if (IsPIC && Subtarget.isSVR4ABI()) {
2911     SDValue GA =
2912         DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
2913     return getTOCEntry(DAG, SDLoc(CP), GA);
2914   }
2915 
2916   SDValue CPIHi =
2917       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
2918   SDValue CPILo =
2919       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
2920   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
2921 }
2922 
2923 // For 64-bit PowerPC, prefer the more compact relative encodings.
2924 // This trades 32 bits per jump table entry for one or two instructions
2925 // at the jump site.
2926 unsigned PPCTargetLowering::getJumpTableEncoding() const {
2927   if (isJumpTableRelative())
2928     return MachineJumpTableInfo::EK_LabelDifference32;
2929 
2930   return TargetLowering::getJumpTableEncoding();
2931 }
2932 
2933 bool PPCTargetLowering::isJumpTableRelative() const {
2934   if (UseAbsoluteJumpTables)
2935     return false;
2936   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
2937     return true;
2938   return TargetLowering::isJumpTableRelative();
2939 }
2940 
2941 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
2942                                                     SelectionDAG &DAG) const {
2943   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2944     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2945 
2946   switch (getTargetMachine().getCodeModel()) {
2947   case CodeModel::Small:
2948   case CodeModel::Medium:
2949     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
2950   default:
2951     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
2952                        getPointerTy(DAG.getDataLayout()));
2953   }
2954 }
2955 
2956 const MCExpr *
2957 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2958                                                 unsigned JTI,
2959                                                 MCContext &Ctx) const {
2960   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
2961     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2962 
2963   switch (getTargetMachine().getCodeModel()) {
2964   case CodeModel::Small:
2965   case CodeModel::Medium:
2966     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2967   default:
2968     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2969   }
2970 }
2971 
2972 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2973   EVT PtrVT = Op.getValueType();
2974   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2975 
2976   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
2977   if (Subtarget.isUsingPCRelativeCalls()) {
2978     SDLoc DL(JT);
2979     EVT Ty = getPointerTy(DAG.getDataLayout());
2980     SDValue GA =
2981         DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
2982     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
2983     return MatAddr;
2984   }
2985 
2986   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
2987   // The actual address of the GlobalValue is stored in the TOC.
2988   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
2989     setUsesTOCBasePtr(DAG);
2990     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
2991     return getTOCEntry(DAG, SDLoc(JT), GA);
2992   }
2993 
2994   unsigned MOHiFlag, MOLoFlag;
2995   bool IsPIC = isPositionIndependent();
2996   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
2997 
2998   if (IsPIC && Subtarget.isSVR4ABI()) {
2999     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3000                                         PPCII::MO_PIC_FLAG);
3001     return getTOCEntry(DAG, SDLoc(GA), GA);
3002   }
3003 
3004   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3005   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3006   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3007 }
3008 
3009 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3010                                              SelectionDAG &DAG) const {
3011   EVT PtrVT = Op.getValueType();
3012   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3013   const BlockAddress *BA = BASDN->getBlockAddress();
3014 
3015   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3016   if (Subtarget.isUsingPCRelativeCalls()) {
3017     SDLoc DL(BASDN);
3018     EVT Ty = getPointerTy(DAG.getDataLayout());
3019     SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3020                                            PPCII::MO_PCREL_FLAG);
3021     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3022     return MatAddr;
3023   }
3024 
3025   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3026   // The actual BlockAddress is stored in the TOC.
3027   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3028     setUsesTOCBasePtr(DAG);
3029     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3030     return getTOCEntry(DAG, SDLoc(BASDN), GA);
3031   }
3032 
3033   // 32-bit position-independent ELF stores the BlockAddress in the .got.
3034   if (Subtarget.is32BitELFABI() && isPositionIndependent())
3035     return getTOCEntry(
3036         DAG, SDLoc(BASDN),
3037         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3038 
3039   unsigned MOHiFlag, MOLoFlag;
3040   bool IsPIC = isPositionIndependent();
3041   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3042   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3043   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3044   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3045 }
3046 
3047 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3048                                               SelectionDAG &DAG) const {
3049   // FIXME: TLS addresses currently use medium model code sequences,
3050   // which is the most useful form.  Eventually support for small and
3051   // large models could be added if users need it, at the cost of
3052   // additional complexity.
3053   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3054   if (DAG.getTarget().useEmulatedTLS())
3055     return LowerToTLSEmulatedModel(GA, DAG);
3056 
3057   SDLoc dl(GA);
3058   const GlobalValue *GV = GA->getGlobal();
3059   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3060   bool is64bit = Subtarget.isPPC64();
3061   const Module *M = DAG.getMachineFunction().getFunction().getParent();
3062   PICLevel::Level picLevel = M->getPICLevel();
3063 
3064   const TargetMachine &TM = getTargetMachine();
3065   TLSModel::Model Model = TM.getTLSModel(GV);
3066 
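  // For local-exec, the variable's offset from the thread pointer (X13 on
  // 64-bit, R2 on 32-bit) is known at link time, so the address is formed
  // directly from the thread register and the high/low parts of the offset.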
3067   if (Model == TLSModel::LocalExec) {
3068     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3069                                                PPCII::MO_TPREL_HA);
3070     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3071                                                PPCII::MO_TPREL_LO);
3072     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3073                              : DAG.getRegister(PPC::R2, MVT::i32);
3074 
3075     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3076     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3077   }
3078 
3079   if (Model == TLSModel::InitialExec) {
3080     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3081     SDValue TGATLS = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3082                                                 PPCII::MO_TLS);
3083     SDValue GOTPtr;
3084     if (is64bit) {
3085       setUsesTOCBasePtr(DAG);
3086       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3087       GOTPtr = DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl,
3088                            PtrVT, GOTReg, TGA);
3089     } else {
3090       if (!TM.isPositionIndependent())
3091         GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3092       else if (picLevel == PICLevel::SmallPIC)
3093         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3094       else
3095         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3096     }
3097     SDValue TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl,
3098                                    PtrVT, TGA, GOTPtr);
3099     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3100   }
3101 
3102   if (Model == TLSModel::GeneralDynamic) {
3103     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3104     SDValue GOTPtr;
3105     if (is64bit) {
3106       setUsesTOCBasePtr(DAG);
3107       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3108       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
3109                                    GOTReg, TGA);
3110     } else {
3111       if (picLevel == PICLevel::SmallPIC)
3112         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3113       else
3114         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3115     }
3116     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3117                        GOTPtr, TGA, TGA);
3118   }
3119 
3120   if (Model == TLSModel::LocalDynamic) {
3121     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3122     SDValue GOTPtr;
3123     if (is64bit) {
3124       setUsesTOCBasePtr(DAG);
3125       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3126       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3127                            GOTReg, TGA);
3128     } else {
3129       if (picLevel == PICLevel::SmallPIC)
3130         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3131       else
3132         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3133     }
3134     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3135                                   PtrVT, GOTPtr, TGA, TGA);
3136     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3137                                       PtrVT, TLSAddr, TGA);
3138     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3139   }
3140 
3141   llvm_unreachable("Unknown TLS model!");
3142 }
3143 
3144 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3145                                               SelectionDAG &DAG) const {
3146   EVT PtrVT = Op.getValueType();
3147   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3148   SDLoc DL(GSDN);
3149   const GlobalValue *GV = GSDN->getGlobal();
3150 
3151   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3152   // The actual address of the GlobalValue is stored in the TOC.
3153   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3154     if (Subtarget.isUsingPCRelativeCalls()) {
3155       EVT Ty = getPointerTy(DAG.getDataLayout());
3156       if (isAccessedAsGotIndirect(Op)) {
3157         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3158                                                 PPCII::MO_PCREL_FLAG |
3159                                                     PPCII::MO_GOT_FLAG);
3160         SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3161         SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3162                                    MachinePointerInfo());
3163         return Load;
3164       } else {
3165         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3166                                                 PPCII::MO_PCREL_FLAG);
3167         return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3168       }
3169     }
3170     setUsesTOCBasePtr(DAG);
3171     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3172     return getTOCEntry(DAG, DL, GA);
3173   }
3174 
3175   unsigned MOHiFlag, MOLoFlag;
3176   bool IsPIC = isPositionIndependent();
3177   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3178 
3179   if (IsPIC && Subtarget.isSVR4ABI()) {
3180     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3181                                             GSDN->getOffset(),
3182                                             PPCII::MO_PIC_FLAG);
3183     return getTOCEntry(DAG, DL, GA);
3184   }
3185 
3186   SDValue GAHi =
3187     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3188   SDValue GALo =
3189     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3190 
3191   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3192 }
3193 
3194 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3195   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
3196   SDLoc dl(Op);
3197 
3198   if (Op.getValueType() == MVT::v2i64) {
3199     // When the operands themselves are v2i64 values, we need to do something
3200     // special because VSX has no underlying comparison operations for these.
3201     if (Op.getOperand(0).getValueType() == MVT::v2i64) {
3202       // Equality can be handled by casting to the legal type for Altivec
3203       // comparisons, everything else needs to be expanded.
3204       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3205         return DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
3206                  DAG.getSetCC(dl, MVT::v4i32,
3207                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0)),
3208                    DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(1)),
3209                    CC));
3210       }
3211 
3212       return SDValue();
3213     }
3214 
3215     // We handle most of these in the usual way.
3216     return Op;
3217   }
3218 
3219   // If we're comparing for equality to zero, expose the fact that this is
3220   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3221   // fold the new nodes.
3222   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3223     return V;
3224 
3225   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
3226     // Leave comparisons against 0 and -1 alone for now, since they're usually
3227     // optimized.  FIXME: revisit this when we can custom lower all setcc
3228     // optimizations.
3229     if (C->isAllOnesValue() || C->isNullValue())
3230       return SDValue();
3231   }
3232 
3233   // If we have an integer seteq/setne, turn it into a compare against zero
3234   // by xor'ing the rhs with the lhs, which is faster than setting a
3235   // condition register, reading it back out, and masking the correct bit.  The
3236   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3237   // the result to other bit-twiddling opportunities.
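  // For example, seteq(a, b) becomes seteq(a ^ b, 0).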
3238   EVT LHSVT = Op.getOperand(0).getValueType();
3239   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3240     EVT VT = Op.getValueType();
3241     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, Op.getOperand(0),
3242                                 Op.getOperand(1));
3243     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3244   }
3245   return SDValue();
3246 }
3247 
3248 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3249   SDNode *Node = Op.getNode();
3250   EVT VT = Node->getValueType(0);
3251   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3252   SDValue InChain = Node->getOperand(0);
3253   SDValue VAListPtr = Node->getOperand(1);
3254   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3255   SDLoc dl(Node);
3256 
3257   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3258 
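  // The offsets below follow the 32-bit SVR4 va_list layout described in
  // LowerVASTART: gpr index at byte 0, fpr index at byte 1, overflow_arg_area
  // at byte 4, and reg_save_area at byte 8.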
3259   // gpr_index
3260   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3261                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3262   InChain = GprIndex.getValue(1);
3263 
3264   if (VT == MVT::i64) {
3265     // Check if GprIndex is even
3266     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3267                                  DAG.getConstant(1, dl, MVT::i32));
3268     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3269                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3270     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3271                                           DAG.getConstant(1, dl, MVT::i32));
3272     // Align GprIndex to be even if it isn't
3273     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3274                            GprIndex);
3275   }
3276 
3277   // fpr index is 1 byte after gpr
3278   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3279                                DAG.getConstant(1, dl, MVT::i32));
3280 
3281   // fpr
3282   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3283                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3284   InChain = FprIndex.getValue(1);
3285 
3286   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3287                                        DAG.getConstant(8, dl, MVT::i32));
3288 
3289   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3290                                         DAG.getConstant(4, dl, MVT::i32));
3291 
3292   // areas
3293   SDValue OverflowArea =
3294       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3295   InChain = OverflowArea.getValue(1);
3296 
3297   SDValue RegSaveArea =
3298       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3299   InChain = RegSaveArea.getValue(1);
3300 
3301   // select overflow_area if index >= 8
3302   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3303                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3304 
3305   // adjustment constant gpr_index * 4/8
3306   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3307                                     VT.isInteger() ? GprIndex : FprIndex,
3308                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3309                                                     MVT::i32));
3310 
3311   // OurReg = RegSaveArea + RegConstant
3312   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3313                                RegConstant);
3314 
3315   // Floating types are 32 bytes into RegSaveArea
3316   if (VT.isFloatingPoint())
3317     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3318                          DAG.getConstant(32, dl, MVT::i32));
3319 
3320   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3321   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3322                                    VT.isInteger() ? GprIndex : FprIndex,
3323                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3324                                                    MVT::i32));
3325 
3326   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3327                               VT.isInteger() ? VAListPtr : FprPtr,
3328                               MachinePointerInfo(SV), MVT::i8);
3329 
3330   // determine if we should load from reg_save_area or overflow_area
3331   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3332 
3333   // increase overflow_area by 4/8 if gpr/fpr >= 8
3334   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3335                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3336                                           dl, MVT::i32));
3337 
3338   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3339                              OverflowAreaPlusN);
3340 
3341   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3342                               MachinePointerInfo(), MVT::i32);
3343 
3344   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3345 }
3346 
3347 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3348   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3349 
3350   // We have to copy the entire va_list struct:
3351   // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
3352   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3353                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3354                        false, true, false, false, MachinePointerInfo(),
3355                        MachinePointerInfo());
3356 }
3357 
3358 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3359                                                   SelectionDAG &DAG) const {
3360   if (Subtarget.isAIXABI())
3361     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3362 
3363   return Op.getOperand(0);
3364 }
3365 
3366 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3367                                                 SelectionDAG &DAG) const {
3368   if (Subtarget.isAIXABI())
3369     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3370 
3371   SDValue Chain = Op.getOperand(0);
3372   SDValue Trmp = Op.getOperand(1); // trampoline
3373   SDValue FPtr = Op.getOperand(2); // nested function
3374   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3375   SDLoc dl(Op);
3376 
3377   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3378   bool isPPC64 = (PtrVT == MVT::i64);
3379   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3380 
3381   TargetLowering::ArgListTy Args;
3382   TargetLowering::ArgListEntry Entry;
3383 
3384   Entry.Ty = IntPtrTy;
3385   Entry.Node = Trmp; Args.push_back(Entry);
3386 
3387   // TrampSize == (isPPC64 ? 48 : 40);
3388   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3389                                isPPC64 ? MVT::i64 : MVT::i32);
3390   Args.push_back(Entry);
3391 
3392   Entry.Node = FPtr; Args.push_back(Entry);
3393   Entry.Node = Nest; Args.push_back(Entry);
3394 
3395   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3396   TargetLowering::CallLoweringInfo CLI(DAG);
3397   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3398       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3399       DAG.getExternalFunctionSymbol("__trampoline_setup"), std::move(Args));
3400 
3401   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3402   return CallResult.second;
3403 }
3404 
3405 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3406   MachineFunction &MF = DAG.getMachineFunction();
3407   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3408   EVT PtrVT = getPointerTy(MF.getDataLayout());
3409 
3410   SDLoc dl(Op);
3411 
3412   if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3413     // vastart just stores the address of the VarArgsFrameIndex slot into the
3414     // memory location argument.
3415     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3416     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3417     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3418                         MachinePointerInfo(SV));
3419   }
3420 
3421   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3422   // We suppose the given va_list is already allocated.
3423   //
3424   // typedef struct {
3425   //  char gpr;     /* index into the array of 8 GPRs
3426   //                 * stored in the register save area
3427   //                 * gpr=0 corresponds to r3,
3428   //                 * gpr=1 to r4, etc.
3429   //                 */
3430   //  char fpr;     /* index into the array of 8 FPRs
3431   //                 * stored in the register save area
3432   //                 * fpr=0 corresponds to f1,
3433   //                 * fpr=1 to f2, etc.
3434   //                 */
3435   //  char *overflow_arg_area;
3436   //                /* location on stack that holds
3437   //                 * the next overflow argument
3438   //                 */
3439   //  char *reg_save_area;
3440   //               /* where r3:r10 and f1:f8 (if saved)
3441   //                * are stored
3442   //                */
3443   // } va_list[1];
3444 
3445   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3446   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3447   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3448                                             PtrVT);
3449   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3450                                  PtrVT);
3451 
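  // The constants below are the deltas between successive va_list stores: the
  // fpr byte is at offset 1, overflow_arg_area at offset 4, and reg_save_area
  // at offset 8.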
3452   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3453   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3454 
3455   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3456   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3457 
3458   uint64_t FPROffset = 1;
3459   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3460 
3461   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3462 
3463   // Store first byte : number of int regs
3464   SDValue firstStore =
3465       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3466                         MachinePointerInfo(SV), MVT::i8);
3467   uint64_t nextOffset = FPROffset;
3468   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3469                                   ConstFPROffset);
3470 
3471   // Store second byte : number of float regs
3472   SDValue secondStore =
3473       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3474                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3475   nextOffset += StackOffset;
3476   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3477 
3478   // Store second word : arguments given on stack
3479   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3480                                     MachinePointerInfo(SV, nextOffset));
3481   nextOffset += FrameOffset;
3482   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3483 
3484   // Store third word : arguments given in registers
3485   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3486                       MachinePointerInfo(SV, nextOffset));
3487 }
3488 
3489 /// FPR - The set of FP registers that should be allocated for arguments
3490 /// on Darwin and AIX.
3491 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3492                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3493                                 PPC::F11, PPC::F12, PPC::F13};
3494 
3495 /// QFPR - The set of QPX registers that should be allocated for arguments.
3496 static const MCPhysReg QFPR[] = {
3497     PPC::QF1, PPC::QF2, PPC::QF3,  PPC::QF4,  PPC::QF5,  PPC::QF6, PPC::QF7,
3498     PPC::QF8, PPC::QF9, PPC::QF10, PPC::QF11, PPC::QF12, PPC::QF13};
3499 
3500 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3501 /// the stack.
3502 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3503                                        unsigned PtrByteSize) {
3504   unsigned ArgSize = ArgVT.getStoreSize();
3505   if (Flags.isByVal())
3506     ArgSize = Flags.getByValSize();
3507 
3508   // Round up to multiples of the pointer size, except for array members,
3509   // which are always packed.
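  // For example, with 8-byte pointers, a 5-byte byval argument still reserves
  // a full 8-byte slot.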
3510   if (!Flags.isInConsecutiveRegs())
3511     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3512 
3513   return ArgSize;
3514 }
3515 
3516 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3517 /// on the stack.
3518 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3519                                          ISD::ArgFlagsTy Flags,
3520                                          unsigned PtrByteSize) {
3521   Align Alignment(PtrByteSize);
3522 
3523   // Altivec parameters are padded to a 16 byte boundary.
3524   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3525       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3526       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3527       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3528     Alignment = Align(16);
3529   // QPX vector types stored in double-precision are padded to a 32 byte
3530   // boundary.
3531   else if (ArgVT == MVT::v4f64 || ArgVT == MVT::v4i1)
3532     Alignment = Align(32);
3533 
3534   // ByVal parameters are aligned as requested.
3535   if (Flags.isByVal()) {
3536     auto BVAlign = Flags.getNonZeroByValAlign();
3537     if (BVAlign > PtrByteSize) {
3538       if (BVAlign.value() % PtrByteSize != 0)
3539         llvm_unreachable(
3540             "ByVal alignment is not a multiple of the pointer size");
3541 
3542       Alignment = BVAlign;
3543     }
3544   }
3545 
3546   // Array members are always packed to their original alignment.
3547   if (Flags.isInConsecutiveRegs()) {
3548     // If the array member was split into multiple registers, the first
3549     // needs to be aligned to the size of the full type.  (Except for
3550     // ppcf128, which is only aligned as its f64 components.)
3551     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3552       Alignment = Align(OrigVT.getStoreSize());
3553     else
3554       Alignment = Align(ArgVT.getStoreSize());
3555   }
3556 
3557   return Alignment;
3558 }
3559 
3560 /// CalculateStackSlotUsed - Return whether this argument will use its
3561 /// stack slot (instead of being passed in registers).  ArgOffset,
3562 /// AvailableFPRs, and AvailableVRs must hold the current argument
3563 /// position, and will be updated to account for this argument.
3564 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT,
3565                                    ISD::ArgFlagsTy Flags,
3566                                    unsigned PtrByteSize,
3567                                    unsigned LinkageSize,
3568                                    unsigned ParamAreaSize,
3569                                    unsigned &ArgOffset,
3570                                    unsigned &AvailableFPRs,
3571                                    unsigned &AvailableVRs, bool HasQPX) {
3572   bool UseMemory = false;
3573 
3574   // Respect alignment of argument on the stack.
3575   Align Alignment =
3576       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3577   ArgOffset = alignTo(ArgOffset, Alignment);
3578   // If there's no space left in the argument save area, we must
3579   // use memory (this check also catches zero-sized arguments).
3580   if (ArgOffset >= LinkageSize + ParamAreaSize)
3581     UseMemory = true;
3582 
3583   // Allocate argument on the stack.
3584   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3585   if (Flags.isInConsecutiveRegsLast())
3586     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3587   // If we overran the argument save area, we must use memory
3588   // (this check catches arguments passed partially in memory)
3589   if (ArgOffset > LinkageSize + ParamAreaSize)
3590     UseMemory = true;
3591 
3592   // However, if the argument is actually passed in an FPR or a VR,
3593   // we don't use memory after all.
3594   if (!Flags.isByVal()) {
3595     if (ArgVT == MVT::f32 || ArgVT == MVT::f64 ||
3596         // QPX registers overlap with the scalar FP registers.
3597         (HasQPX && (ArgVT == MVT::v4f32 ||
3598                     ArgVT == MVT::v4f64 ||
3599                     ArgVT == MVT::v4i1)))
3600       if (AvailableFPRs > 0) {
3601         --AvailableFPRs;
3602         return false;
3603       }
3604     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3605         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3606         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3607         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3608       if (AvailableVRs > 0) {
3609         --AvailableVRs;
3610         return false;
3611       }
3612   }
3613 
3614   return UseMemory;
3615 }
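
// Worked example (illustrative, assuming the ELFv2 values LinkageSize == 32
// and ParamAreaSize == 64): after eight pointer-sized arguments, ArgOffset
// reaches 96 == LinkageSize + ParamAreaSize, so a ninth i64 argument fails
// the first check above and, with no FPR or VR to fall back on, is reported
// as using its stack slot.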

/// EnsureStackAlignment - Round stack frame size up from NumBytes to
/// ensure minimum alignment required for target.
static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
                                     unsigned NumBytes) {
  return alignTo(NumBytes, Lowering->getStackAlign());
}
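
// For example (illustrative): with a 16-byte target stack alignment,
// EnsureStackAlignment rounds NumBytes == 52 up to 64.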

SDValue PPCTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (Subtarget.isAIXABI())
    return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                    InVals);
  if (Subtarget.is64BitELFABI())
    return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                       InVals);
  if (Subtarget.is32BitELFABI())
    return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                       InVals);

  return LowerFormalArguments_Darwin(Chain, CallConv, isVarArg, Ins, dl, DAG,
                                     InVals);
}

SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {

  // 32-bit SVR4 ABI Stack Frame Layout:
  //              +-----------------------------------+
  //        +-->  |            Back chain             |
  //        |     +-----------------------------------+
  //        |     | Floating-point register save area |
  //        |     +-----------------------------------+
  //        |     |    General register save area     |
  //        |     +-----------------------------------+
  //        |     |          CR save word             |
  //        |     +-----------------------------------+
  //        |     |         VRSAVE save word          |
  //        |     +-----------------------------------+
  //        |     |         Alignment padding         |
  //        |     +-----------------------------------+
  //        |     |     Vector register save area     |
  //        |     +-----------------------------------+
  //        |     |       Local variable space        |
  //        |     +-----------------------------------+
  //        |     |        Parameter list area        |
  //        |     +-----------------------------------+
  //        |     |           LR save word            |
  //        |     +-----------------------------------+
  // SP-->  +---  |            Back chain             |
  //              +-----------------------------------+
  //
  // Specifications:
  //   System V Application Binary Interface PowerPC Processor Supplement
  //   AltiVec Technology Programming Interface Manual

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  const Align PtrAlign(4);

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                    *DAG.getContext());

  // Reserve space for the linkage area on the stack.
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  CCInfo.AllocateStack(LinkageSize, PtrAlign);
  if (useSoftFloat())
    CCInfo.PreAnalyzeFormalArguments(Ins);

  CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
  CCInfo.clearWasPPCF128();

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];

    // Arguments stored in registers.
    if (VA.isRegLoc()) {
      const TargetRegisterClass *RC;
      EVT ValVT = VA.getValVT();

      switch (ValVT.getSimpleVT().SimpleTy) {
        default:
          llvm_unreachable("ValVT not supported by formal arguments Lowering");
        case MVT::i1:
        case MVT::i32:
          RC = &PPC::GPRCRegClass;
          break;
        case MVT::f32:
          if (Subtarget.hasP8Vector())
            RC = &PPC::VSSRCRegClass;
          else if (Subtarget.hasSPE())
            RC = &PPC::GPRCRegClass;
          else
            RC = &PPC::F4RCRegClass;
          break;
        case MVT::f64:
          if (Subtarget.hasVSX())
            RC = &PPC::VSFRCRegClass;
          else if (Subtarget.hasSPE())
            // SPE passes doubles in GPR pairs.
            RC = &PPC::GPRCRegClass;
          else
            RC = &PPC::F8RCRegClass;
          break;
        case MVT::v16i8:
        case MVT::v8i16:
        case MVT::v4i32:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f32:
          RC = Subtarget.hasQPX() ? &PPC::QSRCRegClass : &PPC::VRRCRegClass;
          break;
        case MVT::v2f64:
        case MVT::v2i64:
          RC = &PPC::VRRCRegClass;
          break;
        case MVT::v4f64:
          RC = &PPC::QFRCRegClass;
          break;
        case MVT::v4i1:
          RC = &PPC::QBRCRegClass;
          break;
      }

      SDValue ArgValue;
      // Transform the arguments stored in physical registers into
      // virtual ones.
      if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
        assert(i + 1 < e && "No second half of double precision argument");
        unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
        unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
        SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
        SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
        if (!Subtarget.isLittleEndian())
          std::swap(ArgValueLo, ArgValueHi);
        ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
                               ArgValueHi);
      } else {
        unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
        ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
                                      ValVT == MVT::i1 ? MVT::i32 : ValVT);
        if (ValVT == MVT::i1)
          ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
      }

      InVals.push_back(ArgValue);
    } else {
      // Argument stored in memory.
      assert(VA.isMemLoc());

      // Get the extended size of the argument type on the stack.
      unsigned ArgSize = VA.getLocVT().getStoreSize();
      // Get the actual size of the argument type.
      unsigned ObjSize = VA.getValVT().getStoreSize();
      unsigned ArgOffset = VA.getLocMemOffset();
      // Stack objects in PPC32 are right justified.
      ArgOffset += ArgSize - ObjSize;
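      // Illustrative: a 1-byte value carried in a 4-byte slot is loaded
      // from slot base + 3, i.e. from the high-address end of the slot on
      // this big-endian ABI.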
      int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);

      // Create load nodes to retrieve arguments from the stack.
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(
          DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
    }
  }

  // Assign locations to all of the incoming aggregate by value arguments.
  // Aggregates passed by value are stored in the local variable space of the
  // caller's stack frame, right above the parameter list area.
  SmallVector<CCValAssign, 16> ByValArgLocs;
  CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                      ByValArgLocs, *DAG.getContext());

  // Reserve stack space for the allocations in CCInfo.
  CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);

  CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
  MinReservedArea = std::max(MinReservedArea, LinkageSize);

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized function's reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  SmallVector<SDValue, 8> MemOps;

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    static const MCPhysReg GPArgRegs[] = {
      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
      PPC::R7, PPC::R8, PPC::R9, PPC::R10,
    };
    const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);

    static const MCPhysReg FPArgRegs[] = {
      PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
      PPC::F8
    };
    unsigned NumFPArgRegs = array_lengthof(FPArgRegs);

    if (useSoftFloat() || hasSPE())
      NumFPArgRegs = 0;

    FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
    FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));

    // Make room for NumGPArgRegs and NumFPArgRegs.
    int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
                NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
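    // Arithmetic check (illustrative): with 8 GPRs of 4 bytes and 8 FPRs of
    // 8 bytes, Depth is 8*4 + 8*8 == 96 bytes; under soft-float or SPE the
    // FPR contribution drops out and Depth is 32 bytes.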

    FuncInfo->setVarArgsStackOffset(
      MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                            CCInfo.getNextStackOffset(), true));

    FuncInfo->setVarArgsFrameIndex(
        MFI.CreateStackObject(Depth, Align(8), false));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // The fixed integer arguments of a variadic function are stored to the
    // VarArgsFrameIndex on the stack so that they may be loaded by
    // dereferencing the result of va_next.
    for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by four for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }

    // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
    // is set.
    // The double arguments are stored to the VarArgsFrameIndex
    // on the stack.
    for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
      // Get an existing live-in vreg, or add a new one.
      unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
      if (!VReg)
        VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
                                       PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

// PPC64 passes i8, i16, and i32 values in i64 registers. Promote the value
// to MVT::i64 and then truncate to the correct register size.
SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
                                             EVT ObjectVT, SelectionDAG &DAG,
                                             SDValue ArgVal,
                                             const SDLoc &dl) const {
  if (Flags.isSExt())
    ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));
  else if (Flags.isZExt())
    ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
                         DAG.getValueType(ObjectVT));

  return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
}

SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  bool isELFv2ABI = Subtarget.isELFv2ABI();
  bool isLittleEndian = Subtarget.isLittleEndian();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  assert(!(CallConv == CallingConv::Fast && isVarArg) &&
         "fastcc not supported on varargs functions");

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = 8;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);
  const unsigned Num_QFPR_Regs = Num_FPR_Regs;

  // Do a first pass over the arguments to determine whether the ABI
  // guarantees that our caller has allocated the parameter save area
  // on its stack frame.  In the ELFv1 ABI, this is always the case;
  // in the ELFv2 ABI, it is true if this is a vararg function or if
  // any parameter is located in a stack slot.

  bool HasParameterArea = !isELFv2ABI || isVarArg;
  unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = Num_FPR_Regs;
  unsigned AvailableVRs = Num_VR_Regs;
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    if (Ins[i].Flags.isNest())
      continue;

    if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      HasParameterArea = true;
  }

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  unsigned ArgOffset = LinkageSize;
  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
  unsigned &QFPR_idx = FPR_idx;
  SmallVector<SDValue, 8> MemOps;
  Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    EVT OrigVT = Ins[ArgNo].ArgVT;
    unsigned ObjSize = ObjectVT.getStoreSize();
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    // We re-align the argument offset for each argument, except when using the
    // fast calling convention, where we need to make sure we do that only when
    // we'll actually use a stack slot.
    unsigned CurArgOffset;
    Align Alignment;
    auto ComputeArgOffset = [&]() {
      /* Respect alignment of argument on the stack.  */
      Alignment =
          CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
      ArgOffset = alignTo(ArgOffset, Alignment);
      CurArgOffset = ArgOffset;
    };

    if (CallConv != CallingConv::Fast) {
      ComputeArgOffset();

      /* Compute GPR index associated with argument offset.  */
      GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
      GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
    }

    // FIXME: the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      if (CallConv == CallingConv::Fast)
        ComputeArgOffset();

      // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple
      // of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
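      // Example (illustrative): a 13-byte byval object is rounded up to
      // ArgSize == 16, i.e. two full doublewords, since PtrByteSize == 8.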
      // Empty aggregate parameters do not take up registers.  Examples:
      //   struct { } a;
      //   union  { } b;
      //   int c[0];
      // etc.  However, we have to provide a place-holder in InVals, so
      // pretend we have an 8-byte item at the current address for that
      // purpose.
      if (!ObjSize) {
        int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
        SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
        InVals.push_back(FIN);
        continue;
      }

      // Create a stack object covering all stack doublewords occupied
      // by the argument.  If the argument is (fully or partially) on
      // the stack, or if the argument is fully in registers but the
      // caller has allocated the parameter save area anyway, we can refer
      // directly to the caller's stack frame.  Otherwise, create a
      // local copy in our own frame.
      int FI;
      if (HasParameterArea ||
          ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
        FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
      else
        FI = MFI.CreateStackObject(ArgSize, Alignment, false);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);

      // Handle aggregates smaller than 8 bytes.
      if (ObjSize < PtrByteSize) {
        // The value of the object is its address, which differs from the
        // address of the enclosing doubleword on big-endian systems.
        SDValue Arg = FIN;
        if (!isLittleEndian) {
          SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
          Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
        }
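        // Illustrative: a 3-byte aggregate sits at the high-address end of
        // its doubleword on big-endian targets, so its address is the frame
        // index plus 8 - 3 == 5 bytes.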
        InVals.push_back(Arg);

        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
          FuncInfo->addLiveInAttr(VReg, Flags);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store;

          if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
            EVT ObjType = (ObjSize == 1 ? MVT::i8 :
                           (ObjSize == 2 ? MVT::i16 : MVT::i32));
            Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
                                      MachinePointerInfo(&*FuncArg), ObjType);
          } else {
            // For sizes that don't fit a truncating store (3, 5, 6, 7),
            // store the whole register as-is to the parameter save area
            // slot.
            Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                 MachinePointerInfo(&*FuncArg));
          }

          MemOps.push_back(Store);
        }
        // Whether we copied from a register or not, advance the offset
        // into the parameter save area by a full doubleword.
        ArgOffset += PtrByteSize;
        continue;
      }

      // The value of the object is its address, which is the address of
      // its first stack doubleword.
      InVals.push_back(FIN);

      // Store whatever pieces of the object are in registers to memory.
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        if (GPR_idx == Num_GPR_Regs)
          break;

        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
        SDValue Addr = FIN;
        if (j) {
          SDValue Off = DAG.getConstant(j, dl, PtrVT);
          Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
        }
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
                                     MachinePointerInfo(&*FuncArg, j));
        MemOps.push_back(Store);
        ++GPR_idx;
      }
      ArgOffset += ArgSize;
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
    case MVT::i64:
      if (Flags.isNest()) {
        // The 'nest' parameter, if any, is passed in R11.
        unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        break;
      }

      // These can be scalar arguments or elements of an integer array type
      // passed directly.  Clang may use those instead of "byval" aggregate
      // types to avoid forcing arguments to memory unnecessarily.
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // the value to MVT::i64 and then truncate to the correct register
          // size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // These can be scalar arguments or elements of a float array type
      // passed directly.  The latter are used to implement ELFv2 homogeneous
      // float aggregates.
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx],
                              Subtarget.hasP8Vector()
                                  ? &PPC::VSSRCRegClass
                                  : &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
                                                ? &PPC::VSFRCRegClass
                                                : &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
        // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
        // once we support fp <-> gpr moves.

        // This can only ever happen in the presence of f32 array types,
        // since otherwise we never run out of FPRs before running out
        // of GPRs.
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
        FuncInfo->addLiveInAttr(VReg, Flags);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::f32) {
          if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
            ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
                                 DAG.getConstant(32, dl, MVT::i32));
          ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
        }
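        // Illustrative: the word holding the f32 maps to the high half of
        // the GPR when (ArgOffset % 8) == 0 on big-endian or == 4 on
        // little-endian, which is why those cases shift right by 32 before
        // truncating.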

        ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();

        needsLoad = true;
      }

      // When passing an array of floats, the array occupies consecutive
      // space in the argument area; only round up to the next doubleword
      // at the end of the array.  Otherwise, each float takes 8 bytes.
      if (CallConv != CallingConv::Fast || needsLoad) {
        ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
        ArgOffset += ArgSize;
        if (Flags.isInConsecutiveRegsLast())
          ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      }
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
    case MVT::v2f64:
    case MVT::v2i64:
    case MVT::v1i128:
    case MVT::f128:
      if (!Subtarget.hasQPX()) {
        // These can be scalar arguments or elements of a vector array type
        // passed directly.  The latter are used to implement ELFv2 homogeneous
        // vector aggregates.
        if (VR_idx != Num_VR_Regs) {
          unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
          ++VR_idx;
        } else {
          if (CallConv == CallingConv::Fast)
            ComputeArgOffset();
          needsLoad = true;
        }
        if (CallConv != CallingConv::Fast || needsLoad)
          ArgOffset += 16;
        break;
      } // not QPX

      assert(ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 &&
             "Invalid QPX parameter type");
      LLVM_FALLTHROUGH;

    case MVT::v4f64:
    case MVT::v4i1:
      // QPX vectors are treated like their scalar floating-point subregisters
      // (except that they're larger).
      unsigned Sz = ObjectVT.getSimpleVT().SimpleTy == MVT::v4f32 ? 16 : 32;
      if (QFPR_idx != Num_QFPR_Regs) {
        const TargetRegisterClass *RC;
        switch (ObjectVT.getSimpleVT().SimpleTy) {
        case MVT::v4f64: RC = &PPC::QFRCRegClass; break;
        case MVT::v4f32: RC = &PPC::QSRCRegClass; break;
        default:         RC = &PPC::QBRCRegClass; break;
        }

        unsigned VReg = MF.addLiveIn(QFPR[QFPR_idx], RC);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++QFPR_idx;
      } else {
        if (CallConv == CallingConv::Fast)
          ComputeArgOffset();
        needsLoad = true;
      }
      if (CallConv != CallingConv::Fast || needsLoad)
        ArgOffset += Sz;
      break;
    }

    // We need to load the argument to a virtual register if we determined
    // above that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      if (ObjSize < ArgSize && !isLittleEndian)
        CurArgOffset += ArgSize - ObjSize;
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Area that is at least reserved in the caller of this function.
  unsigned MinReservedArea;
  if (HasParameterArea)
    MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
  else
    MinReservedArea = LinkageSize;

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized functions' reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  // As the ELFv2 ABI spec puts it: C programs that are intended to be
  // *portable* across different compilers and architectures must use the
  // header file <stdarg.h> to deal with variable argument lists.
  if (isVarArg && MFI.hasVAStart()) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
      MFI.CreateFixedObject(PtrByteSize, Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
         GPR_idx < Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by eight for the next argument to store.
      SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

SDValue PPCTargetLowering::LowerFormalArguments_Darwin(
    SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  // TODO: add description of PPC stack frame format, or at least some docs.
  //
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();

  EVT PtrVT = getPointerTy(MF.getDataLayout());
  bool isPPC64 = PtrVT == MVT::i64;
  // Potential tail calls could cause overwriting of argument stack slots.
  bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
                       (CallConv == CallingConv::Fast));
  unsigned PtrByteSize = isPPC64 ? 8 : 4;
  unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
  unsigned ArgOffset = LinkageSize;
  // Area that is at least reserved in caller of this function.
  unsigned MinReservedArea = ArgOffset;

  static const MCPhysReg GPR_32[] = {           // 32-bit registers.
    PPC::R3, PPC::R4, PPC::R5, PPC::R6,
    PPC::R7, PPC::R8, PPC::R9, PPC::R10,
  };
  static const MCPhysReg GPR_64[] = {           // 64-bit registers.
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned Num_GPR_Regs = array_lengthof(GPR_32);
  const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
  const unsigned Num_VR_Regs  = array_lengthof(VR);

  unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;

  const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;

  // In 32-bit non-varargs functions, the stack space for vectors is after the
  // stack space for non-vectors.  We do not use this space unless we have
  // too many vectors to fit in registers, something that only occurs in
  // constructed examples, but we have to walk the arglist to figure
  // that out...for the pathological case, compute VecArgOffset as the
  // start of the vector parameter area.  Computing VecArgOffset is the
  // entire point of the following loop.
  unsigned VecArgOffset = ArgOffset;
  if (!isVarArg && !isPPC64) {
    for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e;
         ++ArgNo) {
      EVT ObjectVT = Ins[ArgNo].VT;
      ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;

      if (Flags.isByVal()) {
        // ObjSize is the true size; ArgSize is ObjSize rounded up to a
        // multiple of registers.
        unsigned ObjSize = Flags.getByValSize();
        unsigned ArgSize =
                ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
        VecArgOffset += ArgSize;
        continue;
      }

      switch(ObjectVT.getSimpleVT().SimpleTy) {
      default: llvm_unreachable("Unhandled argument type!");
      case MVT::i1:
      case MVT::i32:
      case MVT::f32:
        VecArgOffset += 4;
        break;
      case MVT::i64:  // PPC64
      case MVT::f64:
        // FIXME: We are guaranteed to be !isPPC64 at this point.
        // Does MVT::i64 apply?
        VecArgOffset += 8;
        break;
      case MVT::v4f32:
      case MVT::v4i32:
      case MVT::v8i16:
      case MVT::v16i8:
        // Nothing to do, we're only looking at non-vector args here.
        break;
      }
    }
  }
  // We've found where the vector parameter area in memory is.  Skip the
  // first 12 parameters; these don't use that memory.
  VecArgOffset = ((VecArgOffset+15)/16)*16;
  VecArgOffset += 12*16;
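  // Arithmetic example (illustrative): if the non-vector parameters end at
  // byte offset 72, the vector area starts at alignTo(72, 16) + 12*16 ==
  // 80 + 192 == 272.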

  // Add DAG nodes to load the arguments or copy them out of registers.  On
  // entry to a function on PPC, the arguments start after the linkage area,
  // although the first ones are often in registers.

  SmallVector<SDValue, 8> MemOps;
  unsigned nAltivecParamsAtEnd = 0;
  Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
  unsigned CurArgIdx = 0;
  for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
    SDValue ArgVal;
    bool needsLoad = false;
    EVT ObjectVT = Ins[ArgNo].VT;
    unsigned ObjSize = ObjectVT.getSizeInBits()/8;
    unsigned ArgSize = ObjSize;
    ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
    if (Ins[ArgNo].isOrigArg()) {
      std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
      CurArgIdx = Ins[ArgNo].getOrigArgIndex();
    }
    unsigned CurArgOffset = ArgOffset;

    // Varargs or 64-bit Altivec parameters are padded to a 16-byte boundary.
    if (ObjectVT==MVT::v4f32 || ObjectVT==MVT::v4i32 ||
        ObjectVT==MVT::v8i16 || ObjectVT==MVT::v16i8) {
      if (isVarArg || isPPC64) {
        MinReservedArea = ((MinReservedArea+15)/16)*16;
        MinReservedArea += CalculateStackSlotSize(ObjectVT,
                                                  Flags,
                                                  PtrByteSize);
      } else
        nAltivecParamsAtEnd++;
    } else
      // Calculate min reserved area.
      MinReservedArea += CalculateStackSlotSize(Ins[ArgNo].VT,
                                                Flags,
                                                PtrByteSize);

    // FIXME: the codegen can be much improved in some cases.
    // We do not have to keep everything in memory.
    if (Flags.isByVal()) {
      assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");

      // ObjSize is the true size; ArgSize is ObjSize rounded up to a multiple
      // of registers.
      ObjSize = Flags.getByValSize();
      ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
      // Objects of size 1 and 2 are right justified, everything else is
      // left justified.  This means the memory address is adjusted forwards.
      if (ObjSize==1 || ObjSize==2) {
        CurArgOffset = CurArgOffset + (4 - ObjSize);
      }
      // The value of the object is its address.
      int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, false, true);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      InVals.push_back(FIN);
      if (ObjSize==1 || ObjSize==2) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          EVT ObjType = ObjSize == 1 ? MVT::i8 : MVT::i16;
          SDValue Store =
              DAG.getTruncStore(Val.getValue(1), dl, Val, FIN,
                                MachinePointerInfo(&*FuncArg), ObjType);
          MemOps.push_back(Store);
          ++GPR_idx;
        }

        ArgOffset += PtrByteSize;

        continue;
      }
      for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
        // Store whatever pieces of the object are in registers
        // to memory.  ArgOffset will be the address of the beginning
        // of the object.
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg;
          if (isPPC64)
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
          else
            VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
          SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
          SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
          SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                       MachinePointerInfo(&*FuncArg, j));
          MemOps.push_back(Store);
          ++GPR_idx;
          ArgOffset += PtrByteSize;
        } else {
          ArgOffset += ArgSize - (ArgOffset-CurArgOffset);
          break;
        }
      }
      continue;
    }

    switch (ObjectVT.getSimpleVT().SimpleTy) {
    default: llvm_unreachable("Unhandled argument type!");
    case MVT::i1:
    case MVT::i32:
      if (!isPPC64) {
        if (GPR_idx != Num_GPR_Regs) {
          unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);
          ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);

          if (ObjectVT == MVT::i1)
            ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgVal);

          ++GPR_idx;
        } else {
          needsLoad = true;
          ArgSize = PtrByteSize;
        }
        // All int arguments reserve stack space in the Darwin ABI.
        ArgOffset += PtrByteSize;
        break;
      }
      LLVM_FALLTHROUGH;
    case MVT::i64:  // PPC64
      if (GPR_idx != Num_GPR_Regs) {
        unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);

        if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
          // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
          // the value to MVT::i64 and then truncate to the correct register
          // size.
          ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);

        ++GPR_idx;
      } else {
        needsLoad = true;
        ArgSize = PtrByteSize;
      }
      // All int arguments reserve stack space in the Darwin ABI.
      ArgOffset += 8;
      break;

    case MVT::f32:
    case MVT::f64:
      // Every 4 bytes of argument space consumes one of the GPRs available for
      // argument passing.
      if (GPR_idx != Num_GPR_Regs) {
        ++GPR_idx;
        if (ObjSize == 8 && GPR_idx != Num_GPR_Regs && !isPPC64)
          ++GPR_idx;
      }
      if (FPR_idx != Num_FPR_Regs) {
        unsigned VReg;

        if (ObjectVT == MVT::f32)
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F4RCRegClass);
        else
          VReg = MF.addLiveIn(FPR[FPR_idx], &PPC::F8RCRegClass);

        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        ++FPR_idx;
      } else {
        needsLoad = true;
      }

      // All FP arguments reserve stack space in the Darwin ABI.
      ArgOffset += isPPC64 ? 8 : ObjSize;
      break;
    case MVT::v4f32:
    case MVT::v4i32:
    case MVT::v8i16:
    case MVT::v16i8:
      // Note that vector arguments in registers don't reserve stack space,
      // except in varargs functions.
      if (VR_idx != Num_VR_Regs) {
        unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
        ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
        if (isVarArg) {
          while ((ArgOffset % 16) != 0) {
            ArgOffset += PtrByteSize;
            if (GPR_idx != Num_GPR_Regs)
              GPR_idx++;
          }
          ArgOffset += 16;
          GPR_idx = std::min(GPR_idx+4, Num_GPR_Regs); // FIXME: correct for ppc64?
        }
        ++VR_idx;
      } else {
        if (!isVarArg && !isPPC64) {
          // Vectors go after all the nonvectors.
          CurArgOffset = VecArgOffset;
          VecArgOffset += 16;
        } else {
          // Vectors are aligned.
          ArgOffset = ((ArgOffset+15)/16)*16;
          CurArgOffset = ArgOffset;
          ArgOffset += 16;
        }
        needsLoad = true;
      }
      break;
    }

    // We need to load the argument to a virtual register if we determined above
    // that we ran out of physical registers of the appropriate type.
    if (needsLoad) {
      int FI = MFI.CreateFixedObject(ObjSize,
                                     CurArgOffset + (ArgSize - ObjSize),
                                     isImmutable);
      SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
      ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
    }

    InVals.push_back(ArgVal);
  }

  // Allow for Altivec parameters at the end, if needed.
  if (nAltivecParamsAtEnd) {
    MinReservedArea = ((MinReservedArea+15)/16)*16;
    MinReservedArea += 16*nAltivecParamsAtEnd;
  }

  // Area that is at least reserved in the caller of this function.
  MinReservedArea = std::max(MinReservedArea, LinkageSize + 8 * PtrByteSize);

  // Set the size that is at least reserved in caller of this function.  Tail
  // call optimized functions' reserved stack space needs to be aligned so that
  // taking the difference between two stack areas will result in an aligned
  // stack.
  MinReservedArea =
      EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
  FuncInfo->setMinReservedArea(MinReservedArea);

  // If the function takes a variable number of arguments, make a frame index
  // for the start of the first vararg value... for expansion of llvm.va_start.
  if (isVarArg) {
    int Depth = ArgOffset;

    FuncInfo->setVarArgsFrameIndex(
      MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
                            Depth, true));
    SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);

    // If this function is vararg, store any remaining integer argument regs
    // to their spots on the stack so that they may be loaded by dereferencing
    // the result of va_next.
    for (; GPR_idx != Num_GPR_Regs; ++GPR_idx) {
      unsigned VReg;

      if (isPPC64)
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
      else
        VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::GPRCRegClass);

      SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
      SDValue Store =
          DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
      MemOps.push_back(Store);
      // Increment the address by the pointer size for the next argument to
      // store.
      SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
      FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
    }
  }

  if (!MemOps.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);

  return Chain;
}

/// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
/// adjusted to accommodate the arguments for the tail call.
static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall,
                                   unsigned ParamSize) {

  if (!isTailCall) return 0;

  PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
  unsigned CallerMinReservedArea = FI->getMinReservedArea();
  int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
  // Remember only if the new adjustment is bigger.
  if (SPDiff < FI->getTailCallSPDelta())
    FI->setTailCallSPDelta(SPDiff);

  return SPDiff;
}
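
// Illustrative: if the caller reserved 112 bytes of argument area but a tail
// call needs 144 bytes of parameters, SPDiff is -32, i.e. the frame must
// grow; the most negative delta seen so far is what gets recorded.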

static bool isFunctionGlobalAddress(SDValue Callee);

static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
                              const TargetMachine &TM) {
  // It does not make sense to call callsShareTOCBase() with a caller that
  // is PC Relative since PC Relative callers do not have a TOC.
#ifndef NDEBUG
  const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
  assert(!STICaller->isUsingPCRelativeCalls() &&
         "PC Relative callers do not have a TOC and cannot share a TOC Base");
#endif

  // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
  // don't have enough information to determine if the caller and callee share
  // the same TOC base, so we have to pessimistically assume they don't for
  // correctness.
  GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
  if (!G)
    return false;

  const GlobalValue *GV = G->getGlobal();

  // If the callee is preemptable, then the static linker will use a plt-stub
  // which saves the toc to the stack, and needs a nop after the call
  // instruction to convert to a toc-restore.
  if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
    return false;

  // Functions with PC Relative enabled may clobber the TOC in the same DSO.
  // We may need a TOC restore in the situation where the caller requires a
  // valid TOC but the callee is PC Relative and does not.
  const Function *F = dyn_cast<Function>(GV);
  const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);

  // If we have an Alias we can try to get the function from there.
  if (Alias) {
    const GlobalObject *GlobalObj = Alias->getBaseObject();
    F = dyn_cast<Function>(GlobalObj);
  }

  // If we still have no valid function pointer we do not have enough
  // information to determine if the callee uses PC Relative calls so we must
  // assume that it does.
  if (!F)
    return false;

  // If the callee uses PC Relative we cannot guarantee that the callee won't
  // clobber the TOC of the caller and so we must assume that the two
  // functions do not share a TOC base.
  const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
  if (STICallee->isUsingPCRelativeCalls())
    return false;

  // The medium and large code models are expected to provide a sufficiently
  // large TOC to provide all data addressing needs of a module with a
  // single TOC.
  if (CodeModel::Medium == TM.getCodeModel() ||
      CodeModel::Large == TM.getCodeModel())
    return true;

  // Otherwise we need to ensure callee and caller are in the same section,
  // since the linker may allocate multiple TOCs, and we don't know which
  // sections will belong to the same TOC base.
  if (!GV->isStrongDefinitionForLinker())
    return false;

  // Any explicitly-specified sections and section prefixes must also match.
  // Also, if we're using -ffunction-sections, then each function is always in
  // a different section (the same is true for COMDAT functions).
  if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
      GV->getSection() != Caller->getSection())
    return false;
  if (const auto *F = dyn_cast<Function>(GV)) {
    if (F->getSectionPrefix() != Caller->getSectionPrefix())
      return false;
  }

  return true;
}

static bool
needStackSlotPassParameters(const PPCSubtarget &Subtarget,
                            const SmallVectorImpl<ISD::OutputArg> &Outs) {
  assert(Subtarget.is64BitELFABI());

  const unsigned PtrByteSize = 8;
  const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();

  static const MCPhysReg GPR[] = {
    PPC::X3, PPC::X4, PPC::X5, PPC::X6,
    PPC::X7, PPC::X8, PPC::X9, PPC::X10,
  };
  static const MCPhysReg VR[] = {
    PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
    PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
  };

  const unsigned NumGPRs = array_lengthof(GPR);
  const unsigned NumFPRs = 13;
  const unsigned NumVRs = array_lengthof(VR);
  const unsigned ParamAreaSize = NumGPRs * PtrByteSize;

  unsigned NumBytes = LinkageSize;
  unsigned AvailableFPRs = NumFPRs;
  unsigned AvailableVRs = NumVRs;

  for (const ISD::OutputArg &Param : Outs) {
    if (Param.Flags.isNest()) continue;

    if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags,
                               PtrByteSize, LinkageSize, ParamAreaSize,
                               NumBytes, AvailableFPRs, AvailableVRs,
                               Subtarget.hasQPX()))
      return true;
  }
  return false;
}

static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
  if (CB.arg_size() != CallerFn->arg_size())
    return false;

  auto CalleeArgIter = CB.arg_begin();
  auto CalleeArgEnd = CB.arg_end();
  Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();

  for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
    const Value *CalleeArg = *CalleeArgIter;
    const Value *CallerArg = &(*CallerArgIter);
    if (CalleeArg == CallerArg)
      continue;

    // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
    //        tail call @callee([4 x i64] undef, [4 x i64] %b)
    //      }
    // The 1st argument of the callee is undef and has the same type as the
    // caller's corresponding argument.
    if (CalleeArg->getType() == CallerArg->getType() &&
        isa<UndefValue>(CalleeArg))
      continue;

    return false;
  }

  return true;
}

// Returns true if TCO is possible between the caller's and callee's
// calling conventions.
static bool
areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
                                    CallingConv::ID CalleeCC) {
  // Tail calls are possible with fastcc and ccc.
  auto isTailCallableCC = [](CallingConv::ID CC) {
    return CC == CallingConv::C || CC == CallingConv::Fast;
  };
  if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
    return false;

  // We can safely tail call both fastcc and ccc callees from a c calling
  // convention caller. If the caller is fastcc, we may have less stack space
  // than a non-fastcc caller with the same signature so disable tail-calls in
  // that case.
  return CallerCC == CallingConv::C || CallerCC == CalleeCC;
}
4873 
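/// Check whether a call is eligible for tail call optimization (or the more
/// restrictive sibling call optimization) under the 64-bit SVR4 ELF ABIs.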
bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
    SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;

  if (DisableSCO && !TailCallOpt) return false;

  // Variadic argument functions are not supported.
  if (isVarArg) return false;

  auto &Caller = DAG.getMachineFunction().getFunction();
  // Check that the calling conventions are compatible for tco.
  if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
    return false;

  // A caller with any byval parameter is not supported.
  if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
    return false;

  // A callee with any byval parameter is not supported either.
  // Note: This is a quick workaround, because in some cases, e.g. when the
  // caller's stack size > callee's stack size, we are still able to apply
  // sibling call optimization. For example, gcc is able to do SCO for caller1
  // in the following example, but not for caller2.
  //   struct test {
  //     long int a;
  //     char ary[56];
  //   } gTest;
  //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
  //     b->a = v.a;
  //     return 0;
  //   }
  //   void caller1(struct test a, struct test c, struct test *b) {
  //     callee(gTest, b); }
  //   void caller2(struct test *b) { callee(gTest, b); }
  if (any_of(Outs, [](const ISD::OutputArg &OA) { return OA.Flags.isByVal(); }))
    return false;

  // If the callee and caller use different calling conventions, we cannot pass
  // parameters on the stack since offsets for the parameter area may differ.
  if (Caller.getCallingConv() != CalleeCC &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;

  // All variants of 64-bit ELF ABIs without PC-Relative addressing require
  // that the caller and callee share the same TOC for TCO/SCO. If the caller
  // and callee potentially have different TOC bases then we cannot tail call
  // since we need to restore the TOC pointer after the call.
  // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
  // We cannot guarantee this for indirect calls or calls to external functions.
  // When PC-Relative addressing is used, the concept of the TOC is no longer
  // applicable so this check is not required.
  // Check first for indirect calls.
  if (!Subtarget.isUsingPCRelativeCalls() &&
      !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Check if we share the TOC base.
  if (!Subtarget.isUsingPCRelativeCalls() &&
      !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
    return false;

  // TCO allows altering callee ABI, so we don't have to check further.
  if (CalleeCC == CallingConv::Fast && TailCallOpt)
    return true;

  if (DisableSCO) return false;

  // If the callee uses the same argument list as the caller, then we can
  // apply SCO in this case. If not, then we need to check whether the callee
  // needs stack slots for passing arguments.
  // PC Relative tail calls may not have a CallBase.
  // If there is no CallBase we cannot verify if we have the same argument
  // list, so assume that we don't.
  if (CB && !hasSameArgumentList(&Caller, *CB) &&
      needStackSlotPassParameters(Subtarget, Outs))
    return false;
  else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
    return false;

  return true;
}

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool PPCTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  if (!getTargetMachine().Options.GuaranteedTailCallOpt)
    return false;

  // Variable argument functions are not supported.
  if (isVarArg)
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
  if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
    // Functions containing byval parameters are not supported.
    for (unsigned i = 0; i != Ins.size(); i++) {
      ISD::ArgFlagsTy Flags = Ins[i].Flags;
      if (Flags.isByVal()) return false;
    }

    // Non-PIC/GOT tail calls are supported.
    if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
      return true;

    // At the moment we can only do local tail calls (in same module, hidden
    // or protected) if we are generating PIC.
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
      return G->getGlobal()->hasHiddenVisibility()
          || G->getGlobal()->hasProtectedVisibility();
  }

  return false;
}

/// isBLACompatibleAddress - Return the immediate to use if the specified
/// 32-bit value is representable in the immediate field of a BxA instruction.
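/// The target must be a word-aligned address that sign-extends from 26 bits;
/// the returned constant is that address shifted right by two so it fits the
/// LI field of the instruction.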
static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
  if (!C) return nullptr;

  int Addr = C->getZExtValue();
  if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
      SignExtend32<26>(Addr) != Addr)
    return nullptr;  // Top 6 bits have to be sext of immediate.

  return DAG
      .getConstant(
          (int)C->getZExtValue() >> 2, SDLoc(Op),
          DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
      .getNode();
}

namespace {

struct TailCallArgumentInfo {
  SDValue Arg;
  SDValue FrameIdxOp;
  int FrameIdx = 0;

  TailCallArgumentInfo() = default;
};

} // end anonymous namespace

/// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
static void StoreTailCallArgumentsToStackSlot(
    SelectionDAG &DAG, SDValue Chain,
    const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
    SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
  for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
    SDValue Arg = TailCallArgs[i].Arg;
    SDValue FIN = TailCallArgs[i].FrameIdxOp;
    int FI = TailCallArgs[i].FrameIdx;
    // Store relative to the frame pointer.
    MemOpChains.push_back(DAG.getStore(
        Chain, dl, Arg, FIN,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
  }
}

/// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
/// the appropriate stack slot for the tail call optimized function call.
static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
                                             SDValue OldRetAddr, SDValue OldFP,
                                             int SPDiff, const SDLoc &dl) {
  if (SPDiff) {
    // Calculate the new stack slot for the return address.
    MachineFunction &MF = DAG.getMachineFunction();
    const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
    const PPCFrameLowering *FL = Subtarget.getFrameLowering();
    bool isPPC64 = Subtarget.isPPC64();
    int SlotSize = isPPC64 ? 8 : 4;
    int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
    int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
                                                         NewRetAddrLoc, true);
    EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
    SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
    Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
                         MachinePointerInfo::getFixedStack(MF, NewRetAddr));
  }
  return Chain;
}

/// CalculateTailCallArgDest - Remember the argument for later processing and
/// calculate the position of the argument.
static void
CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
                         SDValue Arg, int SPDiff, unsigned ArgOffset,
                     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  int Offset = ArgOffset + SPDiff;
  uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
  int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
  EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
  SDValue FIN = DAG.getFrameIndex(FI, VT);
  TailCallArgumentInfo Info;
  Info.Arg = Arg;
  Info.FrameIdxOp = FIN;
  Info.FrameIdx = FI;
  TailCallArguments.push_back(Info);
}

/// EmitTailCallLoadFPAndRetAddr - Emit a load from the frame pointer and
/// return address stack slot. Returns the chain as result and the loaded
/// values in LROpOut/FPOpOut. Used when tail calling.
SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
    SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
    SDValue &FPOpOut, const SDLoc &dl) const {
  if (SPDiff) {
    // Load the LR and FP stack slot for later adjusting.
    EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
    LROpOut = getReturnAddrFrameIndex(DAG);
    LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
    Chain = SDValue(LROpOut.getNode(), 1);
  }
  return Chain;
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size".  Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter.
/// Sometimes what we are copying is the end of a larger object, the part that
/// does not fit in registers.
static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
                                         SDValue Chain, ISD::ArgFlagsTy Flags,
                                         SelectionDAG &DAG, const SDLoc &dl) {
  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
                       Flags.getNonZeroByValAlign(), false, false, false, false,
                       MachinePointerInfo(), MachinePointerInfo());
}

/// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
/// tail calls.
static void LowerMemOpCallTo(
    SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
    SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
    bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
    SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
  EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  if (!isTailCall) {
    if (isVector) {
      SDValue StackPtr;
      if (isPPC64)
        StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
      else
        StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
      PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
                           DAG.getConstant(ArgOffset, dl, PtrVT));
    }
    MemOpChains.push_back(
        DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
  } else {
    // Calculate and remember the argument location.
    CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
                             TailCallArguments);
  }
}

static void
PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
                const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
                SDValue FPOp,
                SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
  // Emit a sequence of copyto/copyfrom virtual registers for arguments that
  // might overwrite each other in case of tail call optimization.
  SmallVector<SDValue, 8> MemOpChains2;
  // Do not flag preceding copytoreg stuff together with the following stuff.
  InFlag = SDValue();
  StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
                                    MemOpChains2, dl);
  if (!MemOpChains2.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);

  // Store the return address to the appropriate stack slot.
  Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);

  // Emit callseq_end just before the tailcall node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
  InFlag = Chain.getValue(1);
}

// Is this the global address of a function that can be called by name (as
// opposed to something that must hold a descriptor for an indirect call)?
static bool isFunctionGlobalAddress(SDValue Callee) {
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
        Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
      return false;

    return G->getGlobal()->getValueType()->isFunctionTy();
  }

  return false;
}

SDValue PPCTargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                    *DAG.getContext());

  CCRetInfo.AnalyzeCallResult(
      Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
               ? RetCC_PPC_Cold
               : RetCC_PPC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    SDValue Val;

    if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
      SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Lo.getValue(1);
      InFlag = Lo.getValue(2);
      VA = RVLocs[++i]; // skip ahead to next loc
      SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
                                      InFlag);
      Chain = Hi.getValue(1);
      InFlag = Hi.getValue(2);
      if (!Subtarget.isLittleEndian())
        std::swap(Lo, Hi);
      Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
    } else {
      Val = DAG.getCopyFromReg(Chain, dl,
                               VA.getLocReg(), VA.getLocVT(), InFlag);
      Chain = Val.getValue(1);
      InFlag = Val.getValue(2);
    }

    switch (VA.getLocInfo()) {
    default: llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full: break;
    case CCValAssign::AExt:
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::ZExt:
      Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    case CCValAssign::SExt:
      Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
                        DAG.getValueType(VA.getValVT()));
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

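// Returns true if the callee must be reached through an indirect call sequence
// (move to CTR and branch) rather than a direct branch-and-link.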
static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
                           const PPCSubtarget &Subtarget, bool isPatchPoint) {
  // PatchPoint calls are not indirect.
  if (isPatchPoint)
    return false;

  if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
    return false;

  // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot
  // because the immediate function pointer points to a descriptor instead of
  // a function entry point. The ELFv2 ABI cannot use a BLA because the function
  // pointer immediate points to the global entry point, while the BLA would
  // need to jump to the local entry point (see rL211174).
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
      isBLACompatibleAddress(Callee, DAG))
    return false;

  return true;
}

// AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
  return Subtarget.isAIXABI() ||
         (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
}

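// Select the PPCISD call opcode for this call: TC_RETURN for tail calls,
// BCTRL/BCTRL_LOAD_TOC for indirect calls, CALL_NOTOC for PC-relative
// callees, and CALL/CALL_NOP for direct calls on the TOC-based ABIs.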
static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
                              const Function &Caller,
                              const SDValue &Callee,
                              const PPCSubtarget &Subtarget,
                              const TargetMachine &TM) {
  if (CFlags.IsTailCall)
    return PPCISD::TC_RETURN;

  // This is a call through a function pointer.
  if (CFlags.IsIndirect) {
    // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
    // indirect calls. The save of the caller's TOC pointer to the stack will be
    // inserted into the DAG as part of call lowering. The restore of the TOC
    // pointer is modeled by using a pseudo instruction for the call opcode that
    // represents the 2 instruction sequence of an indirect branch and link,
    // immediately followed by a load of the TOC pointer from the stack save
    // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
    // as it is not saved or used.
    return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
                                               : PPCISD::BCTRL;
  }

  if (Subtarget.isUsingPCRelativeCalls()) {
    assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
    return PPCISD::CALL_NOTOC;
  }

  // The ABIs that maintain a TOC pointer across calls need to have a nop
  // immediately following the call instruction if the caller and callee may
  // have different TOC bases. At link time if the linker determines the calls
  // may not share a TOC base, the call is redirected to a trampoline inserted
  // by the linker. The trampoline will (among other things) save the caller's
  // TOC pointer at an ABI designated offset in the linkage area and the linker
  // will rewrite the nop to be a load of the TOC pointer from the linkage area
  // into gpr2.
  if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
    return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
                                                  : PPCISD::CALL_NOP;

  return PPCISD::CALL;
}

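// Rewrite the callee operand into the form the selected ABI expects: a BLA
// immediate where legal, a (possibly PLT-annotated) target symbol on ELF, or
// the "."-prefixed entry-point symbol on AIX.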
static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
                               const SDLoc &dl, const PPCSubtarget &Subtarget) {
  if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
    if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
      return SDValue(Dest, 0);

  // Returns true if the callee is local, and false otherwise.
  auto isLocalCallee = [&]() {
    const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    const GlobalValue *GV = G ? G->getGlobal() : nullptr;

    return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
           !dyn_cast_or_null<GlobalIFunc>(GV);
  };

  // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
  // a static relocation model causes some versions of GNU LD (2.17.50, at
  // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
  // built with secure-PLT.
  bool UsePlt =
      Subtarget.is32BitELFABI() && !isLocalCallee() &&
      Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;

  // On AIX, direct function calls reference the symbol for the function's
  // entry point, which is named by prepending a "." before the function's
  // C-linkage name.
  const auto getAIXFuncEntryPointSymbolSDNode =
      [&](StringRef FuncName, bool IsDeclaration,
          const XCOFF::StorageClass &SC) {
        auto &Context = DAG.getMachineFunction().getMMI().getContext();

        MCSymbolXCOFF *S = cast<MCSymbolXCOFF>(
            Context.getOrCreateSymbol(Twine(".") + Twine(FuncName)));

        if (IsDeclaration && !S->hasRepresentedCsectSet()) {
          // On AIX, an undefined symbol needs to be associated with a
          // MCSectionXCOFF to get the correct storage mapping class.
          // In this case, XCOFF::XMC_PR.
          MCSectionXCOFF *Sec = Context.getXCOFFSection(
              S->getSymbolTableName(), XCOFF::XMC_PR, XCOFF::XTY_ER, SC,
              SectionKind::getMetadata());
          S->setRepresentedCsect(Sec);
        }

        MVT PtrVT =
            DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
        return DAG.getMCSymbol(S, PtrVT);
      };

  if (isFunctionGlobalAddress(Callee)) {
    const GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Callee);
    const GlobalValue *GV = G->getGlobal();

    if (!Subtarget.isAIXABI())
      return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
                                        UsePlt ? PPCII::MO_PLT : 0);

    assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
    const GlobalObject *GO = cast<GlobalObject>(GV);
    const XCOFF::StorageClass SC =
        TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(GO);
    return getAIXFuncEntryPointSymbolSDNode(GO->getName(), GO->isDeclaration(),
                                            SC);
  }

  if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    const char *SymName = S->getSymbol();
    if (!Subtarget.isAIXABI())
      return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
                                         UsePlt ? PPCII::MO_PLT : 0);

    // If there exists a user-declared function whose name is the same as the
    // ExternalSymbol's, then we pick up the user-declared version.
    const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
    if (const Function *F =
            dyn_cast_or_null<Function>(Mod->getNamedValue(SymName))) {
      const XCOFF::StorageClass SC =
          TargetLoweringObjectFileXCOFF::getStorageClassForGlobal(F);
      return getAIXFuncEntryPointSymbolSDNode(F->getName(), F->isDeclaration(),
                                              SC);
    }

    return getAIXFuncEntryPointSymbolSDNode(SymName, true, XCOFF::C_EXT);
  }

  // No transformation needed.
  assert(Callee.getNode() && "What no callee?");
  return Callee;
}

static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
  assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
         "Expected a CALLSEQ_STARTSDNode.");

  // The last operand is the chain, except when the node has glue. If the node
  // has glue, then the last operand is the glue, and the chain is the second
  // last operand.
  SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
  if (LastValue.getValueType() != MVT::Glue)
    return LastValue;

  return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
}

// Creates the node that moves a function's address into the count register
// to prepare for an indirect call instruction.
static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                SDValue &Glue, SDValue &Chain,
                                const SDLoc &dl) {
  SDValue MTCTROps[] = {Chain, Callee, Glue};
  EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
  Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
                      makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
  // The glue is the second value produced.
  Glue = Chain.getValue(1);
}

static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
                                          SDValue &Glue, SDValue &Chain,
                                          SDValue CallSeqStart,
                                          const CallBase *CB, const SDLoc &dl,
                                          bool hasNest,
                                          const PPCSubtarget &Subtarget) {
  // Function pointers in the 64-bit SVR4 ABI do not point to the function
  // entry point, but to the function descriptor (the function entry point
  // address is part of the function descriptor though).
  // The function descriptor is a three doubleword structure with the
  // following fields: function entry point, TOC base address and
  // environment pointer.
  // Thus for a call through a function pointer, the following actions need
  // to be performed:
  //   1. Save the TOC of the caller in the TOC save area of its stack
  //      frame (this is done in LowerCall_Darwin() or LowerCall_64SVR4()).
  //   2. Load the address of the function entry point from the function
  //      descriptor.
  //   3. Load the TOC of the callee from the function descriptor into r2.
  //   4. Load the environment pointer from the function descriptor into
  //      r11.
  //   5. Branch to the function entry point address.
  //   6. On return of the callee, the TOC of the caller needs to be
  //      restored (this is done in FinishCall()).
  //
  // The loads are scheduled at the beginning of the call sequence, and the
  // register copies are flagged together to ensure that no other
  // operations can be scheduled in between. E.g. without flagging the
  // copies together, a TOC access in the caller could be scheduled between
  // the assignment of the callee TOC and the branch to the callee, which leads
  // to incorrect code.
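  //
  // For reference, on the 64-bit ELFv1 ABI the descriptor layout is:
  //   +0:  function entry point address
  //   +8:  TOC base address (loaded into r2)
  //   +16: environment pointer (loaded into r11)
  // The actual offsets used below are queried from the subtarget via
  // descriptorTOCAnchorOffset() and descriptorEnvironmentPointerOffset().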

  // Start by loading the function address from the descriptor.
  SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
  auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
                      ? (MachineMemOperand::MODereferenceable |
                         MachineMemOperand::MOInvariant)
                      : MachineMemOperand::MONone;

  MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);

  // Registers used in building the DAG.
  const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
  const MCRegister TOCReg = Subtarget.getTOCPointerRegister();

  // Offsets of descriptor members.
  const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
  const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();

  const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
  const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;

  // One load for the function's entry point address.
  SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
                                    Alignment, MMOFlags);

  // One for loading the TOC anchor for the module that contains the called
  // function.
  SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
  SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
  SDValue TOCPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddTOC,
                  MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);

  // One for loading the environment pointer.
  SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
  SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
  SDValue LoadEnvPtr =
      DAG.getLoad(RegVT, dl, LDChain, AddPtr,
                  MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);

  // Then copy the newly loaded TOC anchor to the TOC pointer.
  SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
  Chain = TOCVal.getValue(0);
  Glue = TOCVal.getValue(1);

  // If the function call has an explicit 'nest' parameter, it takes the
  // place of the environment pointer.
  assert((!hasNest || !Subtarget.isAIXABI()) &&
         "Nest parameter is not supported on AIX.");
  if (!hasNest) {
    SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
    Chain = EnvVal.getValue(0);
    Glue = EnvVal.getValue(1);
  }

  // The rest of the indirect call sequence is the same as the non-descriptor
  // DAG.
  prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
}

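// Assemble the operand list for the call node: chain, callee (or CTR for
// indirect calls), TOC restore address where required, argument registers,
// the call-preserved register mask, and any glue.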
static void
buildCallOperands(SmallVectorImpl<SDValue> &Ops,
                  PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
                  SelectionDAG &DAG,
                  SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                  SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
                  const PPCSubtarget &Subtarget) {
  const bool IsPPC64 = Subtarget.isPPC64();
  // MVT for a general purpose register.
  const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;

  // First operand is always the chain.
  Ops.push_back(Chain);

  // If it's a direct call pass the callee as the second operand.
  if (!CFlags.IsIndirect)
    Ops.push_back(Callee);
  else {
    assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");

    // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
    // on the stack (this would have been done in `LowerCall_64SVR4` or
    // `LowerCall_AIX`). The call instruction is a pseudo instruction that
    // represents both the indirect branch and a load that restores the TOC
    // pointer from the linkage area. The operand for the TOC restore is an add
    // of the TOC save offset to the stack pointer. This must be the second
    // operand: after the chain input but before any other variadic arguments.
    // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
    // saved or used.
    if (isTOCSaveRestoreRequired(Subtarget)) {
      const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();

      SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
      unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
      SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
      SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
      Ops.push_back(AddTOC);
    }

    // Add the register used for the environment pointer.
    if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
      Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
                                    RegVT));

    // Add CTR register as callee so a bctr can be emitted later.
    if (CFlags.IsTailCall)
      Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
  }

  // If this is a tail call add stack pointer delta.
  if (CFlags.IsTailCall)
    Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
  // no way to mark dependencies as implicit here.
  // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
  if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
       !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
    Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));

  // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls.
  if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
    Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));

  // Add a register mask operand representing the call-preserved registers.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const uint32_t *Mask =
      TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  // If the glue is valid, it is the last operand.
  if (Glue.getNode())
    Ops.push_back(Glue);
}

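// FinishCall - Emit the call node itself: select the call opcode, materialize
// the callee, emit either a tail call or a regular call plus CALLSEQ_END, and
// copy the results out of their physical registers.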
SDValue PPCTargetLowering::FinishCall(
    CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
    SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
    SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
    unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
    SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {

  if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
      Subtarget.isAIXABI())
    setUsesTOCBasePtr(DAG);

  unsigned CallOpc =
      getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
                    Subtarget, DAG.getTarget());

  if (!CFlags.IsIndirect)
    Callee = transformCallee(Callee, DAG, dl, Subtarget);
  else if (Subtarget.usesFunctionDescriptors())
    prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
                                  dl, CFlags.HasNest, Subtarget);
  else
    prepareIndirectCall(DAG, Callee, Glue, Chain, dl);

  // Build the operand list for the call instruction.
  SmallVector<SDValue, 8> Ops;
  buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
                    SPDiff, Subtarget);

  // Emit tail call.
  if (CFlags.IsTailCall) {
    // Indirect tail calls when using PC Relative calls do not have the same
    // constraints.
    assert(((Callee.getOpcode() == ISD::Register &&
             cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
            Callee.getOpcode() == ISD::TargetExternalSymbol ||
            Callee.getOpcode() == ISD::TargetGlobalAddress ||
            isa<ConstantSDNode>(Callee) ||
            (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
           "Expecting a global address, external symbol, absolute value, "
           "register or an indirect tail call when PC Relative calls are "
           "used.");
    // PC Relative calls also use TC_RETURN as the way to mark tail calls.
    assert(CallOpc == PPCISD::TC_RETURN &&
           "Unexpected call opcode for a tail call.");
    DAG.getMachineFunction().getFrameInfo().setHasTailCall();
    return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
  }

  std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
  Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
  DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
  Glue = Chain.getValue(1);

  // When performing tail call optimization the callee pops its arguments off
  // the stack. Account for this here so these bytes can be pushed back on in
  // PPCFrameLowering::eliminateCallFramePseudoInstr.
  int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
                         getTargetMachine().Options.GuaranteedTailCallOpt)
                            ? NumBytes
                            : 0;

  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
                             DAG.getIntPtrConstant(BytesCalleePops, dl, true),
                             Glue, dl);
  Glue = Chain.getValue(1);

  return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
                         DAG, InVals);
}

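// Top-level call lowering hook: determine tail-call eligibility, build the
// CallFlags, and dispatch to the ABI-specific LowerCall_* implementation.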
SDValue
PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                             SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool isVarArg                         = CLI.IsVarArg;
  bool isPatchPoint                     = CLI.IsPatchPoint;
  const CallBase *CB                    = CLI.CB;

  if (isTailCall) {
    if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
      isTailCall = false;
    else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
      isTailCall = IsEligibleForTailCallOptimization_64SVR4(
          Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
    else
      isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
                                                     Ins, DAG);
    if (isTailCall) {
      ++NumTailCalls;
      if (!getTargetMachine().Options.GuaranteedTailCallOpt)
        ++NumSiblingCalls;

      // PC Relative calls no longer guarantee that the callee is a Global
      // Address Node. The callee could be an indirect tail call in which
      // case the SDValue for the callee could be a load (to load the address
      // of a function pointer) or it may be a register copy (to move the
      // address of the callee from a function parameter into a virtual
      // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
      assert((Subtarget.isUsingPCRelativeCalls() ||
              isa<GlobalAddressSDNode>(Callee)) &&
             "Callee should be an llvm::Function object.");

      LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
                        << "\nTCO callee: ");
      LLVM_DEBUG(Callee.dump());
    }
  }

  if (!isTailCall && CB && CB->isMustTailCall())
    report_fatal_error("failed to perform tail call elimination on a call "
                       "site marked musttail");

  // When long calls (i.e. indirect calls) are always used, calls are always
  // made via function pointer. If we have a function name, first translate it
  // into a pointer.
  if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
      !isTailCall)
    Callee = LowerGlobalAddress(Callee, DAG);

  CallFlags CFlags(
      CallConv, isTailCall, isVarArg, isPatchPoint,
      isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
      // hasNest
      Subtarget.is64BitELFABI() &&
          any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
      CLI.NoMerge);

  if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
    return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                            InVals, CB);

  if (Subtarget.isSVR4ABI())
    return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                            InVals, CB);

  if (Subtarget.isAIXABI())
    return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                         InVals, CB);

  return LowerCall_Darwin(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
                          InVals, CB);
}

LowerCall_32SVR4(SDValue Chain,SDValue Callee,CallFlags CFlags,const SmallVectorImpl<ISD::OutputArg> & Outs,const SmallVectorImpl<SDValue> & OutVals,const SmallVectorImpl<ISD::InputArg> & Ins,const SDLoc & dl,SelectionDAG & DAG,SmallVectorImpl<SDValue> & InVals,const CallBase * CB) const5755 SDValue PPCTargetLowering::LowerCall_32SVR4(
5756     SDValue Chain, SDValue Callee, CallFlags CFlags,
5757     const SmallVectorImpl<ISD::OutputArg> &Outs,
5758     const SmallVectorImpl<SDValue> &OutVals,
5759     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5760     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5761     const CallBase *CB) const {
5762   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5763   // of the 32-bit SVR4 ABI stack frame layout.
5764 
5765   const CallingConv::ID CallConv = CFlags.CallConv;
5766   const bool IsVarArg = CFlags.IsVarArg;
5767   const bool IsTailCall = CFlags.IsTailCall;
5768 
5769   assert((CallConv == CallingConv::C ||
5770           CallConv == CallingConv::Cold ||
5771           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5772 
5773   const Align PtrAlign(4);
5774 
5775   MachineFunction &MF = DAG.getMachineFunction();
5776 
5777   // Mark this function as potentially containing a function that contains a
5778   // tail call. As a consequence the frame pointer will be used for dynamicalloc
5779   // and restoring the callers stack pointer in this functions epilog. This is
5780   // done because by tail calling the called function might overwrite the value
5781   // in this function's (MF) stack pointer stack slot 0(SP).
5782   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5783       CallConv == CallingConv::Fast)
5784     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5785 
5786   // Count how many bytes are to be pushed on the stack, including the linkage
5787   // area, parameter list area and the part of the local variable space which
5788   // contains copies of aggregates which are passed by value.
5789 
5790   // Assign locations to all of the outgoing arguments.
5791   SmallVector<CCValAssign, 16> ArgLocs;
5792   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5793 
5794   // Reserve space for the linkage area on the stack.
5795   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5796                        PtrAlign);
5797   if (useSoftFloat())
5798     CCInfo.PreAnalyzeCallOperands(Outs);
5799 
5800   if (IsVarArg) {
5801     // Handle fixed and variable vector arguments differently.
5802     // Fixed vector arguments go into registers as long as registers are
5803     // available. Variable vector arguments always go into memory.
5804     unsigned NumArgs = Outs.size();
5805 
5806     for (unsigned i = 0; i != NumArgs; ++i) {
5807       MVT ArgVT = Outs[i].VT;
5808       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5809       bool Result;
5810 
5811       if (Outs[i].IsFixed) {
5812         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5813                                CCInfo);
5814       } else {
5815         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5816                                       ArgFlags, CCInfo);
5817       }
5818 
5819       if (Result) {
5820 #ifndef NDEBUG
5821         errs() << "Call operand #" << i << " has unhandled type "
5822              << EVT(ArgVT).getEVTString() << "\n";
5823 #endif
5824         llvm_unreachable(nullptr);
5825       }
5826     }
5827   } else {
5828     // All arguments are treated the same.
5829     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5830   }
5831   CCInfo.clearWasPPCF128();
5832 
5833   // Assign locations to all of the outgoing aggregate by value arguments.
5834   SmallVector<CCValAssign, 16> ByValArgLocs;
5835   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5836 
5837   // Reserve stack space for the allocations in CCInfo.
5838   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5839 
5840   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5841 
5842   // Size of the linkage area, parameter list area and the part of the local
5843   // space variable where copies of aggregates which are passed by value are
5844   // stored.
5845   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5846 
5847   // Calculate by how many bytes the stack has to be adjusted in case of tail
5848   // call optimization.
5849   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5850 
5851   // Adjust the stack pointer for the new arguments...
5852   // These operations are automatically eliminated by the prolog/epilog pass
5853   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5854   SDValue CallSeqStart = Chain;
5855 
5856   // Load the return address and frame pointer so it can be moved somewhere else
5857   // later.
5858   SDValue LROp, FPOp;
5859   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5860 
5861   // Set up a copy of the stack pointer for use loading and storing any
5862   // arguments that may not fit in the registers available for argument
5863   // passing.
5864   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5865 
5866   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5867   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5868   SmallVector<SDValue, 8> MemOpChains;
5869 
5870   bool seenFloatArg = false;
5871   // Walk the register/memloc assignments, inserting copies/loads.
5872   // i - Tracks the index into the list of registers allocated for the call
5873   // RealArgIdx - Tracks the index into the list of actual function arguments
5874   // j - Tracks the index into the list of byval arguments
5875   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5876        i != e;
5877        ++i, ++RealArgIdx) {
5878     CCValAssign &VA = ArgLocs[i];
5879     SDValue Arg = OutVals[RealArgIdx];
5880     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5881 
5882     if (Flags.isByVal()) {
5883       // Argument is an aggregate which is passed by value, thus we need to
5884       // create a copy of it in the local variable space of the current stack
5885       // frame (which is the stack frame of the caller) and pass the address of
5886       // this copy to the callee.
5887       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5888       CCValAssign &ByValVA = ByValArgLocs[j++];
5889       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5890 
5891       // Memory reserved in the local variable space of the callers stack frame.
5892       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5893 
5894       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5895       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5896                            StackPtr, PtrOff);
5897 
5898       // Create a copy of the argument in the local area of the current
5899       // stack frame.
5900       SDValue MemcpyCall =
5901         CreateCopyOfByValArgument(Arg, PtrOff,
5902                                   CallSeqStart.getNode()->getOperand(0),
5903                                   Flags, DAG, dl);
5904 
5905       // This must go outside the CALLSEQ_START..END.
5906       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5907                                                      SDLoc(MemcpyCall));
5908       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5909                              NewCallSeqStart.getNode());
5910       Chain = CallSeqStart = NewCallSeqStart;
5911 
5912       // Pass the address of the aggregate copy on the stack either in a
5913       // physical register or in the parameter list area of the current stack
5914       // frame to the callee.
5915       Arg = PtrOff;
5916     }
5917 
5918     // When useCRBits() is true, there can be i1 arguments.
5919     // It is because getRegisterType(MVT::i1) => MVT::i1,
5920     // and for other integer types getRegisterType() => MVT::i32.
5921     // Extend i1 and ensure callee will get i32.
5922     if (Arg.getValueType() == MVT::i1)
5923       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5924                         dl, MVT::i32, Arg);
5925 
5926     if (VA.isRegLoc()) {
5927       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5928       // Put argument in a physical register.
5929       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5930         bool IsLE = Subtarget.isLittleEndian();
5931         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5932                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5933         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5934         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5935                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5936         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5937                              SVal.getValue(0)));
5938       } else
5939         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5940     } else {
5941       // Put argument in the parameter list area of the current stack frame.
5942       assert(VA.isMemLoc());
5943       unsigned LocMemOffset = VA.getLocMemOffset();
5944 
5945       if (!IsTailCall) {
5946         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5947         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5948                              StackPtr, PtrOff);
5949 
5950         MemOpChains.push_back(
5951             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5952       } else {
5953         // Calculate and remember argument location.
5954         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5955                                  TailCallArguments);
5956       }
5957     }
5958   }
5959 
5960   if (!MemOpChains.empty())
5961     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5962 
5963   // Build a sequence of copy-to-reg nodes chained together with token chain
5964   // and flag operands which copy the outgoing args into the appropriate regs.
5965   SDValue InFlag;
5966   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5967     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5968                              RegsToPass[i].second, InFlag);
5969     InFlag = Chain.getValue(1);
5970   }
5971 
5972   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5973   // registers.
5974   if (IsVarArg) {
5975     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5976     SDValue Ops[] = { Chain, InFlag };
5977 
5978     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5979                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5980 
5981     InFlag = Chain.getValue(1);
5982   }
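  // Illustrative: for a varargs call that passes a floating-point value in
  // an FPR (e.g. printf("%f", d)), seenFloatArg is true and CR bit 6 is
  // set; callees can test that bit to decide whether their varargs prologue
  // needs to save the FPR argument registers.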
5983 
5984   if (IsTailCall)
5985     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5986                     TailCallArguments);
5987 
5988   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5989                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
5990 }
5991 
5992 // Copy an argument into memory, being careful to do this outside the
5993 // call sequence for the call to which the argument belongs.
5994 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5995     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5996     SelectionDAG &DAG, const SDLoc &dl) const {
5997   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5998                         CallSeqStart.getNode()->getOperand(0),
5999                         Flags, DAG, dl);
6000   // The MEMCPY must go outside the CALLSEQ_START..END.
6001   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
6002   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
6003                                                  SDLoc(MemcpyCall));
6004   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
6005                          NewCallSeqStart.getNode());
6006   return NewCallSeqStart;
6007 }
6008 
6009 SDValue PPCTargetLowering::LowerCall_64SVR4(
6010     SDValue Chain, SDValue Callee, CallFlags CFlags,
6011     const SmallVectorImpl<ISD::OutputArg> &Outs,
6012     const SmallVectorImpl<SDValue> &OutVals,
6013     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6014     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6015     const CallBase *CB) const {
6016   bool isELFv2ABI = Subtarget.isELFv2ABI();
6017   bool isLittleEndian = Subtarget.isLittleEndian();
6018   unsigned NumOps = Outs.size();
6019   bool IsSibCall = false;
6020   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
6021 
6022   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6023   unsigned PtrByteSize = 8;
6024 
6025   MachineFunction &MF = DAG.getMachineFunction();
6026 
6027   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
6028     IsSibCall = true;
6029 
6030   // Mark this function as potentially containing a function that contains a
6031   // tail call. As a consequence, the frame pointer will be used for dynamic
6032   // stack allocation and for restoring the caller's stack pointer in this
6033   // function's epilogue. This is necessary because the tail-called function
6034   // might overwrite the value in this function's (MF) stack slot 0(SP).
6035   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6036     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6037 
6038   assert(!(IsFastCall && CFlags.IsVarArg) &&
6039          "fastcc not supported on varargs functions");
6040 
6041   // Count how many bytes are to be pushed on the stack, including the linkage
6042   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
6043   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
6044   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
6045   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6046   unsigned NumBytes = LinkageSize;
6047   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6048   unsigned &QFPR_idx = FPR_idx;
6049 
6050   static const MCPhysReg GPR[] = {
6051     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6052     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6053   };
6054   static const MCPhysReg VR[] = {
6055     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6056     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6057   };
6058 
6059   const unsigned NumGPRs = array_lengthof(GPR);
6060   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
6061   const unsigned NumVRs  = array_lengthof(VR);
6062   const unsigned NumQFPRs = NumFPRs;
6063 
6064   // On ELFv2, we can avoid allocating the parameter area if all the arguments
6065   // can be passed to the callee in registers.
6066   // For the fast calling convention, there is another check below.
6067   // Note: We should keep consistent with LowerFormalArguments_64SVR4()
6068   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
6069   if (!HasParameterArea) {
6070     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
6071     unsigned AvailableFPRs = NumFPRs;
6072     unsigned AvailableVRs = NumVRs;
6073     unsigned NumBytesTmp = NumBytes;
6074     for (unsigned i = 0; i != NumOps; ++i) {
6075       if (Outs[i].Flags.isNest()) continue;
6076       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
6077                                 PtrByteSize, LinkageSize, ParamAreaSize,
6078                                 NumBytesTmp, AvailableFPRs, AvailableVRs,
6079                                 Subtarget.hasQPX()))
6080         HasParameterArea = true;
6081     }
6082   }
6083 
6084   // When using the fast calling convention, we don't provide backing for
6085   // arguments that will be in registers.
6086   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
6087 
6088   // Avoid allocating parameter area for fastcc functions if all the arguments
6089   // can be passed in the registers.
6090   if (IsFastCall)
6091     HasParameterArea = false;
6092 
6093   // Add up all the space actually used.
6094   for (unsigned i = 0; i != NumOps; ++i) {
6095     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6096     EVT ArgVT = Outs[i].VT;
6097     EVT OrigVT = Outs[i].ArgVT;
6098 
6099     if (Flags.isNest())
6100       continue;
6101 
6102     if (IsFastCall) {
6103       if (Flags.isByVal()) {
6104         NumGPRsUsed += (Flags.getByValSize()+7)/8;
6105         if (NumGPRsUsed > NumGPRs)
6106           HasParameterArea = true;
6107       } else {
6108         switch (ArgVT.getSimpleVT().SimpleTy) {
6109         default: llvm_unreachable("Unexpected ValueType for argument!");
6110         case MVT::i1:
6111         case MVT::i32:
6112         case MVT::i64:
6113           if (++NumGPRsUsed <= NumGPRs)
6114             continue;
6115           break;
6116         case MVT::v4i32:
6117         case MVT::v8i16:
6118         case MVT::v16i8:
6119         case MVT::v2f64:
6120         case MVT::v2i64:
6121         case MVT::v1i128:
6122         case MVT::f128:
6123           if (++NumVRsUsed <= NumVRs)
6124             continue;
6125           break;
6126         case MVT::v4f32:
6127           // When using QPX, this is handled like a FP register, otherwise, it
6128           // is an Altivec register.
6129           if (Subtarget.hasQPX()) {
6130             if (++NumFPRsUsed <= NumFPRs)
6131               continue;
6132           } else {
6133             if (++NumVRsUsed <= NumVRs)
6134               continue;
6135           }
6136           break;
6137         case MVT::f32:
6138         case MVT::f64:
6139         case MVT::v4f64: // QPX
6140         case MVT::v4i1:  // QPX
6141           if (++NumFPRsUsed <= NumFPRs)
6142             continue;
6143           break;
6144         }
6145         HasParameterArea = true;
6146       }
6147     }
6148 
6149     /* Respect alignment of argument on the stack.  */
6150     auto Alignment =
6151         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6152     NumBytes = alignTo(NumBytes, Alignment);
6153 
6154     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6155     if (Flags.isInConsecutiveRegsLast())
6156       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6157   }
6158 
6159   unsigned NumBytesActuallyUsed = NumBytes;
6160 
6161   // In the old ELFv1 ABI, the prolog code of the callee may store up to 8
6162   // GPR argument registers to the stack, allowing va_start to index over
6163   // them in memory if it is a varargs function.  Because we cannot tell if
6164   // this is needed on the caller side, we have to conservatively assume that
6165   // it is.  As such, make sure we have at least enough stack space for the
6166   // callee to store the 8 GPRs.
6167   // In the ELFv2 ABI, we allocate the parameter area iff a callee
6168   // really requires memory operands, e.g. a vararg function.
6169   if (HasParameterArea)
6170     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6171   else
6172     NumBytes = LinkageSize;
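  // Worked example (illustrative): under ELFv1 (LinkageSize = 48) any call
  // that needs a parameter area reserves at least 48 + 8 * 8 = 112 bytes;
  // under ELFv2 (LinkageSize = 32) the minimum is 32 + 64 = 96 bytes, and a
  // call with no parameter area reserves just the 32-byte linkage area.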
6173 
6174   // Tail call needs the stack to be aligned.
6175   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6176     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6177 
6178   int SPDiff = 0;
6179 
6180   // Calculate by how many bytes the stack has to be adjusted in case of tail
6181   // call optimization.
6182   if (!IsSibCall)
6183     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6184 
6185   // To protect arguments on the stack from being clobbered in a tail call,
6186   // force all the loads to happen before doing any other lowering.
6187   if (CFlags.IsTailCall)
6188     Chain = DAG.getStackArgumentTokenFactor(Chain);
6189 
6190   // Adjust the stack pointer for the new arguments...
6191   // These operations are automatically eliminated by the prolog/epilog pass
6192   if (!IsSibCall)
6193     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6194   SDValue CallSeqStart = Chain;
6195 
6196   // Load the return address and frame pointer so they can be moved somewhere
6197   // else later.
6198   SDValue LROp, FPOp;
6199   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6200 
6201   // Set up a copy of the stack pointer for use in loading and storing any
6202   // arguments that may not fit in the registers available for argument
6203   // passing.
6204   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6205 
6206   // Figure out which arguments are going to go in registers, and which in
6207   // memory.  Also, if this is a vararg function, floating-point arguments
6208   // must be stored to our stack, and loaded into integer regs as well, if
6209   // any integer regs are available for argument passing.
6210   unsigned ArgOffset = LinkageSize;
6211 
6212   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6213   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6214 
6215   SmallVector<SDValue, 8> MemOpChains;
6216   for (unsigned i = 0; i != NumOps; ++i) {
6217     SDValue Arg = OutVals[i];
6218     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6219     EVT ArgVT = Outs[i].VT;
6220     EVT OrigVT = Outs[i].ArgVT;
6221 
6222     // PtrOff will be used to store the current argument to the stack if a
6223     // register cannot be found for it.
6224     SDValue PtrOff;
6225 
6226     // We re-align the argument offset for each argument, except when using
6227     // the fast calling convention, where we do so only when the argument
6228     // will actually use a stack slot.
6229     auto ComputePtrOff = [&]() {
6230       /* Respect alignment of argument on the stack.  */
6231       auto Alignment =
6232           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6233       ArgOffset = alignTo(ArgOffset, Alignment);
6234 
6235       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6236 
6237       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6238     };
6239 
6240     if (!IsFastCall) {
6241       ComputePtrOff();
6242 
6243       /* Compute GPR index associated with argument offset.  */
6244       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6245       GPR_idx = std::min(GPR_idx, NumGPRs);
6246     }
6247 
6248     // Promote integers to 64-bit values.
6249     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6250       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6251       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6252       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6253     }
6254 
6255     // FIXME memcpy is used way more than necessary.  Correctness first.
6256     // Note: "by value" is code for passing a structure by value, not
6257     // basic types.
6258     if (Flags.isByVal()) {
6259       // Note: Size includes alignment padding, so
6260       //   struct x { short a; char b; }
6261       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
6262       // These are the proper values we need for right-justifying the
6263       // aggregate in a parameter register.
6264       unsigned Size = Flags.getByValSize();
6265 
6266       // An empty aggregate parameter takes up no storage and no
6267       // registers.
6268       if (Size == 0)
6269         continue;
6270 
6271       if (IsFastCall)
6272         ComputePtrOff();
6273 
6274       // All aggregates smaller than 8 bytes must be passed right-justified.
6275       if (Size==1 || Size==2 || Size==4) {
6276         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6277         if (GPR_idx != NumGPRs) {
6278           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6279                                         MachinePointerInfo(), VT);
6280           MemOpChains.push_back(Load.getValue(1));
6281           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6282 
6283           ArgOffset += PtrByteSize;
6284           continue;
6285         }
6286       }
6287 
6288       if (GPR_idx == NumGPRs && Size < 8) {
6289         SDValue AddPtr = PtrOff;
6290         if (!isLittleEndian) {
6291           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6292                                           PtrOff.getValueType());
6293           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6294         }
6295         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6296                                                           CallSeqStart,
6297                                                           Flags, DAG, dl);
6298         ArgOffset += PtrByteSize;
6299         continue;
6300       }
6301       // Copy entire object into memory.  There are cases where gcc-generated
6302       // code assumes it is there, even if it could be put entirely into
6303       // registers.  (This is not what the doc says.)
6304 
6305       // FIXME: The above statement is likely due to a misunderstanding of the
6306       // documents.  All arguments must be copied into the parameter area BY
6307       // THE CALLEE in the event that the callee takes the address of any
6308       // formal argument.  That has not yet been implemented.  However, it is
6309       // reasonable to use the stack area as a staging area for the register
6310       // load.
6311 
6312       // Skip this for small aggregates, as we will use the same slot for a
6313       // right-justified copy, below.
6314       if (Size >= 8)
6315         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6316                                                           CallSeqStart,
6317                                                           Flags, DAG, dl);
6318 
6319       // When a register is available, pass a small aggregate right-justified.
6320       if (Size < 8 && GPR_idx != NumGPRs) {
6321         // The easiest way to get this right-justified in a register
6322         // is to copy the structure into the rightmost portion of a
6323         // local variable slot, then load the whole slot into the
6324         // register.
6325         // FIXME: The memcpy seems to produce pretty awful code for
6326         // small aggregates, particularly for packed ones.
6327         // FIXME: It would be preferable to use the slot in the
6328         // parameter save area instead of a new local variable.
6329         SDValue AddPtr = PtrOff;
6330         if (!isLittleEndian) {
6331           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6332           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6333         }
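        // Illustrative: for a 5-byte aggregate on a big-endian target,
        // AddPtr = PtrOff + 3, so the copy below fills the rightmost five
        // bytes of the doubleword slot and the 8-byte load that follows
        // yields the aggregate right-justified in the GPR.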
6334         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6335                                                           CallSeqStart,
6336                                                           Flags, DAG, dl);
6337 
6338         // Load the slot into the register.
6339         SDValue Load =
6340             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6341         MemOpChains.push_back(Load.getValue(1));
6342         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6343 
6344         // Done with this argument.
6345         ArgOffset += PtrByteSize;
6346         continue;
6347       }
6348 
6349       // For aggregates larger than PtrByteSize, copy the pieces of the
6350       // object that fit into registers from the parameter save area.
6351       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6352         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6353         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6354         if (GPR_idx != NumGPRs) {
6355           SDValue Load =
6356               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6357           MemOpChains.push_back(Load.getValue(1));
6358           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6359           ArgOffset += PtrByteSize;
6360         } else {
6361           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6362           break;
6363         }
6364       }
6365       continue;
6366     }
6367 
6368     switch (Arg.getSimpleValueType().SimpleTy) {
6369     default: llvm_unreachable("Unexpected ValueType for argument!");
6370     case MVT::i1:
6371     case MVT::i32:
6372     case MVT::i64:
6373       if (Flags.isNest()) {
6374         // The 'nest' parameter, if any, is passed in R11.
6375         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6376         break;
6377       }
6378 
6379       // These can be scalar arguments or elements of an integer array type
6380       // passed directly.  Clang may use those instead of "byval" aggregate
6381       // types to avoid forcing arguments to memory unnecessarily.
6382       if (GPR_idx != NumGPRs) {
6383         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6384       } else {
6385         if (IsFastCall)
6386           ComputePtrOff();
6387 
6388         assert(HasParameterArea &&
6389                "Parameter area must exist to pass an argument in memory.");
6390         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6391                          true, CFlags.IsTailCall, false, MemOpChains,
6392                          TailCallArguments, dl);
6393         if (IsFastCall)
6394           ArgOffset += PtrByteSize;
6395       }
6396       if (!IsFastCall)
6397         ArgOffset += PtrByteSize;
6398       break;
6399     case MVT::f32:
6400     case MVT::f64: {
6401       // These can be scalar arguments or elements of a float array type
6402       // passed directly.  The latter are used to implement ELFv2 homogeneous
6403       // float aggregates.
6404 
6405       // Named arguments go into FPRs first, and once they overflow, the
6406       // remaining arguments go into GPRs and then the parameter save area.
6407       // Unnamed arguments for vararg functions always go to GPRs and
6408       // then the parameter save area.  For now, put all arguments to vararg
6409       // routines always in both locations (FPR *and* GPR or stack slot).
6410       bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6411       bool NeededLoad = false;
6412 
6413       // First load the argument into the next available FPR.
6414       if (FPR_idx != NumFPRs)
6415         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6416 
6417       // Next, load the argument into GPR or stack slot if needed.
6418       if (!NeedGPROrStack)
6419         ;
6420       else if (GPR_idx != NumGPRs && !IsFastCall) {
6421         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6422         // once we support fp <-> gpr moves.
6423 
6424         // In the non-vararg case, this can only ever happen in the
6425         // presence of f32 array types, since otherwise we never run
6426         // out of FPRs before running out of GPRs.
6427         SDValue ArgVal;
6428 
6429         // Double values are always passed in a single GPR.
6430         if (Arg.getValueType() != MVT::f32) {
6431           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6432 
6433         // Non-array float values are extended and passed in a GPR.
6434         } else if (!Flags.isInConsecutiveRegs()) {
6435           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6436           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6437 
6438         // If we have an array of floats, we collect every odd element
6439         // together with its predecessor into one GPR.
6440         } else if (ArgOffset % PtrByteSize != 0) {
6441           SDValue Lo, Hi;
6442           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6443           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6444           if (!isLittleEndian)
6445             std::swap(Lo, Hi);
6446           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
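          // Illustrative: for consecutive f32 elements {a, b} at offsets 0
          // and 4 of a doubleword, 'a' (OutVals[i - 1]) and 'b' (Arg) are
          // bitcast to i32 and paired into one i64; on big-endian 'a'
          // becomes the high word, matching its lower address in memory.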
6447 
6448         // The final element, if even, goes into the first half of a GPR.
6449         } else if (Flags.isInConsecutiveRegsLast()) {
6450           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6451           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6452           if (!isLittleEndian)
6453             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6454                                  DAG.getConstant(32, dl, MVT::i32));
6455 
6456         // Non-final even elements are skipped; they will be handled together
6457         // with the subsequent argument on the next go-around.
6458         } else
6459           ArgVal = SDValue();
6460 
6461         if (ArgVal.getNode())
6462           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6463       } else {
6464         if (IsFastCall)
6465           ComputePtrOff();
6466 
6467         // Single-precision floating-point values are mapped to the
6468         // second (rightmost) word of the stack doubleword.
6469         if (Arg.getValueType() == MVT::f32 &&
6470             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6471           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6472           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6473         }
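        // Illustrative: a lone f32 spilled to the stack on big-endian is
        // therefore stored at PtrOff + 4, the rightmost word of its
        // doubleword slot, which is where the callee expects to find it.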
6474 
6475         assert(HasParameterArea &&
6476                "Parameter area must exist to pass an argument in memory.");
6477         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6478                          true, CFlags.IsTailCall, false, MemOpChains,
6479                          TailCallArguments, dl);
6480 
6481         NeededLoad = true;
6482       }
6483       // When passing an array of floats, the array occupies consecutive
6484       // space in the argument area; only round up to the next doubleword
6485       // at the end of the array.  Otherwise, each float takes 8 bytes.
6486       if (!IsFastCall || NeededLoad) {
6487         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6488                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6489         if (Flags.isInConsecutiveRegsLast())
6490           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6491       }
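      // Illustrative: three consecutive f32 array elements advance ArgOffset
      // by 4 bytes each (12 total) and the last one rounds it up to the next
      // doubleword (16), whereas a scalar f32 or f64 always consumes 8.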
6492       break;
6493     }
6494     case MVT::v4f32:
6495     case MVT::v4i32:
6496     case MVT::v8i16:
6497     case MVT::v16i8:
6498     case MVT::v2f64:
6499     case MVT::v2i64:
6500     case MVT::v1i128:
6501     case MVT::f128:
6502       if (!Subtarget.hasQPX()) {
6503       // These can be scalar arguments or elements of a vector array type
6504       // passed directly.  The latter are used to implement ELFv2 homogeneous
6505       // vector aggregates.
6506 
6507       // For a varargs call, named arguments go into VRs or on the stack as
6508       // usual; unnamed arguments always go to the stack or the corresponding
6509       // GPRs when within range.  For now, we always put the value in both
6510       // locations (or even all three).
6511       if (CFlags.IsVarArg) {
6512         assert(HasParameterArea &&
6513                "Parameter area must exist if we have a varargs call.");
6514         // We could elide this store in the case where the object fits
6515         // entirely in R registers.  Maybe later.
6516         SDValue Store =
6517             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6518         MemOpChains.push_back(Store);
6519         if (VR_idx != NumVRs) {
6520           SDValue Load =
6521               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6522           MemOpChains.push_back(Load.getValue(1));
6523           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6524         }
6525         ArgOffset += 16;
6526         for (unsigned i=0; i<16; i+=PtrByteSize) {
6527           if (GPR_idx == NumGPRs)
6528             break;
6529           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6530                                    DAG.getConstant(i, dl, PtrVT));
6531           SDValue Load =
6532               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6533           MemOpChains.push_back(Load.getValue(1));
6534           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6535         }
6536         break;
6537       }
6538 
6539       // Non-varargs Altivec params go into VRs or on the stack.
6540       if (VR_idx != NumVRs) {
6541         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6542       } else {
6543         if (IsFastCall)
6544           ComputePtrOff();
6545 
6546         assert(HasParameterArea &&
6547                "Parameter area must exist to pass an argument in memory.");
6548         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6549                          true, CFlags.IsTailCall, true, MemOpChains,
6550                          TailCallArguments, dl);
6551         if (IsFastCall)
6552           ArgOffset += 16;
6553       }
6554 
6555       if (!IsFastCall)
6556         ArgOffset += 16;
6557       break;
6558       } // not QPX
6559 
6560       assert(Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32 &&
6561              "Invalid QPX parameter type");
6562 
6563       LLVM_FALLTHROUGH;
6564     case MVT::v4f64:
6565     case MVT::v4i1: {
6566       bool IsF32 = Arg.getValueType().getSimpleVT().SimpleTy == MVT::v4f32;
6567       if (CFlags.IsVarArg) {
6568         assert(HasParameterArea &&
6569                "Parameter area must exist if we have a varargs call.");
6570         // We could elide this store in the case where the object fits
6571         // entirely in R registers.  Maybe later.
6572         SDValue Store =
6573             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6574         MemOpChains.push_back(Store);
6575         if (QFPR_idx != NumQFPRs) {
6576           SDValue Load = DAG.getLoad(IsF32 ? MVT::v4f32 : MVT::v4f64, dl, Store,
6577                                      PtrOff, MachinePointerInfo());
6578           MemOpChains.push_back(Load.getValue(1));
6579           RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Load));
6580         }
6581         ArgOffset += (IsF32 ? 16 : 32);
6582         for (unsigned i = 0; i < (IsF32 ? 16U : 32U); i += PtrByteSize) {
6583           if (GPR_idx == NumGPRs)
6584             break;
6585           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6586                                    DAG.getConstant(i, dl, PtrVT));
6587           SDValue Load =
6588               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6589           MemOpChains.push_back(Load.getValue(1));
6590           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6591         }
6592         break;
6593       }
6594 
6595       // Non-varargs QPX params go into registers or on the stack.
6596       if (QFPR_idx != NumQFPRs) {
6597         RegsToPass.push_back(std::make_pair(QFPR[QFPR_idx++], Arg));
6598       } else {
6599         if (IsFastCall)
6600           ComputePtrOff();
6601 
6602         assert(HasParameterArea &&
6603                "Parameter area must exist to pass an argument in memory.");
6604         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6605                          true, CFlags.IsTailCall, true, MemOpChains,
6606                          TailCallArguments, dl);
6607         if (IsFastCall)
6608           ArgOffset += (IsF32 ? 16 : 32);
6609       }
6610 
6611       if (!IsFastCall)
6612         ArgOffset += (IsF32 ? 16 : 32);
6613       break;
6614       }
6615     }
6616   }
6617 
6618   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6619          "mismatch in size of parameter area");
6620   (void)NumBytesActuallyUsed;
6621 
6622   if (!MemOpChains.empty())
6623     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6624 
6625   // Check if this is an indirect call (MTCTR/BCTRL).
6626   // See prepareDescriptorIndirectCall and buildCallOperands for more
6627   // information about calls through function pointers in the 64-bit SVR4 ABI.
6628   if (CFlags.IsIndirect) {
6629     // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6630     // caller in the TOC save area.
6631     if (isTOCSaveRestoreRequired(Subtarget)) {
6632       assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
6633       // Load r2 into a virtual register and store it to the TOC save area.
6634       setUsesTOCBasePtr(DAG);
6635       SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6636       // TOC save area offset.
6637       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6638       SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6639       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6640       Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6641                            MachinePointerInfo::getStack(
6642                                DAG.getMachineFunction(), TOCSaveOffset));
6643     }
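    // Illustrative: the store above saves the caller's TOC pointer at
    // SP + getTOCSaveOffset() (e.g. SP + 40 under ELFv1, SP + 24 under
    // ELFv2, though the exact offset is the frame lowering's decision), so
    // the TOC restore after the call can reload it.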
6644     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6645     // This does not mean the MTCTR instruction must use R12; it's easier
6646     // to model this as an extra parameter, so do that.
6647     if (isELFv2ABI && !CFlags.IsPatchPoint)
6648       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6649   }
6650 
6651   // Build a sequence of copy-to-reg nodes chained together with token chain
6652   // and flag operands which copy the outgoing args into the appropriate regs.
6653   SDValue InFlag;
6654   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6655     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6656                              RegsToPass[i].second, InFlag);
6657     InFlag = Chain.getValue(1);
6658   }
6659 
6660   if (CFlags.IsTailCall && !IsSibCall)
6661     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6662                     TailCallArguments);
6663 
6664   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6665                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6666 }
6667 
6668 SDValue PPCTargetLowering::LowerCall_Darwin(
6669     SDValue Chain, SDValue Callee, CallFlags CFlags,
6670     const SmallVectorImpl<ISD::OutputArg> &Outs,
6671     const SmallVectorImpl<SDValue> &OutVals,
6672     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6673     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
6674     const CallBase *CB) const {
6675   unsigned NumOps = Outs.size();
6676 
6677   EVT PtrVT = getPointerTy(DAG.getDataLayout());
6678   bool isPPC64 = PtrVT == MVT::i64;
6679   unsigned PtrByteSize = isPPC64 ? 8 : 4;
6680 
6681   MachineFunction &MF = DAG.getMachineFunction();
6682 
6683   // Mark this function as potentially containing a function that contains a
6684   // tail call. As a consequence, the frame pointer will be used for dynamic
6685   // stack allocation and for restoring the caller's stack pointer in this
6686   // function's epilogue. This is necessary because the tail-called function
6687   // might overwrite the value in this function's (MF) stack slot 0(SP).
6688   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6689       CFlags.CallConv == CallingConv::Fast)
6690     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
6691 
6692   // Count how many bytes are to be pushed on the stack, including the linkage
6693   // area, and parameter passing area.  We start with 24/48 bytes, which is
6694   // prereserved space for [SP][CR][LR][3 x unused].
6695   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6696   unsigned NumBytes = LinkageSize;
6697 
6698   // Add up all the space actually used.
6699   // In 32-bit non-varargs calls, Altivec parameters all go at the end; usually
6700   // they all go in registers, but we must reserve stack space for them for
6701   // possible use by the caller.  In varargs or 64-bit calls, parameters are
6702   // assigned stack space in order, with padding so Altivec parameters are
6703   // 16-byte aligned.
6704   unsigned nAltivecParamsAtEnd = 0;
6705   for (unsigned i = 0; i != NumOps; ++i) {
6706     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6707     EVT ArgVT = Outs[i].VT;
6708     // Varargs Altivec parameters are padded to a 16-byte boundary.
6709     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
6710         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
6711         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64) {
6712       if (!CFlags.IsVarArg && !isPPC64) {
6713         // Non-varargs Altivec parameters go after all the non-Altivec
6714         // parameters; handle those later so we know how much padding we need.
6715         nAltivecParamsAtEnd++;
6716         continue;
6717       }
6718       // Varargs and 64-bit Altivec parameters are padded to a 16-byte boundary.
6719       NumBytes = ((NumBytes+15)/16)*16;
6720     }
6721     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6722   }
6723 
6724   // Allow for Altivec parameters at the end, if needed.
6725   if (nAltivecParamsAtEnd) {
6726     NumBytes = ((NumBytes+15)/16)*16;
6727     NumBytes += 16*nAltivecParamsAtEnd;
6728   }
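  // Illustrative: a 32-bit non-varargs call passing two v4f32 values rounds
  // NumBytes up to a 16-byte boundary here and then adds 2 * 16 = 32 bytes
  // for the Altivec parameters at the end of the frame.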
6729 
6730   // The prolog code of the callee may store up to 8 GPR argument registers to
6731   // the stack, allowing va_start to index over them in memory if it is a
6732   // varargs function.  Because we cannot tell if this is needed on the caller
6733   // side, we have to conservatively assume that it is.  As such, make sure we
6734   // have at least enough stack space for the callee to store the 8 GPRs.
6735   NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6736 
6737   // Tail call needs the stack to be aligned.
6738   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
6739       CFlags.CallConv == CallingConv::Fast)
6740     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6741 
6742   // Calculate by how many bytes the stack has to be adjusted in case of tail
6743   // call optimization.
6744   int SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6745 
6746   // To protect arguments on the stack from being clobbered in a tail call,
6747   // force all the loads to happen before doing any other lowering.
6748   if (CFlags.IsTailCall)
6749     Chain = DAG.getStackArgumentTokenFactor(Chain);
6750 
6751   // Adjust the stack pointer for the new arguments...
6752   // These operations are automatically eliminated by the prolog/epilog pass
6753   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6754   SDValue CallSeqStart = Chain;
6755 
6756   // Load the return address and frame pointer so they can be moved somewhere
6757   // else later.
6758   SDValue LROp, FPOp;
6759   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6760 
6761   // Set up a copy of the stack pointer for use in loading and storing any
6762   // arguments that may not fit in the registers available for argument
6763   // passing.
6764   SDValue StackPtr;
6765   if (isPPC64)
6766     StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6767   else
6768     StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
6769 
6770   // Figure out which arguments are going to go in registers, and which in
6771   // memory.  Also, if this is a vararg function, floating-point arguments
6772   // must be stored to our stack, and loaded into integer regs as well, if
6773   // any integer regs are available for argument passing.
6774   unsigned ArgOffset = LinkageSize;
6775   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
6776 
6777   static const MCPhysReg GPR_32[] = {           // 32-bit registers.
6778     PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6779     PPC::R7, PPC::R8, PPC::R9, PPC::R10,
6780   };
6781   static const MCPhysReg GPR_64[] = {           // 64-bit registers.
6782     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6783     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
6784   };
6785   static const MCPhysReg VR[] = {
6786     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
6787     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
6788   };
6789   const unsigned NumGPRs = array_lengthof(GPR_32);
6790   const unsigned NumFPRs = 13;
6791   const unsigned NumVRs  = array_lengthof(VR);
6792 
6793   const MCPhysReg *GPR = isPPC64 ? GPR_64 : GPR_32;
6794 
6795   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6796   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6797 
6798   SmallVector<SDValue, 8> MemOpChains;
6799   for (unsigned i = 0; i != NumOps; ++i) {
6800     SDValue Arg = OutVals[i];
6801     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6802 
6803     // PtrOff will be used to store the current argument to the stack if a
6804     // register cannot be found for it.
6805     SDValue PtrOff;
6806 
6807     PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6808 
6809     PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6810 
6811     // On PPC64, promote integers to 64-bit values.
6812     if (isPPC64 && Arg.getValueType() == MVT::i32) {
6813       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6814       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6815       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6816     }
6817 
6818     // FIXME memcpy is used way more than necessary.  Correctness first.
6819     // Note: "by value" is code for passing a structure by value, not
6820     // basic types.
6821     if (Flags.isByVal()) {
6822       unsigned Size = Flags.getByValSize();
6823       // Very small objects are passed right-justified.  Everything else is
6824       // passed left-justified.
6825       if (Size==1 || Size==2) {
6826         EVT VT = (Size==1) ? MVT::i8 : MVT::i16;
6827         if (GPR_idx != NumGPRs) {
6828           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6829                                         MachinePointerInfo(), VT);
6830           MemOpChains.push_back(Load.getValue(1));
6831           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6832 
6833           ArgOffset += PtrByteSize;
6834         } else {
6835           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6836                                           PtrOff.getValueType());
6837           SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6838           Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6839                                                             CallSeqStart,
6840                                                             Flags, DAG, dl);
6841           ArgOffset += PtrByteSize;
6842         }
6843         continue;
6844       }
6845       // Copy entire object into memory.  There are cases where gcc-generated
6846       // code assumes it is there, even if it could be put entirely into
6847       // registers.  (This is not what the doc says.)
6848       Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6849                                                         CallSeqStart,
6850                                                         Flags, DAG, dl);
6851 
6852       // For small aggregates (Darwin only) and aggregates >= PtrByteSize,
6853       // copy the pieces of the object that fit into registers from the
6854       // parameter save area.
6855       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6856         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6857         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6858         if (GPR_idx != NumGPRs) {
6859           SDValue Load =
6860               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6861           MemOpChains.push_back(Load.getValue(1));
6862           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6863           ArgOffset += PtrByteSize;
6864         } else {
6865           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6866           break;
6867         }
6868       }
6869       continue;
6870     }
6871 
6872     switch (Arg.getSimpleValueType().SimpleTy) {
6873     default: llvm_unreachable("Unexpected ValueType for argument!");
6874     case MVT::i1:
6875     case MVT::i32:
6876     case MVT::i64:
6877       if (GPR_idx != NumGPRs) {
6878         if (Arg.getValueType() == MVT::i1)
6879           Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, PtrVT, Arg);
6880 
6881         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6882       } else {
6883         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6884                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6885                          TailCallArguments, dl);
6886       }
6887       ArgOffset += PtrByteSize;
6888       break;
6889     case MVT::f32:
6890     case MVT::f64:
6891       if (FPR_idx != NumFPRs) {
6892         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6893 
6894         if (CFlags.IsVarArg) {
6895           SDValue Store =
6896               DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6897           MemOpChains.push_back(Store);
6898 
6899           // Float varargs are always shadowed in available integer registers
6900           if (GPR_idx != NumGPRs) {
6901             SDValue Load =
6902                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6903             MemOpChains.push_back(Load.getValue(1));
6904             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6905           }
6906           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 && !isPPC64){
6907             SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6908             PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6909             SDValue Load =
6910                 DAG.getLoad(PtrVT, dl, Store, PtrOff, MachinePointerInfo());
6911             MemOpChains.push_back(Load.getValue(1));
6912             RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6913           }
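          // Illustrative: on 32-bit Darwin a vararg f64 is stored once and
          // reloaded as two 4-byte words, so the same value travels in an
          // FPR and is shadowed in a GPR pair when GPRs remain.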
6914         } else {
6915           // If we have any FPRs remaining, we may also have GPRs remaining.
6916           // Args passed in FPRs consume either 1 (f32) or 2 (f64) available
6917           // GPRs.
6918           if (GPR_idx != NumGPRs)
6919             ++GPR_idx;
6920           if (GPR_idx != NumGPRs && Arg.getValueType() == MVT::f64 &&
6921               !isPPC64)  // PPC64 has 64-bit GPRs obviously :)
6922             ++GPR_idx;
6923         }
6924       } else
6925         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6926                          isPPC64, CFlags.IsTailCall, false, MemOpChains,
6927                          TailCallArguments, dl);
6928       if (isPPC64)
6929         ArgOffset += 8;
6930       else
6931         ArgOffset += Arg.getValueType() == MVT::f32 ? 4 : 8;
6932       break;
6933     case MVT::v4f32:
6934     case MVT::v4i32:
6935     case MVT::v8i16:
6936     case MVT::v16i8:
6937       if (CFlags.IsVarArg) {
6938         // These go aligned on the stack, or in the corresponding R registers
6939         // when within range.  The Darwin PPC ABI doc claims they also go in
6940         // V registers; in fact gcc does this only for arguments that are
6941         // prototyped, not for those that match the ...  We do it for all
6942         // arguments, seems to work.
6943         while (ArgOffset % 16 != 0) {
6944           ArgOffset += PtrByteSize;
6945           if (GPR_idx != NumGPRs)
6946             GPR_idx++;
6947         }
6948         // We could elide this store in the case where the object fits
6949         // entirely in R registers.  Maybe later.
6950         PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
6951                              DAG.getConstant(ArgOffset, dl, PtrVT));
6952         SDValue Store =
6953             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6954         MemOpChains.push_back(Store);
6955         if (VR_idx != NumVRs) {
6956           SDValue Load =
6957               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6958           MemOpChains.push_back(Load.getValue(1));
6959           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6960         }
6961         ArgOffset += 16;
6962         for (unsigned i=0; i<16; i+=PtrByteSize) {
6963           if (GPR_idx == NumGPRs)
6964             break;
6965           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6966                                    DAG.getConstant(i, dl, PtrVT));
6967           SDValue Load =
6968               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6969           MemOpChains.push_back(Load.getValue(1));
6970           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6971         }
6972         break;
6973       }
6974 
6975       // Non-varargs Altivec params generally go in registers, but have
6976       // stack space allocated at the end.
6977       if (VR_idx != NumVRs) {
6978         // Doesn't have GPR space allocated.
6979         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6980       } else if (nAltivecParamsAtEnd==0) {
6981         // We are emitting Altivec params in order.
6982         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6983                          isPPC64, CFlags.IsTailCall, true, MemOpChains,
6984                          TailCallArguments, dl);
6985         ArgOffset += 16;
6986       }
6987       break;
6988     }
6989   }
6990   // If all Altivec parameters fit in registers, as they usually do,
6991   // they get stack space following the non-Altivec parameters.  We
6992   // don't track this here because nobody below needs it.
6993   // If there are more Altivec parameters than fit in registers, emit
6994   // the stores here.
6995   if (!CFlags.IsVarArg && nAltivecParamsAtEnd > NumVRs) {
6996     unsigned j = 0;
6997     // Offset is aligned; skip the first 12 params, which go in V registers.
6998     ArgOffset = ((ArgOffset+15)/16)*16;
6999     ArgOffset += 12*16;
7000     for (unsigned i = 0; i != NumOps; ++i) {
7001       SDValue Arg = OutVals[i];
7002       EVT ArgType = Outs[i].VT;
7003       if (ArgType==MVT::v4f32 || ArgType==MVT::v4i32 ||
7004           ArgType==MVT::v8i16 || ArgType==MVT::v16i8) {
7005         if (++j > NumVRs) {
7006           SDValue PtrOff;
7007           // We are emitting Altivec params in order.
7008           LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
7009                            isPPC64, CFlags.IsTailCall, true, MemOpChains,
7010                            TailCallArguments, dl);
7011           ArgOffset += 16;
7012         }
7013       }
7014     }
7015   }
7016 
7017   if (!MemOpChains.empty())
7018     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7019 
7020   // On Darwin, R12 must contain the address of an indirect callee.  This does
7021   // not mean the MTCTR instruction must use R12; it's easier to model this as
7022   // an extra parameter, so do that.
7023   if (CFlags.IsIndirect) {
7024     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7025     RegsToPass.push_back(std::make_pair((unsigned)(isPPC64 ? PPC::X12 :
7026                                                    PPC::R12), Callee));
7027   }
7028 
7029   // Build a sequence of copy-to-reg nodes chained together with token chain
7030   // and flag operands which copy the outgoing args into the appropriate regs.
7031   SDValue InFlag;
7032   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
7033     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
7034                              RegsToPass[i].second, InFlag);
7035     InFlag = Chain.getValue(1);
7036   }
7037 
7038   if (CFlags.IsTailCall)
7039     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
7040                     TailCallArguments);
7041 
7042   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7043                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7044 }
7045 
7046 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
7047                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
7048                    CCState &State) {
7049 
7050   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
7051       State.getMachineFunction().getSubtarget());
7052   const bool IsPPC64 = Subtarget.isPPC64();
7053   const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
7054   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
7055 
7056   assert((!ValVT.isInteger() ||
7057           (ValVT.getSizeInBits() <= RegVT.getSizeInBits())) &&
7058          "Integer argument exceeds register size: should have been legalized");
7059 
7060   if (ValVT == MVT::f128)
7061     report_fatal_error("f128 is unimplemented on AIX.");
7062 
7063   if (ArgFlags.isNest())
7064     report_fatal_error("Nest arguments are unimplemented.");
7065 
7066   if (ValVT.isVector() || LocVT.isVector())
7067     report_fatal_error("Vector arguments are unimplemented on AIX.");
7068 
7069   static const MCPhysReg GPR_32[] = {// 32-bit registers.
7070                                      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7071                                      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7072   static const MCPhysReg GPR_64[] = {// 64-bit registers.
7073                                      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7074                                      PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7075 
7076   if (ArgFlags.isByVal()) {
7077     if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
7078       report_fatal_error("Pass-by-value arguments with alignment greater than "
7079                          "register width are not supported.");
7080 
7081     const unsigned ByValSize = ArgFlags.getByValSize();
7082 
7083     // An empty aggregate parameter takes up no storage and no registers,
7084     // but needs a MemLoc for a stack slot for the formal arguments side.
7085     if (ByValSize == 0) {
7086       State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
7087                                        State.getNextStackOffset(), RegVT,
7088                                        LocInfo));
7089       return false;
7090     }
7091 
7092     const unsigned StackSize = alignTo(ByValSize, PtrAlign);
7093     unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
7094     for (const unsigned E = Offset + StackSize; Offset < E;
7095          Offset += PtrAlign.value()) {
7096       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
7097         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7098       else {
7099         State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
7100                                          Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
7101                                          LocInfo));
7102         break;
7103       }
7104     }
7105     return false;
7106   }
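  // Worked example (illustrative): a 10-byte by-value aggregate on 64-bit
  // AIX rounds up to StackSize = 16, reserving 16 bytes of PSA and up to
  // two GPRs; if only one GPR remains, the second doubleword gets a MemLoc
  // and the register loop above stops.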
7107 
7108   // Arguments always reserve space in the parameter save area.
7109   switch (ValVT.SimpleTy) {
7110   default:
7111     report_fatal_error("Unhandled value type for argument.");
7112   case MVT::i64:
7113     // i64 arguments should have been split to i32 for PPC32.
7114     assert(IsPPC64 && "PPC32 should have split i64 values.");
7115     LLVM_FALLTHROUGH;
7116   case MVT::i1:
7117   case MVT::i32: {
7118     const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
7119     // AIX integer arguments are always passed in register width.
7120     if (ValVT.getSizeInBits() < RegVT.getSizeInBits())
7121       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
7122                                   : CCValAssign::LocInfo::ZExt;
7123     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
7124       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7125     else
7126       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
7127 
7128     return false;
7129   }
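  // Illustrative: an i32 argument on 64-bit AIX is marked SExt or ZExt per
  // its attributes and lands either in one of X3-X10 (RegVT i64) or in an
  // 8-byte PSA slot, so integer arguments always occupy a full register
  // width.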
7130   case MVT::f32:
7131   case MVT::f64: {
7132     // Parameter save area (PSA) is reserved even if the float is passed in an FPR.
7133     const unsigned StoreSize = LocVT.getStoreSize();
7134     // Floats are always 4-byte aligned in the PSA on AIX.
7135     // This includes f64 in 64-bit mode for ABI compatibility.
7136     const unsigned Offset =
7137         State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
7138     unsigned FReg = State.AllocateReg(FPR);
7139     if (FReg)
7140       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
7141 
7142     // Reserve and initialize GPRs or initialize the PSA as required.
7143     for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
7144       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
7145         assert(FReg && "An FPR should be available when a GPR is reserved.");
7146         if (State.isVarArg()) {
7147           // Successfully reserved GPRs are only initialized for vararg calls.
7148           // Custom handling is required for:
7149           //   f64 in PPC32 needs to be split into 2 GPRs.
7150           //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
7151           State.addLoc(
7152               CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
7153         }
7154       } else {
7155         // If there are insufficient GPRs, the PSA needs to be initialized.
7156         // Initialization occurs even if an FPR was initialized, for
7157         // compatibility with the AIX XL compiler. The full memory for the
7158         // argument will be initialized even if a prior word is saved in a GPR.
7159         // A custom MemLoc is used when the argument also passes in an FPR so
7160         // that the callee handling can skip over it easily.
7161         State.addLoc(
7162             FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
7163                                              LocInfo)
7164                  : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
7165         break;
7166       }
7167     }
7168 
7169     return false;
7170   }
7171   }
7172   return true;
7173 }
7174 
7175 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
7176                                                     bool IsPPC64) {
7177   assert((IsPPC64 || SVT != MVT::i64) &&
7178          "i64 should have been split for 32-bit codegen.");
7179 
7180   switch (SVT) {
7181   default:
7182     report_fatal_error("Unexpected value type for formal argument");
7183   case MVT::i1:
7184   case MVT::i32:
7185   case MVT::i64:
7186     return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7187   case MVT::f32:
7188     return &PPC::F4RCRegClass;
7189   case MVT::f64:
7190     return &PPC::F8RCRegClass;
7191   }
7192 }
7193 
7194 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
7195                                         SelectionDAG &DAG, SDValue ArgValue,
7196                                         MVT LocVT, const SDLoc &dl) {
7197   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
7198   assert(ValVT.getSizeInBits() < LocVT.getSizeInBits());
7199 
7200   if (Flags.isSExt())
7201     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
7202                            DAG.getValueType(ValVT));
7203   else if (Flags.isZExt())
7204     ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
7205                            DAG.getValueType(ValVT));
7206 
7207   return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
7208 }
7209 
7210 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
7211   const unsigned LASize = FL->getLinkageSize();
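       // For example, with the 64-bit linkage area of 48 bytes (see the LSA
       // comment in LowerCall_AIX below), X5 maps to 48 + 8 * (X5 - X3) = 64.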
7212 
7213   if (PPC::GPRCRegClass.contains(Reg)) {
7214     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
7215            "Reg must be a valid argument register!");
7216     return LASize + 4 * (Reg - PPC::R3);
7217   }
7218 
7219   if (PPC::G8RCRegClass.contains(Reg)) {
7220     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
7221            "Reg must be a valid argument register!");
7222     return LASize + 8 * (Reg - PPC::X3);
7223   }
7224 
7225   llvm_unreachable("Only general purpose registers expected.");
7226 }
7227 
7228 //   AIX ABI Stack Frame Layout:
7229 //
7230 //   Low Memory +--------------------------------------------+
7231 //   SP   +---> | Back chain                                 | ---+
7232 //        |     +--------------------------------------------+    |
7233 //        |     | Saved Condition Register                   |    |
7234 //        |     +--------------------------------------------+    |
7235 //        |     | Saved Linkage Register                     |    |
7236 //        |     +--------------------------------------------+    | Linkage Area
7237 //        |     | Reserved for compilers                     |    |
7238 //        |     +--------------------------------------------+    |
7239 //        |     | Reserved for binders                       |    |
7240 //        |     +--------------------------------------------+    |
7241 //        |     | Saved TOC pointer                          | ---+
7242 //        |     +--------------------------------------------+
7243 //        |     | Parameter save area                        |
7244 //        |     +--------------------------------------------+
7245 //        |     | Alloca space                               |
7246 //        |     +--------------------------------------------+
7247 //        |     | Local variable space                       |
7248 //        |     +--------------------------------------------+
7249 //        |     | Float/int conversion temporary             |
7250 //        |     +--------------------------------------------+
7251 //        |     | Save area for AltiVec registers            |
7252 //        |     +--------------------------------------------+
7253 //        |     | AltiVec alignment padding                  |
7254 //        |     +--------------------------------------------+
7255 //        |     | Save area for VRSAVE register              |
7256 //        |     +--------------------------------------------+
7257 //        |     | Save area for General Purpose registers    |
7258 //        |     +--------------------------------------------+
7259 //        |     | Save area for Floating Point registers     |
7260 //        |     +--------------------------------------------+
7261 //        +---- | Back chain                                 |
7262 // High Memory  +--------------------------------------------+
7263 //
7264 //  Specifications:
7265 //  AIX 7.2 Assembler Language Reference
7266 //  Subroutine linkage convention
7267 
7268 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
7269     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
7270     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7271     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
7272 
7273   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
7274           CallConv == CallingConv::Fast) &&
7275          "Unexpected calling convention!");
7276 
7277   if (getTargetMachine().Options.GuaranteedTailCallOpt)
7278     report_fatal_error("Tail call support is unimplemented on AIX.");
7279 
7280   if (useSoftFloat())
7281     report_fatal_error("Soft float support is unimplemented on AIX.");
7282 
7283   const PPCSubtarget &Subtarget =
7284       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
7285   if (Subtarget.hasQPX())
7286     report_fatal_error("QPX support is not supported on AIX.");
7287 
7288   const bool IsPPC64 = Subtarget.isPPC64();
7289   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7290 
7291   // Assign locations to all of the incoming arguments.
7292   SmallVector<CCValAssign, 16> ArgLocs;
7293   MachineFunction &MF = DAG.getMachineFunction();
7294   MachineFrameInfo &MFI = MF.getFrameInfo();
7295   CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
7296 
7297   const EVT PtrVT = getPointerTy(MF.getDataLayout());
7298   // Reserve space for the linkage area on the stack.
7299   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7300   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7301   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
7302 
7303   SmallVector<SDValue, 8> MemOps;
7304 
7305   for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
7306     CCValAssign &VA = ArgLocs[I++];
7307     MVT LocVT = VA.getLocVT();
7308     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
7309 
7310     // For compatibility with the AIX XL compiler, the float args in the
7311     // parameter save area are initialized even if the argument is available
7312     // in register.  The caller is required to initialize both the register
7313     // and memory, however, the callee can choose to expect it in either.
7314     // The memloc is dismissed here because the argument is retrieved from
7315     // the register.
7316     if (VA.isMemLoc() && VA.needsCustom())
7317       continue;
7318 
7319     if (Flags.isByVal() && VA.isMemLoc()) {
7320       const unsigned Size =
7321           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
7322                   PtrByteSize);
7323       const int FI = MF.getFrameInfo().CreateFixedObject(
7324           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
7325           /* IsAliased */ true);
7326       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7327       InVals.push_back(FIN);
7328 
7329       continue;
7330     }
7331 
7332     if (Flags.isByVal()) {
7333       assert(VA.isRegLoc() && "MemLocs should already be handled.");
7334 
7335       const MCPhysReg ArgReg = VA.getLocReg();
7336       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
7337 
7338       if (Flags.getNonZeroByValAlign() > PtrByteSize)
7339         report_fatal_error("Over aligned byvals not supported yet.");
7340 
7341       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
7342       const int FI = MF.getFrameInfo().CreateFixedObject(
7343           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
7344           /* IsAliased */ true);
7345       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7346       InVals.push_back(FIN);
7347 
7348       // Add live ins for all the RegLocs for the same ByVal.
7349       const TargetRegisterClass *RegClass =
7350           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
7351 
7352       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
7353                                                unsigned Offset) {
7354         const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
7355         // Since the callers side has left justified the aggregate in the
7356         // register, we can simply store the entire register into the stack
7357         // slot.
7358         SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7359         // The store to the fixed-stack object is needed because accessing
7360         // a field of the ByVal will use a gep and load. Ideally we will
7361         // optimize to extract the value from the register directly, and
7362         // elide the stores when the argument's address is not taken, but
7363         // that is future work.
7364         SDValue Store =
7365             DAG.getStore(CopyFrom.getValue(1), dl, CopyFrom,
7366                          DAG.getObjectPtrOffset(dl, FIN, Offset),
7367                          MachinePointerInfo::getFixedStack(MF, FI, Offset));
7368 
7369         MemOps.push_back(Store);
7370       };
7371 
7372       unsigned Offset = 0;
7373       HandleRegLoc(VA.getLocReg(), Offset);
7374       Offset += PtrByteSize;
7375       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7376            Offset += PtrByteSize) {
7377         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7378                "RegLocs should be for ByVal argument.");
7379 
7380         const CCValAssign RL = ArgLocs[I++];
7381         HandleRegLoc(RL.getLocReg(), Offset);
7382       }
7383 
7384       if (Offset != StackSize) {
7385         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7386                "Expected MemLoc for remaining bytes.");
7387         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
7388         // Consume the MemLoc. The InVal has already been emitted, so
7389         // nothing more needs to be done.
7390         ++I;
7391       }
7392 
7393       continue;
7394     }
7395 
7396     EVT ValVT = VA.getValVT();
7397     if (VA.isRegLoc() && !VA.needsCustom()) {
7398       MVT::SimpleValueType SVT = ValVT.getSimpleVT().SimpleTy;
7399       unsigned VReg =
7400           MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7401       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7402       if (ValVT.isScalarInteger() &&
7403           (ValVT.getSizeInBits() < LocVT.getSizeInBits())) {
7404         ArgValue =
7405             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7406       }
7407       InVals.push_back(ArgValue);
7408       continue;
7409     }
7410     if (VA.isMemLoc()) {
7411       const unsigned LocSize = LocVT.getStoreSize();
7412       const unsigned ValSize = ValVT.getStoreSize();
7413       assert((ValSize <= LocSize) &&
7414              "Object size is larger than size of MemLoc");
7415       int CurArgOffset = VA.getLocMemOffset();
7416       // Objects are right-justified because AIX is big-endian.
7417       if (LocSize > ValSize)
7418         CurArgOffset += LocSize - ValSize;
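           // For example, an i32 promoted to an 8-byte slot on 64-bit AIX sits
           // in the high-addressed 4 bytes, so the load offset is biased by 4.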
7419       // Potential tail calls could cause overwriting of argument stack slots.
7420       const bool IsImmutable =
7421           !(getTargetMachine().Options.GuaranteedTailCallOpt &&
7422             (CallConv == CallingConv::Fast));
7423       int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
7424       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
7425       SDValue ArgValue =
7426           DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
7427       InVals.push_back(ArgValue);
7428       continue;
7429     }
7430   }
7431 
7432   // On AIX a minimum of 8 words is saved to the parameter save area.
7433   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7434   // Area that is at least reserved in the caller of this function.
7435   unsigned CallerReservedArea =
7436       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
7437 
7438   // Set the size that is at least reserved in caller of this function. Tail
7439   // call optimized function's reserved stack space needs to be aligned so
7440   // that taking the difference between two stack areas will result in an
7441   // aligned stack.
7442   CallerReservedArea =
7443       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7444   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
7445   FuncInfo->setMinReservedArea(CallerReservedArea);
7446 
7447   if (isVarArg) {
7448     FuncInfo->setVarArgsFrameIndex(
7449         MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
7450     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7451 
7452     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7453                                        PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7454 
7455     static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7456                                        PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7457     const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
7458 
7459     // The fixed integer arguments of a variadic function are stored to the
7460     // VarArgsFrameIndex on the stack so that they may be loaded by
7461     // dereferencing the result of va_next.
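         // For example, if three fixed arguments consumed the first three
         // GPRs, GPRIndex starts at 3 and the remaining five argument GPRs
         // (R6-R10 or X6-X10) are spilled below.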
7462     for (unsigned GPRIndex =
7463              (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
7464          GPRIndex < NumGPArgRegs; ++GPRIndex) {
7465 
7466       const unsigned VReg =
7467           IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
7468                   : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
7469 
7470       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
7471       SDValue Store =
7472           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
7473       MemOps.push_back(Store);
7474       // Increment the address for the next argument to store.
7475       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
7476       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
7477     }
7478   }
7479 
7480   if (!MemOps.empty())
7481     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
7482 
7483   return Chain;
7484 }
7485 
7486 SDValue PPCTargetLowering::LowerCall_AIX(
7487     SDValue Chain, SDValue Callee, CallFlags CFlags,
7488     const SmallVectorImpl<ISD::OutputArg> &Outs,
7489     const SmallVectorImpl<SDValue> &OutVals,
7490     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7491     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7492     const CallBase *CB) const {
7493   // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
7494   // AIX ABI stack frame layout.
7495 
7496   assert((CFlags.CallConv == CallingConv::C ||
7497           CFlags.CallConv == CallingConv::Cold ||
7498           CFlags.CallConv == CallingConv::Fast) &&
7499          "Unexpected calling convention!");
7500 
7501   if (CFlags.IsPatchPoint)
7502     report_fatal_error("This call type is unimplemented on AIX.");
7503 
7504   const PPCSubtarget& Subtarget =
7505       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7506   if (Subtarget.hasQPX())
7507     report_fatal_error("QPX is not supported on AIX.");
7508   if (Subtarget.hasAltivec())
7509     report_fatal_error("Altivec support is unimplemented on AIX.");
7510 
7511   MachineFunction &MF = DAG.getMachineFunction();
7512   SmallVector<CCValAssign, 16> ArgLocs;
7513   CCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
7514                  *DAG.getContext());
7515 
7516   // Reserve space for the linkage save area (LSA) on the stack.
7517   // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7518   //   [SP][CR][LR][2 x reserved][TOC].
7519   // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7520   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7521   const bool IsPPC64 = Subtarget.isPPC64();
7522   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7523   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7524   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7525   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7526 
7527   // The prolog code of the callee may store up to 8 GPR argument registers to
7528   // the stack, allowing va_start to index over them in memory if the callee
7529   // is variadic.
7530   // Because we cannot tell if this is needed on the caller side, we have to
7531   // conservatively assume that it is needed.  As such, make sure we have at
7532   // least enough stack space for the caller to store the 8 GPRs.
7533   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7534   const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7535                                      CCInfo.getNextStackOffset());
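       // For example, even a call with no arguments on PPC64 allocates
       // 48 + 8 * 8 = 112 bytes here.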
7536 
7537   // Adjust the stack pointer for the new arguments...
7538   // These operations are automatically eliminated by the prolog/epilog pass.
7539   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7540   SDValue CallSeqStart = Chain;
7541 
7542   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7543   SmallVector<SDValue, 8> MemOpChains;
7544 
7545   // Set up a copy of the stack pointer for loading and storing any
7546   // arguments that may not fit in the registers available for argument
7547   // passing.
7548   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7549                                    : DAG.getRegister(PPC::R1, MVT::i32);
7550 
7551   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7552     const unsigned ValNo = ArgLocs[I].getValNo();
7553     SDValue Arg = OutVals[ValNo];
7554     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7555 
7556     if (Flags.isByVal()) {
7557       const unsigned ByValSize = Flags.getByValSize();
7558 
7559       // Nothing to do for zero-sized ByVals on the caller side.
7560       if (!ByValSize) {
7561         ++I;
7562         continue;
7563       }
7564 
7565       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7566         return DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain,
7567                               (LoadOffset != 0)
7568                                   ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
7569                                   : Arg,
7570                               MachinePointerInfo(), VT);
7571       };
7572 
7573       unsigned LoadOffset = 0;
7574 
7575       // Initialize registers, which are fully occupied by the by-val argument.
7576       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7577         SDValue Load = GetLoad(PtrVT, LoadOffset);
7578         MemOpChains.push_back(Load.getValue(1));
7579         LoadOffset += PtrByteSize;
7580         const CCValAssign &ByValVA = ArgLocs[I++];
7581         assert(ByValVA.getValNo() == ValNo &&
7582                "Unexpected location for pass-by-value argument.");
7583         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7584       }
7585 
7586       if (LoadOffset == ByValSize)
7587         continue;
7588 
7589       // There must be one more loc to handle the remainder.
7590       assert(ArgLocs[I].getValNo() == ValNo &&
7591              "Expected additional location for by-value argument.");
7592 
7593       if (ArgLocs[I].isMemLoc()) {
7594         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7595         const CCValAssign &ByValVA = ArgLocs[I++];
7596         ISD::ArgFlagsTy MemcpyFlags = Flags;
7597         // Only memcpy the bytes that don't pass in registers.
7598         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7599         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7600             (LoadOffset != 0) ? DAG.getObjectPtrOffset(dl, Arg, LoadOffset)
7601                               : Arg,
7602             DAG.getObjectPtrOffset(dl, StackPtr, ByValVA.getLocMemOffset()),
7603             CallSeqStart, MemcpyFlags, DAG, dl);
7604         continue;
7605       }
7606 
7607       // Initialize the final register residue.
7608       // Any residue that occupies the final by-val arg register must be
7609       // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7610       // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
7611       // 2 and 1 byte loads.
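           // For example, on PPC64 a 7-byte residue is assembled from a 4-, a
           // 2- and a 1-byte zero-extending load, shifted left by 32, 16 and 8
           // bits respectively and OR'd together, leaving the bytes
           // left-justified in the register.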
7612       const unsigned ResidueBytes = ByValSize % PtrByteSize;
7613       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7614              "Unexpected register residue for by-value argument.");
7615       SDValue ResidueVal;
7616       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7617         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7618         const MVT VT =
7619             N == 1 ? MVT::i8
7620                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7621         SDValue Load = GetLoad(VT, LoadOffset);
7622         MemOpChains.push_back(Load.getValue(1));
7623         LoadOffset += N;
7624         Bytes += N;
7625 
7626         // By-val arguments are passed left-justified in register.
7627         // Every load here needs to be shifted, otherwise a full register load
7628         // should have been used.
7629         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7630                "Unexpected load emitted during handling of pass-by-value "
7631                "argument.");
7632         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7633         EVT ShiftAmountTy =
7634             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7635         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7636         SDValue ShiftedLoad =
7637             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7638         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7639                                               ShiftedLoad)
7640                                 : ShiftedLoad;
7641       }
7642 
7643       const CCValAssign &ByValVA = ArgLocs[I++];
7644       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7645       continue;
7646     }
7647 
7648     CCValAssign &VA = ArgLocs[I++];
7649     const MVT LocVT = VA.getLocVT();
7650     const MVT ValVT = VA.getValVT();
7651 
7652     switch (VA.getLocInfo()) {
7653     default:
7654       report_fatal_error("Unexpected argument extension type.");
7655     case CCValAssign::Full:
7656       break;
7657     case CCValAssign::ZExt:
7658       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7659       break;
7660     case CCValAssign::SExt:
7661       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7662       break;
7663     }
7664 
7665     if (VA.isRegLoc() && !VA.needsCustom()) {
7666       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7667       continue;
7668     }
7669 
7670     if (VA.isMemLoc()) {
7671       SDValue PtrOff =
7672           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7673       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7674       MemOpChains.push_back(
7675           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7676 
7677       continue;
7678     }
7679 
7680     // Custom handling is used for GPR initializations for vararg float
7681     // arguments.
7682     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7683            ValVT.isFloatingPoint() && LocVT.isInteger() &&
7684            "Unexpected register handling for calling convention.");
7685 
7686     SDValue ArgAsInt =
7687         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7688 
7689     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7690       // f32 in 32-bit GPR
7691       // f64 in 64-bit GPR
7692       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7693     else if (Arg.getValueType().getSizeInBits() < LocVT.getSizeInBits())
7694       // f32 in 64-bit GPR.
7695       RegsToPass.push_back(std::make_pair(
7696           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7697     else {
7698       // f64 in two 32-bit GPRs
7699       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7700       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7701              "Unexpected custom register for argument!");
7702       CCValAssign &GPR1 = VA;
7703       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7704                                      DAG.getConstant(32, dl, MVT::i8));
7705       RegsToPass.push_back(std::make_pair(
7706           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7707 
7708       if (I != E) {
7709         // If only 1 GPR was available, there will only be one custom GPR and
7710         // the argument will also pass in memory.
7711         CCValAssign &PeekArg = ArgLocs[I];
7712         if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
7713           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7714           CCValAssign &GPR2 = ArgLocs[I++];
7715           RegsToPass.push_back(std::make_pair(
7716               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7717         }
7718       }
7719     }
7720   }
7721 
7722   if (!MemOpChains.empty())
7723     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7724 
7725   // For indirect calls, we need to save the TOC base to the stack for
7726   // restoration after the call.
7727   if (CFlags.IsIndirect) {
7728     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7729     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7730     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7731     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7732     const unsigned TOCSaveOffset =
7733         Subtarget.getFrameLowering()->getTOCSaveOffset();
7734 
7735     setUsesTOCBasePtr(DAG);
7736     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7737     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7738     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7739     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7740     Chain = DAG.getStore(
7741         Val.getValue(1), dl, Val, AddPtr,
7742         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7743   }
7744 
7745   // Build a sequence of copy-to-reg nodes chained together with token chain
7746   // and flag operands which copy the outgoing args into the appropriate regs.
7747   SDValue InFlag;
7748   for (auto Reg : RegsToPass) {
7749     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7750     InFlag = Chain.getValue(1);
7751   }
7752 
7753   const int SPDiff = 0;
7754   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7755                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7756 }
7757 
7758 bool
7759 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7760                                   MachineFunction &MF, bool isVarArg,
7761                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7762                                   LLVMContext &Context) const {
7763   SmallVector<CCValAssign, 16> RVLocs;
7764   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7765   return CCInfo.CheckReturn(
7766       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7767                 ? RetCC_PPC_Cold
7768                 : RetCC_PPC);
7769 }
7770 
7771 SDValue
7772 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7773                                bool isVarArg,
7774                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7775                                const SmallVectorImpl<SDValue> &OutVals,
7776                                const SDLoc &dl, SelectionDAG &DAG) const {
7777   SmallVector<CCValAssign, 16> RVLocs;
7778   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7779                  *DAG.getContext());
7780   CCInfo.AnalyzeReturn(Outs,
7781                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7782                            ? RetCC_PPC_Cold
7783                            : RetCC_PPC);
7784 
7785   SDValue Flag;
7786   SmallVector<SDValue, 4> RetOps(1, Chain);
7787 
7788   // Copy the result values into the output registers.
7789   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7790     CCValAssign &VA = RVLocs[i];
7791     assert(VA.isRegLoc() && "Can only return in registers!");
7792 
7793     SDValue Arg = OutVals[RealResIdx];
7794 
7795     switch (VA.getLocInfo()) {
7796     default: llvm_unreachable("Unknown loc info!");
7797     case CCValAssign::Full: break;
7798     case CCValAssign::AExt:
7799       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7800       break;
7801     case CCValAssign::ZExt:
7802       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7803       break;
7804     case CCValAssign::SExt:
7805       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7806       break;
7807     }
7808     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7809       bool isLittleEndian = Subtarget.isLittleEndian();
7810       // Legalize ret f64 -> ret 2 x i32.
7811       SDValue SVal =
7812           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7813                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7814       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7815       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7816       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7817                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7818       Flag = Chain.getValue(1);
7819       VA = RVLocs[++i]; // skip ahead to next loc
7820       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7821     } else
7822       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7823     Flag = Chain.getValue(1);
7824     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7825   }
7826 
7827   RetOps[0] = Chain;  // Update chain.
7828 
7829   // Add the flag if we have it.
7830   if (Flag.getNode())
7831     RetOps.push_back(Flag);
7832 
7833   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7834 }
7835 
7836 SDValue
7837 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7838                                                 SelectionDAG &DAG) const {
7839   SDLoc dl(Op);
7840 
7841   // Get the correct type for integers.
7842   EVT IntVT = Op.getValueType();
7843 
7844   // Get the inputs.
7845   SDValue Chain = Op.getOperand(0);
7846   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7847   // Build a DYNAREAOFFSET node.
7848   SDValue Ops[2] = {Chain, FPSIdx};
7849   SDVTList VTs = DAG.getVTList(IntVT);
7850   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7851 }
7852 
7853 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7854                                              SelectionDAG &DAG) const {
7855   // When we pop the dynamic allocation we need to restore the SP link.
7856   SDLoc dl(Op);
7857 
7858   // Get the correct type for pointers.
7859   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7860 
7861   // Construct the stack pointer operand.
7862   bool isPPC64 = Subtarget.isPPC64();
7863   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7864   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7865 
7866   // Get the operands for the STACKRESTORE.
7867   SDValue Chain = Op.getOperand(0);
7868   SDValue SaveSP = Op.getOperand(1);
7869 
7870   // Load the old link SP.
7871   SDValue LoadLinkSP =
7872       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7873 
7874   // Restore the stack pointer.
7875   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7876 
7877   // Store the old link SP.
7878   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7879 }
7880 
7881 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7882   MachineFunction &MF = DAG.getMachineFunction();
7883   bool isPPC64 = Subtarget.isPPC64();
7884   EVT PtrVT = getPointerTy(MF.getDataLayout());
7885 
7886   // Get the current return address save index.
7888   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7889   int RASI = FI->getReturnAddrSaveIndex();
7890 
7891   // If the return address save index hasn't been defined yet.
7892   if (!RASI) {
7893     // Find out the fixed offset of the return address save area.
7894     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7895     // Allocate the frame index for the return address save area.
7896     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, LROffset, false);
7897     // Save the result.
7898     FI->setReturnAddrSaveIndex(RASI);
7899   }
7900   return DAG.getFrameIndex(RASI, PtrVT);
7901 }
7902 
7903 SDValue
7904 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG &DAG) const {
7905   MachineFunction &MF = DAG.getMachineFunction();
7906   bool isPPC64 = Subtarget.isPPC64();
7907   EVT PtrVT = getPointerTy(MF.getDataLayout());
7908 
7909   // Get current frame pointer save index.  The users of this index will be
7910   // primarily DYNALLOC instructions.
7911   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7912   int FPSI = FI->getFramePointerSaveIndex();
7913 
7914   // If the frame pointer save index hasn't been defined yet.
7915   if (!FPSI) {
7916     // Find out the fixed offset of the frame pointer save area.
7917     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7918     // Allocate the frame index for the frame pointer save area.
7919     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, FPOffset, true);
7920     // Save the result.
7921     FI->setFramePointerSaveIndex(FPSI);
7922   }
7923   return DAG.getFrameIndex(FPSI, PtrVT);
7924 }
7925 
7926 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7927                                                    SelectionDAG &DAG) const {
7928   MachineFunction &MF = DAG.getMachineFunction();
7929   // Get the inputs.
7930   SDValue Chain = Op.getOperand(0);
7931   SDValue Size  = Op.getOperand(1);
7932   SDLoc dl(Op);
7933 
7934   // Get the correct type for pointers.
7935   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7936   // Negate the size.
7937   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7938                                 DAG.getConstant(0, dl, PtrVT), Size);
7939   // Construct a node for the frame pointer save index.
7940   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7941   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7942   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7943   if (hasInlineStackProbe(MF))
7944     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7945   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7946 }
7947 
7948 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7949                                              SelectionDAG &DAG) const {
7950   MachineFunction &MF = DAG.getMachineFunction();
7951 
7952   bool isPPC64 = Subtarget.isPPC64();
7953   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7954 
7955   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7956   return DAG.getFrameIndex(FI, PtrVT);
7957 }
7958 
7959 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7960                                                SelectionDAG &DAG) const {
7961   SDLoc DL(Op);
7962   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7963                      DAG.getVTList(MVT::i32, MVT::Other),
7964                      Op.getOperand(0), Op.getOperand(1));
7965 }
7966 
7967 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7968                                                 SelectionDAG &DAG) const {
7969   SDLoc DL(Op);
7970   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7971                      Op.getOperand(0), Op.getOperand(1));
7972 }
7973 
7974 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7975   if (Op.getValueType().isVector())
7976     return LowerVectorLoad(Op, DAG);
7977 
7978   assert(Op.getValueType() == MVT::i1 &&
7979          "Custom lowering only for i1 loads");
7980 
7981   // First, load 8 bits into 32 bits, then truncate to 1 bit.
7982 
7983   SDLoc dl(Op);
7984   LoadSDNode *LD = cast<LoadSDNode>(Op);
7985 
7986   SDValue Chain = LD->getChain();
7987   SDValue BasePtr = LD->getBasePtr();
7988   MachineMemOperand *MMO = LD->getMemOperand();
7989 
7990   SDValue NewLD =
7991       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7992                      BasePtr, MVT::i8, MMO);
7993   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7994 
7995   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7996   return DAG.getMergeValues(Ops, dl);
7997 }
7998 
7999 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
8000   if (Op.getOperand(1).getValueType().isVector())
8001     return LowerVectorStore(Op, DAG);
8002 
8003   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
8004          "Custom lowering only for i1 stores");
8005 
8006   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
8007 
8008   SDLoc dl(Op);
8009   StoreSDNode *ST = cast<StoreSDNode>(Op);
8010 
8011   SDValue Chain = ST->getChain();
8012   SDValue BasePtr = ST->getBasePtr();
8013   SDValue Value = ST->getValue();
8014   MachineMemOperand *MMO = ST->getMemOperand();
8015 
8016   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
8017                       Value);
8018   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
8019 }
8020 
8021 // FIXME: Remove this once the ANDI glue bug is fixed:
8022 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
8023   assert(Op.getValueType() == MVT::i1 &&
8024          "Custom lowering only for i1 results");
8025 
8026   SDLoc DL(Op);
8027   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
8028 }
8029 
8030 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
8031                                                SelectionDAG &DAG) const {
8032 
8033   // Implements a vector truncate that fits in a vector register as a shuffle.
8034   // We want to legalize vector truncates down to where the source fits in
8035   // a vector register (and target is therefore smaller than vector register
8036   // size).  At that point legalization will try to custom lower the sub-legal
8037   // result and get here - where we can contain the truncate as a single target
8038   // operation.
8039 
8040   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
8041   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
8042   //
8043   // We will implement it for big-endian ordering as this (where x denotes
8044   // undefined):
8045   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
8046   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
8047   //
8048   // The same operation in little-endian ordering will be:
8049   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
8050   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
8051 
8052   assert(Op.getValueType().isVector() && "Vector type expected.");
8053 
8054   SDLoc DL(Op);
8055   SDValue N1 = Op.getOperand(0);
8056   unsigned SrcSize = N1.getValueType().getSizeInBits();
8057   assert(SrcSize <= 128 && "Source must fit in an Altivec/VSX vector");
8058   SDValue WideSrc = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
8059 
8060   EVT TrgVT = Op.getValueType();
8061   unsigned TrgNumElts = TrgVT.getVectorNumElements();
8062   EVT EltVT = TrgVT.getVectorElementType();
8063   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8064   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8065 
8066   // First list the elements we want to keep.
8067   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
8068   SmallVector<int, 16> ShuffV;
8069   if (Subtarget.isLittleEndian())
8070     for (unsigned i = 0; i < TrgNumElts; ++i)
8071       ShuffV.push_back(i * SizeMult);
8072   else
8073     for (unsigned i = 1; i <= TrgNumElts; ++i)
8074       ShuffV.push_back(i * SizeMult - 1);
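       // For example, truncating v2i16 to v2i8 gives SizeMult == 2, so the
       // mask begins <0, 2, ...> on little-endian and <1, 3, ...> on
       // big-endian, selecting the low byte of each halfword.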
8075 
8076   // Populate the remaining elements with undefs.
8077   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
8079     ShuffV.push_back(WideNumElts + 1);
8080 
8081   SDValue Conv = DAG.getNode(ISD::BITCAST, DL, WideVT, WideSrc);
8082   return DAG.getVectorShuffle(WideVT, DL, Conv, DAG.getUNDEF(WideVT), ShuffV);
8083 }
8084 
8085 /// LowerSELECT_CC - Lower floating point select_cc's into fsel instruction when
8086 /// possible.
8087 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
8088   // Not FP? Not a fsel.
8089   if (!Op.getOperand(0).getValueType().isFloatingPoint() ||
8090       !Op.getOperand(2).getValueType().isFloatingPoint())
8091     return Op;
8092 
8093   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
8094 
8095   EVT ResVT = Op.getValueType();
8096   EVT CmpVT = Op.getOperand(0).getValueType();
8097   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
8098   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
8099   SDLoc dl(Op);
8100   SDNodeFlags Flags = Op.getNode()->getFlags();
8101 
8102   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
8103   // presence of infinities.
8104   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
8105     switch (CC) {
8106     default:
8107       break;
8108     case ISD::SETOGT:
8109     case ISD::SETGT:
8110       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
8111     case ISD::SETOLT:
8112     case ISD::SETLT:
8113       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
8114     }
8115   }
8116 
8117   // We might be able to do better than this under some circumstances, but in
8118   // general, fsel-based lowering of select is a finite-math-only optimization.
8119   // For more information, see section F.3 of the 2.06 ISA specification.
8121   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
8122       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
8123     return Op;
8124 
8125   // If the RHS of the comparison is a 0.0, we don't need to do the
8126   // subtraction at all.
8127   SDValue Sel1;
8128   if (isFloatingPointZero(RHS))
8129     switch (CC) {
8130     default: break;       // SETUO etc aren't handled by fsel.
8131     case ISD::SETNE:
8132       std::swap(TV, FV);
8133       LLVM_FALLTHROUGH;
8134     case ISD::SETEQ:
8135       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8136         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8137       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8138       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
8139         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8140       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8141                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
8142     case ISD::SETULT:
8143     case ISD::SETLT:
8144       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
8145       LLVM_FALLTHROUGH;
8146     case ISD::SETOGE:
8147     case ISD::SETGE:
8148       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8149         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8150       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
8151     case ISD::SETUGT:
8152     case ISD::SETGT:
8153       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
8154       LLVM_FALLTHROUGH;
8155     case ISD::SETOLE:
8156     case ISD::SETLE:
8157       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
8158         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
8159       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8160                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
8161     }
8162 
8163   SDValue Cmp;
8164   switch (CC) {
8165   default: break;       // SETUO etc aren't handled by fsel.
8166   case ISD::SETNE:
8167     std::swap(TV, FV);
8168     LLVM_FALLTHROUGH;
8169   case ISD::SETEQ:
8170     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8171     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8172       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8173     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8174     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
8175       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
8176     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
8177                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
8178   case ISD::SETULT:
8179   case ISD::SETLT:
8180     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8181     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8182       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8183     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8184   case ISD::SETOGE:
8185   case ISD::SETGE:
8186     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
8187     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8188       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8189     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8190   case ISD::SETUGT:
8191   case ISD::SETGT:
8192     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8193     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8194       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8195     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
8196   case ISD::SETOLE:
8197   case ISD::SETLE:
8198     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
8199     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
8200       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
8201     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
8202   }
8203   return Op;
8204 }
8205 
8206 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
8207                                                SelectionDAG &DAG,
8208                                                const SDLoc &dl) const {
8209   assert(Op.getOperand(0).getValueType().isFloatingPoint());
8210   SDValue Src = Op.getOperand(0);
8211   if (Src.getValueType() == MVT::f32)
8212     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8213 
8214   SDValue Tmp;
8215   switch (Op.getSimpleValueType().SimpleTy) {
8216   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
8217   case MVT::i32:
8218     Tmp = DAG.getNode(
8219         Op.getOpcode() == ISD::FP_TO_SINT
8220             ? PPCISD::FCTIWZ
8221             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
8222         dl, MVT::f64, Src);
8223     break;
8224   case MVT::i64:
8225     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
8226            "i64 FP_TO_UINT is supported only with FPCVT");
8227     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
8228                                                         PPCISD::FCTIDUZ,
8229                       dl, MVT::f64, Src);
8230     break;
8231   }
8232 
8233   // Convert the FP value to an int value through memory.
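       // (fctiwz/fctiwuz produce their 32-bit result in an FPR; stfiwx stores
       // just its low word, which is why a 4-byte stack slot suffices on
       // subtargets with STFIWX.)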
8234   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
8235     (Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT());
8236   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
8237   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
8238   MachinePointerInfo MPI =
8239       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
8240 
8241   // Emit a store to the stack slot.
8242   SDValue Chain;
8243   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
8244   if (i32Stack) {
8245     MachineFunction &MF = DAG.getMachineFunction();
8246     Alignment = Align(4);
8247     MachineMemOperand *MMO =
8248         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
8249     SDValue Ops[] = { DAG.getEntryNode(), Tmp, FIPtr };
8250     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
8251               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
8252   } else
8253     Chain = DAG.getStore(DAG.getEntryNode(), dl, Tmp, FIPtr, MPI, Alignment);
8254 
8255   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
8256   // add in a bias on big endian.
8257   if (Op.getValueType() == MVT::i32 && !i32Stack) {
8258     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
8259                         DAG.getConstant(4, dl, FIPtr.getValueType()));
8260     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
8261   }
8262 
8263   RLI.Chain = Chain;
8264   RLI.Ptr = FIPtr;
8265   RLI.MPI = MPI;
8266   RLI.Alignment = Alignment;
8267 }
8268 
8269 /// Custom lowers floating point to integer conversions to use
8270 /// the direct move instructions available in ISA 2.07 to avoid the
8271 /// need for load/store combinations.
8272 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
8273                                                     SelectionDAG &DAG,
8274                                                     const SDLoc &dl) const {
8275   assert(Op.getOperand(0).getValueType().isFloatingPoint());
8276   SDValue Src = Op.getOperand(0);
8277 
8278   if (Src.getValueType() == MVT::f32)
8279     Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
8280 
8281   SDValue Tmp;
8282   switch (Op.getSimpleValueType().SimpleTy) {
8283   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
8284   case MVT::i32:
8285     Tmp = DAG.getNode(
8286         Op.getOpcode() == ISD::FP_TO_SINT
8287             ? PPCISD::FCTIWZ
8288             : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ),
8289         dl, MVT::f64, Src);
8290     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i32, Tmp);
8291     break;
8292   case MVT::i64:
8293     assert((Op.getOpcode() == ISD::FP_TO_SINT || Subtarget.hasFPCVT()) &&
8294            "i64 FP_TO_UINT is supported only with FPCVT");
8295     Tmp = DAG.getNode(Op.getOpcode()==ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
8296                                                         PPCISD::FCTIDUZ,
8297                       dl, MVT::f64, Src);
8298     Tmp = DAG.getNode(PPCISD::MFVSR, dl, MVT::i64, Tmp);
8299     break;
8300   }
8301   return Tmp;
8302 }
8303 
8304 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
8305                                           const SDLoc &dl) const {
8306 
8307   // FP to INT conversions are legal for f128.
8308   if (Op->getOperand(0).getValueType() == MVT::f128)
8309     return Op;
8310 
8311   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8312   // PPC (the libcall is not available).
8313   if (Op.getOperand(0).getValueType() == MVT::ppcf128) {
8314     if (Op.getValueType() == MVT::i32) {
8315       if (Op.getOpcode() == ISD::FP_TO_SINT) {
8316         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
8317                                  MVT::f64, Op.getOperand(0),
8318                                  DAG.getIntPtrConstant(0, dl));
8319         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl,
8320                                  MVT::f64, Op.getOperand(0),
8321                                  DAG.getIntPtrConstant(1, dl));
8322 
8323         // Add the two halves of the long double in round-to-zero mode.
8324         SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8325 
8326         // Now use a smaller FP_TO_SINT.
8327         return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8328       }
8329       if (Op.getOpcode() == ISD::FP_TO_UINT) {
8330         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
        APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
        SDValue Tmp = DAG.getConstantFP(APF, dl, MVT::ppcf128);
        //  X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
        // FIXME: The generated code is suboptimal.
        // TODO: Are there fast-math-flags to propagate to this FSUB?
        SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128,
                                   Op.getOperand(0), Tmp);
        True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
        True = DAG.getNode(ISD::ADD, dl, MVT::i32, True,
                           DAG.getConstant(0x80000000, dl, MVT::i32));
        SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32,
                                    Op.getOperand(0));
        return DAG.getSelectCC(dl, Op.getOperand(0), Tmp, True, False,
                               ISD::SETGE);
      }
    }

    return SDValue();
  }

  if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
    return LowerFP_TO_INTDirectMove(Op, DAG, dl);

  ReuseLoadInfo RLI;
  LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);

  return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
                     RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
}

// We're trying to insert a regular store, S, and then a load, L. If the
// incoming value, O, is a load, we might just be able to have our load use the
// address used by O. However, we don't know if anything else will store to
// that address before we can load from it. To prevent this situation, we need
// to insert our load, L, into the chain as a peer of O. To do this, we give L
// the same chain operand as O, we create a token factor from the chain results
// of O and L, and we replace all uses of O's chain result with that token
// factor (see spliceIntoChain below for this last part).
bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
                                            ReuseLoadInfo &RLI,
                                            SelectionDAG &DAG,
                                            ISD::LoadExtType ET) const {
  SDLoc dl(Op);
  bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
                       (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
  if (ET == ISD::NON_EXTLOAD &&
      (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
      isOperationLegalOrCustom(Op.getOpcode(),
                               Op.getOperand(0).getValueType())) {

    LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
    return true;
  }

  LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
  if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
      LD->isNonTemporal())
    return false;
  if (LD->getMemoryVT() != MemVT)
    return false;

  RLI.Ptr = LD->getBasePtr();
  if (LD->isIndexed() && !LD->getOffset().isUndef()) {
    assert(LD->getAddressingMode() == ISD::PRE_INC &&
           "Non-pre-inc AM on PPC?");
    RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
                          LD->getOffset());
  }

  RLI.Chain = LD->getChain();
  RLI.MPI = LD->getPointerInfo();
  RLI.IsDereferenceable = LD->isDereferenceable();
  RLI.IsInvariant = LD->isInvariant();
  RLI.Alignment = LD->getAlign();
  RLI.AAInfo = LD->getAAInfo();
  RLI.Ranges = LD->getRanges();

  RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
  return true;
}

// Given the head of the old chain, ResChain, insert a token factor containing
// it and NewResChain, and make users of ResChain now be users of that token
// factor.
// TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
                                        SDValue NewResChain,
                                        SelectionDAG &DAG) const {
  if (!ResChain)
    return;

  SDLoc dl(NewResChain);

  SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                           NewResChain, DAG.getUNDEF(MVT::Other));
  assert(TF.getNode() != NewResChain.getNode() &&
         "A new TF really is required here");

  DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
  DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
}

/// Analyze the profitability of a direct move: prefer a float load over an
/// int load plus a direct move when the loaded integer value has no
/// integer uses.
bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
  SDNode *Origin = Op.getOperand(0).getNode();
  if (Origin->getOpcode() != ISD::LOAD)
    return true;

  // If there is no LXSIBZX/LXSIHZX, as on Power8,
  // prefer direct move if the memory size is 1 or 2 bytes.
  MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
  if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
    return true;

  for (SDNode::use_iterator UI = Origin->use_begin(),
                            UE = Origin->use_end();
       UI != UE; ++UI) {

    // Only look at the users of the loaded value.
    if (UI.getUse().get().getResNo() != 0)
      continue;

    if (UI->getOpcode() != ISD::SINT_TO_FP &&
        UI->getOpcode() != ISD::UINT_TO_FP)
      return true;
  }

  return false;
}

/// Custom lowers integer to floating point conversions to use
/// the direct move instructions available in ISA 2.07 to avoid the
/// need for load/store combinations.
SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    const SDLoc &dl) const {
  assert((Op.getValueType() == MVT::f32 ||
          Op.getValueType() == MVT::f64) &&
         "Invalid floating point type as target of conversion");
  assert(Subtarget.hasFPCVT() &&
         "Int to FP conversions with direct moves require FPCVT");
  SDValue FP;
  SDValue Src = Op.getOperand(0);
  bool SinglePrec = Op.getValueType() == MVT::f32;
  bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
  bool Signed = Op.getOpcode() == ISD::SINT_TO_FP;
  unsigned ConvOp = Signed ? (SinglePrec ? PPCISD::FCFIDS : PPCISD::FCFID) :
                             (SinglePrec ? PPCISD::FCFIDUS : PPCISD::FCFIDU);

  if (WordInt) {
    FP = DAG.getNode(Signed ? PPCISD::MTVSRA : PPCISD::MTVSRZ,
                     dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  } else {
    FP = DAG.getNode(PPCISD::MTVSRA, dl, MVT::f64, Src);
    FP = DAG.getNode(ConvOp, dl, SinglePrec ? MVT::f32 : MVT::f64, FP);
  }

  return FP;
}

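/// Widen a sub-128-bit vector to a full 128-bit vector by concatenating the
/// input with enough undef vectors of the same type; e.g. a v4i16 becomes
/// the v8i16 <Vec, undef>.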
static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {

  EVT VecVT = Vec.getValueType();
  assert(VecVT.isVector() && "Expected a vector type.");
  assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");

  EVT EltVT = VecVT.getVectorElementType();
  unsigned WideNumElts = 128 / EltVT.getSizeInBits();
  EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);

  unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
  SmallVector<SDValue, 16> Ops(NumConcat);
  Ops[0] = Vec;
  SDValue UndefVec = DAG.getUNDEF(VecVT);
  for (unsigned i = 1; i < NumConcat; ++i)
    Ops[i] = UndefVec;

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
}

SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
                                                const SDLoc &dl) const {

  unsigned Opc = Op.getOpcode();
  assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP) &&
         "Unexpected conversion type");
  assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
         "Supports conversions to v2f64/v4f32 only.");

  bool SignedConv = Opc == ISD::SINT_TO_FP;
  bool FourEltRes = Op.getValueType() == MVT::v4f32;

  SDValue Wide = widenVec(DAG, Op.getOperand(0), dl);
  EVT WideVT = Wide.getValueType();
  unsigned WideNumElts = WideVT.getVectorNumElements();
  MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;

  SmallVector<int, 16> ShuffV;
  for (unsigned i = 0; i < WideNumElts; ++i)
    ShuffV.push_back(i + WideNumElts);

  int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
  int SaveElts = FourEltRes ? 4 : 2;
  if (Subtarget.isLittleEndian())
    for (int i = 0; i < SaveElts; i++)
      ShuffV[i * Stride] = i;
  else
    for (int i = 1; i <= SaveElts; i++)
      ShuffV[i * Stride - 1] = i - 1;

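  // For example, for a v4i16 source and a v4f32 result on little-endian
  // targets: Wide is v8i16, ShuffV starts as <8,9,10,11,12,13,14,15> (all
  // elements taken from ShuffleSrc2), and the loop above rewrites it to
  // <0,9,1,11,2,13,3,15>. Each 32-bit lane of the shuffle result then holds
  // one source element padded with a ShuffleSrc2 element: zero for unsigned
  // conversions, undef for signed ones (fixed up by the SIGN_EXTEND_INREG
  // below).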
  SDValue ShuffleSrc2 =
      SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
  SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);

  SDValue Extend;
  if (SignedConv) {
    Arrange = DAG.getBitcast(IntermediateVT, Arrange);
    EVT ExtVT = Op.getOperand(0).getValueType();
    if (Subtarget.hasP9Altivec())
      ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
                               IntermediateVT.getVectorNumElements());

    Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
                         DAG.getValueType(ExtVT));
  } else
    Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);

  return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
}

SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
                                          SelectionDAG &DAG) const {
  SDLoc dl(Op);

  EVT InVT = Op.getOperand(0).getValueType();
  EVT OutVT = Op.getValueType();
  if (OutVT.isVector() && OutVT.isFloatingPoint() &&
      isOperationCustom(Op.getOpcode(), InVT))
    return LowerINT_TO_FPVector(Op, DAG, dl);

  // Conversions to f128 are legal.
  if (Op.getValueType() == MVT::f128)
    return Op;

  if (Subtarget.hasQPX() && Op.getOperand(0).getValueType() == MVT::v4i1) {
    if (Op.getValueType() != MVT::v4f32 && Op.getValueType() != MVT::v4f64)
      return SDValue();

    SDValue Value = Op.getOperand(0);
    // The values are now known to be -1 (false) or 1 (true). To convert this
    // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by
    // 0.5). This can be done with an fma and the 0.5 constant:
    // (V+1.0)*0.5 = 0.5*V+0.5
    Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);

    SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);

    Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);

    if (Op.getValueType() != MVT::v4f64)
      Value = DAG.getNode(ISD::FP_ROUND, dl,
                          Op.getValueType(), Value,
                          DAG.getIntPtrConstant(1, dl));
    return Value;
  }

  // Don't handle ppc_fp128 here; let it be lowered to a libcall.
  if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
    return SDValue();

  if (Op.getOperand(0).getValueType() == MVT::i1)
    return DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Op.getOperand(0),
                       DAG.getConstantFP(1.0, dl, Op.getValueType()),
                       DAG.getConstantFP(0.0, dl, Op.getValueType()));

  // If we have direct moves, we can do the conversion entirely in registers
  // and skip the store/load. However, without FPCVT we can't do most
  // conversions.
  if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
      Subtarget.isPPC64() && Subtarget.hasFPCVT())
    return LowerINT_TO_FPDirectMove(Op, DAG, dl);

  assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
         "UINT_TO_FP is supported only with FPCVT");

  // If we have FCFIDS, then use it when converting to single-precision.
  // Otherwise, convert to double-precision and then round.
  unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                       ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
                                                            : PPCISD::FCFIDS)
                       : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
                                                            : PPCISD::FCFID);
  MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
                  ? MVT::f32
                  : MVT::f64;

  if (Op.getOperand(0).getValueType() == MVT::i64) {
    SDValue SINT = Op.getOperand(0);
    // When converting to single-precision, we actually need to convert
    // to double-precision first and then round to single-precision.
    // To avoid double-rounding effects during that operation, we have
    // to prepare the input operand.  Bits that might be truncated when
    // converting to double-precision are replaced by a bit that won't
    // be lost at this stage, but is below the single-precision rounding
    // position.
    //
    // However, if -enable-unsafe-fp-math is in effect, accept double
    // rounding to avoid the extra overhead.
    if (Op.getValueType() == MVT::f32 &&
        !Subtarget.hasFPCVT() &&
        !DAG.getTarget().Options.UnsafeFPMath) {

      // Twiddle input to make sure the low 11 bits are zero.  (If this
      // is the case, we are guaranteed the value will fit into the 53 bit
      // mantissa of an IEEE double-precision value without rounding.)
      // If any of those low 11 bits were not zero originally, make sure
      // bit 12 (value 2048) is set instead, so that the final rounding
      // to single-precision gets the correct result.
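      // For example, an input whose low 12 bits are 0b0_00000000001 ends up
      // with low 12 bits 0b1_00000000000: the first AND extracts the low 11
      // bits, the ADD of 2047 carries into bit 11 exactly when any of them
      // is set, the OR merges the rest of the value back in, and the final
      // AND clears the low 11 bits.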
      SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
                                  SINT, DAG.getConstant(2047, dl, MVT::i64));
      Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
                          Round, DAG.getConstant(2047, dl, MVT::i64));
      Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
      Round = DAG.getNode(ISD::AND, dl, MVT::i64,
                          Round, DAG.getConstant(-2048, dl, MVT::i64));

      // However, we cannot use that value unconditionally: if the magnitude
      // of the input value is small, the bit-twiddling we did above might
      // end up visibly changing the output.  Fortunately, in that case, we
      // don't need to twiddle bits since the original input will convert
      // exactly to double-precision floating-point already.  Therefore,
      // construct a conditional to use the original value if the top 11
      // bits are all sign-bit copies, and use the rounded value computed
      // above otherwise.
      SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
                                 SINT, DAG.getConstant(53, dl, MVT::i32));
      Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
                         Cond, DAG.getConstant(1, dl, MVT::i64));
      Cond = DAG.getSetCC(
          dl,
          getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
          Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);

      SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
    }

    ReuseLoadInfo RLI;
    SDValue Bits;

    MachineFunction &MF = DAG.getMachineFunction();
    if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
      Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
                         RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasLFIWAX() &&
               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
      MachineMemOperand *MMO =
        MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
                                     DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (Subtarget.hasFPCVT() &&
               canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
      MachineMemOperand *MMO =
        MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
                                     DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
      spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
    } else if (((Subtarget.hasLFIWAX() &&
                 SINT.getOpcode() == ISD::SIGN_EXTEND) ||
                (Subtarget.hasFPCVT() &&
                 SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
               SINT.getOperand(0).getValueType() == MVT::i32) {
      MachineFrameInfo &MFI = MF.getFrameInfo();
      EVT PtrVT = getPointerTy(DAG.getDataLayout());

      int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
          DAG.getStore(DAG.getEntryNode(), dl, SINT.getOperand(0), FIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
      RLI.Alignment = Align(4);

      MachineMemOperand *MMO =
        MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                                RLI.Alignment, RLI.AAInfo, RLI.Ranges);
      SDValue Ops[] = { RLI.Chain, RLI.Ptr };
      Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
                                     PPCISD::LFIWZX : PPCISD::LFIWAX,
                                     dl, DAG.getVTList(MVT::f64, MVT::Other),
                                     Ops, MVT::i32, MMO);
    } else
      Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);

    SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Bits);

    if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
      FP = DAG.getNode(ISD::FP_ROUND, dl,
                       MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
    return FP;
  }

  assert(Op.getOperand(0).getValueType() == MVT::i32 &&
         "Unhandled INT_TO_FP type in custom expander!");
  // Since we only generate this in 64-bit mode, we can take advantage of
  // 64-bit registers.  In particular, sign extend the input value into the
  // 64-bit register with extsw, store the WHOLE 64-bit value into the stack
  // then lfd it and fcfid it.
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  SDValue Ld;
  if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
    ReuseLoadInfo RLI;
    bool ReusingLoad;
    if (!(ReusingLoad = canReuseLoadAddress(Op.getOperand(0), MVT::i32, RLI,
                                            DAG))) {
      int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
      SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

      SDValue Store =
          DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
                       MachinePointerInfo::getFixedStack(
                           DAG.getMachineFunction(), FrameIdx));

      assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
             "Expected an i32 store");

      RLI.Ptr = FIdx;
      RLI.Chain = Store;
      RLI.MPI =
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
      RLI.Alignment = Align(4);
    }

    MachineMemOperand *MMO =
      MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
                              RLI.Alignment, RLI.AAInfo, RLI.Ranges);
    SDValue Ops[] = { RLI.Chain, RLI.Ptr };
    Ld = DAG.getMemIntrinsicNode(Op.getOpcode() == ISD::UINT_TO_FP ?
                                   PPCISD::LFIWZX : PPCISD::LFIWAX,
                                 dl, DAG.getVTList(MVT::f64, MVT::Other),
                                 Ops, MVT::i32, MMO);
    if (ReusingLoad)
      spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
  } else {
    assert(Subtarget.isPPC64() &&
           "i32->FP without LFIWAX supported only on PPC64");

    int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64,
                                Op.getOperand(0));

    // STD the extended value into the stack slot.
    SDValue Store = DAG.getStore(
        DAG.getEntryNode(), dl, Ext64, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));

    // Load the value as a double.
    Ld = DAG.getLoad(
        MVT::f64, dl, Store, FIdx,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
  }

  // FCFID it and return it.
  SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Ld);
  if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT())
    FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
                     DAG.getIntPtrConstant(0, dl));
  return FP;
}

SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  /*
   The rounding mode is in bits 30:31 of FPSCR, and has the following
   settings:
     00 Round to nearest
     01 Round to 0
     10 Round to +inf
     11 Round to -inf

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
  */
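  // A quick check of the formula, with the FPSCR RN bits as input:
  //   00 -> 0 ^ ((~0 & 3) >> 1) = 0 ^ 1 = 1   (round to nearest)
  //   01 -> 1 ^ ((~1 & 3) >> 1) = 1 ^ 1 = 0   (round to 0)
  //   10 -> 2 ^ ((~2 & 3) >> 1) = 2 ^ 0 = 2   (round to +inf)
  //   11 -> 3 ^ ((~3 & 3) >> 1) = 3 ^ 0 = 3   (round to -inf)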

  MachineFunction &MF = DAG.getMachineFunction();
  EVT VT = Op.getValueType();
  EVT PtrVT = getPointerTy(MF.getDataLayout());

  // Save FP Control Word to register
  SDValue Chain = Op.getOperand(0);
  SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
  Chain = MFFS.getValue(1);

  // Save FP register to stack slot
  int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());

  // Load FP Control Word from low 32 bits of stack slot.
  SDValue Four = DAG.getConstant(4, dl, PtrVT);
  SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
  SDValue CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
  Chain = CWD.getValue(1);

  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::AND, dl, MVT::i32,
                CWD, DAG.getConstant(3, dl, MVT::i32));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, dl, MVT::i32,
                DAG.getNode(ISD::AND, dl, MVT::i32,
                            DAG.getNode(ISD::XOR, dl, MVT::i32,
                                        CWD, DAG.getConstant(3, dl, MVT::i32)),
                            DAG.getConstant(3, dl, MVT::i32)),
                DAG.getConstant(1, dl, MVT::i32));

  SDValue RetVal =
    DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);

  RetVal =
      DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
                  dl, VT, RetVal);

  return DAG.getMergeValues({RetVal, Chain}, dl);
}

SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  SDLoc dl(Op);
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SHL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
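  // PPC shift instructions consume one extra bit of shift amount: an amount
  // in [BitWidth, 2*BitWidth) yields zero (see slw/srw). That makes the
  // unwanted term of OutHi below drop out: for Amt < BitWidth the
  // Lo << (Amt - BitWidth) term is zero, for Amt > BitWidth the Hi << Amt
  // and Lo >> (BitWidth - Amt) terms are zero, and at Amt == BitWidth the
  // surviving terms agree.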
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
  SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRL!");

  // Expand into a bunch of logical ops.  Note that these ops
  // depend on the PPC behavior for oversized shift amounts.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
  SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
  SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned BitWidth = VT.getSizeInBits();
  assert(Op.getNumOperands() == 3 &&
         VT == Op.getOperand(1).getValueType() &&
         "Unexpected SRA!");

  // Expand into a bunch of logical ops, followed by a select_cc.
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);
  EVT AmtVT = Amt.getValueType();

  SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
                             DAG.getConstant(BitWidth, dl, AmtVT), Amt);
  SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
  SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
  SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
  SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
                             DAG.getConstant(-BitWidth, dl, AmtVT));
  SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
  SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
  SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
                                  Tmp4, Tmp6, ISD::SETLE);
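  // Tmp5 = Amt - BitWidth is <= 0 exactly when Amt <= BitWidth, in which
  // case the OR combination Tmp4 is the correct low word. Otherwise the low
  // word is Hi arithmetically shifted by Amt - BitWidth (Tmp6), which
  // supplies the sign fill that the SRL-based Tmp4 cannot.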
  SDValue OutOps[] = { OutLo, OutHi };
  return DAG.getMergeValues(OutOps, dl);
}

//===----------------------------------------------------------------------===//
// Vector related lowering.
//

/// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
/// element size of SplatSize. Cast the result to VT.
static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
                                      SelectionDAG &DAG, const SDLoc &dl) {
  static const MVT VTys[] = { // canonical VT to use for each size.
    MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
  };

  EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];

  // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
  if (Val == ((1ULL << (SplatSize * 8)) - 1)) {
    SplatSize = 1;
    Val = 0xFF;
  }
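  // For example, a two-byte splat of 0xFFFF is canonicalized by the block
  // above to a one-byte splat of 0xFF, which a single vspltisb -1 can
  // materialize.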

  EVT CanonicalVT = VTys[SplatSize-1];

  // Build a canonical splat for this value.
  return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
}

/// BuildIntrinsicOp - Return a unary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
                                const SDLoc &dl, EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op);
}

/// BuildIntrinsicOp - Return a binary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
                                SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = LHS.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
}

/// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
/// specified intrinsic ID.
static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
                                SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
                                EVT DestVT = MVT::Other) {
  if (DestVT == MVT::Other) DestVT = Op0.getValueType();
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
                     DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
}

/// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
/// amount.  The result has the specified value type.
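/// For example, Amt = 4 selects bytes 4..19 of the 32-byte LHS||RHS
/// concatenation, matching a vsldoi by 4.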
static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
                           SelectionDAG &DAG, const SDLoc &dl) {
  // Force LHS/RHS to be the right type.
  LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
  RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);

  int Ops[16];
  for (unsigned i = 0; i != 16; ++i)
    Ops[i] = i + Amt;
  SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
  return DAG.getNode(ISD::BITCAST, dl, VT, T);
}

/// Do we have an efficient pattern in a .td file for this node?
///
/// \param V - pointer to the BuildVectorSDNode being matched
/// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
///
/// There are some patterns where it is beneficial to keep a BUILD_VECTOR
/// node as a BUILD_VECTOR node rather than expanding it. The patterns where
/// the opposite is true (expansion is beneficial) are:
/// - The node builds a vector out of integers that are not 32 or 64-bits
/// - The node builds a vector out of constants
/// - The node is a "load-and-splat"
/// In all other cases, we will choose to keep the BUILD_VECTOR.
static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
                                            bool HasDirectMove,
                                            bool HasP8Vector) {
  EVT VecVT = V->getValueType(0);
  bool RightType = VecVT == MVT::v2f64 ||
    (HasP8Vector && VecVT == MVT::v4f32) ||
    (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
  if (!RightType)
    return false;

  bool IsSplat = true;
  bool IsLoad = false;
  SDValue Op0 = V->getOperand(0);

  // This function is called in a block that confirms the node is not a constant
  // splat. So a constant BUILD_VECTOR here means the vector is built out of
  // different constants.
  if (V->isConstant())
    return false;
  for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
    if (V->getOperand(i).isUndef())
      return false;
    // We want to expand nodes that represent load-and-splat even if the
    // loaded value is a floating point truncation or conversion to int.
    if (V->getOperand(i).getOpcode() == ISD::LOAD ||
        (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
        (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
         V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
      IsLoad = true;
    // If the operands are different or the input is not a load and has more
    // uses than just this BV node, then it isn't a splat.
    if (V->getOperand(i) != Op0 ||
        (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
      IsSplat = false;
  }
  return !(IsSplat && IsLoad);
}

// Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {

  SDLoc dl(Op);
  SDValue Op0 = Op->getOperand(0);

  if ((Op.getValueType() != MVT::f128) ||
      (Op0.getOpcode() != ISD::BUILD_PAIR) ||
      (Op0.getOperand(0).getValueType() != MVT::i64) ||
      (Op0.getOperand(1).getValueType() != MVT::i64))
    return SDValue();

  return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
                     Op0.getOperand(1));
}

static const SDValue *getNormalLoadInput(const SDValue &Op) {
  const SDValue *InputLoad = &Op;
  if (InputLoad->getOpcode() == ISD::BITCAST)
    InputLoad = &InputLoad->getOperand(0);
  if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
      InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED)
    InputLoad = &InputLoad->getOperand(0);
  if (InputLoad->getOpcode() != ISD::LOAD)
    return nullptr;
  LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
  return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
}

// Convert the argument APFloat to a single precision APFloat if there is no
// loss in information during the conversion to single precision APFloat and
// the resulting number is not a denormal number. Return true if successful.
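// For example, 2.5 converts to single precision exactly and stays normal, so
// it is accepted; a value near 1.0e-40 would become a single-precision
// denormal and is rejected.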
bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
  APFloat APFloatToConvert = ArgAPFloat;
  bool LosesInfo = true;
  APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
                           &LosesInfo);
  bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
  if (Success)
    ArgAPFloat = APFloatToConvert;
  return Success;
}

// Bitcast the argument APInt to a double and convert it to a single precision
// APFloat, bitcast the APFloat to an APInt and assign it to the original
// argument if there is no loss in information during the conversion from
// double to single precision APFloat and the resulting number is not a
// denormal number. Return true if successful.
bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
  double DpValue = ArgAPInt.bitsToDouble();
  APFloat APFloatDp(DpValue);
  bool Success = convertToNonDenormSingle(APFloatDp);
  if (Success)
    ArgAPInt = APFloatDp.bitcastToAPInt();
  return Success;
}

// If this is a case we can't handle, return null and let the default
// expansion code take care of it.  If we CAN select this case, and if it
// selects to a single instruction, return Op.  Otherwise, if we can codegen
// this case more efficiently than a constant pool load, lower it to the
// sequence of ops that should be used.
SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                             SelectionDAG &DAG) const {
  SDLoc dl(Op);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");

  if (Subtarget.hasQPX() && Op.getValueType() == MVT::v4i1) {
    // We first build an i32 vector, load it into a QPX register,
    // then convert it to a floating-point vector and compare it
    // to a zero vector to get the boolean result.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
    MachinePointerInfo PtrInfo =
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);

    assert(BVN->getNumOperands() == 4 &&
      "BUILD_VECTOR for v4i1 does not have 4 operands");

    bool IsConst = true;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;
      if (!isa<ConstantSDNode>(BVN->getOperand(i))) {
        IsConst = false;
        break;
      }
    }

    if (IsConst) {
      Constant *One =
        ConstantFP::get(Type::getFloatTy(*DAG.getContext()), 1.0);
      Constant *NegOne =
        ConstantFP::get(Type::getFloatTy(*DAG.getContext()), -1.0);

      Constant *CV[4];
      for (unsigned i = 0; i < 4; ++i) {
        if (BVN->getOperand(i).isUndef())
          CV[i] = UndefValue::get(Type::getFloatTy(*DAG.getContext()));
        else if (isNullConstant(BVN->getOperand(i)))
          CV[i] = NegOne;
        else
          CV[i] = One;
      }

      Constant *CP = ConstantVector::get(CV);
      SDValue CPIdx =
          DAG.getConstantPool(CP, getPointerTy(DAG.getDataLayout()), Align(16));

      SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
      SDVTList VTs = DAG.getVTList({MVT::v4i1, /*chain*/ MVT::Other});
      return DAG.getMemIntrinsicNode(
          PPCISD::QVLFSb, dl, VTs, Ops, MVT::v4f32,
          MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
    }

    SmallVector<SDValue, 4> Stores;
    for (unsigned i = 0; i < 4; ++i) {
      if (BVN->getOperand(i).isUndef()) continue;

      unsigned Offset = 4*i;
      SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
      Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);

      unsigned StoreSize = BVN->getOperand(i).getValueType().getStoreSize();
      if (StoreSize > 4) {
        Stores.push_back(
            DAG.getTruncStore(DAG.getEntryNode(), dl, BVN->getOperand(i), Idx,
                              PtrInfo.getWithOffset(Offset), MVT::i32));
      } else {
        SDValue StoreValue = BVN->getOperand(i);
        if (StoreSize < 4)
          StoreValue = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, StoreValue);

        Stores.push_back(DAG.getStore(DAG.getEntryNode(), dl, StoreValue, Idx,
                                      PtrInfo.getWithOffset(Offset)));
      }
    }

    SDValue StoreChain;
    if (!Stores.empty())
      StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
    else
      StoreChain = DAG.getEntryNode();

    // Now load from v4i32 into the QPX register; this will extend it to
    // v4i64 but not yet convert it to a floating point. Nevertheless, this
    // is typed as v4f64 because the QPX register integer states are not
    // explicitly represented.

    SDValue Ops[] = {StoreChain,
                     DAG.getConstant(Intrinsic::ppc_qpx_qvlfiwz, dl, MVT::i32),
                     FIdx};
    SDVTList VTs = DAG.getVTList({MVT::v4f64, /*chain*/ MVT::Other});

    SDValue LoadedVect = DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN,
      dl, VTs, Ops, MVT::v4i32, PtrInfo);
    LoadedVect = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
      DAG.getConstant(Intrinsic::ppc_qpx_qvfcfidu, dl, MVT::i32),
      LoadedVect);

    SDValue FPZeros = DAG.getConstantFP(0.0, dl, MVT::v4f64);

    return DAG.getSetCC(dl, MVT::v4i1, LoadedVect, FPZeros, ISD::SETEQ);
  }

  // All other QPX vectors are handled by generic code.
  if (Subtarget.hasQPX())
    return SDValue();

  // Check if this is a splat of a constant value.
  APInt APSplatBits, APSplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  bool BVNIsConstantSplat =
      BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
                           HasAnyUndefs, 0, !Subtarget.isLittleEndian());

  // If it is a splat of a double, check if we can shrink it to a 32 bit
  // non-denormal float which when converted back to double gives us the same
  // double. This is to exploit the XXSPLTIDP instruction.
  if (BVNIsConstantSplat && Subtarget.hasPrefixInstrs() &&
      (SplatBitSize == 64) && (Op->getValueType(0) == MVT::v2f64) &&
      convertToNonDenormSingle(APSplatBits)) {
    SDValue SplatNode = DAG.getNode(
        PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
        DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
    return DAG.getBitcast(Op.getValueType(), SplatNode);
  }

  if (!BVNIsConstantSplat || SplatBitSize > 32) {

    const SDValue *InputLoad = getNormalLoadInput(Op.getOperand(0));
    // Handle load-and-splat patterns as we have instructions that will do this
    // in one go.
    if (InputLoad && DAG.isSplatValue(Op, true)) {
      LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);

      // We have handling for 4 and 8 byte elements.
      unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();

      // Checking for a single use of this load, we have to check for vector
      // width (128 bits) / ElementSize uses (since each operand of the
      // BUILD_VECTOR is a separate use of the value).
      if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
          ((Subtarget.hasVSX() && ElementSize == 64) ||
           (Subtarget.hasP9Vector() && ElementSize == 32))) {
        SDValue Ops[] = {
          LD->getChain(),    // Chain
          LD->getBasePtr(),  // Ptr
          DAG.getValueType(Op.getValueType()) // VT
        };
        return
          DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
                                  DAG.getVTList(Op.getValueType(), MVT::Other),
                                  Ops, LD->getMemoryVT(), LD->getMemOperand());
      }
    }

    // BUILD_VECTOR nodes that are not constant splats of up to 32-bits can be
    // lowered to VSX instructions under certain conditions.
    // Without VSX, there is no pattern more efficient than expanding the node.
    if (Subtarget.hasVSX() &&
        haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
                                        Subtarget.hasP8Vector()))
      return Op;
    return SDValue();
  }

  uint64_t SplatBits = APSplatBits.getZExtValue();
  uint64_t SplatUndef = APSplatUndef.getZExtValue();
  unsigned SplatSize = SplatBitSize / 8;

  // First, handle single instruction cases.

  // All zeros?
  if (SplatBits == 0) {
    // Canonicalize all zero vectors to be v4i32.
    if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
      SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
      Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
    }
    return Op;
  }

  // We have XXSPLTIW for constant splats four bytes wide.
  // Since the vector length is a multiple of 4, 2-byte splats can be replaced
  // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
  // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
  // turned into a 4-byte splat of 0xABABABAB.
  if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
    return getCanonicalConstSplat((SplatBits |= SplatBits << 16), SplatSize * 2,
                                  Op.getValueType(), DAG, dl);

  if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
    return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
                                  dl);

  // We have XXSPLTIB for constant splats one byte wide.
  if (Subtarget.hasP9Vector() && SplatSize == 1)
    return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
                                  dl);

  // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
  int32_t SextVal = (int32_t(SplatBits << (32-SplatBitSize)) >>
                     (32-SplatBitSize));
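  // For example, a 16-bit splat of 0xFFF0 sign-extends to SextVal == -16,
  // which a single vspltish can materialize.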
  if (SextVal >= -16 && SextVal <= 15)
    return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
                                  dl);

  // Two instruction sequences.

  // If this value is in the range [-32,30] and is even, use:
  //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
  // If this value is in the range [17,31] and is odd, use:
  //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
  // If this value is in the range [-31,-17] and is odd, use:
  //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
  // Note the last two are three-instruction sequences.
  if (SextVal >= -32 && SextVal <= 31) {
    // To avoid having these optimizations undone by constant folding,
    // we convert to a pseudo that will be expanded later into one of
    // the above forms.
    SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
    EVT VT = (SplatSize == 1 ? MVT::v16i8 :
              (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
    SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
    SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
    if (VT == Op.getValueType())
      return RetVal;
    else
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
  }

  // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
  // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
  // for fneg/fabs.
  if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
    // Make -1 and vspltisw -1:
    SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);

    // Make the VSLW intrinsic, computing 0x8000_0000.
    SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
                                   OnesV, DAG, dl);

    // xor by OnesV to invert it.
    Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }

  // Check to see if this is a wide variety of vsplti*, binop self cases.
  static const signed char SplatCsts[] = {
    -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
    -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
  };

  for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
    // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
    // cases which are ambiguous (e.g. formation of 0x8000_0000).
    int i = SplatCsts[idx];

    // Figure out what shift amount will be used by altivec if shifted by i in
    // this splat size.
    unsigned TypeShiftAmt = i & (SplatBitSize-1);

    // vsplti + shl self.
    if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
      SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
        Intrinsic::ppc_altivec_vslw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + srl self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
        Intrinsic::ppc_altivec_vsrw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + sra self.
    if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
      SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vsrab, Intrinsic::ppc_altivec_vsrah, 0,
        Intrinsic::ppc_altivec_vsraw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // vsplti + rol self.
    if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
                         ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
      SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
      static const unsigned IIDs[] = { // Intrinsic to use for each size.
        Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
        Intrinsic::ppc_altivec_vrlw
      };
      Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
      return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
    }

    // t = vsplti c, result = vsldoi t, t, 1
    if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
      SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 2
    if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
      SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
    // t = vsplti c, result = vsldoi t, t, 3
    if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
      SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
      unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
      return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
    }
  }

  return SDValue();
}

/// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
/// the specified operations to build the shuffle.
static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
                                      SDValue RHS, SelectionDAG &DAG,
                                      const SDLoc &dl) {
  unsigned OpNum = (PFEntry >> 26) & 0x0F;
  unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
  unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
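  // LHSID and RHSID each pack four shuffle indices as base-9 digits (each
  // in [0,8]), which fits in 13 bits since 9^4 < 2^13. For example, the LHS
  // identity <0,1,2,3> encodes as ((0*9+1)*9+2)*9+3 == 102, the value the
  // OP_COPY case below compares against.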
9501 
9502   enum {
9503     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9504     OP_VMRGHW,
9505     OP_VMRGLW,
9506     OP_VSPLTISW0,
9507     OP_VSPLTISW1,
9508     OP_VSPLTISW2,
9509     OP_VSPLTISW3,
9510     OP_VSLDOI4,
9511     OP_VSLDOI8,
9512     OP_VSLDOI12
9513   };
9514 
9515   if (OpNum == OP_COPY) {
9516     if (LHSID == (1*9+2)*9+3) return LHS;
9517     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9518     return RHS;
9519   }
9520 
9521   SDValue OpLHS, OpRHS;
9522   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9523   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9524 
9525   int ShufIdxs[16];
9526   switch (OpNum) {
9527   default: llvm_unreachable("Unknown i32 permute!");
9528   case OP_VMRGHW:
9529     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
9530     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9531     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
9532     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9533     break;
9534   case OP_VMRGLW:
9535     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9536     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9537     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9538     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9539     break;
9540   case OP_VSPLTISW0:
9541     for (unsigned i = 0; i != 16; ++i)
9542       ShufIdxs[i] = (i&3)+0;
9543     break;
9544   case OP_VSPLTISW1:
9545     for (unsigned i = 0; i != 16; ++i)
9546       ShufIdxs[i] = (i&3)+4;
9547     break;
9548   case OP_VSPLTISW2:
9549     for (unsigned i = 0; i != 16; ++i)
9550       ShufIdxs[i] = (i&3)+8;
9551     break;
9552   case OP_VSPLTISW3:
9553     for (unsigned i = 0; i != 16; ++i)
9554       ShufIdxs[i] = (i&3)+12;
9555     break;
9556   case OP_VSLDOI4:
9557     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9558   case OP_VSLDOI8:
9559     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9560   case OP_VSLDOI12:
9561     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9562   }
9563   EVT VT = OpLHS.getValueType();
9564   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9565   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9566   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9567   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9568 }
9569 
9570 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9571 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9572 /// SDValue.
9573 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9574                                            SelectionDAG &DAG) const {
9575   const unsigned BytesInVector = 16;
9576   bool IsLE = Subtarget.isLittleEndian();
9577   SDLoc dl(N);
9578   SDValue V1 = N->getOperand(0);
9579   SDValue V2 = N->getOperand(1);
9580   unsigned ShiftElts = 0, InsertAtByte = 0;
9581   bool Swap = false;
9582 
9583   // Shifts required to get the byte we want at element 7.
9584   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
9585                                    0, 15, 14, 13, 12, 11, 10, 9};
9586   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9587                                 1, 2,  3,  4,  5,  6,  7,  8};
9588 
9589   ArrayRef<int> Mask = N->getMask();
9590   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9591 
9592   // For each mask element, find out if we're just inserting something
9593   // from V2 into V1 or vice versa.
9594   // Possible permutations inserting an element from V2 into V1:
9595   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9596   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9597   //   ...
9598   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9599   // Inserting from V1 into V2 will be similar, except mask range will be
9600   // [16,31].
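  // For example, the mask <0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,31> inserts
  // byte 15 of V2 into byte 15 of an otherwise unchanged V1.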
9601 
9602   bool FoundCandidate = false;
9603   // If both vector operands for the shuffle are the same vector, the mask
9604   // will contain only elements from the first one and the second one will be
9605   // undef.
9606   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9607   // Go through the mask of bytes to find an element that's being moved
9608   // from one vector to the other.
9609   for (unsigned i = 0; i < BytesInVector; ++i) {
9610     unsigned CurrentElement = Mask[i];
9611     // If the 2nd operand is undefined, we should only look for the VINSERTB
9612     // source element (7 for big endian, 8 for little endian) in the Mask.
9613     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9614       continue;
9615 
9616     bool OtherElementsInOrder = true;
9617     // Examine the other elements in the Mask to see if they're in original
9618     // order.
9619     for (unsigned j = 0; j < BytesInVector; ++j) {
9620       if (j == i)
9621         continue;
9622       // If CurrentElement is from V1 [0,15], we expect the rest of the Mask
9623       // to be from V2 [16,31] and vice versa, unless the 2nd operand is
9624       // undefined, in which case we always pick from the 1st operand.
9625       int MaskOffset =
9626           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9627       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9628         OtherElementsInOrder = false;
9629         break;
9630       }
9631     }
9632     // If other elements are in original order, we record the number of shifts
9633     // we need to get the element we want into element 7. Also record which byte
9634     // in the vector we should insert into.
9635     if (OtherElementsInOrder) {
9636       // If 2nd operand is undefined, we assume no shifts and no swapping.
9637       if (V2.isUndef()) {
9638         ShiftElts = 0;
9639         Swap = false;
9640       } else {
9641         // Only the low 4 bits matter; operands are swapped if CurrentElement >= 16.
9642         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9643                          : BigEndianShifts[CurrentElement & 0xF];
9644         Swap = CurrentElement < BytesInVector;
9645       }
9646       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9647       FoundCandidate = true;
9648       break;
9649     }
9650   }
9651 
9652   if (!FoundCandidate)
9653     return SDValue();
9654 
9655   // Candidate found, construct the proper SDAG sequence with VINSERTB,
9656   // optionally with VECSHL if shift is required.
9657   if (Swap)
9658     std::swap(V1, V2);
9659   if (V2.isUndef())
9660     V2 = V1;
9661   if (ShiftElts) {
9662     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9663                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9664     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9665                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9666   }
9667   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9668                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
9669 }
9670 
9671 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9672 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9673 /// SDValue.
9674 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9675                                            SelectionDAG &DAG) const {
9676   const unsigned NumHalfWords = 8;
9677   const unsigned BytesInVector = NumHalfWords * 2;
9678   // Check that the shuffle is on half-words.
9679   if (!isNByteElemShuffleMask(N, 2, 1))
9680     return SDValue();
9681 
9682   bool IsLE = Subtarget.isLittleEndian();
9683   SDLoc dl(N);
9684   SDValue V1 = N->getOperand(0);
9685   SDValue V2 = N->getOperand(1);
9686   unsigned ShiftElts = 0, InsertAtByte = 0;
9687   bool Swap = false;
9688 
9689   // Shifts required to get the half-word we want at element 3.
9690   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9691   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9692 
9693   uint32_t Mask = 0;
9694   uint32_t OriginalOrderLow = 0x1234567;
9695   uint32_t OriginalOrderHigh = 0x89ABCDEF;
9696   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
9697   // 32-bit space, since we only need a 4-bit nibble per element.
9698   for (unsigned i = 0; i < NumHalfWords; ++i) {
9699     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9700     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9701   }
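  // For example, an identity mask (half-word i taken from element i of V1)
  // packs to Mask == 0x01234567, matching OriginalOrderLow.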
9702 
9703   // For each mask element, find out if we're just inserting something
9704   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9705   // from V2 into V1:
9706   //   X, 1, 2, 3, 4, 5, 6, 7
9707   //   0, X, 2, 3, 4, 5, 6, 7
9708   //   0, 1, X, 3, 4, 5, 6, 7
9709   //   0, 1, 2, X, 4, 5, 6, 7
9710   //   0, 1, 2, 3, X, 5, 6, 7
9711   //   0, 1, 2, 3, 4, X, 6, 7
9712   //   0, 1, 2, 3, 4, 5, X, 7
9713   //   0, 1, 2, 3, 4, 5, 6, X
9714   // Inserting from V1 into V2 will be similar, except mask range will be [8,15].
9715 
9716   bool FoundCandidate = false;
9717   // Go through the mask of half-words to find an element that's being moved
9718   // from one vector to the other.
9719   for (unsigned i = 0; i < NumHalfWords; ++i) {
9720     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9721     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9722     uint32_t MaskOtherElts = ~(0xF << MaskShift);
9723     uint32_t TargetOrder = 0x0;
9724 
9725     // If both vector operands for the shuffle are the same vector, the mask
9726     // will contain only elements from the first one and the second one will be
9727     // undef.
9728     if (V2.isUndef()) {
9729       ShiftElts = 0;
9730       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9731       TargetOrder = OriginalOrderLow;
9732       Swap = false;
9733       // Skip if this is not the correct element, or if the mask of the other
9734       // elements doesn't match our expected order.
9735       if (MaskOneElt == VINSERTHSrcElem &&
9736           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9737         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9738         FoundCandidate = true;
9739         break;
9740       }
9741     } else { // If both operands are defined.
9742       // Target order is [8,15] if the current mask is between [0,7].
9743       TargetOrder =
9744           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9745       // Skip if the mask of the other elements doesn't match our expected order.
9746       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9747         // We only need the last 3 bits for the number of shifts.
9748         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9749                          : BigEndianShifts[MaskOneElt & 0x7];
9750         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9751         Swap = MaskOneElt < NumHalfWords;
9752         FoundCandidate = true;
9753         break;
9754       }
9755     }
9756   }
9757 
9758   if (!FoundCandidate)
9759     return SDValue();
9760 
9761   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9762   // optionally with VECSHL if shift is required.
9763   if (Swap)
9764     std::swap(V1, V2);
9765   if (V2.isUndef())
9766     V2 = V1;
9767   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9768   if (ShiftElts) {
9769     // Double ShiftElts because we're left shifting on v16i8 type.
9770     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9771                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9772     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9773     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9774                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9775     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9776   }
9777   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9778   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9779                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9780   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9781 }
9782 
9783 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9784 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9785 /// return the default SDValue.
9786 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9787                                               SelectionDAG &DAG) const {
9788   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9789   // to v16i8. Peek through the bitcasts to get the actual operands.
9790   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9791   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9792 
9793   auto ShuffleMask = SVN->getMask();
9794   SDValue VecShuffle(SVN, 0);
9795   SDLoc DL(SVN);
9796 
9797   // Check that we have a four byte shuffle.
9798   if (!isNByteElemShuffleMask(SVN, 4, 1))
9799     return SDValue();
9800 
9801   // Canonicalize so that the RHS is a BUILD_VECTOR when lowering to xxsplti32dx.
9802   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9803     std::swap(LHS, RHS);
9804     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9805     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9806   }
9807 
9808   // Ensure that the RHS is a vector of constants.
9809   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9810   if (!BVN)
9811     return SDValue();
9812 
9813   // Check if RHS is a splat of 4-bytes (or smaller).
9814   APInt APSplatValue, APSplatUndef;
9815   unsigned SplatBitSize;
9816   bool HasAnyUndefs;
9817   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9818                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9819       SplatBitSize > 32)
9820     return SDValue();
9821 
9822   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9823   // The instruction splats a constant C into two words of the source vector
9824   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9825   // Thus we check that the shuffle mask is the equivalent of
9826   // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9827   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9828   // within each word are consecutive, so we only need to check the first byte.
9829   SDValue Index;
9830   bool IsLE = Subtarget.isLittleEndian();
9831   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9832       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9833        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9834     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9835   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9836            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9837             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9838     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9839   else
9840     return SDValue();
9841 
9842   // If the splat is narrower than 32-bits, we need to get the 32-bit value
9843   // for XXSPLTI32DX.
9844   unsigned SplatVal = APSplatValue.getZExtValue();
9845   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9846     SplatVal |= (SplatVal << SplatBitSize);
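  // For example, an 8-bit splat of 0xAB widens to 0xABAB and then 0xABABABAB.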
9847 
9848   SDValue SplatNode = DAG.getNode(
9849       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9850       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9851   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9852 }
9853 
9854 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9855 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if the shift amount is
9856 /// a multiple of 8. Otherwise we convert it to a scalar rotation (i128),
9857 /// i.e. (or (shl x, C1), (srl x, 128-C1)).
9858 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9859   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9860   assert(Op.getValueType() == MVT::v1i128 &&
9861          "Only set v1i128 as custom, other type shouldn't reach here!");
9862   SDLoc dl(Op);
9863   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9864   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9865   unsigned SHLAmt = N1.getConstantOperandVal(0);
9866   if (SHLAmt % 8 == 0) {
9867     SmallVector<int, 16> Mask(16, 0);
9868     std::iota(Mask.begin(), Mask.end(), 0);
9869     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
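    // For example, SHLAmt == 16 rotates the identity byte mask left by two
    // positions, producing <2,3,...,15,0,1>.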
9870     if (SDValue Shuffle =
9871             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9872                                  DAG.getUNDEF(MVT::v16i8), Mask))
9873       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9874   }
9875   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9876   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9877                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9878   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9879                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9880   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9881   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9882 }
9883 
9884 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9885 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9886 /// return the code it can be lowered into.  Worst case, it can always be
9887 /// lowered into a vperm.
9888 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9889                                                SelectionDAG &DAG) const {
9890   SDLoc dl(Op);
9891   SDValue V1 = Op.getOperand(0);
9892   SDValue V2 = Op.getOperand(1);
9893   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9894 
9895   // Any nodes that were combined in the target-independent combiner prior
9896   // to vector legalization will not be sent to the target combine. Try to
9897   // combine it here.
9898   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9899     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9900       return NewShuffle;
9901     Op = NewShuffle;
9902     SVOp = cast<ShuffleVectorSDNode>(Op);
9903     V1 = Op.getOperand(0);
9904     V2 = Op.getOperand(1);
9905   }
9906   EVT VT = Op.getValueType();
9907   bool isLittleEndian = Subtarget.isLittleEndian();
9908 
9909   unsigned ShiftElts, InsertAtByte;
9910   bool Swap = false;
9911 
9912   // If this is a load-and-splat, we can do that with a single instruction
9913   // in some cases. However if the load has multiple uses, we don't want to
9914   // combine it because that will just produce multiple loads.
9915   const SDValue *InputLoad = getNormalLoadInput(V1);
9916   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9917       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9918       InputLoad->hasOneUse()) {
9919     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9920     int SplatIdx =
9921       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9922 
9923     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9924     // For 4-byte load-and-splat, we need Power9.
9925     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
9926       uint64_t Offset = 0;
9927       if (IsFourByte)
9928         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9929       else
9930         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
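      // For example, a 4-byte splat of element 1 on little endian reads from
      // byte offset (3 - 1) * 4 == 8.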
9931 
9932       // If we are loading a partial vector, it does not make sense to adjust
9933       // the base pointer. This happens with (splat (s_to_v_permuted (ld))).
9934       if (LD->getMemoryVT().getSizeInBits() == (IsFourByte ? 32 : 64))
9935         Offset = 0;
9936       SDValue BasePtr = LD->getBasePtr();
9937       if (Offset != 0)
9938         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9939                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9940       SDValue Ops[] = {
9941         LD->getChain(),    // Chain
9942         BasePtr,           // BasePtr
9943         DAG.getValueType(Op.getValueType()) // VT
9944       };
9945       SDVTList VTL =
9946         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9947       SDValue LdSplt =
9948         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9949                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9950       if (LdSplt.getValueType() != SVOp->getValueType(0))
9951         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9952       return LdSplt;
9953     }
9954   }
9955   if (Subtarget.hasP9Vector() &&
9956       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9957                            isLittleEndian)) {
9958     if (Swap)
9959       std::swap(V1, V2);
9960     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9961     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9962     if (ShiftElts) {
9963       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9964                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9965       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9966                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9967       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9968     }
9969     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9970                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9971     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9972   }
9973 
9974   if (Subtarget.hasPrefixInstrs()) {
9975     SDValue SplatInsertNode;
9976     if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9977       return SplatInsertNode;
9978   }
9979 
9980   if (Subtarget.hasP9Altivec()) {
9981     SDValue NewISDNode;
9982     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9983       return NewISDNode;
9984 
9985     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9986       return NewISDNode;
9987   }
9988 
9989   if (Subtarget.hasVSX() &&
9990       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9991     if (Swap)
9992       std::swap(V1, V2);
9993     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9994     SDValue Conv2 =
9995         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9996 
9997     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9998                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9999     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
10000   }
10001 
10002   if (Subtarget.hasVSX() &&
10003       PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
10004     if (Swap)
10005       std::swap(V1, V2);
10006     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10007     SDValue Conv2 =
10008         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
10009 
10010     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
10011                                  DAG.getConstant(ShiftElts, dl, MVT::i32));
10012     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
10013   }
10014 
10015   if (Subtarget.hasP9Vector()) {
10016     if (PPC::isXXBRHShuffleMask(SVOp)) {
10017       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
10018       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
10019       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
10020     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
10021       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10022       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
10023       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
10024     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
10025       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
10026       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
10027       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
10028     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
10029       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
10030       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
10031       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
10032     }
10033   }
10034 
10035   if (Subtarget.hasVSX()) {
10036     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
10037       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
10038 
10039       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
10040       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
10041                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
10042       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
10043     }
10044 
10045     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
10046     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
10047       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
10048       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
10049       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
10050     }
10051   }
10052 
10053   if (Subtarget.hasQPX()) {
10054     if (VT.getVectorNumElements() != 4)
10055       return SDValue();
10056 
10057     if (V2.isUndef()) V2 = V1;
10058 
10059     int AlignIdx = PPC::isQVALIGNIShuffleMask(SVOp);
10060     if (AlignIdx != -1) {
10061       return DAG.getNode(PPCISD::QVALIGNI, dl, VT, V1, V2,
10062                          DAG.getConstant(AlignIdx, dl, MVT::i32));
10063     } else if (SVOp->isSplat()) {
10064       int SplatIdx = SVOp->getSplatIndex();
10065       if (SplatIdx >= 4) {
10066         std::swap(V1, V2);
10067         SplatIdx -= 4;
10068       }
10069 
10070       return DAG.getNode(PPCISD::QVESPLATI, dl, VT, V1,
10071                          DAG.getConstant(SplatIdx, dl, MVT::i32));
10072     }
10073 
10074     // Lower this into a qvgpci/qvfperm pair.
10075 
10076     // Compute the qvgpci literal
10077     unsigned idx = 0;
10078     for (unsigned i = 0; i < 4; ++i) {
10079       int m = SVOp->getMaskElt(i);
10080       unsigned mm = m >= 0 ? (unsigned) m : i;
10081       idx |= mm << (3-i)*3;
10082     }
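    // Each lane contributes a 3-bit selector, so the identity mask <0,1,2,3>
    // yields idx == (0 << 9) | (1 << 6) | (2 << 3) | 3 == 83.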
10083 
10084     SDValue V3 = DAG.getNode(PPCISD::QVGPCI, dl, MVT::v4f64,
10085                              DAG.getConstant(idx, dl, MVT::i32));
10086     return DAG.getNode(PPCISD::QVFPERM, dl, VT, V1, V2, V3);
10087   }
10088 
10089   // Cases that are handled by instructions that take permute immediates
10090   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
10091   // selected by the instruction selector.
10092   if (V2.isUndef()) {
10093     if (PPC::isSplatShuffleMask(SVOp, 1) ||
10094         PPC::isSplatShuffleMask(SVOp, 2) ||
10095         PPC::isSplatShuffleMask(SVOp, 4) ||
10096         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
10097         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
10098         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
10099         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
10100         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
10101         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
10102         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
10103         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
10104         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
10105         (Subtarget.hasP8Altivec() && (
10106          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
10107          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
10108          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
10109       return Op;
10110     }
10111   }
10112 
10113   // Altivec has a variety of "shuffle immediates" that take two vector inputs
10114   // and produce a fixed permutation.  If any of these match, do not lower to
10115   // VPERM.
10116   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
10117   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10118       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10119       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
10120       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10121       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10122       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10123       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
10124       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
10125       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
10126       (Subtarget.hasP8Altivec() && (
10127        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
10128        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
10129        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
10130     return Op;
10131 
10132   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
10133   // perfect shuffle table to emit an optimal matching sequence.
10134   ArrayRef<int> PermMask = SVOp->getMask();
10135 
10136   unsigned PFIndexes[4];
10137   bool isFourElementShuffle = true;
10138   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
10139     unsigned EltNo = 8;   // Start out undef.
10140     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
10141       if (PermMask[i*4+j] < 0)
10142         continue;   // Undef, ignore it.
10143 
10144       unsigned ByteSource = PermMask[i*4+j];
10145       if ((ByteSource & 3) != j) {
10146         isFourElementShuffle = false;
10147         break;
10148       }
10149 
10150       if (EltNo == 8) {
10151         EltNo = ByteSource/4;
10152       } else if (EltNo != ByteSource/4) {
10153         isFourElementShuffle = false;
10154         break;
10155       }
10156     }
10157     PFIndexes[i] = EltNo;
10158   }
10159 
10160   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
10161   // perfect shuffle vector to determine if it is cost effective to do this as
10162   // discrete instructions, or whether we should use a vperm.
10163   // For now, we skip this for little endian until such time as we have a
10164   // little-endian perfect shuffle table.
10165   if (isFourElementShuffle && !isLittleEndian) {
10166     // Compute the index in the perfect shuffle table.
10167     unsigned PFTableIndex =
10168       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
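    // The four indices (each in [0,8]) form a base-9 number, so the table has
    // 9*9*9*9 == 6561 entries, one per possible 4-element shuffle.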
10169 
10170     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
10171     unsigned Cost  = (PFEntry >> 30);
10172 
10173     // Determining when to avoid vperm is tricky.  Many things affect the cost
10174     // of vperm, particularly how many times the perm mask needs to be computed.
10175     // For example, if the perm mask can be hoisted out of a loop or is already
10176     // used (perhaps because there are multiple permutes with the same shuffle
10177     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
10178     // the loop requires an extra register.
10179     //
10180     // As a compromise, we only emit discrete instructions if the shuffle can be
10181     // generated in 3 or fewer operations.  When we have loop information
10182     // available, if this block is within a loop, we should avoid using vperm
10183     // for 3-operation perms and use a constant pool load instead.
10184     if (Cost < 3)
10185       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
10186   }
10187 
10188   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
10189   // vector that will get spilled to the constant pool.
10190   if (V2.isUndef()) V2 = V1;
10191 
10192   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
10193   // that it is in input element units, not in bytes.  Convert now.
10194 
10195   // For little endian, the order of the input vectors is reversed, and
10196   // the permutation mask is complemented with respect to 31.  This is
10197   // necessary to produce proper semantics with the big-endian-biased vperm
10198   // instruction.
10199   EVT EltVT = V1.getValueType().getVectorElementType();
10200   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
10201 
10202   SmallVector<SDValue, 16> ResultMask;
10203   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
10204     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
10205 
10206     for (unsigned j = 0; j != BytesPerElement; ++j)
10207       if (isLittleEndian)
10208         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
10209                                              dl, MVT::i32));
10210       else
10211         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
10212                                              MVT::i32));
10213   }
10214 
10215   ShufflesHandledWithVPERM++;
10216   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
10217   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
10218   LLVM_DEBUG(SVOp->dump());
10219   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
10220   LLVM_DEBUG(VPermMask.dump());
10221 
10222   if (isLittleEndian)
10223     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10224                        V2, V1, VPermMask);
10225   else
10226     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10227                        V1, V2, VPermMask);
10228 }
10229 
10230 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
10231 /// vector comparison.  If it is, return true and fill in CompareOpc/isDot with
10232 /// information about the intrinsic.
10233 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
10234                                  bool &isDot, const PPCSubtarget &Subtarget) {
10235   unsigned IntrinsicID =
10236       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
10237   CompareOpc = -1;
10238   isDot = false;
10239   switch (IntrinsicID) {
10240   default:
10241     return false;
10242   // Comparison predicates.
10243   case Intrinsic::ppc_altivec_vcmpbfp_p:
10244     CompareOpc = 966;
10245     isDot = true;
10246     break;
10247   case Intrinsic::ppc_altivec_vcmpeqfp_p:
10248     CompareOpc = 198;
10249     isDot = true;
10250     break;
10251   case Intrinsic::ppc_altivec_vcmpequb_p:
10252     CompareOpc = 6;
10253     isDot = true;
10254     break;
10255   case Intrinsic::ppc_altivec_vcmpequh_p:
10256     CompareOpc = 70;
10257     isDot = true;
10258     break;
10259   case Intrinsic::ppc_altivec_vcmpequw_p:
10260     CompareOpc = 134;
10261     isDot = true;
10262     break;
10263   case Intrinsic::ppc_altivec_vcmpequd_p:
10264     if (Subtarget.hasP8Altivec()) {
10265       CompareOpc = 199;
10266       isDot = true;
10267     } else
10268       return false;
10269     break;
10270   case Intrinsic::ppc_altivec_vcmpneb_p:
10271   case Intrinsic::ppc_altivec_vcmpneh_p:
10272   case Intrinsic::ppc_altivec_vcmpnew_p:
10273   case Intrinsic::ppc_altivec_vcmpnezb_p:
10274   case Intrinsic::ppc_altivec_vcmpnezh_p:
10275   case Intrinsic::ppc_altivec_vcmpnezw_p:
10276     if (Subtarget.hasP9Altivec()) {
10277       switch (IntrinsicID) {
10278       default:
10279         llvm_unreachable("Unknown comparison intrinsic.");
10280       case Intrinsic::ppc_altivec_vcmpneb_p:
10281         CompareOpc = 7;
10282         break;
10283       case Intrinsic::ppc_altivec_vcmpneh_p:
10284         CompareOpc = 71;
10285         break;
10286       case Intrinsic::ppc_altivec_vcmpnew_p:
10287         CompareOpc = 135;
10288         break;
10289       case Intrinsic::ppc_altivec_vcmpnezb_p:
10290         CompareOpc = 263;
10291         break;
10292       case Intrinsic::ppc_altivec_vcmpnezh_p:
10293         CompareOpc = 327;
10294         break;
10295       case Intrinsic::ppc_altivec_vcmpnezw_p:
10296         CompareOpc = 391;
10297         break;
10298       }
10299       isDot = true;
10300     } else
10301       return false;
10302     break;
10303   case Intrinsic::ppc_altivec_vcmpgefp_p:
10304     CompareOpc = 454;
10305     isDot = true;
10306     break;
10307   case Intrinsic::ppc_altivec_vcmpgtfp_p:
10308     CompareOpc = 710;
10309     isDot = true;
10310     break;
10311   case Intrinsic::ppc_altivec_vcmpgtsb_p:
10312     CompareOpc = 774;
10313     isDot = true;
10314     break;
10315   case Intrinsic::ppc_altivec_vcmpgtsh_p:
10316     CompareOpc = 838;
10317     isDot = true;
10318     break;
10319   case Intrinsic::ppc_altivec_vcmpgtsw_p:
10320     CompareOpc = 902;
10321     isDot = true;
10322     break;
10323   case Intrinsic::ppc_altivec_vcmpgtsd_p:
10324     if (Subtarget.hasP8Altivec()) {
10325       CompareOpc = 967;
10326       isDot = true;
10327     } else
10328       return false;
10329     break;
10330   case Intrinsic::ppc_altivec_vcmpgtub_p:
10331     CompareOpc = 518;
10332     isDot = true;
10333     break;
10334   case Intrinsic::ppc_altivec_vcmpgtuh_p:
10335     CompareOpc = 582;
10336     isDot = true;
10337     break;
10338   case Intrinsic::ppc_altivec_vcmpgtuw_p:
10339     CompareOpc = 646;
10340     isDot = true;
10341     break;
10342   case Intrinsic::ppc_altivec_vcmpgtud_p:
10343     if (Subtarget.hasP8Altivec()) {
10344       CompareOpc = 711;
10345       isDot = true;
10346     } else
10347       return false;
10348     break;
10349 
10350   // VSX predicate comparisons use the same infrastructure
10351   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10352   case Intrinsic::ppc_vsx_xvcmpgedp_p:
10353   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10354   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10355   case Intrinsic::ppc_vsx_xvcmpgesp_p:
10356   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10357     if (Subtarget.hasVSX()) {
10358       switch (IntrinsicID) {
10359       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10360         CompareOpc = 99;
10361         break;
10362       case Intrinsic::ppc_vsx_xvcmpgedp_p:
10363         CompareOpc = 115;
10364         break;
10365       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10366         CompareOpc = 107;
10367         break;
10368       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10369         CompareOpc = 67;
10370         break;
10371       case Intrinsic::ppc_vsx_xvcmpgesp_p:
10372         CompareOpc = 83;
10373         break;
10374       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10375         CompareOpc = 75;
10376         break;
10377       }
10378       isDot = true;
10379     } else
10380       return false;
10381     break;
10382 
10383   // Normal Comparisons.
10384   case Intrinsic::ppc_altivec_vcmpbfp:
10385     CompareOpc = 966;
10386     break;
10387   case Intrinsic::ppc_altivec_vcmpeqfp:
10388     CompareOpc = 198;
10389     break;
10390   case Intrinsic::ppc_altivec_vcmpequb:
10391     CompareOpc = 6;
10392     break;
10393   case Intrinsic::ppc_altivec_vcmpequh:
10394     CompareOpc = 70;
10395     break;
10396   case Intrinsic::ppc_altivec_vcmpequw:
10397     CompareOpc = 134;
10398     break;
10399   case Intrinsic::ppc_altivec_vcmpequd:
10400     if (Subtarget.hasP8Altivec())
10401       CompareOpc = 199;
10402     else
10403       return false;
10404     break;
10405   case Intrinsic::ppc_altivec_vcmpneb:
10406   case Intrinsic::ppc_altivec_vcmpneh:
10407   case Intrinsic::ppc_altivec_vcmpnew:
10408   case Intrinsic::ppc_altivec_vcmpnezb:
10409   case Intrinsic::ppc_altivec_vcmpnezh:
10410   case Intrinsic::ppc_altivec_vcmpnezw:
10411     if (Subtarget.hasP9Altivec())
10412       switch (IntrinsicID) {
10413       default:
10414         llvm_unreachable("Unknown comparison intrinsic.");
10415       case Intrinsic::ppc_altivec_vcmpneb:
10416         CompareOpc = 7;
10417         break;
10418       case Intrinsic::ppc_altivec_vcmpneh:
10419         CompareOpc = 71;
10420         break;
10421       case Intrinsic::ppc_altivec_vcmpnew:
10422         CompareOpc = 135;
10423         break;
10424       case Intrinsic::ppc_altivec_vcmpnezb:
10425         CompareOpc = 263;
10426         break;
10427       case Intrinsic::ppc_altivec_vcmpnezh:
10428         CompareOpc = 327;
10429         break;
10430       case Intrinsic::ppc_altivec_vcmpnezw:
10431         CompareOpc = 391;
10432         break;
10433       }
10434     else
10435       return false;
10436     break;
10437   case Intrinsic::ppc_altivec_vcmpgefp:
10438     CompareOpc = 454;
10439     break;
10440   case Intrinsic::ppc_altivec_vcmpgtfp:
10441     CompareOpc = 710;
10442     break;
10443   case Intrinsic::ppc_altivec_vcmpgtsb:
10444     CompareOpc = 774;
10445     break;
10446   case Intrinsic::ppc_altivec_vcmpgtsh:
10447     CompareOpc = 838;
10448     break;
10449   case Intrinsic::ppc_altivec_vcmpgtsw:
10450     CompareOpc = 902;
10451     break;
10452   case Intrinsic::ppc_altivec_vcmpgtsd:
10453     if (Subtarget.hasP8Altivec())
10454       CompareOpc = 967;
10455     else
10456       return false;
10457     break;
10458   case Intrinsic::ppc_altivec_vcmpgtub:
10459     CompareOpc = 518;
10460     break;
10461   case Intrinsic::ppc_altivec_vcmpgtuh:
10462     CompareOpc = 582;
10463     break;
10464   case Intrinsic::ppc_altivec_vcmpgtuw:
10465     CompareOpc = 646;
10466     break;
10467   case Intrinsic::ppc_altivec_vcmpgtud:
10468     if (Subtarget.hasP8Altivec())
10469       CompareOpc = 711;
10470     else
10471       return false;
10472     break;
10473   }
10474   return true;
10475 }
10476 
10477 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10478 /// lower, do it, otherwise return null.
10479 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10480                                                    SelectionDAG &DAG) const {
10481   unsigned IntrinsicID =
10482     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10483 
10484   SDLoc dl(Op);
10485 
10486   if (IntrinsicID == Intrinsic::thread_pointer) {
10487     // Reads the thread pointer register, used for __builtin_thread_pointer.
10488     if (Subtarget.isPPC64())
10489       return DAG.getRegister(PPC::X13, MVT::i64);
10490     return DAG.getRegister(PPC::R2, MVT::i32);
10491   }
10492 
10493   // If this is a lowered altivec predicate compare, CompareOpc is set to the
10494   // opcode number of the comparison.
10495   int CompareOpc;
10496   bool isDot;
10497   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10498     return SDValue();    // Don't custom lower most intrinsics.
10499 
10500   // If this is a non-dot comparison, make the VCMP node and we are done.
10501   if (!isDot) {
10502     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10503                               Op.getOperand(1), Op.getOperand(2),
10504                               DAG.getConstant(CompareOpc, dl, MVT::i32));
10505     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10506   }
10507 
10508   // Create the PPCISD altivec 'dot' comparison node.
10509   SDValue Ops[] = {
10510     Op.getOperand(2),  // LHS
10511     Op.getOperand(3),  // RHS
10512     DAG.getConstant(CompareOpc, dl, MVT::i32)
10513   };
10514   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10515   SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);
10516 
10517   // Now that we have the comparison, emit a copy from the CR to a GPR.
10518   // This is flagged to the above dot comparison.
10519   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10520                                 DAG.getRegister(PPC::CR6, MVT::i32),
10521                                 CompNode.getValue(1));
10522 
10523   // Unpack the result based on how the target uses it.
10524   unsigned BitNo;   // Bit # of CR6.
10525   bool InvertBit;   // Invert result?
10526   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10527   default:  // Can't happen, don't crash on invalid number though.
10528   case 0:   // Return the value of the EQ bit of CR6.
10529     BitNo = 0; InvertBit = false;
10530     break;
10531   case 1:   // Return the inverted value of the EQ bit of CR6.
10532     BitNo = 0; InvertBit = true;
10533     break;
10534   case 2:   // Return the value of the LT bit of CR6.
10535     BitNo = 2; InvertBit = false;
10536     break;
10537   case 3:   // Return the inverted value of the LT bit of CR6.
10538     BitNo = 2; InvertBit = true;
10539     break;
10540   }
10541 
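  // After MFOCRF, the four bits of CR6 occupy bits [7:4] of the GPR (LT at
  // bit 7, EQ at bit 5), so the shift amount below is 5 for EQ and 7 for LT.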
10542   // Shift the bit into the low position.
10543   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10544                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10545   // Isolate the bit.
10546   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10547                       DAG.getConstant(1, dl, MVT::i32));
10548 
10549   // If we are supposed to, toggle the bit.
10550   if (InvertBit)
10551     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10552                         DAG.getConstant(1, dl, MVT::i32));
10553   return Flags;
10554 }
10555 
10556 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10557                                                SelectionDAG &DAG) const {
10558   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain at
10559   // the beginning of the argument list.
10560   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10561   SDLoc DL(Op);
10562   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10563   case Intrinsic::ppc_cfence: {
10564     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10565     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10566     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10567                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10568                                                   Op.getOperand(ArgStart + 1)),
10569                                       Op.getOperand(0)),
10570                    0);
10571   }
10572   default:
10573     break;
10574   }
10575   return SDValue();
10576 }
10577 
10578 // Lower scalar BSWAP64 to xxbrd.
10579 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10580   SDLoc dl(Op);
10581   // MTVSRDD
10582   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10583                    Op.getOperand(0));
10584   // XXBRD
10585   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10586   // MFVSRD
10587   int VectorIndex = 0;
10588   if (Subtarget.isLittleEndian())
10589     VectorIndex = 1;
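  // Both elements hold the same swapped value; element 1 is chosen on little
  // endian so the extract maps directly to the MFVSRD noted above.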
10590   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10591                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10592   return Op;
10593 }
10594 
10595 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10596 // compared to a value that is atomically loaded (atomic loads zero-extend).
10597 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10598                                                 SelectionDAG &DAG) const {
10599   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10600          "Expecting an atomic compare-and-swap here.");
10601   SDLoc dl(Op);
10602   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10603   EVT MemVT = AtomicNode->getMemoryVT();
10604   if (MemVT.getSizeInBits() >= 32)
10605     return Op;
10606 
10607   SDValue CmpOp = Op.getOperand(2);
10608   // If this is already correctly zero-extended, leave it alone.
10609   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10610   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10611     return Op;
10612 
10613   // Clear the high bits of the compare operand.
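  // (For i8 the mask is 0xFF; for i16 it is 0xFFFF.)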
10614   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10615   SDValue NewCmpOp =
10616     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10617                 DAG.getConstant(MaskVal, dl, MVT::i32));
10618 
10619   // Replace the existing compare operand with the properly zero-extended one.
10620   SmallVector<SDValue, 4> Ops;
10621   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10622     Ops.push_back(AtomicNode->getOperand(i));
10623   Ops[2] = NewCmpOp;
10624   MachineMemOperand *MMO = AtomicNode->getMemOperand();
10625   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10626   auto NodeTy =
10627     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10628   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10629 }
10630 
10631 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10632                                                  SelectionDAG &DAG) const {
10633   SDLoc dl(Op);
10634   // Create a stack slot that is 16-byte aligned.
10635   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10636   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10637   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10638   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10639 
10640   // Store the input value into Value#0 of the stack slot.
10641   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10642                                MachinePointerInfo());
10643   // Load it out.
10644   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10645 }
10646 
10647 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10648                                                   SelectionDAG &DAG) const {
10649   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10650          "Should only be called for ISD::INSERT_VECTOR_ELT");
10651 
10652   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10653   // We have legal lowering for constant indices but not for variable ones.
10654   if (!C)
10655     return SDValue();
10656 
10657   EVT VT = Op.getValueType();
10658   SDLoc dl(Op);
10659   SDValue V1 = Op.getOperand(0);
10660   SDValue V2 = Op.getOperand(1);
10661   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10662   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10663     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10664     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10665     unsigned InsertAtElement = C->getZExtValue();
10666     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10667     if (Subtarget.isLittleEndian()) {
10668       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10669     }
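    // For example, inserting into element 2 of a v8i16 on little endian gives
    // InsertAtByte == (16 - 2) - (2 * 2) == 10.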
10670     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10671                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10672   }
10673   return Op;
10674 }
10675 
10676 SDValue PPCTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
10677                                                    SelectionDAG &DAG) const {
10678   SDLoc dl(Op);
10679   SDNode *N = Op.getNode();
10680 
10681   assert(N->getOperand(0).getValueType() == MVT::v4i1 &&
10682          "Unknown extract_vector_elt type");
10683 
10684   SDValue Value = N->getOperand(0);
10685 
10686   // The first part of this is like the store lowering except that we don't
10687   // need to track the chain.
10688 
10689   // The values are now known to be -1 (false) or 1 (true). To convert this
10690   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10691   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
10692   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10693 
10694   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10695   // understand how to form the extending load.
10696   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10697 
10698   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10699 
10700   // Now convert to an integer and store.
10701   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10702     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10703     Value);
10704 
10705   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10706   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10707   MachinePointerInfo PtrInfo =
10708       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10709   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10710   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10711 
10712   SDValue StoreChain = DAG.getEntryNode();
10713   SDValue Ops[] = {StoreChain,
10714                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10715                    Value, FIdx};
10716   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10717 
10718   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10719     dl, VTs, Ops, MVT::v4i32, PtrInfo);
10720 
10721   // Extract the value requested.
10722   unsigned Offset = 4*cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10723   SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10724   Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10725 
10726   SDValue IntVal =
10727       DAG.getLoad(MVT::i32, dl, StoreChain, Idx, PtrInfo.getWithOffset(Offset));
10728 
10729   if (!Subtarget.useCRBits())
10730     return IntVal;
10731 
10732   return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, IntVal);
10733 }
10734 
10735 /// Lowering for QPX v4i1 loads
10736 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10737                                            SelectionDAG &DAG) const {
10738   SDLoc dl(Op);
10739   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10740   SDValue LoadChain = LN->getChain();
10741   SDValue BasePtr = LN->getBasePtr();
10742 
10743   if (Op.getValueType() == MVT::v4f64 ||
10744       Op.getValueType() == MVT::v4f32) {
10745     EVT MemVT = LN->getMemoryVT();
10746     unsigned Alignment = LN->getAlignment();
10747 
10748     // If this load is properly aligned, then it is legal.
10749     if (Alignment >= MemVT.getStoreSize())
10750       return Op;
10751 
10752     EVT ScalarVT = Op.getValueType().getScalarType(),
10753         ScalarMemVT = MemVT.getScalarType();
10754     unsigned Stride = ScalarMemVT.getStoreSize();
10755 
10756     SDValue Vals[4], LoadChains[4];
10757     for (unsigned Idx = 0; Idx < 4; ++Idx) {
10758       SDValue Load;
10759       if (ScalarVT != ScalarMemVT)
10760         Load = DAG.getExtLoad(LN->getExtensionType(), dl, ScalarVT, LoadChain,
10761                               BasePtr,
10762                               LN->getPointerInfo().getWithOffset(Idx * Stride),
10763                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10764                               LN->getMemOperand()->getFlags(), LN->getAAInfo());
10765       else
10766         Load = DAG.getLoad(ScalarVT, dl, LoadChain, BasePtr,
10767                            LN->getPointerInfo().getWithOffset(Idx * Stride),
10768                            MinAlign(Alignment, Idx * Stride),
10769                            LN->getMemOperand()->getFlags(), LN->getAAInfo());
10770 
10771       if (Idx == 0 && LN->isIndexed()) {
10772         assert(LN->getAddressingMode() == ISD::PRE_INC &&
10773                "Unknown addressing mode on vector load");
10774         Load = DAG.getIndexedLoad(Load, dl, BasePtr, LN->getOffset(),
10775                                   LN->getAddressingMode());
10776       }
10777 
10778       Vals[Idx] = Load;
10779       LoadChains[Idx] = Load.getValue(1);
10780 
10781       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10782                             DAG.getConstant(Stride, dl,
10783                                             BasePtr.getValueType()));
10784     }
10785 
10786     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10787     SDValue Value = DAG.getBuildVector(Op.getValueType(), dl, Vals);
10788 
10789     if (LN->isIndexed()) {
10790       SDValue RetOps[] = { Value, Vals[0].getValue(1), TF };
10791       return DAG.getMergeValues(RetOps, dl);
10792     }
10793 
10794     SDValue RetOps[] = { Value, TF };
10795     return DAG.getMergeValues(RetOps, dl);
10796   }
10797 
10798   assert(Op.getValueType() == MVT::v4i1 && "Unknown load to lower");
10799   assert(LN->isUnindexed() && "Indexed v4i1 loads are not supported");
10800 
10801   // To lower v4i1 from a byte array, we load the byte elements of the
10802   // vector and then reuse the BUILD_VECTOR logic.
10803 
10804   SDValue VectElmts[4], VectElmtChains[4];
10805   for (unsigned i = 0; i < 4; ++i) {
10806     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10807     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10808 
10809     VectElmts[i] = DAG.getExtLoad(
10810         ISD::EXTLOAD, dl, MVT::i32, LoadChain, Idx,
10811         LN->getPointerInfo().getWithOffset(i), MVT::i8,
10812         /* Alignment = */ 1, LN->getMemOperand()->getFlags(), LN->getAAInfo());
10813     VectElmtChains[i] = VectElmts[i].getValue(1);
10814   }
10815 
10816   LoadChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, VectElmtChains);
10817   SDValue Value = DAG.getBuildVector(MVT::v4i1, dl, VectElmts);
10818 
10819   SDValue RVals[] = { Value, LoadChain };
10820   return DAG.getMergeValues(RVals, dl);
10821 }
10822 
10823 /// Lowering for QPX vector stores (unaligned v4f64/v4f32 stores and v4i1 stores).
10824 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10825                                             SelectionDAG &DAG) const {
10826   SDLoc dl(Op);
10827   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10828   SDValue StoreChain = SN->getChain();
10829   SDValue BasePtr = SN->getBasePtr();
10830   SDValue Value = SN->getValue();
10831 
10832   if (Value.getValueType() == MVT::v4f64 ||
10833       Value.getValueType() == MVT::v4f32) {
10834     EVT MemVT = SN->getMemoryVT();
10835     unsigned Alignment = SN->getAlignment();
10836 
10837     // If this store is properly aligned, then it is legal.
10838     if (Alignment >= MemVT.getStoreSize())
10839       return Op;
10840 
10841     EVT ScalarVT = Value.getValueType().getScalarType(),
10842         ScalarMemVT = MemVT.getScalarType();
10843     unsigned Stride = ScalarMemVT.getStoreSize();
10844 
10845     SDValue Stores[4];
10846     for (unsigned Idx = 0; Idx < 4; ++Idx) {
10847       SDValue Ex = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, Value,
10848                                DAG.getVectorIdxConstant(Idx, dl));
10849       SDValue Store;
10850       if (ScalarVT != ScalarMemVT)
10851         Store =
10852             DAG.getTruncStore(StoreChain, dl, Ex, BasePtr,
10853                               SN->getPointerInfo().getWithOffset(Idx * Stride),
10854                               ScalarMemVT, MinAlign(Alignment, Idx * Stride),
10855                               SN->getMemOperand()->getFlags(), SN->getAAInfo());
10856       else
10857         Store = DAG.getStore(StoreChain, dl, Ex, BasePtr,
10858                              SN->getPointerInfo().getWithOffset(Idx * Stride),
10859                              MinAlign(Alignment, Idx * Stride),
10860                              SN->getMemOperand()->getFlags(), SN->getAAInfo());
10861 
10862       if (Idx == 0 && SN->isIndexed()) {
10863         assert(SN->getAddressingMode() == ISD::PRE_INC &&
10864                "Unknown addressing mode on vector store");
10865         Store = DAG.getIndexedStore(Store, dl, BasePtr, SN->getOffset(),
10866                                     SN->getAddressingMode());
10867       }
10868 
10869       BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10870                             DAG.getConstant(Stride, dl,
10871                                             BasePtr.getValueType()));
10872       Stores[Idx] = Store;
10873     }
10874 
10875     SDValue TF =  DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10876 
10877     if (SN->isIndexed()) {
10878       SDValue RetOps[] = { TF, Stores[0].getValue(1) };
10879       return DAG.getMergeValues(RetOps, dl);
10880     }
10881 
10882     return TF;
10883   }
10884 
10885   assert(SN->isUnindexed() && "Indexed v4i1 stores are not supported");
10886   assert(Value.getValueType() == MVT::v4i1 && "Unknown store to lower");
10887 
10888   // The values are now known to be -1 (false) or 1 (true). To convert this
10889   // into 0 (false) and 1 (true), add 1 and then divide by 2 (multiply by 0.5).
10890   // This can be done with an fma and the 0.5 constant: (V+1.0)*0.5 = 0.5*V+0.5
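  // For instance: V = -1.0 yields 0.5*(-1.0) + 0.5 = 0.0 (false), and
  // V = 1.0 yields 0.5*1.0 + 0.5 = 1.0 (true).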
10891   Value = DAG.getNode(PPCISD::QBFLT, dl, MVT::v4f64, Value);
10892 
10893   // FIXME: We can make this an f32 vector, but the BUILD_VECTOR code needs to
10894   // understand how to form the extending load.
10895   SDValue FPHalfs = DAG.getConstantFP(0.5, dl, MVT::v4f64);
10896 
10897   Value = DAG.getNode(ISD::FMA, dl, MVT::v4f64, Value, FPHalfs, FPHalfs);
10898 
10899   // Now convert to an integer and store.
10900   Value = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f64,
10901     DAG.getConstant(Intrinsic::ppc_qpx_qvfctiwu, dl, MVT::i32),
10902     Value);
10903 
10904   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10905   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10906   MachinePointerInfo PtrInfo =
10907       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
10908   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10909   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10910 
10911   SDValue Ops[] = {StoreChain,
10912                    DAG.getConstant(Intrinsic::ppc_qpx_qvstfiw, dl, MVT::i32),
10913                    Value, FIdx};
10914   SDVTList VTs = DAG.getVTList(/*chain*/ MVT::Other);
10915 
10916   StoreChain = DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID,
10917     dl, VTs, Ops, MVT::v4i32, PtrInfo);
10918 
10919   // Move data into the byte array.
10920   SDValue Loads[4], LoadChains[4];
10921   for (unsigned i = 0; i < 4; ++i) {
10922     unsigned Offset = 4*i;
10923     SDValue Idx = DAG.getConstant(Offset, dl, FIdx.getValueType());
10924     Idx = DAG.getNode(ISD::ADD, dl, FIdx.getValueType(), FIdx, Idx);
10925 
10926     Loads[i] = DAG.getLoad(MVT::i32, dl, StoreChain, Idx,
10927                            PtrInfo.getWithOffset(Offset));
10928     LoadChains[i] = Loads[i].getValue(1);
10929   }
10930 
10931   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10932 
10933   SDValue Stores[4];
10934   for (unsigned i = 0; i < 4; ++i) {
10935     SDValue Idx = DAG.getConstant(i, dl, BasePtr.getValueType());
10936     Idx = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr, Idx);
10937 
10938     Stores[i] = DAG.getTruncStore(
10939         StoreChain, dl, Loads[i], Idx, SN->getPointerInfo().getWithOffset(i),
10940         MVT::i8, /* Alignment = */ 1, SN->getMemOperand()->getFlags(),
10941         SN->getAAInfo());
10942   }
10943 
10944   StoreChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Stores);
10945 
10946   return StoreChain;
10947 }
10948 
10949 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10950   SDLoc dl(Op);
10951   if (Op.getValueType() == MVT::v4i32) {
10952     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10953 
10954     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
10955     // +16 as shift amt.
10956     SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10957     SDValue RHSSwap =   // = vrlw RHS, 16
10958       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
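    // Why this works (a sketch): writing each 32-bit lane as
    // x = (x_hi << 16) + x_lo and y = (y_hi << 16) + y_lo, then modulo 2^32:
    //   x * y = x_lo*y_lo + ((x_hi*y_lo + x_lo*y_hi) << 16)
    // vmulouh below produces the x_lo*y_lo terms, and vmsumuhm with the
    // rotated RHS produces the x_hi*y_lo + x_lo*y_hi sums per lane.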
10959 
10960     // Shrinkify inputs to v8i16.
10961     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10962     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10963     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10964 
10965     // Low parts multiplied together, generating 32-bit results (we ignore the
10966     // top parts).
10967     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10968                                         LHS, RHS, DAG, dl, MVT::v4i32);
10969 
10970     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10971                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10972     // Shift the high parts up 16 bits.
10973     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10974                               Neg16, DAG, dl);
10975     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10976   } else if (Op.getValueType() == MVT::v16i8) {
10977     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10978     bool isLittleEndian = Subtarget.isLittleEndian();
10979 
10980     // Multiply the even 8-bit parts, producing 16-bit sums.
10981     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10982                                            LHS, RHS, DAG, dl, MVT::v8i16);
10983     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10984 
10985     // Multiply the odd 8-bit parts, producing 16-bit sums.
10986     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10987                                           LHS, RHS, DAG, dl, MVT::v8i16);
10988     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10989 
10990     // Merge the results together.  Because vmuleub and vmuloub are
10991     // instructions with a big-endian bias, we must reverse the
10992     // element numbering and reverse the meaning of "odd" and "even"
10993     // when generating little endian code.
10994     int Ops[16];
10995     for (unsigned i = 0; i != 8; ++i) {
10996       if (isLittleEndian) {
10997         Ops[i*2  ] = 2*i;
10998         Ops[i*2+1] = 2*i+16;
10999       } else {
11000         Ops[i*2  ] = 2*i+1;
11001         Ops[i*2+1] = 2*i+1+16;
11002       }
11003     }
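    // For example, in big-endian mode the mask comes out as
    // <1,17,3,19,5,21,...>, selecting the low byte of each 16-bit product:
    // even-numbered products from the first operand, odd ones from the second.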
11004     if (isLittleEndian)
11005       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
11006     else
11007       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
11008   } else {
11009     llvm_unreachable("Unknown mul to lower!");
11010   }
11011 }
11012 
11013 SDValue PPCTargetLowering::LowerABS(SDValue Op, SelectionDAG &DAG) const {
11014 
11015   assert(Op.getOpcode() == ISD::ABS && "Should only be called for ISD::ABS");
11016 
11017   EVT VT = Op.getValueType();
11018   assert(VT.isVector() &&
11019          "Only set vector abs as custom, scalar abs shouldn't reach here!");
11020   assert((VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
11021           VT == MVT::v16i8) &&
11022          "Unexpected vector element type!");
11023   assert((VT != MVT::v2i64 || Subtarget.hasP8Altivec()) &&
11024          "Current subtarget doesn't support smax v2i64!");
11025 
11026   // For vector abs, it can be lowered to:
11027   // abs x
11028   // ==>
11029   // y = -x
11030   // smax(x, y)
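  // For example, abs(-5) becomes smax(-5, 5) = 5. For the minimum signed
  // value the negation wraps back to itself, so the result stays INT_MIN,
  // which matches ISD::ABS semantics.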
11031 
11032   SDLoc dl(Op);
11033   SDValue X = Op.getOperand(0);
11034   SDValue Zero = DAG.getConstant(0, dl, VT);
11035   SDValue Y = DAG.getNode(ISD::SUB, dl, VT, Zero, X);
11036 
11037   // The SMAX patch https://reviews.llvm.org/D47332
11038   // hasn't landed yet, so use the vmax intrinsics here for now.
11039   // TODO: Use ISD::SMAX directly once the SMAX patch has landed.
11040   Intrinsic::ID BifID = Intrinsic::ppc_altivec_vmaxsw;
11041   if (VT == MVT::v2i64)
11042     BifID = Intrinsic::ppc_altivec_vmaxsd;
11043   else if (VT == MVT::v8i16)
11044     BifID = Intrinsic::ppc_altivec_vmaxsh;
11045   else if (VT == MVT::v16i8)
11046     BifID = Intrinsic::ppc_altivec_vmaxsb;
11047 
11048   return BuildIntrinsicOp(BifID, X, Y, DAG, dl, VT);
11049 }
11050 
11051 // Custom lowering for fpext v2f32 to v2f64
11052 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
11053 
11054   assert(Op.getOpcode() == ISD::FP_EXTEND &&
11055          "Should only be called for ISD::FP_EXTEND");
11056 
11057   // FIXME: handle extends from half precision float vectors on P9.
11058   // We only want to custom lower an extend from v2f32 to v2f64.
11059   if (Op.getValueType() != MVT::v2f64 ||
11060       Op.getOperand(0).getValueType() != MVT::v2f32)
11061     return SDValue();
11062 
11063   SDLoc dl(Op);
11064   SDValue Op0 = Op.getOperand(0);
11065 
11066   switch (Op0.getOpcode()) {
11067   default:
11068     return SDValue();
11069   case ISD::EXTRACT_SUBVECTOR: {
11070     assert(Op0.getNumOperands() == 2 &&
11071            isa<ConstantSDNode>(Op0->getOperand(1)) &&
11072            "Node should have 2 operands with second one being a constant!");
11073 
11074     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
11075       return SDValue();
11076 
11077     // Custom lower is only done for high or low doubleword.
11078     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
11079     if (Idx % 2 != 0)
11080       return SDValue();
11081 
11082     // Since input is v4f32, at this point Idx is either 0 or 2.
11083     // Shift to get the doubleword position we want.
11084     int DWord = Idx >> 1;
11085 
11086     // High and low word positions are different on little endian.
11087     if (Subtarget.isLittleEndian())
11088       DWord ^= 0x1;
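    // For example, extracting subvector index 2 of a v4f32 gives DWord = 1
    // in big-endian element order; on little endian the register layout is
    // reversed, so the xor above flips this to DWord = 0.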
11089 
11090     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
11091                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
11092   }
11093   case ISD::FADD:
11094   case ISD::FMUL:
11095   case ISD::FSUB: {
11096     SDValue NewLoad[2];
11097     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
11098       // Ensure both inputs are loads.
11099       SDValue LdOp = Op0.getOperand(i);
11100       if (LdOp.getOpcode() != ISD::LOAD)
11101         return SDValue();
11102       // Generate new load node.
11103       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
11104       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
11105       NewLoad[i] = DAG.getMemIntrinsicNode(
11106           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11107           LD->getMemoryVT(), LD->getMemOperand());
11108     }
11109     SDValue NewOp =
11110         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
11111                     NewLoad[1], Op0.getNode()->getFlags());
11112     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
11113                        DAG.getConstant(0, dl, MVT::i32));
11114   }
11115   case ISD::LOAD: {
11116     LoadSDNode *LD = cast<LoadSDNode>(Op0);
11117     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
11118     SDValue NewLd = DAG.getMemIntrinsicNode(
11119         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
11120         LD->getMemoryVT(), LD->getMemOperand());
11121     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
11122                        DAG.getConstant(0, dl, MVT::i32));
11123   }
11124   }
11125   llvm_unreachable("ERROR: Should return for all cases within switch.");
11126 }
11127 
11128 /// LowerOperation - Provide custom lowering hooks for some operations.
11129 ///
11130 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
11131   switch (Op.getOpcode()) {
11132   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
11133   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
11134   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
11135   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
11136   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
11137   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
11138   case ISD::SETCC:              return LowerSETCC(Op, DAG);
11139   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
11140   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
11141 
11142   // Variable argument lowering.
11143   case ISD::VASTART:            return LowerVASTART(Op, DAG);
11144   case ISD::VAARG:              return LowerVAARG(Op, DAG);
11145   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
11146 
11147   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
11148   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
11149   case ISD::GET_DYNAMIC_AREA_OFFSET:
11150     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
11151 
11152   // Exception handling lowering.
11153   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
11154   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
11155   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
11156 
11157   case ISD::LOAD:               return LowerLOAD(Op, DAG);
11158   case ISD::STORE:              return LowerSTORE(Op, DAG);
11159   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
11160   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
11161   case ISD::FP_TO_UINT:
11162   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
11163   case ISD::UINT_TO_FP:
11164   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
11165   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
11166 
11167   // Lower 64-bit shifts.
11168   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
11169   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
11170   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
11171 
11172   // Vector-related lowering.
11173   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
11174   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
11175   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
11176   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
11177   case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
11178   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
11179   case ISD::MUL:                return LowerMUL(Op, DAG);
11180   case ISD::ABS:                return LowerABS(Op, DAG);
11181   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
11182   case ISD::ROTL:               return LowerROTL(Op, DAG);
11183 
11184   // For counter-based loop handling.
11185   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
11186 
11187   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
11188 
11189   // Frame & Return address.
11190   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
11191   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
11192 
11193   case ISD::INTRINSIC_VOID:
11194     return LowerINTRINSIC_VOID(Op, DAG);
11195   case ISD::BSWAP:
11196     return LowerBSWAP(Op, DAG);
11197   case ISD::ATOMIC_CMP_SWAP:
11198     return LowerATOMIC_CMP_SWAP(Op, DAG);
11199   }
11200 }
11201 
11202 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
11203                                            SmallVectorImpl<SDValue> &Results,
11204                                            SelectionDAG &DAG) const {
11205   SDLoc dl(N);
11206   switch (N->getOpcode()) {
11207   default:
11208     llvm_unreachable("Do not know how to custom type legalize this operation!");
11209   case ISD::READCYCLECOUNTER: {
11210     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
11211     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
11212 
11213     Results.push_back(
11214         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
11215     Results.push_back(RTB.getValue(2));
11216     break;
11217   }
11218   case ISD::INTRINSIC_W_CHAIN: {
11219     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
11220         Intrinsic::loop_decrement)
11221       break;
11222 
11223     assert(N->getValueType(0) == MVT::i1 &&
11224            "Unexpected result type for CTR decrement intrinsic");
11225     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
11226                                  N->getValueType(0));
11227     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
11228     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
11229                                  N->getOperand(1));
11230 
11231     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
11232     Results.push_back(NewInt.getValue(1));
11233     break;
11234   }
11235   case ISD::VAARG: {
11236     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
11237       return;
11238 
11239     EVT VT = N->getValueType(0);
11240 
11241     if (VT == MVT::i64) {
11242       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
11243 
11244       Results.push_back(NewNode);
11245       Results.push_back(NewNode.getValue(1));
11246     }
11247     return;
11248   }
11249   case ISD::FP_TO_SINT:
11250   case ISD::FP_TO_UINT:
11251     // LowerFP_TO_INT() can only handle f32 and f64.
11252     if (N->getOperand(0).getValueType() == MVT::ppcf128)
11253       return;
11254     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
11255     return;
11256   case ISD::TRUNCATE: {
11257     EVT TrgVT = N->getValueType(0);
11258     EVT OpVT = N->getOperand(0).getValueType();
11259     if (TrgVT.isVector() &&
11260         isOperationCustom(N->getOpcode(), TrgVT) &&
11261         OpVT.getSizeInBits() <= 128 &&
11262         isPowerOf2_32(OpVT.getVectorElementType().getSizeInBits()))
11263       Results.push_back(LowerTRUNCATEVector(SDValue(N, 0), DAG));
11264     return;
11265   }
11266   case ISD::BITCAST:
11267     // Don't handle bitcast here.
11268     return;
11269   case ISD::FP_EXTEND:
11270     SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
11271     if (Lowered)
11272       Results.push_back(Lowered);
11273     return;
11274   }
11275 }
11276 
11277 //===----------------------------------------------------------------------===//
11278 //  Other Lowering Code
11279 //===----------------------------------------------------------------------===//
11280 
11281 static Instruction *callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
11282   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
11283   Function *Func = Intrinsic::getDeclaration(M, Id);
11284   return Builder.CreateCall(Func, {});
11285 }
11286 
11287 // The mappings for emitLeadingFence/emitTrailingFence are taken from
11288 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
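// In short (as implemented below): seq_cst gets a leading sync; release and
// stronger get a leading lwsync; acquire and stronger get a trailing lwsync
// (or, for 64-bit atomic loads, a control-dependency + isync style cfence).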
11289 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
11290                                                  Instruction *Inst,
11291                                                  AtomicOrdering Ord) const {
11292   if (Ord == AtomicOrdering::SequentiallyConsistent)
11293     return callIntrinsic(Builder, Intrinsic::ppc_sync);
11294   if (isReleaseOrStronger(Ord))
11295     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11296   return nullptr;
11297 }
11298 
11299 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
11300                                                   Instruction *Inst,
11301                                                   AtomicOrdering Ord) const {
11302   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
11303     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
11304     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
11305     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
11306     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
11307       return Builder.CreateCall(
11308           Intrinsic::getDeclaration(
11309               Builder.GetInsertBlock()->getParent()->getParent(),
11310               Intrinsic::ppc_cfence, {Inst->getType()}),
11311           {Inst});
11312     // FIXME: Can use isync for rmw operation.
11313     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
11314   }
11315   return nullptr;
11316 }
11317 
11318 MachineBasicBlock *
11319 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
11320                                     unsigned AtomicSize,
11321                                     unsigned BinOpcode,
11322                                     unsigned CmpOpcode,
11323                                     unsigned CmpPred) const {
11324   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11325   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11326 
11327   auto LoadMnemonic = PPC::LDARX;
11328   auto StoreMnemonic = PPC::STDCX;
11329   switch (AtomicSize) {
11330   default:
11331     llvm_unreachable("Unexpected size of atomic entity");
11332   case 1:
11333     LoadMnemonic = PPC::LBARX;
11334     StoreMnemonic = PPC::STBCX;
11335     assert(Subtarget.hasPartwordAtomics() &&
11336            "8-bit atomics require partword-atomic support");
11336     break;
11337   case 2:
11338     LoadMnemonic = PPC::LHARX;
11339     StoreMnemonic = PPC::STHCX;
11340     assert(Subtarget.hasPartwordAtomics() &&
11341            "16-bit atomics require partword-atomic support");
11341     break;
11342   case 4:
11343     LoadMnemonic = PPC::LWARX;
11344     StoreMnemonic = PPC::STWCX;
11345     break;
11346   case 8:
11347     LoadMnemonic = PPC::LDARX;
11348     StoreMnemonic = PPC::STDCX;
11349     break;
11350   }
11351 
11352   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11353   MachineFunction *F = BB->getParent();
11354   MachineFunction::iterator It = ++BB->getIterator();
11355 
11356   Register dest = MI.getOperand(0).getReg();
11357   Register ptrA = MI.getOperand(1).getReg();
11358   Register ptrB = MI.getOperand(2).getReg();
11359   Register incr = MI.getOperand(3).getReg();
11360   DebugLoc dl = MI.getDebugLoc();
11361 
11362   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11363   MachineBasicBlock *loop2MBB =
11364     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11365   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11366   F->insert(It, loopMBB);
11367   if (CmpOpcode)
11368     F->insert(It, loop2MBB);
11369   F->insert(It, exitMBB);
11370   exitMBB->splice(exitMBB->begin(), BB,
11371                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11372   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11373 
11374   MachineRegisterInfo &RegInfo = F->getRegInfo();
11375   Register TmpReg = (!BinOpcode) ? incr :
11376     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
11377                                            : &PPC::GPRCRegClass);
11378 
11379   //  thisMBB:
11380   //   ...
11381   //   fallthrough --> loopMBB
11382   BB->addSuccessor(loopMBB);
11383 
11384   //  loopMBB:
11385   //   l[wd]arx dest, ptr
11386   //   add r0, dest, incr
11387   //   st[wd]cx. r0, ptr
11388   //   bne- loopMBB
11389   //   fallthrough --> exitMBB
11390 
11391   // For max/min...
11392   //  loopMBB:
11393   //   l[wd]arx dest, ptr
11394   //   cmpl?[wd] incr, dest
11395   //   bgt exitMBB
11396   //  loop2MBB:
11397   //   st[wd]cx. dest, ptr
11398   //   bne- loopMBB
11399   //   fallthrough --> exitMBB
11400 
11401   BB = loopMBB;
11402   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
11403     .addReg(ptrA).addReg(ptrB);
11404   if (BinOpcode)
11405     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
11406   if (CmpOpcode) {
11407     // Signed comparisons of byte or halfword values must be sign-extended.
11408     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
11409       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11410       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
11411               ExtReg).addReg(dest);
11412       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11413         .addReg(incr).addReg(ExtReg);
11414     } else
11415       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11416         .addReg(incr).addReg(dest);
11417 
11418     BuildMI(BB, dl, TII->get(PPC::BCC))
11419       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
11420     BB->addSuccessor(loop2MBB);
11421     BB->addSuccessor(exitMBB);
11422     BB = loop2MBB;
11423   }
11424   BuildMI(BB, dl, TII->get(StoreMnemonic))
11425     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
11426   BuildMI(BB, dl, TII->get(PPC::BCC))
11427     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
11428   BB->addSuccessor(loopMBB);
11429   BB->addSuccessor(exitMBB);
11430 
11431   //  exitMBB:
11432   //   ...
11433   BB = exitMBB;
11434   return BB;
11435 }
11436 
11437 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
11438     MachineInstr &MI, MachineBasicBlock *BB,
11439     bool is8bit, // operation
11440     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
11441   // If we support part-word atomic mnemonics, just use them
11442   if (Subtarget.hasPartwordAtomics())
11443     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
11444                             CmpPred);
11445 
11446   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11447   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11448   // In 64-bit mode we have to use 64 bits for addresses, even though the
11449   // lwarx/stwcx. instructions operate on 32-bit values.  With the 32-bit
11450   // atomics we can use the address registers without caring whether they're
11451   // 32 or 64 bits wide, but here we're doing actual arithmetic on the addresses.
11452   bool is64bit = Subtarget.isPPC64();
11453   bool isLittleEndian = Subtarget.isLittleEndian();
11454   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
11455 
11456   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11457   MachineFunction *F = BB->getParent();
11458   MachineFunction::iterator It = ++BB->getIterator();
11459 
11460   Register dest = MI.getOperand(0).getReg();
11461   Register ptrA = MI.getOperand(1).getReg();
11462   Register ptrB = MI.getOperand(2).getReg();
11463   Register incr = MI.getOperand(3).getReg();
11464   DebugLoc dl = MI.getDebugLoc();
11465 
11466   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11467   MachineBasicBlock *loop2MBB =
11468       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11469   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11470   F->insert(It, loopMBB);
11471   if (CmpOpcode)
11472     F->insert(It, loop2MBB);
11473   F->insert(It, exitMBB);
11474   exitMBB->splice(exitMBB->begin(), BB,
11475                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11476   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11477 
11478   MachineRegisterInfo &RegInfo = F->getRegInfo();
11479   const TargetRegisterClass *RC =
11480       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11481   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11482 
11483   Register PtrReg = RegInfo.createVirtualRegister(RC);
11484   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11485   Register ShiftReg =
11486       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11487   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
11488   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11489   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11490   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11491   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11492   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
11493   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11494   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11495   Register Ptr1Reg;
11496   Register TmpReg =
11497       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
11498 
11499   //  thisMBB:
11500   //   ...
11501   //   fallthrough --> loopMBB
11502   BB->addSuccessor(loopMBB);
11503 
11504   // The 4-byte load must be aligned, while a char or short may be
11505   // anywhere in the word.  Hence all this nasty bookkeeping code.
11506   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11507   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11508   //   xori shift, shift1, 24 [16]
11509   //   rlwinm ptr, ptr1, 0, 0, 29
11510   //   slw incr2, incr, shift
11511   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11512   //   slw mask, mask2, shift
11513   //  loopMBB:
11514   //   lwarx tmpDest, ptr
11515   //   add tmp, tmpDest, incr2
11516   //   andc tmp2, tmpDest, mask
11517   //   and tmp3, tmp, mask
11518   //   or tmp4, tmp3, tmp2
11519   //   stwcx. tmp4, ptr
11520   //   bne- loopMBB
11521   //   fallthrough --> exitMBB
11522   //   srw dest, tmpDest, shift
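  // As a concrete example (big-endian, 8-bit case): for a byte at offset 0
  // within its word, shift1 is 0 and the xori makes shift 24, placing the
  // byte in bits 24-31 of the word returned by lwarx; at offset 3, shift is
  // 0. On little endian no xori is needed and shift is just 8*(offset&3).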
11523   if (ptrA != ZeroReg) {
11524     Ptr1Reg = RegInfo.createVirtualRegister(RC);
11525     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11526         .addReg(ptrA)
11527         .addReg(ptrB);
11528   } else {
11529     Ptr1Reg = ptrB;
11530   }
11531   // We need to use a 32-bit subregister here to avoid a register-class
11532   // mismatch in 64-bit mode.
11533   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11534       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11535       .addImm(3)
11536       .addImm(27)
11537       .addImm(is8bit ? 28 : 27);
11538   if (!isLittleEndian)
11539     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11540         .addReg(Shift1Reg)
11541         .addImm(is8bit ? 24 : 16);
11542   if (is64bit)
11543     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11544         .addReg(Ptr1Reg)
11545         .addImm(0)
11546         .addImm(61);
11547   else
11548     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11549         .addReg(Ptr1Reg)
11550         .addImm(0)
11551         .addImm(0)
11552         .addImm(29);
11553   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11554   if (is8bit)
11555     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11556   else {
11557     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11558     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11559         .addReg(Mask3Reg)
11560         .addImm(65535);
11561   }
11562   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11563       .addReg(Mask2Reg)
11564       .addReg(ShiftReg);
11565 
11566   BB = loopMBB;
11567   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11568       .addReg(ZeroReg)
11569       .addReg(PtrReg);
11570   if (BinOpcode)
11571     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11572         .addReg(Incr2Reg)
11573         .addReg(TmpDestReg);
11574   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11575       .addReg(TmpDestReg)
11576       .addReg(MaskReg);
11577   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11578   if (CmpOpcode) {
11579     // For unsigned comparisons, we can directly compare the shifted values.
11580     // For signed comparisons we shift and sign extend.
11581     Register SReg = RegInfo.createVirtualRegister(GPRC);
11582     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11583         .addReg(TmpDestReg)
11584         .addReg(MaskReg);
11585     unsigned ValueReg = SReg;
11586     unsigned CmpReg = Incr2Reg;
11587     if (CmpOpcode == PPC::CMPW) {
11588       ValueReg = RegInfo.createVirtualRegister(GPRC);
11589       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11590           .addReg(SReg)
11591           .addReg(ShiftReg);
11592       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11593       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11594           .addReg(ValueReg);
11595       ValueReg = ValueSReg;
11596       CmpReg = incr;
11597     }
11598     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11599         .addReg(CmpReg)
11600         .addReg(ValueReg);
11601     BuildMI(BB, dl, TII->get(PPC::BCC))
11602         .addImm(CmpPred)
11603         .addReg(PPC::CR0)
11604         .addMBB(exitMBB);
11605     BB->addSuccessor(loop2MBB);
11606     BB->addSuccessor(exitMBB);
11607     BB = loop2MBB;
11608   }
11609   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11610   BuildMI(BB, dl, TII->get(PPC::STWCX))
11611       .addReg(Tmp4Reg)
11612       .addReg(ZeroReg)
11613       .addReg(PtrReg);
11614   BuildMI(BB, dl, TII->get(PPC::BCC))
11615       .addImm(PPC::PRED_NE)
11616       .addReg(PPC::CR0)
11617       .addMBB(loopMBB);
11618   BB->addSuccessor(loopMBB);
11619   BB->addSuccessor(exitMBB);
11620 
11621   //  exitMBB:
11622   //   ...
11623   BB = exitMBB;
11624   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
11625       .addReg(TmpDestReg)
11626       .addReg(ShiftReg);
11627   return BB;
11628 }
11629 
11630 llvm::MachineBasicBlock *
11631 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11632                                     MachineBasicBlock *MBB) const {
11633   DebugLoc DL = MI.getDebugLoc();
11634   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11635   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11636 
11637   MachineFunction *MF = MBB->getParent();
11638   MachineRegisterInfo &MRI = MF->getRegInfo();
11639 
11640   const BasicBlock *BB = MBB->getBasicBlock();
11641   MachineFunction::iterator I = ++MBB->getIterator();
11642 
11643   Register DstReg = MI.getOperand(0).getReg();
11644   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11645   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11646   Register mainDstReg = MRI.createVirtualRegister(RC);
11647   Register restoreDstReg = MRI.createVirtualRegister(RC);
11648 
11649   MVT PVT = getPointerTy(MF->getDataLayout());
11650   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11651          "Invalid Pointer Size!");
11652   // For v = setjmp(buf), we generate
11653   //
11654   // thisMBB:
11655   //  SjLjSetup mainMBB
11656   //  bl mainMBB
11657   //  v_restore = 1
11658   //  b sinkMBB
11659   //
11660   // mainMBB:
11661   //  buf[LabelOffset] = LR
11662   //  v_main = 0
11663   //
11664   // sinkMBB:
11665   //  v = phi(main, restore)
11666   //
11667 
11668   MachineBasicBlock *thisMBB = MBB;
11669   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11670   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11671   MF->insert(I, mainMBB);
11672   MF->insert(I, sinkMBB);
11673 
11674   MachineInstrBuilder MIB;
11675 
11676   // Transfer the remainder of BB and its successor edges to sinkMBB.
11677   sinkMBB->splice(sinkMBB->begin(), MBB,
11678                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11679   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11680 
11681   // Note that the structure of the jmp_buf used here is not compatible
11682   // with that used by libc, and is not designed to be. Specifically, it
11683   // stores only those 'reserved' registers that LLVM does not otherwise
11684   // understand how to spill. Also, by convention, by the time this
11685   // intrinsic is called, Clang has already stored the frame address in the
11686   // first slot of the buffer and stack address in the third. Following the
11687   // X86 target code, we'll store the jump address in the second slot. We also
11688   // need to save the TOC pointer (R2) to handle jumps between shared
11689   // libraries, and that will be stored in the fourth slot. The thread
11690   // identifier (R13) is not affected.
11691 
11692   // thisMBB:
11693   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11694   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11695   const int64_t BPOffset    = 4 * PVT.getStoreSize();
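  // E.g. with 64-bit pointers (store size 8) these land at byte offsets 8,
  // 24, and 32 of the buffer; slots 0 and 2 already hold the frame and stack
  // addresses stored by Clang, as described above.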
11696 
11697   // Prepare the IP (return address) in a register.
11698   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11699   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11700   Register BufReg = MI.getOperand(1).getReg();
11701 
11702   if (Subtarget.is64BitELFABI()) {
11703     setUsesTOCBasePtr(*MBB->getParent());
11704     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11705               .addReg(PPC::X2)
11706               .addImm(TOCOffset)
11707               .addReg(BufReg)
11708               .cloneMemRefs(MI);
11709   }
11710 
11711   // Naked functions never have a base pointer, and so we use r1. For all
11712   // other functions, this decision must be delayed until during PEI.
11713   unsigned BaseReg;
11714   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11715     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11716   else
11717     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11718 
11719   MIB = BuildMI(*thisMBB, MI, DL,
11720                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11721             .addReg(BaseReg)
11722             .addImm(BPOffset)
11723             .addReg(BufReg)
11724             .cloneMemRefs(MI);
11725 
11726   // Setup
11727   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11728   MIB.addRegMask(TRI->getNoPreservedMask());
11729 
11730   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11731 
11732   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11733           .addMBB(mainMBB);
11734   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11735 
11736   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11737   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11738 
11739   // mainMBB:
11740   //  mainDstReg = 0
11741   MIB =
11742       BuildMI(mainMBB, DL,
11743               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11744 
11745   // Store IP
11746   if (Subtarget.isPPC64()) {
11747     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11748             .addReg(LabelReg)
11749             .addImm(LabelOffset)
11750             .addReg(BufReg);
11751   } else {
11752     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11753             .addReg(LabelReg)
11754             .addImm(LabelOffset)
11755             .addReg(BufReg);
11756   }
11757   MIB.cloneMemRefs(MI);
11758 
11759   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11760   mainMBB->addSuccessor(sinkMBB);
11761 
11762   // sinkMBB:
11763   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11764           TII->get(PPC::PHI), DstReg)
11765     .addReg(mainDstReg).addMBB(mainMBB)
11766     .addReg(restoreDstReg).addMBB(thisMBB);
11767 
11768   MI.eraseFromParent();
11769   return sinkMBB;
11770 }
11771 
11772 MachineBasicBlock *
11773 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11774                                      MachineBasicBlock *MBB) const {
11775   DebugLoc DL = MI.getDebugLoc();
11776   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11777 
11778   MachineFunction *MF = MBB->getParent();
11779   MachineRegisterInfo &MRI = MF->getRegInfo();
11780 
11781   MVT PVT = getPointerTy(MF->getDataLayout());
11782   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11783          "Invalid Pointer Size!");
11784 
11785   const TargetRegisterClass *RC =
11786     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11787   Register Tmp = MRI.createVirtualRegister(RC);
11788   // Since FP is only updated here but NOT referenced, it's treated as a GPR.
11789   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11790   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11791   unsigned BP =
11792       (PVT == MVT::i64)
11793           ? PPC::X30
11794           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11795                                                               : PPC::R30);
11796 
11797   MachineInstrBuilder MIB;
11798 
11799   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11800   const int64_t SPOffset    = 2 * PVT.getStoreSize();
11801   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11802   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11803 
11804   Register BufReg = MI.getOperand(0).getReg();
11805 
11806   // Reload FP (the jumped-to function may not have had a
11807   // frame pointer, and if so, then its r31 will be restored
11808   // as necessary).
11809   if (PVT == MVT::i64) {
11810     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11811             .addImm(0)
11812             .addReg(BufReg);
11813   } else {
11814     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11815             .addImm(0)
11816             .addReg(BufReg);
11817   }
11818   MIB.cloneMemRefs(MI);
11819 
11820   // Reload IP
11821   if (PVT == MVT::i64) {
11822     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11823             .addImm(LabelOffset)
11824             .addReg(BufReg);
11825   } else {
11826     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11827             .addImm(LabelOffset)
11828             .addReg(BufReg);
11829   }
11830   MIB.cloneMemRefs(MI);
11831 
11832   // Reload SP
11833   if (PVT == MVT::i64) {
11834     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11835             .addImm(SPOffset)
11836             .addReg(BufReg);
11837   } else {
11838     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11839             .addImm(SPOffset)
11840             .addReg(BufReg);
11841   }
11842   MIB.cloneMemRefs(MI);
11843 
11844   // Reload BP
11845   if (PVT == MVT::i64) {
11846     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11847             .addImm(BPOffset)
11848             .addReg(BufReg);
11849   } else {
11850     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11851             .addImm(BPOffset)
11852             .addReg(BufReg);
11853   }
11854   MIB.cloneMemRefs(MI);
11855 
11856   // Reload TOC
11857   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11858     setUsesTOCBasePtr(*MBB->getParent());
11859     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11860               .addImm(TOCOffset)
11861               .addReg(BufReg)
11862               .cloneMemRefs(MI);
11863   }
11864 
11865   // Jump
11866   BuildMI(*MBB, MI, DL,
11867           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11868   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11869 
11870   MI.eraseFromParent();
11871   return MBB;
11872 }
11873 
11874 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11875   // If the function specifically requests inline stack probes, emit them.
11876   if (MF.getFunction().hasFnAttribute("probe-stack"))
11877     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11878            "inline-asm";
11879   return false;
11880 }
11881 
11882 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11883   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11884   unsigned StackAlign = TFI->getStackAlignment();
11885   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11886          "Unexpected stack alignment");
11887   // The default stack probe size is 4096 if the function has no
11888   // stack-probe-size attribute.
11889   unsigned StackProbeSize = 4096;
11890   const Function &Fn = MF.getFunction();
11891   if (Fn.hasFnAttribute("stack-probe-size"))
11892     Fn.getFnAttribute("stack-probe-size")
11893         .getValueAsString()
11894         .getAsInteger(0, StackProbeSize);
11895   // Round down to the stack alignment.
11896   StackProbeSize &= ~(StackAlign - 1);
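  // E.g. a "stack-probe-size"="5000" attribute with a 16-byte stack alignment
  // rounds down to 4992; a requested size smaller than the alignment rounds
  // to 0 and falls back to StackAlign below.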
11897   return StackProbeSize ? StackProbeSize : StackAlign;
11898 }
11899 
11900 // Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
11901 // into three phases. In the first phase, it uses the pseudo instruction
11902 // PREPARE_PROBED_ALLOCA to get the future results of the actual FramePointer
11903 // and FinalStackPtr. In the second phase, it generates a loop that probes the
11904 // blocks. Finally, it uses the pseudo instruction DYNAREAOFFSET to get the
11905 // future result of MaxCallFrameSize so it can calculate the correct data area pointer.
11906 MachineBasicBlock *
11907 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11908                                     MachineBasicBlock *MBB) const {
11909   const bool isPPC64 = Subtarget.isPPC64();
11910   MachineFunction *MF = MBB->getParent();
11911   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11912   DebugLoc DL = MI.getDebugLoc();
11913   const unsigned ProbeSize = getStackProbeSize(*MF);
11914   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11915   MachineRegisterInfo &MRI = MF->getRegInfo();
11916   // The CFG of the stack probing code looks like:
11917   //         +-----+
11918   //         | MBB |
11919   //         +--+--+
11920   //            |
11921   //       +----v----+
11922   //  +--->+ TestMBB +---+
11923   //  |    +----+----+   |
11924   //  |         |        |
11925   //  |   +-----v----+   |
11926   //  +---+ BlockMBB |   |
11927   //      +----------+   |
11928   //                     |
11929   //       +---------+   |
11930   //       | TailMBB +<--+
11931   //       +---------+
11932   // In MBB, calculate previous frame pointer and final stack pointer.
11933   // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
11934   // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB.
11935   // TailMBB is spliced via \p MI.
11936   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11937   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11938   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11939 
11940   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11941   MF->insert(MBBIter, TestMBB);
11942   MF->insert(MBBIter, BlockMBB);
11943   MF->insert(MBBIter, TailMBB);
11944 
11945   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11946   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11947 
11948   Register DstReg = MI.getOperand(0).getReg();
11949   Register NegSizeReg = MI.getOperand(1).getReg();
11950   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11951   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11952   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11953 
11954   // Get the canonical FinalStackPtr like what
11955   // PPCRegisterInfo::lowerDynamicAlloc does.
11956   BuildMI(*MBB, {MI}, DL,
11957           TII->get(isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64
11958                            : PPC::PREPARE_PROBED_ALLOCA_32),
11959           FramePointer)
11960       .addDef(FinalStackPtr)
11961       .addReg(NegSizeReg)
11962       .add(MI.getOperand(2))
11963       .add(MI.getOperand(3));
11964 
11965   // Materialize a scratch register for update.
11966   int64_t NegProbeSize = -(int64_t)ProbeSize;
11967   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11968   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11969   if (!isInt<16>(NegProbeSize)) {
11970     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11971     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11972         .addImm(NegProbeSize >> 16);
11973     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11974             ScratchReg)
11975         .addReg(TempReg)
11976         .addImm(NegProbeSize & 0xFFFF);
11977   } else
11978     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11979         .addImm(NegProbeSize);
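  // For instance (a sketch): ProbeSize = 4096 gives NegProbeSize = -4096,
  // which fits in 16 bits and needs only the single LI above; ProbeSize =
  // 1 MiB gives NegProbeSize = -1048576 = 0xFFF00000, which is materialized
  // as LIS -16 followed by ORI 0.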
11980 
11981   {
11982     // Probe the leading residual part.
11983     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11984     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11985         .addReg(NegSizeReg)
11986         .addReg(ScratchReg);
11987     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11988     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11989         .addReg(Div)
11990         .addReg(ScratchReg);
11991     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11992     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11993         .addReg(Mul)
11994         .addReg(NegSizeReg);
11995     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11996         .addReg(FramePointer)
11997         .addReg(SPReg)
11998         .addReg(NegMod);
11999   }
12000 
12001   {
12002     // The remaining part is now a multiple of ProbeSize.
12003     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
12004     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
12005         .addReg(SPReg)
12006         .addReg(FinalStackPtr);
12007     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
12008         .addImm(PPC::PRED_EQ)
12009         .addReg(CmpResult)
12010         .addMBB(TailMBB);
12011     TestMBB->addSuccessor(BlockMBB);
12012     TestMBB->addSuccessor(TailMBB);
12013   }
12014 
12015   {
12016     // Touch the block.
12017     // |P...|P...|P...
12018     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
12019         .addReg(FramePointer)
12020         .addReg(SPReg)
12021         .addReg(ScratchReg);
12022     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
12023     BlockMBB->addSuccessor(TestMBB);
12024   }
12025 
12026   // The calculation of MaxCallFrameSize is deferred to prologue/epilogue
12027   // insertion; use the DYNAREAOFFSET pseudo instruction to get the future result.
12028   Register MaxCallFrameSizeReg =
12029       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
12030   BuildMI(TailMBB, DL,
12031           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
12032           MaxCallFrameSizeReg)
12033       .add(MI.getOperand(2))
12034       .add(MI.getOperand(3));
12035   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
12036       .addReg(SPReg)
12037       .addReg(MaxCallFrameSizeReg);
12038 
12039   // Splice instructions after MI to TailMBB.
12040   TailMBB->splice(TailMBB->end(), MBB,
12041                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
12042   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
12043   MBB->addSuccessor(TestMBB);
12044 
12045   // Delete the pseudo instruction.
12046   MI.eraseFromParent();
12047 
12048   ++NumDynamicAllocaProbed;
12049   return TailMBB;
12050 }
12051 
12052 MachineBasicBlock *
12053 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
12054                                                MachineBasicBlock *BB) const {
12055   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
12056       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
12057     if (Subtarget.is64BitELFABI() &&
12058         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
12059         !Subtarget.isUsingPCRelativeCalls()) {
12060       // Call lowering should have added an r2 operand to indicate a dependence
12061       // on the TOC base pointer value. It can't, however, because there is no
12062       // way to mark the dependence as implicit there, and so the stackmap code
12063       // will confuse it with a regular operand. Instead, add the dependence
12064       // here.
12065       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
12066     }
12067 
12068     return emitPatchPoint(MI, BB);
12069   }
12070 
12071   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
12072       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
12073     return emitEHSjLjSetJmp(MI, BB);
12074   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
12075              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
12076     return emitEHSjLjLongJmp(MI, BB);
12077   }
12078 
12079   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
12080 
12081   // To "insert" these instructions we actually have to insert their
12082   // control-flow patterns.
12083   const BasicBlock *LLVM_BB = BB->getBasicBlock();
12084   MachineFunction::iterator It = ++BB->getIterator();
12085 
12086   MachineFunction *F = BB->getParent();
12087 
12088   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12089       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
12090       MI.getOpcode() == PPC::SELECT_I8) {
12091     SmallVector<MachineOperand, 2> Cond;
12092     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
12093         MI.getOpcode() == PPC::SELECT_CC_I8)
12094       Cond.push_back(MI.getOperand(4));
12095     else
12096       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
12097     Cond.push_back(MI.getOperand(1));
12098 
12099     DebugLoc dl = MI.getDebugLoc();
12100     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
12101                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
12102   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
12103              MI.getOpcode() == PPC::SELECT_CC_F8 ||
12104              MI.getOpcode() == PPC::SELECT_CC_F16 ||
12105              MI.getOpcode() == PPC::SELECT_CC_QFRC ||
12106              MI.getOpcode() == PPC::SELECT_CC_QSRC ||
12107              MI.getOpcode() == PPC::SELECT_CC_QBRC ||
12108              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
12109              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
12110              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
12111              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
12112              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
12113              MI.getOpcode() == PPC::SELECT_CC_SPE ||
12114              MI.getOpcode() == PPC::SELECT_F4 ||
12115              MI.getOpcode() == PPC::SELECT_F8 ||
12116              MI.getOpcode() == PPC::SELECT_F16 ||
12117              MI.getOpcode() == PPC::SELECT_QFRC ||
12118              MI.getOpcode() == PPC::SELECT_QSRC ||
12119              MI.getOpcode() == PPC::SELECT_QBRC ||
12120              MI.getOpcode() == PPC::SELECT_SPE ||
12121              MI.getOpcode() == PPC::SELECT_SPE4 ||
12122              MI.getOpcode() == PPC::SELECT_VRRC ||
12123              MI.getOpcode() == PPC::SELECT_VSFRC ||
12124              MI.getOpcode() == PPC::SELECT_VSSRC ||
12125              MI.getOpcode() == PPC::SELECT_VSRC) {
12126     // The incoming instruction knows the destination vreg to set, the
12127     // condition code register to branch on, the true/false values to
12128     // select between, and a branch opcode to use.
12129 
12130     //  thisMBB:
12131     //  ...
12132     //   TrueVal = ...
12133     //   cmpTY ccX, r1, r2
12134     //   bCC copy1MBB
12135     //   fallthrough --> copy0MBB
12136     MachineBasicBlock *thisMBB = BB;
12137     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
12138     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12139     DebugLoc dl = MI.getDebugLoc();
12140     F->insert(It, copy0MBB);
12141     F->insert(It, sinkMBB);
12142 
12143     // Transfer the remainder of BB and its successor edges to sinkMBB.
12144     sinkMBB->splice(sinkMBB->begin(), BB,
12145                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12146     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12147 
12148     // Next, add the true and fallthrough blocks as its successors.
12149     BB->addSuccessor(copy0MBB);
12150     BB->addSuccessor(sinkMBB);
12151 
12152     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
12153         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
12154         MI.getOpcode() == PPC::SELECT_F16 ||
12155         MI.getOpcode() == PPC::SELECT_SPE4 ||
12156         MI.getOpcode() == PPC::SELECT_SPE ||
12157         MI.getOpcode() == PPC::SELECT_QFRC ||
12158         MI.getOpcode() == PPC::SELECT_QSRC ||
12159         MI.getOpcode() == PPC::SELECT_QBRC ||
12160         MI.getOpcode() == PPC::SELECT_VRRC ||
12161         MI.getOpcode() == PPC::SELECT_VSFRC ||
12162         MI.getOpcode() == PPC::SELECT_VSSRC ||
12163         MI.getOpcode() == PPC::SELECT_VSRC) {
12164       BuildMI(BB, dl, TII->get(PPC::BC))
12165           .addReg(MI.getOperand(1).getReg())
12166           .addMBB(sinkMBB);
12167     } else {
12168       unsigned SelectPred = MI.getOperand(4).getImm();
12169       BuildMI(BB, dl, TII->get(PPC::BCC))
12170           .addImm(SelectPred)
12171           .addReg(MI.getOperand(1).getReg())
12172           .addMBB(sinkMBB);
12173     }
12174 
12175     //  copy0MBB:
12176     //   %FalseValue = ...
12177     //   # fallthrough to sinkMBB
12178     BB = copy0MBB;
12179 
12180     // Update machine-CFG edges
12181     BB->addSuccessor(sinkMBB);
12182 
12183     //  sinkMBB:
12184     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
12185     //  ...
12186     BB = sinkMBB;
12187     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
12188         .addReg(MI.getOperand(3).getReg())
12189         .addMBB(copy0MBB)
12190         .addReg(MI.getOperand(2).getReg())
12191         .addMBB(thisMBB);
12192   } else if (MI.getOpcode() == PPC::ReadTB) {
12193     // To read the 64-bit time-base register on a 32-bit target, we read the
12194     // two halves. Should the counter have wrapped while it was being read, we
12195     // need to try again.
12196     // ...
12197     // readLoop:
12198     // mfspr Rx,TBU # load from TBU
12199     // mfspr Ry,TB  # load from TB
12200     // mfspr Rz,TBU # load from TBU
12201     // cmpw crX,Rx,Rz # check if 'old'='new'
12202     // bne readLoop   # branch if they're not equal
12203     // ...
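    // A rough C-level sketch of the loop (mfspr() here is a hypothetical
    // helper standing in for the instruction, not a real API):
    //   do {
    //     hi  = mfspr(TBU);  // SPR 269
    //     lo  = mfspr(TB);   // SPR 268
    //     hi2 = mfspr(TBU);  // re-read the upper half
    //   } while (hi != hi2); // retry if TB carried into TBU mid-read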
12204 
12205     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
12206     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
12207     DebugLoc dl = MI.getDebugLoc();
12208     F->insert(It, readMBB);
12209     F->insert(It, sinkMBB);
12210 
12211     // Transfer the remainder of BB and its successor edges to sinkMBB.
12212     sinkMBB->splice(sinkMBB->begin(), BB,
12213                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12214     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
12215 
12216     BB->addSuccessor(readMBB);
12217     BB = readMBB;
12218 
12219     MachineRegisterInfo &RegInfo = F->getRegInfo();
12220     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
12221     Register LoReg = MI.getOperand(0).getReg();
12222     Register HiReg = MI.getOperand(1).getReg();
12223 
12224     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
12225     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
12226     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
12227 
12228     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12229 
12230     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
12231         .addReg(HiReg)
12232         .addReg(ReadAgainReg);
12233     BuildMI(BB, dl, TII->get(PPC::BCC))
12234         .addImm(PPC::PRED_NE)
12235         .addReg(CmpReg)
12236         .addMBB(readMBB);
12237 
12238     BB->addSuccessor(readMBB);
12239     BB->addSuccessor(sinkMBB);
12240   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
12241     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
12242   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
12243     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
12244   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
12245     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
12246   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
12247     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
12248 
12249   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
12250     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
12251   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
12252     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
12253   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
12254     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
12255   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
12256     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
12257 
12258   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
12259     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
12260   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
12261     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
12262   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
12263     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
12264   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
12265     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
12266 
12267   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
12268     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
12269   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
12270     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
12271   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
12272     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
12273   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
12274     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
12275 
12276   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
12277     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
12278   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
12279     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
12280   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
12281     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
12282   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
12283     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
12284 
12285   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
12286     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
12287   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
12288     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
12289   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
12290     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
12291   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
12292     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
12293 
12294   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
12295     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
12296   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
12297     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
12298   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
12299     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
12300   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
12301     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
12302 
12303   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
12304     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
12305   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
12306     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
12307   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
12308     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
12309   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
12310     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
12311 
12312   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
12313     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
12314   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
12315     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
12316   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
12317     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
12318   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
12319     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
12320 
12321   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
12322     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
12323   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
12324     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
12325   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
12326     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
12327   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
12328     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
12329 
12330   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
12331     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
12332   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
12333     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
12334   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
12335     BB = EmitAtomicBinary(MI, BB, 4, 0);
12336   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
12337     BB = EmitAtomicBinary(MI, BB, 8, 0);
12338   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
12339            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
12340            (Subtarget.hasPartwordAtomics() &&
12341             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
12342            (Subtarget.hasPartwordAtomics() &&
12343             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
12344     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
12345 
12346     auto LoadMnemonic = PPC::LDARX;
12347     auto StoreMnemonic = PPC::STDCX;
12348     switch (MI.getOpcode()) {
12349     default:
12350       llvm_unreachable("Compare and swap of unknown size");
12351     case PPC::ATOMIC_CMP_SWAP_I8:
12352       LoadMnemonic = PPC::LBARX;
12353       StoreMnemonic = PPC::STBCX;
12354       assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
12355       break;
12356     case PPC::ATOMIC_CMP_SWAP_I16:
12357       LoadMnemonic = PPC::LHARX;
12358       StoreMnemonic = PPC::STHCX;
12359       assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
12360       break;
12361     case PPC::ATOMIC_CMP_SWAP_I32:
12362       LoadMnemonic = PPC::LWARX;
12363       StoreMnemonic = PPC::STWCX;
12364       break;
12365     case PPC::ATOMIC_CMP_SWAP_I64:
12366       LoadMnemonic = PPC::LDARX;
12367       StoreMnemonic = PPC::STDCX;
12368       break;
12369     }
12370     Register dest = MI.getOperand(0).getReg();
12371     Register ptrA = MI.getOperand(1).getReg();
12372     Register ptrB = MI.getOperand(2).getReg();
12373     Register oldval = MI.getOperand(3).getReg();
12374     Register newval = MI.getOperand(4).getReg();
12375     DebugLoc dl = MI.getDebugLoc();
12376 
12377     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12378     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12379     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12380     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12381     F->insert(It, loop1MBB);
12382     F->insert(It, loop2MBB);
12383     F->insert(It, midMBB);
12384     F->insert(It, exitMBB);
12385     exitMBB->splice(exitMBB->begin(), BB,
12386                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12387     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12388 
12389     //  thisMBB:
12390     //   ...
12391     //   fallthrough --> loopMBB
12392     BB->addSuccessor(loop1MBB);
12393 
12394     // loop1MBB:
12395     //   l[bhwd]arx dest, ptr
12396     //   cmp[wd] dest, oldval
12397     //   bne- midMBB
12398     // loop2MBB:
12399     //   st[bhwd]cx. newval, ptr
12400     //   bne- loopMBB
12401     //   b exitBB
12402     // midMBB:
12403     //   st[bhwd]cx. dest, ptr
12404     // exitBB:
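    // Note that midMBB conditionally stores back the value it just loaded;
    // whether that st[bhwd]cx. succeeds or fails, it clears the reservation
    // taken by the l[bhwd]arx on the failure path.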
12405     BB = loop1MBB;
12406     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
12407     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
12408         .addReg(oldval)
12409         .addReg(dest);
12410     BuildMI(BB, dl, TII->get(PPC::BCC))
12411         .addImm(PPC::PRED_NE)
12412         .addReg(PPC::CR0)
12413         .addMBB(midMBB);
12414     BB->addSuccessor(loop2MBB);
12415     BB->addSuccessor(midMBB);
12416 
12417     BB = loop2MBB;
12418     BuildMI(BB, dl, TII->get(StoreMnemonic))
12419         .addReg(newval)
12420         .addReg(ptrA)
12421         .addReg(ptrB);
12422     BuildMI(BB, dl, TII->get(PPC::BCC))
12423         .addImm(PPC::PRED_NE)
12424         .addReg(PPC::CR0)
12425         .addMBB(loop1MBB);
12426     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12427     BB->addSuccessor(loop1MBB);
12428     BB->addSuccessor(exitMBB);
12429 
12430     BB = midMBB;
12431     BuildMI(BB, dl, TII->get(StoreMnemonic))
12432         .addReg(dest)
12433         .addReg(ptrA)
12434         .addReg(ptrB);
12435     BB->addSuccessor(exitMBB);
12436 
12437     //  exitMBB:
12438     //   ...
12439     BB = exitMBB;
12440   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
12441              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
12442     // We must use 64-bit registers for addresses when targeting 64-bit,
12443     // since we're actually doing arithmetic on them.  Other registers
12444     // can be 32-bit.
12445     bool is64bit = Subtarget.isPPC64();
12446     bool isLittleEndian = Subtarget.isLittleEndian();
12447     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
12448 
12449     Register dest = MI.getOperand(0).getReg();
12450     Register ptrA = MI.getOperand(1).getReg();
12451     Register ptrB = MI.getOperand(2).getReg();
12452     Register oldval = MI.getOperand(3).getReg();
12453     Register newval = MI.getOperand(4).getReg();
12454     DebugLoc dl = MI.getDebugLoc();
12455 
12456     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12457     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12458     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12459     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12460     F->insert(It, loop1MBB);
12461     F->insert(It, loop2MBB);
12462     F->insert(It, midMBB);
12463     F->insert(It, exitMBB);
12464     exitMBB->splice(exitMBB->begin(), BB,
12465                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12466     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12467 
12468     MachineRegisterInfo &RegInfo = F->getRegInfo();
12469     const TargetRegisterClass *RC =
12470         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12471     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12472 
12473     Register PtrReg = RegInfo.createVirtualRegister(RC);
12474     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12475     Register ShiftReg =
12476         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12477     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12478     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12479     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12480     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12481     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12482     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12483     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12484     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12485     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12486     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12487     Register Ptr1Reg;
12488     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12489     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12490     //  thisMBB:
12491     //   ...
12492     //   fallthrough --> loopMBB
12493     BB->addSuccessor(loop1MBB);
12494 
12495     // The 4-byte load must be aligned, while a char or short may be
12496     // anywhere in the word.  Hence all this nasty bookkeeping code.
12497     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
12498     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12499     //   xori shift, shift1, 24 [16]
12500     //   rlwinm ptr, ptr1, 0, 0, 29
12501     //   slw newval2, newval, shift
12502     //   slw oldval2, oldval, shift
12503     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12504     //   slw mask, mask2, shift
12505     //   and newval3, newval2, mask
12506     //   and oldval3, oldval2, mask
12507     // loop1MBB:
12508     //   lwarx tmpDest, ptr
12509     //   and tmp, tmpDest, mask
12510     //   cmpw tmp, oldval3
12511     //   bne- midMBB
12512     // loop2MBB:
12513     //   andc tmp2, tmpDest, mask
12514     //   or tmp4, tmp2, newval3
12515     //   stwcx. tmp4, ptr
12516     //   bne- loop1MBB
12517     //   b exitBB
12518     // midMBB:
12519     //   stwcx. tmpDest, ptr
12520     // exitBB:
12521     //   srw dest, tmpDest, shift
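    // As a worked example (big-endian, 8-bit operand at byte offset 1 within
    // its aligned word): shift1 = 1*8 = 8, shift = 8 xor 24 = 16, so the byte
    // occupies bits 23:16 of the loaded word and mask = 0xFF << 16.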
12522     if (ptrA != ZeroReg) {
12523       Ptr1Reg = RegInfo.createVirtualRegister(RC);
12524       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12525           .addReg(ptrA)
12526           .addReg(ptrB);
12527     } else {
12528       Ptr1Reg = ptrB;
12529     }
12530 
12531     // We need to use a 32-bit subregister here to avoid a register-class
12532     // mismatch in 64-bit mode.
12533     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12534         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12535         .addImm(3)
12536         .addImm(27)
12537         .addImm(is8bit ? 28 : 27);
12538     if (!isLittleEndian)
12539       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12540           .addReg(Shift1Reg)
12541           .addImm(is8bit ? 24 : 16);
12542     if (is64bit)
12543       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12544           .addReg(Ptr1Reg)
12545           .addImm(0)
12546           .addImm(61);
12547     else
12548       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12549           .addReg(Ptr1Reg)
12550           .addImm(0)
12551           .addImm(0)
12552           .addImm(29);
12553     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
12554         .addReg(newval)
12555         .addReg(ShiftReg);
12556     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
12557         .addReg(oldval)
12558         .addReg(ShiftReg);
12559     if (is8bit)
12560       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12561     else {
12562       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12563       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12564           .addReg(Mask3Reg)
12565           .addImm(65535);
12566     }
12567     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12568         .addReg(Mask2Reg)
12569         .addReg(ShiftReg);
12570     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
12571         .addReg(NewVal2Reg)
12572         .addReg(MaskReg);
12573     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
12574         .addReg(OldVal2Reg)
12575         .addReg(MaskReg);
12576 
12577     BB = loop1MBB;
12578     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12579         .addReg(ZeroReg)
12580         .addReg(PtrReg);
12581     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
12582         .addReg(TmpDestReg)
12583         .addReg(MaskReg);
12584     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
12585         .addReg(TmpReg)
12586         .addReg(OldVal3Reg);
12587     BuildMI(BB, dl, TII->get(PPC::BCC))
12588         .addImm(PPC::PRED_NE)
12589         .addReg(PPC::CR0)
12590         .addMBB(midMBB);
12591     BB->addSuccessor(loop2MBB);
12592     BB->addSuccessor(midMBB);
12593 
12594     BB = loop2MBB;
12595     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12596         .addReg(TmpDestReg)
12597         .addReg(MaskReg);
12598     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
12599         .addReg(Tmp2Reg)
12600         .addReg(NewVal3Reg);
12601     BuildMI(BB, dl, TII->get(PPC::STWCX))
12602         .addReg(Tmp4Reg)
12603         .addReg(ZeroReg)
12604         .addReg(PtrReg);
12605     BuildMI(BB, dl, TII->get(PPC::BCC))
12606         .addImm(PPC::PRED_NE)
12607         .addReg(PPC::CR0)
12608         .addMBB(loop1MBB);
12609     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12610     BB->addSuccessor(loop1MBB);
12611     BB->addSuccessor(exitMBB);
12612 
12613     BB = midMBB;
12614     BuildMI(BB, dl, TII->get(PPC::STWCX))
12615         .addReg(TmpDestReg)
12616         .addReg(ZeroReg)
12617         .addReg(PtrReg);
12618     BB->addSuccessor(exitMBB);
12619 
12620     //  exitMBB:
12621     //   ...
12622     BB = exitMBB;
12623     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
12624         .addReg(TmpReg)
12625         .addReg(ShiftReg);
12626   } else if (MI.getOpcode() == PPC::FADDrtz) {
12627     // This pseudo performs an FADD with rounding mode temporarily forced
12628     // to round-to-zero.  We emit this via custom inserter since the FPSCR
12629     // is not modeled at the SelectionDAG level.
12630     Register Dest = MI.getOperand(0).getReg();
12631     Register Src1 = MI.getOperand(1).getReg();
12632     Register Src2 = MI.getOperand(2).getReg();
12633     DebugLoc dl = MI.getDebugLoc();
12634 
12635     MachineRegisterInfo &RegInfo = F->getRegInfo();
12636     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12637 
12638     // Save FPSCR value.
12639     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12640 
12641     // Set rounding mode to round-to-zero.
12642     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1)).addImm(31);
12643     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0)).addImm(30);
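    // (FPSCR bits 30:31 in mtfsb numbering form the RN field; setting bit 31
    // and clearing bit 30 selects RN = 0b01, i.e. round toward zero.)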
12644 
12645     // Perform addition.
12646     BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest).addReg(Src1).addReg(Src2);
12647 
12648     // Restore FPSCR value.
12649     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
12650   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12651              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12652              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12653              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
12654     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12655                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
12656                           ? PPC::ANDI8_rec
12657                           : PPC::ANDI_rec;
12658     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12659                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12660 
12661     MachineRegisterInfo &RegInfo = F->getRegInfo();
12662     Register Dest = RegInfo.createVirtualRegister(
12663         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12664 
12665     DebugLoc Dl = MI.getDebugLoc();
12666     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
12667         .addReg(MI.getOperand(1).getReg())
12668         .addImm(1);
12669     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12670             MI.getOperand(0).getReg())
12671         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12672   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12673     DebugLoc Dl = MI.getDebugLoc();
12674     MachineRegisterInfo &RegInfo = F->getRegInfo();
12675     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12676     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
12677     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12678             MI.getOperand(0).getReg())
12679         .addReg(CRReg);
12680   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
12681     DebugLoc Dl = MI.getDebugLoc();
12682     unsigned Imm = MI.getOperand(1).getImm();
12683     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12684     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12685             MI.getOperand(0).getReg())
12686         .addReg(PPC::CR0EQ);
12687   } else if (MI.getOpcode() == PPC::SETRNDi) {
12688     DebugLoc dl = MI.getDebugLoc();
12689     Register OldFPSCRReg = MI.getOperand(0).getReg();
12690 
12691     // Save FPSCR value.
12692     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12693 
12694     // The floating point rounding mode is in bits 62:63 of the FPSCR, and has
12695     // the following settings:
12696     //   00 Round to nearest
12697     //   01 Round to 0
12698     //   10 Round to +inf
12699     //   11 Round to -inf
12700 
12701     // When the operand is an immediate, use its two least significant bits to
12702     // set bits 62:63 of the FPSCR.
12703     unsigned Mode = MI.getOperand(1).getImm();
12704     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12705       .addImm(31);
12706 
12707     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12708       .addImm(30);
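    // For example, an immediate of 3 emits "mtfsb1 31; mtfsb1 30", selecting
    // RN = 0b11 (round to -inf); an immediate of 0 emits "mtfsb0 31; mtfsb0 30"
    // (round to nearest).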
12709   } else if (MI.getOpcode() == PPC::SETRND) {
12710     DebugLoc dl = MI.getDebugLoc();
12711 
12712     // Copy a register from F8RCRegClass (SrcReg) to G8RCRegClass (DestReg),
12713     // or from G8RCRegClass (SrcReg) to F8RCRegClass (DestReg).
12714     // If the target doesn't support direct moves, we go through the stack to
12715     // do the conversion, because the target lacks instructions like mtvsrd
12716     // or mfvsrd that could do it directly.
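    // E.g. on a target without direct moves, an F8RC -> G8RC copy becomes a
    // stfd of SrcReg to a temporary stack slot followed by an ld of DestReg
    // from the same slot, as implemented below.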
12717     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12718       if (Subtarget.hasDirectMove()) {
12719         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12720           .addReg(SrcReg);
12721       } else {
12722         // Use stack to do the register copy.
12723         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12724         MachineRegisterInfo &RegInfo = F->getRegInfo();
12725         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12726         if (RC == &PPC::F8RCRegClass) {
12727           // Copy register from F8RCRegClass to G8RCRegClass.
12728           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12729                  "Unsupported RegClass.");
12730 
12731           StoreOp = PPC::STFD;
12732           LoadOp = PPC::LD;
12733         } else {
12734           // Copy register from G8RCRegClass to F8RCRegClass.
12735           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12736                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12737                  "Unsupported RegClass.");
12738         }
12739 
12740         MachineFrameInfo &MFI = F->getFrameInfo();
12741         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12742 
12743         MachineMemOperand *MMOStore = F->getMachineMemOperand(
12744             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12745             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12746             MFI.getObjectAlign(FrameIdx));
12747 
12748         // Store the SrcReg into the stack.
12749         BuildMI(*BB, MI, dl, TII->get(StoreOp))
12750           .addReg(SrcReg)
12751           .addImm(0)
12752           .addFrameIndex(FrameIdx)
12753           .addMemOperand(MMOStore);
12754 
12755         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12756             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12757             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12758             MFI.getObjectAlign(FrameIdx));
12759 
12760         // Load DestReg from the stack slot where SrcReg was stored; this
12761         // completes the register-class conversion from SrcReg's class to
12762         // DestReg's class.
12763         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12764           .addImm(0)
12765           .addFrameIndex(FrameIdx)
12766           .addMemOperand(MMOLoad);
12767       }
12768     };
12769 
12770     Register OldFPSCRReg = MI.getOperand(0).getReg();
12771 
12772     // Save FPSCR value.
12773     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12774 
12775     // When the operand is a GPRC register, use its two least significant bits
12776     // together with the mtfsf instruction to set bits 62:63 of the FPSCR.
12777     //
12778     // copy OldFPSCRTmpReg, OldFPSCRReg
12779     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12780     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12781     // copy NewFPSCRReg, NewFPSCRTmpReg
12782     // mtfsf 255, NewFPSCRReg
12783     MachineOperand SrcOp = MI.getOperand(1);
12784     MachineRegisterInfo &RegInfo = F->getRegInfo();
12785     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12786 
12787     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12788 
12789     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12790     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12791 
12792     // The first operand of INSERT_SUBREG must be a register that has
12793     // subregisters. Since we only care about its register class, an
12794     // IMPLICIT_DEF register is sufficient.
12795     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12796     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12797       .addReg(ImDefReg)
12798       .add(SrcOp)
12799       .addImm(1);
12800 
12801     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12802     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12803       .addReg(OldFPSCRTmpReg)
12804       .addReg(ExtSrcReg)
12805       .addImm(0)
12806       .addImm(62);
12807 
12808     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12809     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12810 
12811     // The mask 255 means that bits 32:63 of NewFPSCRReg are placed into bits
12812     // 32:63 of the FPSCR.
12813     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12814       .addImm(255)
12815       .addReg(NewFPSCRReg)
12816       .addImm(0)
12817       .addImm(0);
12818   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12819              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12820     return emitProbedAlloca(MI, BB);
12821   } else {
12822     llvm_unreachable("Unexpected instr type to insert");
12823   }
12824 
12825   MI.eraseFromParent(); // The pseudo instruction is gone now.
12826   return BB;
12827 }
12828 
12829 //===----------------------------------------------------------------------===//
12830 // Target Optimization Hooks
12831 //===----------------------------------------------------------------------===//
12832 
12833 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12834   // For the estimates, convergence is quadratic, so we essentially double the
12835   // number of digits correct after every iteration. For both FRE and FRSQRTE,
12836   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12837   // this is 2^-14. IEEE float has 23 fraction bits and double has 52.
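  // E.g. starting from 2^-5: one step gives ~2^-10, two ~2^-20, three ~2^-40,
  // which covers f32's 2^-24; f64's 2^-53 needs the extra step added below.
  // Starting from 2^-14, one step (~2^-28) suffices for f32 and two for f64.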
12838   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12839   if (VT.getScalarType() == MVT::f64)
12840     RefinementSteps++;
12841   return RefinementSteps;
12842 }
12843 
12844 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12845                                            int Enabled, int &RefinementSteps,
12846                                            bool &UseOneConstNR,
12847                                            bool Reciprocal) const {
12848   EVT VT = Operand.getValueType();
12849   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12850       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12851       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12852       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12853       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12854       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12855     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12856       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12857 
12858     // The Newton-Raphson computation with a single constant does not provide
12859     // enough accuracy on some CPUs.
12860     UseOneConstNR = !Subtarget.needsTwoConstNR();
12861     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12862   }
12863   return SDValue();
12864 }
12865 
12866 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12867                                             int Enabled,
12868                                             int &RefinementSteps) const {
12869   EVT VT = Operand.getValueType();
12870   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12871       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12872       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12873       (VT == MVT::v2f64 && Subtarget.hasVSX()) ||
12874       (VT == MVT::v4f32 && Subtarget.hasQPX()) ||
12875       (VT == MVT::v4f64 && Subtarget.hasQPX())) {
12876     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12877       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12878     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12879   }
12880   return SDValue();
12881 }
12882 
12883 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12884   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12885   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12886   // enabled for division), this functionality is redundant with the default
12887   // combiner logic (once the division -> reciprocal/multiply transformation
12888   // has taken place). As a result, this matters more for older cores than for
12889   // newer ones.
12890 
12891   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12892   // reciprocal if there are two or more FDIVs (for embedded cores with only
12893   // one FP pipeline) or three or more FDIVs (for generic OOO cores).
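  // E.g. once the threshold is met, "a/d; b/d; c/d" is rewritten by the
  // generic combiner as "r = 1.0/d; a*r; b*r; c*r".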
12894   switch (Subtarget.getCPUDirective()) {
12895   default:
12896     return 3;
12897   case PPC::DIR_440:
12898   case PPC::DIR_A2:
12899   case PPC::DIR_E500:
12900   case PPC::DIR_E500mc:
12901   case PPC::DIR_E5500:
12902     return 2;
12903   }
12904 }
12905 
12906 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12907 // collapsed, and so we need to look through chains of them.
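// For example, for (add (add X, 8), 16) this accumulates Base = X and
// Offset = 24.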
12908 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12909                                      int64_t& Offset, SelectionDAG &DAG) {
12910   if (DAG.isBaseWithConstantOffset(Loc)) {
12911     Base = Loc.getOperand(0);
12912     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12913 
12914     // The base might itself be a base plus an offset, and if so, accumulate
12915     // that as well.
12916     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12917   }
12918 }
12919 
12920 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12921                             unsigned Bytes, int Dist,
12922                             SelectionDAG &DAG) {
12923   if (VT.getSizeInBits() / 8 != Bytes)
12924     return false;
12925 
12926   SDValue BaseLoc = Base->getBasePtr();
12927   if (Loc.getOpcode() == ISD::FrameIndex) {
12928     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12929       return false;
12930     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12931     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12932     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12933     int FS  = MFI.getObjectSize(FI);
12934     int BFS = MFI.getObjectSize(BFI);
12935     if (FS != BFS || FS != (int)Bytes) return false;
12936     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12937   }
12938 
12939   SDValue Base1 = Loc, Base2 = BaseLoc;
12940   int64_t Offset1 = 0, Offset2 = 0;
12941   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12942   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12943   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12944     return true;
12945 
12946   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12947   const GlobalValue *GV1 = nullptr;
12948   const GlobalValue *GV2 = nullptr;
12949   Offset1 = 0;
12950   Offset2 = 0;
12951   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12952   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12953   if (isGA1 && isGA2 && GV1 == GV2)
12954     return Offset1 == (Offset2 + Dist*Bytes);
12955   return false;
12956 }
12957 
12958 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12959 // not enforce equality of the chain operands.
12960 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12961                             unsigned Bytes, int Dist,
12962                             SelectionDAG &DAG) {
12963   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12964     EVT VT = LS->getMemoryVT();
12965     SDValue Loc = LS->getBasePtr();
12966     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12967   }
12968 
12969   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12970     EVT VT;
12971     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12972     default: return false;
12973     case Intrinsic::ppc_qpx_qvlfd:
12974     case Intrinsic::ppc_qpx_qvlfda:
12975       VT = MVT::v4f64;
12976       break;
12977     case Intrinsic::ppc_qpx_qvlfs:
12978     case Intrinsic::ppc_qpx_qvlfsa:
12979       VT = MVT::v4f32;
12980       break;
12981     case Intrinsic::ppc_qpx_qvlfcd:
12982     case Intrinsic::ppc_qpx_qvlfcda:
12983       VT = MVT::v2f64;
12984       break;
12985     case Intrinsic::ppc_qpx_qvlfcs:
12986     case Intrinsic::ppc_qpx_qvlfcsa:
12987       VT = MVT::v2f32;
12988       break;
12989     case Intrinsic::ppc_qpx_qvlfiwa:
12990     case Intrinsic::ppc_qpx_qvlfiwz:
12991     case Intrinsic::ppc_altivec_lvx:
12992     case Intrinsic::ppc_altivec_lvxl:
12993     case Intrinsic::ppc_vsx_lxvw4x:
12994     case Intrinsic::ppc_vsx_lxvw4x_be:
12995       VT = MVT::v4i32;
12996       break;
12997     case Intrinsic::ppc_vsx_lxvd2x:
12998     case Intrinsic::ppc_vsx_lxvd2x_be:
12999       VT = MVT::v2f64;
13000       break;
13001     case Intrinsic::ppc_altivec_lvebx:
13002       VT = MVT::i8;
13003       break;
13004     case Intrinsic::ppc_altivec_lvehx:
13005       VT = MVT::i16;
13006       break;
13007     case Intrinsic::ppc_altivec_lvewx:
13008       VT = MVT::i32;
13009       break;
13010     }
13011 
13012     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
13013   }
13014 
13015   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
13016     EVT VT;
13017     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
13018     default: return false;
13019     case Intrinsic::ppc_qpx_qvstfd:
13020     case Intrinsic::ppc_qpx_qvstfda:
13021       VT = MVT::v4f64;
13022       break;
13023     case Intrinsic::ppc_qpx_qvstfs:
13024     case Intrinsic::ppc_qpx_qvstfsa:
13025       VT = MVT::v4f32;
13026       break;
13027     case Intrinsic::ppc_qpx_qvstfcd:
13028     case Intrinsic::ppc_qpx_qvstfcda:
13029       VT = MVT::v2f64;
13030       break;
13031     case Intrinsic::ppc_qpx_qvstfcs:
13032     case Intrinsic::ppc_qpx_qvstfcsa:
13033       VT = MVT::v2f32;
13034       break;
13035     case Intrinsic::ppc_qpx_qvstfiw:
13036     case Intrinsic::ppc_qpx_qvstfiwa:
13037     case Intrinsic::ppc_altivec_stvx:
13038     case Intrinsic::ppc_altivec_stvxl:
13039     case Intrinsic::ppc_vsx_stxvw4x:
13040       VT = MVT::v4i32;
13041       break;
13042     case Intrinsic::ppc_vsx_stxvd2x:
13043       VT = MVT::v2f64;
13044       break;
13045     case Intrinsic::ppc_vsx_stxvw4x_be:
13046       VT = MVT::v4i32;
13047       break;
13048     case Intrinsic::ppc_vsx_stxvd2x_be:
13049       VT = MVT::v2f64;
13050       break;
13051     case Intrinsic::ppc_altivec_stvebx:
13052       VT = MVT::i8;
13053       break;
13054     case Intrinsic::ppc_altivec_stvehx:
13055       VT = MVT::i16;
13056       break;
13057     case Intrinsic::ppc_altivec_stvewx:
13058       VT = MVT::i32;
13059       break;
13060     }
13061 
13062     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
13063   }
13064 
13065   return false;
13066 }
13067 
13068 // Return true if there is a nearby consecutive load to the one provided
13069 // (regardless of alignment). We search up and down the chain, looking through
13070 // token factors and other loads (but nothing else). As a result, a true result
13071 // indicates that it is safe to create a new consecutive load adjacent to the
13072 // load provided.
13073 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
13074   SDValue Chain = LD->getChain();
13075   EVT VT = LD->getMemoryVT();
13076 
13077   SmallSet<SDNode *, 16> LoadRoots;
13078   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
13079   SmallSet<SDNode *, 16> Visited;
13080 
13081   // First, search up the chain, branching to follow all token-factor operands.
13082   // If we find a consecutive load, then we're done, otherwise, record all
13083   // nodes just above the top-level loads and token factors.
13084   while (!Queue.empty()) {
13085     SDNode *ChainNext = Queue.pop_back_val();
13086     if (!Visited.insert(ChainNext).second)
13087       continue;
13088 
13089     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
13090       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13091         return true;
13092 
13093       if (!Visited.count(ChainLD->getChain().getNode()))
13094         Queue.push_back(ChainLD->getChain().getNode());
13095     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
13096       for (const SDUse &O : ChainNext->ops())
13097         if (!Visited.count(O.getNode()))
13098           Queue.push_back(O.getNode());
13099     } else
13100       LoadRoots.insert(ChainNext);
13101   }
13102 
13103   // Second, search down the chain, starting from the top-level nodes recorded
13104   // in the first phase. These top-level nodes are the nodes just above all
13105   // loads and token factors. Starting with their uses, recursively look through
13106   // all loads (just the chain uses) and token factors to find a consecutive
13107   // load.
13108   Visited.clear();
13109   Queue.clear();
13110 
13111   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
13112        IE = LoadRoots.end(); I != IE; ++I) {
13113     Queue.push_back(*I);
13114 
13115     while (!Queue.empty()) {
13116       SDNode *LoadRoot = Queue.pop_back_val();
13117       if (!Visited.insert(LoadRoot).second)
13118         continue;
13119 
13120       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
13121         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
13122           return true;
13123 
13124       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
13125            UE = LoadRoot->use_end(); UI != UE; ++UI)
13126         if (((isa<MemSDNode>(*UI) &&
13127             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
13128             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
13129           Queue.push_back(*UI);
13130     }
13131   }
13132 
13133   return false;
13134 }
13135 
13136 /// This function is called when we have proved that a SETCC node can be
13137 /// replaced by subtraction (and other supporting instructions) so that the
13138 /// result of the comparison is kept in a GPR instead of a CR. It is purely
13139 /// for codegen purposes and has some flags to guide the codegen process.
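/// For instance, assuming the largest legal integer type is i64 (so the shift
/// amount Size - 1 below is 63), (setult a, b) on i32 operands becomes
/// trunc(i1, (zext(a) - zext(b)) >> 63): the sign bit of the 64-bit
/// difference is precisely the unsigned less-than result.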
13140 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
13141                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
13142   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13143 
13144   // Zero-extend the operands to the largest legal integer type. They must
13145   // originally be of a strictly smaller size.
13146   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
13147                          DAG.getConstant(Size, DL, MVT::i32));
13148   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
13149                          DAG.getConstant(Size, DL, MVT::i32));
13150 
13151   // Swap the operands if needed, depending on the condition code.
13152   if (Swap)
13153     std::swap(Op0, Op1);
13154 
13155   // Subtract extended integers.
13156   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
13157 
13158   // Move the sign bit to the least significant position and zero out the rest.
13159   // Now the least significant bit carries the result of original comparison.
13160   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
13161                              DAG.getConstant(Size - 1, DL, MVT::i32));
13162   auto Final = Shifted;
13163 
13164   // Complement the result if needed, based on the condition code.
13165   if (Complement)
13166     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
13167                         DAG.getConstant(1, DL, MVT::i64));
13168 
13169   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
13170 }
13171 
13172 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
13173                                                   DAGCombinerInfo &DCI) const {
13174   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
13175 
13176   SelectionDAG &DAG = DCI.DAG;
13177   SDLoc DL(N);
13178 
13179   // The size of the integers being compared plays a critical role in the
13180   // following analysis, so we prefer to do this when all types are legal.
13181   if (!DCI.isAfterLegalizeDAG())
13182     return SDValue();
13183 
13184   // If all users of the SETCC extend its value to a legal integer type,
13185   // then we replace the SETCC with a subtraction.
13186   for (SDNode::use_iterator UI = N->use_begin(),
13187        UE = N->use_end(); UI != UE; ++UI) {
13188     if (UI->getOpcode() != ISD::ZERO_EXTEND)
13189       return SDValue();
13190   }
13191 
13192   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13193   auto OpSize = N->getOperand(0).getValueSizeInBits();
13194 
13195   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
13196 
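  // Each unsigned predicate below is rewritten in terms of "a < b": a <= b is
  // !(b < a) (swap + complement), a > b is b < a (swap), and a >= b is
  // !(a < b) (complement); hence the flag pairs passed to generateEquivalentSub.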
13197   if (OpSize < Size) {
13198     switch (CC) {
13199     default: break;
13200     case ISD::SETULT:
13201       return generateEquivalentSub(N, Size, false, false, DL, DAG);
13202     case ISD::SETULE:
13203       return generateEquivalentSub(N, Size, true, true, DL, DAG);
13204     case ISD::SETUGT:
13205       return generateEquivalentSub(N, Size, false, true, DL, DAG);
13206     case ISD::SETUGE:
13207       return generateEquivalentSub(N, Size, true, false, DL, DAG);
13208     }
13209   }
13210 
13211   return SDValue();
13212 }
13213 
13214 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
13215                                                   DAGCombinerInfo &DCI) const {
13216   SelectionDAG &DAG = DCI.DAG;
13217   SDLoc dl(N);
13218 
13219   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
13220   // If we're tracking CR bits, we need to be careful that we don't have:
13221   //   trunc(binary-ops(zext(x), zext(y)))
13222   // or
13223   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
13224   // such that we're unnecessarily moving things into GPRs when it would be
13225   // better to keep them in CR bits.
13226 
13227   // Note that trunc here can be an actual i1 trunc, or can be the effective
13228   // truncation that comes from a setcc or select_cc.
13229   if (N->getOpcode() == ISD::TRUNCATE &&
13230       N->getValueType(0) != MVT::i1)
13231     return SDValue();
13232 
13233   if (N->getOperand(0).getValueType() != MVT::i32 &&
13234       N->getOperand(0).getValueType() != MVT::i64)
13235     return SDValue();
13236 
13237   if (N->getOpcode() == ISD::SETCC ||
13238       N->getOpcode() == ISD::SELECT_CC) {
13239     // If we're looking at a comparison, then we need to make sure that the
13240     // high bits (all except for the first) don't affect the result.
13241     ISD::CondCode CC =
13242       cast<CondCodeSDNode>(N->getOperand(
13243         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
13244     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
13245 
13246     if (ISD::isSignedIntSetCC(CC)) {
13247       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
13248           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
13249         return SDValue();
13250     } else if (ISD::isUnsignedIntSetCC(CC)) {
13251       if (!DAG.MaskedValueIsZero(N->getOperand(0),
13252                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
13253           !DAG.MaskedValueIsZero(N->getOperand(1),
13254                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
13255         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
13256                                              : SDValue());
13257     } else {
13258       // This is neither a signed nor an unsigned comparison; just make sure
13259       // that the high bits are equal.
13260       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
13261       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
13262 
13263       // We don't really care about what is known about the first bit (if
13264       // anything), so clear it in all masks prior to comparing them.
13265       Op1Known.Zero.clearBit(0); Op1Known.One.clearBit(0);
13266       Op2Known.Zero.clearBit(0); Op2Known.One.clearBit(0);
13267 
13268       if (Op1Known.Zero != Op2Known.Zero || Op1Known.One != Op2Known.One)
13269         return SDValue();
13270     }
13271   }
13272 
13273   // We now know that the higher-order bits are irrelevant, we just need to
13274   // make sure that all of the intermediate operations are bit operations, and
13275   // all inputs are extensions.
13276   if (N->getOperand(0).getOpcode() != ISD::AND &&
13277       N->getOperand(0).getOpcode() != ISD::OR  &&
13278       N->getOperand(0).getOpcode() != ISD::XOR &&
13279       N->getOperand(0).getOpcode() != ISD::SELECT &&
13280       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
13281       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
13282       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
13283       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
13284       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
13285     return SDValue();
13286 
13287   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
13288       N->getOperand(1).getOpcode() != ISD::AND &&
13289       N->getOperand(1).getOpcode() != ISD::OR  &&
13290       N->getOperand(1).getOpcode() != ISD::XOR &&
13291       N->getOperand(1).getOpcode() != ISD::SELECT &&
13292       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
13293       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
13294       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
13295       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
13296       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
13297     return SDValue();
13298 
13299   SmallVector<SDValue, 4> Inputs;
13300   SmallVector<SDValue, 8> BinOps, PromOps;
13301   SmallPtrSet<SDNode *, 16> Visited;
13302 
13303   for (unsigned i = 0; i < 2; ++i) {
13304     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13305           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13306           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13307           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13308         isa<ConstantSDNode>(N->getOperand(i)))
13309       Inputs.push_back(N->getOperand(i));
13310     else
13311       BinOps.push_back(N->getOperand(i));
13312 
13313     if (N->getOpcode() == ISD::TRUNCATE)
13314       break;
13315   }
13316 
13317   // Visit all inputs, collect all binary operations (and, or, xor and
13318   // select) that are all fed by extensions.
13319   while (!BinOps.empty()) {
13320     SDValue BinOp = BinOps.back();
13321     BinOps.pop_back();
13322 
13323     if (!Visited.insert(BinOp.getNode()).second)
13324       continue;
13325 
13326     PromOps.push_back(BinOp);
13327 
13328     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13329       // The condition of the select is not promoted.
13330       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13331         continue;
13332       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13333         continue;
13334 
13335       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13336             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13337             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13338            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13339           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13340         Inputs.push_back(BinOp.getOperand(i));
13341       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13342                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13343                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13344                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13345                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
13346                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13347                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13348                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13349                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
13350         BinOps.push_back(BinOp.getOperand(i));
13351       } else {
13352         // We have an input that is not an extension or another binary
13353         // operation; we'll abort this transformation.
13354         return SDValue();
13355       }
13356     }
13357   }
13358 
13359   // Make sure that this is a self-contained cluster of operations (which
13360   // is not quite the same thing as saying that everything has only one
13361   // use).
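  // For example (illustrative): if one of the i1 extensions feeding this
  // cluster also feeds a node outside of it (say, a store), we cannot
  // replace it with its i1 operand without changing what that outside user
  // sees, so we give up on the transformation.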
13362   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13363     if (isa<ConstantSDNode>(Inputs[i]))
13364       continue;
13365 
13366     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13367                               UE = Inputs[i].getNode()->use_end();
13368          UI != UE; ++UI) {
13369       SDNode *User = *UI;
13370       if (User != N && !Visited.count(User))
13371         return SDValue();
13372 
13373       // Make sure that we're not going to promote the non-output-value
13374       // operand(s) of SELECT or SELECT_CC.
13375       // FIXME: Although we could sometimes handle this, and it does occur in
13376       // practice that one of the condition inputs to the select is also one of
13377       // the outputs, we currently can't deal with this.
13378       if (User->getOpcode() == ISD::SELECT) {
13379         if (User->getOperand(0) == Inputs[i])
13380           return SDValue();
13381       } else if (User->getOpcode() == ISD::SELECT_CC) {
13382         if (User->getOperand(0) == Inputs[i] ||
13383             User->getOperand(1) == Inputs[i])
13384           return SDValue();
13385       }
13386     }
13387   }
13388 
13389   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13390     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13391                               UE = PromOps[i].getNode()->use_end();
13392          UI != UE; ++UI) {
13393       SDNode *User = *UI;
13394       if (User != N && !Visited.count(User))
13395         return SDValue();
13396 
13397       // Make sure that we're not going to promote the non-output-value
13398       // operand(s) of SELECT or SELECT_CC.
13399       // FIXME: Although we could sometimes handle this, and it does occur in
13400       // practice that one of the condition inputs to the select is also one of
13401       // the outputs, we currently can't deal with this.
13402       if (User->getOpcode() == ISD::SELECT) {
13403         if (User->getOperand(0) == PromOps[i])
13404           return SDValue();
13405       } else if (User->getOpcode() == ISD::SELECT_CC) {
13406         if (User->getOperand(0) == PromOps[i] ||
13407             User->getOperand(1) == PromOps[i])
13408           return SDValue();
13409       }
13410     }
13411   }
13412 
13413   // Replace all inputs with the extension operand.
13414   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13415     // Constants may have users outside the cluster of to-be-promoted nodes,
13416     // and so we need to replace those as we do the promotions.
13417     if (isa<ConstantSDNode>(Inputs[i]))
13418       continue;
13420     DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
13421   }
13422 
13423   std::list<HandleSDNode> PromOpHandles;
13424   for (auto &PromOp : PromOps)
13425     PromOpHandles.emplace_back(PromOp);
13426 
13427   // Replace all operations (these are all the same, but have a different
13428   // (i1) return type). DAG.getNode will validate that the types of
13429   // a binary operator match, so go through the list in reverse so that
13430   // we've likely promoted both operands first. Any intermediate truncations or
13431   // extensions disappear.
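  // For example (illustrative): in (or (and x, y), z), where all leaves are
  // extensions from i1, the and is rebuilt as an i1 node before the or, so
  // by the time the or is rebuilt both of its operands already have type i1.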
13432   while (!PromOpHandles.empty()) {
13433     SDValue PromOp = PromOpHandles.back().getValue();
13434     PromOpHandles.pop_back();
13435 
13436     if (PromOp.getOpcode() == ISD::TRUNCATE ||
13437         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
13438         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
13439         PromOp.getOpcode() == ISD::ANY_EXTEND) {
13440       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
13441           PromOp.getOperand(0).getValueType() != MVT::i1) {
13442         // The operand is not yet ready (see comment below).
13443         PromOpHandles.emplace_front(PromOp);
13444         continue;
13445       }
13446 
13447       SDValue RepValue = PromOp.getOperand(0);
13448       if (isa<ConstantSDNode>(RepValue))
13449         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
13450 
13451       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
13452       continue;
13453     }
13454 
13455     unsigned C;
13456     switch (PromOp.getOpcode()) {
13457     default:             C = 0; break;
13458     case ISD::SELECT:    C = 1; break;
13459     case ISD::SELECT_CC: C = 2; break;
13460     }
13461 
13462     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13463          PromOp.getOperand(C).getValueType() != MVT::i1) ||
13464         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13465          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
13466       // The to-be-promoted operands of this node have not yet been
13467       // promoted (this should be rare because we're going through the
13468       // list backward, but if one of the operands has several users in
13469       // this cluster of to-be-promoted nodes, it is possible).
13470       PromOpHandles.emplace_front(PromOp);
13471       continue;
13472     }
13473 
13474     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13475                                 PromOp.getNode()->op_end());
13476 
13477     // If there are any constant inputs, make sure they're replaced now.
13478     for (unsigned i = 0; i < 2; ++i)
13479       if (isa<ConstantSDNode>(Ops[C+i]))
13480         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
13481 
13482     DAG.ReplaceAllUsesOfValueWith(PromOp,
13483       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
13484   }
13485 
13486   // Now we're left with the initial truncation itself.
13487   if (N->getOpcode() == ISD::TRUNCATE)
13488     return N->getOperand(0);
13489 
13490   // Otherwise, this is a comparison. The operands to be compared have just
13491   // changed type (to i1), but everything else is the same.
13492   return SDValue(N, 0);
13493 }
13494 
13495 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
13496                                                   DAGCombinerInfo &DCI) const {
13497   SelectionDAG &DAG = DCI.DAG;
13498   SDLoc dl(N);
13499 
13500   // If we're tracking CR bits, we need to be careful that we don't have:
13501   //   zext(binary-ops(trunc(x), trunc(y)))
13502   // or
13503   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
13504   // such that we're unnecessarily moving things into CR bits that can more
13505   // efficiently stay in GPRs. Note that if we're not certain that the high
13506   // bits are set as required by the final extension, we still may need to do
13507   // some masking to get the proper behavior.
13508 
13509   // This same functionality is important on PPC64 when dealing with
13510   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
13511   // the return values of functions. Because it is so similar, it is handled
13512   // here as well.
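  // For example (illustrative):
  //   (zext (or (trunc (i64 x)), (trunc (i64 y))))
  // where the or is i32 can instead compute the or directly on the i64
  // values, followed (only if needed) by a mask of the low 32 bits to
  // restore the zero-extension semantics.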
13513 
13514   if (N->getValueType(0) != MVT::i32 &&
13515       N->getValueType(0) != MVT::i64)
13516     return SDValue();
13517 
13518   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13519         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13520     return SDValue();
13521 
13522   if (N->getOperand(0).getOpcode() != ISD::AND &&
13523       N->getOperand(0).getOpcode() != ISD::OR  &&
13524       N->getOperand(0).getOpcode() != ISD::XOR &&
13525       N->getOperand(0).getOpcode() != ISD::SELECT &&
13526       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13527     return SDValue();
13528 
13529   SmallVector<SDValue, 4> Inputs;
13530   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13531   SmallPtrSet<SDNode *, 16> Visited;
13532 
13533   // Visit all inputs, collect all binary operations (and, or, xor and
13534   // select) that are all fed by truncations.
13535   while (!BinOps.empty()) {
13536     SDValue BinOp = BinOps.back();
13537     BinOps.pop_back();
13538 
13539     if (!Visited.insert(BinOp.getNode()).second)
13540       continue;
13541 
13542     PromOps.push_back(BinOp);
13543 
13544     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13545       // The condition of the select is not promoted.
13546       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13547         continue;
13548       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13549         continue;
13550 
13551       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13552           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13553         Inputs.push_back(BinOp.getOperand(i));
13554       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13555                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13556                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13557                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13558                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13559         BinOps.push_back(BinOp.getOperand(i));
13560       } else {
13561         // We have an input that is not a truncation or another binary
13562         // operation; we'll abort this transformation.
13563         return SDValue();
13564       }
13565     }
13566   }
13567 
13568   // The operands of a select that must be truncated when the select is
13569   // promoted because the operand is actually part of the to-be-promoted set.
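  // For example (illustrative): if one of the truncations being promoted
  // also feeds the condition of a select in this cluster, the select is
  // recorded here so that its condition operand can be truncated back to
  // the original type when the select is rebuilt.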
13570   DenseMap<SDNode *, EVT> SelectTruncOp[2];
13571 
13572   // Make sure that this is a self-contained cluster of operations (which
13573   // is not quite the same thing as saying that everything has only one
13574   // use).
13575   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13576     if (isa<ConstantSDNode>(Inputs[i]))
13577       continue;
13578 
13579     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13580                               UE = Inputs[i].getNode()->use_end();
13581          UI != UE; ++UI) {
13582       SDNode *User = *UI;
13583       if (User != N && !Visited.count(User))
13584         return SDValue();
13585 
13586       // If we're going to promote the non-output-value operand(s) of SELECT or
13587       // SELECT_CC, record them for truncation.
13588       if (User->getOpcode() == ISD::SELECT) {
13589         if (User->getOperand(0) == Inputs[i])
13590           SelectTruncOp[0].insert(std::make_pair(User,
13591                                     User->getOperand(0).getValueType()));
13592       } else if (User->getOpcode() == ISD::SELECT_CC) {
13593         if (User->getOperand(0) == Inputs[i])
13594           SelectTruncOp[0].insert(std::make_pair(User,
13595                                     User->getOperand(0).getValueType()));
13596         if (User->getOperand(1) == Inputs[i])
13597           SelectTruncOp[1].insert(std::make_pair(User,
13598                                     User->getOperand(1).getValueType()));
13599       }
13600     }
13601   }
13602 
13603   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13604     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13605                               UE = PromOps[i].getNode()->use_end();
13606          UI != UE; ++UI) {
13607       SDNode *User = *UI;
13608       if (User != N && !Visited.count(User))
13609         return SDValue();
13610 
13611       // If we're going to promote the non-output-value operand(s) of SELECT or
13612       // SELECT_CC, record them for truncation.
13613       if (User->getOpcode() == ISD::SELECT) {
13614         if (User->getOperand(0) == PromOps[i])
13615           SelectTruncOp[0].insert(std::make_pair(User,
13616                                     User->getOperand(0).getValueType()));
13617       } else if (User->getOpcode() == ISD::SELECT_CC) {
13618         if (User->getOperand(0) == PromOps[i])
13619           SelectTruncOp[0].insert(std::make_pair(User,
13620                                     User->getOperand(0).getValueType()));
13621         if (User->getOperand(1) == PromOps[i])
13622           SelectTruncOp[1].insert(std::make_pair(User,
13623                                     User->getOperand(1).getValueType()));
13624       }
13625     }
13626   }
13627 
13628   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13629   bool ReallyNeedsExt = false;
13630   if (N->getOpcode() != ISD::ANY_EXTEND) {
13631     // If any of the inputs is not already sign/zero extended, then
13632     // we'll still need to do that at the end.
13633     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13634       if (isa<ConstantSDNode>(Inputs[i]))
13635         continue;
13636 
13637       unsigned OpBits =
13638         Inputs[i].getOperand(0).getValueSizeInBits();
13639       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13640 
13641       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13642            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13643                                   APInt::getHighBitsSet(OpBits,
13644                                                         OpBits-PromBits))) ||
13645           (N->getOpcode() == ISD::SIGN_EXTEND &&
13646            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13647              (OpBits-(PromBits-1)))) {
13648         ReallyNeedsExt = true;
13649         break;
13650       }
13651     }
13652   }
13653 
13654   // Replace all inputs, either with the truncation operand, or a
13655   // truncation or extension to the final output type.
13656   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13657     // Constant inputs need to be replaced with the to-be-promoted nodes that
13658     // use them because they might have users outside of the cluster of
13659     // promoted nodes.
13660     if (isa<ConstantSDNode>(Inputs[i]))
13661       continue;
13662 
13663     SDValue InSrc = Inputs[i].getOperand(0);
13664     if (Inputs[i].getValueType() == N->getValueType(0))
13665       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13666     else if (N->getOpcode() == ISD::SIGN_EXTEND)
13667       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13668         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13669     else if (N->getOpcode() == ISD::ZERO_EXTEND)
13670       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13671         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13672     else
13673       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13674         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13675   }
13676 
13677   std::list<HandleSDNode> PromOpHandles;
13678   for (auto &PromOp : PromOps)
13679     PromOpHandles.emplace_back(PromOp);
13680 
13681   // Replace all operations (these are all the same, but have a different
13682   // (promoted) return type). DAG.getNode will validate that the types of
13683   // a binary operator match, so go through the list in reverse so that
13684   // we've likely promoted both operands first.
13685   while (!PromOpHandles.empty()) {
13686     SDValue PromOp = PromOpHandles.back().getValue();
13687     PromOpHandles.pop_back();
13688 
13689     unsigned C;
13690     switch (PromOp.getOpcode()) {
13691     default:             C = 0; break;
13692     case ISD::SELECT:    C = 1; break;
13693     case ISD::SELECT_CC: C = 2; break;
13694     }
13695 
13696     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13697          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13698         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13699          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13700       // The to-be-promoted operands of this node have not yet been
13701       // promoted (this should be rare because we're going through the
13702       // list backward, but if one of the operands has several users in
13703       // this cluster of to-be-promoted nodes, it is possible).
13704       PromOpHandles.emplace_front(PromOp);
13705       continue;
13706     }
13707 
13708     // For SELECT and SELECT_CC nodes, we do a similar check for any
13709     // to-be-promoted comparison inputs.
13710     if (PromOp.getOpcode() == ISD::SELECT ||
13711         PromOp.getOpcode() == ISD::SELECT_CC) {
13712       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13713            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13714           (SelectTruncOp[1].count(PromOp.getNode()) &&
13715            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13716         PromOpHandles.emplace_front(PromOp);
13717         continue;
13718       }
13719     }
13720 
13721     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13722                                 PromOp.getNode()->op_end());
13723 
13724     // If this node has constant inputs, then they'll need to be promoted here.
13725     for (unsigned i = 0; i < 2; ++i) {
13726       if (!isa<ConstantSDNode>(Ops[C+i]))
13727         continue;
13728       if (Ops[C+i].getValueType() == N->getValueType(0))
13729         continue;
13730 
13731       if (N->getOpcode() == ISD::SIGN_EXTEND)
13732         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13733       else if (N->getOpcode() == ISD::ZERO_EXTEND)
13734         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13735       else
13736         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13737     }
13738 
13739     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13740     // truncate them again to the original value type.
13741     if (PromOp.getOpcode() == ISD::SELECT ||
13742         PromOp.getOpcode() == ISD::SELECT_CC) {
13743       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13744       if (SI0 != SelectTruncOp[0].end())
13745         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13746       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13747       if (SI1 != SelectTruncOp[1].end())
13748         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13749     }
13750 
13751     DAG.ReplaceAllUsesOfValueWith(PromOp,
13752       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13753   }
13754 
13755   // Now we're left with the initial extension itself.
13756   if (!ReallyNeedsExt)
13757     return N->getOperand(0);
13758 
13759   // To zero extend, just mask off everything except for the first bit (in the
13760   // i1 case).
13761   if (N->getOpcode() == ISD::ZERO_EXTEND)
13762     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13763                        DAG.getConstant(APInt::getLowBitsSet(
13764                                          N->getValueSizeInBits(0), PromBits),
13765                                        dl, N->getValueType(0)));
13766 
13767   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13768          "Invalid extension type");
13769   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13770   SDValue ShiftCst =
13771       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13772   return DAG.getNode(
13773       ISD::SRA, dl, N->getValueType(0),
13774       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13775       ShiftCst);
13776 }
13777 
13778 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13779                                         DAGCombinerInfo &DCI) const {
13780   assert(N->getOpcode() == ISD::SETCC &&
13781          "Should be called with a SETCC node");
13782 
13783   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13784   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13785     SDValue LHS = N->getOperand(0);
13786     SDValue RHS = N->getOperand(1);
13787 
13788     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13789     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13790         LHS.hasOneUse())
13791       std::swap(LHS, RHS);
13792 
13793     // x == 0-y --> x+y == 0
13794     // x != 0-y --> x+y != 0
13795     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13796         RHS.hasOneUse()) {
13797       SDLoc DL(N);
13798       SelectionDAG &DAG = DCI.DAG;
13799       EVT VT = N->getValueType(0);
13800       EVT OpVT = LHS.getValueType();
13801       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13802       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13803     }
13804   }
13805 
13806   return DAGCombineTruncBoolExt(N, DCI);
13807 }
13808 
13809 // Is this an extending load from an f32 to an f64?
13810 static bool isFPExtLoad(SDValue Op) {
13811   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13812     return LD->getExtensionType() == ISD::EXTLOAD &&
13813       Op.getValueType() == MVT::f64;
13814   return false;
13815 }
13816 
13817 /// Reduces the number of fp-to-int conversions when building a vector.
13818 ///
13819 /// If this vector is built out of floating to integer conversions,
13820 /// transform it to a vector built out of floating point values followed by a
13821 /// single floating to integer conversion of the vector.
13822 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13823 /// becomes (fptosi (build_vector ($A, $B, ...)))
13824 SDValue PPCTargetLowering::
13825 combineElementTruncationToVectorTruncation(SDNode *N,
13826                                            DAGCombinerInfo &DCI) const {
13827   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13828          "Should be called with a BUILD_VECTOR node");
13829 
13830   SelectionDAG &DAG = DCI.DAG;
13831   SDLoc dl(N);
13832 
13833   SDValue FirstInput = N->getOperand(0);
13834   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13835          "The input operand must be an fp-to-int conversion.");
13836 
13837   // This combine happens after legalization so the fp_to_[su]i nodes are
13838   // already converted to PPCISD nodes.
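  // For example (illustrative), an f64 -> i32 fp_to_sint has already been
  // lowered to (PPCISD::MFVSR (PPCISD::FCTIWZ $src)) by the time we see it.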
13839   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13840   if (FirstConversion == PPCISD::FCTIDZ ||
13841       FirstConversion == PPCISD::FCTIDUZ ||
13842       FirstConversion == PPCISD::FCTIWZ ||
13843       FirstConversion == PPCISD::FCTIWUZ) {
13844     bool IsSplat = true;
13845     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13846       FirstConversion == PPCISD::FCTIWUZ;
13847     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13848     SmallVector<SDValue, 4> Ops;
13849     EVT TargetVT = N->getValueType(0);
13850     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13851       SDValue NextOp = N->getOperand(i);
13852       if (NextOp.getOpcode() != PPCISD::MFVSR)
13853         return SDValue();
13854       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13855       if (NextConversion != FirstConversion)
13856         return SDValue();
13857       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13858       // This is not valid if the input was originally double precision. It is
13859       // also not profitable to do unless this is an extending load, in which
13860       // case doing this combine will allow us to combine consecutive loads.
13861       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13862         return SDValue();
13863       if (N->getOperand(i) != FirstInput)
13864         IsSplat = false;
13865     }
13866 
13867     // If this is a splat, we leave it as-is since there will be only a single
13868     // fp-to-int conversion followed by a splat of the integer. This is better
13869     // for 32-bit and smaller ints and neutral for 64-bit ints.
13870     if (IsSplat)
13871       return SDValue();
13872 
13873     // Now that we know we have the right type of node, get its operands
13874     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13875       SDValue In = N->getOperand(i).getOperand(0);
13876       if (Is32Bit) {
13877         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13878         // here, we know that all inputs are extending loads so this is safe).
13879         if (In.isUndef())
13880           Ops.push_back(DAG.getUNDEF(SrcVT));
13881         else {
13882           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13883                                       MVT::f32, In.getOperand(0),
13884                                       DAG.getIntPtrConstant(1, dl));
13885           Ops.push_back(Trunc);
13886         }
13887       } else
13888         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13889     }
13890 
13891     unsigned Opcode;
13892     if (FirstConversion == PPCISD::FCTIDZ ||
13893         FirstConversion == PPCISD::FCTIWZ)
13894       Opcode = ISD::FP_TO_SINT;
13895     else
13896       Opcode = ISD::FP_TO_UINT;
13897 
13898     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13899     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13900     return DAG.getNode(Opcode, dl, TargetVT, BV);
13901   }
13902   return SDValue();
13903 }
13904 
13905 /// Reduce the number of loads when building a vector.
13906 ///
13907 /// Building a vector out of multiple loads can be converted to a load
13908 /// of the vector type if the loads are consecutive. If the loads are
13909 /// consecutive but in descending order, a shuffle is added at the end
13910 /// to reorder the vector.
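/// For example (illustrative):
///   (build_vector (load a), (load a+4), (load a+8), (load a+12))
/// becomes a single vector load from a, while the same loads in descending
/// address order become a vector load followed by a reversing shuffle.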
13911 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13912   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13913          "Should be called with a BUILD_VECTOR node");
13914 
13915   SDLoc dl(N);
13916 
13917   // Return early for non-byte-sized types, as they can't be consecutive.
13918   if (!N->getValueType(0).getVectorElementType().isByteSized())
13919     return SDValue();
13920 
13921   bool InputsAreConsecutiveLoads = true;
13922   bool InputsAreReverseConsecutive = true;
13923   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13924   SDValue FirstInput = N->getOperand(0);
13925   bool IsRoundOfExtLoad = false;
13926 
13927   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13928       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13929     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13930     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13931   }
13932   // Not a build vector of (possibly fp_rounded) loads.
13933   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13934       N->getNumOperands() == 1)
13935     return SDValue();
13936 
13937   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13938     // If any inputs are fp_round(extload), they all must be.
13939     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13940       return SDValue();
13941 
13942     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13943       N->getOperand(i);
13944     if (NextInput.getOpcode() != ISD::LOAD)
13945       return SDValue();
13946 
13947     SDValue PreviousInput =
13948       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13949     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13950     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13951 
13952     // If any inputs are fp_round(extload), they all must be.
13953     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13954       return SDValue();
13955 
13956     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13957       InputsAreConsecutiveLoads = false;
13958     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13959       InputsAreReverseConsecutive = false;
13960 
13961     // Exit early if the loads are neither consecutive nor reverse consecutive.
13962     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13963       return SDValue();
13964   }
13965 
13966   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13967          "The loads cannot be both consecutive and reverse consecutive.");
13968 
13969   SDValue FirstLoadOp =
13970     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13971   SDValue LastLoadOp =
13972     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13973                        N->getOperand(N->getNumOperands()-1);
13974 
13975   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13976   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13977   if (InputsAreConsecutiveLoads) {
13978     assert(LD1 && "Input needs to be a LoadSDNode.");
13979     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13980                        LD1->getBasePtr(), LD1->getPointerInfo(),
13981                        LD1->getAlignment());
13982   }
13983   if (InputsAreReverseConsecutive) {
13984     assert(LDL && "Input needs to be a LoadSDNode.");
13985     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13986                                LDL->getBasePtr(), LDL->getPointerInfo(),
13987                                LDL->getAlignment());
13988     SmallVector<int, 16> Ops;
13989     for (int i = N->getNumOperands() - 1; i >= 0; i--)
13990       Ops.push_back(i);
13991 
13992     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13993                                 DAG.getUNDEF(N->getValueType(0)), Ops);
13994   }
13995   return SDValue();
13996 }
13997 
13998 // This function adds the vector_shuffle needed to get the elements of the
13999 // vector extracts into the correct positions, as specified by the
14000 // CorrectElems encoding.
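// For example (illustrative, LE byte->word): if the build_vector extracts
// bytes 1, 5, 9 and 13 but the vector sign-extend instruction reads bytes
// 0, 4, 8 and 12, the added shuffle places byte 1 in lane 0, byte 5 in
// lane 4, and so on.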
14001 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
14002                                       SDValue Input, uint64_t Elems,
14003                                       uint64_t CorrectElems) {
14004   SDLoc dl(N);
14005 
14006   unsigned NumElems = Input.getValueType().getVectorNumElements();
14007   SmallVector<int, 16> ShuffleMask(NumElems, -1);
14008 
14009   // Knowing the element indices being extracted from the original
14010   // vector and the order in which they're being inserted, just put
14011   // them at the element indices required for the instruction.
14012   for (unsigned i = 0; i < N->getNumOperands(); i++) {
14013     if (DAG.getDataLayout().isLittleEndian())
14014       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
14015     else
14016       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
14017     CorrectElems = CorrectElems >> 8;
14018     Elems = Elems >> 8;
14019   }
14020 
14021   SDValue Shuffle =
14022       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
14023                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
14024 
14025   EVT VT = N->getValueType(0);
14026   SDValue Conv = DAG.getBitcast(VT, Shuffle);
14027 
14028   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
14029                                Input.getValueType().getVectorElementType(),
14030                                VT.getVectorNumElements());
14031   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
14032                      DAG.getValueType(ExtVT));
14033 }
14034 
14035 // Look for build vector patterns where input operands come from sign
14036 // extended vector_extract elements of specific indices. If the correct indices
14037 // aren't used, add a vector shuffle to fix up the indices and create a
14038 // SIGN_EXTEND_INREG node which selects the vector sign extend instructions
14039 // during instruction selection.
14040 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
14041   // This array encodes the indices that the vector sign extend instructions
14042   // extract from when extending from one type to another for both BE and LE.
14043   // The right nibble of each byte corresponds to the LE indices,
14044   // and the left nibble of each byte corresponds to the BE indices.
14045   // For example: 0x3074B8FC  byte->word
14046   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
14047   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
14048   // For example: 0x000070F8  byte->double word
14049   // For LE: the allowed indices are: 0x0,0x8
14050   // For BE: the allowed indices are: 0x7,0xF
14051   uint64_t TargetElems[] = {
14052       0x3074B8FC, // b->w
14053       0x000070F8, // b->d
14054       0x10325476, // h->w
14055       0x00003074, // h->d
14056       0x00001032, // w->d
14057   };
14058 
14059   uint64_t Elems = 0;
14060   int Index;
14061   SDValue Input;
14062 
14063   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
14064     if (!Op)
14065       return false;
14066     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
14067         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
14068       return false;
14069 
14070     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
14071     // of the right width.
14072     SDValue Extract = Op.getOperand(0);
14073     if (Extract.getOpcode() == ISD::ANY_EXTEND)
14074       Extract = Extract.getOperand(0);
14075     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14076       return false;
14077 
14078     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
14079     if (!ExtOp)
14080       return false;
14081 
14082     Index = ExtOp->getZExtValue();
14083     if (Input && Input != Extract.getOperand(0))
14084       return false;
14085 
14086     if (!Input)
14087       Input = Extract.getOperand(0);
14088 
14089     Elems = Elems << 8;
14090     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
14091     Elems |= Index;
14092 
14093     return true;
14094   };
14095 
14096   // If the build vector operands aren't sign-extended vector extracts
14097   // of the same input vector, then return.
14098   for (unsigned i = 0; i < N->getNumOperands(); i++) {
14099     if (!isSExtOfVecExtract(N->getOperand(i))) {
14100       return SDValue();
14101     }
14102   }
14103 
14104   // If the vector extract indices are not correct, add the appropriate
14105   // vector_shuffle.
14106   int TgtElemArrayIdx;
14107   int InputSize = Input.getValueType().getScalarSizeInBits();
14108   int OutputSize = N->getValueType(0).getScalarSizeInBits();
14109   if (InputSize + OutputSize == 40)
14110     TgtElemArrayIdx = 0;
14111   else if (InputSize + OutputSize == 72)
14112     TgtElemArrayIdx = 1;
14113   else if (InputSize + OutputSize == 48)
14114     TgtElemArrayIdx = 2;
14115   else if (InputSize + OutputSize == 80)
14116     TgtElemArrayIdx = 3;
14117   else if (InputSize + OutputSize == 96)
14118     TgtElemArrayIdx = 4;
14119   else
14120     return SDValue();
14121 
14122   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
14123   CorrectElems = DAG.getDataLayout().isLittleEndian()
14124                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
14125                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
14126   if (Elems != CorrectElems) {
14127     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
14128   }
14129 
14130   // Regular lowering will catch cases where a shuffle is not needed.
14131   return SDValue();
14132 }
14133 
14134 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
14135                                                  DAGCombinerInfo &DCI) const {
14136   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
14137          "Should be called with a BUILD_VECTOR node");
14138 
14139   SelectionDAG &DAG = DCI.DAG;
14140   SDLoc dl(N);
14141 
14142   if (!Subtarget.hasVSX())
14143     return SDValue();
14144 
14145   // The target independent DAG combiner will leave a build_vector of
14146   // float-to-int conversions intact. We can generate MUCH better code for
14147   // a float-to-int conversion of a vector of floats.
14148   SDValue FirstInput = N->getOperand(0);
14149   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
14150     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
14151     if (Reduced)
14152       return Reduced;
14153   }
14154 
14155   // If we're building a vector out of consecutive loads, just load that
14156   // vector type.
14157   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
14158   if (Reduced)
14159     return Reduced;
14160 
14161   // If we're building a vector out of extended elements from another vector
14162   // we have P9 vector integer extend instructions. The code assumes legal
14163   // input types (i.e. it can't handle things like v4i16), so do not run
14164   // before legalization.
14165   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
14166     Reduced = combineBVOfVecSExt(N, DAG);
14167     if (Reduced)
14168       return Reduced;
14169   }
14170 
14172   if (N->getValueType(0) != MVT::v2f64)
14173     return SDValue();
14174 
14175   // Looking for:
14176   // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
14177   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
14178       FirstInput.getOpcode() != ISD::UINT_TO_FP)
14179     return SDValue();
14180   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
14181       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
14182     return SDValue();
14183   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
14184     return SDValue();
14185 
14186   SDValue Ext1 = FirstInput.getOperand(0);
14187   SDValue Ext2 = N->getOperand(1).getOperand(0);
14188   if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
14189      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14190     return SDValue();
14191 
14192   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
14193   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
14194   if (!Ext1Op || !Ext2Op)
14195     return SDValue();
14196   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
14197       Ext1.getOperand(0) != Ext2.getOperand(0))
14198     return SDValue();
14199 
14200   int FirstElem = Ext1Op->getZExtValue();
14201   int SecondElem = Ext2Op->getZExtValue();
14202   int SubvecIdx;
14203   if (FirstElem == 0 && SecondElem == 1)
14204     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
14205   else if (FirstElem == 2 && SecondElem == 3)
14206     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
14207   else
14208     return SDValue();
14209 
14210   SDValue SrcVec = Ext1.getOperand(0);
14211   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
14212     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
14213   return DAG.getNode(NodeType, dl, MVT::v2f64,
14214                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
14215 }
14216 
14217 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
14218                                               DAGCombinerInfo &DCI) const {
14219   assert((N->getOpcode() == ISD::SINT_TO_FP ||
14220           N->getOpcode() == ISD::UINT_TO_FP) &&
14221          "Need an int -> FP conversion node here");
14222 
14223   if (useSoftFloat() || !Subtarget.has64BitSupport())
14224     return SDValue();
14225 
14226   SelectionDAG &DAG = DCI.DAG;
14227   SDLoc dl(N);
14228   SDValue Op(N, 0);
14229 
14230   // Don't handle ppc_fp128 here, or source types the hardware is not
14231   // capable of converting directly.
14232   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
14233     return SDValue();
14234   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
14235       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
14236     return SDValue();
14237 
14238   SDValue FirstOperand(Op.getOperand(0));
14239   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
14240     (FirstOperand.getValueType() == MVT::i8 ||
14241      FirstOperand.getValueType() == MVT::i16);
14242   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
14243     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
14244     bool DstDouble = Op.getValueType() == MVT::f64;
14245     unsigned ConvOp = Signed ?
14246       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
14247       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
14248     SDValue WidthConst =
14249       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
14250                             dl, false);
14251     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
14252     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
14253     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
14254                                          DAG.getVTList(MVT::f64, MVT::Other),
14255                                          Ops, MVT::i8, LDN->getMemOperand());
14256 
14257     // For signed conversion, we need to sign-extend the value in the VSR
14258     if (Signed) {
14259       SDValue ExtOps[] = { Ld, WidthConst };
14260       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
14261       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
14262     } else
14263       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
14264   }
14265 
14267   // For i32 intermediate values, unfortunately, the conversion functions
14268   // leave the upper 32 bits of the value undefined. Within the set of
14269   // scalar instructions, we have no method for zero- or sign-extending the
14270   // value. Thus, we cannot handle i32 intermediate values here.
14271   if (Op.getOperand(0).getValueType() == MVT::i32)
14272     return SDValue();
14273 
14274   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
14275          "UINT_TO_FP is supported only with FPCVT");
14276 
14277   // If we have FCFIDS, then use it when converting to single-precision.
14278   // Otherwise, convert to double-precision and then round.
14279   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14280                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
14281                                                             : PPCISD::FCFIDS)
14282                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
14283                                                             : PPCISD::FCFID);
14284   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14285                   ? MVT::f32
14286                   : MVT::f64;
14287 
14288   // If we're converting from a float, to an int, and back to a float again,
14289   // then we don't need the store/load pair at all.
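  // For example (illustrative):
  //   (f64 (sint_to_fp (fp_to_sint f64:$x)))
  // becomes (PPCISD::FCFID (PPCISD::FCTIDZ $x)) with no store/load pair.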
14290   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
14291        Subtarget.hasFPCVT()) ||
14292       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
14293     SDValue Src = Op.getOperand(0).getOperand(0);
14294     if (Src.getValueType() == MVT::f32) {
14295       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
14296       DCI.AddToWorklist(Src.getNode());
14297     } else if (Src.getValueType() != MVT::f64) {
14298       // Make sure that we don't pick up a ppc_fp128 source value.
14299       return SDValue();
14300     }
14301 
14302     unsigned FCTOp =
14303       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
14304                                                         PPCISD::FCTIDUZ;
14305 
14306     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
14307     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
14308 
14309     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
14310       FP = DAG.getNode(ISD::FP_ROUND, dl,
14311                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
14312       DCI.AddToWorklist(FP.getNode());
14313     }
14314 
14315     return FP;
14316   }
14317 
14318   return SDValue();
14319 }
14320 
14321 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
14322 // builtins) into loads with swaps.
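// For example (illustrative), an aligned v2f64 load becomes
// (PPCISD::XXSWAPD (PPCISD::LXVD2X ptr)); other vector types get a final
// bitcast from v2f64 back to the original type.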
14323 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
14324                                               DAGCombinerInfo &DCI) const {
14325   SelectionDAG &DAG = DCI.DAG;
14326   SDLoc dl(N);
14327   SDValue Chain;
14328   SDValue Base;
14329   MachineMemOperand *MMO;
14330 
14331   switch (N->getOpcode()) {
14332   default:
14333     llvm_unreachable("Unexpected opcode for little endian VSX load");
14334   case ISD::LOAD: {
14335     LoadSDNode *LD = cast<LoadSDNode>(N);
14336     Chain = LD->getChain();
14337     Base = LD->getBasePtr();
14338     MMO = LD->getMemOperand();
14339     // If the MMO suggests this isn't a load of a full vector, leave
14340     // things alone.  For a built-in, we have to make the change for
14341     // correctness, so if there is a size problem that will be a bug.
14342     if (MMO->getSize() < 16)
14343       return SDValue();
14344     break;
14345   }
14346   case ISD::INTRINSIC_W_CHAIN: {
14347     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14348     Chain = Intrin->getChain();
14349     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
14350     // us what we want. Get operand 2 instead.
14351     Base = Intrin->getOperand(2);
14352     MMO = Intrin->getMemOperand();
14353     break;
14354   }
14355   }
14356 
14357   MVT VecTy = N->getValueType(0).getSimpleVT();
14358 
14359   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
14360   // aligned and the type is a vector with elements up to 4 bytes.
14361   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14362       VecTy.getScalarSizeInBits() <= 32) {
14363     return SDValue();
14364   }
14365 
14366   SDValue LoadOps[] = { Chain, Base };
14367   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
14368                                          DAG.getVTList(MVT::v2f64, MVT::Other),
14369                                          LoadOps, MVT::v2f64, MMO);
14370 
14371   DCI.AddToWorklist(Load.getNode());
14372   Chain = Load.getValue(1);
14373   SDValue Swap = DAG.getNode(
14374       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
14375   DCI.AddToWorklist(Swap.getNode());
14376 
14377   // Add a bitcast if the resulting load type doesn't match v2f64.
14378   if (VecTy != MVT::v2f64) {
14379     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
14380     DCI.AddToWorklist(N.getNode());
14381     // Package {bitcast value, swap's chain} to match Load's shape.
14382     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
14383                        N, Swap.getValue(1));
14384   }
14385 
14386   return Swap;
14387 }
14388 
14389 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
14390 // builtins) into stores with swaps.
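// For example (illustrative), a v2f64 store becomes
// (PPCISD::STXVD2X (PPCISD::XXSWAPD $val), ptr); other vector types are
// first bitcast to v2f64.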
14391 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
14392                                                DAGCombinerInfo &DCI) const {
14393   SelectionDAG &DAG = DCI.DAG;
14394   SDLoc dl(N);
14395   SDValue Chain;
14396   SDValue Base;
14397   unsigned SrcOpnd;
14398   MachineMemOperand *MMO;
14399 
14400   switch (N->getOpcode()) {
14401   default:
14402     llvm_unreachable("Unexpected opcode for little endian VSX store");
14403   case ISD::STORE: {
14404     StoreSDNode *ST = cast<StoreSDNode>(N);
14405     Chain = ST->getChain();
14406     Base = ST->getBasePtr();
14407     MMO = ST->getMemOperand();
14408     SrcOpnd = 1;
14409     // If the MMO suggests this isn't a store of a full vector, leave
14410     // things alone.  For a built-in, we have to make the change for
14411     // correctness, so if there is a size problem that will be a bug.
14412     if (MMO->getSize() < 16)
14413       return SDValue();
14414     break;
14415   }
14416   case ISD::INTRINSIC_VOID: {
14417     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14418     Chain = Intrin->getChain();
14419     // Intrin->getBasePtr() oddly does not get what we want.
14420     Base = Intrin->getOperand(3);
14421     MMO = Intrin->getMemOperand();
14422     SrcOpnd = 2;
14423     break;
14424   }
14425   }
14426 
14427   SDValue Src = N->getOperand(SrcOpnd);
14428   MVT VecTy = Src.getValueType().getSimpleVT();
14429 
14430   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
14431   // aligned and the type is a vector with elements up to 4 bytes.
14432   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14433       VecTy.getScalarSizeInBits() <= 32) {
14434     return SDValue();
14435   }
14436 
14437   // All stores are done as v2f64, with a preceding bitcast when needed.
14438   if (VecTy != MVT::v2f64) {
14439     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
14440     DCI.AddToWorklist(Src.getNode());
14441   }
14442 
14443   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
14444                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
14445   DCI.AddToWorklist(Swap.getNode());
14446   Chain = Swap.getValue(1);
14447   SDValue StoreOps[] = { Chain, Swap, Base };
14448   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14449                                           DAG.getVTList(MVT::Other),
14450                                           StoreOps, VecTy, MMO);
14451   DCI.AddToWorklist(Store.getNode());
14452   return Store;
14453 }
14454 
14455 // Handle DAG combine for STORE (FP_TO_INT F).
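// For example (illustrative):
//   (store (i32 fp_to_sint f64:$x), ptr)
// becomes a PPCISD::ST_VSR_SCAL_INT memory intrinsic fed by
// PPCISD::FP_TO_SINT_IN_VSR, so the converted value never leaves the VSRs.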
14456 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
14457                                                DAGCombinerInfo &DCI) const {
14458 
14459   SelectionDAG &DAG = DCI.DAG;
14460   SDLoc dl(N);
14461   unsigned Opcode = N->getOperand(1).getOpcode();
14462 
14463   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
14464          && "Not a FP_TO_INT Instruction!");
14465 
14466   SDValue Val = N->getOperand(1).getOperand(0);
14467   EVT Op1VT = N->getOperand(1).getValueType();
14468   EVT ResVT = Val.getValueType();
14469 
14470   // Floating point types smaller than 32 bits are not legal on Power.
14471   if (ResVT.getScalarSizeInBits() < 32)
14472     return SDValue();
14473 
14474   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
14475   bool ValidTypeForStoreFltAsInt =
14476         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14477          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14478 
14479   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14480       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14481     return SDValue();
14482 
14483   // Extend f32 values to f64
14484   if (ResVT.getScalarSizeInBits() == 32) {
14485     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14486     DCI.AddToWorklist(Val.getNode());
14487   }
14488 
14489   // Set signed or unsigned conversion opcode.
14490   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14491                           PPCISD::FP_TO_SINT_IN_VSR :
14492                           PPCISD::FP_TO_UINT_IN_VSR;
14493 
14494   Val = DAG.getNode(ConvOpcode,
14495                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14496   DCI.AddToWorklist(Val.getNode());
14497 
14498   // Set number of bytes being converted.
14499   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14500   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14501                     DAG.getIntPtrConstant(ByteSize, dl, false),
14502                     DAG.getValueType(Op1VT) };
14503 
14504   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14505           DAG.getVTList(MVT::Other), Ops,
14506           cast<StoreSDNode>(N)->getMemoryVT(),
14507           cast<StoreSDNode>(N)->getMemOperand());
14508 
14509   DCI.AddToWorklist(Val.getNode());
14510   return Val;
14511 }
14512 
14513 static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
14514   // Check that the source of the element keeps flipping
14515   // (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
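  // For example, with NumElts == 4 the mask <0, 5, 2, 7> alternates between
  // the two sources, while <0, 1, 6, 7> does not.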
14516   bool PrevElemFromFirstVec = Mask[0] < NumElts;
14517   for (int i = 1, e = Mask.size(); i < e; i++) {
14518     if (PrevElemFromFirstVec && Mask[i] < NumElts)
14519       return false;
14520     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14521       return false;
14522     PrevElemFromFirstVec = !PrevElemFromFirstVec;
14523   }
14524   return true;
14525 }
14526 
14527 static bool isSplatBV(SDValue Op) {
14528   if (Op.getOpcode() != ISD::BUILD_VECTOR)
14529     return false;
14530   SDValue FirstOp;
14531 
14532   // Find first non-undef input.
14533   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14534     FirstOp = Op.getOperand(i);
14535     if (!FirstOp.isUndef())
14536       break;
14537   }
14538 
14539   // All inputs are undef or the same as the first non-undef input.
14540   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14541     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14542       return false;
14543   return true;
14544 }
14545 
14546 static SDValue isScalarToVec(SDValue Op) {
14547   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14548     return Op;
14549   if (Op.getOpcode() != ISD::BITCAST)
14550     return SDValue();
14551   Op = Op.getOperand(0);
14552   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14553     return Op;
14554   return SDValue();
14555 }
14556 
14557 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14558                                             int LHSMaxIdx, int RHSMinIdx,
14559                                             int RHSMaxIdx, int HalfVec) {
14560   for (int i = 0, e = ShuffV.size(); i < e; i++) {
14561     int Idx = ShuffV[i];
14562     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14563       ShuffV[i] += HalfVec;
14564   }
14566 }
14567 
14568 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14569 // the original is:
14570 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14571 // In such a case, just change the shuffle mask to extract the element
14572 // from the permuted index.
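// For example (illustrative, v2i64):
//   (scalar_to_vector (i64 (extract_elt v2i64:%a, 0)))
// becomes (vector_shuffle %a, %a, <-1, 0>), leaving the extracted element
// in the doubleword position that the permuted form expects.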
14573 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG) {
14574   SDLoc dl(OrigSToV);
14575   EVT VT = OrigSToV.getValueType();
14576   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14577          "Expecting a SCALAR_TO_VECTOR here");
14578   SDValue Input = OrigSToV.getOperand(0);
14579 
14580   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14581     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14582     SDValue OrigVector = Input.getOperand(0);
14583 
14584     // Can't handle non-const element indices or different vector types
14585     // for the input to the extract and the output of the scalar_to_vector.
14586     if (Idx && VT == OrigVector.getValueType()) {
14587       SmallVector<int, 16> NewMask(VT.getVectorNumElements(), -1);
14588       NewMask[VT.getVectorNumElements() / 2] = Idx->getZExtValue();
14589       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14590     }
14591   }
14592   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14593                      OrigSToV.getOperand(0));
14594 }
14595 
14596 // On little endian subtargets, combine shuffles such as:
14597 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14598 // into:
14599 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
14600 // because the latter can be matched to a single instruction merge.
14601 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14602 // to put the value into element zero. Adjust the shuffle mask so that the
14603 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
                                                SelectionDAG &DAG) const {
  SDValue LHS = SVN->getOperand(0);
  SDValue RHS = SVN->getOperand(1);
  auto Mask = SVN->getMask();
  int NumElts = LHS.getValueType().getVectorNumElements();
  SDValue Res(SVN, 0);
  SDLoc dl(SVN);

  // None of these combines are useful on big endian systems since the ISA
  // already has a big endian bias.
  if (!Subtarget.isLittleEndian() || !Subtarget.hasVSX())
    return Res;

  // If this is not a shuffle of a shuffle and the first element comes from
  // the second vector, canonicalize to the commuted form. This will make it
  // more likely to match one of the single instruction patterns.
  if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
      RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
    std::swap(LHS, RHS);
    Res = DAG.getCommutedVectorShuffle(*SVN);
    Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
  }

  // Adjust the shuffle mask if either input vector comes from a
  // SCALAR_TO_VECTOR and keep the respective input vector in permuted
  // form (to prevent the need for a swap).
  SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
  SDValue SToVLHS = isScalarToVec(LHS);
  SDValue SToVRHS = isScalarToVec(RHS);
  if (SToVLHS || SToVRHS) {
    int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
                            : SToVRHS.getValueType().getVectorNumElements();
    int NumEltsOut = ShuffV.size();

    // Initially assume that neither input is permuted. These will be adjusted
    // accordingly if either input is.
    int LHSMaxIdx = -1;
    int RHSMinIdx = -1;
    int RHSMaxIdx = -1;
    int HalfVec = LHS.getValueType().getVectorNumElements() / 2;

    // Get the permuted scalar to vector nodes for the source(s) that come from
    // ISD::SCALAR_TO_VECTOR.
    if (SToVLHS) {
      // Set up the values for the shuffle vector fixup.
      LHSMaxIdx = NumEltsOut / NumEltsIn;
      SToVLHS = getSToVPermuted(SToVLHS, DAG);
      if (SToVLHS.getValueType() != LHS.getValueType())
        SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
      LHS = SToVLHS;
    }
    if (SToVRHS) {
      RHSMinIdx = NumEltsOut;
      RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
      SToVRHS = getSToVPermuted(SToVRHS, DAG);
      if (SToVRHS.getValueType() != RHS.getValueType())
        SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
      RHS = SToVRHS;
    }

    // Fix up the shuffle mask to reflect where the desired element actually is.
    // The minimum and maximum indices that correspond to element zero for both
    // the LHS and RHS are computed and will control which shuffle mask entries
    // are to be changed. For example, if the RHS is permuted, any shuffle mask
    // entries in the range [RHSMinIdx,RHSMaxIdx) will be incremented by
    // HalfVec to refer to the corresponding element in the permuted vector.
    fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
                                    HalfVec);
    Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);

    // We may have simplified away the shuffle. We won't be able to do anything
    // further with it here.
    if (!isa<ShuffleVectorSDNode>(Res))
      return Res;
    Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
  }

  // The common case after we commuted the shuffle is that the RHS is a splat
  // and we have elements coming in from the splat at indices that are not
  // conducive to using a merge.
  // Example:
  // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
  if (!isSplatBV(RHS))
    return Res;

  // We are looking for a mask such that all even elements are from
  // one vector and all odd elements from the other.
  if (!isAlternatingShuffMask(Mask, NumElts))
    return Res;

  // Adjust the mask so we are pulling in the same index from the splat
  // as the index from the interesting vector in consecutive elements.
  // Example (even elements from first vector):
  // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
  if (Mask[0] < NumElts)
    for (int i = 1, e = Mask.size(); i < e; i += 2)
      ShuffV[i] = (ShuffV[i - 1] + NumElts);
  // Example (odd elements from first vector):
  // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
  else
    for (int i = 0, e = Mask.size(); i < e; i += 2)
      ShuffV[i] = (ShuffV[i + 1] + NumElts);

  // If the RHS has undefs, we need to remove them since we may have created
  // a shuffle that adds those instead of the splat value.
  SDValue SplatVal = cast<BuildVectorSDNode>(RHS.getNode())->getSplatValue();
  RHS = DAG.getSplatBuildVector(RHS.getValueType(), dl, SplatVal);

  Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
  return Res;
}

SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
                                                LSBaseSDNode *LSBase,
                                                DAGCombinerInfo &DCI) const {
  assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
         "Not a reverse memop pattern!");

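  // A mask is an element reverse if, read back to front, it counts up from
  // zero, i.e. Mask = <N-1, ..., 2, 1, 0>; for v4i32 that is <3, 2, 1, 0>.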
  auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
    auto Mask = SVN->getMask();
    int i = 0;
    auto I = Mask.rbegin();
    auto E = Mask.rend();

    for (; I != E; ++I) {
      if (*I != i)
        return false;
      i++;
    }
    return true;
  };

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = SVN->getValueType(0);

  if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
    return SDValue();

  // Before P9, we have the PPCVSXSwapRemoval pass to hack the element order.
  // See the comment in PPCVSXSwapRemoval.cpp. This combine conflicts with
  // that optimization, so we don't do it on pre-P9 subtargets.
  if (!Subtarget.hasP9Vector())
    return SDValue();

  if (!IsElementReverse(SVN))
    return SDValue();

  if (LSBase->getOpcode() == ISD::LOAD) {
    SDLoc dl(SVN);
    SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  if (LSBase->getOpcode() == ISD::STORE) {
    SDLoc dl(LSBase);
    SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
                          LSBase->getBasePtr()};
    return DAG.getMemIntrinsicNode(
        PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
        LSBase->getMemoryVT(), LSBase->getMemOperand());
  }

  llvm_unreachable("Expected a load or store node here");
}

SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case ISD::ADD:
    return combineADD(N, DCI);
  case ISD::SHL:
    return combineSHL(N, DCI);
  case ISD::SRA:
    return combineSRA(N, DCI);
  case ISD::SRL:
    return combineSRL(N, DCI);
  case ISD::MUL:
    return combineMUL(N, DCI);
  case ISD::FMA:
  case PPCISD::FNMSUB:
    return combineFMALike(N, DCI);
  case PPCISD::SHL:
    if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRL:
    if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
      return N->getOperand(0);
    break;
  case PPCISD::SRA:
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
      if (C->isNullValue() ||   //  0 >>s V -> 0.
          C->isAllOnesValue())  // -1 >>s V -> -1.
        return N->getOperand(0);
    }
    break;
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    return DAGCombineExtBoolTrunc(N, DCI);
  case ISD::TRUNCATE:
    return combineTRUNCATE(N, DCI);
  case ISD::SETCC:
    if (SDValue CSCC = combineSetCC(N, DCI))
      return CSCC;
    LLVM_FALLTHROUGH;
  case ISD::SELECT_CC:
    return DAGCombineTruncBoolExt(N, DCI);
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP:
    return combineFPToIntToFP(N, DCI);
  case ISD::VECTOR_SHUFFLE:
    if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
      LSBaseSDNode *LSBase = cast<LSBaseSDNode>(N->getOperand(0));
      return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
    }
    return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
  case ISD::STORE: {
    EVT Op1VT = N->getOperand(1).getValueType();
    unsigned Opcode = N->getOperand(1).getOpcode();

    if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
      SDValue Val = combineStoreFPToInt(N, DCI);
      if (Val)
        return Val;
    }

    if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
      ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
      SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
      if (Val)
        return Val;
    }

    // Turn STORE (BSWAP) -> sthbrx/stwbrx.
    if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
        N->getOperand(1).getNode()->hasOneUse() &&
        (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {

      // STBRX can only handle simple types and it makes no sense to store
      // fewer than two bytes in byte-reversed order.
      EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
      if (mVT.isExtended() || mVT.getSizeInBits() < 16)
        break;

      SDValue BSwapOp = N->getOperand(1).getOperand(0);
      // Do an any-extend to 32-bits if this is a half-word input.
      if (BSwapOp.getValueType() == MVT::i16)
        BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);

      // If the type of the BSWAP operand is wider than the stored memory
      // width, it needs to be shifted right before the STBRX.
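      // For example, a bswap of an i64 value stored as an i32 keeps only the
      // byte-reverse of the high 32 bits of the original value, so we shift
      // the pre-bswap operand right by 64 - 32 = 32 and truncate it to i32
      // before forming the STBRX.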
      if (Op1VT.bitsGT(mVT)) {
        int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
        BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
                              DAG.getConstant(Shift, dl, MVT::i32));
        // Need to truncate if this is a bswap of i64 stored as i32/i16.
        if (Op1VT == MVT::i64)
          BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
      }

      SDValue Ops[] = {
        N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
      };
      return
        DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
                                Ops, cast<StoreSDNode>(N)->getMemoryVT(),
                                cast<StoreSDNode>(N)->getMemOperand());
    }

    // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
    // This increases the chance of CSE for constant construction.
    if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
        isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
      // Need to sign-extend to 64 bits to handle negative values.
      EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
      uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
                                    MemVT.getSizeInBits());
      SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);

      // DAG.getTruncStore() can't be used here because it doesn't accept
      // the general (base + offset) addressing mode.
      // So we use UpdateNodeOperands and setTruncatingStore instead.
      DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
                             N->getOperand(3));
      cast<StoreSDNode>(N)->setTruncatingStore(true);
      return SDValue(N, 0);
    }

    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Op1VT.isSimple()) {
      MVT StoreVT = Op1VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
           StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
        return expandVSXStoreForLE(N, DCI);
    }
    break;
  }
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(N);
    EVT VT = LD->getValueType(0);

    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (VT.isSimple()) {
      MVT LoadVT = VT.getSimpleVT();
      if (Subtarget.needsSwapsForVSXMemOps() &&
          (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
           LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
        return expandVSXLoadForLE(N, DCI);
    }

    // We sometimes end up with a 64-bit integer load, from which we extract
    // two single-precision floating-point numbers. This happens with
    // std::complex<float>, and other similar structures, because of the way we
    // canonicalize structure copies. However, if we lack direct moves,
    // then the final bitcasts from the extracted integer values to the
    // floating-point numbers turn into store/load pairs. Even with direct
    // moves, just loading the two floating-point numbers is likely better.
    auto ReplaceTwoFloatLoad = [&]() {
      if (VT != MVT::i64)
        return false;

      if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
          LD->isVolatile())
        return false;

      //  We're looking for a sequence like this:
      //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
      //      t16: i64 = srl t13, Constant:i32<32>
      //    t17: i32 = truncate t16
      //  t18: f32 = bitcast t17
      //    t19: i32 = truncate t13
      //  t20: f32 = bitcast t19

      if (!LD->hasNUsesOfValue(2, 0))
        return false;

      auto UI = LD->use_begin();
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *Trunc = *UI++;
      while (UI.getUse().getResNo() != 0) ++UI;
      SDNode *RightShift = *UI;
      if (Trunc->getOpcode() != ISD::TRUNCATE)
        std::swap(Trunc, RightShift);

      if (Trunc->getOpcode() != ISD::TRUNCATE ||
          Trunc->getValueType(0) != MVT::i32 ||
          !Trunc->hasOneUse())
        return false;
      if (RightShift->getOpcode() != ISD::SRL ||
          !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
          RightShift->getConstantOperandVal(1) != 32 ||
          !RightShift->hasOneUse())
        return false;

      SDNode *Trunc2 = *RightShift->use_begin();
      if (Trunc2->getOpcode() != ISD::TRUNCATE ||
          Trunc2->getValueType(0) != MVT::i32 ||
          !Trunc2->hasOneUse())
        return false;

      SDNode *Bitcast = *Trunc->use_begin();
      SDNode *Bitcast2 = *Trunc2->use_begin();

      if (Bitcast->getOpcode() != ISD::BITCAST ||
          Bitcast->getValueType(0) != MVT::f32)
        return false;
      if (Bitcast2->getOpcode() != ISD::BITCAST ||
          Bitcast2->getValueType(0) != MVT::f32)
        return false;

      if (Subtarget.isLittleEndian())
        std::swap(Bitcast, Bitcast2);

      // Bitcast has the second float (in memory-layout order) and Bitcast2
      // has the first one.

      SDValue BasePtr = LD->getBasePtr();
      if (LD->isIndexed()) {
        assert(LD->getAddressingMode() == ISD::PRE_INC &&
               "Non-pre-inc AM on PPC?");
        BasePtr =
          DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
                      LD->getOffset());
      }

      auto MMOFlags =
          LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
      SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
                                      LD->getPointerInfo(), LD->getAlignment(),
                                      MMOFlags, LD->getAAInfo());
      SDValue AddPtr =
        DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
                    BasePtr, DAG.getIntPtrConstant(4, dl));
      SDValue FloatLoad2 = DAG.getLoad(
          MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
          LD->getPointerInfo().getWithOffset(4),
          MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());

      if (LD->isIndexed()) {
        // Note that DAGCombine should re-form any pre-increment load(s) from
        // what is produced here if that makes sense.
        DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
      }

      DCI.CombineTo(Bitcast2, FloatLoad);
      DCI.CombineTo(Bitcast, FloatLoad2);

      DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
                                    SDValue(FloatLoad2.getNode(), 1));
      return true;
    };

    if (ReplaceTwoFloatLoad())
      return SDValue(N, 0);

    EVT MemVT = LD->getMemoryVT();
    Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
    Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
    Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
    Align ScalarABIAlignment = DAG.getDataLayout().getABITypeAlign(STy);
    if (LD->isUnindexed() && VT.isVector() &&
        ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
          // P8 and later hardware should just use LOAD.
          !Subtarget.hasP8Vector() &&
          (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
           VT == MVT::v4f32)) ||
         (Subtarget.hasQPX() && (VT == MVT::v4f64 || VT == MVT::v4f32) &&
          LD->getAlign() >= ScalarABIAlignment)) &&
        LD->getAlign() < ABIAlignment) {
      // This is a type-legal unaligned Altivec or QPX load.
      SDValue Chain = LD->getChain();
      SDValue Ptr = LD->getBasePtr();
      bool isLittleEndian = Subtarget.isLittleEndian();

      // This implements the loading of unaligned vectors as described in
      // the venerable Apple Velocity Engine overview. Specifically:
      // https://developer.apple.com/hardwaredrivers/ve/alignment.html
      // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
      //
      // The general idea is to expand a sequence of one or more unaligned
      // loads into an alignment-based permutation-control instruction (lvsl
      // or lvsr), a series of regular vector loads (which always truncate
      // their input address to an aligned address), and a series of
      // permutations.  The results of these permutations are the requested
      // loaded values.  The trick is that the last "extra" load is not taken
      // from the address you might suspect (sizeof(vector) bytes after the
      // last requested load), but rather sizeof(vector) - 1 bytes after the
      // last requested vector. The point of this is to avoid a page fault if
      // the base address happened to be aligned. This works because if the
      // base address is aligned, then adding less than a full vector length
      // will cause the last vector in the sequence to be (re)loaded.
      // Otherwise, the next vector will be fetched as you might suspect was
      // necessary.
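      //
      // For example, a single unaligned Altivec v4i32 load from address A
      // expands to roughly:
      //   PermCntl = lvsl(A)       ; permute control from A's low 4 bits
      //   Base     = lvx(A)        ; loads the 16-byte block containing A
      //   Extra    = lvx(A + 15)   ; next block, or the same one again if A
      //                            ; is already 16-byte aligned
      //   Result   = vperm(Base, Extra, PermCntl)
      // (on little endian we use lvsr and swap the vperm inputs instead).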

      // We might be able to reuse the permutation generation from
      // a different base address offset from this one by an aligned amount.
      // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
      // optimization later.
      Intrinsic::ID Intr, IntrLD, IntrPerm;
      MVT PermCntlTy, PermTy, LDTy;
      if (Subtarget.hasAltivec()) {
        Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                              : Intrinsic::ppc_altivec_lvsl;
        IntrLD = Intrinsic::ppc_altivec_lvx;
        IntrPerm = Intrinsic::ppc_altivec_vperm;
        PermCntlTy = MVT::v16i8;
        PermTy = MVT::v4i32;
        LDTy = MVT::v4i32;
      } else {
        Intr =   MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlpcld :
                                       Intrinsic::ppc_qpx_qvlpcls;
        IntrLD = MemVT == MVT::v4f64 ? Intrinsic::ppc_qpx_qvlfd :
                                       Intrinsic::ppc_qpx_qvlfs;
        IntrPerm = Intrinsic::ppc_qpx_qvfperm;
        PermCntlTy = MVT::v4f64;
        PermTy = MVT::v4f64;
        LDTy = MemVT.getSimpleVT();
      }

      SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);

      // Create the new MMO for the new base load. It is like the original MMO,
      // but represents an area in memory almost twice the vector size centered
      // on the original address. If the address is unaligned, we might start
      // reading up to (sizeof(vector)-1) bytes below the address of the
      // original unaligned load.
      MachineFunction &MF = DAG.getMachineFunction();
      MachineMemOperand *BaseMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                -(long)MemVT.getStoreSize()+1,
                                2*MemVT.getStoreSize()-1);

      // Create the new base load.
      SDValue LDXIntID =
          DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
      SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue BaseLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                BaseLoadOps, LDTy, BaseMMO);

      // Note that the value of IncOffset (which is provided to the next
      // load's pointer info offset value, and thus used to calculate the
      // alignment), and the value of IncValue (which is actually used to
      // increment the pointer value) are different! This is because we
      // require the next load to appear to be aligned, even though it
      // is actually offset from the base pointer by a lesser amount.
      int IncOffset = VT.getSizeInBits() / 8;
      int IncValue = IncOffset;

      // Walk (both up and down) the chain looking for another load at the real
      // (aligned) offset (the alignment of the other load does not matter in
      // this case). If found, then do not use the offset reduction trick, as
      // that will prevent the loads from being later combined (as they would
      // otherwise be duplicates).
      if (!findConsecutiveLoad(LD, DAG))
        --IncValue;

      SDValue Increment =
          DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);

      MachineMemOperand *ExtraMMO =
        MF.getMachineMemOperand(LD->getMemOperand(),
                                1, 2*MemVT.getStoreSize()-1);
      SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
      SDValue ExtraLoad =
        DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
                                DAG.getVTList(PermTy, MVT::Other),
                                ExtraLoadOps, LDTy, ExtraMMO);

      SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                               BaseLoad.getValue(1), ExtraLoad.getValue(1));

      // Because vperm has a big-endian bias, we must reverse the order
      // of the input vectors and complement the permute control vector
      // when generating little endian code.  We have already handled the
      // latter by using lvsr instead of lvsl, so just reverse BaseLoad
      // and ExtraLoad here.
      SDValue Perm;
      if (isLittleEndian)
        Perm = BuildIntrinsicOp(IntrPerm,
                                ExtraLoad, BaseLoad, PermCntl, DAG, dl);
      else
        Perm = BuildIntrinsicOp(IntrPerm,
                                BaseLoad, ExtraLoad, PermCntl, DAG, dl);

      if (VT != PermTy)
        Perm = Subtarget.hasAltivec()
                   ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
                   // The FP_ROUND flag operand is 1 because this QPX rounding
                   // is always exact.
                   : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
                                 DAG.getTargetConstant(1, dl, MVT::i64));

      // The output of the permutation is our loaded result, the TokenFactor is
      // our new chain.
      DCI.CombineTo(N, Perm, TF);
      return SDValue(N, 0);
    }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    bool isLittleEndian = Subtarget.isLittleEndian();
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
                                         : Intrinsic::ppc_altivec_lvsl);
    if ((IID == Intr ||
         IID == Intrinsic::ppc_qpx_qvlpcld ||
         IID == Intrinsic::ppc_qpx_qvlpcls) &&
        N->getOperand(1)->getOpcode() == ISD::ADD) {
      SDValue Add = N->getOperand(1);

      int Bits = IID == Intrinsic::ppc_qpx_qvlpcld ?
                 5 /* 32 byte alignment */ : 4 /* 16 byte alignment */;

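      // If the constant offset added to the base pointer is a multiple of
      // the load's natural alignment (i.e. its low Bits bits are zero), the
      // permute control vector computed from (base + offset) is identical to
      // the one computed from the base alone, so an existing lvsl/lvsr of
      // the base can be reused.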
      if (DAG.MaskedValueIsZero(Add->getOperand(1),
                                APInt::getAllOnesValue(Bits /* alignment */)
                                    .zext(Add.getScalarValueSizeInBits()))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
                                  UE = BasePtr->use_end();
             UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
              cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() == IID) {
            // We've found another LVSL/LVSR, and this address is an aligned
            // multiple of that one. The results will be the same, so use the
            // one we've just found instead.
            return SDValue(*UI, 0);
          }
        }
      }

      if (isa<ConstantSDNode>(Add->getOperand(1))) {
        SDNode *BasePtr = Add->getOperand(0).getNode();
        for (SDNode::use_iterator UI = BasePtr->use_begin(),
                                  UE = BasePtr->use_end();
             UI != UE; ++UI) {
          if (UI->getOpcode() == ISD::ADD &&
              isa<ConstantSDNode>(UI->getOperand(1)) &&
              (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
               cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
                  (1ULL << Bits) == 0) {
            SDNode *OtherAdd = *UI;
            for (SDNode::use_iterator VI = OtherAdd->use_begin(),
                                      VE = OtherAdd->use_end();
                 VI != VE; ++VI) {
              if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
                  cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() ==
                      IID) {
                return SDValue(*VI, 0);
              }
            }
          }
        }
      }
    }

    // Combine vmaxsw/h/b(a, a's negation) to abs(a); this exposes the
    // vabsduw/h/b opportunity to downstream combines.
    if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
        (IID == Intrinsic::ppc_altivec_vmaxsw ||
         IID == Intrinsic::ppc_altivec_vmaxsh ||
         IID == Intrinsic::ppc_altivec_vmaxsb)) {
      SDValue V1 = N->getOperand(1);
      SDValue V2 = N->getOperand(2);
      if ((V1.getSimpleValueType() == MVT::v4i32 ||
           V1.getSimpleValueType() == MVT::v8i16 ||
           V1.getSimpleValueType() == MVT::v16i8) &&
          V1.getSimpleValueType() == V2.getSimpleValueType()) {
        // (0-a, a)
        if (V1.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
            V1.getOperand(1) == V2) {
          return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
        }
        // (a, 0-a)
        if (V2.getOpcode() == ISD::SUB &&
            ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
            V2.getOperand(1) == V1) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
        // (x-y, y-x)
        if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
            V1.getOperand(0) == V2.getOperand(1) &&
            V1.getOperand(1) == V2.getOperand(0)) {
          return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
        }
      }
    }
    break;
  }
  case ISD::INTRINSIC_W_CHAIN:
    // For little endian, VSX loads require generating lxvd2x/xxswapd.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_lxvw4x:
      case Intrinsic::ppc_vsx_lxvd2x:
        return expandVSXLoadForLE(N, DCI);
      }
    }
    break;
  case ISD::INTRINSIC_VOID:
    // For little endian, VSX stores require generating xxswapd/stxvd2x.
    // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
    if (Subtarget.needsSwapsForVSXMemOps()) {
      switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
      default:
        break;
      case Intrinsic::ppc_vsx_stxvw4x:
      case Intrinsic::ppc_vsx_stxvd2x:
        return expandVSXStoreForLE(N, DCI);
      }
    }
    break;
  case ISD::BSWAP:
    // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
    if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
        N->getOperand(0).hasOneUse() &&
        (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
         (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
          N->getValueType(0) == MVT::i64))) {
      SDValue Load = N->getOperand(0);
      LoadSDNode *LD = cast<LoadSDNode>(Load);
      // Create the byte-swapping load.
      SDValue Ops[] = {
        LD->getChain(),    // Chain
        LD->getBasePtr(),  // Ptr
        DAG.getValueType(N->getValueType(0)) // VT
      };
      SDValue BSLoad =
        DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
                                DAG.getVTList(N->getValueType(0) == MVT::i64 ?
                                              MVT::i64 : MVT::i32, MVT::Other),
                                Ops, LD->getMemoryVT(), LD->getMemOperand());

      // If this is an i16 load, insert the truncate.
      SDValue ResVal = BSLoad;
      if (N->getValueType(0) == MVT::i16)
        ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);

      // First, combine the bswap away.  This makes the value produced by the
      // load dead.
      DCI.CombineTo(N, ResVal);

      // Next, combine the load away, we give it a bogus result value but a real
      // chain result.  The result value is dead because the bswap is dead.
      DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));

      // Return N so it doesn't get rechecked!
      return SDValue(N, 0);
    }
    break;
  case PPCISD::VCMP:
    // If a VCMPo node already exists with exactly the same operands as this
    // node, use its result instead of this node (VCMPo computes both a CR6 and
    // a normal output).
    if (!N->getOperand(0).hasOneUse() &&
        !N->getOperand(1).hasOneUse() &&
        !N->getOperand(2).hasOneUse()) {

      // Scan all of the users of the LHS, looking for VCMPo's that match.
      SDNode *VCMPoNode = nullptr;

      SDNode *LHSN = N->getOperand(0).getNode();
      for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
           UI != E; ++UI)
        if (UI->getOpcode() == PPCISD::VCMPo &&
            UI->getOperand(1) == N->getOperand(1) &&
            UI->getOperand(2) == N->getOperand(2) &&
            UI->getOperand(0) == N->getOperand(0)) {
          VCMPoNode = *UI;
          break;
        }

      // If there is no VCMPo node, or if the flag value has a single use, don't
      // transform this.
      if (!VCMPoNode || VCMPoNode->hasNUsesOfValue(0, 1))
        break;

      // Look at the (necessarily single) use of the flag value.  If it has a
      // chain, this transformation is more complex.  Note that multiple things
      // could use the value result, which we should ignore.
      SDNode *FlagUser = nullptr;
      for (SDNode::use_iterator UI = VCMPoNode->use_begin();
           FlagUser == nullptr; ++UI) {
        assert(UI != VCMPoNode->use_end() && "Didn't find user!");
        SDNode *User = *UI;
        for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
          if (User->getOperand(i) == SDValue(VCMPoNode, 1)) {
            FlagUser = User;
            break;
          }
        }
      }

      // If the user is a MFOCRF instruction, we know this is safe.
      // Otherwise we give up for right now.
      if (FlagUser->getOpcode() == PPCISD::MFOCRF)
        return SDValue(VCMPoNode, 0);
    }
    break;
  case ISD::BRCOND: {
    SDValue Cond = N->getOperand(1);
    SDValue Target = N->getOperand(2);

    if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement) {

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
      assert(Cond.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
                         N->getOperand(0), Target);
    }
  }
  break;
  case ISD::BR_CC: {
    // If this is a branch on an altivec predicate comparison, lower this so
    // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
    // lowering is done pre-legalize, because the legalizer lowers the predicate
    // compare down to code that is difficult to reassemble.
    ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
    SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);

    // Sometimes the promoted value of the intrinsic is ANDed by some non-zero
    // value. If so, pass-through the AND to get to the intrinsic.
    if (LHS.getOpcode() == ISD::AND &&
        LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(LHS.getOperand(1)) &&
        !isNullConstant(LHS.getOperand(1)))
      LHS = LHS.getOperand(0);

    if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
        cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
          Intrinsic::loop_decrement &&
        isa<ConstantSDNode>(RHS)) {
      assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
             "Counter decrement comparison is not EQ or NE");

      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      bool isBDNZ = (CC == ISD::SETEQ && Val) ||
                    (CC == ISD::SETNE && !Val);

      // We now need to make the intrinsic dead (it cannot be instruction
      // selected).
      DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
      assert(LHS.getNode()->hasOneUse() &&
             "Counter decrement has more than one use");

      return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
                         N->getOperand(0), N->getOperand(4));
    }

    int CompareOpc;
    bool isDot;

    if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
        isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
        getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
      assert(isDot && "Can't compare against a vector result!");

      // If this is a comparison against something other than 0/1, then we know
      // that the condition is never/always true.
      unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
      if (Val != 0 && Val != 1) {
        if (CC == ISD::SETEQ)      // Cond never true, remove branch.
          return N->getOperand(0);
        // Always !=, turn it into an unconditional branch.
        return DAG.getNode(ISD::BR, dl, MVT::Other,
                           N->getOperand(0), N->getOperand(4));
      }

      bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);

      // Create the PPCISD altivec 'dot' comparison node.
      SDValue Ops[] = {
        LHS.getOperand(2),  // LHS of compare
        LHS.getOperand(3),  // RHS of compare
        DAG.getConstant(CompareOpc, dl, MVT::i32)
      };
      EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
      SDValue CompNode = DAG.getNode(PPCISD::VCMPo, dl, VTs, Ops);

      // Unpack the result based on how the target uses it.
      PPC::Predicate CompOpc;
      switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
      default:  // Can't happen, don't crash on invalid number though.
      case 0:   // Branch on the value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
        break;
      case 1:   // Branch on the inverted value of the EQ bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
        break;
      case 2:   // Branch on the value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
        break;
      case 3:   // Branch on the inverted value of the LT bit of CR6.
        CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
        break;
      }

      return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
                         DAG.getConstant(CompOpc, dl, MVT::i32),
                         DAG.getRegister(PPC::CR6, MVT::i32),
                         N->getOperand(4), CompNode.getValue(1));
    }
    break;
  }
  case ISD::BUILD_VECTOR:
    return DAGCombineBuildVector(N, DCI);
  case ISD::ABS:
    return combineABS(N, DCI);
  case ISD::VSELECT:
    return combineVSelect(N, DCI);
  }

  return SDValue();
}

SDValue
PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  // fold (sdiv X, pow2)
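  // For example, (sdiv X, 8) becomes (PPCISD::SRA_ADDZE X, 3), i.e. an
  // arithmetic shift right by log2(8) = 3 followed by adding back the
  // carry-out so that negative dividends round toward zero; (sdiv X, -8)
  // additionally negates that result.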
  EVT VT = N->getValueType(0);
  if (VT == MVT::i64 && !Subtarget.isPPC64())
    return SDValue();
  if ((VT != MVT::i32 && VT != MVT::i64) ||
      !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);

  bool IsNegPow2 = (-Divisor).isPowerOf2();
  unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
  SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);

  SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
  Created.push_back(Op.getNode());

  if (IsNegPow2) {
    Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
    Created.push_back(Op.getNode());
  }

  return Op;
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      KnownBits &Known,
                                                      const APInt &DemandedElts,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  Known.resetAll();
  switch (Op.getOpcode()) {
  default: break;
  case PPCISD::LBRX: {
    // lhbrx is known to have the top bits cleared out.
    if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
      Known.Zero = 0xFFFF0000;
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    default: break;
    case Intrinsic::ppc_altivec_vcmpbfp_p:
    case Intrinsic::ppc_altivec_vcmpeqfp_p:
    case Intrinsic::ppc_altivec_vcmpequb_p:
    case Intrinsic::ppc_altivec_vcmpequh_p:
    case Intrinsic::ppc_altivec_vcmpequw_p:
    case Intrinsic::ppc_altivec_vcmpequd_p:
    case Intrinsic::ppc_altivec_vcmpgefp_p:
    case Intrinsic::ppc_altivec_vcmpgtfp_p:
    case Intrinsic::ppc_altivec_vcmpgtsb_p:
    case Intrinsic::ppc_altivec_vcmpgtsh_p:
    case Intrinsic::ppc_altivec_vcmpgtsw_p:
    case Intrinsic::ppc_altivec_vcmpgtsd_p:
    case Intrinsic::ppc_altivec_vcmpgtub_p:
    case Intrinsic::ppc_altivec_vcmpgtuh_p:
    case Intrinsic::ppc_altivec_vcmpgtuw_p:
    case Intrinsic::ppc_altivec_vcmpgtud_p:
      Known.Zero = ~1U;  // All bits but the low one are known to be zero.
      break;
    }
  }
  }
}

Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
  switch (Subtarget.getCPUDirective()) {
  default: break;
  case PPC::DIR_970:
  case PPC::DIR_PWR4:
  case PPC::DIR_PWR5:
  case PPC::DIR_PWR5X:
  case PPC::DIR_PWR6:
  case PPC::DIR_PWR6X:
  case PPC::DIR_PWR7:
  case PPC::DIR_PWR8:
  case PPC::DIR_PWR9:
  case PPC::DIR_PWR10:
  case PPC::DIR_PWR_FUTURE: {
    if (!ML)
      break;

    if (!DisableInnermostLoopAlign32) {
      // If the nested loop is an innermost loop, prefer a 32-byte alignment
      // so that we can decrease cache misses and branch-prediction misses.
      // Actual alignment of the loop will depend on the hotness check and
      // other logic in alignBlocks.
      if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
        return Align(32);
    }

    const PPCInstrInfo *TII = Subtarget.getInstrInfo();

    // For small loops (between 5 and 8 instructions), align to a 32-byte
    // boundary so that the entire loop fits in one instruction-cache line.
    uint64_t LoopSize = 0;
    for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
      for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
        LoopSize += TII->getInstSizeInBytes(*J);
        if (LoopSize > 32)
          break;
      }

    if (LoopSize > 16 && LoopSize <= 32)
      return Align(32);

    break;
  }
  }

  return TargetLowering::getPrefLoopAlignment(ML);
}

/// getConstraintType - Given a constraint, return the type of
/// constraint it is for this target.
PPCTargetLowering::ConstraintType
PPCTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default: break;
    case 'b':
    case 'r':
    case 'f':
    case 'd':
    case 'v':
    case 'y':
      return C_RegisterClass;
    case 'Z':
      // FIXME: While Z does indicate a memory constraint, it specifically
      // indicates an r+r address (used in conjunction with the 'y' modifier
      // in the replacement string). Currently, we're forcing the base
      // register to be r0 in the asm printer (which is interpreted as zero)
      // and forming the complete address in the second register. This is
      // suboptimal.
      return C_Memory;
    }
  } else if (Constraint == "wc") { // individual CR bits.
    return C_RegisterClass;
  } else if (Constraint == "wa" || Constraint == "wd" ||
             Constraint == "wf" || Constraint == "ws" ||
             Constraint == "wi" || Constraint == "ww") {
    return C_RegisterClass; // VSX registers.
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
PPCTargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();

  // Look at the constraint type.
  if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
    return CW_Register; // an individual CR bit.
  else if ((StringRef(constraint) == "wa" ||
            StringRef(constraint) == "wd" ||
            StringRef(constraint) == "wf") &&
           type->isVectorTy())
    return CW_Register;
  else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
    return CW_Register; // VSX registers holding 64-bit integer data.
  else if (StringRef(constraint) == "ws" && type->isDoubleTy())
    return CW_Register;
  else if (StringRef(constraint) == "ww" && type->isFloatTy())
    return CW_Register;

  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'b':
    if (type->isIntegerTy())
      weight = CW_Register;
    break;
  case 'f':
    if (type->isFloatTy())
      weight = CW_Register;
    break;
  case 'd':
    if (type->isDoubleTy())
      weight = CW_Register;
    break;
  case 'v':
    if (type->isVectorTy())
      weight = CW_Register;
    break;
  case 'y':
    weight = CW_Register;
    break;
  case 'Z':
    weight = CW_Memory;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  if (Constraint.size() == 1) {
    // GCC RS6000 Constraint Letters
    switch (Constraint[0]) {
    case 'b':   // R1-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
      return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
    case 'r':   // R0-R31
      if (VT == MVT::i64 && Subtarget.isPPC64())
        return std::make_pair(0U, &PPC::G8RCRegClass);
      return std::make_pair(0U, &PPC::GPRCRegClass);
    // 'd' and 'f' constraints are both defined to be "the floating point
    // registers", where one is for 32-bit and the other for 64-bit. We don't
    // really care overly much here so just give them all the same reg classes.
    case 'd':
    case 'f':
      if (Subtarget.hasSPE()) {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::GPRCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::SPERCRegClass);
      } else {
        if (VT == MVT::f32 || VT == MVT::i32)
          return std::make_pair(0U, &PPC::F4RCRegClass);
        if (VT == MVT::f64 || VT == MVT::i64)
          return std::make_pair(0U, &PPC::F8RCRegClass);
        if (VT == MVT::v4f64 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QFRCRegClass);
        if (VT == MVT::v4f32 && Subtarget.hasQPX())
          return std::make_pair(0U, &PPC::QSRCRegClass);
      }
      break;
    case 'v':
      if (VT == MVT::v4f64 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QFRCRegClass);
      if (VT == MVT::v4f32 && Subtarget.hasQPX())
        return std::make_pair(0U, &PPC::QSRCRegClass);
      if (Subtarget.hasAltivec())
        return std::make_pair(0U, &PPC::VRRCRegClass);
      break;
    case 'y':   // crrc
      return std::make_pair(0U, &PPC::CRRCRegClass);
    }
  } else if (Constraint == "wc" && Subtarget.useCRBits()) {
    // An individual CR bit.
    return std::make_pair(0U, &PPC::CRBITRCRegClass);
  } else if ((Constraint == "wa" || Constraint == "wd" ||
              Constraint == "wf" || Constraint == "wi") &&
             Subtarget.hasVSX()) {
    return std::make_pair(0U, &PPC::VSRCRegClass);
  } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
    if (VT == MVT::f32 && Subtarget.hasP8Vector())
      return std::make_pair(0U, &PPC::VSSRCRegClass);
    else
      return std::make_pair(0U, &PPC::VSFRCRegClass);
  }

  // If we name a VSX register, we can't defer to the base class because it
  // will not recognize the correct register (their names will be VSL{0-31}
  // and V{0-31} so they won't match). So we match them here.
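  // For example, the constraint "{vs35}" parses to VSNum = 35, which maps to
  // the Altivec register V3 (vs32-vs63 overlap v0-v31), while "{vs17}" maps
  // to VSL17.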
  if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
    int VSNum = atoi(Constraint.data() + 3);
    assert(VSNum >= 0 && VSNum <= 63 &&
           "Attempted to access a vsr out of range");
    if (VSNum < 32)
      return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
    return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
  }
  std::pair<unsigned, const TargetRegisterClass *> R =
      TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
  // (which we call X[0-9]+). If a 64-bit value has been requested, and a
  // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
  // register.
  // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
  // the AsmName field from *RegisterInfo.td, then this would not be necessary.
  if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
      PPC::GPRCRegClass.contains(R.first))
    return std::make_pair(TRI->getMatchingSuperReg(R.first,
                            PPC::sub_32, &PPC::G8RCRegClass),
                          &PPC::G8RCRegClass);

  // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
  if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
    R.first = PPC::CR0;
    R.second = &PPC::CRRCRegClass;
  }

  return R;
}

15815 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15816 /// vector.  If it is invalid, don't add anything to Ops.
15817 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15818                                                      std::string &Constraint,
15819                                                      std::vector<SDValue> &Ops,
15820                                                      SelectionDAG &DAG) const {
15821   SDValue Result;
15822 
15823   // Only support length 1 constraints.
15824   if (Constraint.length() > 1) return;
15825 
15826   char Letter = Constraint[0];
15827   switch (Letter) {
15828   default: break;
15829   case 'I':
15830   case 'J':
15831   case 'K':
15832   case 'L':
15833   case 'M':
15834   case 'N':
15835   case 'O':
15836   case 'P': {
15837     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15838     if (!CST) return; // Must be an immediate to match.
15839     SDLoc dl(Op);
15840     int64_t Value = CST->getSExtValue();
15841     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15842                          // numbers are printed as such.
15843     switch (Letter) {
15844     default: llvm_unreachable("Unknown constraint letter!");
15845     case 'I':  // "I" is a signed 16-bit constant.
15846       if (isInt<16>(Value))
15847         Result = DAG.getTargetConstant(Value, dl, TCVT);
15848       break;
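    // For example (a sketch): asm("addi %0, %1, %2" : "=r"(r) : "r"(x), "I"(42))
    // reaches this case with Value == 42 and emits it as a target constant.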
15849     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
15850       if (isShiftedUInt<16, 16>(Value))
15851         Result = DAG.getTargetConstant(Value, dl, TCVT);
15852       break;
15853     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
15854       if (isShiftedInt<16, 16>(Value))
15855         Result = DAG.getTargetConstant(Value, dl, TCVT);
15856       break;
15857     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
15858       if (isUInt<16>(Value))
15859         Result = DAG.getTargetConstant(Value, dl, TCVT);
15860       break;
15861     case 'M':  // "M" is a constant that is greater than 31.
15862       if (Value > 31)
15863         Result = DAG.getTargetConstant(Value, dl, TCVT);
15864       break;
15865     case 'N':  // "N" is a positive constant that is an exact power of two.
15866       if (Value > 0 && isPowerOf2_64(Value))
15867         Result = DAG.getTargetConstant(Value, dl, TCVT);
15868       break;
15869     case 'O':  // "O" is the constant zero.
15870       if (Value == 0)
15871         Result = DAG.getTargetConstant(Value, dl, TCVT);
15872       break;
15873     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
15874       if (isInt<16>(-Value))
15875         Result = DAG.getTargetConstant(Value, dl, TCVT);
15876       break;
15877     }
15878     break;
15879   }
15880   }
15881 
15882   if (Result.getNode()) {
15883     Ops.push_back(Result);
15884     return;
15885   }
15886 
15887   // Handle standard constraint letters.
15888   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15889 }
15890 
15891 // isLegalAddressingMode - Return true if the addressing mode represented
15892 // by AM is legal for this target, for a load/store of the specified type.
15893 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15894                                               const AddrMode &AM, Type *Ty,
15895                                               unsigned AS, Instruction *I) const {
15896   // PPC does not allow r+i addressing modes for vectors!
15897   if (Ty->isVectorTy() && AM.BaseOffs != 0)
15898     return false;
15899 
15900   // PPC allows a sign-extended 16-bit immediate field.
15901   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15902     return false;
15903 
15904   // No global is ever allowed as a base.
15905   if (AM.BaseGV)
15906     return false;
15907 
15908   // PPC supports only r+r and r+i addressing:
15909   switch (AM.Scale) {
15910   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15911     break;
15912   case 1:
15913     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15914       return false;
15915     // Otherwise we have r+r or r+i.
15916     break;
15917   case 2:
15918     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15919       return false;
15920     // Allow 2*r as r+r.
15921     break;
15922   default:
15923     // No other scales are supported.
15924     return false;
15925   }
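  // Accepted shapes at this point include, e.g., "ld r3, 16(r4)"-style r+i
  // (Scale == 0 with a signed 16-bit offset) and "ldx r3, r4, r5"-style r+r
  // (Scale == 1); this list is illustrative, not exhaustive.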
15926 
15927   return true;
15928 }
15929 
15930 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15931                                            SelectionDAG &DAG) const {
15932   MachineFunction &MF = DAG.getMachineFunction();
15933   MachineFrameInfo &MFI = MF.getFrameInfo();
15934   MFI.setReturnAddressIsTaken(true);
15935 
15936   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15937     return SDValue();
15938 
15939   SDLoc dl(Op);
15940   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15941 
15942   // Make sure the function does not optimize away the store of the RA to
15943   // the stack.
15944   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15945   FuncInfo->setLRStoreRequired();
15946   bool isPPC64 = Subtarget.isPPC64();
15947   auto PtrVT = getPointerTy(MF.getDataLayout());
15948 
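  // A sketch of the mapping: __builtin_return_address(0) takes the fall-through
  // path below and loads the saved LR slot directly, while a nonzero depth
  // first computes the frame address and then loads LR from that frame's
  // return-save offset.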
15949   if (Depth > 0) {
15950     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15951     SDValue Offset =
15952         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15953                         isPPC64 ? MVT::i64 : MVT::i32);
15954     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15955                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15956                        MachinePointerInfo());
15957   }
15958 
15959   // Just load the return address off the stack.
15960   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15961   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15962                      MachinePointerInfo());
15963 }
15964 
15965 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15966                                           SelectionDAG &DAG) const {
15967   SDLoc dl(Op);
15968   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15969 
15970   MachineFunction &MF = DAG.getMachineFunction();
15971   MachineFrameInfo &MFI = MF.getFrameInfo();
15972   MFI.setFrameAddressIsTaken(true);
15973 
15974   EVT PtrVT = getPointerTy(MF.getDataLayout());
15975   bool isPPC64 = PtrVT == MVT::i64;
15976 
15977   // Naked functions never have a frame pointer, and so we use r1. For all
15978   // other functions, this decision must be deferred until PEI.
15979   unsigned FrameReg;
15980   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15981     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15982   else
15983     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15984 
15985   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15986                                          PtrVT);
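  // Each additional level dereferences the back chain, which the PPC ABIs keep
  // at offset 0 of every frame, so __builtin_frame_address(n) costs n loads.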
15987   while (Depth--)
15988     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15989                             FrameAddr, MachinePointerInfo());
15990   return FrameAddr;
15991 }
15992 
15993 // FIXME? Maybe this could be a TableGen attribute on some registers and
15994 // this table could be generated automatically from RegInfo.
15995 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15996                                               const MachineFunction &MF) const {
15997   bool isPPC64 = Subtarget.isPPC64();
15998 
15999   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
16000   if (!is64Bit && VT != LLT::scalar(32))
16001     report_fatal_error("Invalid register global variable type");
16002 
16003   Register Reg = StringSwitch<Register>(RegName)
16004                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
16005                      .Case("r2", isPPC64 ? Register() : PPC::R2)
16006                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
16007                      .Default(Register());
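  // This serves named-register globals, e.g. the hypothetical declaration
  //   register unsigned long sp __asm__("r1");
  // whose reads become llvm.read_register and land here. Note that r2 is only
  // accepted on 32-bit targets; on PPC64 it is the TOC pointer.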
16008 
16009   if (Reg)
16010     return Reg;
16011   report_fatal_error("Invalid register name global variable");
16012 }
16013 
16014 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
16015   // The 32-bit SVR4 ABI accesses everything as got-indirect.
16016   if (Subtarget.is32BitELFABI())
16017     return true;
16018 
16019   // AIX accesses everything indirectly through the TOC, which is similar to
16020   // the GOT.
16021   if (Subtarget.isAIXABI())
16022     return true;
16023 
16024   CodeModel::Model CModel = getTargetMachine().getCodeModel();
16025   // With the small or large code model, module locals are accessed
16026   // indirectly by loading their address from the .toc/.got.
16027   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
16028     return true;
16029 
16030   // JumpTable and BlockAddress are accessed as got-indirect.
16031   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
16032     return true;
16033 
16034   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
16035     return Subtarget.isGVIndirectSymbol(G->getGlobal());
16036 
16037   return false;
16038 }
16039 
16040 bool
16041 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
16042   // The PowerPC target isn't yet aware of offsets.
16043   return false;
16044 }
16045 
16046 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
16047                                            const CallInst &I,
16048                                            MachineFunction &MF,
16049                                            unsigned Intrinsic) const {
16050   switch (Intrinsic) {
16051   case Intrinsic::ppc_qpx_qvlfd:
16052   case Intrinsic::ppc_qpx_qvlfs:
16053   case Intrinsic::ppc_qpx_qvlfcd:
16054   case Intrinsic::ppc_qpx_qvlfcs:
16055   case Intrinsic::ppc_qpx_qvlfiwa:
16056   case Intrinsic::ppc_qpx_qvlfiwz:
16057   case Intrinsic::ppc_altivec_lvx:
16058   case Intrinsic::ppc_altivec_lvxl:
16059   case Intrinsic::ppc_altivec_lvebx:
16060   case Intrinsic::ppc_altivec_lvehx:
16061   case Intrinsic::ppc_altivec_lvewx:
16062   case Intrinsic::ppc_vsx_lxvd2x:
16063   case Intrinsic::ppc_vsx_lxvw4x: {
16064     EVT VT;
16065     switch (Intrinsic) {
16066     case Intrinsic::ppc_altivec_lvebx:
16067       VT = MVT::i8;
16068       break;
16069     case Intrinsic::ppc_altivec_lvehx:
16070       VT = MVT::i16;
16071       break;
16072     case Intrinsic::ppc_altivec_lvewx:
16073       VT = MVT::i32;
16074       break;
16075     case Intrinsic::ppc_vsx_lxvd2x:
16076       VT = MVT::v2f64;
16077       break;
16078     case Intrinsic::ppc_qpx_qvlfd:
16079       VT = MVT::v4f64;
16080       break;
16081     case Intrinsic::ppc_qpx_qvlfs:
16082       VT = MVT::v4f32;
16083       break;
16084     case Intrinsic::ppc_qpx_qvlfcd:
16085       VT = MVT::v2f64;
16086       break;
16087     case Intrinsic::ppc_qpx_qvlfcs:
16088       VT = MVT::v2f32;
16089       break;
16090     default:
16091       VT = MVT::v4i32;
16092       break;
16093     }
16094 
16095     Info.opc = ISD::INTRINSIC_W_CHAIN;
16096     Info.memVT = VT;
16097     Info.ptrVal = I.getArgOperand(0);
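    // Note: lvx-style loads ignore the low-order address bits, so the access
    // may begin up to size-1 bytes before the given pointer; the offset and
    // size below model that conservative 2*size-1 byte window.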
16098     Info.offset = -VT.getStoreSize()+1;
16099     Info.size = 2*VT.getStoreSize()-1;
16100     Info.align = Align(1);
16101     Info.flags = MachineMemOperand::MOLoad;
16102     return true;
16103   }
16104   case Intrinsic::ppc_qpx_qvlfda:
16105   case Intrinsic::ppc_qpx_qvlfsa:
16106   case Intrinsic::ppc_qpx_qvlfcda:
16107   case Intrinsic::ppc_qpx_qvlfcsa:
16108   case Intrinsic::ppc_qpx_qvlfiwaa:
16109   case Intrinsic::ppc_qpx_qvlfiwza: {
16110     EVT VT;
16111     switch (Intrinsic) {
16112     case Intrinsic::ppc_qpx_qvlfda:
16113       VT = MVT::v4f64;
16114       break;
16115     case Intrinsic::ppc_qpx_qvlfsa:
16116       VT = MVT::v4f32;
16117       break;
16118     case Intrinsic::ppc_qpx_qvlfcda:
16119       VT = MVT::v2f64;
16120       break;
16121     case Intrinsic::ppc_qpx_qvlfcsa:
16122       VT = MVT::v2f32;
16123       break;
16124     default:
16125       VT = MVT::v4i32;
16126       break;
16127     }
16128 
16129     Info.opc = ISD::INTRINSIC_W_CHAIN;
16130     Info.memVT = VT;
16131     Info.ptrVal = I.getArgOperand(0);
16132     Info.offset = 0;
16133     Info.size = VT.getStoreSize();
16134     Info.align = Align(1);
16135     Info.flags = MachineMemOperand::MOLoad;
16136     return true;
16137   }
16138   case Intrinsic::ppc_qpx_qvstfd:
16139   case Intrinsic::ppc_qpx_qvstfs:
16140   case Intrinsic::ppc_qpx_qvstfcd:
16141   case Intrinsic::ppc_qpx_qvstfcs:
16142   case Intrinsic::ppc_qpx_qvstfiw:
16143   case Intrinsic::ppc_altivec_stvx:
16144   case Intrinsic::ppc_altivec_stvxl:
16145   case Intrinsic::ppc_altivec_stvebx:
16146   case Intrinsic::ppc_altivec_stvehx:
16147   case Intrinsic::ppc_altivec_stvewx:
16148   case Intrinsic::ppc_vsx_stxvd2x:
16149   case Intrinsic::ppc_vsx_stxvw4x: {
16150     EVT VT;
16151     switch (Intrinsic) {
16152     case Intrinsic::ppc_altivec_stvebx:
16153       VT = MVT::i8;
16154       break;
16155     case Intrinsic::ppc_altivec_stvehx:
16156       VT = MVT::i16;
16157       break;
16158     case Intrinsic::ppc_altivec_stvewx:
16159       VT = MVT::i32;
16160       break;
16161     case Intrinsic::ppc_vsx_stxvd2x:
16162       VT = MVT::v2f64;
16163       break;
16164     case Intrinsic::ppc_qpx_qvstfd:
16165       VT = MVT::v4f64;
16166       break;
16167     case Intrinsic::ppc_qpx_qvstfs:
16168       VT = MVT::v4f32;
16169       break;
16170     case Intrinsic::ppc_qpx_qvstfcd:
16171       VT = MVT::v2f64;
16172       break;
16173     case Intrinsic::ppc_qpx_qvstfcs:
16174       VT = MVT::v2f32;
16175       break;
16176     default:
16177       VT = MVT::v4i32;
16178       break;
16179     }
16180 
16181     Info.opc = ISD::INTRINSIC_VOID;
16182     Info.memVT = VT;
16183     Info.ptrVal = I.getArgOperand(1);
16184     Info.offset = -VT.getStoreSize()+1;
16185     Info.size = 2*VT.getStoreSize()-1;
16186     Info.align = Align(1);
16187     Info.flags = MachineMemOperand::MOStore;
16188     return true;
16189   }
16190   case Intrinsic::ppc_qpx_qvstfda:
16191   case Intrinsic::ppc_qpx_qvstfsa:
16192   case Intrinsic::ppc_qpx_qvstfcda:
16193   case Intrinsic::ppc_qpx_qvstfcsa:
16194   case Intrinsic::ppc_qpx_qvstfiwa: {
16195     EVT VT;
16196     switch (Intrinsic) {
16197     case Intrinsic::ppc_qpx_qvstfda:
16198       VT = MVT::v4f64;
16199       break;
16200     case Intrinsic::ppc_qpx_qvstfsa:
16201       VT = MVT::v4f32;
16202       break;
16203     case Intrinsic::ppc_qpx_qvstfcda:
16204       VT = MVT::v2f64;
16205       break;
16206     case Intrinsic::ppc_qpx_qvstfcsa:
16207       VT = MVT::v2f32;
16208       break;
16209     default:
16210       VT = MVT::v4i32;
16211       break;
16212     }
16213 
16214     Info.opc = ISD::INTRINSIC_VOID;
16215     Info.memVT = VT;
16216     Info.ptrVal = I.getArgOperand(1);
16217     Info.offset = 0;
16218     Info.size = VT.getStoreSize();
16219     Info.align = Align(1);
16220     Info.flags = MachineMemOperand::MOStore;
16221     return true;
16222   }
16223   default:
16224     break;
16225   }
16226 
16227   return false;
16228 }
16229 
16230 /// Returns the preferred type for a memory operation, or EVT::Other if the
16231 /// type should be determined using generic target-independent logic.
16232 EVT PPCTargetLowering::getOptimalMemOpType(
16233     const MemOp &Op, const AttributeList &FuncAttributes) const {
16234   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
16235     // When expanding a memset, require at least two QPX instructions to cover
16236     // the cost of loading the value to be stored from the constant pool.
16237     if (Subtarget.hasQPX() && Op.size() >= 32 &&
16238         (Op.isMemcpy() || Op.size() >= 64) && Op.isAligned(Align(32)) &&
16239         !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
16240       return MVT::v4f64;
16241     }
16242 
16243     // We should use Altivec/VSX loads and stores when available. For unaligned
16244     // addresses, unaligned VSX loads are only fast starting with the P8.
16245     if (Subtarget.hasAltivec() && Op.size() >= 16 &&
16246         (Op.isAligned(Align(16)) ||
16247          ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
16248       return MVT::v4i32;
16249   }
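  // Otherwise fall back to GPR-width chunks; e.g. an aligned 32-byte memcpy
  // with Altivec is covered by two v4i32 copies above, while without vector
  // support it becomes four i64 (or eight i32) copies.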
16250 
16251   if (Subtarget.isPPC64()) {
16252     return MVT::i64;
16253   }
16254 
16255   return MVT::i32;
16256 }
16257 
16258 /// Returns true if it is beneficial to convert a load of a constant
16259 /// to just the constant itself.
16260 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
16261                                                           Type *Ty) const {
16262   assert(Ty->isIntegerTy());
16263 
16264   unsigned BitSize = Ty->getPrimitiveSizeInBits();
16265   return !(BitSize == 0 || BitSize > 64);
16266 }
16267 
16268 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
16269   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
16270     return false;
16271   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
16272   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
16273   return NumBits1 == 64 && NumBits2 == 32;
16274 }
16275 
16276 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
16277   if (!VT1.isInteger() || !VT2.isInteger())
16278     return false;
16279   unsigned NumBits1 = VT1.getSizeInBits();
16280   unsigned NumBits2 = VT2.getSizeInBits();
16281   return NumBits1 == 64 && NumBits2 == 32;
16282 }
16283 
16284 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
16285   // Generally speaking, zexts are not free, but they are free when they can be
16286   // folded with other operations.
16287   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
16288     EVT MemVT = LD->getMemoryVT();
16289     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
16290          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
16291         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
16292          LD->getExtensionType() == ISD::ZEXTLOAD))
16293       return true;
16294   }
16295 
16296   // FIXME: Add other cases...
16297   //  - 32-bit shifts with a zext to i64
16298   //  - zext after ctlz, bswap, etc.
16299   //  - zext after and by a constant mask
16300 
16301   return TargetLowering::isZExtFree(Val, VT2);
16302 }
16303 
16304 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
16305   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
16306          "invalid fpext types");
16307   // Extending to float128 is not free.
16308   if (DestVT == MVT::f128)
16309     return false;
16310   return true;
16311 }
16312 
16313 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
16314   return isInt<16>(Imm) || isUInt<16>(Imm);
16315 }
16316 
16317 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
16318   return isInt<16>(Imm) || isUInt<16>(Imm);
16319 }
16320 
16321 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
16322                                                        unsigned,
16323                                                        unsigned,
16324                                                        MachineMemOperand::Flags,
16325                                                        bool *Fast) const {
16326   if (DisablePPCUnaligned)
16327     return false;
16328 
16329   // PowerPC supports unaligned memory access for simple non-vector types.
16330   // Although accessing unaligned addresses is not as efficient as accessing
16331   // aligned addresses, it is generally more efficient than manual expansion,
16332   // and it generally traps to software emulation only when crossing page
16333   // boundaries.
16334 
16335   if (!VT.isSimple())
16336     return false;
16337 
16338   if (VT.isFloatingPoint() && !VT.isVector() &&
16339       !Subtarget.allowsUnalignedFPAccess())
16340     return false;
16341 
16342   if (VT.getSimpleVT().isVector()) {
16343     if (Subtarget.hasVSX()) {
16344       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
16345           VT != MVT::v4f32 && VT != MVT::v4i32)
16346         return false;
16347     } else {
16348       return false;
16349     }
16350   }
16351 
16352   if (VT == MVT::ppcf128)
16353     return false;
16354 
16355   if (Fast)
16356     *Fast = true;
16357 
16358   return true;
16359 }
16360 
16361 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
16362                                                    EVT VT) const {
16363   return isFMAFasterThanFMulAndFAdd(
16364       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
16365 }
16366 
16367 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
16368                                                    Type *Ty) const {
16369   switch (Ty->getScalarType()->getTypeID()) {
16370   case Type::FloatTyID:
16371   case Type::DoubleTyID:
16372     return true;
16373   case Type::FP128TyID:
16374     return Subtarget.hasP9Vector();
16375   default:
16376     return false;
16377   }
16378 }
16379 
16380 // Currently this is a copy from AArch64TargetLowering::isProfitableToHoist.
16381 // FIXME: add more patterns which are profitable to hoist.
16382 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
16383   if (I->getOpcode() != Instruction::FMul)
16384     return true;
16385 
16386   if (!I->hasOneUse())
16387     return true;
16388 
16389   Instruction *User = I->user_back();
16390   assert(User && "A single use instruction with no uses.");
16391 
16392   if (User->getOpcode() != Instruction::FSub &&
16393       User->getOpcode() != Instruction::FAdd)
16394     return true;
16395 
16396   const TargetOptions &Options = getTargetMachine().Options;
16397   const Function *F = I->getFunction();
16398   const DataLayout &DL = F->getParent()->getDataLayout();
16399   Type *Ty = User->getOperand(0)->getType();
16400 
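  // Rationale: keep a one-use fmul in the same block as its fadd/fsub user
  // when the pair can be fused into a single FMA; hoisting would block that
  // fusion.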
16401   return !(
16402       isFMAFasterThanFMulAndFAdd(*F, Ty) &&
16403       isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
16404       (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
16405 }
16406 
16407 const MCPhysReg *
16408 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
16409   // LR is a callee-save register, but we must treat it as clobbered by any call
16410   // site. Hence we include LR in the scratch registers, which are in turn added
16411   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
16412   // to CTR, which is used by any indirect call.
16413   static const MCPhysReg ScratchRegs[] = {
16414     PPC::X12, PPC::LR8, PPC::CTR8, 0
16415   };
16416 
16417   return ScratchRegs;
16418 }
16419 
16420 Register PPCTargetLowering::getExceptionPointerRegister(
16421     const Constant *PersonalityFn) const {
16422   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
16423 }
16424 
16425 Register PPCTargetLowering::getExceptionSelectorRegister(
16426     const Constant *PersonalityFn) const {
16427   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
16428 }
16429 
16430 bool
16431 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
16432                      EVT VT, unsigned DefinedValues) const {
16433   if (VT == MVT::v2i64)
16434     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
16435 
16436   if (Subtarget.hasVSX() || Subtarget.hasQPX())
16437     return true;
16438 
16439   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
16440 }
16441 
16442 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
16443   if (DisableILPPref || Subtarget.enableMachineScheduler())
16444     return TargetLowering::getSchedulingPreference(N);
16445 
16446   return Sched::ILP;
16447 }
16448 
16449 // Create a fast isel object.
16450 FastISel *
16451 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
16452                                   const TargetLibraryInfo *LibInfo) const {
16453   return PPC::createFastISel(FuncInfo, LibInfo);
16454 }
16455 
16456 // 'Inverted' means the FMA opcode after negating one multiplicand.
16457 // For example, (fma -a b c) = (fnmsub a b c)
16458 static unsigned invertFMAOpcode(unsigned Opc) {
16459   switch (Opc) {
16460   default:
16461     llvm_unreachable("Invalid FMA opcode for PowerPC!");
16462   case ISD::FMA:
16463     return PPCISD::FNMSUB;
16464   case PPCISD::FNMSUB:
16465     return ISD::FMA;
16466   }
16467 }
16468 
16469 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
16470                                                 bool LegalOps, bool OptForSize,
16471                                                 NegatibleCost &Cost,
16472                                                 unsigned Depth) const {
16473   if (Depth > SelectionDAG::MaxRecursionDepth)
16474     return SDValue();
16475 
16476   unsigned Opc = Op.getOpcode();
16477   EVT VT = Op.getValueType();
16478   SDNodeFlags Flags = Op.getNode()->getFlags();
16479 
16480   switch (Opc) {
16481   case PPCISD::FNMSUB:
16482     // TODO: QPX subtarget is deprecated. No transformation here.
16483     if (!Op.hasOneUse() || !isTypeLegal(VT) || Subtarget.hasQPX())
16484       break;
16485 
16486     const TargetOptions &Options = getTargetMachine().Options;
16487     SDValue N0 = Op.getOperand(0);
16488     SDValue N1 = Op.getOperand(1);
16489     SDValue N2 = Op.getOperand(2);
16490     SDLoc Loc(Op);
16491 
16492     NegatibleCost N2Cost = NegatibleCost::Expensive;
16493     SDValue NegN2 =
16494         getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
16495 
16496     if (!NegN2)
16497       return SDValue();
16498 
16499     // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
16500     // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
16501     // These transformations may change the sign of zero. For example,
16502     // -(-ab-(-c)) = -0 while -(-(ab-c)) = +0 when a = b = c = 1.
16503     if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
16504       // Try and choose the cheaper one to negate.
16505       NegatibleCost N0Cost = NegatibleCost::Expensive;
16506       SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
16507                                            N0Cost, Depth + 1);
16508 
16509       NegatibleCost N1Cost = NegatibleCost::Expensive;
16510       SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
16511                                            N1Cost, Depth + 1);
16512 
16513       if (NegN0 && N0Cost <= N1Cost) {
16514         Cost = std::min(N0Cost, N2Cost);
16515         return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
16516       } else if (NegN1) {
16517         Cost = std::min(N1Cost, N2Cost);
16518         return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
16519       }
16520     }
16521 
16522     // (fneg (fnmsub a b c)) => (fma a b (fneg c))
16523     if (isOperationLegal(ISD::FMA, VT)) {
16524       Cost = N2Cost;
16525       return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
16526     }
16527 
16528     break;
16529   }
16530 
16531   return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
16532                                               Cost, Depth);
16533 }
16534 
16535 // Override to enable LOAD_STACK_GUARD lowering on Linux.
16536 bool PPCTargetLowering::useLoadStackGuardNode() const {
16537   if (!Subtarget.isTargetLinux())
16538     return TargetLowering::useLoadStackGuardNode();
16539   return true;
16540 }
16541 
16542 // Override to skip SSP guard declarations on Linux (LOAD_STACK_GUARD is used).
16543 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
16544   if (!Subtarget.isTargetLinux())
16545     return TargetLowering::insertSSPDeclarations(M);
16546 }
16547 
16548 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
16549                                      bool ForCodeSize) const {
16550   if (!VT.isSimple() || !Subtarget.hasVSX())
16551     return false;
16552 
16553   switch(VT.getSimpleVT().SimpleTy) {
16554   default:
16555     // For FP types that are currently not supported by PPC backend, return
16556     // false. Examples: f16, f80.
16557     return false;
16558   case MVT::f32:
16559   case MVT::f64:
16560     if (Subtarget.hasPrefixInstrs()) {
16561       // With prefixed instructions, we can materialize anything that can be
16562       // represented with a 32-bit immediate, not just positive zero.
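      // For instance, 1.0 converts exactly to a non-denormal single, so it
      // can be synthesized from a 32-bit immediate (e.g. with a splat form
      // such as xxspltidp) rather than loaded from the constant pool.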
16563       APFloat APFloatOfImm = Imm;
16564       return convertToNonDenormSingle(APFloatOfImm);
16565     }
16566     LLVM_FALLTHROUGH;
16567   case MVT::ppcf128:
16568     return Imm.isPosZero();
16569   }
16570 }
16571 
16572 // For vector shift operation op, fold
16573 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
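// For example, with v4i32 the DAG (shl x, (and y, 31)) becomes
// (PPCISD::SHL x, y): the vector shift instructions already interpret the
// shift amount modulo the element width, so the explicit mask is redundant.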
16574 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
16575                                   SelectionDAG &DAG) {
16576   SDValue N0 = N->getOperand(0);
16577   SDValue N1 = N->getOperand(1);
16578   EVT VT = N0.getValueType();
16579   unsigned OpSizeInBits = VT.getScalarSizeInBits();
16580   unsigned Opcode = N->getOpcode();
16581   unsigned TargetOpcode;
16582 
16583   switch (Opcode) {
16584   default:
16585     llvm_unreachable("Unexpected shift operation");
16586   case ISD::SHL:
16587     TargetOpcode = PPCISD::SHL;
16588     break;
16589   case ISD::SRL:
16590     TargetOpcode = PPCISD::SRL;
16591     break;
16592   case ISD::SRA:
16593     TargetOpcode = PPCISD::SRA;
16594     break;
16595   }
16596 
16597   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
16598       N1->getOpcode() == ISD::AND)
16599     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
16600       if (Mask->getZExtValue() == OpSizeInBits - 1)
16601         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
16602 
16603   return SDValue();
16604 }
16605 
16606 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
16607   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16608     return Value;
16609 
16610   SDValue N0 = N->getOperand(0);
16611   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
16612   if (!Subtarget.isISA3_0() ||
16613       N0.getOpcode() != ISD::SIGN_EXTEND ||
16614       N0.getOperand(0).getValueType() != MVT::i32 ||
16615       CN1 == nullptr || N->getValueType(0) != MVT::i64)
16616     return SDValue();
16617 
16618   // We can't save an operation here if the value is already extended, and
16619   // the existing shift is easier to combine.
16620   SDValue ExtsSrc = N0.getOperand(0);
16621   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
16622       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
16623     return SDValue();
16624 
16625   SDLoc DL(N0);
16626   SDValue ShiftBy = SDValue(CN1, 0);
16627   // We want the shift amount to be i32 on the extswsli, but the shift
16628   // amount could be an i64, so convert it if needed.
16629   if (ShiftBy.getValueType() == MVT::i64)
16630     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
16631 
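  // e.g. (shl (sign_extend i32:x), 3) becomes EXTSWSLI(x, 3), folding the
  // extension and the shift into ISA 3.0's extswsli instruction.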
16632   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
16633                          ShiftBy);
16634 }
16635 
16636 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
16637   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16638     return Value;
16639 
16640   return SDValue();
16641 }
16642 
16643 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
16644   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16645     return Value;
16646 
16647   return SDValue();
16648 }
16649 
16650 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
16651 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
16652 // When C is zero, the expression (addi Z, -C) simplifies to Z.
16653 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
16654 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
16655                                  const PPCSubtarget &Subtarget) {
16656   if (!Subtarget.isPPC64())
16657     return SDValue();
16658 
16659   SDValue LHS = N->getOperand(0);
16660   SDValue RHS = N->getOperand(1);
16661 
16662   auto isZextOfCompareWithConstant = [](SDValue Op) {
16663     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
16664         Op.getValueType() != MVT::i64)
16665       return false;
16666 
16667     SDValue Cmp = Op.getOperand(0);
16668     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
16669         Cmp.getOperand(0).getValueType() != MVT::i64)
16670       return false;
16671 
16672     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
16673       int64_t NegConstant = 0 - Constant->getSExtValue();
16674       // Due to the limitations of the addi instruction,
16675       // -C is required to be in [-32768, 32767].
16676       return isInt<16>(NegConstant);
16677     }
16678 
16679     return false;
16680   };
16681 
16682   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
16683   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
16684 
16685   // If there is a pattern, canonicalize a zext operand to the RHS.
16686   if (LHSHasPattern && !RHSHasPattern)
16687     std::swap(LHS, RHS);
16688   else if (!LHSHasPattern && !RHSHasPattern)
16689     return SDValue();
16690 
16691   SDLoc DL(N);
16692   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
16693   SDValue Cmp = RHS.getOperand(0);
16694   SDValue Z = Cmp.getOperand(0);
16695   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
16696 
16697   assert(Constant && "Constant should not be a null pointer.");
16698   int64_t NegConstant = 0 - Constant->getSExtValue();
16699 
16700   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
16701   default: break;
16702   case ISD::SETNE: {
16703     //                                 when C == 0
16704     //                             --> addze X, (addic Z, -1).carry
16705     //                            /
16706     // add X, (zext(setne Z, C))--
16707     //                            \    when -32768 <= -C <= 32767 && C != 0
16708     //                             --> addze X, (addic (addi Z, -C), -1).carry
16709     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16710                               DAG.getConstant(NegConstant, DL, MVT::i64));
16711     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16712     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16713                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
16714     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16715                        SDValue(Addc.getNode(), 1));
16716     }
16717   case ISD::SETEQ: {
16718     //                                 when C == 0
16719     //                             --> addze X, (subfic Z, 0).carry
16720     //                            /
16721     // add X, (zext(sete  Z, C))--
16722     //                            \    when -32768 <= -C <= 32767 && C != 0
16723     //                             --> addze X, (subfic (addi Z, -C), 0).carry
16724     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16725                               DAG.getConstant(NegConstant, DL, MVT::i64));
16726     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16727     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16728                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
16729     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16730                        SDValue(Subc.getNode(), 1));
16731     }
16732   }
16733 
16734   return SDValue();
16735 }
16736 
16737 // Transform
16738 // (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
16739 // (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
16740 // In this case both C1 and C2 must be known constants.
16741 // C1+C2 must fit into a 34-bit signed integer.
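// For example, (add 16, (MAT_PCREL_ADDR GlobalAddr+8)) becomes
// (MAT_PCREL_ADDR GlobalAddr+24), which is typically materialized with a
// single pc-relative paddi.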
16742 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
16743                                           const PPCSubtarget &Subtarget) {
16744   if (!Subtarget.isUsingPCRelativeCalls())
16745     return SDValue();
16746 
16747   // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
16748 // If we find that node, try to cast the Global Address and the Constant.
16749   SDValue LHS = N->getOperand(0);
16750   SDValue RHS = N->getOperand(1);
16751 
16752   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16753     std::swap(LHS, RHS);
16754 
16755   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16756     return SDValue();
16757 
16758   // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
16759   GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
16760   ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
16761 
16762   // Check that both casts succeeded.
16763   if (!GSDN || !ConstNode)
16764     return SDValue();
16765 
16766   int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
16767   SDLoc DL(GSDN);
16768 
16769   // The signed int offset needs to fit in 34 bits.
16770   if (!isInt<34>(NewOffset))
16771     return SDValue();
16772 
16773   // The new global address is a copy of the old global address except
16774   // that it has the updated Offset.
16775   SDValue GA =
16776       DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
16777                                  NewOffset, GSDN->getTargetFlags());
16778   SDValue MatPCRel =
16779       DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
16780   return MatPCRel;
16781 }
16782 
16783 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
16784   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
16785     return Value;
16786 
16787   if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
16788     return Value;
16789 
16790   return SDValue();
16791 }
16792 
16793 // Detect TRUNCATE operations on bitcasts of float128 values.
16794 // What we are looking for here is the situation where we extract a subset
16795 // of bits from a 128 bit float.
16796 // This can be of two forms:
16797 // 1) BITCAST of f128 feeding TRUNCATE
16798 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
16799 // The reason this is required is because we do not have a legal i128 type
16800 // and so we want to prevent having to store the f128 and then reload part
16801 // of it.
16802 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
16803                                            DAGCombinerInfo &DCI) const {
16804   // If we are using CRBits then try that first.
16805   if (Subtarget.useCRBits()) {
16806     // Check if CRBits did anything and return that if it did.
16807     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
16808       return CRTruncValue;
16809   }
16810 
16811   SDLoc dl(N);
16812   SDValue Op0 = N->getOperand(0);
16813 
16814   // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
16815   if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
16816     EVT VT = N->getValueType(0);
16817     if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16818       return SDValue();
16819     SDValue Sub = Op0.getOperand(0);
16820     if (Sub.getOpcode() == ISD::SUB) {
16821       SDValue SubOp0 = Sub.getOperand(0);
16822       SDValue SubOp1 = Sub.getOperand(1);
16823       if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
16824           (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
16825         return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
16826                                SubOp1.getOperand(0),
16827                                DCI.DAG.getTargetConstant(0, dl, MVT::i32));
16828       }
16829     }
16830   }
16831 
16832   // Looking for a truncate of i128 to i64.
16833   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
16834     return SDValue();
16835 
16836   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
16837 
16838   // SRL feeding TRUNCATE.
16839   if (Op0.getOpcode() == ISD::SRL) {
16840     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
16841     // The right shift has to be by 64 bits.
16842     if (!ConstNode || ConstNode->getZExtValue() != 64)
16843       return SDValue();
16844 
16845     // Switch the element number to extract.
16846     EltToExtract = EltToExtract ? 0 : 1;
16847     // Update Op0 past the SRL.
16848     Op0 = Op0.getOperand(0);
16849   }
16850 
16851   // BITCAST feeding a TRUNCATE possibly via SRL.
16852   if (Op0.getOpcode() == ISD::BITCAST &&
16853       Op0.getValueType() == MVT::i128 &&
16854       Op0.getOperand(0).getValueType() == MVT::f128) {
16855     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
16856     return DCI.DAG.getNode(
16857         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
16858         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
16859   }
16860   return SDValue();
16861 }
16862 
16863 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
16864   SelectionDAG &DAG = DCI.DAG;
16865 
16866   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
16867   if (!ConstOpOrElement)
16868     return SDValue();
16869 
16870   // An imul is usually smaller than the alternative sequence for a legal type.
16871   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
16872       isOperationLegal(ISD::MUL, N->getValueType(0)))
16873     return SDValue();
16874 
16875   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
16876     switch (this->Subtarget.getCPUDirective()) {
16877     default:
16878       // TODO: enhance the condition for subtarget before pwr8
16879       return false;
16880     case PPC::DIR_PWR8:
16881       //  type        mul     add    shl
16882       // scalar        4       1      1
16883       // vector        7       2      2
16884       return true;
16885     case PPC::DIR_PWR9:
16886     case PPC::DIR_PWR10:
16887     case PPC::DIR_PWR_FUTURE:
16888       //  type        mul     add    shl
16889       // scalar        5       2      2
16890       // vector        7       2      2
16891 
16892       // The cycle ratios of the relevant operations are shown in the table
16893       // above. Because mul costs 5 (scalar) / 7 (vector) while add/sub/shl
16894       // all cost 2 for both scalar and vector types, the 2-instruction
16895       // patterns (add/sub + shl, total cost 4) are always profitable. For
16896       // the 3-instruction pattern (mul x, -(2^N + 1)) => -(add (shl x, N), x)
16897       // the cost is 6 (sub + add + shl), so only do it for vector types.
16898       return IsAddOne && IsNeg ? VT.isVector() : true;
16899     }
16900   };
16901 
16902   EVT VT = N->getValueType(0);
16903   SDLoc DL(N);
16904 
16905   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
16906   bool IsNeg = MulAmt.isNegative();
16907   APInt MulAmtAbs = MulAmt.abs();
16908 
16909   if ((MulAmtAbs - 1).isPowerOf2()) {
16910     // (mul x, 2^N + 1) => (add (shl x, N), x)
16911     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
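    // For instance, x*9 has N == 3 and becomes (add (shl x, 3), x), while
    // x*-9 additionally subtracts that result from zero.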
16912 
16913     if (!IsProfitable(IsNeg, true, VT))
16914       return SDValue();
16915 
16916     SDValue Op0 = N->getOperand(0);
16917     SDValue Op1 =
16918         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16919                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
16920     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
16921 
16922     if (!IsNeg)
16923       return Res;
16924 
16925     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
16926   } else if ((MulAmtAbs + 1).isPowerOf2()) {
16927     // (mul x, 2^N - 1) => (sub (shl x, N), x)
16928     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
16929 
16930     if (!IsProfitable(IsNeg, false, VT))
16931       return SDValue();
16932 
16933     SDValue Op0 = N->getOperand(0);
16934     SDValue Op1 =
16935         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16936                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
16937 
16938     if (!IsNeg)
16939       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
16940     else
16941       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
16942 
16943   } else {
16944     return SDValue();
16945   }
16946 }
16947 
16948 // Combine an FMA-like op (such as fnmsub) with fnegs into the appropriate op.
16949 // Done in the combiner since we must check SD flags and subtarget features.
16950 SDValue PPCTargetLowering::combineFMALike(SDNode *N,
16951                                           DAGCombinerInfo &DCI) const {
16952   SDValue N0 = N->getOperand(0);
16953   SDValue N1 = N->getOperand(1);
16954   SDValue N2 = N->getOperand(2);
16955   SDNodeFlags Flags = N->getFlags();
16956   EVT VT = N->getValueType(0);
16957   SelectionDAG &DAG = DCI.DAG;
16958   const TargetOptions &Options = getTargetMachine().Options;
16959   unsigned Opc = N->getOpcode();
16960   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
16961   bool LegalOps = !DCI.isBeforeLegalizeOps();
16962   SDLoc Loc(N);
16963 
16964   // TODO: QPX subtarget is deprecated. No transformation here.
16965   if (Subtarget.hasQPX() || !isOperationLegal(ISD::FMA, VT))
16966     return SDValue();
16967 
16968   // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
16969   // since (fnmsub a b c)=-0 while c-ab=+0.
16970   if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
16971     return SDValue();
16972 
16973   // (fma (fneg a) b c) => (fnmsub a b c)
16974   // (fnmsub (fneg a) b c) => (fma a b c)
16975   if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
16976     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
16977 
16978   // (fma a (fneg b) c) => (fnmsub a b c)
16979   // (fnmsub a (fneg b) c) => (fma a b c)
16980   if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
16981     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
16982 
16983   return SDValue();
16984 }
16985 
16986 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
16987   // Only duplicate to increase tail calls for the 64-bit SysV ABIs.
16988   if (!Subtarget.is64BitELFABI())
16989     return false;
16990 
16991   // If not a tail call then no need to proceed.
16992   if (!CI->isTailCall())
16993     return false;
16994 
16995   // If sibling calls have been disabled and tail-calls aren't guaranteed
16996   // there is no reason to duplicate.
16997   auto &TM = getTargetMachine();
16998   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
16999     return false;
17000 
17001   // Can't tail call a function called indirectly, or if it has variadic args.
17002   const Function *Callee = CI->getCalledFunction();
17003   if (!Callee || Callee->isVarArg())
17004     return false;
17005 
17006   // Make sure the callee and caller calling conventions are eligible for tco.
17007   const Function *Caller = CI->getParent()->getParent();
17008   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
17009                                            CI->getCallingConv()))
17010       return false;
17011 
17012   // If the function is local then we have a good chance at tail-calling it.
17013   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
17014 }
17015 
17016 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
17017   if (!Subtarget.hasVSX())
17018     return false;
17019   if (Subtarget.hasP9Vector() && VT == MVT::f128)
17020     return true;
17021   return VT == MVT::f32 || VT == MVT::f64 ||
17022     VT == MVT::v4f32 || VT == MVT::v2f64;
17023 }
17024 
17025 bool PPCTargetLowering::
17026 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
17027   const Value *Mask = AndI.getOperand(1);
17028   // If the mask is suitable for andi. or andis. we should sink the and.
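  // For example, a mask of 0xFFFF fits andi.'s 16-bit unsigned immediate and
  // 0xFFFF0000 fits andis., so (x & mask) == 0 folds into one record-form
  // instruction whose CR0 result feeds the compare-with-zero.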
17029   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
17030     // Can't handle constants wider than 64-bits.
17031     if (CI->getBitWidth() > 64)
17032       return false;
17033     int64_t ConstVal = CI->getZExtValue();
17034     return isUInt<16>(ConstVal) ||
17035       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
17036   }
17037 
17038   // For non-constant masks, we can always use the record-form and.
17039   return true;
17040 }
17041 
17042 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
17043 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
17044 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
17045 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
17046 // Transform (abs (sub a, b)) to (vabsd a b 1) if a & b are of type v4i32
17047 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
17048   assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
17049   assert(Subtarget.hasP9Altivec() &&
17050          "Only combine this when P9 altivec supported!");
17051   EVT VT = N->getValueType(0);
17052   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
17053     return SDValue();
17054 
17055   SelectionDAG &DAG = DCI.DAG;
17056   SDLoc dl(N);
17057   if (N->getOperand(0).getOpcode() == ISD::SUB) {
17058     // This also holds for signed integers: zero-extended inputs are known
17059     // non-negative, so the unsigned vabsd gives the correct difference.
17060     unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
17061     unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
17062     if ((SubOpcd0 == ISD::ZERO_EXTEND ||
17063          SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
17064         (SubOpcd1 == ISD::ZERO_EXTEND ||
17065          SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
17066       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
17067                          N->getOperand(0)->getOperand(0),
17068                          N->getOperand(0)->getOperand(1),
17069                          DAG.getTargetConstant(0, dl, MVT::i32));
17070     }
17071 
17072     // For type v4i32, it can be optimized with xvnegsp + vabsduw
17073     if (N->getOperand(0).getValueType() == MVT::v4i32 &&
17074         N->getOperand(0).hasOneUse()) {
17075       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
17076                          N->getOperand(0)->getOperand(0),
17077                          N->getOperand(0)->getOperand(1),
17078                          DAG.getTargetConstant(1, dl, MVT::i32));
17079     }
17080   }
17081 
17082   return SDValue();
17083 }
17084 
17085 // For types v4i32/v8i16/v16i8, transform
17086 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
17087 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
17088 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
17089 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
17090 SDValue PPCTargetLowering::combineVSelect(SDNode *N,
17091                                           DAGCombinerInfo &DCI) const {
17092   assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
17093   assert(Subtarget.hasP9Altivec() &&
17094          "Only combine this when P9 altivec supported!");
17095 
17096   SelectionDAG &DAG = DCI.DAG;
17097   SDLoc dl(N);
17098   SDValue Cond = N->getOperand(0);
17099   SDValue TrueOpnd = N->getOperand(1);
17100   SDValue FalseOpnd = N->getOperand(2);
17101   EVT VT = N->getOperand(1).getValueType();
17102 
17103   if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
17104       FalseOpnd.getOpcode() != ISD::SUB)
17105     return SDValue();
17106 
17107   // ABSD is only available for types v4i32/v8i16/v16i8.
17108   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
17109     return SDValue();
17110 
17111   // Require a single-use operand so the combine saves a dependent computation.
17112   if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
17113     return SDValue();
17114 
17115   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
17116 
17117   // Can only handle unsigned comparison here
17118   switch (CC) {
17119   default:
17120     return SDValue();
17121   case ISD::SETUGT:
17122   case ISD::SETUGE:
17123     break;
17124   case ISD::SETULT:
17125   case ISD::SETULE:
17126     std::swap(TrueOpnd, FalseOpnd);
17127     break;
17128   }
17129 
17130   SDValue CmpOpnd1 = Cond.getOperand(0);
17131   SDValue CmpOpnd2 = Cond.getOperand(1);
17132 
17133   // SETCC CmpOpnd1 CmpOpnd2 cond
17134   // TrueOpnd = CmpOpnd1 - CmpOpnd2
17135   // FalseOpnd = CmpOpnd2 - CmpOpnd1
17136   if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
17137       TrueOpnd.getOperand(1) == CmpOpnd2 &&
17138       FalseOpnd.getOperand(0) == CmpOpnd2 &&
17139       FalseOpnd.getOperand(1) == CmpOpnd1) {
17140     return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
17141                        CmpOpnd1, CmpOpnd2,
17142                        DAG.getTargetConstant(0, dl, MVT::i32));
17143   }
17144 
17145   return SDValue();
17146 }
17147