//===-- PPCISelLowering.cpp - PPC DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the PPCISelLowering class.
//
//===----------------------------------------------------------------------===//

13 #include "PPCISelLowering.h"
14 #include "MCTargetDesc/PPCPredicates.h"
15 #include "PPC.h"
16 #include "PPCCCState.h"
17 #include "PPCCallingConv.h"
18 #include "PPCFrameLowering.h"
19 #include "PPCInstrInfo.h"
20 #include "PPCMachineFunctionInfo.h"
21 #include "PPCPerfectShuffle.h"
22 #include "PPCRegisterInfo.h"
23 #include "PPCSubtarget.h"
24 #include "PPCTargetMachine.h"
25 #include "llvm/ADT/APFloat.h"
26 #include "llvm/ADT/APInt.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/None.h"
30 #include "llvm/ADT/STLExtras.h"
31 #include "llvm/ADT/SmallPtrSet.h"
32 #include "llvm/ADT/SmallSet.h"
33 #include "llvm/ADT/SmallVector.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/StringRef.h"
36 #include "llvm/ADT/StringSwitch.h"
37 #include "llvm/CodeGen/CallingConvLower.h"
38 #include "llvm/CodeGen/ISDOpcodes.h"
39 #include "llvm/CodeGen/MachineBasicBlock.h"
40 #include "llvm/CodeGen/MachineFrameInfo.h"
41 #include "llvm/CodeGen/MachineFunction.h"
42 #include "llvm/CodeGen/MachineInstr.h"
43 #include "llvm/CodeGen/MachineInstrBuilder.h"
44 #include "llvm/CodeGen/MachineJumpTableInfo.h"
45 #include "llvm/CodeGen/MachineLoopInfo.h"
46 #include "llvm/CodeGen/MachineMemOperand.h"
47 #include "llvm/CodeGen/MachineModuleInfo.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/RuntimeLibcalls.h"
51 #include "llvm/CodeGen/SelectionDAG.h"
52 #include "llvm/CodeGen/SelectionDAGNodes.h"
53 #include "llvm/CodeGen/TargetInstrInfo.h"
54 #include "llvm/CodeGen/TargetLowering.h"
55 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
56 #include "llvm/CodeGen/TargetRegisterInfo.h"
57 #include "llvm/CodeGen/ValueTypes.h"
58 #include "llvm/IR/CallingConv.h"
59 #include "llvm/IR/Constant.h"
60 #include "llvm/IR/Constants.h"
61 #include "llvm/IR/DataLayout.h"
62 #include "llvm/IR/DebugLoc.h"
63 #include "llvm/IR/DerivedTypes.h"
64 #include "llvm/IR/Function.h"
65 #include "llvm/IR/GlobalValue.h"
66 #include "llvm/IR/IRBuilder.h"
67 #include "llvm/IR/Instructions.h"
68 #include "llvm/IR/Intrinsics.h"
69 #include "llvm/IR/IntrinsicsPowerPC.h"
70 #include "llvm/IR/Module.h"
71 #include "llvm/IR/Type.h"
72 #include "llvm/IR/Use.h"
73 #include "llvm/IR/Value.h"
74 #include "llvm/MC/MCContext.h"
75 #include "llvm/MC/MCExpr.h"
76 #include "llvm/MC/MCRegisterInfo.h"
77 #include "llvm/MC/MCSectionXCOFF.h"
78 #include "llvm/MC/MCSymbolXCOFF.h"
79 #include "llvm/Support/AtomicOrdering.h"
80 #include "llvm/Support/BranchProbability.h"
81 #include "llvm/Support/Casting.h"
82 #include "llvm/Support/CodeGen.h"
83 #include "llvm/Support/CommandLine.h"
84 #include "llvm/Support/Compiler.h"
85 #include "llvm/Support/Debug.h"
86 #include "llvm/Support/ErrorHandling.h"
87 #include "llvm/Support/Format.h"
88 #include "llvm/Support/KnownBits.h"
89 #include "llvm/Support/MachineValueType.h"
90 #include "llvm/Support/MathExtras.h"
91 #include "llvm/Support/raw_ostream.h"
92 #include "llvm/Target/TargetMachine.h"
93 #include "llvm/Target/TargetOptions.h"
94 #include <algorithm>
95 #include <cassert>
96 #include <cstdint>
97 #include <iterator>
98 #include <list>
99 #include <utility>
100 #include <vector>
101 
using namespace llvm;

#define DEBUG_TYPE "ppc-lowering"

static cl::opt<bool> DisablePPCPreinc("disable-ppc-preinc",
cl::desc("disable preincrement load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableILPPref("disable-ppc-ilp-pref",
cl::desc("disable setting the node scheduling preference to ILP on PPC"), cl::Hidden);

static cl::opt<bool> DisablePPCUnaligned("disable-ppc-unaligned",
cl::desc("disable unaligned load/store generation on PPC"), cl::Hidden);

static cl::opt<bool> DisableSCO("disable-ppc-sco",
cl::desc("disable sibling call optimization on ppc"), cl::Hidden);

static cl::opt<bool> DisableInnermostLoopAlign32("disable-ppc-innermost-loop-align32",
cl::desc("don't always align innermost loop to 32 bytes on ppc"), cl::Hidden);

static cl::opt<bool> UseAbsoluteJumpTables("ppc-use-absolute-jumptables",
cl::desc("use absolute jump tables on ppc"), cl::Hidden);

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumSiblingCalls, "Number of sibling calls");
STATISTIC(ShufflesHandledWithVPERM, "Number of shuffles lowered to a VPERM");
STATISTIC(NumDynamicAllocaProbed, "Number of dynamic stack allocation probed");

static bool isNByteElemShuffleMask(ShuffleVectorSDNode *, unsigned, int);

static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl);

// FIXME: Remove this once the bug has been fixed!
extern cl::opt<bool> ANDIGlueBug;

PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
                                     const PPCSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  // Initialize the map that relates the PPC addressing modes to the computed
  // flags of a load/store instruction. The map is used to determine the
  // optimal addressing mode when selecting loads and stores.
  initializeAddrModeMap();
  // On PPC32/64, arguments smaller than 4/8 bytes are extended, so all
  // arguments are at least 4/8 bytes aligned.
  bool isPPC64 = Subtarget.isPPC64();
  setMinStackArgumentAlignment(isPPC64 ? Align(8) : Align(4));

  // Set up the register classes.
  addRegisterClass(MVT::i32, &PPC::GPRCRegClass);
  if (!useSoftFloat()) {
    if (hasSPE()) {
      addRegisterClass(MVT::f32, &PPC::GPRCRegClass);
      // EFPU2 APU only supports f32
      if (!Subtarget.hasEFPU2())
        addRegisterClass(MVT::f64, &PPC::SPERCRegClass);
    } else {
      addRegisterClass(MVT::f32, &PPC::F4RCRegClass);
      addRegisterClass(MVT::f64, &PPC::F8RCRegClass);
    }
  }

  // Match BITREVERSE to customized fast code sequence in the td file.
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);

  // Sub-word ATOMIC_CMP_SWAP needs to ensure that the input is zero-extended.
  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);

  // Custom lower inline assembly to check for special registers.
  setOperationAction(ISD::INLINEASM, MVT::Other, Custom);
  setOperationAction(ISD::INLINEASM_BR, MVT::Other, Custom);

  // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Expand);
  }

  if (Subtarget.isISA3_0()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Legal);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Legal);
    setTruncStoreAction(MVT::f64, MVT::f16, Legal);
    setTruncStoreAction(MVT::f32, MVT::f16, Legal);
  } else {
    // No extending loads from f16 or HW conversions back and forth.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // PowerPC has pre-inc loads and stores.
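  // These are the update-form instructions (e.g. lwzu/stwu), which write the
  // new effective address back into the base register.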
  setIndexedLoadAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::PRE_INC, MVT::i64, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i1, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::PRE_INC, MVT::i64, Legal);
  if (!Subtarget.hasSPE()) {
    setIndexedLoadAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedLoadAction(ISD::PRE_INC, MVT::f64, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f32, Legal);
    setIndexedStoreAction(ISD::PRE_INC, MVT::f64, Legal);
  }

  // PowerPC uses ADDC/ADDE/SUBC/SUBE to propagate carry.
  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (Subtarget.useCRBits()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

    if (isPPC64 || Subtarget.hasFPCVT()) {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_SINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType (ISD::SINT_TO_FP, MVT::i1,
                         isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
      AddPromotedToType(ISD::UINT_TO_FP, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);

      setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_SINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
      setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
      AddPromotedToType(ISD::FP_TO_UINT, MVT::i1,
                        isPPC64 ? MVT::i64 : MVT::i32);
    } else {
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i1, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i1, Custom);
    }

    // PowerPC does not support direct load/store of condition registers.
    setOperationAction(ISD::LOAD, MVT::i1, Custom);
    setOperationAction(ISD::STORE, MVT::i1, Custom);

    // FIXME: Remove this once the ANDI glue bug is fixed:
    if (ANDIGlueBug)
      setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);

    for (MVT VT : MVT::integer_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
      setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
      setTruncStoreAction(VT, MVT::i1, Expand);
    }

    addRegisterClass(MVT::i1, &PPC::CRBITRCRegClass);
  }

  // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
  // PPC (the libcall is not available).
  setOperationAction(ISD::FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::ppcf128, Custom);
  setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::ppcf128, Custom);

  // We do not currently implement these libm ops for PowerPC.
  setOperationAction(ISD::FFLOOR, MVT::ppcf128, Expand);
  setOperationAction(ISD::FCEIL,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FTRUNC, MVT::ppcf128, Expand);
  setOperationAction(ISD::FRINT,  MVT::ppcf128, Expand);
  setOperationAction(ISD::FNEARBYINT, MVT::ppcf128, Expand);
  setOperationAction(ISD::FREM, MVT::ppcf128, Expand);

  // PowerPC has no SREM/UREM instructions unless we are on P9
  // On P9 we may use a hardware instruction to compute the remainder.
  // When the result of both the remainder and the division is required it is
  // more efficient to compute the remainder from the result of the division
  // rather than use the remainder instruction. The instructions are legalized
  // directly because the DivRemPairsPass performs the transformation at the IR
  // level.
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::SREM, MVT::i32, Legal);
    setOperationAction(ISD::UREM, MVT::i32, Legal);
    setOperationAction(ISD::SREM, MVT::i64, Legal);
    setOperationAction(ISD::UREM, MVT::i64, Legal);
  } else {
    setOperationAction(ISD::SREM, MVT::i32, Expand);
    setOperationAction(ISD::UREM, MVT::i32, Expand);
    setOperationAction(ISD::SREM, MVT::i64, Expand);
    setOperationAction(ISD::UREM, MVT::i64, Expand);
  }

  // Don't use SMUL_LOHI/UMUL_LOHI or SDIVREM/UDIVREM to lower SREM/UREM.
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);

  // Handle constrained floating-point operations for scalars.
  // TODO: Handle SPE-specific operations.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);

  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);

  if (!Subtarget.hasSPE()) {
    setOperationAction(ISD::STRICT_FMA, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FMA, MVT::f64, Legal);
  }

  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::STRICT_FRINT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FRINT, MVT::f64, Legal);
  }

  if (Subtarget.hasFSQRT()) {
    setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::STRICT_FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f32, Legal);

    setOperationAction(ISD::STRICT_FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FROUND, MVT::f64, Legal);
  }

  // We don't support sin/cos/sqrt/fmod/pow
  setOperationAction(ISD::FSIN , MVT::f64, Expand);
  setOperationAction(ISD::FCOS , MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM , MVT::f64, Expand);
  setOperationAction(ISD::FPOW , MVT::f64, Expand);
  setOperationAction(ISD::FSIN , MVT::f32, Expand);
  setOperationAction(ISD::FCOS , MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM , MVT::f32, Expand);
  setOperationAction(ISD::FPOW , MVT::f32, Expand);
  if (Subtarget.hasSPE()) {
    setOperationAction(ISD::FMA  , MVT::f64, Expand);
    setOperationAction(ISD::FMA  , MVT::f32, Expand);
  } else {
    setOperationAction(ISD::FMA  , MVT::f64, Legal);
    setOperationAction(ISD::FMA  , MVT::f32, Legal);
  }

  if (Subtarget.hasSPE())
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // If we're enabling GP optimizations, use hardware square root
  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTE() &&
        Subtarget.hasFRE()))
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);

  if (!Subtarget.hasFSQRT() &&
      !(TM.Options.UnsafeFPMath && Subtarget.hasFRSQRTES() &&
        Subtarget.hasFRES()))
    setOperationAction(ISD::FSQRT, MVT::f32, Expand);

  if (Subtarget.hasFCPSGN()) {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Legal);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Legal);
  } else {
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  }

  if (Subtarget.hasFPRND()) {
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FROUND, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL,  MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FROUND, MVT::f32, Legal);
  }

  // PowerPC does not have BSWAP, but we can use the vector BSWAP instruction
  // xxbrd to speed up scalar BSWAP64.
  // CTPOP and CTTZ were introduced in P8 and P9, respectively.
  setOperationAction(ISD::BSWAP, MVT::i32  , Expand);
  if (Subtarget.hasP9Vector() && Subtarget.isPPC64())
    setOperationAction(ISD::BSWAP, MVT::i64  , Custom);
  else
    setOperationAction(ISD::BSWAP, MVT::i64  , Expand);
  if (Subtarget.isISA3_0()) {
    setOperationAction(ISD::CTTZ , MVT::i32  , Legal);
    setOperationAction(ISD::CTTZ , MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTTZ , MVT::i32  , Expand);
    setOperationAction(ISD::CTTZ , MVT::i64  , Expand);
  }

  if (Subtarget.hasPOPCNTD() == PPCSubtarget::POPCNTD_Fast) {
    setOperationAction(ISD::CTPOP, MVT::i32  , Legal);
    setOperationAction(ISD::CTPOP, MVT::i64  , Legal);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i32  , Expand);
    setOperationAction(ISD::CTPOP, MVT::i64  , Expand);
  }

  // PowerPC does not have ROTR
  setOperationAction(ISD::ROTR, MVT::i32   , Expand);
  setOperationAction(ISD::ROTR, MVT::i64   , Expand);

  if (!Subtarget.useCRBits()) {
    // PowerPC does not have Select
    setOperationAction(ISD::SELECT, MVT::i32, Expand);
    setOperationAction(ISD::SELECT, MVT::i64, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  // PowerPC wants to turn select_cc of FP into fsel when possible.
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);

  // PowerPC wants to optimize integer setcc a bit
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::SETCC, MVT::i32, Custom);

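  // Strict (exception-aware) FP comparisons are handled by the ordinary FP
  // compare instructions whenever a hardware FPU is present.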
  if (Subtarget.hasFPU()) {
    setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Legal);

    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Legal);
  }

  // PowerPC does not have BRCOND which requires SetCC
  if (!Subtarget.useCRBits())
    setOperationAction(ISD::BRCOND, MVT::Other, Expand);

  setOperationAction(ISD::BR_JT,  MVT::Other, Expand);

  if (Subtarget.hasSPE()) {
    // SPE has built-in conversions
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);

    // SPE supports signaling compare of f32/f64.
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Legal);
    setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Legal);
  } else {
    // PowerPC turns FP_TO_SINT into FCTIWZ and some load/stores.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);

    // PowerPC does not have [U|S]INT_TO_FP
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
  }

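  // Direct-move instructions copy values between GPRs and FP/vector registers,
  // so on 64-bit targets with direct moves a scalar bitcast is just a register
  // move rather than a store/reload through memory.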
  if (Subtarget.hasDirectMove() && isPPC64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i32, Legal);
    setOperationAction(ISD::BITCAST, MVT::i64, Legal);
    setOperationAction(ISD::BITCAST, MVT::f64, Legal);
    if (TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::LRINT, MVT::f64, Legal);
      setOperationAction(ISD::LRINT, MVT::f32, Legal);
      setOperationAction(ISD::LLRINT, MVT::f64, Legal);
      setOperationAction(ISD::LLRINT, MVT::f32, Legal);
      setOperationAction(ISD::LROUND, MVT::f64, Legal);
      setOperationAction(ISD::LROUND, MVT::f32, Legal);
      setOperationAction(ISD::LLROUND, MVT::f64, Legal);
      setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    }
  } else {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    setOperationAction(ISD::BITCAST, MVT::f64, Expand);
  }

  // We cannot sextinreg(i1).  Expand to shifts.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented; please don't build your
  // own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // We want to legalize GlobalAddress and ConstantPool nodes into the
  // appropriate instructions to materialize the address.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i32, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::BlockAddress,  MVT::i64, Custom);
  setOperationAction(ISD::ConstantPool,  MVT::i64, Custom);
  setOperationAction(ISD::JumpTable,     MVT::i64, Custom);

  // TRAP is legal.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);

  if (Subtarget.is64BitELFABI()) {
    // VAARG always uses double-word chunks, so promote anything smaller.
    setOperationAction(ISD::VAARG, MVT::i1, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i1, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i8, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i8, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i16, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i16, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::i32, Promote);
    AddPromotedToType(ISD::VAARG, MVT::i32, MVT::i64);
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
  } else if (Subtarget.is32BitELFABI()) {
    // VAARG is custom lowered with the 32-bit SVR4 ABI.
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VAARG, MVT::i64, Custom);
  } else
    setOperationAction(ISD::VAARG, MVT::Other, Expand);

  // VACOPY is custom lowered with the 32-bit SVR4 ABI.
  if (Subtarget.is32BitELFABI())
    setOperationAction(ISD::VACOPY            , MVT::Other, Custom);
  else
    setOperationAction(ISD::VACOPY            , MVT::Other, Expand);

  // Use the default implementation.
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE         , MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE      , MVT::Other, Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32  , Custom);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64  , Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::GET_DYNAMIC_AREA_OFFSET, MVT::i64, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
  setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // To handle counter-based loop conditions.
  setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  // Comparisons that require checking two conditions.
  if (Subtarget.hasSPE()) {
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
  }
  setCondCodeAction(ISD::SETULT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETULT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUGT, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETONE, MVT::f64, Expand);

  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);

  if (Subtarget.has64BitSupport()) {
    // They also have instructions for converting between i64 and fp.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
    // This is just the low 32 bits of a (signed) fp->i64 conversion.
    // We cannot do this with Promote because i64 is not a legal type.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);

    if (Subtarget.hasLFIWAX() || Subtarget.isPPC64()) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    // PowerPC does not have FP_TO_UINT on 32-bit implementations.
    if (Subtarget.hasSPE()) {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    } else {
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Expand);
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    }
  }

  // With the instructions enabled under FPCVT, we can do everything.
  if (Subtarget.hasFPCVT()) {
    if (Subtarget.has64BitSupport()) {
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
      setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    }

    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  if (Subtarget.use64BitRegs()) {
    // 64-bit PowerPC implementations can support i64 types directly
    addRegisterClass(MVT::i64, &PPC::G8RCRegClass);
    // BUILD_PAIR can't be handled natively, and should be expanded to shl/or
    setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
    // 64-bit PowerPC wants to expand i128 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  } else {
    // 32-bit PowerPC wants to expand i64 shifts itself.
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  }

  // PowerPC has better expansions for funnel shifts than the generic
  // TargetLowering::expandFunnelShift.
  if (Subtarget.has64BitSupport()) {
    setOperationAction(ISD::FSHL, MVT::i64, Custom);
    setOperationAction(ISD::FSHR, MVT::i64, Custom);
  }
  setOperationAction(ISD::FSHL, MVT::i32, Custom);
  setOperationAction(ISD::FSHR, MVT::i32, Custom);

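  // With VSX, the IEEE-semantics scalar min/max nodes (FMAXNUM_IEEE and
  // FMINNUM_IEEE) are handled directly by hardware min/max instructions.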
  if (Subtarget.hasVSX()) {
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM_IEEE, MVT::f32, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f64, Legal);
    setOperationAction(ISD::FMINNUM_IEEE, MVT::f32, Legal);
  }

  if (Subtarget.hasAltivec()) {
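    // Altivec has native saturating add/subtract for the byte, halfword and
    // word integer vector types.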
    for (MVT VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);
    }
    // First set operation action for all vector types to expand. Then we
    // will selectively turn on ones that can be effectively codegen'd.
    for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
      // add/sub are legal for all supported vector VT's.
      setOperationAction(ISD::ADD, VT, Legal);
      setOperationAction(ISD::SUB, VT, Legal);

      // For v2i64, these are only valid with P8Vector. This is corrected after
      // the loop.
      if (VT.getSizeInBits() <= 128 && VT.getScalarSizeInBits() <= 64) {
        setOperationAction(ISD::SMAX, VT, Legal);
        setOperationAction(ISD::SMIN, VT, Legal);
        setOperationAction(ISD::UMAX, VT, Legal);
        setOperationAction(ISD::UMIN, VT, Legal);
      }
      else {
        setOperationAction(ISD::SMAX, VT, Expand);
        setOperationAction(ISD::SMIN, VT, Expand);
        setOperationAction(ISD::UMAX, VT, Expand);
        setOperationAction(ISD::UMIN, VT, Expand);
      }

      if (Subtarget.hasVSX()) {
        setOperationAction(ISD::FMAXNUM, VT, Legal);
        setOperationAction(ISD::FMINNUM, VT, Legal);
      }

      // Vector instructions introduced in P8
      if (Subtarget.hasP8Altivec() && (VT.SimpleTy != MVT::v1i128)) {
        setOperationAction(ISD::CTPOP, VT, Legal);
        setOperationAction(ISD::CTLZ, VT, Legal);
      }
      else {
        setOperationAction(ISD::CTPOP, VT, Expand);
        setOperationAction(ISD::CTLZ, VT, Expand);
      }

      // Vector instructions introduced in P9
      if (Subtarget.hasP9Altivec() && (VT.SimpleTy != MVT::v1i128))
        setOperationAction(ISD::CTTZ, VT, Legal);
      else
        setOperationAction(ISD::CTTZ, VT, Expand);

      // We promote all shuffles to v16i8.
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Promote);
      AddPromotedToType (ISD::VECTOR_SHUFFLE, VT, MVT::v16i8);

      // We promote all non-typed operations to v4i32.
      setOperationAction(ISD::AND   , VT, Promote);
      AddPromotedToType (ISD::AND   , VT, MVT::v4i32);
      setOperationAction(ISD::OR    , VT, Promote);
      AddPromotedToType (ISD::OR    , VT, MVT::v4i32);
      setOperationAction(ISD::XOR   , VT, Promote);
      AddPromotedToType (ISD::XOR   , VT, MVT::v4i32);
      setOperationAction(ISD::LOAD  , VT, Promote);
      AddPromotedToType (ISD::LOAD  , VT, MVT::v4i32);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v4i32);
      setOperationAction(ISD::VSELECT, VT, Legal);
      setOperationAction(ISD::SELECT_CC, VT, Promote);
      AddPromotedToType (ISD::SELECT_CC, VT, MVT::v4i32);
      setOperationAction(ISD::STORE, VT, Promote);
      AddPromotedToType (ISD::STORE, VT, MVT::v4i32);

      // No other operations are legal.
      setOperationAction(ISD::MUL , VT, Expand);
      setOperationAction(ISD::SDIV, VT, Expand);
      setOperationAction(ISD::SREM, VT, Expand);
      setOperationAction(ISD::UDIV, VT, Expand);
      setOperationAction(ISD::UREM, VT, Expand);
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FNEG, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FABS, VT, Expand);
      setOperationAction(ISD::FFLOOR, VT, Expand);
      setOperationAction(ISD::FCEIL,  VT, Expand);
      setOperationAction(ISD::FTRUNC, VT, Expand);
      setOperationAction(ISD::FRINT,  VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
      setOperationAction(ISD::BUILD_VECTOR, VT, Expand);
      setOperationAction(ISD::MULHU, VT, Expand);
      setOperationAction(ISD::MULHS, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UDIVREM, VT, Expand);
      setOperationAction(ISD::SDIVREM, VT, Expand);
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::BSWAP, VT, Expand);
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }
    setOperationAction(ISD::SELECT_CC, MVT::v4i32, Expand);
    if (!Subtarget.hasP8Vector()) {
      setOperationAction(ISD::SMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::SMIN, MVT::v2i64, Expand);
      setOperationAction(ISD::UMAX, MVT::v2i64, Expand);
      setOperationAction(ISD::UMIN, MVT::v2i64, Expand);
    }

    // We can custom expand all VECTOR_SHUFFLEs to VPERM, others we can handle
    // with merges, splats, etc.
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);

    // Vector truncates to sub-word integer that fit in an Altivec/VSX register
    // are cheap, so handle them before they get expanded to scalar.
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);

    setOperationAction(ISD::AND   , MVT::v4i32, Legal);
    setOperationAction(ISD::OR    , MVT::v4i32, Legal);
    setOperationAction(ISD::XOR   , MVT::v4i32, Legal);
    setOperationAction(ISD::LOAD  , MVT::v4i32, Legal);
    setOperationAction(ISD::SELECT, MVT::v4i32,
                       Subtarget.useCRBits() ? Legal : Expand);
    setOperationAction(ISD::STORE , MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);

    // Custom lowering ROTL v1i128 to VECTOR_SHUFFLE v16i8.
    setOperationAction(ISD::ROTL, MVT::v1i128, Custom);
    // With hasAltivec set, we can lower ISD::ROTL to vrl(b|h|w).
    if (Subtarget.hasAltivec())
      for (auto VT : {MVT::v4i32, MVT::v8i16, MVT::v16i8})
        setOperationAction(ISD::ROTL, VT, Legal);
    // With hasP8Altivec set, we can lower ISD::ROTL to vrld.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);

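    // All of the base Altivec vector types live in the 128-bit VR register
    // class.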
    addRegisterClass(MVT::v4f32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v4i32, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v8i16, &PPC::VRRCRegClass);
    addRegisterClass(MVT::v16i8, &PPC::VRRCRegClass);

    setOperationAction(ISD::MUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FMA, MVT::v4f32, Legal);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    }

    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::MUL, MVT::v4i32, Legal);
    else
      setOperationAction(ISD::MUL, MVT::v4i32, Custom);

    if (Subtarget.isISA3_1()) {
      setOperationAction(ISD::MUL, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHU, MVT::v2i64, Legal);
      setOperationAction(ISD::MULHS, MVT::v4i32, Legal);
      setOperationAction(ISD::MULHU, MVT::v4i32, Legal);
      setOperationAction(ISD::UDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::SDIV, MVT::v2i64, Legal);
      setOperationAction(ISD::UDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::SDIV, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v2i64, Legal);
      setOperationAction(ISD::SREM, MVT::v2i64, Legal);
      setOperationAction(ISD::UREM, MVT::v4i32, Legal);
      setOperationAction(ISD::SREM, MVT::v4i32, Legal);
      setOperationAction(ISD::UREM, MVT::v1i128, Legal);
      setOperationAction(ISD::SREM, MVT::v1i128, Legal);
      setOperationAction(ISD::UDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::SDIV, MVT::v1i128, Legal);
      setOperationAction(ISD::ROTL, MVT::v1i128, Legal);
    }

    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v16i8, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Custom);

    setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);

    // Altivec does not contain unordered floating-point compare instructions
    setCondCodeAction(ISD::SETUO, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETUEQ, MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETO,   MVT::v4f32, Expand);
    setCondCodeAction(ISD::SETONE, MVT::v4f32, Expand);

    if (Subtarget.hasVSX()) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2f64, Legal);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);
      if (Subtarget.hasP8Vector()) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4f32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Legal);
      }
      if (Subtarget.hasDirectMove() && isPPC64) {
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i32, Legal);
        setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i64, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Legal);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Legal);
      }
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Legal);

      // The nearbyint variants are not allowed to raise the inexact exception
      // so we can only code-gen them with unsafe math.
      if (TM.Options.UnsafeFPMath) {
        setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
        setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
      }

      setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
      setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
      setOperationAction(ISD::FROUND, MVT::f64, Legal);
      setOperationAction(ISD::FRINT, MVT::f64, Legal);

      setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
      setOperationAction(ISD::FROUND, MVT::f32, Legal);
      setOperationAction(ISD::FRINT, MVT::f32, Legal);

      setOperationAction(ISD::MUL, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);

      setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);

      // Share the Altivec comparison restrictions.
      setCondCodeAction(ISD::SETUO, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETO,   MVT::v2f64, Expand);
      setCondCodeAction(ISD::SETONE, MVT::v2f64, Expand);

      setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
      setOperationAction(ISD::STORE, MVT::v2f64, Legal);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Legal);

      if (Subtarget.hasP8Vector())
        addRegisterClass(MVT::f32, &PPC::VSSRCRegClass);

      addRegisterClass(MVT::f64, &PPC::VSFRCRegClass);

      addRegisterClass(MVT::v4i32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
      addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);

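      // Power8 Altivec adds doubleword element shifts (vsld/vsrd/vsrad).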
      if (Subtarget.hasP8Altivec()) {
        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
        setOperationAction(ISD::SRL, MVT::v2i64, Legal);

        // 128 bit shifts can be accomplished via 3 instructions for SHL and
        // SRL, but not for SRA because of the instructions available:
        // VS{RL} and VS{RL}O. However, due to direct move costs, it's not
        // worth doing so.
        setOperationAction(ISD::SHL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRL, MVT::v1i128, Expand);
        setOperationAction(ISD::SRA, MVT::v1i128, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
      }
      else {
        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
        setOperationAction(ISD::SRL, MVT::v2i64, Expand);

        setOperationAction(ISD::SETCC, MVT::v2i64, Custom);

        // VSX v2i64 only supports non-arithmetic operations.
        setOperationAction(ISD::ADD, MVT::v2i64, Expand);
        setOperationAction(ISD::SUB, MVT::v2i64, Expand);
      }

      if (Subtarget.isISA3_1())
        setOperationAction(ISD::SETCC, MVT::v1i128, Legal);
      else
        setOperationAction(ISD::SETCC, MVT::v1i128, Expand);

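      // v2i64 loads/stores are promoted to (bitcast through) v2f64, which is
      // legal with VSX.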
      setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
      AddPromotedToType (ISD::LOAD, MVT::v2i64, MVT::v2f64);
      setOperationAction(ISD::STORE, MVT::v2i64, Promote);
      AddPromotedToType (ISD::STORE, MVT::v2i64, MVT::v2f64);

      setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Legal);

      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i64, Legal);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
      setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);

      // Custom handling for partial vectors of integers converted to
      // floating point. We already have optimal handling for v2i32 through
      // the DAG combine, so those aren't necessary.
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i8, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v2i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);

      setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
      setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
      setOperationAction(ISD::FABS, MVT::v4f32, Legal);
      setOperationAction(ISD::FABS, MVT::v2f64, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Legal);
      setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Legal);

      if (Subtarget.hasDirectMove())
        setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
      setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);

      // Handle constrained floating-point operations for vectors.
      // The predicate is `hasVSX` because Altivec instructions do not raise
      // floating-point exceptions, while VSX vector instructions do.
      setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v4f32, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v4f32, Legal);

      setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMAXNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FMINNUM, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FCEIL,  MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::v2f64, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::v2f64, Legal);

      addRegisterClass(MVT::v2i64, &PPC::VSRCRegClass);
      addRegisterClass(MVT::f128, &PPC::VRRCRegClass);

      for (MVT FPT : MVT::fp_valuetypes())
        setLoadExtAction(ISD::EXTLOAD, MVT::f128, FPT, Expand);

      // Expand the SELECT to SELECT_CC
      setOperationAction(ISD::SELECT, MVT::f128, Expand);

      setTruncStoreAction(MVT::f128, MVT::f64, Expand);
      setTruncStoreAction(MVT::f128, MVT::f32, Expand);

      // No implementation for these ops for PowerPC.
      setOperationAction(ISD::FSIN, MVT::f128, Expand);
      setOperationAction(ISD::FCOS, MVT::f128, Expand);
      setOperationAction(ISD::FPOW, MVT::f128, Expand);
      setOperationAction(ISD::FPOWI, MVT::f128, Expand);
      setOperationAction(ISD::FREM, MVT::f128, Expand);
    }

    if (Subtarget.hasP8Altivec()) {
      addRegisterClass(MVT::v2i64, &PPC::VRRCRegClass);
      addRegisterClass(MVT::v1i128, &PPC::VRRCRegClass);
    }

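    // ISA 3.0 (Power9) adds vector element insert instructions and full IEEE
    // quad-precision (f128) arithmetic in hardware.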
    if (Subtarget.hasP9Vector()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

      // 128 bit shifts can be accomplished via 3 instructions for SHL and
      // SRL, but not for SRA because of the instructions available:
      // VS{RL} and VS{RL}O.
      setOperationAction(ISD::SHL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRL, MVT::v1i128, Legal);
      setOperationAction(ISD::SRA, MVT::v1i128, Expand);

      setOperationAction(ISD::FADD, MVT::f128, Legal);
      setOperationAction(ISD::FSUB, MVT::f128, Legal);
      setOperationAction(ISD::FDIV, MVT::f128, Legal);
      setOperationAction(ISD::FMUL, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);

      setOperationAction(ISD::FMA, MVT::f128, Legal);
      setCondCodeAction(ISD::SETULT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUGT, MVT::f128, Expand);
      setCondCodeAction(ISD::SETUEQ, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOGE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETOLE, MVT::f128, Expand);
      setCondCodeAction(ISD::SETONE, MVT::f128, Expand);

      setOperationAction(ISD::FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::FRINT, MVT::f128, Legal);
      setOperationAction(ISD::FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::FROUND, MVT::f128, Legal);

      setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::BITCAST, MVT::i128, Custom);

      // Handle constrained floating-point operations of fp128
      setOperationAction(ISD::STRICT_FADD, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSUB, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMUL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FDIV, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FMA, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FSQRT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
      setOperationAction(ISD::STRICT_FRINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FCEIL, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, MVT::f128, Legal);
      setOperationAction(ISD::STRICT_FROUND, MVT::f128, Legal);
      setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
      setOperationAction(ISD::BSWAP, MVT::v8i16, Legal);
      setOperationAction(ISD::BSWAP, MVT::v4i32, Legal);
      setOperationAction(ISD::BSWAP, MVT::v2i64, Legal);
      setOperationAction(ISD::BSWAP, MVT::v1i128, Legal);
    } else if (Subtarget.hasVSX()) {
      setOperationAction(ISD::LOAD, MVT::f128, Promote);
      setOperationAction(ISD::STORE, MVT::f128, Promote);

      AddPromotedToType(ISD::LOAD, MVT::f128, MVT::v4i32);
      AddPromotedToType(ISD::STORE, MVT::f128, MVT::v4i32);

1204       // Set FADD/FSUB as libcall to avoid having the legalizer expand the
1205       // fp_to_uint and int_to_fp.
1206       setOperationAction(ISD::FADD, MVT::f128, LibCall);
1207       setOperationAction(ISD::FSUB, MVT::f128, LibCall);
1208 
1209       setOperationAction(ISD::FMUL, MVT::f128, Expand);
1210       setOperationAction(ISD::FDIV, MVT::f128, Expand);
1211       setOperationAction(ISD::FNEG, MVT::f128, Expand);
1212       setOperationAction(ISD::FABS, MVT::f128, Expand);
1213       setOperationAction(ISD::FSQRT, MVT::f128, Expand);
1214       setOperationAction(ISD::FMA, MVT::f128, Expand);
1215       setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
1216 
1217       // Expand the fp_extend if the target type is fp128.
1218       setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
1219       setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Expand);
1220 
1221       // Custom-lower the fp_round if the source type is fp128.
1222       for (MVT VT : {MVT::f32, MVT::f64}) {
1223         setOperationAction(ISD::FP_ROUND, VT, Custom);
1224         setOperationAction(ISD::STRICT_FP_ROUND, VT, Custom);
1225       }
1226 
1227       setOperationAction(ISD::SETCC, MVT::f128, Custom);
1228       setOperationAction(ISD::STRICT_FSETCC, MVT::f128, Custom);
1229       setOperationAction(ISD::STRICT_FSETCCS, MVT::f128, Custom);
1230       setOperationAction(ISD::BR_CC, MVT::f128, Expand);
1231 
1232       // Lower the following f128 select_cc pattern:
1233       // select_cc x, y, tv, fv, cc -> select_cc (setcc x, y, cc), 0, tv, fv, NE
1234       setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
1235 
1236       // We need to handle f128 SELECT_CC with integer result type.
1237       setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1238       setOperationAction(ISD::SELECT_CC, MVT::i64, isPPC64 ? Custom : Expand);
1239     }
1240 
1241     if (Subtarget.hasP9Altivec()) {
1242       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
1243       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
1244 
1245       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8,  Legal);
1246       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
1247       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
1248       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8,  Legal);
1249       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Legal);
1250       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
1251       setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
1252     }
1253 
1254     if (Subtarget.isISA3_1())
1255       setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
1256   }
1257 
1258   if (Subtarget.pairedVectorMemops()) {
1259     addRegisterClass(MVT::v256i1, &PPC::VSRpRCRegClass);
1260     setOperationAction(ISD::LOAD, MVT::v256i1, Custom);
1261     setOperationAction(ISD::STORE, MVT::v256i1, Custom);
1262   }
1263   if (Subtarget.hasMMA()) {
1264     addRegisterClass(MVT::v512i1, &PPC::UACCRCRegClass);
1265     setOperationAction(ISD::LOAD, MVT::v512i1, Custom);
1266     setOperationAction(ISD::STORE, MVT::v512i1, Custom);
1267     setOperationAction(ISD::BUILD_VECTOR, MVT::v512i1, Custom);
1268   }
1269 
1270   if (Subtarget.has64BitSupport())
1271     setOperationAction(ISD::PREFETCH, MVT::Other, Legal);
1272 
1273   if (Subtarget.isISA3_1())
1274     setOperationAction(ISD::SRA, MVT::v1i128, Legal);
1275 
1276   setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, isPPC64 ? Legal : Custom);
1277 
1278   if (!isPPC64) {
1279     setOperationAction(ISD::ATOMIC_LOAD,  MVT::i64, Expand);
1280     setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
1281   }
1282 
1283   setBooleanContents(ZeroOrOneBooleanContent);
1284 
1285   if (Subtarget.hasAltivec()) {
1286     // Altivec instructions set fields to all zeros or all ones.
1287     setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
1288   }
1289 
1290   if (!isPPC64) {
1291     // These libcalls are not available in 32-bit.
1292     setLibcallName(RTLIB::SHL_I128, nullptr);
1293     setLibcallName(RTLIB::SRL_I128, nullptr);
1294     setLibcallName(RTLIB::SRA_I128, nullptr);
1295   }
1296 
1297   if (!isPPC64)
1298     setMaxAtomicSizeInBitsSupported(32);
1299 
1300   setStackPointerRegisterToSaveRestore(isPPC64 ? PPC::X1 : PPC::R1);
1301 
1302   // We have target-specific dag combine patterns for the following nodes:
1303   setTargetDAGCombine(ISD::ADD);
1304   setTargetDAGCombine(ISD::SHL);
1305   setTargetDAGCombine(ISD::SRA);
1306   setTargetDAGCombine(ISD::SRL);
1307   setTargetDAGCombine(ISD::MUL);
1308   setTargetDAGCombine(ISD::FMA);
1309   setTargetDAGCombine(ISD::SINT_TO_FP);
1310   setTargetDAGCombine(ISD::BUILD_VECTOR);
1311   if (Subtarget.hasFPCVT())
1312     setTargetDAGCombine(ISD::UINT_TO_FP);
1313   setTargetDAGCombine(ISD::LOAD);
1314   setTargetDAGCombine(ISD::STORE);
1315   setTargetDAGCombine(ISD::BR_CC);
1316   if (Subtarget.useCRBits())
1317     setTargetDAGCombine(ISD::BRCOND);
1318   setTargetDAGCombine(ISD::BSWAP);
1319   setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1320   setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
1321   setTargetDAGCombine(ISD::INTRINSIC_VOID);
1322 
1323   setTargetDAGCombine(ISD::SIGN_EXTEND);
1324   setTargetDAGCombine(ISD::ZERO_EXTEND);
1325   setTargetDAGCombine(ISD::ANY_EXTEND);
1326 
1327   setTargetDAGCombine(ISD::TRUNCATE);
1328   setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1329 
1330 
1331   if (Subtarget.useCRBits()) {
1332     setTargetDAGCombine(ISD::TRUNCATE);
1333     setTargetDAGCombine(ISD::SETCC);
1334     setTargetDAGCombine(ISD::SELECT_CC);
1335   }
1336 
1337   if (Subtarget.hasP9Altivec()) {
1338     setTargetDAGCombine(ISD::ABS);
1339     setTargetDAGCombine(ISD::VSELECT);
1340   }
1341 
1342   setLibcallName(RTLIB::LOG_F128, "logf128");
1343   setLibcallName(RTLIB::LOG2_F128, "log2f128");
1344   setLibcallName(RTLIB::LOG10_F128, "log10f128");
1345   setLibcallName(RTLIB::EXP_F128, "expf128");
1346   setLibcallName(RTLIB::EXP2_F128, "exp2f128");
1347   setLibcallName(RTLIB::SIN_F128, "sinf128");
1348   setLibcallName(RTLIB::COS_F128, "cosf128");
1349   setLibcallName(RTLIB::POW_F128, "powf128");
1350   setLibcallName(RTLIB::FMIN_F128, "fminf128");
1351   setLibcallName(RTLIB::FMAX_F128, "fmaxf128");
1352   setLibcallName(RTLIB::REM_F128, "fmodf128");
1353   setLibcallName(RTLIB::SQRT_F128, "sqrtf128");
1354   setLibcallName(RTLIB::CEIL_F128, "ceilf128");
1355   setLibcallName(RTLIB::FLOOR_F128, "floorf128");
1356   setLibcallName(RTLIB::TRUNC_F128, "truncf128");
1357   setLibcallName(RTLIB::ROUND_F128, "roundf128");
1358   setLibcallName(RTLIB::LROUND_F128, "lroundf128");
1359   setLibcallName(RTLIB::LLROUND_F128, "llroundf128");
1360   setLibcallName(RTLIB::RINT_F128, "rintf128");
1361   setLibcallName(RTLIB::LRINT_F128, "lrintf128");
1362   setLibcallName(RTLIB::LLRINT_F128, "llrintf128");
1363   setLibcallName(RTLIB::NEARBYINT_F128, "nearbyintf128");
1364   setLibcallName(RTLIB::FMA_F128, "fmaf128");
1365 
1366   // With 32 condition bits, we don't need to sink (and duplicate) compares
1367   // aggressively in CodeGenPrep.
1368   if (Subtarget.useCRBits()) {
1369     setHasMultipleConditionRegisters();
1370     setJumpIsExpensive();
1371   }
1372 
1373   setMinFunctionAlignment(Align(4));
1374 
1375   switch (Subtarget.getCPUDirective()) {
1376   default: break;
1377   case PPC::DIR_970:
1378   case PPC::DIR_A2:
1379   case PPC::DIR_E500:
1380   case PPC::DIR_E500mc:
1381   case PPC::DIR_E5500:
1382   case PPC::DIR_PWR4:
1383   case PPC::DIR_PWR5:
1384   case PPC::DIR_PWR5X:
1385   case PPC::DIR_PWR6:
1386   case PPC::DIR_PWR6X:
1387   case PPC::DIR_PWR7:
1388   case PPC::DIR_PWR8:
1389   case PPC::DIR_PWR9:
1390   case PPC::DIR_PWR10:
1391   case PPC::DIR_PWR_FUTURE:
1392     setPrefLoopAlignment(Align(16));
1393     setPrefFunctionAlignment(Align(16));
1394     break;
1395   }
1396 
1397   if (Subtarget.enableMachineScheduler())
1398     setSchedulingPreference(Sched::Source);
1399   else
1400     setSchedulingPreference(Sched::Hybrid);
1401 
1402   computeRegisterProperties(STI.getRegisterInfo());
1403 
1404   // The Freescale cores do better with aggressive inlining of memcpy and
1405   // friends. GCC uses the same threshold of 128 bytes (= 32 word stores).
1406   if (Subtarget.getCPUDirective() == PPC::DIR_E500mc ||
1407       Subtarget.getCPUDirective() == PPC::DIR_E5500) {
1408     MaxStoresPerMemset = 32;
1409     MaxStoresPerMemsetOptSize = 16;
1410     MaxStoresPerMemcpy = 32;
1411     MaxStoresPerMemcpyOptSize = 8;
1412     MaxStoresPerMemmove = 32;
1413     MaxStoresPerMemmoveOptSize = 8;
1414   } else if (Subtarget.getCPUDirective() == PPC::DIR_A2) {
1415     // The A2 also benefits from (very) aggressive inlining of memcpy and
1416     // friends. The overhead of the function call, even when warm, can be
1417     // over one hundred cycles.
1418     MaxStoresPerMemset = 128;
1419     MaxStoresPerMemcpy = 128;
1420     MaxStoresPerMemmove = 128;
1421     MaxLoadsPerMemcmp = 128;
1422   } else {
1423     MaxLoadsPerMemcmp = 8;
1424     MaxLoadsPerMemcmpOptSize = 4;
1425   }
1426 
1427   IsStrictFPEnabled = true;
1428 
1429   // Let the subtarget (CPU) decide if a predictable select is more expensive
1430   // than the corresponding branch. This information is used in CGP to decide
1431   // when to convert selects into branches.
1432   PredictableSelectIsExpensive = Subtarget.isPredictableSelectIsExpensive();
1433 }
1434 
1435 // *********************************** NOTE ************************************
1436 // For selecting load and store instructions, the addressing modes are defined
1437 // as ComplexPatterns in PPCInstrInfo.td, which are then utilized in the TD
1438 // patterns to match the load and store instructions.
1439 //
1440 // The TD definitions for the addressing modes correspond to their respective
1441 // Select<AddrMode>Form() function in PPCISelDAGToDAG.cpp. These functions rely
1442 // on SelectOptimalAddrMode(), which calls computeMOFlags() to compute the
1443 // address mode flags of a particular node. Afterwards, the computed address
1444 // flags are passed into getAddrModeForFlags() in order to retrieve the optimal
1445 // addressing mode. SelectOptimalAddrMode() then sets the Base and Displacement
1446 // accordingly, based on the preferred addressing mode.
1447 //
1448 // Within PPCISelLowering.h, there are two enums: MemOpFlags and AddrMode.
1449 // MemOpFlags contains all the possible flags that can be used to compute the
1450 // optimal addressing mode for load and store instructions.
1451 // AddrMode contains all the possible load and store addressing modes available
1452 // on Power (such as DForm, DSForm, DQForm, XForm, etc.)
1453 //
1454 // When adding new load and store instructions, it is possible that new address
1455 // flags may need to be added into MemOpFlags, and a new addressing mode will
1456 // need to be added to AddrMode. An entry of the new addressing mode (consisting
1457 // of the minimal and main distinguishing address flags for the new load/store
1458 // instructions) will need to be added into initializeAddrModeMap() below.
1459 // Finally, when adding new addressing modes, getAddrModeForFlags() will
1460 // need to be updated to account for selecting the optimal addressing mode.
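//
// For example (illustrative): a zero-extending i32 load whose address is
// (register + signed 16-bit immediate) computes the flags
// MOF_ZExt | MOF_RPlusSImm16 | MOF_WordInt, which match the first AM_DForm
// entry in initializeAddrModeMap() below, so getAddrModeForFlags() selects the
// D-Form addressing mode used by LWZ/STW.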
1461 // *****************************************************************************
1462 /// Initialize the map that relates the different addressing modes of the load
1463 /// and store instructions to a set of flags. This ensures the load/store
1464 /// instruction is correctly matched during instruction selection.
1465 void PPCTargetLowering::initializeAddrModeMap() {
1466   AddrModesMap[PPC::AM_DForm] = {
1467       // LWZ, STW
1468       PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_WordInt,
1469       PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_WordInt,
1470       PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt,
1471       PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt,
1472       // LBZ, LHZ, STB, STH
1473       PPC::MOF_ZExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt,
1474       PPC::MOF_ZExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt,
1475       PPC::MOF_ZExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt,
1476       PPC::MOF_ZExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt,
1477       // LHA
1478       PPC::MOF_SExt | PPC::MOF_RPlusSImm16 | PPC::MOF_SubWordInt,
1479       PPC::MOF_SExt | PPC::MOF_RPlusLo | PPC::MOF_SubWordInt,
1480       PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_SubWordInt,
1481       PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubWordInt,
1482       // LFS, LFD, STFS, STFD
1483       PPC::MOF_RPlusSImm16 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1484       PPC::MOF_RPlusLo | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1485       PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1486       PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetBeforeP9,
1487   };
1488   AddrModesMap[PPC::AM_DSForm] = {
1489       // LWA
1490       PPC::MOF_SExt | PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_WordInt,
1491       PPC::MOF_SExt | PPC::MOF_NotAddNorCst | PPC::MOF_WordInt,
1492       PPC::MOF_SExt | PPC::MOF_AddrIsSImm32 | PPC::MOF_WordInt,
1493       // LD, STD
1494       PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_DoubleWordInt,
1495       PPC::MOF_NotAddNorCst | PPC::MOF_DoubleWordInt,
1496       PPC::MOF_AddrIsSImm32 | PPC::MOF_DoubleWordInt,
1497       // DFLOADf32, DFLOADf64, DSTOREf32, DSTOREf64
1498       PPC::MOF_RPlusSImm16Mult4 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
1499       PPC::MOF_NotAddNorCst | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
1500       PPC::MOF_AddrIsSImm32 | PPC::MOF_ScalarFloat | PPC::MOF_SubtargetP9,
1501   };
1502   AddrModesMap[PPC::AM_DQForm] = {
1503       // LXV, STXV
1504       PPC::MOF_RPlusSImm16Mult16 | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
1505       PPC::MOF_NotAddNorCst | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
1506       PPC::MOF_AddrIsSImm32 | PPC::MOF_Vector | PPC::MOF_SubtargetP9,
1507       PPC::MOF_RPlusSImm16Mult16 | PPC::MOF_Vector256 | PPC::MOF_SubtargetP10,
1508       PPC::MOF_NotAddNorCst | PPC::MOF_Vector256 | PPC::MOF_SubtargetP10,
1509       PPC::MOF_AddrIsSImm32 | PPC::MOF_Vector256 | PPC::MOF_SubtargetP10,
1510   };
1511 }
1512 
1513 /// getMaxByValAlign - Helper for getByValTypeAlignment to determine
1514 /// the desired ByVal argument alignment.
1515 static void getMaxByValAlign(Type *Ty, Align &MaxAlign, Align MaxMaxAlign) {
1516   if (MaxAlign == MaxMaxAlign)
1517     return;
1518   if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1519     if (MaxMaxAlign >= 32 &&
1520         VTy->getPrimitiveSizeInBits().getFixedSize() >= 256)
1521       MaxAlign = Align(32);
1522     else if (VTy->getPrimitiveSizeInBits().getFixedSize() >= 128 &&
1523              MaxAlign < 16)
1524       MaxAlign = Align(16);
1525   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1526     Align EltAlign;
1527     getMaxByValAlign(ATy->getElementType(), EltAlign, MaxMaxAlign);
1528     if (EltAlign > MaxAlign)
1529       MaxAlign = EltAlign;
1530   } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1531     for (auto *EltTy : STy->elements()) {
1532       Align EltAlign;
1533       getMaxByValAlign(EltTy, EltAlign, MaxMaxAlign);
1534       if (EltAlign > MaxAlign)
1535         MaxAlign = EltAlign;
1536       if (MaxAlign == MaxMaxAlign)
1537         break;
1538     }
1539   }
1540 }
1541 
1542 /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
1543 /// function arguments in the caller parameter area.
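/// For example (illustrative): with Altivec available, an aggregate containing
/// a 128-bit (or wider) vector member is reported as 16-byte aligned, while
/// other aggregates default to 8 bytes on PPC64 and 4 bytes on PPC32.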
1544 unsigned PPCTargetLowering::getByValTypeAlignment(Type *Ty,
1545                                                   const DataLayout &DL) const {
1546   // 16-byte and wider vectors are passed on a 16-byte boundary.
1547   // The rest use an 8-byte boundary on PPC64 and a 4-byte boundary on PPC32.
1548   Align Alignment = Subtarget.isPPC64() ? Align(8) : Align(4);
1549   if (Subtarget.hasAltivec())
1550     getMaxByValAlign(Ty, Alignment, Align(16));
1551   return Alignment.value();
1552 }
1553 
1554 bool PPCTargetLowering::useSoftFloat() const {
1555   return Subtarget.useSoftFloat();
1556 }
1557 
1558 bool PPCTargetLowering::hasSPE() const {
1559   return Subtarget.hasSPE();
1560 }
1561 
1562 bool PPCTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
1563   return VT.isScalarInteger();
1564 }
1565 
1566 const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
1567   switch ((PPCISD::NodeType)Opcode) {
1568   case PPCISD::FIRST_NUMBER:    break;
1569   case PPCISD::FSEL:            return "PPCISD::FSEL";
1570   case PPCISD::XSMAXCDP:        return "PPCISD::XSMAXCDP";
1571   case PPCISD::XSMINCDP:        return "PPCISD::XSMINCDP";
1572   case PPCISD::FCFID:           return "PPCISD::FCFID";
1573   case PPCISD::FCFIDU:          return "PPCISD::FCFIDU";
1574   case PPCISD::FCFIDS:          return "PPCISD::FCFIDS";
1575   case PPCISD::FCFIDUS:         return "PPCISD::FCFIDUS";
1576   case PPCISD::FCTIDZ:          return "PPCISD::FCTIDZ";
1577   case PPCISD::FCTIWZ:          return "PPCISD::FCTIWZ";
1578   case PPCISD::FCTIDUZ:         return "PPCISD::FCTIDUZ";
1579   case PPCISD::FCTIWUZ:         return "PPCISD::FCTIWUZ";
1580   case PPCISD::FP_TO_UINT_IN_VSR:
1581                                 return "PPCISD::FP_TO_UINT_IN_VSR";
1582   case PPCISD::FP_TO_SINT_IN_VSR:
1583                                 return "PPCISD::FP_TO_SINT_IN_VSR";
1584   case PPCISD::FRE:             return "PPCISD::FRE";
1585   case PPCISD::FRSQRTE:         return "PPCISD::FRSQRTE";
1586   case PPCISD::FTSQRT:
1587     return "PPCISD::FTSQRT";
1588   case PPCISD::FSQRT:
1589     return "PPCISD::FSQRT";
1590   case PPCISD::STFIWX:          return "PPCISD::STFIWX";
1591   case PPCISD::VPERM:           return "PPCISD::VPERM";
1592   case PPCISD::XXSPLT:          return "PPCISD::XXSPLT";
1593   case PPCISD::XXSPLTI_SP_TO_DP:
1594     return "PPCISD::XXSPLTI_SP_TO_DP";
1595   case PPCISD::XXSPLTI32DX:
1596     return "PPCISD::XXSPLTI32DX";
1597   case PPCISD::VECINSERT:       return "PPCISD::VECINSERT";
1598   case PPCISD::XXPERMDI:        return "PPCISD::XXPERMDI";
1599   case PPCISD::VECSHL:          return "PPCISD::VECSHL";
1600   case PPCISD::CMPB:            return "PPCISD::CMPB";
1601   case PPCISD::Hi:              return "PPCISD::Hi";
1602   case PPCISD::Lo:              return "PPCISD::Lo";
1603   case PPCISD::TOC_ENTRY:       return "PPCISD::TOC_ENTRY";
1604   case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
1605   case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
1606   case PPCISD::DYNALLOC:        return "PPCISD::DYNALLOC";
1607   case PPCISD::DYNAREAOFFSET:   return "PPCISD::DYNAREAOFFSET";
1608   case PPCISD::PROBED_ALLOCA:   return "PPCISD::PROBED_ALLOCA";
1609   case PPCISD::GlobalBaseReg:   return "PPCISD::GlobalBaseReg";
1610   case PPCISD::SRL:             return "PPCISD::SRL";
1611   case PPCISD::SRA:             return "PPCISD::SRA";
1612   case PPCISD::SHL:             return "PPCISD::SHL";
1613   case PPCISD::SRA_ADDZE:       return "PPCISD::SRA_ADDZE";
1614   case PPCISD::CALL:            return "PPCISD::CALL";
1615   case PPCISD::CALL_NOP:        return "PPCISD::CALL_NOP";
1616   case PPCISD::CALL_NOTOC:      return "PPCISD::CALL_NOTOC";
1617   case PPCISD::MTCTR:           return "PPCISD::MTCTR";
1618   case PPCISD::BCTRL:           return "PPCISD::BCTRL";
1619   case PPCISD::BCTRL_LOAD_TOC:  return "PPCISD::BCTRL_LOAD_TOC";
1620   case PPCISD::RET_FLAG:        return "PPCISD::RET_FLAG";
1621   case PPCISD::READ_TIME_BASE:  return "PPCISD::READ_TIME_BASE";
1622   case PPCISD::EH_SJLJ_SETJMP:  return "PPCISD::EH_SJLJ_SETJMP";
1623   case PPCISD::EH_SJLJ_LONGJMP: return "PPCISD::EH_SJLJ_LONGJMP";
1624   case PPCISD::MFOCRF:          return "PPCISD::MFOCRF";
1625   case PPCISD::MFVSR:           return "PPCISD::MFVSR";
1626   case PPCISD::MTVSRA:          return "PPCISD::MTVSRA";
1627   case PPCISD::MTVSRZ:          return "PPCISD::MTVSRZ";
1628   case PPCISD::SINT_VEC_TO_FP:  return "PPCISD::SINT_VEC_TO_FP";
1629   case PPCISD::UINT_VEC_TO_FP:  return "PPCISD::UINT_VEC_TO_FP";
1630   case PPCISD::SCALAR_TO_VECTOR_PERMUTED:
1631     return "PPCISD::SCALAR_TO_VECTOR_PERMUTED";
1632   case PPCISD::ANDI_rec_1_EQ_BIT:
1633     return "PPCISD::ANDI_rec_1_EQ_BIT";
1634   case PPCISD::ANDI_rec_1_GT_BIT:
1635     return "PPCISD::ANDI_rec_1_GT_BIT";
1636   case PPCISD::VCMP:            return "PPCISD::VCMP";
1637   case PPCISD::VCMP_rec:        return "PPCISD::VCMP_rec";
1638   case PPCISD::LBRX:            return "PPCISD::LBRX";
1639   case PPCISD::STBRX:           return "PPCISD::STBRX";
1640   case PPCISD::LFIWAX:          return "PPCISD::LFIWAX";
1641   case PPCISD::LFIWZX:          return "PPCISD::LFIWZX";
1642   case PPCISD::LXSIZX:          return "PPCISD::LXSIZX";
1643   case PPCISD::STXSIX:          return "PPCISD::STXSIX";
1644   case PPCISD::VEXTS:           return "PPCISD::VEXTS";
1645   case PPCISD::LXVD2X:          return "PPCISD::LXVD2X";
1646   case PPCISD::STXVD2X:         return "PPCISD::STXVD2X";
1647   case PPCISD::LOAD_VEC_BE:     return "PPCISD::LOAD_VEC_BE";
1648   case PPCISD::STORE_VEC_BE:    return "PPCISD::STORE_VEC_BE";
1649   case PPCISD::ST_VSR_SCAL_INT:
1650                                 return "PPCISD::ST_VSR_SCAL_INT";
1651   case PPCISD::COND_BRANCH:     return "PPCISD::COND_BRANCH";
1652   case PPCISD::BDNZ:            return "PPCISD::BDNZ";
1653   case PPCISD::BDZ:             return "PPCISD::BDZ";
1654   case PPCISD::MFFS:            return "PPCISD::MFFS";
1655   case PPCISD::FADDRTZ:         return "PPCISD::FADDRTZ";
1656   case PPCISD::TC_RETURN:       return "PPCISD::TC_RETURN";
1657   case PPCISD::CR6SET:          return "PPCISD::CR6SET";
1658   case PPCISD::CR6UNSET:        return "PPCISD::CR6UNSET";
1659   case PPCISD::PPC32_GOT:       return "PPCISD::PPC32_GOT";
1660   case PPCISD::PPC32_PICGOT:    return "PPCISD::PPC32_PICGOT";
1661   case PPCISD::ADDIS_GOT_TPREL_HA: return "PPCISD::ADDIS_GOT_TPREL_HA";
1662   case PPCISD::LD_GOT_TPREL_L:  return "PPCISD::LD_GOT_TPREL_L";
1663   case PPCISD::ADD_TLS:         return "PPCISD::ADD_TLS";
1664   case PPCISD::ADDIS_TLSGD_HA:  return "PPCISD::ADDIS_TLSGD_HA";
1665   case PPCISD::ADDI_TLSGD_L:    return "PPCISD::ADDI_TLSGD_L";
1666   case PPCISD::GET_TLS_ADDR:    return "PPCISD::GET_TLS_ADDR";
1667   case PPCISD::ADDI_TLSGD_L_ADDR: return "PPCISD::ADDI_TLSGD_L_ADDR";
1668   case PPCISD::TLSGD_AIX:       return "PPCISD::TLSGD_AIX";
1669   case PPCISD::ADDIS_TLSLD_HA:  return "PPCISD::ADDIS_TLSLD_HA";
1670   case PPCISD::ADDI_TLSLD_L:    return "PPCISD::ADDI_TLSLD_L";
1671   case PPCISD::GET_TLSLD_ADDR:  return "PPCISD::GET_TLSLD_ADDR";
1672   case PPCISD::ADDI_TLSLD_L_ADDR: return "PPCISD::ADDI_TLSLD_L_ADDR";
1673   case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
1674   case PPCISD::ADDI_DTPREL_L:   return "PPCISD::ADDI_DTPREL_L";
1675   case PPCISD::PADDI_DTPREL:
1676     return "PPCISD::PADDI_DTPREL";
1677   case PPCISD::VADD_SPLAT:      return "PPCISD::VADD_SPLAT";
1678   case PPCISD::SC:              return "PPCISD::SC";
1679   case PPCISD::CLRBHRB:         return "PPCISD::CLRBHRB";
1680   case PPCISD::MFBHRBE:         return "PPCISD::MFBHRBE";
1681   case PPCISD::RFEBB:           return "PPCISD::RFEBB";
1682   case PPCISD::XXSWAPD:         return "PPCISD::XXSWAPD";
1683   case PPCISD::SWAP_NO_CHAIN:   return "PPCISD::SWAP_NO_CHAIN";
1684   case PPCISD::VABSD:           return "PPCISD::VABSD";
1685   case PPCISD::BUILD_FP128:     return "PPCISD::BUILD_FP128";
1686   case PPCISD::BUILD_SPE64:     return "PPCISD::BUILD_SPE64";
1687   case PPCISD::EXTRACT_SPE:     return "PPCISD::EXTRACT_SPE";
1688   case PPCISD::EXTSWSLI:        return "PPCISD::EXTSWSLI";
1689   case PPCISD::LD_VSX_LH:       return "PPCISD::LD_VSX_LH";
1690   case PPCISD::FP_EXTEND_HALF:  return "PPCISD::FP_EXTEND_HALF";
1691   case PPCISD::MAT_PCREL_ADDR:  return "PPCISD::MAT_PCREL_ADDR";
1692   case PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR:
1693     return "PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR";
1694   case PPCISD::TLS_LOCAL_EXEC_MAT_ADDR:
1695     return "PPCISD::TLS_LOCAL_EXEC_MAT_ADDR";
1696   case PPCISD::ACC_BUILD:       return "PPCISD::ACC_BUILD";
1697   case PPCISD::PAIR_BUILD:      return "PPCISD::PAIR_BUILD";
1698   case PPCISD::EXTRACT_VSX_REG: return "PPCISD::EXTRACT_VSX_REG";
1699   case PPCISD::XXMFACC:         return "PPCISD::XXMFACC";
1700   case PPCISD::LD_SPLAT:        return "PPCISD::LD_SPLAT";
1701   case PPCISD::FNMSUB:          return "PPCISD::FNMSUB";
1702   case PPCISD::STRICT_FADDRTZ:
1703     return "PPCISD::STRICT_FADDRTZ";
1704   case PPCISD::STRICT_FCTIDZ:
1705     return "PPCISD::STRICT_FCTIDZ";
1706   case PPCISD::STRICT_FCTIWZ:
1707     return "PPCISD::STRICT_FCTIWZ";
1708   case PPCISD::STRICT_FCTIDUZ:
1709     return "PPCISD::STRICT_FCTIDUZ";
1710   case PPCISD::STRICT_FCTIWUZ:
1711     return "PPCISD::STRICT_FCTIWUZ";
1712   case PPCISD::STRICT_FCFID:
1713     return "PPCISD::STRICT_FCFID";
1714   case PPCISD::STRICT_FCFIDU:
1715     return "PPCISD::STRICT_FCFIDU";
1716   case PPCISD::STRICT_FCFIDS:
1717     return "PPCISD::STRICT_FCFIDS";
1718   case PPCISD::STRICT_FCFIDUS:
1719     return "PPCISD::STRICT_FCFIDUS";
1720   case PPCISD::LXVRZX:          return "PPCISD::LXVRZX";
1721   }
1722   return nullptr;
1723 }
1724 
1725 EVT PPCTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &C,
1726                                           EVT VT) const {
1727   if (!VT.isVector())
1728     return Subtarget.useCRBits() ? MVT::i1 : MVT::i32;
1729 
1730   return VT.changeVectorElementTypeToInteger();
1731 }
1732 
1733 bool PPCTargetLowering::enableAggressiveFMAFusion(EVT VT) const {
1734   assert(VT.isFloatingPoint() && "Non-floating-point FMA?");
1735   return true;
1736 }
1737 
1738 //===----------------------------------------------------------------------===//
1739 // Node matching predicates, for use by the tblgen matching code.
1740 //===----------------------------------------------------------------------===//
1741 
1742 /// isFloatingPointZero - Return true if this is 0.0 or -0.0.
1743 static bool isFloatingPointZero(SDValue Op) {
1744   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
1745     return CFP->getValueAPF().isZero();
1746   else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
1747     // Maybe this has already been legalized into the constant pool?
1748     if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op.getOperand(1)))
1749       if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
1750         return CFP->getValueAPF().isZero();
1751   }
1752   return false;
1753 }
1754 
1755 /// isConstantOrUndef - Op is either an undef node or a ConstantSDNode.  Return
1756 /// true if Op is undef or if it matches the specified value.
1757 static bool isConstantOrUndef(int Op, int Val) {
1758   return Op < 0 || Op == Val;
1759 }
1760 
1761 /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
1762 /// VPKUHUM instruction.
1763 /// The ShuffleKind distinguishes between big-endian operations with
1764 /// two different inputs (0), either-endian operations with two identical
1765 /// inputs (1), and little-endian operations with two different inputs (2).
1766 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
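/// For example (illustrative), the big-endian two-input form (ShuffleKind 0)
/// matches the mask <1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31>, i.e. the odd
/// bytes of the two concatenated inputs.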
1767 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1768                                SelectionDAG &DAG) {
1769   bool IsLE = DAG.getDataLayout().isLittleEndian();
1770   if (ShuffleKind == 0) {
1771     if (IsLE)
1772       return false;
1773     for (unsigned i = 0; i != 16; ++i)
1774       if (!isConstantOrUndef(N->getMaskElt(i), i*2+1))
1775         return false;
1776   } else if (ShuffleKind == 2) {
1777     if (!IsLE)
1778       return false;
1779     for (unsigned i = 0; i != 16; ++i)
1780       if (!isConstantOrUndef(N->getMaskElt(i), i*2))
1781         return false;
1782   } else if (ShuffleKind == 1) {
1783     unsigned j = IsLE ? 0 : 1;
1784     for (unsigned i = 0; i != 8; ++i)
1785       if (!isConstantOrUndef(N->getMaskElt(i),    i*2+j) ||
1786           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j))
1787         return false;
1788   }
1789   return true;
1790 }
1791 
1792 /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
1793 /// VPKUWUM instruction.
1794 /// The ShuffleKind distinguishes between big-endian operations with
1795 /// two different inputs (0), either-endian operations with two identical
1796 /// inputs (1), and little-endian operations with two different inputs (2).
1797 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
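/// For example (illustrative), the big-endian two-input form (ShuffleKind 0)
/// matches the mask <2,3,6,7,10,11,14,15,18,19,22,23,26,27,30,31>, i.e. the low
/// halfword of each word of the two concatenated inputs.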
1798 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1799                                SelectionDAG &DAG) {
1800   bool IsLE = DAG.getDataLayout().isLittleEndian();
1801   if (ShuffleKind == 0) {
1802     if (IsLE)
1803       return false;
1804     for (unsigned i = 0; i != 16; i += 2)
1805       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+2) ||
1806           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+3))
1807         return false;
1808   } else if (ShuffleKind == 2) {
1809     if (!IsLE)
1810       return false;
1811     for (unsigned i = 0; i != 16; i += 2)
1812       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1813           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1))
1814         return false;
1815   } else if (ShuffleKind == 1) {
1816     unsigned j = IsLE ? 0 : 2;
1817     for (unsigned i = 0; i != 8; i += 2)
1818       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1819           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1820           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1821           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1))
1822         return false;
1823   }
1824   return true;
1825 }
1826 
1827 /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
1828 /// VPKUDUM instruction, AND the VPKUDUM instruction exists for the
1829 /// current subtarget.
1830 ///
1831 /// The ShuffleKind distinguishes between big-endian operations with
1832 /// two different inputs (0), either-endian operations with two identical
1833 /// inputs (1), and little-endian operations with two different inputs (2).
1834 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
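/// For example (illustrative), the big-endian two-input form (ShuffleKind 0)
/// matches the mask <4,5,6,7,12,13,14,15,20,21,22,23,28,29,30,31>, i.e. the low
/// word of each doubleword of the two concatenated inputs.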
1835 bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
1836                                SelectionDAG &DAG) {
1837   const PPCSubtarget& Subtarget =
1838       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
1839   if (!Subtarget.hasP8Vector())
1840     return false;
1841 
1842   bool IsLE = DAG.getDataLayout().isLittleEndian();
1843   if (ShuffleKind == 0) {
1844     if (IsLE)
1845       return false;
1846     for (unsigned i = 0; i != 16; i += 4)
1847       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+4) ||
1848           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+5) ||
1849           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+6) ||
1850           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+7))
1851         return false;
1852   } else if (ShuffleKind == 2) {
1853     if (!IsLE)
1854       return false;
1855     for (unsigned i = 0; i != 16; i += 4)
1856       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2) ||
1857           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+1) ||
1858           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+2) ||
1859           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+3))
1860         return false;
1861   } else if (ShuffleKind == 1) {
1862     unsigned j = IsLE ? 0 : 4;
1863     for (unsigned i = 0; i != 8; i += 4)
1864       if (!isConstantOrUndef(N->getMaskElt(i  ),  i*2+j)   ||
1865           !isConstantOrUndef(N->getMaskElt(i+1),  i*2+j+1) ||
1866           !isConstantOrUndef(N->getMaskElt(i+2),  i*2+j+2) ||
1867           !isConstantOrUndef(N->getMaskElt(i+3),  i*2+j+3) ||
1868           !isConstantOrUndef(N->getMaskElt(i+8),  i*2+j)   ||
1869           !isConstantOrUndef(N->getMaskElt(i+9),  i*2+j+1) ||
1870           !isConstantOrUndef(N->getMaskElt(i+10), i*2+j+2) ||
1871           !isConstantOrUndef(N->getMaskElt(i+11), i*2+j+3))
1872         return false;
1873   }
1874   return true;
1875 }
1876 
1877 /// isVMerge - Common function, used to match vmrg* shuffles.
1878 ///
1879 static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
1880                      unsigned LHSStart, unsigned RHSStart) {
1881   if (N->getValueType(0) != MVT::v16i8)
1882     return false;
1883   assert((UnitSize == 1 || UnitSize == 2 || UnitSize == 4) &&
1884          "Unsupported merge size!");
1885 
1886   for (unsigned i = 0; i != 8/UnitSize; ++i)     // Step over units
1887     for (unsigned j = 0; j != UnitSize; ++j) {   // Step over bytes within unit
1888       if (!isConstantOrUndef(N->getMaskElt(i*UnitSize*2+j),
1889                              LHSStart+j+i*UnitSize) ||
1890           !isConstantOrUndef(N->getMaskElt(i*UnitSize*2+UnitSize+j),
1891                              RHSStart+j+i*UnitSize))
1892         return false;
1893     }
1894   return true;
1895 }
1896 
1897 /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
1898 /// a VMRGL* instruction with the specified unit size (1,2 or 4 bytes).
1899 /// The ShuffleKind distinguishes between big-endian merges with two
1900 /// different inputs (0), either-endian merges with two identical inputs (1),
1901 /// and little-endian merges with two different inputs (2).  For the latter,
1902 /// the input operands are swapped (see PPCInstrAltivec.td).
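/// For example (illustrative), with UnitSize == 1 the big-endian two-input form
/// (ShuffleKind 0) matches the byte-interleaving mask
/// <8,24,9,25,10,26,11,27,12,28,13,29,14,30,15,31>.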
1903 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1904                              unsigned ShuffleKind, SelectionDAG &DAG) {
1905   if (DAG.getDataLayout().isLittleEndian()) {
1906     if (ShuffleKind == 1) // unary
1907       return isVMerge(N, UnitSize, 0, 0);
1908     else if (ShuffleKind == 2) // swapped
1909       return isVMerge(N, UnitSize, 0, 16);
1910     else
1911       return false;
1912   } else {
1913     if (ShuffleKind == 1) // unary
1914       return isVMerge(N, UnitSize, 8, 8);
1915     else if (ShuffleKind == 0) // normal
1916       return isVMerge(N, UnitSize, 8, 24);
1917     else
1918       return false;
1919   }
1920 }
1921 
1922 /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
1923 /// a VMRGH* instruction with the specified unit size (1,2 or 4 bytes).
1924 /// The ShuffleKind distinguishes between big-endian merges with two
1925 /// different inputs (0), either-endian merges with two identical inputs (1),
1926 /// and little-endian merges with two different inputs (2).  For the latter,
1927 /// the input operands are swapped (see PPCInstrAltivec.td).
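/// For example (illustrative), with UnitSize == 1 the big-endian two-input form
/// (ShuffleKind 0) matches the byte-interleaving mask
/// <0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23>.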
1928 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
1929                              unsigned ShuffleKind, SelectionDAG &DAG) {
1930   if (DAG.getDataLayout().isLittleEndian()) {
1931     if (ShuffleKind == 1) // unary
1932       return isVMerge(N, UnitSize, 8, 8);
1933     else if (ShuffleKind == 2) // swapped
1934       return isVMerge(N, UnitSize, 8, 24);
1935     else
1936       return false;
1937   } else {
1938     if (ShuffleKind == 1) // unary
1939       return isVMerge(N, UnitSize, 0, 0);
1940     else if (ShuffleKind == 0) // normal
1941       return isVMerge(N, UnitSize, 0, 16);
1942     else
1943       return false;
1944   }
1945 }
1946 
1947 /**
1948  * Common function used to match vmrgew and vmrgow shuffles
1949  *
1950  * The indexOffset determines whether to look for even or odd words in
1951  * the shuffle mask. This is based on the endianness of the target
1952  * machine.
1953  *   - Little Endian:
1954  *     - Use offset of 0 to check for odd elements
1955  *     - Use offset of 4 to check for even elements
1956  *   - Big Endian:
1957  *     - Use offset of 0 to check for even elements
1958  *     - Use offset of 4 to check for odd elements
1959  * A detailed description of the vector element ordering for little endian and
1960  * big endian can be found at
1961  * http://www.ibm.com/developerworks/library/l-ibm-xl-c-cpp-compiler/index.html
1962  * Targeting your applications - what little endian and big endian IBM XL C/C++
1963  * compiler differences mean to you
1964  *
1965  * The mask to the shuffle vector instruction specifies the indices of the
1966  * elements from the two input vectors to place in the result. The elements are
1967  * numbered in array-access order, starting with the first vector. These vectors
1968  * are always of type v16i8, thus each vector will contain 16 byte-sized
1969  * elements. More info on the shuffle vector can be found in the
1970  * http://llvm.org/docs/LangRef.html#shufflevector-instruction
1971  * Language Reference.
1972  *
1973  * The RHSStartValue indicates whether the same input vectors are used (unary)
1974  * or two different input vectors are used, based on the following:
1975  *   - If the instruction uses the same vector for both inputs, the range of the
1976  *     indices will be 0 to 15. In this case, the RHSStart value passed should
1977  *     be 0.
1978  *   - If the instruction has two different vectors then the range of the
1979  *     indices will be 0 to 31. In this case, the RHSStart value passed should
1980  *     be 16 (indices 0-15 specify elements in the first vector while indices 16
1981  *     to 31 specify elements in the second vector).
1982  *
1983  * \param[in] N The shuffle vector SD Node to analyze
1984  * \param[in] IndexOffset Specifies whether to look for even or odd elements
1985  * \param[in] RHSStartValue Specifies the starting index for the righthand input
1986  * vector to the shuffle_vector instruction
1987  * \return true iff this shuffle vector represents an even or odd word merge
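 *
 * For example (illustrative), a big-endian even-word merge of two different
 * inputs (IndexOffset == 0, RHSStartValue == 16) matches the mask
 * <0,1,2,3,16,17,18,19,8,9,10,11,24,25,26,27>.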
1988  */
1989 static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
1990                      unsigned RHSStartValue) {
1991   if (N->getValueType(0) != MVT::v16i8)
1992     return false;
1993 
1994   for (unsigned i = 0; i < 2; ++i)
1995     for (unsigned j = 0; j < 4; ++j)
1996       if (!isConstantOrUndef(N->getMaskElt(i*4+j),
1997                              i*RHSStartValue+j+IndexOffset) ||
1998           !isConstantOrUndef(N->getMaskElt(i*4+j+8),
1999                              i*RHSStartValue+j+IndexOffset+8))
2000         return false;
2001   return true;
2002 }
2003 
2004 /**
2005  * Determine if the specified shuffle mask is suitable for the vmrgew or
2006  * vmrgow instructions.
2007  *
2008  * \param[in] N The shuffle vector SD Node to analyze
2009  * \param[in] CheckEven Check for an even merge (true) or an odd merge (false)
2010  * \param[in] ShuffleKind Identify the type of merge:
2011  *   - 0 = big-endian merge with two different inputs;
2012  *   - 1 = either-endian merge with two identical inputs;
2013  *   - 2 = little-endian merge with two different inputs (inputs are swapped for
2014  *     little-endian merges).
2015  * \param[in] DAG The current SelectionDAG
2016  * \return true iff this shuffle mask is suitable for a vmrgew or vmrgow instruction
2017  */
2018 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
2019                               unsigned ShuffleKind, SelectionDAG &DAG) {
2020   if (DAG.getDataLayout().isLittleEndian()) {
2021     unsigned indexOffset = CheckEven ? 4 : 0;
2022     if (ShuffleKind == 1) // Unary
2023       return isVMerge(N, indexOffset, 0);
2024     else if (ShuffleKind == 2) // swapped
2025       return isVMerge(N, indexOffset, 16);
2026     else
2027       return false;
2028   }
2029   else {
2030     unsigned indexOffset = CheckEven ? 0 : 4;
2031     if (ShuffleKind == 1) // Unary
2032       return isVMerge(N, indexOffset, 0);
2033     else if (ShuffleKind == 0) // Normal
2034       return isVMerge(N, indexOffset, 16);
2035     else
2036       return false;
2037   }
2038   return false;
2039 }
2040 
2041 /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the shift
2042 /// amount, otherwise return -1.
2043 /// The ShuffleKind distinguishes between big-endian operations with two
2044 /// different inputs (0), either-endian operations with two identical inputs
2045 /// (1), and little-endian operations with two different inputs (2).  For the
2046 /// latter, the input operands are swapped (see PPCInstrAltivec.td).
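/// For example (illustrative), the big-endian two-input mask
/// <3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18> corresponds to a shift amount of
/// 3.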
2047 int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
2048                              SelectionDAG &DAG) {
2049   if (N->getValueType(0) != MVT::v16i8)
2050     return -1;
2051 
2052   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2053 
2054   // Find the first non-undef value in the shuffle mask.
2055   unsigned i;
2056   for (i = 0; i != 16 && SVOp->getMaskElt(i) < 0; ++i)
2057     /*search*/;
2058 
2059   if (i == 16) return -1;  // all undef.
2060 
2061   // Otherwise, check to see if the rest of the elements are consecutively
2062   // numbered from this value.
2063   unsigned ShiftAmt = SVOp->getMaskElt(i);
2064   if (ShiftAmt < i) return -1;
2065 
2066   ShiftAmt -= i;
2067   bool isLE = DAG.getDataLayout().isLittleEndian();
2068 
2069   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
2070     // Check the rest of the elements to see if they are consecutive.
2071     for (++i; i != 16; ++i)
2072       if (!isConstantOrUndef(SVOp->getMaskElt(i), ShiftAmt+i))
2073         return -1;
2074   } else if (ShuffleKind == 1) {
2075     // Check the rest of the elements to see if they are consecutive.
2076     for (++i; i != 16; ++i)
2077       if (!isConstantOrUndef(SVOp->getMaskElt(i), (ShiftAmt+i) & 15))
2078         return -1;
2079   } else
2080     return -1;
2081 
2082   if (isLE)
2083     ShiftAmt = 16 - ShiftAmt;
2084 
2085   return ShiftAmt;
2086 }
2087 
2088 /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
2089 /// specifies a splat of a single element that is suitable for input to
2090 /// one of the splat operations (VSPLTB/VSPLTH/VSPLTW/XXSPLTW/LXVDSX/etc.).
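/// For example (illustrative), with EltSize == 4 the mask
/// <4,5,6,7,4,5,6,7,4,5,6,7,4,5,6,7> is a splat of word element 1.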
2091 bool PPC::isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize) {
2092   assert(N->getValueType(0) == MVT::v16i8 && isPowerOf2_32(EltSize) &&
2093          EltSize <= 8 && "Can only handle 1,2,4,8 byte element sizes");
2094 
2095   // The consecutive indices need to specify an element, not part of two
2096   // different elements.  So abandon ship early if this isn't the case.
2097   if (N->getMaskElt(0) % EltSize != 0)
2098     return false;
2099 
2100   // This is a splat operation if each element of the permute is the same, and
2101   // if the value doesn't reference the second vector.
2102   unsigned ElementBase = N->getMaskElt(0);
2103 
2104   // FIXME: Handle UNDEF elements too!
2105   if (ElementBase >= 16)
2106     return false;
2107 
2108   // Check that the indices are consecutive, in the case of a multi-byte element
2109   // splatted with a v16i8 mask.
2110   for (unsigned i = 1; i != EltSize; ++i)
2111     if (N->getMaskElt(i) < 0 || N->getMaskElt(i) != (int)(i+ElementBase))
2112       return false;
2113 
2114   for (unsigned i = EltSize, e = 16; i != e; i += EltSize) {
2115     if (N->getMaskElt(i) < 0) continue;
2116     for (unsigned j = 0; j != EltSize; ++j)
2117       if (N->getMaskElt(i+j) != N->getMaskElt(j))
2118         return false;
2119   }
2120   return true;
2121 }
2122 
2123 /// Check that the mask is shuffling N byte elements. Within each N byte
2124 /// element of the mask, the indices could be either in increasing or
2125 /// decreasing order as long as they are consecutive.
2126 /// \param[in] N the shuffle vector SD Node to analyze
2127 /// \param[in] Width the element width in bytes, could be 2/4/8/16 (HalfWord/
2128 /// Word/DoubleWord/QuadWord).
2129 /// \param[in] StepLen the index delta between consecutive entries within each
2130 /// N byte element: 1 if the mask is in increasing order, -1 if decreasing.
2131 /// \return true iff the mask is shuffling N byte elements.
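/// For example (illustrative), with Width == 4 and StepLen == 1 the mask
/// <4,5,6,7,0,1,2,3,12,13,14,15,8,9,10,11> satisfies this check, while a mask
/// whose word begins at an unaligned byte index (e.g. 1) does not.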
2132 static bool isNByteElemShuffleMask(ShuffleVectorSDNode *N, unsigned Width,
2133                                    int StepLen) {
2134   assert((Width == 2 || Width == 4 || Width == 8 || Width == 16) &&
2135          "Unexpected element width.");
2136   assert((StepLen == 1 || StepLen == -1) && "Unexpected step length.");
2137 
2138   unsigned NumOfElem = 16 / Width;
2139   unsigned MaskVal[16]; //  Width is never greater than 16
2140   for (unsigned i = 0; i < NumOfElem; ++i) {
2141     MaskVal[0] = N->getMaskElt(i * Width);
2142     if ((StepLen == 1) && (MaskVal[0] % Width)) {
2143       return false;
2144     } else if ((StepLen == -1) && ((MaskVal[0] + 1) % Width)) {
2145       return false;
2146     }
2147 
2148     for (unsigned int j = 1; j < Width; ++j) {
2149       MaskVal[j] = N->getMaskElt(i * Width + j);
2150       if (MaskVal[j] != MaskVal[j-1] + StepLen) {
2151         return false;
2152       }
2153     }
2154   }
2155 
2156   return true;
2157 }
2158 
2159 bool PPC::isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2160                           unsigned &InsertAtByte, bool &Swap, bool IsLE) {
2161   if (!isNByteElemShuffleMask(N, 4, 1))
2162     return false;
2163 
2164   // Now we look at mask elements 0,4,8,12
2165   unsigned M0 = N->getMaskElt(0) / 4;
2166   unsigned M1 = N->getMaskElt(4) / 4;
2167   unsigned M2 = N->getMaskElt(8) / 4;
2168   unsigned M3 = N->getMaskElt(12) / 4;
2169   unsigned LittleEndianShifts[] = { 2, 1, 0, 3 };
2170   unsigned BigEndianShifts[] = { 3, 0, 1, 2 };
2171 
2172   // Below, let H and L be arbitrary elements of the shuffle mask
2173   // where H is in the range [4,7] and L is in the range [0,3].
2174   // H, 1, 2, 3 or L, 5, 6, 7
2175   if ((M0 > 3 && M1 == 1 && M2 == 2 && M3 == 3) ||
2176       (M0 < 4 && M1 == 5 && M2 == 6 && M3 == 7)) {
2177     ShiftElts = IsLE ? LittleEndianShifts[M0 & 0x3] : BigEndianShifts[M0 & 0x3];
2178     InsertAtByte = IsLE ? 12 : 0;
2179     Swap = M0 < 4;
2180     return true;
2181   }
2182   // 0, H, 2, 3 or 4, L, 6, 7
2183   if ((M1 > 3 && M0 == 0 && M2 == 2 && M3 == 3) ||
2184       (M1 < 4 && M0 == 4 && M2 == 6 && M3 == 7)) {
2185     ShiftElts = IsLE ? LittleEndianShifts[M1 & 0x3] : BigEndianShifts[M1 & 0x3];
2186     InsertAtByte = IsLE ? 8 : 4;
2187     Swap = M1 < 4;
2188     return true;
2189   }
2190   // 0, 1, H, 3 or 4, 5, L, 7
2191   if ((M2 > 3 && M0 == 0 && M1 == 1 && M3 == 3) ||
2192       (M2 < 4 && M0 == 4 && M1 == 5 && M3 == 7)) {
2193     ShiftElts = IsLE ? LittleEndianShifts[M2 & 0x3] : BigEndianShifts[M2 & 0x3];
2194     InsertAtByte = IsLE ? 4 : 8;
2195     Swap = M2 < 4;
2196     return true;
2197   }
2198   // 0, 1, 2, H or 4, 5, 6, L
2199   if ((M3 > 3 && M0 == 0 && M1 == 1 && M2 == 2) ||
2200       (M3 < 4 && M0 == 4 && M1 == 5 && M2 == 6)) {
2201     ShiftElts = IsLE ? LittleEndianShifts[M3 & 0x3] : BigEndianShifts[M3 & 0x3];
2202     InsertAtByte = IsLE ? 0 : 12;
2203     Swap = M3 < 4;
2204     return true;
2205   }
2206 
2207   // If both vector operands for the shuffle are the same vector, the mask will
2208   // contain only elements from the first one and the second one will be undef.
2209   if (N->getOperand(1).isUndef()) {
2210     ShiftElts = 0;
2211     Swap = true;
2212     unsigned XXINSERTWSrcElem = IsLE ? 2 : 1;
2213     if (M0 == XXINSERTWSrcElem && M1 == 1 && M2 == 2 && M3 == 3) {
2214       InsertAtByte = IsLE ? 12 : 0;
2215       return true;
2216     }
2217     if (M0 == 0 && M1 == XXINSERTWSrcElem && M2 == 2 && M3 == 3) {
2218       InsertAtByte = IsLE ? 8 : 4;
2219       return true;
2220     }
2221     if (M0 == 0 && M1 == 1 && M2 == XXINSERTWSrcElem && M3 == 3) {
2222       InsertAtByte = IsLE ? 4 : 8;
2223       return true;
2224     }
2225     if (M0 == 0 && M1 == 1 && M2 == 2 && M3 == XXINSERTWSrcElem) {
2226       InsertAtByte = IsLE ? 0 : 12;
2227       return true;
2228     }
2229   }
2230 
2231   return false;
2232 }
2233 
2234 bool PPC::isXXSLDWIShuffleMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
2235                                bool &Swap, bool IsLE) {
2236   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2237   // Ensure each byte index of the word is consecutive.
2238   if (!isNByteElemShuffleMask(N, 4, 1))
2239     return false;
2240 
2241   // Now we look at mask elements 0,4,8,12, which are the beginning of words.
2242   unsigned M0 = N->getMaskElt(0) / 4;
2243   unsigned M1 = N->getMaskElt(4) / 4;
2244   unsigned M2 = N->getMaskElt(8) / 4;
2245   unsigned M3 = N->getMaskElt(12) / 4;
2246 
2247   // If both vector operands for the shuffle are the same vector, the mask will
2248   // contain only elements from the first one and the second one will be undef.
2249   if (N->getOperand(1).isUndef()) {
2250     assert(M0 < 4 && "Indexing into an undef vector?");
2251     if (M1 != (M0 + 1) % 4 || M2 != (M1 + 1) % 4 || M3 != (M2 + 1) % 4)
2252       return false;
2253 
2254     ShiftElts = IsLE ? (4 - M0) % 4 : M0;
2255     Swap = false;
2256     return true;
2257   }
2258 
2259   // Ensure each word index of the ShuffleVector Mask is consecutive.
2260   if (M1 != (M0 + 1) % 8 || M2 != (M1 + 1) % 8 || M3 != (M2 + 1) % 8)
2261     return false;
2262 
2263   if (IsLE) {
2264     if (M0 == 0 || M0 == 7 || M0 == 6 || M0 == 5) {
2265       // Input vectors don't need to be swapped if the leading element
2266       // of the result is one of the 3 left elements of the second vector
2267       // (or if there is no shift to be done at all).
2268       Swap = false;
2269       ShiftElts = (8 - M0) % 8;
2270     } else if (M0 == 4 || M0 == 3 || M0 == 2 || M0 == 1) {
2271       // Input vectors need to be swapped if the leading element
2272       // of the result is one of the 3 left elements of the first vector
2273       // (or if we're shifting by 4 - thereby simply swapping the vectors).
2274       Swap = true;
2275       ShiftElts = (4 - M0) % 4;
2276     }
2277 
2278     return true;
2279   } else {                                          // BE
2280     if (M0 == 0 || M0 == 1 || M0 == 2 || M0 == 3) {
2281       // Input vectors don't need to be swapped if the leading element
2282       // of the result is one of the 4 elements of the first vector.
2283       Swap = false;
2284       ShiftElts = M0;
2285     } else if (M0 == 4 || M0 == 5 || M0 == 6 || M0 == 7) {
2286       // Input vectors need to be swapped if the leading element
2287       // of the result is one of the 4 elements of the right vector.
2288       Swap = true;
2289       ShiftElts = M0 - 4;
2290     }
2291 
2292     return true;
2293   }
2294 }
2295 
2296 bool static isXXBRShuffleMaskHelper(ShuffleVectorSDNode *N, int Width) {
2297   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2298 
2299   if (!isNByteElemShuffleMask(N, Width, -1))
2300     return false;
2301 
2302   for (int i = 0; i < 16; i += Width)
2303     if (N->getMaskElt(i) != i + Width - 1)
2304       return false;
2305 
2306   return true;
2307 }
2308 
2309 bool PPC::isXXBRHShuffleMask(ShuffleVectorSDNode *N) {
2310   return isXXBRShuffleMaskHelper(N, 2);
2311 }
2312 
2313 bool PPC::isXXBRWShuffleMask(ShuffleVectorSDNode *N) {
2314   return isXXBRShuffleMaskHelper(N, 4);
2315 }
2316 
2317 bool PPC::isXXBRDShuffleMask(ShuffleVectorSDNode *N) {
2318   return isXXBRShuffleMaskHelper(N, 8);
2319 }
2320 
2321 bool PPC::isXXBRQShuffleMask(ShuffleVectorSDNode *N) {
2322   return isXXBRShuffleMaskHelper(N, 16);
2323 }
2324 
2325 /// Can node \p N be lowered to an XXPERMDI instruction? If so, set \p Swap
2326 /// if the inputs to the instruction should be swapped and set \p DM to the
2327 /// value for the immediate.
2328 /// Specifically, set \p Swap to true only if \p N can be lowered to XXPERMDI
2329 /// AND element 0 of the result comes from the first input (LE) or second input
2330 /// (BE). Set \p DM to the calculated result (0-3) only if \p N can be lowered.
2331 /// \return true iff the given mask of shuffle node \p N is a XXPERMDI shuffle
2332 /// mask.
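/// For example (illustrative), on a big-endian target the two-input mask
/// <0,1,2,3,4,5,6,7,24,25,26,27,28,29,30,31> (doubleword 0 of the first input
/// followed by doubleword 1 of the second) yields Swap == false and DM == 1.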
2333 bool PPC::isXXPERMDIShuffleMask(ShuffleVectorSDNode *N, unsigned &DM,
2334                                bool &Swap, bool IsLE) {
2335   assert(N->getValueType(0) == MVT::v16i8 && "Shuffle vector expects v16i8");
2336 
2337   // Ensure each byte index of the double word is consecutive.
2338   if (!isNByteElemShuffleMask(N, 8, 1))
2339     return false;
2340 
2341   unsigned M0 = N->getMaskElt(0) / 8;
2342   unsigned M1 = N->getMaskElt(8) / 8;
2343   assert(((M0 | M1) < 4) && "A mask element out of bounds?");
2344 
2345   // If both vector operands for the shuffle are the same vector, the mask will
2346   // contain only elements from the first one and the second one will be undef.
2347   if (N->getOperand(1).isUndef()) {
2348     if ((M0 | M1) < 2) {
2349       DM = IsLE ? (((~M1) & 1) << 1) + ((~M0) & 1) : (M0 << 1) + (M1 & 1);
2350       Swap = false;
2351       return true;
2352     } else
2353       return false;
2354   }
2355 
2356   if (IsLE) {
2357     if (M0 > 1 && M1 < 2) {
2358       Swap = false;
2359     } else if (M0 < 2 && M1 > 1) {
2360       M0 = (M0 + 2) % 4;
2361       M1 = (M1 + 2) % 4;
2362       Swap = true;
2363     } else
2364       return false;
2365 
2366     // Note: if control reaches this point, Swap has already been set above.
2367     DM = (((~M1) & 1) << 1) + ((~M0) & 1);
2368     return true;
2369   } else { // BE
2370     if (M0 < 2 && M1 > 1) {
2371       Swap = false;
2372     } else if (M0 > 1 && M1 < 2) {
2373       M0 = (M0 + 2) % 4;
2374       M1 = (M1 + 2) % 4;
2375       Swap = true;
2376     } else
2377       return false;
2378 
2379     // Note: if control reaches this point, Swap has already been set above.
2380     DM = (M0 << 1) + (M1 & 1);
2381     return true;
2382   }
2383 }
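// A worked example (little-endian, two distinct inputs): if byte 0 of the
// result comes from byte 16 (M0 = 2) and byte 8 comes from byte 8 (M1 = 1),
// then M0 > 1 && M1 < 2, so Swap = false and
//   DM = (((~M1) & 1) << 1) + ((~M0) & 1) = (0 << 1) + 1 = 1.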
2384 
2385 
2386 /// getSplatIdxForPPCMnemonics - Return the splat index as a value that is
2387 /// appropriate for PPC mnemonics (which have a big endian bias - namely
2388 /// elements are counted from the left of the vector register).
2389 unsigned PPC::getSplatIdxForPPCMnemonics(SDNode *N, unsigned EltSize,
2390                                          SelectionDAG &DAG) {
2391   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
2392   assert(isSplatShuffleMask(SVOp, EltSize));
2393   if (DAG.getDataLayout().isLittleEndian())
2394     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
2395   else
2396     return SVOp->getMaskElt(0) / EltSize;
2397 }
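// For example, a splat of vector element 1 with EltSize == 4 (mask element 0
// equal to 4) yields (16 / 4) - 1 - (4 / 4) = 2 on little-endian targets and
// 4 / 4 = 1 on big-endian targets.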
2398 
2399 /// get_VSPLTI_elt - If this is a build_vector of constants which can be formed
2400 /// by using a vspltis[bhw] instruction of the specified element size, return
2401 /// the constant being splatted.  The ByteSize field indicates the number of
2402 /// bytes of each element [124] -> [bhw].
2403 SDValue PPC::get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG) {
2404   SDValue OpVal(nullptr, 0);
2405 
2406   // If ByteSize of the splat is bigger than the element size of the
2407   // build_vector, then we have a case where we are checking for a splat where
2408   // multiple elements of the buildvector are folded together into a single
2409   // logical element of the splat (e.g. "vspltish 1" to splat {0,1}*8).
2410   unsigned EltSize = 16/N->getNumOperands();
2411   if (EltSize < ByteSize) {
2412     unsigned Multiple = ByteSize/EltSize;   // Number of BV entries per spltval.
2413     SDValue UniquedVals[4];
2414     assert(Multiple > 1 && Multiple <= 4 && "How can this happen?");
2415 
2416     // See if all of the elements in the buildvector agree across.
2417     for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2418       if (N->getOperand(i).isUndef()) continue;
2419       // If the element isn't a constant, bail fully out.
2420       if (!isa<ConstantSDNode>(N->getOperand(i))) return SDValue();
2421 
2422       if (!UniquedVals[i&(Multiple-1)].getNode())
2423         UniquedVals[i&(Multiple-1)] = N->getOperand(i);
2424       else if (UniquedVals[i&(Multiple-1)] != N->getOperand(i))
2425         return SDValue();  // no match.
2426     }
2427 
2428     // Okay, if we reached this point, UniquedVals[0..Multiple-1] contains
2429     // either constant or undef values that are identical for each chunk.  See
2430     // if these chunks can form into a larger vspltis*.
2431 
2432     // Check to see if all of the leading entries are either 0 or -1.  If
2433     // neither, then this won't fit into the immediate field.
2434     bool LeadingZero = true;
2435     bool LeadingOnes = true;
2436     for (unsigned i = 0; i != Multiple-1; ++i) {
2437       if (!UniquedVals[i].getNode()) continue;  // Must have been undefs.
2438 
2439       LeadingZero &= isNullConstant(UniquedVals[i]);
2440       LeadingOnes &= isAllOnesConstant(UniquedVals[i]);
2441     }
2442     // Finally, check the least significant entry.
2443     if (LeadingZero) {
2444       if (!UniquedVals[Multiple-1].getNode())
2445         return DAG.getTargetConstant(0, SDLoc(N), MVT::i32);  // 0,0,0,undef
2446       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getZExtValue();
2447       if (Val < 16)                                   // 0,0,0,4 -> vspltisw(4)
2448         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2449     }
2450     if (LeadingOnes) {
2451       if (!UniquedVals[Multiple-1].getNode())
2452         return DAG.getTargetConstant(~0U, SDLoc(N), MVT::i32); // -1,-1,-1,undef
2453       int Val = cast<ConstantSDNode>(UniquedVals[Multiple-1])->getSExtValue();
2454       if (Val >= -16)                            // -1,-1,-1,-2 -> vspltisw(-2)
2455         return DAG.getTargetConstant(Val, SDLoc(N), MVT::i32);
2456     }
2457 
2458     return SDValue();
2459   }
2460 
2461   // Check to see if this buildvec has a single non-undef value in its elements.
2462   for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
2463     if (N->getOperand(i).isUndef()) continue;
2464     if (!OpVal.getNode())
2465       OpVal = N->getOperand(i);
2466     else if (OpVal != N->getOperand(i))
2467       return SDValue();
2468   }
2469 
2470   if (!OpVal.getNode()) return SDValue();  // All UNDEF: use implicit def.
2471 
2472   unsigned ValSizeInBytes = EltSize;
2473   uint64_t Value = 0;
2474   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal)) {
2475     Value = CN->getZExtValue();
2476   } else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal)) {
2477     assert(CN->getValueType(0) == MVT::f32 && "Only one legal FP vector type!");
2478     Value = FloatToBits(CN->getValueAPF().convertToFloat());
2479   }
2480 
2481   // If the splat value is larger than the element value, then we can never do
2482   // this splat.  The only case that we could fit the replicated bits into our
2483   // immediate field for would be zero, and we prefer to use vxor for it.
2484   if (ValSizeInBytes < ByteSize) return SDValue();
2485 
2486   // If the element value is larger than the splat value, check if it consists
2487   // of a repeated bit pattern of size ByteSize.
2488   if (!APInt(ValSizeInBytes * 8, Value).isSplat(ByteSize * 8))
2489     return SDValue();
2490 
2491   // Properly sign extend the value.
2492   int MaskVal = SignExtend32(Value, ByteSize * 8);
2493 
2494   // If this is zero, don't match, zero matches ISD::isBuildVectorAllZeros.
2495   if (MaskVal == 0) return SDValue();
2496 
2497   // Finally, if this value fits in a 5 bit sext field, return it
2498   if (SignExtend32<5>(MaskVal) == MaskVal)
2499     return DAG.getTargetConstant(MaskVal, SDLoc(N), MVT::i32);
2500   return SDValue();
2501 }
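// Two illustrative traces (not exhaustive):
//  * ByteSize == 2 with a v8i16 build_vector of 0xFFFE in every element:
//    MaskVal = SignExtend32(0xFFFE, 16) = -2, which fits in a 5-bit sext
//    field, so the node can be matched as "vspltish -2".
//  * ByteSize == 2 with a v16i8 build_vector alternating 0, 1: the byte pairs
//    collapse to the halfword 1, giving "vspltish 1" as in the comment above.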
2502 
2503 //===----------------------------------------------------------------------===//
2504 //  Addressing Mode Selection
2505 //===----------------------------------------------------------------------===//
2506 
2507 /// isIntS16Immediate - This method tests to see if the node is either a 32-bit
2508 /// or 64-bit immediate, and if the value can be accurately represented as a
2509 /// sign extension from a 16-bit value.  If so, this returns true and the
2510 /// immediate.
2511 bool llvm::isIntS16Immediate(SDNode *N, int16_t &Imm) {
2512   if (!isa<ConstantSDNode>(N))
2513     return false;
2514 
2515   Imm = (int16_t)cast<ConstantSDNode>(N)->getZExtValue();
2516   if (N->getValueType(0) == MVT::i32)
2517     return Imm == (int32_t)cast<ConstantSDNode>(N)->getZExtValue();
2518   else
2519     return Imm == (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2520 }
2521 bool llvm::isIntS16Immediate(SDValue Op, int16_t &Imm) {
2522   return isIntS16Immediate(Op.getNode(), Imm);
2523 }
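// For example, the i32 constant 32000 satisfies this (Imm = 32000), while
// 40000 does not, because (int16_t)40000 sign-extends back to -25536.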
2524 
2525 /// Used when computing address flags for selecting loads and stores.
2526 /// If we have an OR, check if the LHS and RHS are provably disjoint.
2527 /// An OR of two provably disjoint values is equivalent to an ADD.
2528 /// Most PPC load/store instructions compute the effective address as a sum,
2529 /// so doing this conversion is useful.
2530 static bool provablyDisjointOr(SelectionDAG &DAG, const SDValue &N) {
2531   if (N.getOpcode() != ISD::OR)
2532     return false;
2533   KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2534   if (!LHSKnown.Zero.getBoolValue())
2535     return false;
2536   KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2537   return (~(LHSKnown.Zero | RHSKnown.Zero) == 0);
2538 }
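// Example: for (or (shl X, 4), 3) the low 4 bits of the LHS are known zero
// and all bits above bit 1 of the RHS are known zero, so every bit position
// is known zero in at least one operand and the OR can be treated as an ADD.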
2539 
2540 /// SelectAddressEVXRegReg - Given the specified address, check to see if it can
2541 /// be represented as an indexed [r+r] operation.
2542 bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
2543                                                SDValue &Index,
2544                                                SelectionDAG &DAG) const {
2545   for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
2546       UI != E; ++UI) {
2547     if (MemSDNode *Memop = dyn_cast<MemSDNode>(*UI)) {
2548       if (Memop->getMemoryVT() == MVT::f64) {
2549           Base = N.getOperand(0);
2550           Index = N.getOperand(1);
2551           return true;
2552       }
2553     }
2554   }
2555   return false;
2556 }
2557 
2558 /// isIntS34Immediate - This method tests if the given node's value can be
2559 /// accurately represented as a sign extension from a 34-bit value.  If so,
2560 /// this returns true and the immediate.
2561 bool llvm::isIntS34Immediate(SDNode *N, int64_t &Imm) {
2562   if (!isa<ConstantSDNode>(N))
2563     return false;
2564 
2565   Imm = (int64_t)cast<ConstantSDNode>(N)->getZExtValue();
2566   return isInt<34>(Imm);
2567 }
2568 bool llvm::isIntS34Immediate(SDValue Op, int64_t &Imm) {
2569   return isIntS34Immediate(Op.getNode(), Imm);
2570 }
2571 
2572 /// SelectAddressRegReg - Given the specified address, check to see if it
2573 /// can be represented as an indexed [r+r] operation.  Returns false if it
2574 /// can be more efficiently represented as [r+imm]. If \p EncodingAlignment is
2575 /// non-zero and N can be represented by a base register plus a signed 16-bit
2576 /// displacement, make a more precise judgement by checking (displacement % \p
2577 /// EncodingAlignment).
2578 bool PPCTargetLowering::SelectAddressRegReg(
2579     SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
2580     MaybeAlign EncodingAlignment) const {
2581   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
2582   // a [pc+imm].
2583   if (SelectAddressPCRel(N, Base))
2584     return false;
2585 
2586   int16_t Imm = 0;
2587   if (N.getOpcode() == ISD::ADD) {
2588     // SPE f64 loads/stores cannot use a 16-bit offset (they only support
2589     // 8-bit offsets), so check for that case first.
2590     if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
2591         return true;
2592     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2593         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2594       return false; // r+i
2595     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
2596       return false;    // r+i
2597 
2598     Base = N.getOperand(0);
2599     Index = N.getOperand(1);
2600     return true;
2601   } else if (N.getOpcode() == ISD::OR) {
2602     if (isIntS16Immediate(N.getOperand(1), Imm) &&
2603         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
2604       return false; // r+i; fold the immediate if we can.
2605 
2606     // If this is an or of disjoint bitfields, we can codegen this as an add
2607     // (for better address arithmetic) if the LHS and RHS of the OR are provably
2608     // disjoint.
2609     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2610 
2611     if (LHSKnown.Zero.getBoolValue()) {
2612       KnownBits RHSKnown = DAG.computeKnownBits(N.getOperand(1));
2613       // If all of the bits are known zero on the LHS or RHS, the add won't
2614       // carry.
2615       if (~(LHSKnown.Zero | RHSKnown.Zero) == 0) {
2616         Base = N.getOperand(0);
2617         Index = N.getOperand(1);
2618         return true;
2619       }
2620     }
2621   }
2622 
2623   return false;
2624 }
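// For example, (add X, 100) with EncodingAlignment == 4 is rejected here
// (it is better formed as the D-form [r+100]), while (add X, 3) with the
// same alignment is accepted as [r+r] because the displacement is not a
// multiple of 4.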
2625 
2626 // If we happen to be doing an i64 load or store into a stack slot that has
2627 // less than a 4-byte alignment, then the frame-index elimination may need to
2628 // use an indexed load or store instruction (because the offset may not be a
2629 // multiple of 4). The extra register needed to hold the offset comes from the
2630 // register scavenger, and it is possible that the scavenger will need to use
2631 // an emergency spill slot. As a result, we need to make sure that a spill slot
2632 // is allocated when doing an i64 load/store into a less-than-4-byte-aligned
2633 // stack slot.
2634 static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
2635   // FIXME: This does not handle the LWA case.
2636   if (VT != MVT::i64)
2637     return;
2638 
2639   // NOTE: We'll exclude negative FIs here, which come from argument
2640   // lowering, because there are no known test cases triggering this problem
2641   // using packed structures (or similar). We can remove this exclusion if
2642   // we find such a test case. The reason why this is so test-case driven is
2643   // because this entire 'fixup' is only to prevent crashes (from the
2644   // register scavenger) on not-really-valid inputs. For example, if we have:
2645   //   %a = alloca i1
2646   //   %b = bitcast i1* %a to i64*
2647   //   store i64 0, i64* %b
2648   // then the store should really be marked as 'align 1', but is not. If it
2649   // were marked as 'align 1' then the indexed form would have been
2650   // instruction-selected initially, and the problem this 'fixup' is preventing
2651   // won't happen regardless.
2652   if (FrameIdx < 0)
2653     return;
2654 
2655   MachineFunction &MF = DAG.getMachineFunction();
2656   MachineFrameInfo &MFI = MF.getFrameInfo();
2657 
2658   if (MFI.getObjectAlign(FrameIdx) >= Align(4))
2659     return;
2660 
2661   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
2662   FuncInfo->setHasNonRISpills();
2663 }
2664 
2665 /// Returns true if the address N can be represented by a base register plus
2666 /// a signed 16-bit displacement [r+imm], and if it is not better
2667 /// represented as reg+reg.  If \p EncodingAlignment is non-zero, only accept
2668 /// displacements that are multiples of that value.
2669 bool PPCTargetLowering::SelectAddressRegImm(
2670     SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
2671     MaybeAlign EncodingAlignment) const {
2672   // FIXME dl should come from parent load or store, not from address
2673   SDLoc dl(N);
2674 
2675   // If we have a PC Relative target flag don't select as [reg+imm]. It will be
2676   // a [pc+imm].
2677   if (SelectAddressPCRel(N, Base))
2678     return false;
2679 
2680   // If this can be more profitably realized as r+r, fail.
2681   if (SelectAddressRegReg(N, Disp, Base, DAG, EncodingAlignment))
2682     return false;
2683 
2684   if (N.getOpcode() == ISD::ADD) {
2685     int16_t imm = 0;
2686     if (isIntS16Immediate(N.getOperand(1), imm) &&
2687         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2688       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2689       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2690         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2691         fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2692       } else {
2693         Base = N.getOperand(0);
2694       }
2695       return true; // [r+i]
2696     } else if (N.getOperand(1).getOpcode() == PPCISD::Lo) {
2697       // Match LOAD (ADD (X, Lo(G))).
2698       assert(!cast<ConstantSDNode>(N.getOperand(1).getOperand(1))->getZExtValue()
2699              && "Cannot handle constant offsets yet!");
2700       Disp = N.getOperand(1).getOperand(0);  // The global address.
2701       assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
2702              Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
2703              Disp.getOpcode() == ISD::TargetConstantPool ||
2704              Disp.getOpcode() == ISD::TargetJumpTable);
2705       Base = N.getOperand(0);
2706       return true;  // [&g+r]
2707     }
2708   } else if (N.getOpcode() == ISD::OR) {
2709     int16_t imm = 0;
2710     if (isIntS16Immediate(N.getOperand(1), imm) &&
2711         (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
2712       // If this is an or of disjoint bitfields, we can codegen this as an add
2713       // (for better address arithmetic) if the LHS and RHS of the OR are
2714       // provably disjoint.
2715       KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2716 
2717       if ((LHSKnown.Zero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
2718         // If all of the bits are known zero on the LHS or RHS, the add won't
2719         // carry.
2720         if (FrameIndexSDNode *FI =
2721               dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
2722           Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2723           fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2724         } else {
2725           Base = N.getOperand(0);
2726         }
2727         Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
2728         return true;
2729       }
2730     }
2731   } else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
2732     // Loading from a constant address.
2733 
2734     // If this address fits entirely in a 16-bit sext immediate field, codegen
2735     // this as "d, 0"
2736     int16_t Imm;
2737     if (isIntS16Immediate(CN, Imm) &&
2738         (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
2739       Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
2740       Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2741                              CN->getValueType(0));
2742       return true;
2743     }
2744 
2745     // Handle 32-bit sext immediates with LIS + addr mode.
2746     if ((CN->getValueType(0) == MVT::i32 ||
2747          (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
2748         (!EncodingAlignment ||
2749          isAligned(*EncodingAlignment, CN->getZExtValue()))) {
2750       int Addr = (int)CN->getZExtValue();
2751 
2752       // Otherwise, break this down into an LIS + disp.
2753       Disp = DAG.getTargetConstant((short)Addr, dl, MVT::i32);
2754 
2755       Base = DAG.getTargetConstant((Addr - (signed short)Addr) >> 16, dl,
2756                                    MVT::i32);
2757       unsigned Opc = CN->getValueType(0) == MVT::i32 ? PPC::LIS : PPC::LIS8;
2758       Base = SDValue(DAG.getMachineNode(Opc, dl, CN->getValueType(0), Base), 0);
2759       return true;
2760     }
2761   }
2762 
2763   Disp = DAG.getTargetConstant(0, dl, getPointerTy(DAG.getDataLayout()));
2764   if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
2765     Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2766     fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
2767   } else
2768     Base = N;
2769   return true;      // [r+0]
2770 }
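// Worked example for the constant-address path: for the address 0x12348000,
// Disp becomes (short)0x8000 = -32768 and Base is materialized with
// LIS 0x1235 (i.e. 0x12350000); 0x12350000 + (-32768) recovers 0x12348000.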
2771 
2772 /// Similar to the 16-bit case but for instructions that take a 34-bit
2773 /// displacement field (prefixed loads/stores).
2774 bool PPCTargetLowering::SelectAddressRegImm34(SDValue N, SDValue &Disp,
2775                                               SDValue &Base,
2776                                               SelectionDAG &DAG) const {
2777   // Only on 64-bit targets.
2778   if (N.getValueType() != MVT::i64)
2779     return false;
2780 
2781   SDLoc dl(N);
2782   int64_t Imm = 0;
2783 
2784   if (N.getOpcode() == ISD::ADD) {
2785     if (!isIntS34Immediate(N.getOperand(1), Imm))
2786       return false;
2787     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2788     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2789       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2790     else
2791       Base = N.getOperand(0);
2792     return true;
2793   }
2794 
2795   if (N.getOpcode() == ISD::OR) {
2796     if (!isIntS34Immediate(N.getOperand(1), Imm))
2797       return false;
2798     // If this is an or of disjoint bitfields, we can codegen this as an add
2799     // (for better address arithmetic) if the LHS and RHS of the OR are
2800     // provably disjoint.
2801     KnownBits LHSKnown = DAG.computeKnownBits(N.getOperand(0));
2802     if ((LHSKnown.Zero.getZExtValue() | ~(uint64_t)Imm) != ~0ULL)
2803       return false;
2804     if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0)))
2805       Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
2806     else
2807       Base = N.getOperand(0);
2808     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2809     return true;
2810   }
2811 
2812   if (isIntS34Immediate(N, Imm)) { // If the address is a 34-bit const.
2813     Disp = DAG.getTargetConstant(Imm, dl, N.getValueType());
2814     Base = DAG.getRegister(PPC::ZERO8, N.getValueType());
2815     return true;
2816   }
2817 
2818   return false;
2819 }
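// For example, (add X, 1000000) is accepted here with Disp = 1000000 because
// the displacement fits in a signed 34-bit field, and a bare 34-bit constant
// address is formed as [ZERO8 + imm].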
2820 
2821 /// SelectAddressRegRegOnly - Given the specified address, force it to be
2822 /// represented as an indexed [r+r] operation.
2823 bool PPCTargetLowering::SelectAddressRegRegOnly(SDValue N, SDValue &Base,
2824                                                 SDValue &Index,
2825                                                 SelectionDAG &DAG) const {
2826   // Check to see if we can easily represent this as an [r+r] address.  This
2827   // will fail if it thinks that the address is more profitably represented as
2828   // reg+imm, e.g. where imm = 0.
2829   if (SelectAddressRegReg(N, Base, Index, DAG))
2830     return true;
2831 
2832   // If the address is the result of an add, we will utilize the fact that the
2833   // address calculation includes an implicit add.  However, we can reduce
2834   // register pressure if we do not materialize a constant just for use as the
2835   // index register.  We only get rid of the add if it is not an add of a
2836   // value and a 16-bit signed constant and both have a single use.
2837   int16_t imm = 0;
2838   if (N.getOpcode() == ISD::ADD &&
2839       (!isIntS16Immediate(N.getOperand(1), imm) ||
2840        !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
2841     Base = N.getOperand(0);
2842     Index = N.getOperand(1);
2843     return true;
2844   }
2845 
2846   // Otherwise, do it the hard way, using R0 as the base register.
2847   Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
2848                          N.getValueType());
2849   Index = N;
2850   return true;
2851 }
2852 
2853 template <typename Ty> static bool isValidPCRelNode(SDValue N) {
2854   Ty *PCRelCand = dyn_cast<Ty>(N);
2855   return PCRelCand && (PCRelCand->getTargetFlags() & PPCII::MO_PCREL_FLAG);
2856 }
2857 
2858 /// Returns true if this address is a PC Relative address.
2859 /// PC Relative addresses are marked with the flag PPCII::MO_PCREL_FLAG
2860 /// or if the node opcode is PPCISD::MAT_PCREL_ADDR.
2861 bool PPCTargetLowering::SelectAddressPCRel(SDValue N, SDValue &Base) const {
2862   // This is a materialize PC Relative node. Always select this as PC Relative.
2863   Base = N;
2864   if (N.getOpcode() == PPCISD::MAT_PCREL_ADDR)
2865     return true;
2866   if (isValidPCRelNode<ConstantPoolSDNode>(N) ||
2867       isValidPCRelNode<GlobalAddressSDNode>(N) ||
2868       isValidPCRelNode<JumpTableSDNode>(N) ||
2869       isValidPCRelNode<BlockAddressSDNode>(N))
2870     return true;
2871   return false;
2872 }
2873 
2874 /// Returns true if we should use a direct load into vector instruction
2875 /// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
2876 static bool usePartialVectorLoads(SDNode *N, const PPCSubtarget& ST) {
2877 
2878   // If there are any uses other than scalar_to_vector, then we should
2879   // keep it as a scalar load -> direct move pattern to prevent multiple
2880   // loads.
2881   LoadSDNode *LD = dyn_cast<LoadSDNode>(N);
2882   if (!LD)
2883     return false;
2884 
2885   EVT MemVT = LD->getMemoryVT();
2886   if (!MemVT.isSimple())
2887     return false;
2888   switch(MemVT.getSimpleVT().SimpleTy) {
2889   case MVT::i64:
2890     break;
2891   case MVT::i32:
2892     if (!ST.hasP8Vector())
2893       return false;
2894     break;
2895   case MVT::i16:
2896   case MVT::i8:
2897     if (!ST.hasP9Vector())
2898       return false;
2899     break;
2900   default:
2901     return false;
2902   }
2903 
2904   SDValue LoadedVal(N, 0);
2905   if (!LoadedVal.hasOneUse())
2906     return false;
2907 
2908   for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end();
2909        UI != UE; ++UI)
2910     if (UI.getUse().get().getResNo() == 0 &&
2911         UI->getOpcode() != ISD::SCALAR_TO_VECTOR &&
2912         UI->getOpcode() != PPCISD::SCALAR_TO_VECTOR_PERMUTED)
2913       return false;
2914 
2915   return true;
2916 }
2917 
2918 /// getPreIndexedAddressParts - returns true by value, base pointer and
2919 /// offset pointer and addressing mode by reference if the node's address
2920 /// can be legally represented as pre-indexed load / store address.
2921 bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
2922                                                   SDValue &Offset,
2923                                                   ISD::MemIndexedMode &AM,
2924                                                   SelectionDAG &DAG) const {
2925   if (DisablePPCPreinc) return false;
2926 
2927   bool isLoad = true;
2928   SDValue Ptr;
2929   EVT VT;
2930   unsigned Alignment;
2931   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2932     Ptr = LD->getBasePtr();
2933     VT = LD->getMemoryVT();
2934     Alignment = LD->getAlignment();
2935   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
2936     Ptr = ST->getBasePtr();
2937     VT  = ST->getMemoryVT();
2938     Alignment = ST->getAlignment();
2939     isLoad = false;
2940   } else
2941     return false;
2942 
2943   // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
2944   // instructions because we can fold these into a more efficient
2945   // instruction (such as LXSD) instead.
2946   if (isLoad && usePartialVectorLoads(N, Subtarget)) {
2947     return false;
2948   }
2949 
2950   // PowerPC doesn't have preinc load/store instructions for vectors
2951   if (VT.isVector())
2952     return false;
2953 
2954   if (SelectAddressRegReg(Ptr, Base, Offset, DAG)) {
2955     // Common code will reject creating a pre-inc form if the base pointer
2956     // is a frame index, or if N is a store and the base pointer is either
2957     // the same as or a predecessor of the value being stored.  Check for
2958     // those situations here, and try with swapped Base/Offset instead.
2959     bool Swap = false;
2960 
2961     if (isa<FrameIndexSDNode>(Base) || isa<RegisterSDNode>(Base))
2962       Swap = true;
2963     else if (!isLoad) {
2964       SDValue Val = cast<StoreSDNode>(N)->getValue();
2965       if (Val == Base || Base.getNode()->isPredecessorOf(Val.getNode()))
2966         Swap = true;
2967     }
2968 
2969     if (Swap)
2970       std::swap(Base, Offset);
2971 
2972     AM = ISD::PRE_INC;
2973     return true;
2974   }
2975 
2976   // LDU/STU can only handle immediates that are a multiple of 4.
2977   if (VT != MVT::i64) {
2978     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
2979       return false;
2980   } else {
2981     // LDU/STU need an address with at least 4-byte alignment.
2982     if (Alignment < 4)
2983       return false;
2984 
2985     if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
2986       return false;
2987   }
2988 
2989   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
2990     // PPC64 doesn't have lwau, but it does have lwaux.  Reject preinc load of
2991     // sext i32 to i64 when addr mode is r+i.
2992     if (LD->getValueType(0) == MVT::i64 && LD->getMemoryVT() == MVT::i32 &&
2993         LD->getExtensionType() == ISD::SEXTLOAD &&
2994         isa<ConstantSDNode>(Offset))
2995       return false;
2996   }
2997 
2998   AM = ISD::PRE_INC;
2999   return true;
3000 }
3001 
3002 //===----------------------------------------------------------------------===//
3003 //  LowerOperation implementation
3004 //===----------------------------------------------------------------------===//
3005 
3006 /// Return true if we should reference labels using a PICBase, set the HiOpFlags
3007 /// and LoOpFlags to the target MO flags.
3008 static void getLabelAccessInfo(bool IsPIC, const PPCSubtarget &Subtarget,
3009                                unsigned &HiOpFlags, unsigned &LoOpFlags,
3010                                const GlobalValue *GV = nullptr) {
3011   HiOpFlags = PPCII::MO_HA;
3012   LoOpFlags = PPCII::MO_LO;
3013 
3014   // Don't use the pic base if not in PIC relocation model.
3015   if (IsPIC) {
3016     HiOpFlags |= PPCII::MO_PIC_FLAG;
3017     LoOpFlags |= PPCII::MO_PIC_FLAG;
3018   }
3019 }
3020 
3021 static SDValue LowerLabelRef(SDValue HiPart, SDValue LoPart, bool isPIC,
3022                              SelectionDAG &DAG) {
3023   SDLoc DL(HiPart);
3024   EVT PtrVT = HiPart.getValueType();
3025   SDValue Zero = DAG.getConstant(0, DL, PtrVT);
3026 
3027   SDValue Hi = DAG.getNode(PPCISD::Hi, DL, PtrVT, HiPart, Zero);
3028   SDValue Lo = DAG.getNode(PPCISD::Lo, DL, PtrVT, LoPart, Zero);
3029 
3030   // With PIC, the first instruction is actually "GR+hi(&G)".
3031   if (isPIC)
3032     Hi = DAG.getNode(ISD::ADD, DL, PtrVT,
3033                      DAG.getNode(PPCISD::GlobalBaseReg, DL, PtrVT), Hi);
3034 
3035   // Generate non-pic code that has direct accesses to the constant pool.
3036   // The address of the global is just (hi(&g)+lo(&g)).
3037   return DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
3038 }
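// The Hi/Lo pair built here typically materializes as something like
//   addis rT, rBase, sym@ha
//   addi  rT, rT, sym@l
// (with the PIC base register as rBase when isPIC); the exact relocation
// spelling depends on the target ABI.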
3039 
3040 static void setUsesTOCBasePtr(MachineFunction &MF) {
3041   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3042   FuncInfo->setUsesTOCBasePtr();
3043 }
3044 
3045 static void setUsesTOCBasePtr(SelectionDAG &DAG) {
3046   setUsesTOCBasePtr(DAG.getMachineFunction());
3047 }
3048 
3049 SDValue PPCTargetLowering::getTOCEntry(SelectionDAG &DAG, const SDLoc &dl,
3050                                        SDValue GA) const {
3051   const bool Is64Bit = Subtarget.isPPC64();
3052   EVT VT = Is64Bit ? MVT::i64 : MVT::i32;
3053   SDValue Reg = Is64Bit ? DAG.getRegister(PPC::X2, VT)
3054                         : Subtarget.isAIXABI()
3055                               ? DAG.getRegister(PPC::R2, VT)
3056                               : DAG.getNode(PPCISD::GlobalBaseReg, dl, VT);
3057   SDValue Ops[] = { GA, Reg };
3058   return DAG.getMemIntrinsicNode(
3059       PPCISD::TOC_ENTRY, dl, DAG.getVTList(VT, MVT::Other), Ops, VT,
3060       MachinePointerInfo::getGOT(DAG.getMachineFunction()), None,
3061       MachineMemOperand::MOLoad);
3062 }
3063 
3064 SDValue PPCTargetLowering::LowerConstantPool(SDValue Op,
3065                                              SelectionDAG &DAG) const {
3066   EVT PtrVT = Op.getValueType();
3067   ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3068   const Constant *C = CP->getConstVal();
3069 
3070   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3071   // The actual address of the GlobalValue is stored in the TOC.
3072   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3073     if (Subtarget.isUsingPCRelativeCalls()) {
3074       SDLoc DL(CP);
3075       EVT Ty = getPointerTy(DAG.getDataLayout());
3076       SDValue ConstPool = DAG.getTargetConstantPool(
3077           C, Ty, CP->getAlign(), CP->getOffset(), PPCII::MO_PCREL_FLAG);
3078       return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, ConstPool);
3079     }
3080     setUsesTOCBasePtr(DAG);
3081     SDValue GA = DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0);
3082     return getTOCEntry(DAG, SDLoc(CP), GA);
3083   }
3084 
3085   unsigned MOHiFlag, MOLoFlag;
3086   bool IsPIC = isPositionIndependent();
3087   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3088 
3089   if (IsPIC && Subtarget.isSVR4ABI()) {
3090     SDValue GA =
3091         DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), PPCII::MO_PIC_FLAG);
3092     return getTOCEntry(DAG, SDLoc(CP), GA);
3093   }
3094 
3095   SDValue CPIHi =
3096       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOHiFlag);
3097   SDValue CPILo =
3098       DAG.getTargetConstantPool(C, PtrVT, CP->getAlign(), 0, MOLoFlag);
3099   return LowerLabelRef(CPIHi, CPILo, IsPIC, DAG);
3100 }
3101 
3102 // For 64-bit PowerPC, prefer the more compact relative encodings.
3103 // This trades 32 bits per jump table entry for one or two instructions
3104 // on the jump site.
3105 unsigned PPCTargetLowering::getJumpTableEncoding() const {
3106   if (isJumpTableRelative())
3107     return MachineJumpTableInfo::EK_LabelDifference32;
3108 
3109   return TargetLowering::getJumpTableEncoding();
3110 }
3111 
3112 bool PPCTargetLowering::isJumpTableRelative() const {
3113   if (UseAbsoluteJumpTables)
3114     return false;
3115   if (Subtarget.isPPC64() || Subtarget.isAIXABI())
3116     return true;
3117   return TargetLowering::isJumpTableRelative();
3118 }
3119 
3120 SDValue PPCTargetLowering::getPICJumpTableRelocBase(SDValue Table,
3121                                                     SelectionDAG &DAG) const {
3122   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3123     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3124 
3125   switch (getTargetMachine().getCodeModel()) {
3126   case CodeModel::Small:
3127   case CodeModel::Medium:
3128     return TargetLowering::getPICJumpTableRelocBase(Table, DAG);
3129   default:
3130     return DAG.getNode(PPCISD::GlobalBaseReg, SDLoc(),
3131                        getPointerTy(DAG.getDataLayout()));
3132   }
3133 }
3134 
3135 const MCExpr *
3136 PPCTargetLowering::getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
3137                                                 unsigned JTI,
3138                                                 MCContext &Ctx) const {
3139   if (!Subtarget.isPPC64() || Subtarget.isAIXABI())
3140     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3141 
3142   switch (getTargetMachine().getCodeModel()) {
3143   case CodeModel::Small:
3144   case CodeModel::Medium:
3145     return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
3146   default:
3147     return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
3148   }
3149 }
3150 
3151 SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
3152   EVT PtrVT = Op.getValueType();
3153   JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
3154 
3155   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3156   if (Subtarget.isUsingPCRelativeCalls()) {
3157     SDLoc DL(JT);
3158     EVT Ty = getPointerTy(DAG.getDataLayout());
3159     SDValue GA =
3160         DAG.getTargetJumpTable(JT->getIndex(), Ty, PPCII::MO_PCREL_FLAG);
3161     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3162     return MatAddr;
3163   }
3164 
3165   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3166   // The actual address of the GlobalValue is stored in the TOC.
3167   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3168     setUsesTOCBasePtr(DAG);
3169     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT);
3170     return getTOCEntry(DAG, SDLoc(JT), GA);
3171   }
3172 
3173   unsigned MOHiFlag, MOLoFlag;
3174   bool IsPIC = isPositionIndependent();
3175   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3176 
3177   if (IsPIC && Subtarget.isSVR4ABI()) {
3178     SDValue GA = DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
3179                                         PPCII::MO_PIC_FLAG);
3180     return getTOCEntry(DAG, SDLoc(GA), GA);
3181   }
3182 
3183   SDValue JTIHi = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOHiFlag);
3184   SDValue JTILo = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, MOLoFlag);
3185   return LowerLabelRef(JTIHi, JTILo, IsPIC, DAG);
3186 }
3187 
3188 SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
3189                                              SelectionDAG &DAG) const {
3190   EVT PtrVT = Op.getValueType();
3191   BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
3192   const BlockAddress *BA = BASDN->getBlockAddress();
3193 
3194   // isUsingPCRelativeCalls() returns true when PCRelative is enabled
3195   if (Subtarget.isUsingPCRelativeCalls()) {
3196     SDLoc DL(BASDN);
3197     EVT Ty = getPointerTy(DAG.getDataLayout());
3198     SDValue GA = DAG.getTargetBlockAddress(BA, Ty, BASDN->getOffset(),
3199                                            PPCII::MO_PCREL_FLAG);
3200     SDValue MatAddr = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3201     return MatAddr;
3202   }
3203 
3204   // 64-bit SVR4 ABI and AIX ABI code are always position-independent.
3205   // The actual BlockAddress is stored in the TOC.
3206   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3207     setUsesTOCBasePtr(DAG);
3208     SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
3209     return getTOCEntry(DAG, SDLoc(BASDN), GA);
3210   }
3211 
3212   // 32-bit position-independent ELF stores the BlockAddress in the .got.
3213   if (Subtarget.is32BitELFABI() && isPositionIndependent())
3214     return getTOCEntry(
3215         DAG, SDLoc(BASDN),
3216         DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset()));
3217 
3218   unsigned MOHiFlag, MOLoFlag;
3219   bool IsPIC = isPositionIndependent();
3220   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag);
3221   SDValue TgtBAHi = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOHiFlag);
3222   SDValue TgtBALo = DAG.getTargetBlockAddress(BA, PtrVT, 0, MOLoFlag);
3223   return LowerLabelRef(TgtBAHi, TgtBALo, IsPIC, DAG);
3224 }
3225 
3226 SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
3227                                               SelectionDAG &DAG) const {
3228   if (Subtarget.isAIXABI())
3229     return LowerGlobalTLSAddressAIX(Op, DAG);
3230 
3231   return LowerGlobalTLSAddressLinux(Op, DAG);
3232 }
3233 
3234 SDValue PPCTargetLowering::LowerGlobalTLSAddressAIX(SDValue Op,
3235                                                     SelectionDAG &DAG) const {
3236   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3237 
3238   if (DAG.getTarget().useEmulatedTLS())
3239     report_fatal_error("Emulated TLS is not yet supported on AIX");
3240 
3241   SDLoc dl(GA);
3242   const GlobalValue *GV = GA->getGlobal();
3243   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3244 
3245   // The general-dynamic model is the only access model supported for now, so
3246   // all the GlobalTLSAddress nodes are lowered with this model.
3247   // We need to generate two TOC entries, one for the variable offset, one for
3248   // the region handle. The global address for the TOC entry of the region
3249   // handle is created with the MO_TLSGDM_FLAG flag and the global address
3250   // for the TOC entry of the variable offset is created with MO_TLSGD_FLAG.
3251   SDValue VariableOffsetTGA =
3252       DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGD_FLAG);
3253   SDValue RegionHandleTGA =
3254       DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, PPCII::MO_TLSGDM_FLAG);
3255   SDValue VariableOffset = getTOCEntry(DAG, dl, VariableOffsetTGA);
3256   SDValue RegionHandle = getTOCEntry(DAG, dl, RegionHandleTGA);
3257   return DAG.getNode(PPCISD::TLSGD_AIX, dl, PtrVT, VariableOffset,
3258                      RegionHandle);
3259 }
3260 
3261 SDValue PPCTargetLowering::LowerGlobalTLSAddressLinux(SDValue Op,
3262                                                       SelectionDAG &DAG) const {
3263   // FIXME: TLS addresses currently use medium model code sequences,
3264   // which is the most useful form.  Eventually support for small and
3265   // large models could be added if users need it, at the cost of
3266   // additional complexity.
3267   GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3268   if (DAG.getTarget().useEmulatedTLS())
3269     return LowerToTLSEmulatedModel(GA, DAG);
3270 
3271   SDLoc dl(GA);
3272   const GlobalValue *GV = GA->getGlobal();
3273   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3274   bool is64bit = Subtarget.isPPC64();
3275   const Module *M = DAG.getMachineFunction().getFunction().getParent();
3276   PICLevel::Level picLevel = M->getPICLevel();
3277 
3278   const TargetMachine &TM = getTargetMachine();
3279   TLSModel::Model Model = TM.getTLSModel(GV);
3280 
3281   if (Model == TLSModel::LocalExec) {
3282     if (Subtarget.isUsingPCRelativeCalls()) {
3283       SDValue TLSReg = DAG.getRegister(PPC::X13, MVT::i64);
3284       SDValue TGA = DAG.getTargetGlobalAddress(
3285           GV, dl, PtrVT, 0, (PPCII::MO_PCREL_FLAG | PPCII::MO_TPREL_FLAG));
3286       SDValue MatAddr =
3287           DAG.getNode(PPCISD::TLS_LOCAL_EXEC_MAT_ADDR, dl, PtrVT, TGA);
3288       return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TLSReg, MatAddr);
3289     }
3290 
3291     SDValue TGAHi = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3292                                                PPCII::MO_TPREL_HA);
3293     SDValue TGALo = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3294                                                PPCII::MO_TPREL_LO);
3295     SDValue TLSReg = is64bit ? DAG.getRegister(PPC::X13, MVT::i64)
3296                              : DAG.getRegister(PPC::R2, MVT::i32);
3297 
3298     SDValue Hi = DAG.getNode(PPCISD::Hi, dl, PtrVT, TGAHi, TLSReg);
3299     return DAG.getNode(PPCISD::Lo, dl, PtrVT, TGALo, Hi);
3300   }
3301 
3302   if (Model == TLSModel::InitialExec) {
3303     bool IsPCRel = Subtarget.isUsingPCRelativeCalls();
3304     SDValue TGA = DAG.getTargetGlobalAddress(
3305         GV, dl, PtrVT, 0, IsPCRel ? PPCII::MO_GOT_TPREL_PCREL_FLAG : 0);
3306     SDValue TGATLS = DAG.getTargetGlobalAddress(
3307         GV, dl, PtrVT, 0,
3308         IsPCRel ? (PPCII::MO_TLS | PPCII::MO_PCREL_FLAG) : PPCII::MO_TLS);
3309     SDValue TPOffset;
3310     if (IsPCRel) {
3311       SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, dl, PtrVT, TGA);
3312       TPOffset = DAG.getLoad(MVT::i64, dl, DAG.getEntryNode(), MatPCRel,
3313                              MachinePointerInfo());
3314     } else {
3315       SDValue GOTPtr;
3316       if (is64bit) {
3317         setUsesTOCBasePtr(DAG);
3318         SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3319         GOTPtr =
3320             DAG.getNode(PPCISD::ADDIS_GOT_TPREL_HA, dl, PtrVT, GOTReg, TGA);
3321       } else {
3322         if (!TM.isPositionIndependent())
3323           GOTPtr = DAG.getNode(PPCISD::PPC32_GOT, dl, PtrVT);
3324         else if (picLevel == PICLevel::SmallPIC)
3325           GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3326         else
3327           GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3328       }
3329       TPOffset = DAG.getNode(PPCISD::LD_GOT_TPREL_L, dl, PtrVT, TGA, GOTPtr);
3330     }
3331     return DAG.getNode(PPCISD::ADD_TLS, dl, PtrVT, TPOffset, TGATLS);
3332   }
3333 
3334   if (Model == TLSModel::GeneralDynamic) {
3335     if (Subtarget.isUsingPCRelativeCalls()) {
3336       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3337                                                PPCII::MO_GOT_TLSGD_PCREL_FLAG);
3338       return DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3339     }
3340 
3341     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3342     SDValue GOTPtr;
3343     if (is64bit) {
3344       setUsesTOCBasePtr(DAG);
3345       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3346       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
3347                                    GOTReg, TGA);
3348     } else {
3349       if (picLevel == PICLevel::SmallPIC)
3350         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3351       else
3352         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3353     }
3354     return DAG.getNode(PPCISD::ADDI_TLSGD_L_ADDR, dl, PtrVT,
3355                        GOTPtr, TGA, TGA);
3356   }
3357 
3358   if (Model == TLSModel::LocalDynamic) {
3359     if (Subtarget.isUsingPCRelativeCalls()) {
3360       SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3361                                                PPCII::MO_GOT_TLSLD_PCREL_FLAG);
3362       SDValue MatPCRel =
3363           DAG.getNode(PPCISD::TLS_DYNAMIC_MAT_PCREL_ADDR, dl, PtrVT, TGA);
3364       return DAG.getNode(PPCISD::PADDI_DTPREL, dl, PtrVT, MatPCRel, TGA);
3365     }
3366 
3367     SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
3368     SDValue GOTPtr;
3369     if (is64bit) {
3370       setUsesTOCBasePtr(DAG);
3371       SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
3372       GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
3373                            GOTReg, TGA);
3374     } else {
3375       if (picLevel == PICLevel::SmallPIC)
3376         GOTPtr = DAG.getNode(PPCISD::GlobalBaseReg, dl, PtrVT);
3377       else
3378         GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
3379     }
3380     SDValue TLSAddr = DAG.getNode(PPCISD::ADDI_TLSLD_L_ADDR, dl,
3381                                   PtrVT, GOTPtr, TGA, TGA);
3382     SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl,
3383                                       PtrVT, TLSAddr, TGA);
3384     return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
3385   }
3386 
3387   llvm_unreachable("Unknown TLS model!");
3388 }
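// As a rough guide, the non-PC-relative local-exec path above corresponds to
// a sequence along the lines of
//   addis rT, r13, x@tprel@ha
//   addi  rT, rT, x@tprel@l
// with r13 (X13) holding the thread pointer on 64-bit targets; the other
// models go through the GOT/TOC as encoded by the PPCISD nodes above.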
3389 
3390 SDValue PPCTargetLowering::LowerGlobalAddress(SDValue Op,
3391                                               SelectionDAG &DAG) const {
3392   EVT PtrVT = Op.getValueType();
3393   GlobalAddressSDNode *GSDN = cast<GlobalAddressSDNode>(Op);
3394   SDLoc DL(GSDN);
3395   const GlobalValue *GV = GSDN->getGlobal();
3396 
3397   // 64-bit SVR4 ABI & AIX ABI code is always position-independent.
3398   // The actual address of the GlobalValue is stored in the TOC.
3399   if (Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) {
3400     if (Subtarget.isUsingPCRelativeCalls()) {
3401       EVT Ty = getPointerTy(DAG.getDataLayout());
3402       if (isAccessedAsGotIndirect(Op)) {
3403         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3404                                                 PPCII::MO_PCREL_FLAG |
3405                                                     PPCII::MO_GOT_FLAG);
3406         SDValue MatPCRel = DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3407         SDValue Load = DAG.getLoad(MVT::i64, DL, DAG.getEntryNode(), MatPCRel,
3408                                    MachinePointerInfo());
3409         return Load;
3410       } else {
3411         SDValue GA = DAG.getTargetGlobalAddress(GV, DL, Ty, GSDN->getOffset(),
3412                                                 PPCII::MO_PCREL_FLAG);
3413         return DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, Ty, GA);
3414       }
3415     }
3416     setUsesTOCBasePtr(DAG);
3417     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset());
3418     return getTOCEntry(DAG, DL, GA);
3419   }
3420 
3421   unsigned MOHiFlag, MOLoFlag;
3422   bool IsPIC = isPositionIndependent();
3423   getLabelAccessInfo(IsPIC, Subtarget, MOHiFlag, MOLoFlag, GV);
3424 
3425   if (IsPIC && Subtarget.isSVR4ABI()) {
3426     SDValue GA = DAG.getTargetGlobalAddress(GV, DL, PtrVT,
3427                                             GSDN->getOffset(),
3428                                             PPCII::MO_PIC_FLAG);
3429     return getTOCEntry(DAG, DL, GA);
3430   }
3431 
3432   SDValue GAHi =
3433     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOHiFlag);
3434   SDValue GALo =
3435     DAG.getTargetGlobalAddress(GV, DL, PtrVT, GSDN->getOffset(), MOLoFlag);
3436 
3437   return LowerLabelRef(GAHi, GALo, IsPIC, DAG);
3438 }
3439 
3440 SDValue PPCTargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
3441   bool IsStrict = Op->isStrictFPOpcode();
3442   ISD::CondCode CC =
3443       cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();
3444   SDValue LHS = Op.getOperand(IsStrict ? 1 : 0);
3445   SDValue RHS = Op.getOperand(IsStrict ? 2 : 1);
3446   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
3447   EVT LHSVT = LHS.getValueType();
3448   SDLoc dl(Op);
3449 
3450   // Soften the setcc with libcall if it is fp128.
3451   if (LHSVT == MVT::f128) {
3452     assert(!Subtarget.hasP9Vector() &&
3453            "SETCC for f128 is already legal under Power9!");
3454     softenSetCCOperands(DAG, LHSVT, LHS, RHS, CC, dl, LHS, RHS, Chain,
3455                         Op->getOpcode() == ISD::STRICT_FSETCCS);
3456     if (RHS.getNode())
3457       LHS = DAG.getNode(ISD::SETCC, dl, Op.getValueType(), LHS, RHS,
3458                         DAG.getCondCode(CC));
3459     if (IsStrict)
3460       return DAG.getMergeValues({LHS, Chain}, dl);
3461     return LHS;
3462   }
3463 
3464   assert(!IsStrict && "Don't know how to handle STRICT_FSETCC!");
3465 
3466   if (Op.getValueType() == MVT::v2i64) {
3467     // When the operands themselves are v2i64 values, we need to do something
3468     // special because VSX has no underlying comparison operations for these.
3469     if (LHS.getValueType() == MVT::v2i64) {
3470       // Equality can be handled by casting to the legal type for Altivec
3471       // comparisons, everything else needs to be expanded.
3472       if (CC == ISD::SETEQ || CC == ISD::SETNE) {
3473         return DAG.getNode(
3474             ISD::BITCAST, dl, MVT::v2i64,
3475             DAG.getSetCC(dl, MVT::v4i32,
3476                          DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, LHS),
3477                          DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, RHS), CC));
3478       }
3479 
3480       return SDValue();
3481     }
3482 
3483     // We handle most of these in the usual way.
3484     return Op;
3485   }
3486 
3487   // If we're comparing for equality to zero, expose the fact that this is
3488   // implemented as a ctlz/srl pair on ppc, so that the dag combiner can
3489   // fold the new nodes.
3490   if (SDValue V = lowerCmpEqZeroToCtlzSrl(Op, DAG))
3491     return V;
3492 
3493   if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
3494     // Leave comparisons against 0 and -1 alone for now, since they're usually
3495     // optimized.  FIXME: revisit this when we can custom lower all setcc
3496     // optimizations.
3497     if (C->isAllOnesValue() || C->isNullValue())
3498       return SDValue();
3499   }
3500 
3501   // If we have an integer seteq/setne, turn it into a compare against zero
3502   // by xor'ing the rhs with the lhs, which is faster than setting a
3503   // condition register, reading it back out, and masking the correct bit.  The
3504   // normal approach here uses sub to do this instead of xor.  Using xor exposes
3505   // the result to other bit-twiddling opportunities.
3506   if (LHSVT.isInteger() && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
3507     EVT VT = Op.getValueType();
3508     SDValue Sub = DAG.getNode(ISD::XOR, dl, LHSVT, LHS, RHS);
3509     return DAG.getSetCC(dl, VT, Sub, DAG.getConstant(0, dl, LHSVT), CC);
3510   }
3511   return SDValue();
3512 }
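// For instance, an integer (seteq %a, %b) is rewritten above as
// (seteq (xor %a, %b), 0), which the compare-against-zero path and later
// combines can then turn into a cntlz/srl style sequence.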
3513 
3514 SDValue PPCTargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
3515   SDNode *Node = Op.getNode();
3516   EVT VT = Node->getValueType(0);
3517   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3518   SDValue InChain = Node->getOperand(0);
3519   SDValue VAListPtr = Node->getOperand(1);
3520   const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
3521   SDLoc dl(Node);
3522 
3523   assert(!Subtarget.isPPC64() && "LowerVAARG is PPC32 only");
3524 
3525   // gpr_index
3526   SDValue GprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3527                                     VAListPtr, MachinePointerInfo(SV), MVT::i8);
3528   InChain = GprIndex.getValue(1);
3529 
3530   if (VT == MVT::i64) {
3531     // Check if GprIndex is even
3532     SDValue GprAnd = DAG.getNode(ISD::AND, dl, MVT::i32, GprIndex,
3533                                  DAG.getConstant(1, dl, MVT::i32));
3534     SDValue CC64 = DAG.getSetCC(dl, MVT::i32, GprAnd,
3535                                 DAG.getConstant(0, dl, MVT::i32), ISD::SETNE);
3536     SDValue GprIndexPlusOne = DAG.getNode(ISD::ADD, dl, MVT::i32, GprIndex,
3537                                           DAG.getConstant(1, dl, MVT::i32));
3538     // Align GprIndex to be even if it isn't
3539     GprIndex = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC64, GprIndexPlusOne,
3540                            GprIndex);
3541   }
3542 
3543   // fpr index is 1 byte after gpr
3544   SDValue FprPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3545                                DAG.getConstant(1, dl, MVT::i32));
3546 
3547   // fpr
3548   SDValue FprIndex = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, InChain,
3549                                     FprPtr, MachinePointerInfo(SV), MVT::i8);
3550   InChain = FprIndex.getValue(1);
3551 
3552   SDValue RegSaveAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3553                                        DAG.getConstant(8, dl, MVT::i32));
3554 
3555   SDValue OverflowAreaPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAListPtr,
3556                                         DAG.getConstant(4, dl, MVT::i32));
3557 
3558   // areas
3559   SDValue OverflowArea =
3560       DAG.getLoad(MVT::i32, dl, InChain, OverflowAreaPtr, MachinePointerInfo());
3561   InChain = OverflowArea.getValue(1);
3562 
3563   SDValue RegSaveArea =
3564       DAG.getLoad(MVT::i32, dl, InChain, RegSaveAreaPtr, MachinePointerInfo());
3565   InChain = RegSaveArea.getValue(1);
3566 
3567   // select overflow_area if index >= 8
3568   SDValue CC = DAG.getSetCC(dl, MVT::i32, VT.isInteger() ? GprIndex : FprIndex,
3569                             DAG.getConstant(8, dl, MVT::i32), ISD::SETLT);
3570 
3571   // adjustment constant gpr_index * 4/8
3572   SDValue RegConstant = DAG.getNode(ISD::MUL, dl, MVT::i32,
3573                                     VT.isInteger() ? GprIndex : FprIndex,
3574                                     DAG.getConstant(VT.isInteger() ? 4 : 8, dl,
3575                                                     MVT::i32));
3576 
3577   // OurReg = RegSaveArea + RegConstant
3578   SDValue OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, RegSaveArea,
3579                                RegConstant);
3580 
3581   // Floating types are 32 bytes into RegSaveArea
3582   if (VT.isFloatingPoint())
3583     OurReg = DAG.getNode(ISD::ADD, dl, PtrVT, OurReg,
3584                          DAG.getConstant(32, dl, MVT::i32));
3585 
3586   // increase {f,g}pr_index by 1 (or 2 if VT is i64)
3587   SDValue IndexPlus1 = DAG.getNode(ISD::ADD, dl, MVT::i32,
3588                                    VT.isInteger() ? GprIndex : FprIndex,
3589                                    DAG.getConstant(VT == MVT::i64 ? 2 : 1, dl,
3590                                                    MVT::i32));
3591 
3592   InChain = DAG.getTruncStore(InChain, dl, IndexPlus1,
3593                               VT.isInteger() ? VAListPtr : FprPtr,
3594                               MachinePointerInfo(SV), MVT::i8);
3595 
3596   // determine if we should load from reg_save_area or overflow_area
3597   SDValue Result = DAG.getNode(ISD::SELECT, dl, PtrVT, CC, OurReg, OverflowArea);
3598 
3599   // increase overflow_area by 4/8 if gpr/fpr index >= 8
3600   SDValue OverflowAreaPlusN = DAG.getNode(ISD::ADD, dl, PtrVT, OverflowArea,
3601                                           DAG.getConstant(VT.isInteger() ? 4 : 8,
3602                                           dl, MVT::i32));
3603 
3604   OverflowArea = DAG.getNode(ISD::SELECT, dl, MVT::i32, CC, OverflowArea,
3605                              OverflowAreaPlusN);
3606 
3607   InChain = DAG.getTruncStore(InChain, dl, OverflowArea, OverflowAreaPtr,
3608                               MachinePointerInfo(), MVT::i32);
3609 
3610   return DAG.getLoad(VT, dl, InChain, Result, MachinePointerInfo());
3611 }
3612 
3613 SDValue PPCTargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
3614   assert(!Subtarget.isPPC64() && "LowerVACOPY is PPC32 only");
3615 
3616   // We have to copy the entire va_list struct:
3617   // 2*sizeof(char) + 2 bytes of padding + 2*sizeof(char*) = 12 bytes
3618   return DAG.getMemcpy(Op.getOperand(0), Op, Op.getOperand(1), Op.getOperand(2),
3619                        DAG.getConstant(12, SDLoc(Op), MVT::i32), Align(8),
3620                        false, true, false, MachinePointerInfo(),
3621                        MachinePointerInfo());
3622 }
3623 
3624 SDValue PPCTargetLowering::LowerADJUST_TRAMPOLINE(SDValue Op,
3625                                                   SelectionDAG &DAG) const {
3626   if (Subtarget.isAIXABI())
3627     report_fatal_error("ADJUST_TRAMPOLINE operation is not supported on AIX.");
3628 
3629   return Op.getOperand(0);
3630 }
3631 
3632 SDValue PPCTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const {
3633   MachineFunction &MF = DAG.getMachineFunction();
3634   PPCFunctionInfo &MFI = *MF.getInfo<PPCFunctionInfo>();
3635 
3636   assert((Op.getOpcode() == ISD::INLINEASM ||
3637           Op.getOpcode() == ISD::INLINEASM_BR) &&
3638          "Expecting Inline ASM node.");
3639 
3640   // If an LR store is already known to be required then there is no point in
3641   // checking this ASM as well.
3642   if (MFI.isLRStoreRequired())
3643     return Op;
3644 
3645   // Inline ASM nodes have an optional last operand that is an incoming Flag of
3646   // type MVT::Glue. We want to ignore this last operand if that is the case.
3647   unsigned NumOps = Op.getNumOperands();
3648   if (Op.getOperand(NumOps - 1).getValueType() == MVT::Glue)
3649     --NumOps;
3650 
3651   // Check all operands that may contain the LR.
3652   for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
3653     unsigned Flags = cast<ConstantSDNode>(Op.getOperand(i))->getZExtValue();
3654     unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
3655     ++i; // Skip the ID value.
3656 
3657     switch (InlineAsm::getKind(Flags)) {
3658     default:
3659       llvm_unreachable("Bad flags!");
3660     case InlineAsm::Kind_RegUse:
3661     case InlineAsm::Kind_Imm:
3662     case InlineAsm::Kind_Mem:
3663       i += NumVals;
3664       break;
3665     case InlineAsm::Kind_Clobber:
3666     case InlineAsm::Kind_RegDef:
3667     case InlineAsm::Kind_RegDefEarlyClobber: {
3668       for (; NumVals; --NumVals, ++i) {
3669         Register Reg = cast<RegisterSDNode>(Op.getOperand(i))->getReg();
3670         if (Reg != PPC::LR && Reg != PPC::LR8)
3671           continue;
3672         MFI.setLRStoreRequired();
3673         return Op;
3674       }
3675       break;
3676     }
3677     }
3678   }
3679 
3680   return Op;
3681 }
3682 
3683 SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
3684                                                 SelectionDAG &DAG) const {
3685   if (Subtarget.isAIXABI())
3686     report_fatal_error("INIT_TRAMPOLINE operation is not supported on AIX.");
3687 
3688   SDValue Chain = Op.getOperand(0);
3689   SDValue Trmp = Op.getOperand(1); // trampoline
3690   SDValue FPtr = Op.getOperand(2); // nested function
3691   SDValue Nest = Op.getOperand(3); // 'nest' parameter value
3692   SDLoc dl(Op);
3693 
3694   EVT PtrVT = getPointerTy(DAG.getDataLayout());
3695   bool isPPC64 = (PtrVT == MVT::i64);
3696   Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
3697 
3698   TargetLowering::ArgListTy Args;
3699   TargetLowering::ArgListEntry Entry;
3700 
3701   Entry.Ty = IntPtrTy;
3702   Entry.Node = Trmp; Args.push_back(Entry);
3703 
3704   // TrampSize == (isPPC64 ? 48 : 40);
3705   Entry.Node = DAG.getConstant(isPPC64 ? 48 : 40, dl,
3706                                isPPC64 ? MVT::i64 : MVT::i32);
3707   Args.push_back(Entry);
3708 
3709   Entry.Node = FPtr; Args.push_back(Entry);
3710   Entry.Node = Nest; Args.push_back(Entry);
3711 
3712   // Lower to a call to __trampoline_setup(Trmp, TrampSize, FPtr, ctx_reg)
3713   TargetLowering::CallLoweringInfo CLI(DAG);
3714   CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3715       CallingConv::C, Type::getVoidTy(*DAG.getContext()),
3716       DAG.getExternalSymbol("__trampoline_setup", PtrVT), std::move(Args));
3717 
3718   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3719   return CallResult.second;
3720 }
3721 
3722 SDValue PPCTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3723   MachineFunction &MF = DAG.getMachineFunction();
3724   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3725   EVT PtrVT = getPointerTy(MF.getDataLayout());
3726 
3727   SDLoc dl(Op);
3728 
3729   if (Subtarget.isPPC64() || Subtarget.isAIXABI()) {
3730     // vastart just stores the address of the VarArgsFrameIndex slot into the
3731     // memory location argument.
3732     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3733     const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3734     return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3735                         MachinePointerInfo(SV));
3736   }
3737 
3738   // For the 32-bit SVR4 ABI we follow the layout of the va_list struct.
3739   // We suppose the given va_list is already allocated.
3740   //
3741   // typedef struct {
3742   //  char gpr;     /* index into the array of 8 GPRs
3743   //                 * stored in the register save area
3744   //                 * gpr=0 corresponds to r3,
3745   //                 * gpr=1 to r4, etc.
3746   //                 */
3747   //  char fpr;     /* index into the array of 8 FPRs
3748   //                 * stored in the register save area
3749   //                 * fpr=0 corresponds to f1,
3750   //                 * fpr=1 to f2, etc.
3751   //                 */
3752   //  char *overflow_arg_area;
3753   //                /* location on stack that holds
3754   //                 * the next overflow argument
3755   //                 */
3756   //  char *reg_save_area;
3757   //               /* where r3:r10 and f1:f8 (if saved)
3758   //                * are stored
3759   //                */
3760   // } va_list[1];
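  // With the 4-byte pointers used here, the byte offsets stored below (and
  // mirrored in LowerVAARG and LowerVACOPY) work out to:
  //   gpr               -> offset 0
  //   fpr               -> offset 1
  //   overflow_arg_area -> offset 4  (after 2 bytes of padding)
  //   reg_save_area     -> offset 8
  // for the 12-byte va_list that LowerVACOPY copies.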
3761 
3762   SDValue ArgGPR = DAG.getConstant(FuncInfo->getVarArgsNumGPR(), dl, MVT::i32);
3763   SDValue ArgFPR = DAG.getConstant(FuncInfo->getVarArgsNumFPR(), dl, MVT::i32);
3764   SDValue StackOffsetFI = DAG.getFrameIndex(FuncInfo->getVarArgsStackOffset(),
3765                                             PtrVT);
3766   SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3767                                  PtrVT);
3768 
3769   uint64_t FrameOffset = PtrVT.getSizeInBits()/8;
3770   SDValue ConstFrameOffset = DAG.getConstant(FrameOffset, dl, PtrVT);
3771 
3772   uint64_t StackOffset = PtrVT.getSizeInBits()/8 - 1;
3773   SDValue ConstStackOffset = DAG.getConstant(StackOffset, dl, PtrVT);
3774 
3775   uint64_t FPROffset = 1;
3776   SDValue ConstFPROffset = DAG.getConstant(FPROffset, dl, PtrVT);
3777 
3778   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3779 
3780   // Store first byte : number of int regs
3781   SDValue firstStore =
3782       DAG.getTruncStore(Op.getOperand(0), dl, ArgGPR, Op.getOperand(1),
3783                         MachinePointerInfo(SV), MVT::i8);
3784   uint64_t nextOffset = FPROffset;
3785   SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, Op.getOperand(1),
3786                                   ConstFPROffset);
3787 
3788   // Store second byte : number of float regs
3789   SDValue secondStore =
3790       DAG.getTruncStore(firstStore, dl, ArgFPR, nextPtr,
3791                         MachinePointerInfo(SV, nextOffset), MVT::i8);
3792   nextOffset += StackOffset;
3793   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstStackOffset);
3794 
3795   // Store second word : arguments given on stack
3796   SDValue thirdStore = DAG.getStore(secondStore, dl, StackOffsetFI, nextPtr,
3797                                     MachinePointerInfo(SV, nextOffset));
3798   nextOffset += FrameOffset;
3799   nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, nextPtr, ConstFrameOffset);
3800 
3801   // Store third word : arguments given in registers
3802   return DAG.getStore(thirdStore, dl, FR, nextPtr,
3803                       MachinePointerInfo(SV, nextOffset));
3804 }
3805 
3806 /// FPR - The set of FP registers that should be allocated for arguments
3807 /// on Darwin and AIX.
3808 static const MCPhysReg FPR[] = {PPC::F1,  PPC::F2,  PPC::F3, PPC::F4, PPC::F5,
3809                                 PPC::F6,  PPC::F7,  PPC::F8, PPC::F9, PPC::F10,
3810                                 PPC::F11, PPC::F12, PPC::F13};
3811 
3812 /// CalculateStackSlotSize - Calculates the size reserved for this argument on
3813 /// the stack.
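/// For example, with PtrByteSize == 8, a 20-byte byval argument reserves 24
/// bytes, while members of consecutive-register aggregates keep their exact
/// store size.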
3814 static unsigned CalculateStackSlotSize(EVT ArgVT, ISD::ArgFlagsTy Flags,
3815                                        unsigned PtrByteSize) {
3816   unsigned ArgSize = ArgVT.getStoreSize();
3817   if (Flags.isByVal())
3818     ArgSize = Flags.getByValSize();
3819 
3820   // Round up to multiples of the pointer size, except for array members,
3821   // which are always packed.
3822   if (!Flags.isInConsecutiveRegs())
3823     ArgSize = ((ArgSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3824 
3825   return ArgSize;
3826 }
3827 
3828 /// CalculateStackSlotAlignment - Calculates the alignment of this argument
3829 /// on the stack.
3830 static Align CalculateStackSlotAlignment(EVT ArgVT, EVT OrigVT,
3831                                          ISD::ArgFlagsTy Flags,
3832                                          unsigned PtrByteSize) {
3833   Align Alignment(PtrByteSize);
3834 
3835   // Altivec parameters are padded to a 16 byte boundary.
3836   if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3837       ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3838       ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3839       ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3840     Alignment = Align(16);
3841 
3842   // ByVal parameters are aligned as requested.
3843   if (Flags.isByVal()) {
3844     auto BVAlign = Flags.getNonZeroByValAlign();
3845     if (BVAlign > PtrByteSize) {
3846       if (BVAlign.value() % PtrByteSize != 0)
3847         llvm_unreachable(
3848             "ByVal alignment is not a multiple of the pointer size");
3849 
3850       Alignment = BVAlign;
3851     }
3852   }
3853 
3854   // Array members are always packed to their original alignment.
3855   if (Flags.isInConsecutiveRegs()) {
3856     // If the array member was split into multiple registers, the first
3857     // needs to be aligned to the size of the full type.  (Except for
3858     // ppcf128, which is only aligned as its f64 components.)
3859     if (Flags.isSplit() && OrigVT != MVT::ppcf128)
3860       Alignment = Align(OrigVT.getStoreSize());
3861     else
3862       Alignment = Align(ArgVT.getStoreSize());
3863   }
3864 
3865   return Alignment;
3866 }
3867 
3868 /// CalculateStackSlotUsed - Return whether this argument will use its
3869 /// stack slot (instead of being passed in registers).  ArgOffset,
3870 /// AvailableFPRs, and AvailableVRs must hold the current argument
3871 /// position, and will be updated to account for this argument.
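/// For example, on 64-bit ELF an f64 argument always advances ArgOffset by 8,
/// but as long as an FPR is still available the function returns false; once
/// the FPRs are exhausted the same argument is reported as using its slot.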
3872 static bool CalculateStackSlotUsed(EVT ArgVT, EVT OrigVT, ISD::ArgFlagsTy Flags,
3873                                    unsigned PtrByteSize, unsigned LinkageSize,
3874                                    unsigned ParamAreaSize, unsigned &ArgOffset,
3875                                    unsigned &AvailableFPRs,
3876                                    unsigned &AvailableVRs) {
3877   bool UseMemory = false;
3878 
3879   // Respect alignment of argument on the stack.
3880   Align Alignment =
3881       CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
3882   ArgOffset = alignTo(ArgOffset, Alignment);
3883   // If there's no space left in the argument save area, we must
3884   // use memory (this check also catches zero-sized arguments).
3885   if (ArgOffset >= LinkageSize + ParamAreaSize)
3886     UseMemory = true;
3887 
3888   // Allocate argument on the stack.
3889   ArgOffset += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
3890   if (Flags.isInConsecutiveRegsLast())
3891     ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
3892   // If we overran the argument save area, we must use memory
3893   // (this check catches arguments passed partially in memory)
3894   if (ArgOffset > LinkageSize + ParamAreaSize)
3895     UseMemory = true;
3896 
3897   // However, if the argument is actually passed in an FPR or a VR,
3898   // we don't use memory after all.
3899   if (!Flags.isByVal()) {
3900     if (ArgVT == MVT::f32 || ArgVT == MVT::f64)
3901       if (AvailableFPRs > 0) {
3902         --AvailableFPRs;
3903         return false;
3904       }
3905     if (ArgVT == MVT::v4f32 || ArgVT == MVT::v4i32 ||
3906         ArgVT == MVT::v8i16 || ArgVT == MVT::v16i8 ||
3907         ArgVT == MVT::v2f64 || ArgVT == MVT::v2i64 ||
3908         ArgVT == MVT::v1i128 || ArgVT == MVT::f128)
3909       if (AvailableVRs > 0) {
3910         --AvailableVRs;
3911         return false;
3912       }
3913   }
3914 
3915   return UseMemory;
3916 }
3917 
3918 /// EnsureStackAlignment - Round stack frame size up from NumBytes to
3919 /// ensure minimum alignment required for target.
3920 static unsigned EnsureStackAlignment(const PPCFrameLowering *Lowering,
3921                                      unsigned NumBytes) {
3922   return alignTo(NumBytes, Lowering->getStackAlign());
3923 }
3924 
3925 SDValue PPCTargetLowering::LowerFormalArguments(
3926     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3927     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3928     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3929   if (Subtarget.isAIXABI())
3930     return LowerFormalArguments_AIX(Chain, CallConv, isVarArg, Ins, dl, DAG,
3931                                     InVals);
3932   if (Subtarget.is64BitELFABI())
3933     return LowerFormalArguments_64SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3934                                        InVals);
3935   assert(Subtarget.is32BitELFABI());
3936   return LowerFormalArguments_32SVR4(Chain, CallConv, isVarArg, Ins, dl, DAG,
3937                                      InVals);
3938 }
3939 
3940 SDValue PPCTargetLowering::LowerFormalArguments_32SVR4(
3941     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3942     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3943     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3944 
3945   // 32-bit SVR4 ABI Stack Frame Layout:
3946   //              +-----------------------------------+
3947   //        +-->  |            Back chain             |
3948   //        |     +-----------------------------------+
3949   //        |     | Floating-point register save area |
3950   //        |     +-----------------------------------+
3951   //        |     |    General register save area     |
3952   //        |     +-----------------------------------+
3953   //        |     |          CR save word             |
3954   //        |     +-----------------------------------+
3955   //        |     |         VRSAVE save word          |
3956   //        |     +-----------------------------------+
3957   //        |     |         Alignment padding         |
3958   //        |     +-----------------------------------+
3959   //        |     |     Vector register save area     |
3960   //        |     +-----------------------------------+
3961   //        |     |       Local variable space        |
3962   //        |     +-----------------------------------+
3963   //        |     |        Parameter list area        |
3964   //        |     +-----------------------------------+
3965   //        |     |           LR save word            |
3966   //        |     +-----------------------------------+
3967   // SP-->  +---  |            Back chain             |
3968   //              +-----------------------------------+
3969   //
3970   // Specifications:
3971   //   System V Application Binary Interface PowerPC Processor Supplement
3972   //   AltiVec Technology Programming Interface Manual
3973 
3974   MachineFunction &MF = DAG.getMachineFunction();
3975   MachineFrameInfo &MFI = MF.getFrameInfo();
3976   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
3977 
3978   EVT PtrVT = getPointerTy(MF.getDataLayout());
3979   // Potential tail calls could cause overwriting of argument stack slots.
3980   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
3981                        (CallConv == CallingConv::Fast));
3982   const Align PtrAlign(4);
3983 
3984   // Assign locations to all of the incoming arguments.
3985   SmallVector<CCValAssign, 16> ArgLocs;
3986   PPCCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3987                  *DAG.getContext());
3988 
3989   // Reserve space for the linkage area on the stack.
3990   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
3991   CCInfo.AllocateStack(LinkageSize, PtrAlign);
3992   if (useSoftFloat())
3993     CCInfo.PreAnalyzeFormalArguments(Ins);
3994 
3995   CCInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4);
3996   CCInfo.clearWasPPCF128();
3997 
3998   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3999     CCValAssign &VA = ArgLocs[i];
4000 
4001     // Arguments stored in registers.
4002     if (VA.isRegLoc()) {
4003       const TargetRegisterClass *RC;
4004       EVT ValVT = VA.getValVT();
4005 
4006       switch (ValVT.getSimpleVT().SimpleTy) {
4007         default:
4008           llvm_unreachable("ValVT not supported by formal arguments Lowering");
4009         case MVT::i1:
4010         case MVT::i32:
4011           RC = &PPC::GPRCRegClass;
4012           break;
4013         case MVT::f32:
4014           if (Subtarget.hasP8Vector())
4015             RC = &PPC::VSSRCRegClass;
4016           else if (Subtarget.hasSPE())
4017             RC = &PPC::GPRCRegClass;
4018           else
4019             RC = &PPC::F4RCRegClass;
4020           break;
4021         case MVT::f64:
4022           if (Subtarget.hasVSX())
4023             RC = &PPC::VSFRCRegClass;
4024           else if (Subtarget.hasSPE())
4025             // SPE passes doubles in GPR pairs.
4026             RC = &PPC::GPRCRegClass;
4027           else
4028             RC = &PPC::F8RCRegClass;
4029           break;
4030         case MVT::v16i8:
4031         case MVT::v8i16:
4032         case MVT::v4i32:
4033           RC = &PPC::VRRCRegClass;
4034           break;
4035         case MVT::v4f32:
4036           RC = &PPC::VRRCRegClass;
4037           break;
4038         case MVT::v2f64:
4039         case MVT::v2i64:
4040           RC = &PPC::VRRCRegClass;
4041           break;
4042       }
4043 
4044       SDValue ArgValue;
4045       // Transform the arguments stored in physical registers into
4046       // virtual ones.
4047       if (VA.getLocVT() == MVT::f64 && Subtarget.hasSPE()) {
4048         assert(i + 1 < e && "No second half of double precision argument");
4049         unsigned RegLo = MF.addLiveIn(VA.getLocReg(), RC);
4050         unsigned RegHi = MF.addLiveIn(ArgLocs[++i].getLocReg(), RC);
4051         SDValue ArgValueLo = DAG.getCopyFromReg(Chain, dl, RegLo, MVT::i32);
4052         SDValue ArgValueHi = DAG.getCopyFromReg(Chain, dl, RegHi, MVT::i32);
4053         if (!Subtarget.isLittleEndian())
4054           std::swap (ArgValueLo, ArgValueHi);
4055         ArgValue = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, ArgValueLo,
4056                                ArgValueHi);
4057       } else {
4058         unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
4059         ArgValue = DAG.getCopyFromReg(Chain, dl, Reg,
4060                                       ValVT == MVT::i1 ? MVT::i32 : ValVT);
4061         if (ValVT == MVT::i1)
4062           ArgValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, ArgValue);
4063       }
4064 
4065       InVals.push_back(ArgValue);
4066     } else {
4067       // Argument stored in memory.
4068       assert(VA.isMemLoc());
4069 
4070       // Get the extended size of the argument type on the stack
4071       unsigned ArgSize = VA.getLocVT().getStoreSize();
4072       // Get the actual size of the argument type
4073       unsigned ObjSize = VA.getValVT().getStoreSize();
4074       unsigned ArgOffset = VA.getLocMemOffset();
4075       // Stack objects in PPC32 are right justified.
4076       ArgOffset += ArgSize - ObjSize;
4077       int FI = MFI.CreateFixedObject(ArgSize, ArgOffset, isImmutable);
4078 
4079       // Create load nodes to retrieve arguments from the stack.
4080       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4081       InVals.push_back(
4082           DAG.getLoad(VA.getValVT(), dl, Chain, FIN, MachinePointerInfo()));
4083     }
4084   }
4085 
4086   // Assign locations to all of the incoming aggregate by value arguments.
4087   // Aggregates passed by value are stored in the local variable space of the
4088   // caller's stack frame, right above the parameter list area.
4089   SmallVector<CCValAssign, 16> ByValArgLocs;
4090   CCState CCByValInfo(CallConv, isVarArg, DAG.getMachineFunction(),
4091                       ByValArgLocs, *DAG.getContext());
4092 
4093   // Reserve stack space for the allocations in CCInfo.
4094   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
4095 
4096   CCByValInfo.AnalyzeFormalArguments(Ins, CC_PPC32_SVR4_ByVal);
4097 
4098   // Area that is at least reserved in the caller of this function.
4099   unsigned MinReservedArea = CCByValInfo.getNextStackOffset();
4100   MinReservedArea = std::max(MinReservedArea, LinkageSize);
4101 
4102   // Set the size that is at least reserved in caller of this function.  Tail
4103   // call optimized function's reserved stack space needs to be aligned so that
4104   // taking the difference between two stack areas will result in an aligned
4105   // stack.
4106   MinReservedArea =
4107       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4108   FuncInfo->setMinReservedArea(MinReservedArea);
4109 
4110   SmallVector<SDValue, 8> MemOps;
4111 
4112   // If the function takes variable number of arguments, make a frame index for
4113   // the start of the first vararg value... for expansion of llvm.va_start.
4114   if (isVarArg) {
4115     static const MCPhysReg GPArgRegs[] = {
4116       PPC::R3, PPC::R4, PPC::R5, PPC::R6,
4117       PPC::R7, PPC::R8, PPC::R9, PPC::R10,
4118     };
4119     const unsigned NumGPArgRegs = array_lengthof(GPArgRegs);
4120 
4121     static const MCPhysReg FPArgRegs[] = {
4122       PPC::F1, PPC::F2, PPC::F3, PPC::F4, PPC::F5, PPC::F6, PPC::F7,
4123       PPC::F8
4124     };
4125     unsigned NumFPArgRegs = array_lengthof(FPArgRegs);
4126 
4127     if (useSoftFloat() || hasSPE())
4128        NumFPArgRegs = 0;
4129 
4130     FuncInfo->setVarArgsNumGPR(CCInfo.getFirstUnallocated(GPArgRegs));
4131     FuncInfo->setVarArgsNumFPR(CCInfo.getFirstUnallocated(FPArgRegs));
4132 
4133     // Make room for NumGPArgRegs and NumFPArgRegs.
4134     int Depth = NumGPArgRegs * PtrVT.getSizeInBits()/8 +
4135                 NumFPArgRegs * MVT(MVT::f64).getSizeInBits()/8;
4136 
4137     FuncInfo->setVarArgsStackOffset(
4138       MFI.CreateFixedObject(PtrVT.getSizeInBits()/8,
4139                             CCInfo.getNextStackOffset(), true));
4140 
4141     FuncInfo->setVarArgsFrameIndex(
4142         MFI.CreateStackObject(Depth, Align(8), false));
4143     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4144 
4145     // The fixed integer arguments of a variadic function are stored to the
4146     // VarArgsFrameIndex on the stack so that they may be loaded by
4147     // dereferencing the result of va_next.
4148     for (unsigned GPRIndex = 0; GPRIndex != NumGPArgRegs; ++GPRIndex) {
4149       // Get an existing live-in vreg, or add a new one.
4150       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(GPArgRegs[GPRIndex]);
4151       if (!VReg)
4152         VReg = MF.addLiveIn(GPArgRegs[GPRIndex], &PPC::GPRCRegClass);
4153 
4154       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4155       SDValue Store =
4156           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4157       MemOps.push_back(Store);
4158       // Increment the address by four for the next argument to store
4159       SDValue PtrOff = DAG.getConstant(PtrVT.getSizeInBits()/8, dl, PtrVT);
4160       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4161     }
4162 
4163     // FIXME 32-bit SVR4: We only need to save FP argument registers if CR bit 6
4164     // is set.
4165     // The double arguments are stored to the VarArgsFrameIndex
4166     // on the stack.
4167     for (unsigned FPRIndex = 0; FPRIndex != NumFPArgRegs; ++FPRIndex) {
4168       // Get an existing live-in vreg, or add a new one.
4169       unsigned VReg = MF.getRegInfo().getLiveInVirtReg(FPArgRegs[FPRIndex]);
4170       if (!VReg)
4171         VReg = MF.addLiveIn(FPArgRegs[FPRIndex], &PPC::F8RCRegClass);
4172 
4173       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::f64);
4174       SDValue Store =
4175           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4176       MemOps.push_back(Store);
4177       // Increment the address by eight for the next argument to store
4178       SDValue PtrOff = DAG.getConstant(MVT(MVT::f64).getSizeInBits()/8, dl,
4179                                          PtrVT);
4180       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4181     }
4182   }
4183 
4184   if (!MemOps.empty())
4185     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4186 
4187   return Chain;
4188 }
4189 
4190 // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4191 // value to MVT::i64 and then truncate to the correct register size.
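// For instance, a sign-extended i32 argument that arrives in a 64-bit GPR is
// wrapped as (truncate (AssertSext %gprval, i32)), recording that the full
// register already holds a properly sign-extended value.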
4192 SDValue PPCTargetLowering::extendArgForPPC64(ISD::ArgFlagsTy Flags,
4193                                              EVT ObjectVT, SelectionDAG &DAG,
4194                                              SDValue ArgVal,
4195                                              const SDLoc &dl) const {
4196   if (Flags.isSExt())
4197     ArgVal = DAG.getNode(ISD::AssertSext, dl, MVT::i64, ArgVal,
4198                          DAG.getValueType(ObjectVT));
4199   else if (Flags.isZExt())
4200     ArgVal = DAG.getNode(ISD::AssertZext, dl, MVT::i64, ArgVal,
4201                          DAG.getValueType(ObjectVT));
4202 
4203   return DAG.getNode(ISD::TRUNCATE, dl, ObjectVT, ArgVal);
4204 }
4205 
4206 SDValue PPCTargetLowering::LowerFormalArguments_64SVR4(
4207     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4208     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4209     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4210   // TODO: add description of PPC stack frame format, or at least some docs.
4211   //
4212   bool isELFv2ABI = Subtarget.isELFv2ABI();
4213   bool isLittleEndian = Subtarget.isLittleEndian();
4214   MachineFunction &MF = DAG.getMachineFunction();
4215   MachineFrameInfo &MFI = MF.getFrameInfo();
4216   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
4217 
4218   assert(!(CallConv == CallingConv::Fast && isVarArg) &&
4219          "fastcc not supported on varargs functions");
4220 
4221   EVT PtrVT = getPointerTy(MF.getDataLayout());
4222   // Potential tail calls could cause overwriting of argument stack slots.
4223   bool isImmutable = !(getTargetMachine().Options.GuaranteedTailCallOpt &&
4224                        (CallConv == CallingConv::Fast));
4225   unsigned PtrByteSize = 8;
4226   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4227 
4228   static const MCPhysReg GPR[] = {
4229     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4230     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4231   };
4232   static const MCPhysReg VR[] = {
4233     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4234     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4235   };
4236 
4237   const unsigned Num_GPR_Regs = array_lengthof(GPR);
4238   const unsigned Num_FPR_Regs = useSoftFloat() ? 0 : 13;
4239   const unsigned Num_VR_Regs  = array_lengthof(VR);
4240 
4241   // Do a first pass over the arguments to determine whether the ABI
4242   // guarantees that our caller has allocated the parameter save area
4243   // on its stack frame.  In the ELFv1 ABI, this is always the case;
4244   // in the ELFv2 ABI, it is true if this is a vararg function or if
4245   // any parameter is located in a stack slot.
4246 
4247   bool HasParameterArea = !isELFv2ABI || isVarArg;
4248   unsigned ParamAreaSize = Num_GPR_Regs * PtrByteSize;
4249   unsigned NumBytes = LinkageSize;
4250   unsigned AvailableFPRs = Num_FPR_Regs;
4251   unsigned AvailableVRs = Num_VR_Regs;
4252   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4253     if (Ins[i].Flags.isNest())
4254       continue;
4255 
4256     if (CalculateStackSlotUsed(Ins[i].VT, Ins[i].ArgVT, Ins[i].Flags,
4257                                PtrByteSize, LinkageSize, ParamAreaSize,
4258                                NumBytes, AvailableFPRs, AvailableVRs))
4259       HasParameterArea = true;
4260   }
4261 
4262   // Add DAG nodes to load the arguments or copy them out of registers.  On
4263   // entry to a function on PPC, the arguments start after the linkage area,
4264   // although the first ones are often in registers.
4265 
4266   unsigned ArgOffset = LinkageSize;
4267   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
4268   SmallVector<SDValue, 8> MemOps;
4269   Function::const_arg_iterator FuncArg = MF.getFunction().arg_begin();
4270   unsigned CurArgIdx = 0;
4271   for (unsigned ArgNo = 0, e = Ins.size(); ArgNo != e; ++ArgNo) {
4272     SDValue ArgVal;
4273     bool needsLoad = false;
4274     EVT ObjectVT = Ins[ArgNo].VT;
4275     EVT OrigVT = Ins[ArgNo].ArgVT;
4276     unsigned ObjSize = ObjectVT.getStoreSize();
4277     unsigned ArgSize = ObjSize;
4278     ISD::ArgFlagsTy Flags = Ins[ArgNo].Flags;
4279     if (Ins[ArgNo].isOrigArg()) {
4280       std::advance(FuncArg, Ins[ArgNo].getOrigArgIndex() - CurArgIdx);
4281       CurArgIdx = Ins[ArgNo].getOrigArgIndex();
4282     }
4283     // We re-align the argument offset for each argument, except when using the
4284     // fast calling convention, in which case we only re-align when the argument
4285     // will actually use a stack slot.
4286     unsigned CurArgOffset;
4287     Align Alignment;
4288     auto ComputeArgOffset = [&]() {
4289       /* Respect alignment of argument on the stack.  */
4290       Alignment =
4291           CalculateStackSlotAlignment(ObjectVT, OrigVT, Flags, PtrByteSize);
4292       ArgOffset = alignTo(ArgOffset, Alignment);
4293       CurArgOffset = ArgOffset;
4294     };
4295 
4296     if (CallConv != CallingConv::Fast) {
4297       ComputeArgOffset();
4298 
4299       /* Compute GPR index associated with argument offset.  */
4300       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4301       GPR_idx = std::min(GPR_idx, Num_GPR_Regs);
4302     }
4303 
4304     // FIXME the codegen can be much improved in some cases.
4305     // We do not have to keep everything in memory.
4306     if (Flags.isByVal()) {
4307       assert(Ins[ArgNo].isOrigArg() && "Byval arguments cannot be implicit");
4308 
4309       if (CallConv == CallingConv::Fast)
4310         ComputeArgOffset();
4311 
4312       // ObjSize is the true size, ArgSize rounded up to multiple of registers.
4313       ObjSize = Flags.getByValSize();
4314       ArgSize = ((ObjSize + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4315       // Empty aggregate parameters do not take up registers.  Examples:
4316       //   struct { } a;
4317       //   union  { } b;
4318       //   int c[0];
4319       // etc.  However, we have to provide a place-holder in InVals, so
4320       // pretend we have an 8-byte item at the current address for that
4321       // purpose.
4322       if (!ObjSize) {
4323         int FI = MFI.CreateFixedObject(PtrByteSize, ArgOffset, true);
4324         SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4325         InVals.push_back(FIN);
4326         continue;
4327       }
4328 
4329       // Create a stack object covering all stack doublewords occupied
4330       // by the argument.  If the argument is (fully or partially) on
4331       // the stack, or if the argument is fully in registers but the
4332       // caller has allocated the parameter save anyway, we can refer
4333       // directly to the caller's stack frame.  Otherwise, create a
4334       // local copy in our own frame.
4335       int FI;
4336       if (HasParameterArea ||
4337           ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
4338         FI = MFI.CreateFixedObject(ArgSize, ArgOffset, false, true);
4339       else
4340         FI = MFI.CreateStackObject(ArgSize, Alignment, false);
4341       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4342 
4343       // Handle aggregates smaller than 8 bytes.
4344       if (ObjSize < PtrByteSize) {
4345         // The value of the object is its address, which differs from the
4346         // address of the enclosing doubleword on big-endian systems.
4347         SDValue Arg = FIN;
4348         if (!isLittleEndian) {
4349           SDValue ArgOff = DAG.getConstant(PtrByteSize - ObjSize, dl, PtrVT);
4350           Arg = DAG.getNode(ISD::ADD, dl, ArgOff.getValueType(), Arg, ArgOff);
4351         }
4352         InVals.push_back(Arg);
4353 
4354         if (GPR_idx != Num_GPR_Regs) {
4355           unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4356           FuncInfo->addLiveInAttr(VReg, Flags);
4357           SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4358           SDValue Store;
4359 
4360           if (ObjSize==1 || ObjSize==2 || ObjSize==4) {
4361             EVT ObjType = (ObjSize == 1 ? MVT::i8 :
4362                            (ObjSize == 2 ? MVT::i16 : MVT::i32));
4363             Store = DAG.getTruncStore(Val.getValue(1), dl, Val, Arg,
4364                                       MachinePointerInfo(&*FuncArg), ObjType);
4365           } else {
4366             // For sizes that don't fit a truncating store (3, 5, 6, 7),
4367             // store the whole register as-is to the parameter save area
4368             // slot.
4369             Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4370                                  MachinePointerInfo(&*FuncArg));
4371           }
4372 
4373           MemOps.push_back(Store);
4374         }
4375         // Whether we copied from a register or not, advance the offset
4376         // into the parameter save area by a full doubleword.
4377         ArgOffset += PtrByteSize;
4378         continue;
4379       }
4380 
4381       // The value of the object is its address, which is the address of
4382       // its first stack doubleword.
4383       InVals.push_back(FIN);
4384 
4385       // Store whatever pieces of the object are in registers to memory.
4386       for (unsigned j = 0; j < ArgSize; j += PtrByteSize) {
4387         if (GPR_idx == Num_GPR_Regs)
4388           break;
4389 
4390         unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4391         FuncInfo->addLiveInAttr(VReg, Flags);
4392         SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4393         SDValue Addr = FIN;
4394         if (j) {
4395           SDValue Off = DAG.getConstant(j, dl, PtrVT);
4396           Addr = DAG.getNode(ISD::ADD, dl, Off.getValueType(), Addr, Off);
4397         }
4398         SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, Addr,
4399                                      MachinePointerInfo(&*FuncArg, j));
4400         MemOps.push_back(Store);
4401         ++GPR_idx;
4402       }
4403       ArgOffset += ArgSize;
4404       continue;
4405     }
4406 
4407     switch (ObjectVT.getSimpleVT().SimpleTy) {
4408     default: llvm_unreachable("Unhandled argument type!");
4409     case MVT::i1:
4410     case MVT::i32:
4411     case MVT::i64:
4412       if (Flags.isNest()) {
4413         // The 'nest' parameter, if any, is passed in R11.
4414         unsigned VReg = MF.addLiveIn(PPC::X11, &PPC::G8RCRegClass);
4415         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4416 
4417         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4418           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4419 
4420         break;
4421       }
4422 
4423       // These can be scalar arguments or elements of an integer array type
4424       // passed directly.  Clang may use those instead of "byval" aggregate
4425       // types to avoid forcing arguments to memory unnecessarily.
4426       if (GPR_idx != Num_GPR_Regs) {
4427         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4428         FuncInfo->addLiveInAttr(VReg, Flags);
4429         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4430 
4431         if (ObjectVT == MVT::i32 || ObjectVT == MVT::i1)
4432           // PPC64 passes i8, i16, and i32 values in i64 registers. Promote
4433           // value to MVT::i64 and then truncate to the correct register size.
4434           ArgVal = extendArgForPPC64(Flags, ObjectVT, DAG, ArgVal, dl);
4435       } else {
4436         if (CallConv == CallingConv::Fast)
4437           ComputeArgOffset();
4438 
4439         needsLoad = true;
4440         ArgSize = PtrByteSize;
4441       }
4442       if (CallConv != CallingConv::Fast || needsLoad)
4443         ArgOffset += 8;
4444       break;
4445 
4446     case MVT::f32:
4447     case MVT::f64:
4448       // These can be scalar arguments or elements of a float array type
4449       // passed directly.  The latter are used to implement ELFv2 homogeneous
4450       // float aggregates.
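      // A homogeneous float aggregate here is, roughly, something like
      //   struct Vec4 { float x, y, z, w; };
      // whose members arrive as consecutive f32 values rather than as a
      // byval aggregate.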
4451       if (FPR_idx != Num_FPR_Regs) {
4452         unsigned VReg;
4453 
4454         if (ObjectVT == MVT::f32)
4455           VReg = MF.addLiveIn(FPR[FPR_idx],
4456                               Subtarget.hasP8Vector()
4457                                   ? &PPC::VSSRCRegClass
4458                                   : &PPC::F4RCRegClass);
4459         else
4460           VReg = MF.addLiveIn(FPR[FPR_idx], Subtarget.hasVSX()
4461                                                 ? &PPC::VSFRCRegClass
4462                                                 : &PPC::F8RCRegClass);
4463 
4464         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4465         ++FPR_idx;
4466       } else if (GPR_idx != Num_GPR_Regs && CallConv != CallingConv::Fast) {
4467         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
4468         // once we support fp <-> gpr moves.
4469 
4470         // This can only ever happen in the presence of f32 array types,
4471         // since otherwise we never run out of FPRs before running out
4472         // of GPRs.
4473         unsigned VReg = MF.addLiveIn(GPR[GPR_idx++], &PPC::G8RCRegClass);
4474         FuncInfo->addLiveInAttr(VReg, Flags);
4475         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i64);
4476 
4477         if (ObjectVT == MVT::f32) {
4478           if ((ArgOffset % PtrByteSize) == (isLittleEndian ? 4 : 0))
4479             ArgVal = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgVal,
4480                                  DAG.getConstant(32, dl, MVT::i32));
4481           ArgVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, ArgVal);
4482         }
4483 
4484         ArgVal = DAG.getNode(ISD::BITCAST, dl, ObjectVT, ArgVal);
4485       } else {
4486         if (CallConv == CallingConv::Fast)
4487           ComputeArgOffset();
4488 
4489         needsLoad = true;
4490       }
4491 
4492       // When passing an array of floats, the array occupies consecutive
4493       // space in the argument area; only round up to the next doubleword
4494       // at the end of the array.  Otherwise, each float takes 8 bytes.
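      // For example, five consecutive f32 array members occupy 20 bytes of
      // the argument area, and the offset is rounded up to the next
      // doubleword only after the last member.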
4495       if (CallConv != CallingConv::Fast || needsLoad) {
4496         ArgSize = Flags.isInConsecutiveRegs() ? ObjSize : PtrByteSize;
4497         ArgOffset += ArgSize;
4498         if (Flags.isInConsecutiveRegsLast())
4499           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
4500       }
4501       break;
4502     case MVT::v4f32:
4503     case MVT::v4i32:
4504     case MVT::v8i16:
4505     case MVT::v16i8:
4506     case MVT::v2f64:
4507     case MVT::v2i64:
4508     case MVT::v1i128:
4509     case MVT::f128:
4510       // These can be scalar arguments or elements of a vector array type
4511       // passed directly.  The latter are used to implement ELFv2 homogeneous
4512       // vector aggregates.
4513       if (VR_idx != Num_VR_Regs) {
4514         unsigned VReg = MF.addLiveIn(VR[VR_idx], &PPC::VRRCRegClass);
4515         ArgVal = DAG.getCopyFromReg(Chain, dl, VReg, ObjectVT);
4516         ++VR_idx;
4517       } else {
4518         if (CallConv == CallingConv::Fast)
4519           ComputeArgOffset();
4520         needsLoad = true;
4521       }
4522       if (CallConv != CallingConv::Fast || needsLoad)
4523         ArgOffset += 16;
4524       break;
4525     }
4526 
4527     // We need to load the argument to a virtual register if we determined
4528     // above that we ran out of physical registers of the appropriate type.
4529     if (needsLoad) {
4530       if (ObjSize < ArgSize && !isLittleEndian)
4531         CurArgOffset += ArgSize - ObjSize;
4532       int FI = MFI.CreateFixedObject(ObjSize, CurArgOffset, isImmutable);
4533       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4534       ArgVal = DAG.getLoad(ObjectVT, dl, Chain, FIN, MachinePointerInfo());
4535     }
4536 
4537     InVals.push_back(ArgVal);
4538   }
4539 
4540   // Area that is at least reserved in the caller of this function.
4541   unsigned MinReservedArea;
4542   if (HasParameterArea)
4543     MinReservedArea = std::max(ArgOffset, LinkageSize + 8 * PtrByteSize);
4544   else
4545     MinReservedArea = LinkageSize;
4546 
4547   // Set the size that is at least reserved in caller of this function.  Tail
4548   // call optimized functions' reserved stack space needs to be aligned so that
4549   // taking the difference between two stack areas will result in an aligned
4550   // stack.
4551   MinReservedArea =
4552       EnsureStackAlignment(Subtarget.getFrameLowering(), MinReservedArea);
4553   FuncInfo->setMinReservedArea(MinReservedArea);
4554 
4555   // If the function takes variable number of arguments, make a frame index for
4556   // the start of the first vararg value... for expansion of llvm.va_start.
4557   // The ELFv2 ABI spec notes:
4558   // C programs that are intended to be *portable* across different compilers
4559   // and architectures must use the header file <stdarg.h> to deal with variable
4560   // argument lists.
4561   if (isVarArg && MFI.hasVAStart()) {
4562     int Depth = ArgOffset;
4563 
4564     FuncInfo->setVarArgsFrameIndex(
4565       MFI.CreateFixedObject(PtrByteSize, Depth, true));
4566     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4567 
4568     // If this function is vararg, store any remaining integer argument regs
4569     // to their spots on the stack so that they may be loaded by dereferencing
4570     // the result of va_next.
4571     for (GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
4572          GPR_idx < Num_GPR_Regs; ++GPR_idx) {
4573       unsigned VReg = MF.addLiveIn(GPR[GPR_idx], &PPC::G8RCRegClass);
4574       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
4575       SDValue Store =
4576           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
4577       MemOps.push_back(Store);
4578       // Increment the address by four for the next argument to store
4579       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
4580       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
4581     }
4582   }
4583 
4584   if (!MemOps.empty())
4585     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4586 
4587   return Chain;
4588 }
4589 
4590 /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be
4591 /// adjusted to accommodate the arguments for the tailcall.
4592 static int CalculateTailCallSPDiff(SelectionDAG &DAG, bool isTailCall,
4593                                    unsigned ParamSize) {
4594 
4595   if (!isTailCall) return 0;
4596 
4597   PPCFunctionInfo *FI = DAG.getMachineFunction().getInfo<PPCFunctionInfo>();
4598   unsigned CallerMinReservedArea = FI->getMinReservedArea();
4599   int SPDiff = (int)CallerMinReservedArea - (int)ParamSize;
4600   // Remember only if the new adjustment is bigger.
4601   if (SPDiff < FI->getTailCallSPDelta())
4602     FI->setTailCallSPDelta(SPDiff);
4603 
4604   return SPDiff;
4605 }
4606 
4607 static bool isFunctionGlobalAddress(SDValue Callee);
4608 
4609 static bool callsShareTOCBase(const Function *Caller, SDValue Callee,
4610                               const TargetMachine &TM) {
4611   // It does not make sense to call callsShareTOCBase() with a caller that
4612   // is PC Relative since PC Relative callers do not have a TOC.
4613 #ifndef NDEBUG
4614   const PPCSubtarget *STICaller = &TM.getSubtarget<PPCSubtarget>(*Caller);
4615   assert(!STICaller->isUsingPCRelativeCalls() &&
4616          "PC Relative callers do not have a TOC and cannot share a TOC Base");
4617 #endif
4618 
4619   // Callee is either a GlobalAddress or an ExternalSymbol. ExternalSymbols
4620   // don't have enough information to determine if the caller and callee share
4621   // the same TOC base, so we have to pessimistically assume they don't for
4622   // correctness.
4623   GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4624   if (!G)
4625     return false;
4626 
4627   const GlobalValue *GV = G->getGlobal();
4628 
4629   // If the callee is preemptable, then the static linker will use a plt-stub
4630   // which saves the toc to the stack, and needs a nop after the call
4631   // instruction to convert to a toc-restore.
4632   if (!TM.shouldAssumeDSOLocal(*Caller->getParent(), GV))
4633     return false;
4634 
4635   // Functions with PC Relative enabled may clobber the TOC in the same DSO.
4636   // We may need a TOC restore in the situation where the caller requires a
4637   // valid TOC but the callee is PC Relative and does not.
4638   const Function *F = dyn_cast<Function>(GV);
4639   const GlobalAlias *Alias = dyn_cast<GlobalAlias>(GV);
4640 
4641   // If we have an Alias we can try to get the function from there.
4642   if (Alias) {
4643     const GlobalObject *GlobalObj = Alias->getBaseObject();
4644     F = dyn_cast<Function>(GlobalObj);
4645   }
4646 
4647   // If we still have no valid function pointer we do not have enough
4648   // information to determine if the callee uses PC Relative calls so we must
4649   // assume that it does.
4650   if (!F)
4651     return false;
4652 
4653   // If the callee uses PC Relative we cannot guarantee that the callee won't
4654   // clobber the TOC of the caller and so we must assume that the two
4655   // functions do not share a TOC base.
4656   const PPCSubtarget *STICallee = &TM.getSubtarget<PPCSubtarget>(*F);
4657   if (STICallee->isUsingPCRelativeCalls())
4658     return false;
4659 
4660   // If the GV is not a strong definition then we need to assume it can be
4661   // replaced by another function at link time. The function that replaces
4662   // it may not share the same TOC as the caller since the callee may be
4663   // replaced by a PC Relative version of the same function.
4664   if (!GV->isStrongDefinitionForLinker())
4665     return false;
4666 
4667   // The medium and large code models are expected to provide a sufficiently
4668   // large TOC to provide all data addressing needs of a module with a
4669   // single TOC.
4670   if (CodeModel::Medium == TM.getCodeModel() ||
4671       CodeModel::Large == TM.getCodeModel())
4672     return true;
4673 
4674   // Any explicitly-specified sections and section prefixes must also match.
4675   // Also, if we're using -ffunction-sections, then each function is always in
4676   // a different section (the same is true for COMDAT functions).
4677   if (TM.getFunctionSections() || GV->hasComdat() || Caller->hasComdat() ||
4678       GV->getSection() != Caller->getSection())
4679     return false;
4680   if (const auto *F = dyn_cast<Function>(GV)) {
4681     if (F->getSectionPrefix() != Caller->getSectionPrefix())
4682       return false;
4683   }
4684 
4685   return true;
4686 }
4687 
4688 static bool
4689 needStackSlotPassParameters(const PPCSubtarget &Subtarget,
4690                             const SmallVectorImpl<ISD::OutputArg> &Outs) {
4691   assert(Subtarget.is64BitELFABI());
4692 
4693   const unsigned PtrByteSize = 8;
4694   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
4695 
4696   static const MCPhysReg GPR[] = {
4697     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
4698     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
4699   };
4700   static const MCPhysReg VR[] = {
4701     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
4702     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
4703   };
4704 
4705   const unsigned NumGPRs = array_lengthof(GPR);
4706   const unsigned NumFPRs = 13;
4707   const unsigned NumVRs = array_lengthof(VR);
4708   const unsigned ParamAreaSize = NumGPRs * PtrByteSize;
4709 
4710   unsigned NumBytes = LinkageSize;
4711   unsigned AvailableFPRs = NumFPRs;
4712   unsigned AvailableVRs = NumVRs;
4713 
4714   for (const ISD::OutputArg& Param : Outs) {
4715     if (Param.Flags.isNest()) continue;
4716 
4717     if (CalculateStackSlotUsed(Param.VT, Param.ArgVT, Param.Flags, PtrByteSize,
4718                                LinkageSize, ParamAreaSize, NumBytes,
4719                                AvailableFPRs, AvailableVRs))
4720       return true;
4721   }
4722   return false;
4723 }
4724 
4725 static bool hasSameArgumentList(const Function *CallerFn, const CallBase &CB) {
4726   if (CB.arg_size() != CallerFn->arg_size())
4727     return false;
4728 
4729   auto CalleeArgIter = CB.arg_begin();
4730   auto CalleeArgEnd = CB.arg_end();
4731   Function::const_arg_iterator CallerArgIter = CallerFn->arg_begin();
4732 
4733   for (; CalleeArgIter != CalleeArgEnd; ++CalleeArgIter, ++CallerArgIter) {
4734     const Value* CalleeArg = *CalleeArgIter;
4735     const Value* CallerArg = &(*CallerArgIter);
4736     if (CalleeArg == CallerArg)
4737       continue;
4738 
4739     // e.g. @caller([4 x i64] %a, [4 x i64] %b) {
4740     //        tail call @callee([4 x i64] undef, [4 x i64] %b)
4741     //      }
4742     // 1st argument of callee is undef and has the same type as caller.
4743     if (CalleeArg->getType() == CallerArg->getType() &&
4744         isa<UndefValue>(CalleeArg))
4745       continue;
4746 
4747     return false;
4748   }
4749 
4750   return true;
4751 }
4752 
4753 // Returns true if TCO is possible between the callers and callees
4754 // calling conventions.
4755 static bool
4756 areCallingConvEligibleForTCO_64SVR4(CallingConv::ID CallerCC,
4757                                     CallingConv::ID CalleeCC) {
4758   // Tail calls are possible with fastcc and ccc.
4759   auto isTailCallableCC  = [] (CallingConv::ID CC){
4760       return  CC == CallingConv::C || CC == CallingConv::Fast;
4761   };
4762   if (!isTailCallableCC(CallerCC) || !isTailCallableCC(CalleeCC))
4763     return false;
4764 
4765   // We can safely tail call both fastcc and ccc callees from a c calling
4766   // convention caller. If the caller is fastcc, we may have less stack space
4767   // than a non-fastcc caller with the same signature so disable tail-calls in
4768   // that case.
4769   return CallerCC == CallingConv::C || CallerCC == CalleeCC;
4770 }
4771 
4772 bool PPCTargetLowering::IsEligibleForTailCallOptimization_64SVR4(
4773     SDValue Callee, CallingConv::ID CalleeCC, const CallBase *CB, bool isVarArg,
4774     const SmallVectorImpl<ISD::OutputArg> &Outs,
4775     const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4776   bool TailCallOpt = getTargetMachine().Options.GuaranteedTailCallOpt;
4777 
4778   if (DisableSCO && !TailCallOpt) return false;
4779 
4780   // Variadic argument functions are not supported.
4781   if (isVarArg) return false;
4782 
4783   auto &Caller = DAG.getMachineFunction().getFunction();
4784   // Check that the calling conventions are compatible for tco.
4785   if (!areCallingConvEligibleForTCO_64SVR4(Caller.getCallingConv(), CalleeCC))
4786     return false;
4787 
4788   // A caller containing any byval parameter is not supported.
4789   if (any_of(Ins, [](const ISD::InputArg &IA) { return IA.Flags.isByVal(); }))
4790     return false;
4791 
4792   // A callee containing any byval parameter is not supported either.
4793   // Note: This is a quick workaround, because in some cases, e.g.
4794   // caller's stack size > callee's stack size, we are still able to apply
4795   // sibling call optimization. For example, gcc is able to do SCO for caller1
4796   // in the following example, but not for caller2.
4797   //   struct test {
4798   //     long int a;
4799   //     char ary[56];
4800   //   } gTest;
4801   //   __attribute__((noinline)) int callee(struct test v, struct test *b) {
4802   //     b->a = v.a;
4803   //     return 0;
4804   //   }
4805   //   void caller1(struct test a, struct test c, struct test *b) {
4806   //     callee(gTest, b); }
4807   //   void caller2(struct test *b) { callee(gTest, b); }
4808   if (any_of(Outs, [](const ISD::OutputArg& OA) { return OA.Flags.isByVal(); }))
4809     return false;
4810 
4811   // If callee and caller use different calling conventions, we cannot pass
4812   // parameters on stack since offsets for the parameter area may be different.
4813   if (Caller.getCallingConv() != CalleeCC &&
4814       needStackSlotPassParameters(Subtarget, Outs))
4815     return false;
4816 
4817   // All variants of 64-bit ELF ABIs without PC-Relative addressing require that
4818   // the caller and callee share the same TOC for TCO/SCO. If the caller and
4819   // callee potentially have different TOC bases then we cannot tail call since
4820   // we need to restore the TOC pointer after the call.
4821   // ref: https://bugzilla.mozilla.org/show_bug.cgi?id=973977
4822   // We cannot guarantee this for indirect calls or calls to external functions.
4823   // When PC-Relative addressing is used, the concept of the TOC is no longer
4824   // applicable so this check is not required.
4825   // Check first for indirect calls.
4826   if (!Subtarget.isUsingPCRelativeCalls() &&
4827       !isFunctionGlobalAddress(Callee) && !isa<ExternalSymbolSDNode>(Callee))
4828     return false;
4829 
4830   // Check if we share the TOC base.
4831   if (!Subtarget.isUsingPCRelativeCalls() &&
4832       !callsShareTOCBase(&Caller, Callee, getTargetMachine()))
4833     return false;
4834 
4835   // TCO allows altering callee ABI, so we don't have to check further.
4836   if (CalleeCC == CallingConv::Fast && TailCallOpt)
4837     return true;
4838 
4839   if (DisableSCO) return false;
4840 
4841   // If the callee uses the same argument list as the caller, then we can
4842   // apply SCO in this case. If not, then we need to check whether the callee
4843   // needs stack for passing arguments.
4844   // PC Relative tail calls may not have a CallBase.
4845   // If there is no CallBase we cannot verify if we have the same argument
4846   // list so assume that we don't have the same argument list.
4847   if (CB && !hasSameArgumentList(&Caller, *CB) &&
4848       needStackSlotPassParameters(Subtarget, Outs))
4849     return false;
4850   else if (!CB && needStackSlotPassParameters(Subtarget, Outs))
4851     return false;
4852 
4853   return true;
4854 }
4855 
4856 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
4857 /// for tail call optimization. Targets which want to do tail call
4858 /// optimization should implement this function.
4859 bool
4860 PPCTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
4861                                                      CallingConv::ID CalleeCC,
4862                                                      bool isVarArg,
4863                                       const SmallVectorImpl<ISD::InputArg> &Ins,
4864                                                      SelectionDAG& DAG) const {
4865   if (!getTargetMachine().Options.GuaranteedTailCallOpt)
4866     return false;
4867 
4868   // Variable argument functions are not supported.
4869   if (isVarArg)
4870     return false;
4871 
4872   MachineFunction &MF = DAG.getMachineFunction();
4873   CallingConv::ID CallerCC = MF.getFunction().getCallingConv();
4874   if (CalleeCC == CallingConv::Fast && CallerCC == CalleeCC) {
4875     // Functions containing by val parameters are not supported.
4876     for (unsigned i = 0; i != Ins.size(); i++) {
4877        ISD::ArgFlagsTy Flags = Ins[i].Flags;
4878        if (Flags.isByVal()) return false;
4879     }
4880 
4881     // Non-PIC/GOT tail calls are supported.
4882     if (getTargetMachine().getRelocationModel() != Reloc::PIC_)
4883       return true;
4884 
4885     // At the moment we can only do local tail calls (in same module, hidden
4886     // or protected) if we are generating PIC.
4887     if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
4888       return G->getGlobal()->hasHiddenVisibility()
4889           || G->getGlobal()->hasProtectedVisibility();
4890   }
4891 
4892   return false;
4893 }
4894 
4895 /// isBLACompatibleAddress - Return the immediate to use if the specified
4896 /// 32-bit value is representable in the immediate field of a BxA instruction.
4897 static SDNode *isBLACompatibleAddress(SDValue Op, SelectionDAG &DAG) {
4898   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4899   if (!C) return nullptr;
4900 
4901   int Addr = C->getZExtValue();
4902   if ((Addr & 3) != 0 ||  // Low 2 bits are implicitly zero.
4903       SignExtend32<26>(Addr) != Addr)
4904     return nullptr;  // Top 6 bits have to be sext of immediate.
4905 
4906   return DAG
4907       .getConstant(
4908           (int)C->getZExtValue() >> 2, SDLoc(Op),
4909           DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()))
4910       .getNode();
4911 }
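// Illustrative example for isBLACompatibleAddress above (values assumed, not
// taken from a real binary): an address of 0x123458 has its low two bits clear
// and fits in a sign-extended 26-bit field, so it is BLA-compatible and the
// returned constant node holds 0x123458 >> 2 == 0x48D16. An address such as
// 0x2000001 fails both checks and yields nullptr.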
4912 
4913 namespace {
4914 
4915 struct TailCallArgumentInfo {
4916   SDValue Arg;
4917   SDValue FrameIdxOp;
4918   int FrameIdx = 0;
4919 
4920   TailCallArgumentInfo() = default;
4921 };
4922 
4923 } // end anonymous namespace
4924 
4925 /// StoreTailCallArgumentsToStackSlot - Stores arguments to their stack slot.
4926 static void StoreTailCallArgumentsToStackSlot(
4927     SelectionDAG &DAG, SDValue Chain,
4928     const SmallVectorImpl<TailCallArgumentInfo> &TailCallArgs,
4929     SmallVectorImpl<SDValue> &MemOpChains, const SDLoc &dl) {
4930   for (unsigned i = 0, e = TailCallArgs.size(); i != e; ++i) {
4931     SDValue Arg = TailCallArgs[i].Arg;
4932     SDValue FIN = TailCallArgs[i].FrameIdxOp;
4933     int FI = TailCallArgs[i].FrameIdx;
4934     // Store relative to the frame pointer.
4935     MemOpChains.push_back(DAG.getStore(
4936         Chain, dl, Arg, FIN,
4937         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4938   }
4939 }
4940 
4941 /// EmitTailCallStoreFPAndRetAddr - Move the frame pointer and return address to
4942 /// the appropriate stack slot for the tail call optimized function call.
4943 static SDValue EmitTailCallStoreFPAndRetAddr(SelectionDAG &DAG, SDValue Chain,
4944                                              SDValue OldRetAddr, SDValue OldFP,
4945                                              int SPDiff, const SDLoc &dl) {
4946   if (SPDiff) {
4947     // Calculate the new stack slot for the return address.
4948     MachineFunction &MF = DAG.getMachineFunction();
4949     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
4950     const PPCFrameLowering *FL = Subtarget.getFrameLowering();
4951     bool isPPC64 = Subtarget.isPPC64();
4952     int SlotSize = isPPC64 ? 8 : 4;
4953     int NewRetAddrLoc = SPDiff + FL->getReturnSaveOffset();
4954     int NewRetAddr = MF.getFrameInfo().CreateFixedObject(SlotSize,
4955                                                          NewRetAddrLoc, true);
4956     EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4957     SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewRetAddr, VT);
4958     Chain = DAG.getStore(Chain, dl, OldRetAddr, NewRetAddrFrIdx,
4959                          MachinePointerInfo::getFixedStack(MF, NewRetAddr));
4960   }
4961   return Chain;
4962 }
4963 
4964 /// CalculateTailCallArgDest - Remember the argument for later processing and
4965 /// calculate the position of the argument.
4966 static void
4967 CalculateTailCallArgDest(SelectionDAG &DAG, MachineFunction &MF, bool isPPC64,
4968                          SDValue Arg, int SPDiff, unsigned ArgOffset,
4969                      SmallVectorImpl<TailCallArgumentInfo>& TailCallArguments) {
4970   int Offset = ArgOffset + SPDiff;
4971   uint32_t OpSize = (Arg.getValueSizeInBits() + 7) / 8;
4972   int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4973   EVT VT = isPPC64 ? MVT::i64 : MVT::i32;
4974   SDValue FIN = DAG.getFrameIndex(FI, VT);
4975   TailCallArgumentInfo Info;
4976   Info.Arg = Arg;
4977   Info.FrameIdxOp = FIN;
4978   Info.FrameIdx = FI;
4979   TailCallArguments.push_back(Info);
4980 }
4981 
4982 /// EmitTailCallLoadFPAndRetAddr - Emit loads from the frame pointer and return
4983 /// address stack slots. Returns the chain as result and the loaded values in
4984 /// LROpOut/FPOpOut. Used when tail calling.
4985 SDValue PPCTargetLowering::EmitTailCallLoadFPAndRetAddr(
4986     SelectionDAG &DAG, int SPDiff, SDValue Chain, SDValue &LROpOut,
4987     SDValue &FPOpOut, const SDLoc &dl) const {
4988   if (SPDiff) {
4989     // Load the LR and FP stack slot for later adjusting.
4990     EVT VT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
4991     LROpOut = getReturnAddrFrameIndex(DAG);
4992     LROpOut = DAG.getLoad(VT, dl, Chain, LROpOut, MachinePointerInfo());
4993     Chain = SDValue(LROpOut.getNode(), 1);
4994   }
4995   return Chain;
4996 }
4997 
4998 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
4999 /// by "Src" to address "Dst" of size "Size".  Alignment information is
5000 /// specified by the specific parameter attribute. The copy will be passed as
5001 /// a byval function parameter.
5002 /// Sometimes what we are copying is the end of a larger object, the part that
5003 /// does not fit in registers.
5004 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
5005                                          SDValue Chain, ISD::ArgFlagsTy Flags,
5006                                          SelectionDAG &DAG, const SDLoc &dl) {
5007   SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
5008   return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode,
5009                        Flags.getNonZeroByValAlign(), false, false, false,
5010                        MachinePointerInfo(), MachinePointerInfo());
5011 }
5012 
5013 /// LowerMemOpCallTo - Store the argument to the stack or remember it in case of
5014 /// tail calls.
5015 static void LowerMemOpCallTo(
5016     SelectionDAG &DAG, MachineFunction &MF, SDValue Chain, SDValue Arg,
5017     SDValue PtrOff, int SPDiff, unsigned ArgOffset, bool isPPC64,
5018     bool isTailCall, bool isVector, SmallVectorImpl<SDValue> &MemOpChains,
5019     SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments, const SDLoc &dl) {
5020   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5021   if (!isTailCall) {
5022     if (isVector) {
5023       SDValue StackPtr;
5024       if (isPPC64)
5025         StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
5026       else
5027         StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5028       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr,
5029                            DAG.getConstant(ArgOffset, dl, PtrVT));
5030     }
5031     MemOpChains.push_back(
5032         DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5033     // Calculate and remember argument location.
5034   } else CalculateTailCallArgDest(DAG, MF, isPPC64, Arg, SPDiff, ArgOffset,
5035                                   TailCallArguments);
5036 }
5037 
5038 static void
5039 PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain,
5040                 const SDLoc &dl, int SPDiff, unsigned NumBytes, SDValue LROp,
5041                 SDValue FPOp,
5042                 SmallVectorImpl<TailCallArgumentInfo> &TailCallArguments) {
5043   // Emit a sequence of copyto/copyfrom virtual registers for arguments that
5044   // might overwrite each other in case of tail call optimization.
5045   SmallVector<SDValue, 8> MemOpChains2;
5046   // Do not flag preceding copytoreg stuff together with the following stuff.
5047   InFlag = SDValue();
5048   StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments,
5049                                     MemOpChains2, dl);
5050   if (!MemOpChains2.empty())
5051     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
5052 
5053   // Store the return address to the appropriate stack slot.
5054   Chain = EmitTailCallStoreFPAndRetAddr(DAG, Chain, LROp, FPOp, SPDiff, dl);
5055 
5056   // Emit callseq_end just before tailcall node.
5057   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5058                              DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
5059   InFlag = Chain.getValue(1);
5060 }
5061 
5062 // Is this global address that of a function that can be called by name? (as
5063 // opposed to something that must hold a descriptor for an indirect call).
5064 static bool isFunctionGlobalAddress(SDValue Callee) {
5065   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
5066     if (Callee.getOpcode() == ISD::GlobalTLSAddress ||
5067         Callee.getOpcode() == ISD::TargetGlobalTLSAddress)
5068       return false;
5069 
5070     return G->getGlobal()->getValueType()->isFunctionTy();
5071   }
5072 
5073   return false;
5074 }
5075 
5076 SDValue PPCTargetLowering::LowerCallResult(
5077     SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
5078     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5079     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
5080   SmallVector<CCValAssign, 16> RVLocs;
5081   CCState CCRetInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
5082                     *DAG.getContext());
5083 
5084   CCRetInfo.AnalyzeCallResult(
5085       Ins, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
5086                ? RetCC_PPC_Cold
5087                : RetCC_PPC);
5088 
5089   // Copy all of the result registers out of their specified physreg.
5090   for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
5091     CCValAssign &VA = RVLocs[i];
5092     assert(VA.isRegLoc() && "Can only return in registers!");
5093 
5094     SDValue Val;
5095 
5096     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
5097       SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5098                                       InFlag);
5099       Chain = Lo.getValue(1);
5100       InFlag = Lo.getValue(2);
5101       VA = RVLocs[++i]; // skip ahead to next loc
5102       SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
5103                                       InFlag);
5104       Chain = Hi.getValue(1);
5105       InFlag = Hi.getValue(2);
5106       if (!Subtarget.isLittleEndian())
5107         std::swap (Lo, Hi);
5108       Val = DAG.getNode(PPCISD::BUILD_SPE64, dl, MVT::f64, Lo, Hi);
5109     } else {
5110       Val = DAG.getCopyFromReg(Chain, dl,
5111                                VA.getLocReg(), VA.getLocVT(), InFlag);
5112       Chain = Val.getValue(1);
5113       InFlag = Val.getValue(2);
5114     }
5115 
5116     switch (VA.getLocInfo()) {
5117     default: llvm_unreachable("Unknown loc info!");
5118     case CCValAssign::Full: break;
5119     case CCValAssign::AExt:
5120       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5121       break;
5122     case CCValAssign::ZExt:
5123       Val = DAG.getNode(ISD::AssertZext, dl, VA.getLocVT(), Val,
5124                         DAG.getValueType(VA.getValVT()));
5125       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5126       break;
5127     case CCValAssign::SExt:
5128       Val = DAG.getNode(ISD::AssertSext, dl, VA.getLocVT(), Val,
5129                         DAG.getValueType(VA.getValVT()));
5130       Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
5131       break;
5132     }
5133 
5134     InVals.push_back(Val);
5135   }
5136 
5137   return Chain;
5138 }
5139 
5140 static bool isIndirectCall(const SDValue &Callee, SelectionDAG &DAG,
5141                            const PPCSubtarget &Subtarget, bool isPatchPoint) {
5142   // PatchPoint calls are not indirect.
5143   if (isPatchPoint)
5144     return false;
5145 
5146   if (isFunctionGlobalAddress(Callee) || isa<ExternalSymbolSDNode>(Callee))
5147     return false;
5148 
5149   // Darwin and 32-bit ELF can use a BLA. The descriptor-based ABIs cannot
5150   // because the immediate function pointer points to a descriptor instead of
5151   // a function entry point. The ELFv2 ABI cannot use a BLA because the function
5152   // pointer immediate points to the global entry point, while the BLA would
5153   // need to jump to the local entry point (see rL211174).
5154   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI() &&
5155       isBLACompatibleAddress(Callee, DAG))
5156     return false;
5157 
5158   return true;
5159 }
5160 
5161 // AIX and 64-bit ELF ABIs w/o PCRel require a TOC save/restore around calls.
5162 static inline bool isTOCSaveRestoreRequired(const PPCSubtarget &Subtarget) {
5163   return Subtarget.isAIXABI() ||
5164          (Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls());
5165 }
5166 
5167 static unsigned getCallOpcode(PPCTargetLowering::CallFlags CFlags,
5168                               const Function &Caller,
5169                               const SDValue &Callee,
5170                               const PPCSubtarget &Subtarget,
5171                               const TargetMachine &TM) {
5172   if (CFlags.IsTailCall)
5173     return PPCISD::TC_RETURN;
5174 
5175   // This is a call through a function pointer.
5176   if (CFlags.IsIndirect) {
5177     // AIX and the 64-bit ELF ABIs need to maintain the TOC pointer across
5178     // indirect calls. The save of the caller's TOC pointer to the stack will be
5179     // inserted into the DAG as part of call lowering. The restore of the TOC
5180     // pointer is modeled by using a pseudo instruction for the call opcode that
5181     // represents the 2 instruction sequence of an indirect branch and link,
5182     // immediately followed by a load of the TOC pointer from the stack save
5183     // slot into gpr2. For 64-bit ELFv2 ABI with PCRel, do not restore the TOC
5184     // as it is not saved or used.
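    // As a sketch only, the pseudo conceptually expands to a sequence such as:
    //   bctrl
    //   ld 2, 24(1)    # reload the TOC; 24(1) assumes the ELFv2 TOC save slot,
    //                  # other TOC-based ABIs use their own save offset.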
5185     return isTOCSaveRestoreRequired(Subtarget) ? PPCISD::BCTRL_LOAD_TOC
5186                                                : PPCISD::BCTRL;
5187   }
5188 
5189   if (Subtarget.isUsingPCRelativeCalls()) {
5190     assert(Subtarget.is64BitELFABI() && "PC Relative is only on ELF ABI.");
5191     return PPCISD::CALL_NOTOC;
5192   }
5193 
5194   // The ABIs that maintain a TOC pointer across calls need to have a nop
5195   // immediately following the call instruction if the caller and callee may
5196   // have different TOC bases. At link time if the linker determines the calls
5197   // may not share a TOC base, the call is redirected to a trampoline inserted
5198   // by the linker. The trampoline will (among other things) save the caller's
5199   // TOC pointer at an ABI designated offset in the linkage area and the linker
5200   // will rewrite the nop to be a load of the TOC pointer from the linkage area
5201   // into gpr2.
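  // For illustration (linker behaviour sketched, not emitted here): the emitted
  //   bl callee
  //   nop
  // may be redirected by the linker to a trampoline, with the nop rewritten to
  //   ld 2, 24(1)
  // to reload the caller's TOC pointer (24(1) assumes the ELFv2 save slot).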
5202   if (Subtarget.isAIXABI() || Subtarget.is64BitELFABI())
5203     return callsShareTOCBase(&Caller, Callee, TM) ? PPCISD::CALL
5204                                                   : PPCISD::CALL_NOP;
5205 
5206   return PPCISD::CALL;
5207 }
5208 
5209 static SDValue transformCallee(const SDValue &Callee, SelectionDAG &DAG,
5210                                const SDLoc &dl, const PPCSubtarget &Subtarget) {
5211   if (!Subtarget.usesFunctionDescriptors() && !Subtarget.isELFv2ABI())
5212     if (SDNode *Dest = isBLACompatibleAddress(Callee, DAG))
5213       return SDValue(Dest, 0);
5214 
5215   // Returns true if the callee is local, and false otherwise.
5216   auto isLocalCallee = [&]() {
5217     const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
5218     const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5219     const GlobalValue *GV = G ? G->getGlobal() : nullptr;
5220 
5221     return DAG.getTarget().shouldAssumeDSOLocal(*Mod, GV) &&
5222            !dyn_cast_or_null<GlobalIFunc>(GV);
5223   };
5224 
5225   // The PLT is only used in 32-bit ELF PIC mode.  Attempting to use the PLT in
5226   // a static relocation model causes some versions of GNU LD (2.17.50, at
5227   // least) to force BSS-PLT, instead of secure-PLT, even if all objects are
5228   // built with secure-PLT.
5229   bool UsePlt =
5230       Subtarget.is32BitELFABI() && !isLocalCallee() &&
5231       Subtarget.getTargetMachine().getRelocationModel() == Reloc::PIC_;
5232 
5233   const auto getAIXFuncEntryPointSymbolSDNode = [&](const GlobalValue *GV) {
5234     const TargetMachine &TM = Subtarget.getTargetMachine();
5235     const TargetLoweringObjectFile *TLOF = TM.getObjFileLowering();
5236     MCSymbolXCOFF *S =
5237         cast<MCSymbolXCOFF>(TLOF->getFunctionEntryPointSymbol(GV, TM));
5238 
5239     MVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
5240     return DAG.getMCSymbol(S, PtrVT);
5241   };
5242 
5243   if (isFunctionGlobalAddress(Callee)) {
5244     const GlobalValue *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
5245 
5246     if (Subtarget.isAIXABI()) {
5247       assert(!isa<GlobalIFunc>(GV) && "IFunc is not supported on AIX.");
5248       return getAIXFuncEntryPointSymbolSDNode(GV);
5249     }
5250     return DAG.getTargetGlobalAddress(GV, dl, Callee.getValueType(), 0,
5251                                       UsePlt ? PPCII::MO_PLT : 0);
5252   }
5253 
5254   if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
5255     const char *SymName = S->getSymbol();
5256     if (Subtarget.isAIXABI()) {
5257       // If there exists a user-declared function whose name is the same as the
5258       // ExternalSymbol's, then we pick up the user-declared version.
5259       const Module *Mod = DAG.getMachineFunction().getFunction().getParent();
5260       if (const Function *F =
5261               dyn_cast_or_null<Function>(Mod->getNamedValue(SymName)))
5262         return getAIXFuncEntryPointSymbolSDNode(F);
5263 
5264       // On AIX, direct function calls reference the symbol for the function's
5265       // entry point, which is named by prepending a "." before the function's
5266       // C-linkage name. A Qualname is returned here because an external
5267       // function entry point is a csect with XTY_ER property.
5268       const auto getExternalFunctionEntryPointSymbol = [&](StringRef SymName) {
5269         auto &Context = DAG.getMachineFunction().getMMI().getContext();
5270         MCSectionXCOFF *Sec = Context.getXCOFFSection(
5271             (Twine(".") + Twine(SymName)).str(), SectionKind::getMetadata(),
5272             XCOFF::CsectProperties(XCOFF::XMC_PR, XCOFF::XTY_ER));
5273         return Sec->getQualNameSymbol();
5274       };
5275 
5276       SymName = getExternalFunctionEntryPointSymbol(SymName)->getName().data();
5277     }
5278     return DAG.getTargetExternalSymbol(SymName, Callee.getValueType(),
5279                                        UsePlt ? PPCII::MO_PLT : 0);
5280   }
5281 
5282   // No transformation needed.
5283   assert(Callee.getNode() && "What no callee?");
5284   return Callee;
5285 }
5286 
5287 static SDValue getOutputChainFromCallSeq(SDValue CallSeqStart) {
5288   assert(CallSeqStart.getOpcode() == ISD::CALLSEQ_START &&
5289          "Expected a CALLSEQ_STARTSDNode.");
5290 
5291   // The last operand is the chain, except when the node has glue. If the node
5292   // has glue, then the last operand is the glue, and the chain is the second
5293   // last operand.
5294   SDValue LastValue = CallSeqStart.getValue(CallSeqStart->getNumValues() - 1);
5295   if (LastValue.getValueType() != MVT::Glue)
5296     return LastValue;
5297 
5298   return CallSeqStart.getValue(CallSeqStart->getNumValues() - 2);
5299 }
5300 
5301 // Creates the node that moves a function's address into the count register
5302 // to prepare for an indirect call instruction.
5303 static void prepareIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5304                                 SDValue &Glue, SDValue &Chain,
5305                                 const SDLoc &dl) {
5306   SDValue MTCTROps[] = {Chain, Callee, Glue};
5307   EVT ReturnTypes[] = {MVT::Other, MVT::Glue};
5308   Chain = DAG.getNode(PPCISD::MTCTR, dl, makeArrayRef(ReturnTypes, 2),
5309                       makeArrayRef(MTCTROps, Glue.getNode() ? 3 : 2));
5310   // The glue is the second value produced.
5311   Glue = Chain.getValue(1);
5312 }
5313 
5314 static void prepareDescriptorIndirectCall(SelectionDAG &DAG, SDValue &Callee,
5315                                           SDValue &Glue, SDValue &Chain,
5316                                           SDValue CallSeqStart,
5317                                           const CallBase *CB, const SDLoc &dl,
5318                                           bool hasNest,
5319                                           const PPCSubtarget &Subtarget) {
5320   // Function pointers in the 64-bit SVR4 ABI do not point to the function
5321   // entry point, but to the function descriptor (the function entry point
5322   // address is part of the function descriptor though).
5323   // The function descriptor is a three doubleword structure with the
5324   // following fields: function entry point, TOC base address and
5325   // environment pointer.
5326   // Thus for a call through a function pointer, the following actions need
5327   // to be performed:
5328   //   1. Save the TOC of the caller in the TOC save area of its stack
5329   //      frame (this is done in LowerCall_64SVR4() or LowerCall_AIX()).
5330   //   2. Load the address of the function entry point from the function
5331   //      descriptor.
5332   //   3. Load the TOC of the callee from the function descriptor into r2.
5333   //   4. Load the environment pointer from the function descriptor into
5334   //      r11.
5335   //   5. Branch to the function entry point address.
5336   //   6. On return of the callee, the TOC of the caller needs to be
5337   //      restored (this is done in FinishCall()).
5338   //
5339   // The loads are scheduled at the beginning of the call sequence, and the
5340   // register copies are flagged together to ensure that no other
5341   // operations can be scheduled in between. E.g. without flagging the
5342   // copies together, a TOC access in the caller could be scheduled between
5343   // the assignment of the callee TOC and the branch to the callee, which leads
5344   // to incorrect code.
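  // As an illustration (offsets assumed to match descriptorTOCAnchorOffset()
  // and descriptorEnvironmentPointerOffset() used below), a 64-bit descriptor
  // is laid out as:
  //   +0  : function entry point address
  //   +8  : TOC base for the callee's module (loaded into r2)
  //   +16 : environment pointer (loaded into r11)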
5345 
5346   // Start by loading the function address from the descriptor.
5347   SDValue LDChain = getOutputChainFromCallSeq(CallSeqStart);
5348   auto MMOFlags = Subtarget.hasInvariantFunctionDescriptors()
5349                       ? (MachineMemOperand::MODereferenceable |
5350                          MachineMemOperand::MOInvariant)
5351                       : MachineMemOperand::MONone;
5352 
5353   MachinePointerInfo MPI(CB ? CB->getCalledOperand() : nullptr);
5354 
5355   // Registers used in building the DAG.
5356   const MCRegister EnvPtrReg = Subtarget.getEnvironmentPointerRegister();
5357   const MCRegister TOCReg = Subtarget.getTOCPointerRegister();
5358 
5359   // Offsets of descriptor members.
5360   const unsigned TOCAnchorOffset = Subtarget.descriptorTOCAnchorOffset();
5361   const unsigned EnvPtrOffset = Subtarget.descriptorEnvironmentPointerOffset();
5362 
5363   const MVT RegVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
5364   const unsigned Alignment = Subtarget.isPPC64() ? 8 : 4;
5365 
5366   // One load for the function's entry point address.
5367   SDValue LoadFuncPtr = DAG.getLoad(RegVT, dl, LDChain, Callee, MPI,
5368                                     Alignment, MMOFlags);
5369 
5370   // One for loading the TOC anchor for the module that contains the called
5371   // function.
5372   SDValue TOCOff = DAG.getIntPtrConstant(TOCAnchorOffset, dl);
5373   SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, Callee, TOCOff);
5374   SDValue TOCPtr =
5375       DAG.getLoad(RegVT, dl, LDChain, AddTOC,
5376                   MPI.getWithOffset(TOCAnchorOffset), Alignment, MMOFlags);
5377 
5378   // One for loading the environment pointer.
5379   SDValue PtrOff = DAG.getIntPtrConstant(EnvPtrOffset, dl);
5380   SDValue AddPtr = DAG.getNode(ISD::ADD, dl, RegVT, Callee, PtrOff);
5381   SDValue LoadEnvPtr =
5382       DAG.getLoad(RegVT, dl, LDChain, AddPtr,
5383                   MPI.getWithOffset(EnvPtrOffset), Alignment, MMOFlags);
5384 
5385 
5386   // Then copy the newly loaded TOC anchor to the TOC pointer.
5387   SDValue TOCVal = DAG.getCopyToReg(Chain, dl, TOCReg, TOCPtr, Glue);
5388   Chain = TOCVal.getValue(0);
5389   Glue = TOCVal.getValue(1);
5390 
5391   // If the function call has an explicit 'nest' parameter, it takes the
5392   // place of the environment pointer.
5393   assert((!hasNest || !Subtarget.isAIXABI()) &&
5394          "Nest parameter is not supported on AIX.");
5395   if (!hasNest) {
5396     SDValue EnvVal = DAG.getCopyToReg(Chain, dl, EnvPtrReg, LoadEnvPtr, Glue);
5397     Chain = EnvVal.getValue(0);
5398     Glue = EnvVal.getValue(1);
5399   }
5400 
5401   // The rest of the indirect call sequence is the same as the non-descriptor
5402   // DAG.
5403   prepareIndirectCall(DAG, LoadFuncPtr, Glue, Chain, dl);
5404 }
5405 
5406 static void
5407 buildCallOperands(SmallVectorImpl<SDValue> &Ops,
5408                   PPCTargetLowering::CallFlags CFlags, const SDLoc &dl,
5409                   SelectionDAG &DAG,
5410                   SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
5411                   SDValue Glue, SDValue Chain, SDValue &Callee, int SPDiff,
5412                   const PPCSubtarget &Subtarget) {
5413   const bool IsPPC64 = Subtarget.isPPC64();
5414   // MVT for a general purpose register.
5415   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
5416 
5417   // First operand is always the chain.
5418   Ops.push_back(Chain);
5419 
5420   // If it's a direct call, pass the callee as the second operand.
5421   if (!CFlags.IsIndirect)
5422     Ops.push_back(Callee);
5423   else {
5424     assert(!CFlags.IsPatchPoint && "Patch point calls are not indirect.");
5425 
5426     // For the TOC based ABIs, we have saved the TOC pointer to the linkage area
5427     // on the stack (this would have been done in `LowerCall_64SVR4` or
5428     // `LowerCall_AIX`). The call instruction is a pseudo instruction that
5429     // represents both the indirect branch and a load that restores the TOC
5430     // pointer from the linkage area. The operand for the TOC restore is an add
5431     // of the TOC save offset to the stack pointer. This must be the second
5432     // operand: after the chain input but before any other variadic arguments.
5433     // For 64-bit ELFv2 ABI with PCRel, do not restore the TOC as it is not
5434     // saved or used.
5435     if (isTOCSaveRestoreRequired(Subtarget)) {
5436       const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
5437 
5438       SDValue StackPtr = DAG.getRegister(StackPtrReg, RegVT);
5439       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
5440       SDValue TOCOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
5441       SDValue AddTOC = DAG.getNode(ISD::ADD, dl, RegVT, StackPtr, TOCOff);
5442       Ops.push_back(AddTOC);
5443     }
5444 
5445     // Add the register used for the environment pointer.
5446     if (Subtarget.usesFunctionDescriptors() && !CFlags.HasNest)
5447       Ops.push_back(DAG.getRegister(Subtarget.getEnvironmentPointerRegister(),
5448                                     RegVT));
5449 
5450 
5451     // Add CTR register as callee so a bctr can be emitted later.
5452     if (CFlags.IsTailCall)
5453       Ops.push_back(DAG.getRegister(IsPPC64 ? PPC::CTR8 : PPC::CTR, RegVT));
5454   }
5455 
5456   // If this is a tail call add stack pointer delta.
5457   if (CFlags.IsTailCall)
5458     Ops.push_back(DAG.getConstant(SPDiff, dl, MVT::i32));
5459 
5460   // Add argument registers to the end of the list so that they are known live
5461   // into the call.
5462   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
5463     Ops.push_back(DAG.getRegister(RegsToPass[i].first,
5464                                   RegsToPass[i].second.getValueType()));
5465 
5466   // We cannot add R2/X2 as an operand here for PATCHPOINT, because there is
5467   // no way to mark dependencies as implicit here.
5468   // We will add the R2/X2 dependency in EmitInstrWithCustomInserter.
5469   if ((Subtarget.is64BitELFABI() || Subtarget.isAIXABI()) &&
5470        !CFlags.IsPatchPoint && !Subtarget.isUsingPCRelativeCalls())
5471     Ops.push_back(DAG.getRegister(Subtarget.getTOCPointerRegister(), RegVT));
5472 
5473   // Add implicit use of CR bit 6 for 32-bit SVR4 vararg calls
5474   if (CFlags.IsVarArg && Subtarget.is32BitELFABI())
5475     Ops.push_back(DAG.getRegister(PPC::CR1EQ, MVT::i32));
5476 
5477   // Add a register mask operand representing the call-preserved registers.
5478   const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
5479   const uint32_t *Mask =
5480       TRI->getCallPreservedMask(DAG.getMachineFunction(), CFlags.CallConv);
5481   assert(Mask && "Missing call preserved mask for calling convention");
5482   Ops.push_back(DAG.getRegisterMask(Mask));
5483 
5484   // If the glue is valid, it is the last operand.
5485   if (Glue.getNode())
5486     Ops.push_back(Glue);
5487 }
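// For illustration only: for an indirect call on a TOC-based ABI the operand
// list built above is roughly
//   { Chain, ADD(sp, TOCSaveOffset), <env-ptr reg if descriptors>,
//     <CTR and SPDiff if tail call>, <argument registers>, <TOC reg>,
//     <CR1EQ if 32-bit SVR4 vararg>, <register mask>, <Glue if present> }
// with the <...> entries present only under the conditions handled above.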
5488 
5489 SDValue PPCTargetLowering::FinishCall(
5490     CallFlags CFlags, const SDLoc &dl, SelectionDAG &DAG,
5491     SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass, SDValue Glue,
5492     SDValue Chain, SDValue CallSeqStart, SDValue &Callee, int SPDiff,
5493     unsigned NumBytes, const SmallVectorImpl<ISD::InputArg> &Ins,
5494     SmallVectorImpl<SDValue> &InVals, const CallBase *CB) const {
5495 
5496   if ((Subtarget.is64BitELFABI() && !Subtarget.isUsingPCRelativeCalls()) ||
5497       Subtarget.isAIXABI())
5498     setUsesTOCBasePtr(DAG);
5499 
5500   unsigned CallOpc =
5501       getCallOpcode(CFlags, DAG.getMachineFunction().getFunction(), Callee,
5502                     Subtarget, DAG.getTarget());
5503 
5504   if (!CFlags.IsIndirect)
5505     Callee = transformCallee(Callee, DAG, dl, Subtarget);
5506   else if (Subtarget.usesFunctionDescriptors())
5507     prepareDescriptorIndirectCall(DAG, Callee, Glue, Chain, CallSeqStart, CB,
5508                                   dl, CFlags.HasNest, Subtarget);
5509   else
5510     prepareIndirectCall(DAG, Callee, Glue, Chain, dl);
5511 
5512   // Build the operand list for the call instruction.
5513   SmallVector<SDValue, 8> Ops;
5514   buildCallOperands(Ops, CFlags, dl, DAG, RegsToPass, Glue, Chain, Callee,
5515                     SPDiff, Subtarget);
5516 
5517   // Emit tail call.
5518   if (CFlags.IsTailCall) {
5519     // Indirect tail calls when using PC Relative calls do not have the same
5520     // constraints.
5521     assert(((Callee.getOpcode() == ISD::Register &&
5522              cast<RegisterSDNode>(Callee)->getReg() == PPC::CTR) ||
5523             Callee.getOpcode() == ISD::TargetExternalSymbol ||
5524             Callee.getOpcode() == ISD::TargetGlobalAddress ||
5525             isa<ConstantSDNode>(Callee) ||
5526             (CFlags.IsIndirect && Subtarget.isUsingPCRelativeCalls())) &&
5527            "Expecting a global address, external symbol, absolute value, "
5528            "register or an indirect tail call when PC Relative calls are "
5529            "used.");
5530     // PC Relative calls also use TC_RETURN as the way to mark tail calls.
5531     assert(CallOpc == PPCISD::TC_RETURN &&
5532            "Unexpected call opcode for a tail call.");
5533     DAG.getMachineFunction().getFrameInfo().setHasTailCall();
5534     return DAG.getNode(CallOpc, dl, MVT::Other, Ops);
5535   }
5536 
5537   std::array<EVT, 2> ReturnTypes = {{MVT::Other, MVT::Glue}};
5538   Chain = DAG.getNode(CallOpc, dl, ReturnTypes, Ops);
5539   DAG.addNoMergeSiteInfo(Chain.getNode(), CFlags.NoMerge);
5540   Glue = Chain.getValue(1);
5541 
5542   // When performing tail call optimization the callee pops its arguments off
5543   // the stack. Account for this here so these bytes can be pushed back on in
5544   // PPCFrameLowering::eliminateCallFramePseudoInstr.
5545   int BytesCalleePops = (CFlags.CallConv == CallingConv::Fast &&
5546                          getTargetMachine().Options.GuaranteedTailCallOpt)
5547                             ? NumBytes
5548                             : 0;
5549 
5550   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
5551                              DAG.getIntPtrConstant(BytesCalleePops, dl, true),
5552                              Glue, dl);
5553   Glue = Chain.getValue(1);
5554 
5555   return LowerCallResult(Chain, Glue, CFlags.CallConv, CFlags.IsVarArg, Ins, dl,
5556                          DAG, InVals);
5557 }
5558 
5559 SDValue
5560 PPCTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
5561                              SmallVectorImpl<SDValue> &InVals) const {
5562   SelectionDAG &DAG                     = CLI.DAG;
5563   SDLoc &dl                             = CLI.DL;
5564   SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
5565   SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
5566   SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
5567   SDValue Chain                         = CLI.Chain;
5568   SDValue Callee                        = CLI.Callee;
5569   bool &isTailCall                      = CLI.IsTailCall;
5570   CallingConv::ID CallConv              = CLI.CallConv;
5571   bool isVarArg                         = CLI.IsVarArg;
5572   bool isPatchPoint                     = CLI.IsPatchPoint;
5573   const CallBase *CB                    = CLI.CB;
5574 
5575   if (isTailCall) {
5576     if (Subtarget.useLongCalls() && !(CB && CB->isMustTailCall()))
5577       isTailCall = false;
5578     else if (Subtarget.isSVR4ABI() && Subtarget.isPPC64())
5579       isTailCall = IsEligibleForTailCallOptimization_64SVR4(
5580           Callee, CallConv, CB, isVarArg, Outs, Ins, DAG);
5581     else
5582       isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv, isVarArg,
5583                                                      Ins, DAG);
5584     if (isTailCall) {
5585       ++NumTailCalls;
5586       if (!getTargetMachine().Options.GuaranteedTailCallOpt)
5587         ++NumSiblingCalls;
5588 
5589       // PC Relative calls no longer guarantee that the callee is a Global
5590       // Address Node. The callee could be an indirect tail call in which
5591       // case the SDValue for the callee could be a load (to load the address
5592       // of a function pointer) or it may be a register copy (to move the
5593       // address of the callee from a function parameter into a virtual
5594       // register). It may also be an ExternalSymbolSDNode (e.g. memcpy).
5595       assert((Subtarget.isUsingPCRelativeCalls() ||
5596               isa<GlobalAddressSDNode>(Callee)) &&
5597              "Callee should be an llvm::Function object.");
5598 
5599       LLVM_DEBUG(dbgs() << "TCO caller: " << DAG.getMachineFunction().getName()
5600                         << "\nTCO callee: ");
5601       LLVM_DEBUG(Callee.dump());
5602     }
5603   }
5604 
5605   if (!isTailCall && CB && CB->isMustTailCall())
5606     report_fatal_error("failed to perform tail call elimination on a call "
5607                        "site marked musttail");
5608 
5609   // When long calls (i.e. indirect calls) are always used, calls are always
5610   // made via function pointer. If we have a function name, first translate it
5611   // into a pointer.
5612   if (Subtarget.useLongCalls() && isa<GlobalAddressSDNode>(Callee) &&
5613       !isTailCall)
5614     Callee = LowerGlobalAddress(Callee, DAG);
5615 
5616   CallFlags CFlags(
5617       CallConv, isTailCall, isVarArg, isPatchPoint,
5618       isIndirectCall(Callee, DAG, Subtarget, isPatchPoint),
5619       // hasNest
5620       Subtarget.is64BitELFABI() &&
5621           any_of(Outs, [](ISD::OutputArg Arg) { return Arg.Flags.isNest(); }),
5622       CLI.NoMerge);
5623 
5624   if (Subtarget.isAIXABI())
5625     return LowerCall_AIX(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5626                          InVals, CB);
5627 
5628   assert(Subtarget.isSVR4ABI());
5629   if (Subtarget.isPPC64())
5630     return LowerCall_64SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5631                             InVals, CB);
5632   return LowerCall_32SVR4(Chain, Callee, CFlags, Outs, OutVals, Ins, dl, DAG,
5633                           InVals, CB);
5634 }
5635 
5636 SDValue PPCTargetLowering::LowerCall_32SVR4(
5637     SDValue Chain, SDValue Callee, CallFlags CFlags,
5638     const SmallVectorImpl<ISD::OutputArg> &Outs,
5639     const SmallVectorImpl<SDValue> &OutVals,
5640     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5641     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5642     const CallBase *CB) const {
5643   // See PPCTargetLowering::LowerFormalArguments_32SVR4() for a description
5644   // of the 32-bit SVR4 ABI stack frame layout.
5645 
5646   const CallingConv::ID CallConv = CFlags.CallConv;
5647   const bool IsVarArg = CFlags.IsVarArg;
5648   const bool IsTailCall = CFlags.IsTailCall;
5649 
5650   assert((CallConv == CallingConv::C ||
5651           CallConv == CallingConv::Cold ||
5652           CallConv == CallingConv::Fast) && "Unknown calling convention!");
5653 
5654   const Align PtrAlign(4);
5655 
5656   MachineFunction &MF = DAG.getMachineFunction();
5657 
5658   // Mark this function as potentially containing a function that contains a
5659   // tail call. As a consequence the frame pointer will be used for dynamic
5660   // stack allocation and for restoring the caller's stack pointer in this
5661   // function's epilog. This is done because the tail-called function might
5662   // overwrite the value in this function's (MF) stack pointer stack slot 0(SP).
5663   if (getTargetMachine().Options.GuaranteedTailCallOpt &&
5664       CallConv == CallingConv::Fast)
5665     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5666 
5667   // Count how many bytes are to be pushed on the stack, including the linkage
5668   // area, parameter list area and the part of the local variable space which
5669   // contains copies of aggregates which are passed by value.
5670 
5671   // Assign locations to all of the outgoing arguments.
5672   SmallVector<CCValAssign, 16> ArgLocs;
5673   PPCCCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
5674 
5675   // Reserve space for the linkage area on the stack.
5676   CCInfo.AllocateStack(Subtarget.getFrameLowering()->getLinkageSize(),
5677                        PtrAlign);
5678   if (useSoftFloat())
5679     CCInfo.PreAnalyzeCallOperands(Outs);
5680 
5681   if (IsVarArg) {
5682     // Handle fixed and variable vector arguments differently.
5683     // Fixed vector arguments go into registers as long as registers are
5684     // available. Variable vector arguments always go into memory.
5685     unsigned NumArgs = Outs.size();
5686 
5687     for (unsigned i = 0; i != NumArgs; ++i) {
5688       MVT ArgVT = Outs[i].VT;
5689       ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
5690       bool Result;
5691 
5692       if (Outs[i].IsFixed) {
5693         Result = CC_PPC32_SVR4(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags,
5694                                CCInfo);
5695       } else {
5696         Result = CC_PPC32_SVR4_VarArg(i, ArgVT, ArgVT, CCValAssign::Full,
5697                                       ArgFlags, CCInfo);
5698       }
5699 
5700       if (Result) {
5701 #ifndef NDEBUG
5702         errs() << "Call operand #" << i << " has unhandled type "
5703              << EVT(ArgVT).getEVTString() << "\n";
5704 #endif
5705         llvm_unreachable(nullptr);
5706       }
5707     }
5708   } else {
5709     // All arguments are treated the same.
5710     CCInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4);
5711   }
5712   CCInfo.clearWasPPCF128();
5713 
5714   // Assign locations to all of the outgoing aggregate by value arguments.
5715   SmallVector<CCValAssign, 16> ByValArgLocs;
5716   CCState CCByValInfo(CallConv, IsVarArg, MF, ByValArgLocs, *DAG.getContext());
5717 
5718   // Reserve stack space for the allocations in CCInfo.
5719   CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrAlign);
5720 
5721   CCByValInfo.AnalyzeCallOperands(Outs, CC_PPC32_SVR4_ByVal);
5722 
5723   // Size of the linkage area, parameter list area and the part of the local
5724   // variable space where copies of aggregates which are passed by value are
5725   // stored.
5726   unsigned NumBytes = CCByValInfo.getNextStackOffset();
5727 
5728   // Calculate by how many bytes the stack has to be adjusted in case of tail
5729   // call optimization.
5730   int SPDiff = CalculateTailCallSPDiff(DAG, IsTailCall, NumBytes);
5731 
5732   // Adjust the stack pointer for the new arguments...
5733   // These operations are automatically eliminated by the prolog/epilog pass
5734   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
5735   SDValue CallSeqStart = Chain;
5736 
5737   // Load the return address and frame pointer so they can be moved somewhere
5738   // else later.
5739   SDValue LROp, FPOp;
5740   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
5741 
5742   // Set up a copy of the stack pointer for use loading and storing any
5743   // arguments that may not fit in the registers available for argument
5744   // passing.
5745   SDValue StackPtr = DAG.getRegister(PPC::R1, MVT::i32);
5746 
5747   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
5748   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
5749   SmallVector<SDValue, 8> MemOpChains;
5750 
5751   bool seenFloatArg = false;
5752   // Walk the register/memloc assignments, inserting copies/loads.
5753   // i - Tracks the index into the list of registers allocated for the call
5754   // RealArgIdx - Tracks the index into the list of actual function arguments
5755   // j - Tracks the index into the list of byval arguments
5756   for (unsigned i = 0, RealArgIdx = 0, j = 0, e = ArgLocs.size();
5757        i != e;
5758        ++i, ++RealArgIdx) {
5759     CCValAssign &VA = ArgLocs[i];
5760     SDValue Arg = OutVals[RealArgIdx];
5761     ISD::ArgFlagsTy Flags = Outs[RealArgIdx].Flags;
5762 
5763     if (Flags.isByVal()) {
5764       // Argument is an aggregate which is passed by value, thus we need to
5765       // create a copy of it in the local variable space of the current stack
5766       // frame (which is the stack frame of the caller) and pass the address of
5767       // this copy to the callee.
5768       assert((j < ByValArgLocs.size()) && "Index out of bounds!");
5769       CCValAssign &ByValVA = ByValArgLocs[j++];
5770       assert((VA.getValNo() == ByValVA.getValNo()) && "ValNo mismatch!");
5771 
5772       // Memory reserved in the local variable space of the caller's stack frame.
5773       unsigned LocMemOffset = ByValVA.getLocMemOffset();
5774 
5775       SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5776       PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5777                            StackPtr, PtrOff);
5778 
5779       // Create a copy of the argument in the local area of the current
5780       // stack frame.
5781       SDValue MemcpyCall =
5782         CreateCopyOfByValArgument(Arg, PtrOff,
5783                                   CallSeqStart.getNode()->getOperand(0),
5784                                   Flags, DAG, dl);
5785 
5786       // This must go outside the CALLSEQ_START..END.
5787       SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, NumBytes, 0,
5788                                                      SDLoc(MemcpyCall));
5789       DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5790                              NewCallSeqStart.getNode());
5791       Chain = CallSeqStart = NewCallSeqStart;
5792 
5793       // Pass the address of the aggregate copy on the stack either in a
5794       // physical register or in the parameter list area of the current stack
5795       // frame to the callee.
5796       Arg = PtrOff;
5797     }
5798 
5799     // When useCRBits() is true, there can be i1 arguments.
5800     // This is because getRegisterType(MVT::i1) => MVT::i1, while for other
5801     // integer types getRegisterType() => MVT::i32.
5802     // Extend i1 values here to ensure the callee will get an i32.
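    // For example (illustrative): an i1 %flag argument reaches the callee as
    // an i32 holding 0 or 1 when zero-extended, or 0 or -1 when sign-extended.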
5803     if (Arg.getValueType() == MVT::i1)
5804       Arg = DAG.getNode(Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
5805                         dl, MVT::i32, Arg);
5806 
5807     if (VA.isRegLoc()) {
5808       seenFloatArg |= VA.getLocVT().isFloatingPoint();
5809       // Put argument in a physical register.
5810       if (Subtarget.hasSPE() && Arg.getValueType() == MVT::f64) {
5811         bool IsLE = Subtarget.isLittleEndian();
5812         SDValue SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5813                         DAG.getIntPtrConstant(IsLE ? 0 : 1, dl));
5814         RegsToPass.push_back(std::make_pair(VA.getLocReg(), SVal.getValue(0)));
5815         SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
5816                            DAG.getIntPtrConstant(IsLE ? 1 : 0, dl));
5817         RegsToPass.push_back(std::make_pair(ArgLocs[++i].getLocReg(),
5818                              SVal.getValue(0)));
5819       } else
5820         RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
5821     } else {
5822       // Put argument in the parameter list area of the current stack frame.
5823       assert(VA.isMemLoc());
5824       unsigned LocMemOffset = VA.getLocMemOffset();
5825 
5826       if (!IsTailCall) {
5827         SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
5828         PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(MF.getDataLayout()),
5829                              StackPtr, PtrOff);
5830 
5831         MemOpChains.push_back(
5832             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
5833       } else {
5834         // Calculate and remember argument location.
5835         CalculateTailCallArgDest(DAG, MF, false, Arg, SPDiff, LocMemOffset,
5836                                  TailCallArguments);
5837       }
5838     }
5839   }
5840 
5841   if (!MemOpChains.empty())
5842     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
5843 
5844   // Build a sequence of copy-to-reg nodes chained together with token chain
5845   // and flag operands which copy the outgoing args into the appropriate regs.
5846   SDValue InFlag;
5847   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
5848     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
5849                              RegsToPass[i].second, InFlag);
5850     InFlag = Chain.getValue(1);
5851   }
5852 
5853   // Set CR bit 6 to true if this is a vararg call with floating args passed in
5854   // registers.
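  // Sketch of the expected lowering (mnemonics assumed): this becomes either
  //   creqv 6, 6, 6   (floating-point args passed in registers)
  // or
  //   crxor 6, 6, 6   (no floating-point args in registers)
  // immediately before the call, so a vararg callee knows whether it needs to
  // spill the FP argument registers.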
5855   if (IsVarArg) {
5856     SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
5857     SDValue Ops[] = { Chain, InFlag };
5858 
5859     Chain = DAG.getNode(seenFloatArg ? PPCISD::CR6SET : PPCISD::CR6UNSET,
5860                         dl, VTs, makeArrayRef(Ops, InFlag.getNode() ? 2 : 1));
5861 
5862     InFlag = Chain.getValue(1);
5863   }
5864 
5865   if (IsTailCall)
5866     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
5867                     TailCallArguments);
5868 
5869   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
5870                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
5871 }
5872 
5873 // Copy an argument into memory, being careful to do this outside the
5874 // call sequence for the call to which the argument belongs.
5875 SDValue PPCTargetLowering::createMemcpyOutsideCallSeq(
5876     SDValue Arg, SDValue PtrOff, SDValue CallSeqStart, ISD::ArgFlagsTy Flags,
5877     SelectionDAG &DAG, const SDLoc &dl) const {
5878   SDValue MemcpyCall = CreateCopyOfByValArgument(Arg, PtrOff,
5879                         CallSeqStart.getNode()->getOperand(0),
5880                         Flags, DAG, dl);
5881   // The MEMCPY must go outside the CALLSEQ_START..END.
5882   int64_t FrameSize = CallSeqStart.getConstantOperandVal(1);
5883   SDValue NewCallSeqStart = DAG.getCALLSEQ_START(MemcpyCall, FrameSize, 0,
5884                                                  SDLoc(MemcpyCall));
5885   DAG.ReplaceAllUsesWith(CallSeqStart.getNode(),
5886                          NewCallSeqStart.getNode());
5887   return NewCallSeqStart;
5888 }
5889 
5890 SDValue PPCTargetLowering::LowerCall_64SVR4(
5891     SDValue Chain, SDValue Callee, CallFlags CFlags,
5892     const SmallVectorImpl<ISD::OutputArg> &Outs,
5893     const SmallVectorImpl<SDValue> &OutVals,
5894     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
5895     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
5896     const CallBase *CB) const {
5897   bool isELFv2ABI = Subtarget.isELFv2ABI();
5898   bool isLittleEndian = Subtarget.isLittleEndian();
5899   unsigned NumOps = Outs.size();
5900   bool IsSibCall = false;
5901   bool IsFastCall = CFlags.CallConv == CallingConv::Fast;
5902 
5903   EVT PtrVT = getPointerTy(DAG.getDataLayout());
5904   unsigned PtrByteSize = 8;
5905 
5906   MachineFunction &MF = DAG.getMachineFunction();
5907 
5908   if (CFlags.IsTailCall && !getTargetMachine().Options.GuaranteedTailCallOpt)
5909     IsSibCall = true;
5910 
5911   // Mark this function as potentially containing a function that contains a
5912   // tail call. As a consequence, the frame pointer will be used for dynamic
5913   // allocation and for restoring the caller's stack pointer in this function's
5914   // epilogue. This is done because a tail-called function might overwrite the
5915   // value in this function's (MF) stack pointer save slot, 0(SP).
5916   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
5917     MF.getInfo<PPCFunctionInfo>()->setHasFastCall();
5918 
5919   assert(!(IsFastCall && CFlags.IsVarArg) &&
5920          "fastcc not supported on varargs functions");
5921 
5922   // Count how many bytes are to be pushed on the stack, including the linkage
5923   // area, and parameter passing area.  On ELFv1, the linkage area is 48 bytes
5924   // reserved space for [SP][CR][LR][2 x unused][TOC]; on ELFv2, the linkage
5925   // area is 32 bytes reserved space for [SP][CR][LR][TOC].
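  // (Per that ordering, the ELFv2 linkage slots sit at offsets 0, 8, 16 and
  // 24 from the stack pointer, and the parameter save area begins immediately
  // after the linkage area.)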
5926   unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
5927   unsigned NumBytes = LinkageSize;
5928   unsigned GPR_idx = 0, FPR_idx = 0, VR_idx = 0;
5929 
5930   static const MCPhysReg GPR[] = {
5931     PPC::X3, PPC::X4, PPC::X5, PPC::X6,
5932     PPC::X7, PPC::X8, PPC::X9, PPC::X10,
5933   };
5934   static const MCPhysReg VR[] = {
5935     PPC::V2, PPC::V3, PPC::V4, PPC::V5, PPC::V6, PPC::V7, PPC::V8,
5936     PPC::V9, PPC::V10, PPC::V11, PPC::V12, PPC::V13
5937   };
5938 
5939   const unsigned NumGPRs = array_lengthof(GPR);
5940   const unsigned NumFPRs = useSoftFloat() ? 0 : 13;
5941   const unsigned NumVRs  = array_lengthof(VR);
5942 
5943   // On ELFv2, we can avoid allocating the parameter area if all the arguments
5944   // can be passed to the callee in registers.
5945   // For the fast calling convention, there is another check below.
5946   // Note: We should keep this consistent with LowerFormalArguments_64SVR4().
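  // (Illustration: a callee such as f(i64, double, <4 x i32>) uses one GPR,
  // one FPR and one VR, so on ELFv2 no parameter area needs to be allocated
  // for the call.)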
5947   bool HasParameterArea = !isELFv2ABI || CFlags.IsVarArg || IsFastCall;
5948   if (!HasParameterArea) {
5949     unsigned ParamAreaSize = NumGPRs * PtrByteSize;
5950     unsigned AvailableFPRs = NumFPRs;
5951     unsigned AvailableVRs = NumVRs;
5952     unsigned NumBytesTmp = NumBytes;
5953     for (unsigned i = 0; i != NumOps; ++i) {
5954       if (Outs[i].Flags.isNest()) continue;
5955       if (CalculateStackSlotUsed(Outs[i].VT, Outs[i].ArgVT, Outs[i].Flags,
5956                                  PtrByteSize, LinkageSize, ParamAreaSize,
5957                                  NumBytesTmp, AvailableFPRs, AvailableVRs))
5958         HasParameterArea = true;
5959     }
5960   }
5961 
5962   // When using the fast calling convention, we don't provide backing for
5963   // arguments that will be in registers.
5964   unsigned NumGPRsUsed = 0, NumFPRsUsed = 0, NumVRsUsed = 0;
5965 
5966   // Avoid allocating parameter area for fastcc functions if all the arguments
5967   // can be passed in the registers.
5968   if (IsFastCall)
5969     HasParameterArea = false;
5970 
5971   // Add up all the space actually used.
5972   for (unsigned i = 0; i != NumOps; ++i) {
5973     ISD::ArgFlagsTy Flags = Outs[i].Flags;
5974     EVT ArgVT = Outs[i].VT;
5975     EVT OrigVT = Outs[i].ArgVT;
5976 
5977     if (Flags.isNest())
5978       continue;
5979 
5980     if (IsFastCall) {
5981       if (Flags.isByVal()) {
5982         NumGPRsUsed += (Flags.getByValSize()+7)/8;
5983         if (NumGPRsUsed > NumGPRs)
5984           HasParameterArea = true;
5985       } else {
5986         switch (ArgVT.getSimpleVT().SimpleTy) {
5987         default: llvm_unreachable("Unexpected ValueType for argument!");
5988         case MVT::i1:
5989         case MVT::i32:
5990         case MVT::i64:
5991           if (++NumGPRsUsed <= NumGPRs)
5992             continue;
5993           break;
5994         case MVT::v4i32:
5995         case MVT::v8i16:
5996         case MVT::v16i8:
5997         case MVT::v2f64:
5998         case MVT::v2i64:
5999         case MVT::v1i128:
6000         case MVT::f128:
6001           if (++NumVRsUsed <= NumVRs)
6002             continue;
6003           break;
6004         case MVT::v4f32:
6005           if (++NumVRsUsed <= NumVRs)
6006             continue;
6007           break;
6008         case MVT::f32:
6009         case MVT::f64:
6010           if (++NumFPRsUsed <= NumFPRs)
6011             continue;
6012           break;
6013         }
6014         HasParameterArea = true;
6015       }
6016     }
6017 
6018     /* Respect alignment of argument on the stack.  */
6019     auto Alignment =
6020         CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6021     NumBytes = alignTo(NumBytes, Alignment);
6022 
6023     NumBytes += CalculateStackSlotSize(ArgVT, Flags, PtrByteSize);
6024     if (Flags.isInConsecutiveRegsLast())
6025       NumBytes = ((NumBytes + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6026   }
6027 
6028   unsigned NumBytesActuallyUsed = NumBytes;
6029 
6030   // In the old ELFv1 ABI, the prolog code of the callee may store up to 8 GPR
6031   // argument registers to the stack, allowing va_start to index over them in
6032   // memory if the callee is variadic.
6033   // Because we cannot tell if this is needed on the caller side, we have to
6034   // conservatively assume that it is needed.  As such, make sure we have at
6035   // least enough stack space for the caller to store the 8 GPRs.
6036   // In the ELFv2 ABI, we allocate the parameter area iff a callee
6037   // really requires memory operands, e.g. a vararg function.
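  // (Illustration: even for a callee taking a single i64, NumBytes becomes
  // LinkageSize + 64 below whenever a parameter area is required, since the
  // callee's prologue may store r3..r10 to their home slots.)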
6038   if (HasParameterArea)
6039     NumBytes = std::max(NumBytes, LinkageSize + 8 * PtrByteSize);
6040   else
6041     NumBytes = LinkageSize;
6042 
6043   // Tail call needs the stack to be aligned.
6044   if (getTargetMachine().Options.GuaranteedTailCallOpt && IsFastCall)
6045     NumBytes = EnsureStackAlignment(Subtarget.getFrameLowering(), NumBytes);
6046 
6047   int SPDiff = 0;
6048 
6049   // Calculate by how many bytes the stack has to be adjusted in case of tail
6050   // call optimization.
6051   if (!IsSibCall)
6052     SPDiff = CalculateTailCallSPDiff(DAG, CFlags.IsTailCall, NumBytes);
6053 
6054   // To protect arguments on the stack from being clobbered in a tail call,
6055   // force all the loads to happen before doing any other lowering.
6056   if (CFlags.IsTailCall)
6057     Chain = DAG.getStackArgumentTokenFactor(Chain);
6058 
6059   // Adjust the stack pointer for the new arguments...
6060   // These operations are automatically eliminated by the prolog/epilog pass
6061   if (!IsSibCall)
6062     Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
6063   SDValue CallSeqStart = Chain;
6064 
6065   // Load the return address and frame pointer so they can be moved somewhere
6066   // else later.
6067   SDValue LROp, FPOp;
6068   Chain = EmitTailCallLoadFPAndRetAddr(DAG, SPDiff, Chain, LROp, FPOp, dl);
6069 
6070   // Set up a copy of the stack pointer for use loading and storing any
6071   // arguments that may not fit in the registers available for argument
6072   // passing.
6073   SDValue StackPtr = DAG.getRegister(PPC::X1, MVT::i64);
6074 
6075   // Figure out which arguments are going to go in registers, and which in
6076   // memory.  Also, if this is a vararg function, floating point operations
6077   // must be stored to our stack, and loaded into integer regs as well, if
6078   // any integer regs are available for argument passing.
6079   unsigned ArgOffset = LinkageSize;
6080 
6081   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
6082   SmallVector<TailCallArgumentInfo, 8> TailCallArguments;
6083 
6084   SmallVector<SDValue, 8> MemOpChains;
6085   for (unsigned i = 0; i != NumOps; ++i) {
6086     SDValue Arg = OutVals[i];
6087     ISD::ArgFlagsTy Flags = Outs[i].Flags;
6088     EVT ArgVT = Outs[i].VT;
6089     EVT OrigVT = Outs[i].ArgVT;
6090 
6091     // PtrOff will be used to store the current argument to the stack if a
6092     // register cannot be found for it.
6093     SDValue PtrOff;
6094 
6095     // We re-align the argument offset for each argument, except when using the
6096     // fast calling convention, when we need to make sure we do that only when
6097     // we'll actually use a stack slot.
6098     auto ComputePtrOff = [&]() {
6099       /* Respect alignment of argument on the stack.  */
6100       auto Alignment =
6101           CalculateStackSlotAlignment(ArgVT, OrigVT, Flags, PtrByteSize);
6102       ArgOffset = alignTo(ArgOffset, Alignment);
6103 
6104       PtrOff = DAG.getConstant(ArgOffset, dl, StackPtr.getValueType());
6105 
6106       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6107     };
6108 
6109     if (!IsFastCall) {
6110       ComputePtrOff();
6111 
6112       /* Compute GPR index associated with argument offset.  */
6113       GPR_idx = (ArgOffset - LinkageSize) / PtrByteSize;
6114       GPR_idx = std::min(GPR_idx, NumGPRs);
6115     }
6116 
6117     // Promote integers to 64-bit values.
6118     if (Arg.getValueType() == MVT::i32 || Arg.getValueType() == MVT::i1) {
6119       // FIXME: Should this use ANY_EXTEND if neither sext nor zext?
6120       unsigned ExtOp = Flags.isSExt() ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
6121       Arg = DAG.getNode(ExtOp, dl, MVT::i64, Arg);
6122     }
6123 
6124     // FIXME memcpy is used way more than necessary.  Correctness first.
6125     // Note: "by value" is code for passing a structure by value, not
6126     // basic types.
6127     if (Flags.isByVal()) {
6128       // Note: Size includes alignment padding, so
6129       //   struct x { short a; char b; }
6130       // will have Size = 4.  With #pragma pack(1), it will have Size = 3.
6131       // These are the proper values we need for right-justifying the
6132       // aggregate in a parameter register.
6133       unsigned Size = Flags.getByValSize();
6134 
6135       // An empty aggregate parameter takes up no storage and no
6136       // registers.
6137       if (Size == 0)
6138         continue;
6139 
6140       if (IsFastCall)
6141         ComputePtrOff();
6142 
6143       // All aggregates smaller than 8 bytes must be passed right-justified.
6144       if (Size==1 || Size==2 || Size==4) {
6145         EVT VT = (Size==1) ? MVT::i8 : ((Size==2) ? MVT::i16 : MVT::i32);
6146         if (GPR_idx != NumGPRs) {
6147           SDValue Load = DAG.getExtLoad(ISD::EXTLOAD, dl, PtrVT, Chain, Arg,
6148                                         MachinePointerInfo(), VT);
6149           MemOpChains.push_back(Load.getValue(1));
6150           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6151 
6152           ArgOffset += PtrByteSize;
6153           continue;
6154         }
6155       }
6156 
6157       if (GPR_idx == NumGPRs && Size < 8) {
6158         SDValue AddPtr = PtrOff;
6159         if (!isLittleEndian) {
6160           SDValue Const = DAG.getConstant(PtrByteSize - Size, dl,
6161                                           PtrOff.getValueType());
6162           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6163         }
6164         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6165                                                           CallSeqStart,
6166                                                           Flags, DAG, dl);
6167         ArgOffset += PtrByteSize;
6168         continue;
6169       }
6170       // Copy entire object into memory.  There are cases where gcc-generated
6171       // code assumes it is there, even if it could be put entirely into
6172       // registers.  (This is not what the doc says.)
6173 
6174       // FIXME: The above statement is likely due to a misunderstanding of the
6175       // documents.  All arguments must be copied into the parameter area BY
6176       // THE CALLEE in the event that the callee takes the address of any
6177       // formal argument.  That has not yet been implemented.  However, it is
6178       // reasonable to use the stack area as a staging area for the register
6179       // load.
6180 
6181       // Skip this for small aggregates, as we will use the same slot for a
6182       // right-justified copy, below.
6183       if (Size >= 8)
6184         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, PtrOff,
6185                                                           CallSeqStart,
6186                                                           Flags, DAG, dl);
6187 
6188       // When a register is available, pass a small aggregate right-justified.
6189       if (Size < 8 && GPR_idx != NumGPRs) {
6190         // The easiest way to get this right-justified in a register
6191         // is to copy the structure into the rightmost portion of a
6192         // local variable slot, then load the whole slot into the
6193         // register.
6194         // FIXME: The memcpy seems to produce pretty awful code for
6195         // small aggregates, particularly for packed ones.
6196         // FIXME: It would be preferable to use the slot in the
6197         // parameter save area instead of a new local variable.
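        // (Illustration: on big-endian, a 3-byte aggregate is copied to
        // PtrOff + 5 within the doubleword slot, so the 8-byte load below
        // yields its bytes right-justified in the GPR.)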
6198         SDValue AddPtr = PtrOff;
6199         if (!isLittleEndian) {
6200           SDValue Const = DAG.getConstant(8 - Size, dl, PtrOff.getValueType());
6201           AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, Const);
6202         }
6203         Chain = CallSeqStart = createMemcpyOutsideCallSeq(Arg, AddPtr,
6204                                                           CallSeqStart,
6205                                                           Flags, DAG, dl);
6206 
6207         // Load the slot into the register.
6208         SDValue Load =
6209             DAG.getLoad(PtrVT, dl, Chain, PtrOff, MachinePointerInfo());
6210         MemOpChains.push_back(Load.getValue(1));
6211         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6212 
6213         // Done with this argument.
6214         ArgOffset += PtrByteSize;
6215         continue;
6216       }
6217 
6218       // For aggregates larger than PtrByteSize, copy the pieces of the
6219       // object that fit into registers from the parameter save area.
6220       for (unsigned j=0; j<Size; j+=PtrByteSize) {
6221         SDValue Const = DAG.getConstant(j, dl, PtrOff.getValueType());
6222         SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
6223         if (GPR_idx != NumGPRs) {
6224           SDValue Load =
6225               DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo());
6226           MemOpChains.push_back(Load.getValue(1));
6227           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6228           ArgOffset += PtrByteSize;
6229         } else {
6230           ArgOffset += ((Size - j + PtrByteSize-1)/PtrByteSize)*PtrByteSize;
6231           break;
6232         }
6233       }
6234       continue;
6235     }
6236 
6237     switch (Arg.getSimpleValueType().SimpleTy) {
6238     default: llvm_unreachable("Unexpected ValueType for argument!");
6239     case MVT::i1:
6240     case MVT::i32:
6241     case MVT::i64:
6242       if (Flags.isNest()) {
6243         // The 'nest' parameter, if any, is passed in R11.
6244         RegsToPass.push_back(std::make_pair(PPC::X11, Arg));
6245         break;
6246       }
6247 
6248       // These can be scalar arguments or elements of an integer array type
6249       // passed directly.  Clang may use those instead of "byval" aggregate
6250       // types to avoid forcing arguments to memory unnecessarily.
6251       if (GPR_idx != NumGPRs) {
6252         RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Arg));
6253       } else {
6254         if (IsFastCall)
6255           ComputePtrOff();
6256 
6257         assert(HasParameterArea &&
6258                "Parameter area must exist to pass an argument in memory.");
6259         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6260                          true, CFlags.IsTailCall, false, MemOpChains,
6261                          TailCallArguments, dl);
6262         if (IsFastCall)
6263           ArgOffset += PtrByteSize;
6264       }
6265       if (!IsFastCall)
6266         ArgOffset += PtrByteSize;
6267       break;
6268     case MVT::f32:
6269     case MVT::f64: {
6270       // These can be scalar arguments or elements of a float array type
6271       // passed directly.  The latter are used to implement ELFv2 homogeneous
6272       // float aggregates.
6273 
6274       // Named arguments go into FPRs first, and once they overflow, the
6275       // remaining arguments go into GPRs and then the parameter save area.
6276       // Unnamed arguments for vararg functions always go to GPRs and
6277       // then the parameter save area.  For now, put all arguments to vararg
6278       // routines always in both locations (FPR *and* GPR or stack slot).
6279       bool NeedGPROrStack = CFlags.IsVarArg || FPR_idx == NumFPRs;
6280       bool NeededLoad = false;
6281 
6282       // First load the argument into the next available FPR.
6283       if (FPR_idx != NumFPRs)
6284         RegsToPass.push_back(std::make_pair(FPR[FPR_idx++], Arg));
6285 
6286       // Next, load the argument into GPR or stack slot if needed.
6287       if (!NeedGPROrStack)
6288         ;
6289       else if (GPR_idx != NumGPRs && !IsFastCall) {
6290         // FIXME: We may want to re-enable this for CallingConv::Fast on the P8
6291         // once we support fp <-> gpr moves.
6292 
6293         // In the non-vararg case, this can only ever happen in the
6294         // presence of f32 array types, since otherwise we never run
6295         // out of FPRs before running out of GPRs.
6296         SDValue ArgVal;
6297 
6298         // Double values are always passed in a single GPR.
6299         if (Arg.getValueType() != MVT::f32) {
6300           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
6301 
6302         // Non-array float values are extended and passed in a GPR.
6303         } else if (!Flags.isInConsecutiveRegs()) {
6304           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6305           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6306 
6307         // If we have an array of floats, we collect every odd element
6308         // together with its predecessor into one GPR.
6309         } else if (ArgOffset % PtrByteSize != 0) {
6310           SDValue Lo, Hi;
6311           Lo = DAG.getNode(ISD::BITCAST, dl, MVT::i32, OutVals[i - 1]);
6312           Hi = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6313           if (!isLittleEndian)
6314             std::swap(Lo, Hi);
6315           ArgVal = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6316 
6317         // The final element, if even, goes into the first half of a GPR.
6318         } else if (Flags.isInConsecutiveRegsLast()) {
6319           ArgVal = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
6320           ArgVal = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i64, ArgVal);
6321           if (!isLittleEndian)
6322             ArgVal = DAG.getNode(ISD::SHL, dl, MVT::i64, ArgVal,
6323                                  DAG.getConstant(32, dl, MVT::i32));
6324 
6325         // Non-final even elements are skipped; they will be handled
6326         // together with the subsequent argument on the next go-around.
6327         } else
6328           ArgVal = SDValue();
6329 
6330         if (ArgVal.getNode())
6331           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], ArgVal));
6332       } else {
6333         if (IsFastCall)
6334           ComputePtrOff();
6335 
6336         // Single-precision floating-point values are mapped to the
6337         // second (rightmost) word of the stack doubleword.
6338         if (Arg.getValueType() == MVT::f32 &&
6339             !isLittleEndian && !Flags.isInConsecutiveRegs()) {
6340           SDValue ConstFour = DAG.getConstant(4, dl, PtrOff.getValueType());
6341           PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff, ConstFour);
6342         }
6343 
6344         assert(HasParameterArea &&
6345                "Parameter area must exist to pass an argument in memory.");
6346         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6347                          true, CFlags.IsTailCall, false, MemOpChains,
6348                          TailCallArguments, dl);
6349 
6350         NeededLoad = true;
6351       }
6352       // When passing an array of floats, the array occupies consecutive
6353       // space in the argument area; only round up to the next doubleword
6354       // at the end of the array.  Otherwise, each float takes 8 bytes.
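      // (Illustration: an ELFv2 homogeneous aggregate of three floats occupies
      // 12 bytes of the argument area, rounded up to 16 after the last
      // element; a lone scalar f32 still takes a full 8 bytes.)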
6355       if (!IsFastCall || NeededLoad) {
6356         ArgOffset += (Arg.getValueType() == MVT::f32 &&
6357                       Flags.isInConsecutiveRegs()) ? 4 : 8;
6358         if (Flags.isInConsecutiveRegsLast())
6359           ArgOffset = ((ArgOffset + PtrByteSize - 1)/PtrByteSize) * PtrByteSize;
6360       }
6361       break;
6362     }
6363     case MVT::v4f32:
6364     case MVT::v4i32:
6365     case MVT::v8i16:
6366     case MVT::v16i8:
6367     case MVT::v2f64:
6368     case MVT::v2i64:
6369     case MVT::v1i128:
6370     case MVT::f128:
6371       // These can be scalar arguments or elements of a vector array type
6372       // passed directly.  The latter are used to implement ELFv2 homogeneous
6373       // vector aggregates.
6374 
6375       // For a varargs call, named arguments go into VRs or on the stack as
6376       // usual; unnamed arguments always go to the stack or the corresponding
6377       // GPRs when within range.  For now, we always put the value in both
6378       // locations (or even all three).
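      // (Illustration: a vararg v4i32 is stored to its parameter-area slot,
      // reloaded into a VR if one remains, and also reloaded into up to two
      // GPRs below.)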
6379       if (CFlags.IsVarArg) {
6380         assert(HasParameterArea &&
6381                "Parameter area must exist if we have a varargs call.");
6382         // We could elide this store in the case where the object fits
6383         // entirely in R registers.  Maybe later.
6384         SDValue Store =
6385             DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
6386         MemOpChains.push_back(Store);
6387         if (VR_idx != NumVRs) {
6388           SDValue Load =
6389               DAG.getLoad(MVT::v4f32, dl, Store, PtrOff, MachinePointerInfo());
6390           MemOpChains.push_back(Load.getValue(1));
6391           RegsToPass.push_back(std::make_pair(VR[VR_idx++], Load));
6392         }
6393         ArgOffset += 16;
6394         for (unsigned i=0; i<16; i+=PtrByteSize) {
6395           if (GPR_idx == NumGPRs)
6396             break;
6397           SDValue Ix = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
6398                                    DAG.getConstant(i, dl, PtrVT));
6399           SDValue Load =
6400               DAG.getLoad(PtrVT, dl, Store, Ix, MachinePointerInfo());
6401           MemOpChains.push_back(Load.getValue(1));
6402           RegsToPass.push_back(std::make_pair(GPR[GPR_idx++], Load));
6403         }
6404         break;
6405       }
6406 
6407       // Non-varargs Altivec params go into VRs or on the stack.
6408       if (VR_idx != NumVRs) {
6409         RegsToPass.push_back(std::make_pair(VR[VR_idx++], Arg));
6410       } else {
6411         if (IsFastCall)
6412           ComputePtrOff();
6413 
6414         assert(HasParameterArea &&
6415                "Parameter area must exist to pass an argument in memory.");
6416         LowerMemOpCallTo(DAG, MF, Chain, Arg, PtrOff, SPDiff, ArgOffset,
6417                          true, CFlags.IsTailCall, true, MemOpChains,
6418                          TailCallArguments, dl);
6419         if (IsFastCall)
6420           ArgOffset += 16;
6421       }
6422 
6423       if (!IsFastCall)
6424         ArgOffset += 16;
6425       break;
6426     }
6427   }
6428 
6429   assert((!HasParameterArea || NumBytesActuallyUsed == ArgOffset) &&
6430          "mismatch in size of parameter area");
6431   (void)NumBytesActuallyUsed;
6432 
6433   if (!MemOpChains.empty())
6434     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
6435 
6436   // Check if this is an indirect call (MTCTR/BCTRL).
6437   // See prepareDescriptorIndirectCall and buildCallOperands for more
6438   // information about calls through function pointers in the 64-bit SVR4 ABI.
6439   if (CFlags.IsIndirect) {
6440     // For 64-bit ELFv2 ABI with PCRel, do not save the TOC of the
6441     // caller in the TOC save area.
6442     if (isTOCSaveRestoreRequired(Subtarget)) {
6443       assert(!CFlags.IsTailCall && "Indirect tail calls not supported");
6444       // Load r2 into a virtual register and store it to the TOC save area.
6445       setUsesTOCBasePtr(DAG);
6446       SDValue Val = DAG.getCopyFromReg(Chain, dl, PPC::X2, MVT::i64);
6447       // TOC save area offset.
6448       unsigned TOCSaveOffset = Subtarget.getFrameLowering()->getTOCSaveOffset();
6449       SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
6450       SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
6451       Chain = DAG.getStore(Val.getValue(1), dl, Val, AddPtr,
6452                            MachinePointerInfo::getStack(
6453                                DAG.getMachineFunction(), TOCSaveOffset));
6454     }
6455     // In the ELFv2 ABI, R12 must contain the address of an indirect callee.
6456     // This does not mean the MTCTR instruction must use R12; it's easier
6457     // to model this as an extra parameter, so do that.
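    // (The callee's global entry point is expected to derive its TOC pointer
    // from R12; see the ELFv2 ABI for details.)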
6458     if (isELFv2ABI && !CFlags.IsPatchPoint)
6459       RegsToPass.push_back(std::make_pair((unsigned)PPC::X12, Callee));
6460   }
6461 
6462   // Build a sequence of copy-to-reg nodes chained together with token chain
6463   // and flag operands which copy the outgoing args into the appropriate regs.
6464   SDValue InFlag;
6465   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
6466     Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
6467                              RegsToPass[i].second, InFlag);
6468     InFlag = Chain.getValue(1);
6469   }
6470 
6471   if (CFlags.IsTailCall && !IsSibCall)
6472     PrepareTailCall(DAG, InFlag, Chain, dl, SPDiff, NumBytes, LROp, FPOp,
6473                     TailCallArguments);
6474 
6475   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
6476                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
6477 }
6478 
6479 // Returns true when the shadow of a general purpose argument register
6480 // in the parameter save area is aligned to at least 'RequiredAlign'.
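// For example (with the usual AIX linkage sizes), the 64-bit shadows of X3,
// X5, X7 and X9 land at 16-byte-aligned offsets 48, 64, 80 and 96, while X4,
// X6, X8 and X10 are only 8-byte aligned.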
6481 static bool isGPRShadowAligned(MCPhysReg Reg, Align RequiredAlign) {
6482   assert(RequiredAlign.value() <= 16 &&
6483          "Required alignment greater than stack alignment.");
6484   switch (Reg) {
6485   default:
6486     report_fatal_error("called on invalid register.");
6487   case PPC::R5:
6488   case PPC::R9:
6489   case PPC::X3:
6490   case PPC::X5:
6491   case PPC::X7:
6492   case PPC::X9:
6493     // These registers are 16-byte aligned, which is the strictest alignment
6494     // we can support.
6495     return true;
6496   case PPC::R3:
6497   case PPC::R7:
6498   case PPC::X4:
6499   case PPC::X6:
6500   case PPC::X8:
6501   case PPC::X10:
6502     // The shadow of these registers in the PSA is 8 byte aligned.
6503     return RequiredAlign <= 8;
6504   case PPC::R4:
6505   case PPC::R6:
6506   case PPC::R8:
6507   case PPC::R10:
6508     return RequiredAlign <= 4;
6509   }
6510 }
6511 
6512 static bool CC_AIX(unsigned ValNo, MVT ValVT, MVT LocVT,
6513                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
6514                    CCState &S) {
6515   AIXCCState &State = static_cast<AIXCCState &>(S);
6516   const PPCSubtarget &Subtarget = static_cast<const PPCSubtarget &>(
6517       State.getMachineFunction().getSubtarget());
6518   const bool IsPPC64 = Subtarget.isPPC64();
6519   const Align PtrAlign = IsPPC64 ? Align(8) : Align(4);
6520   const MVT RegVT = IsPPC64 ? MVT::i64 : MVT::i32;
6521 
6522   if (ValVT == MVT::f128)
6523     report_fatal_error("f128 is unimplemented on AIX.");
6524 
6525   if (ArgFlags.isNest())
6526     report_fatal_error("Nest arguments are unimplemented.");
6527 
6528   static const MCPhysReg GPR_32[] = {// 32-bit registers.
6529                                      PPC::R3, PPC::R4, PPC::R5, PPC::R6,
6530                                      PPC::R7, PPC::R8, PPC::R9, PPC::R10};
6531   static const MCPhysReg GPR_64[] = {// 64-bit registers.
6532                                      PPC::X3, PPC::X4, PPC::X5, PPC::X6,
6533                                      PPC::X7, PPC::X8, PPC::X9, PPC::X10};
6534 
6535   static const MCPhysReg VR[] = {// Vector registers.
6536                                  PPC::V2,  PPC::V3,  PPC::V4,  PPC::V5,
6537                                  PPC::V6,  PPC::V7,  PPC::V8,  PPC::V9,
6538                                  PPC::V10, PPC::V11, PPC::V12, PPC::V13};
6539 
6540   if (ArgFlags.isByVal()) {
6541     if (ArgFlags.getNonZeroByValAlign() > PtrAlign)
6542       report_fatal_error("Pass-by-value arguments with alignment greater than "
6543                          "register width are not supported.");
6544 
6545     const unsigned ByValSize = ArgFlags.getByValSize();
6546 
6547     // An empty aggregate parameter takes up no storage and no registers,
6548     // but still needs a MemLoc so the formal-arguments side gets a stack slot.
6549     if (ByValSize == 0) {
6550       State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6551                                        State.getNextStackOffset(), RegVT,
6552                                        LocInfo));
6553       return false;
6554     }
6555 
6556     const unsigned StackSize = alignTo(ByValSize, PtrAlign);
6557     unsigned Offset = State.AllocateStack(StackSize, PtrAlign);
6558     for (const unsigned E = Offset + StackSize; Offset < E;
6559          Offset += PtrAlign.value()) {
6560       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6561         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6562       else {
6563         State.addLoc(CCValAssign::getMem(ValNo, MVT::INVALID_SIMPLE_VALUE_TYPE,
6564                                          Offset, MVT::INVALID_SIMPLE_VALUE_TYPE,
6565                                          LocInfo));
6566         break;
6567       }
6568     }
6569     return false;
6570   }
6571 
6572   // Arguments always reserve parameter save area.
6573   switch (ValVT.SimpleTy) {
6574   default:
6575     report_fatal_error("Unhandled value type for argument.");
6576   case MVT::i64:
6577     // i64 arguments should have been split to i32 for PPC32.
6578     assert(IsPPC64 && "PPC32 should have split i64 values.");
6579     LLVM_FALLTHROUGH;
6580   case MVT::i1:
6581   case MVT::i32: {
6582     const unsigned Offset = State.AllocateStack(PtrAlign.value(), PtrAlign);
6583     // AIX integer arguments are always passed in register width.
6584     if (ValVT.getFixedSizeInBits() < RegVT.getFixedSizeInBits())
6585       LocInfo = ArgFlags.isSExt() ? CCValAssign::LocInfo::SExt
6586                                   : CCValAssign::LocInfo::ZExt;
6587     if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32))
6588       State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6589     else
6590       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, RegVT, LocInfo));
6591 
6592     return false;
6593   }
6594   case MVT::f32:
6595   case MVT::f64: {
6596     // Parameter save area (PSA) is reserved even if the float is passed in an FPR.
6597     const unsigned StoreSize = LocVT.getStoreSize();
6598     // Floats are always 4-byte aligned in the PSA on AIX.
6599     // This includes f64 in 64-bit mode for ABI compatibility.
6600     const unsigned Offset =
6601         State.AllocateStack(IsPPC64 ? 8 : StoreSize, Align(4));
6602     unsigned FReg = State.AllocateReg(FPR);
6603     if (FReg)
6604       State.addLoc(CCValAssign::getReg(ValNo, ValVT, FReg, LocVT, LocInfo));
6605 
6606     // Reserve and initialize GPRs or initialize the PSA as required.
6607     for (unsigned I = 0; I < StoreSize; I += PtrAlign.value()) {
6608       if (unsigned Reg = State.AllocateReg(IsPPC64 ? GPR_64 : GPR_32)) {
6609         assert(FReg && "An FPR should be available when a GPR is reserved.");
6610         if (State.isVarArg()) {
6611           // Successfully reserved GPRs are only initialized for vararg calls.
6612           // Custom handling is required for:
6613           //   f64 in PPC32 needs to be split into 2 GPRs.
6614           //   f32 in PPC64 needs to occupy only lower 32 bits of 64-bit GPR.
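          //   (Illustration: a vararg f64 on PPC32 gets two custom RegLocs,
          //   e.g. R3 and R4, holding the two halves of the double.)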
6615           State.addLoc(
6616               CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6617         }
6618       } else {
6619         // If there are insufficient GPRs, the PSA needs to be initialized.
6620         // Initialization occurs even if an FPR was initialized for
6621         // compatibility with the AIX XL compiler. The full memory for the
6622         // argument will be initialized even if a prior word is saved in GPR.
6623         // A custom memLoc is used when the argument also passes in FPR so
6624         // that the callee handling can skip over it easily.
6625         State.addLoc(
6626             FReg ? CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT,
6627                                              LocInfo)
6628                  : CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6629         break;
6630       }
6631     }
6632 
6633     return false;
6634   }
6635   case MVT::v4f32:
6636   case MVT::v4i32:
6637   case MVT::v8i16:
6638   case MVT::v16i8:
6639   case MVT::v2i64:
6640   case MVT::v2f64:
6641   case MVT::v1i128: {
6642     const unsigned VecSize = 16;
6643     const Align VecAlign(VecSize);
6644 
6645     if (!State.isVarArg()) {
6646       // If there are vector registers remaining we don't consume any stack
6647       // space.
6648       if (unsigned VReg = State.AllocateReg(VR)) {
6649         State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6650         return false;
6651       }
6652       // Vectors passed on the stack do not shadow GPRs or FPRs even though they
6653       // might be allocated in the portion of the PSA that is shadowed by the
6654       // GPRs.
6655       const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6656       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6657       return false;
6658     }
6659 
6660     const unsigned PtrSize = IsPPC64 ? 8 : 4;
6661     ArrayRef<MCPhysReg> GPRs = IsPPC64 ? GPR_64 : GPR_32;
6662 
6663     unsigned NextRegIndex = State.getFirstUnallocated(GPRs);
6664     // Burn any underaligned registers and their shadowed stack space until
6665     // we reach the required alignment.
6666     while (NextRegIndex != GPRs.size() &&
6667            !isGPRShadowAligned(GPRs[NextRegIndex], VecAlign)) {
6668       // Shadow allocate register and its stack shadow.
6669       unsigned Reg = State.AllocateReg(GPRs);
6670       State.AllocateStack(PtrSize, PtrAlign);
6671       assert(Reg && "Allocating register unexpectedly failed.");
6672       (void)Reg;
6673       NextRegIndex = State.getFirstUnallocated(GPRs);
6674     }
6675 
6676     // Vectors that are passed as fixed arguments are handled differently.
6677     // They are passed in VRs if any are available (unlike arguments passed
6678     // through ellipses) and shadow GPRs (unlike arguments to non-vararg
6679     // functions).
6680     if (State.isFixed(ValNo)) {
6681       if (unsigned VReg = State.AllocateReg(VR)) {
6682         State.addLoc(CCValAssign::getReg(ValNo, ValVT, VReg, LocVT, LocInfo));
6683         // Shadow allocate GPRs and stack space even though we pass in a VR.
6684         for (unsigned I = 0; I != VecSize; I += PtrSize)
6685           State.AllocateReg(GPRs);
6686         State.AllocateStack(VecSize, VecAlign);
6687         return false;
6688       }
6689       // No vector registers remain so pass on the stack.
6690       const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6691       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6692       return false;
6693     }
6694 
6695     // If all GPRs are consumed then we pass the argument fully on the stack.
6696     if (NextRegIndex == GPRs.size()) {
6697       const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6698       State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6699       return false;
6700     }
6701 
6702     // Corner case for 32-bit codegen. We have 2 registers to pass the first
6703     // half of the argument, and then need to pass the remaining half on the
6704     // stack.
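    // (Illustration: a vararg v4i32 that reaches R9 takes R9 and R10 for its
    // first 8 bytes, while the custom MemLoc below spans all 16 bytes so the
    // callee can reload the whole vector from the stack.)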
6705     if (GPRs[NextRegIndex] == PPC::R9) {
6706       const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6707       State.addLoc(
6708           CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6709 
6710       const unsigned FirstReg = State.AllocateReg(PPC::R9);
6711       const unsigned SecondReg = State.AllocateReg(PPC::R10);
6712       assert(FirstReg && SecondReg &&
6713              "Allocating R9 or R10 unexpectedly failed.");
6714       State.addLoc(
6715           CCValAssign::getCustomReg(ValNo, ValVT, FirstReg, RegVT, LocInfo));
6716       State.addLoc(
6717           CCValAssign::getCustomReg(ValNo, ValVT, SecondReg, RegVT, LocInfo));
6718       return false;
6719     }
6720 
6721     // We have enough GPRs to fully pass the vector argument, and we have
6722     // already consumed any underaligned registers. Start with the custom
6723     // MemLoc and then the custom RegLocs.
6724     const unsigned Offset = State.AllocateStack(VecSize, VecAlign);
6725     State.addLoc(
6726         CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
6727     for (unsigned I = 0; I != VecSize; I += PtrSize) {
6728       const unsigned Reg = State.AllocateReg(GPRs);
6729       assert(Reg && "Failed to allocate register for vararg vector argument");
6730       State.addLoc(
6731           CCValAssign::getCustomReg(ValNo, ValVT, Reg, RegVT, LocInfo));
6732     }
6733     return false;
6734   }
6735   }
6736   return true;
6737 }
6738 
6739 static const TargetRegisterClass *getRegClassForSVT(MVT::SimpleValueType SVT,
6740                                                     bool IsPPC64) {
6741   assert((IsPPC64 || SVT != MVT::i64) &&
6742          "i64 should have been split for 32-bit codegen.");
6743 
6744   switch (SVT) {
6745   default:
6746     report_fatal_error("Unexpected value type for formal argument");
6747   case MVT::i1:
6748   case MVT::i32:
6749   case MVT::i64:
6750     return IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6751   case MVT::f32:
6752     return &PPC::F4RCRegClass;
6753   case MVT::f64:
6754     return &PPC::F8RCRegClass;
6755   case MVT::v4f32:
6756   case MVT::v4i32:
6757   case MVT::v8i16:
6758   case MVT::v16i8:
6759   case MVT::v2i64:
6760   case MVT::v2f64:
6761   case MVT::v1i128:
6762     return &PPC::VRRCRegClass;
6763   }
6764 }
6765 
6766 static SDValue truncateScalarIntegerArg(ISD::ArgFlagsTy Flags, EVT ValVT,
6767                                         SelectionDAG &DAG, SDValue ArgValue,
6768                                         MVT LocVT, const SDLoc &dl) {
6769   assert(ValVT.isScalarInteger() && LocVT.isScalarInteger());
6770   assert(ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits());
6771 
6772   if (Flags.isSExt())
6773     ArgValue = DAG.getNode(ISD::AssertSext, dl, LocVT, ArgValue,
6774                            DAG.getValueType(ValVT));
6775   else if (Flags.isZExt())
6776     ArgValue = DAG.getNode(ISD::AssertZext, dl, LocVT, ArgValue,
6777                            DAG.getValueType(ValVT));
6778 
6779   return DAG.getNode(ISD::TRUNCATE, dl, ValVT, ArgValue);
6780 }
6781 
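// Map an argument register to the offset of its shadow slot in the AIX
// parameter save area. (For example, with the 64-bit 48-byte linkage area,
// X5 maps to offset 48 + 8 * 2 = 64.)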
6782 static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
6783   const unsigned LASize = FL->getLinkageSize();
6784 
6785   if (PPC::GPRCRegClass.contains(Reg)) {
6786     assert(Reg >= PPC::R3 && Reg <= PPC::R10 &&
6787            "Reg must be a valid argument register!");
6788     return LASize + 4 * (Reg - PPC::R3);
6789   }
6790 
6791   if (PPC::G8RCRegClass.contains(Reg)) {
6792     assert(Reg >= PPC::X3 && Reg <= PPC::X10 &&
6793            "Reg must be a valid argument register!");
6794     return LASize + 8 * (Reg - PPC::X3);
6795   }
6796 
6797   llvm_unreachable("Only general purpose registers expected.");
6798 }
6799 
6800 //   AIX ABI Stack Frame Layout:
6801 //
6802 //   Low Memory +--------------------------------------------+
6803 //   SP   +---> | Back chain                                 | ---+
6804 //        |     +--------------------------------------------+    |
6805 //        |     | Saved Condition Register                   |    |
6806 //        |     +--------------------------------------------+    |
6807 //        |     | Saved Linkage Register                     |    |
6808 //        |     +--------------------------------------------+    | Linkage Area
6809 //        |     | Reserved for compilers                     |    |
6810 //        |     +--------------------------------------------+    |
6811 //        |     | Reserved for binders                       |    |
6812 //        |     +--------------------------------------------+    |
6813 //        |     | Saved TOC pointer                          | ---+
6814 //        |     +--------------------------------------------+
6815 //        |     | Parameter save area                        |
6816 //        |     +--------------------------------------------+
6817 //        |     | Alloca space                               |
6818 //        |     +--------------------------------------------+
6819 //        |     | Local variable space                       |
6820 //        |     +--------------------------------------------+
6821 //        |     | Float/int conversion temporary             |
6822 //        |     +--------------------------------------------+
6823 //        |     | Save area for AltiVec registers            |
6824 //        |     +--------------------------------------------+
6825 //        |     | AltiVec alignment padding                  |
6826 //        |     +--------------------------------------------+
6827 //        |     | Save area for VRSAVE register              |
6828 //        |     +--------------------------------------------+
6829 //        |     | Save area for General Purpose registers    |
6830 //        |     +--------------------------------------------+
6831 //        |     | Save area for Floating Point registers     |
6832 //        |     +--------------------------------------------+
6833 //        +---- | Back chain                                 |
6834 // High Memory  +--------------------------------------------+
6835 //
6836 //  Specifications:
6837 //  AIX 7.2 Assembler Language Reference
6838 //  Subroutine linkage convention
6839 
6840 SDValue PPCTargetLowering::LowerFormalArguments_AIX(
6841     SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
6842     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
6843     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
6844 
6845   assert((CallConv == CallingConv::C || CallConv == CallingConv::Cold ||
6846           CallConv == CallingConv::Fast) &&
6847          "Unexpected calling convention!");
6848 
6849   if (getTargetMachine().Options.GuaranteedTailCallOpt)
6850     report_fatal_error("Tail call support is unimplemented on AIX.");
6851 
6852   if (useSoftFloat())
6853     report_fatal_error("Soft float support is unimplemented on AIX.");
6854 
6855   const PPCSubtarget &Subtarget =
6856       static_cast<const PPCSubtarget &>(DAG.getSubtarget());
6857 
6858   const bool IsPPC64 = Subtarget.isPPC64();
6859   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
6860 
6861   // Assign locations to all of the incoming arguments.
6862   SmallVector<CCValAssign, 16> ArgLocs;
6863   MachineFunction &MF = DAG.getMachineFunction();
6864   MachineFrameInfo &MFI = MF.getFrameInfo();
6865   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
6866   AIXCCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
6867 
6868   const EVT PtrVT = getPointerTy(MF.getDataLayout());
6869   // Reserve space for the linkage area on the stack.
6870   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
6871   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
6872   CCInfo.AnalyzeFormalArguments(Ins, CC_AIX);
6873 
6874   SmallVector<SDValue, 8> MemOps;
6875 
6876   for (size_t I = 0, End = ArgLocs.size(); I != End; /* No increment here */) {
6877     CCValAssign &VA = ArgLocs[I++];
6878     MVT LocVT = VA.getLocVT();
6879     MVT ValVT = VA.getValVT();
6880     ISD::ArgFlagsTy Flags = Ins[VA.getValNo()].Flags;
6881     // For compatibility with the AIX XL compiler, the float args in the
6882     // parameter save area are initialized even if the argument is available
6883     // in a register.  The caller is required to initialize both the register
6884     // and memory; however, the callee can choose to expect it in either.
6885     // The memloc is dismissed here because the argument is retrieved from
6886     // the register.
6887     if (VA.isMemLoc() && VA.needsCustom() && ValVT.isFloatingPoint())
6888       continue;
6889 
6890     auto HandleMemLoc = [&]() {
6891       const unsigned LocSize = LocVT.getStoreSize();
6892       const unsigned ValSize = ValVT.getStoreSize();
6893       assert((ValSize <= LocSize) &&
6894              "Object size is larger than size of MemLoc");
6895       int CurArgOffset = VA.getLocMemOffset();
6896       // Objects are right-justified because AIX is big-endian.
6897       if (LocSize > ValSize)
6898         CurArgOffset += LocSize - ValSize;
6899       // Potential tail calls could cause overwriting of argument stack slots.
6900       const bool IsImmutable =
6901           !(getTargetMachine().Options.GuaranteedTailCallOpt &&
6902             (CallConv == CallingConv::Fast));
6903       int FI = MFI.CreateFixedObject(ValSize, CurArgOffset, IsImmutable);
6904       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6905       SDValue ArgValue =
6906           DAG.getLoad(ValVT, dl, Chain, FIN, MachinePointerInfo());
6907       InVals.push_back(ArgValue);
6908     };
6909 
6910     // Vector arguments to vararg functions are passed both on the stack and
6911     // in any available GPRs. Load the value from the stack and add the GPRs
6912     // as live ins.
6913     if (VA.isMemLoc() && VA.needsCustom()) {
6914       assert(ValVT.isVector() && "Unexpected Custom MemLoc type.");
6915       assert(isVarArg && "Only use custom memloc for vararg.");
6916       // Remember the ValNo of the custom MemLoc, so we can compare it to
6917       // the ValNo of the matching custom RegLocs.
6918       const unsigned OriginalValNo = VA.getValNo();
6919       (void)OriginalValNo;
6920 
6921       auto HandleCustomVecRegLoc = [&]() {
6922         assert(I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
6923                "Missing custom RegLoc.");
6924         VA = ArgLocs[I++];
6925         assert(VA.getValVT().isVector() &&
6926                "Unexpected Val type for custom RegLoc.");
6927         assert(VA.getValNo() == OriginalValNo &&
6928                "ValNo mismatch between custom MemLoc and RegLoc.");
6929         MVT::SimpleValueType SVT = VA.getLocVT().SimpleTy;
6930         MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
6931       };
6932 
6933       HandleMemLoc();
6934       // In 64-bit there will be exactly 2 custom RegLocs that follow, and in
6935       // 32-bit there will be 2 custom RegLocs if we are passing in R9 and
6936       // R10.
6937       HandleCustomVecRegLoc();
6938       HandleCustomVecRegLoc();
6939 
6940       // If we are targeting 32-bit, there might be 2 extra custom RegLocs if
6941       // we passed the vector in R5, R6, R7 and R8.
6942       if (I != End && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom()) {
6943         assert(!IsPPC64 &&
6944                "Only 2 custom RegLocs expected for 64-bit codegen.");
6945         HandleCustomVecRegLoc();
6946         HandleCustomVecRegLoc();
6947       }
6948 
6949       continue;
6950     }
6951 
6952     if (VA.isRegLoc()) {
6953       if (VA.getValVT().isScalarInteger())
6954         FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
6955       else if (VA.getValVT().isFloatingPoint() && !VA.getValVT().isVector())
6956         FuncInfo->appendParameterType(VA.getValVT().SimpleTy == MVT::f32
6957                                           ? PPCFunctionInfo::ShortFloatPoint
6958                                           : PPCFunctionInfo::LongFloatPoint);
6959     }
6960 
6961     if (Flags.isByVal() && VA.isMemLoc()) {
6962       const unsigned Size =
6963           alignTo(Flags.getByValSize() ? Flags.getByValSize() : PtrByteSize,
6964                   PtrByteSize);
6965       const int FI = MF.getFrameInfo().CreateFixedObject(
6966           Size, VA.getLocMemOffset(), /* IsImmutable */ false,
6967           /* IsAliased */ true);
6968       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6969       InVals.push_back(FIN);
6970 
6971       continue;
6972     }
6973 
6974     if (Flags.isByVal()) {
6975       assert(VA.isRegLoc() && "MemLocs should already be handled.");
6976 
6977       const MCPhysReg ArgReg = VA.getLocReg();
6978       const PPCFrameLowering *FL = Subtarget.getFrameLowering();
6979 
6980       if (Flags.getNonZeroByValAlign() > PtrByteSize)
6981         report_fatal_error("Over aligned byvals not supported yet.");
6982 
6983       const unsigned StackSize = alignTo(Flags.getByValSize(), PtrByteSize);
6984       const int FI = MF.getFrameInfo().CreateFixedObject(
6985           StackSize, mapArgRegToOffsetAIX(ArgReg, FL), /* IsImmutable */ false,
6986           /* IsAliased */ true);
6987       SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
6988       InVals.push_back(FIN);
6989 
6990       // Add live ins for all the RegLocs for the same ByVal.
6991       const TargetRegisterClass *RegClass =
6992           IsPPC64 ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
6993 
6994       auto HandleRegLoc = [&, RegClass, LocVT](const MCPhysReg PhysReg,
6995                                                unsigned Offset) {
6996         const unsigned VReg = MF.addLiveIn(PhysReg, RegClass);
6997         // Since the caller's side has left-justified the aggregate in the
6998         // register, we can simply store the entire register into the stack
6999         // slot.
7000         SDValue CopyFrom = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7001         // The store to the fixed stack object is needed because accessing a
7002         // field of the ByVal will use a GEP and load. Ideally we would optimize
7003         // to extract the value from the register directly, and elide the
7004         // stores when the argument's address is not taken, but that will need
7005         // to be future work.
7006         SDValue Store = DAG.getStore(
7007             CopyFrom.getValue(1), dl, CopyFrom,
7008             DAG.getObjectPtrOffset(dl, FIN, TypeSize::Fixed(Offset)),
7009             MachinePointerInfo::getFixedStack(MF, FI, Offset));
7010 
7011         MemOps.push_back(Store);
7012       };
7013 
7014       unsigned Offset = 0;
7015       HandleRegLoc(VA.getLocReg(), Offset);
7016       Offset += PtrByteSize;
7017       for (; Offset != StackSize && ArgLocs[I].isRegLoc();
7018            Offset += PtrByteSize) {
7019         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7020                "RegLocs should be for ByVal argument.");
7021 
7022         const CCValAssign RL = ArgLocs[I++];
7023         HandleRegLoc(RL.getLocReg(), Offset);
7024         FuncInfo->appendParameterType(PPCFunctionInfo::FixedType);
7025       }
7026 
7027       if (Offset != StackSize) {
7028         assert(ArgLocs[I].getValNo() == VA.getValNo() &&
7029                "Expected MemLoc for remaining bytes.");
7030         assert(ArgLocs[I].isMemLoc() && "Expected MemLoc for remaining bytes.");
7031         // Consume the MemLoc. The InVal has already been emitted, so nothing
7032         // more needs to be done.
7033         ++I;
7034       }
7035 
7036       continue;
7037     }
7038 
7039     if (VA.isRegLoc() && !VA.needsCustom()) {
7040       MVT::SimpleValueType SVT = ValVT.SimpleTy;
7041       unsigned VReg =
7042           MF.addLiveIn(VA.getLocReg(), getRegClassForSVT(SVT, IsPPC64));
7043       SDValue ArgValue = DAG.getCopyFromReg(Chain, dl, VReg, LocVT);
7044       if (ValVT.isScalarInteger() &&
7045           (ValVT.getFixedSizeInBits() < LocVT.getFixedSizeInBits())) {
7046         ArgValue =
7047             truncateScalarIntegerArg(Flags, ValVT, DAG, ArgValue, LocVT, dl);
7048       }
7049       InVals.push_back(ArgValue);
7050       continue;
7051     }
7052     if (VA.isMemLoc()) {
7053       HandleMemLoc();
7054       continue;
7055     }
7056   }
7057 
7058   // On AIX a minimum of 8 words is saved to the parameter save area.
7059   const unsigned MinParameterSaveArea = 8 * PtrByteSize;
7060   // Area that is at least reserved in the caller of this function.
7061   unsigned CallerReservedArea =
7062       std::max(CCInfo.getNextStackOffset(), LinkageSize + MinParameterSaveArea);
7063 
7064   // Set the size that is at least reserved in caller of this function. Tail
7065   // call optimized function's reserved stack space needs to be aligned so
7066   // that taking the difference between two stack areas will result in an
7067   // aligned stack.
7068   CallerReservedArea =
7069       EnsureStackAlignment(Subtarget.getFrameLowering(), CallerReservedArea);
7070   FuncInfo->setMinReservedArea(CallerReservedArea);
7071 
7072   if (isVarArg) {
7073     FuncInfo->setVarArgsFrameIndex(
7074         MFI.CreateFixedObject(PtrByteSize, CCInfo.getNextStackOffset(), true));
7075     SDValue FIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
7076 
7077     static const MCPhysReg GPR_32[] = {PPC::R3, PPC::R4, PPC::R5, PPC::R6,
7078                                        PPC::R7, PPC::R8, PPC::R9, PPC::R10};
7079 
7080     static const MCPhysReg GPR_64[] = {PPC::X3, PPC::X4, PPC::X5, PPC::X6,
7081                                        PPC::X7, PPC::X8, PPC::X9, PPC::X10};
7082     const unsigned NumGPArgRegs = array_lengthof(IsPPC64 ? GPR_64 : GPR_32);
7083 
7084     // The fixed integer arguments of a variadic function are stored to the
7085     // VarArgsFrameIndex on the stack so that they may be loaded by
7086     // dereferencing the result of va_next.
7087     for (unsigned GPRIndex =
7088              (CCInfo.getNextStackOffset() - LinkageSize) / PtrByteSize;
7089          GPRIndex < NumGPArgRegs; ++GPRIndex) {
7090 
7091       const unsigned VReg =
7092           IsPPC64 ? MF.addLiveIn(GPR_64[GPRIndex], &PPC::G8RCRegClass)
7093                   : MF.addLiveIn(GPR_32[GPRIndex], &PPC::GPRCRegClass);
7094 
7095       SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, PtrVT);
7096       SDValue Store =
7097           DAG.getStore(Val.getValue(1), dl, Val, FIN, MachinePointerInfo());
7098       MemOps.push_back(Store);
7099       // Increment the address for the next argument to store.
7100       SDValue PtrOff = DAG.getConstant(PtrByteSize, dl, PtrVT);
7101       FIN = DAG.getNode(ISD::ADD, dl, PtrOff.getValueType(), FIN, PtrOff);
7102     }
7103   }
7104 
7105   if (!MemOps.empty())
7106     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
7107 
7108   return Chain;
7109 }
7110 
7111 SDValue PPCTargetLowering::LowerCall_AIX(
7112     SDValue Chain, SDValue Callee, CallFlags CFlags,
7113     const SmallVectorImpl<ISD::OutputArg> &Outs,
7114     const SmallVectorImpl<SDValue> &OutVals,
7115     const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
7116     SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
7117     const CallBase *CB) const {
7118   // See PPCTargetLowering::LowerFormalArguments_AIX() for a description of the
7119   // AIX ABI stack frame layout.
7120 
7121   assert((CFlags.CallConv == CallingConv::C ||
7122           CFlags.CallConv == CallingConv::Cold ||
7123           CFlags.CallConv == CallingConv::Fast) &&
7124          "Unexpected calling convention!");
7125 
7126   if (CFlags.IsPatchPoint)
7127     report_fatal_error("This call type is unimplemented on AIX.");
7128 
7129   const PPCSubtarget& Subtarget =
7130       static_cast<const PPCSubtarget&>(DAG.getSubtarget());
7131 
7132   MachineFunction &MF = DAG.getMachineFunction();
7133   SmallVector<CCValAssign, 16> ArgLocs;
7134   AIXCCState CCInfo(CFlags.CallConv, CFlags.IsVarArg, MF, ArgLocs,
7135                     *DAG.getContext());
7136 
7137   // Reserve space for the linkage save area (LSA) on the stack.
7138   // In both PPC32 and PPC64 there are 6 reserved slots in the LSA:
7139   //   [SP][CR][LR][2 x reserved][TOC].
7140   // The LSA is 24 bytes (6x4) in PPC32 and 48 bytes (6x8) in PPC64.
7141   const unsigned LinkageSize = Subtarget.getFrameLowering()->getLinkageSize();
7142   const bool IsPPC64 = Subtarget.isPPC64();
7143   const EVT PtrVT = getPointerTy(DAG.getDataLayout());
7144   const unsigned PtrByteSize = IsPPC64 ? 8 : 4;
7145   CCInfo.AllocateStack(LinkageSize, Align(PtrByteSize));
7146   CCInfo.AnalyzeCallOperands(Outs, CC_AIX);
7147 
7148   // The prolog code of the callee may store up to 8 GPR argument registers to
7149   // the stack, allowing va_start to index over them in memory if the callee
7150   // is variadic.
7151   // Because we cannot tell if this is needed on the caller side, we have to
7152   // conservatively assume that it is needed.  As such, make sure we have at
7153   // least enough stack space for the caller to store the 8 GPRs.
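  // As a concrete figure, on 64-bit AIX even a call with a single integer
  // argument reserves at least 48 (linkage) + 64 (8 x 8-byte GPR save slots)
  // = 112 bytes for NumBytes below.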
7154   const unsigned MinParameterSaveAreaSize = 8 * PtrByteSize;
7155   const unsigned NumBytes = std::max(LinkageSize + MinParameterSaveAreaSize,
7156                                      CCInfo.getNextStackOffset());
7157 
7158   // Adjust the stack pointer for the new arguments...
7159   // These operations are automatically eliminated by the prolog/epilog pass.
7160   Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
7161   SDValue CallSeqStart = Chain;
7162 
7163   SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
7164   SmallVector<SDValue, 8> MemOpChains;
7165 
7166   // Set up a copy of the stack pointer for loading and storing any
7167   // arguments that may not fit in the registers available for argument
7168   // passing.
7169   const SDValue StackPtr = IsPPC64 ? DAG.getRegister(PPC::X1, MVT::i64)
7170                                    : DAG.getRegister(PPC::R1, MVT::i32);
7171 
7172   for (unsigned I = 0, E = ArgLocs.size(); I != E;) {
7173     const unsigned ValNo = ArgLocs[I].getValNo();
7174     SDValue Arg = OutVals[ValNo];
7175     ISD::ArgFlagsTy Flags = Outs[ValNo].Flags;
7176 
7177     if (Flags.isByVal()) {
7178       const unsigned ByValSize = Flags.getByValSize();
7179 
7180       // Nothing to do for zero-sized ByVals on the caller side.
7181       if (!ByValSize) {
7182         ++I;
7183         continue;
7184       }
7185 
7186       auto GetLoad = [&](EVT VT, unsigned LoadOffset) {
7187         return DAG.getExtLoad(
7188             ISD::ZEXTLOAD, dl, PtrVT, Chain,
7189             (LoadOffset != 0)
7190                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7191                 : Arg,
7192             MachinePointerInfo(), VT);
7193       };
7194 
7195       unsigned LoadOffset = 0;
7196 
7197       // Initialize registers, which are fully occupied by the by-val argument.
7198       while (LoadOffset + PtrByteSize <= ByValSize && ArgLocs[I].isRegLoc()) {
7199         SDValue Load = GetLoad(PtrVT, LoadOffset);
7200         MemOpChains.push_back(Load.getValue(1));
7201         LoadOffset += PtrByteSize;
7202         const CCValAssign &ByValVA = ArgLocs[I++];
7203         assert(ByValVA.getValNo() == ValNo &&
7204                "Unexpected location for pass-by-value argument.");
7205         RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), Load));
7206       }
7207 
7208       if (LoadOffset == ByValSize)
7209         continue;
7210 
7211       // There must be one more loc to handle the remainder.
7212       assert(ArgLocs[I].getValNo() == ValNo &&
7213              "Expected additional location for by-value argument.");
7214 
7215       if (ArgLocs[I].isMemLoc()) {
7216         assert(LoadOffset < ByValSize && "Unexpected memloc for by-val arg.");
7217         const CCValAssign &ByValVA = ArgLocs[I++];
7218         ISD::ArgFlagsTy MemcpyFlags = Flags;
7219         // Only memcpy the bytes that aren't passed in registers.
7220         MemcpyFlags.setByValSize(ByValSize - LoadOffset);
7221         Chain = CallSeqStart = createMemcpyOutsideCallSeq(
7222             (LoadOffset != 0)
7223                 ? DAG.getObjectPtrOffset(dl, Arg, TypeSize::Fixed(LoadOffset))
7224                 : Arg,
7225             DAG.getObjectPtrOffset(dl, StackPtr,
7226                                    TypeSize::Fixed(ByValVA.getLocMemOffset())),
7227             CallSeqStart, MemcpyFlags, DAG, dl);
7228         continue;
7229       }
7230 
7231       // Initialize the final register residue.
7232       // Any residue that occupies the final by-val arg register must be
7233       // left-justified on AIX. Loads must be a power-of-2 size and cannot be
7234       // larger than the ByValSize. For example: a 7 byte by-val arg requires 4,
7235       // 2 and 1 byte loads.
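      // As a worked example, a 15-byte by-val argument on 64-bit leaves a
      // 7-byte residue: the loop below emits i32, i16 and i8 loads, shifts
      // them left by 32, 16 and 8 bits respectively, and ORs the results so
      // the 7 bytes end up left-justified in the final GPR.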
7236       const unsigned ResidueBytes = ByValSize % PtrByteSize;
7237       assert(ResidueBytes != 0 && LoadOffset + PtrByteSize > ByValSize &&
7238              "Unexpected register residue for by-value argument.");
7239       SDValue ResidueVal;
7240       for (unsigned Bytes = 0; Bytes != ResidueBytes;) {
7241         const unsigned N = PowerOf2Floor(ResidueBytes - Bytes);
7242         const MVT VT =
7243             N == 1 ? MVT::i8
7244                    : ((N == 2) ? MVT::i16 : (N == 4 ? MVT::i32 : MVT::i64));
7245         SDValue Load = GetLoad(VT, LoadOffset);
7246         MemOpChains.push_back(Load.getValue(1));
7247         LoadOffset += N;
7248         Bytes += N;
7249 
7250         // By-val arguments are passed left-justified in registers.
7251         // Every load here needs to be shifted, otherwise a full register load
7252         // should have been used.
7253         assert(PtrVT.getSimpleVT().getSizeInBits() > (Bytes * 8) &&
7254                "Unexpected load emitted during handling of pass-by-value "
7255                "argument.");
7256         unsigned NumSHLBits = PtrVT.getSimpleVT().getSizeInBits() - (Bytes * 8);
7257         EVT ShiftAmountTy =
7258             getShiftAmountTy(Load->getValueType(0), DAG.getDataLayout());
7259         SDValue SHLAmt = DAG.getConstant(NumSHLBits, dl, ShiftAmountTy);
7260         SDValue ShiftedLoad =
7261             DAG.getNode(ISD::SHL, dl, Load.getValueType(), Load, SHLAmt);
7262         ResidueVal = ResidueVal ? DAG.getNode(ISD::OR, dl, PtrVT, ResidueVal,
7263                                               ShiftedLoad)
7264                                 : ShiftedLoad;
7265       }
7266 
7267       const CCValAssign &ByValVA = ArgLocs[I++];
7268       RegsToPass.push_back(std::make_pair(ByValVA.getLocReg(), ResidueVal));
7269       continue;
7270     }
7271 
7272     CCValAssign &VA = ArgLocs[I++];
7273     const MVT LocVT = VA.getLocVT();
7274     const MVT ValVT = VA.getValVT();
7275 
7276     switch (VA.getLocInfo()) {
7277     default:
7278       report_fatal_error("Unexpected argument extension type.");
7279     case CCValAssign::Full:
7280       break;
7281     case CCValAssign::ZExt:
7282       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7283       break;
7284     case CCValAssign::SExt:
7285       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7286       break;
7287     }
7288 
7289     if (VA.isRegLoc() && !VA.needsCustom()) {
7290       RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
7291       continue;
7292     }
7293 
7294     // Vector arguments passed to VarArg functions need custom handling when
7295     // they are passed (at least partially) in GPRs.
7296     if (VA.isMemLoc() && VA.needsCustom() && ValVT.isVector()) {
7297       assert(CFlags.IsVarArg && "Custom MemLocs only used for Vector args.");
7298       // Store value to its stack slot.
7299       SDValue PtrOff =
7300           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7301       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7302       SDValue Store =
7303           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo());
7304       MemOpChains.push_back(Store);
7305       const unsigned OriginalValNo = VA.getValNo();
7306       // Then load the GPRs from the stack
7307       unsigned LoadOffset = 0;
7308       auto HandleCustomVecRegLoc = [&]() {
7309         assert(I != E && "Unexpected end of CCValAssigns.");
7310         assert(ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
7311                "Expected custom RegLoc.");
7312         CCValAssign RegVA = ArgLocs[I++];
7313         assert(RegVA.getValNo() == OriginalValNo &&
7314                "Custom MemLoc ValNo and custom RegLoc ValNo must match.");
7315         SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, PtrOff,
7316                                   DAG.getConstant(LoadOffset, dl, PtrVT));
7317         SDValue Load = DAG.getLoad(PtrVT, dl, Store, Add, MachinePointerInfo());
7318         MemOpChains.push_back(Load.getValue(1));
7319         RegsToPass.push_back(std::make_pair(RegVA.getLocReg(), Load));
7320         LoadOffset += PtrByteSize;
7321       };
7322 
7323       // In 64-bit there will be exactly 2 custom RegLocs that follow, and
7324       // in 32-bit there will be 2 custom RegLocs if we are passing in R9 and
7325       // R10.
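      // For example, when a 16-byte vector vararg lands late enough in the
      // GPR file on 32-bit that only R9 and R10 remain, just two word-sized
      // GPR copies are emitted and the rest of the vector stays in the stack
      // slot stored above; otherwise four GPR copies are used.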
7326       HandleCustomVecRegLoc();
7327       HandleCustomVecRegLoc();
7328 
7329       if (I != E && ArgLocs[I].isRegLoc() && ArgLocs[I].needsCustom() &&
7330           ArgLocs[I].getValNo() == OriginalValNo) {
7331         assert(!IsPPC64 &&
7332                "Only 2 custom RegLocs expected for 64-bit codegen.");
7333         HandleCustomVecRegLoc();
7334         HandleCustomVecRegLoc();
7335       }
7336 
7337       continue;
7338     }
7339 
7340     if (VA.isMemLoc()) {
7341       SDValue PtrOff =
7342           DAG.getConstant(VA.getLocMemOffset(), dl, StackPtr.getValueType());
7343       PtrOff = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7344       MemOpChains.push_back(
7345           DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
7346 
7347       continue;
7348     }
7349 
7350     if (!ValVT.isFloatingPoint())
7351       report_fatal_error(
7352           "Unexpected register handling for calling convention.");
7353 
7354     // Custom handling is used for GPR initializations for vararg float
7355     // arguments.
7356     assert(VA.isRegLoc() && VA.needsCustom() && CFlags.IsVarArg &&
7357            LocVT.isInteger() &&
7358            "Custom register handling only expected for VarArg.");
7359 
7360     SDValue ArgAsInt =
7361         DAG.getBitcast(MVT::getIntegerVT(ValVT.getSizeInBits()), Arg);
7362 
7363     if (Arg.getValueType().getStoreSize() == LocVT.getStoreSize())
7364       // f32 in 32-bit GPR
7365       // f64 in 64-bit GPR
7366       RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgAsInt));
7367     else if (Arg.getValueType().getFixedSizeInBits() <
7368              LocVT.getFixedSizeInBits())
7369       // f32 in 64-bit GPR.
7370       RegsToPass.push_back(std::make_pair(
7371           VA.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, LocVT)));
7372     else {
7373       // f64 in two 32-bit GPRs
7374       // The 2 GPRs are marked custom and expected to be adjacent in ArgLocs.
7375       assert(Arg.getValueType() == MVT::f64 && CFlags.IsVarArg && !IsPPC64 &&
7376              "Unexpected custom register for argument!");
7377       CCValAssign &GPR1 = VA;
7378       SDValue MSWAsI64 = DAG.getNode(ISD::SRL, dl, MVT::i64, ArgAsInt,
7379                                      DAG.getConstant(32, dl, MVT::i8));
7380       RegsToPass.push_back(std::make_pair(
7381           GPR1.getLocReg(), DAG.getZExtOrTrunc(MSWAsI64, dl, MVT::i32)));
7382 
7383       if (I != E) {
7384         // If only 1 GPR was available, there will only be one custom GPR and
7385         // the argument will also pass in memory.
7386         CCValAssign &PeekArg = ArgLocs[I];
7387         if (PeekArg.isRegLoc() && PeekArg.getValNo() == ValNo) {
7388           assert(PeekArg.needsCustom() && "A second custom GPR is expected.");
7389           CCValAssign &GPR2 = ArgLocs[I++];
7390           RegsToPass.push_back(std::make_pair(
7391               GPR2.getLocReg(), DAG.getZExtOrTrunc(ArgAsInt, dl, MVT::i32)));
7392         }
7393       }
7394     }
7395   }
7396 
7397   if (!MemOpChains.empty())
7398     Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
7399 
7400   // For indirect calls, we need to save the TOC base to the stack for
7401   // restoration after the call.
7402   if (CFlags.IsIndirect) {
7403     assert(!CFlags.IsTailCall && "Indirect tail-calls not supported.");
7404     const MCRegister TOCBaseReg = Subtarget.getTOCPointerRegister();
7405     const MCRegister StackPtrReg = Subtarget.getStackPointerRegister();
7406     const MVT PtrVT = Subtarget.isPPC64() ? MVT::i64 : MVT::i32;
7407     const unsigned TOCSaveOffset =
7408         Subtarget.getFrameLowering()->getTOCSaveOffset();
7409 
7410     setUsesTOCBasePtr(DAG);
7411     SDValue Val = DAG.getCopyFromReg(Chain, dl, TOCBaseReg, PtrVT);
7412     SDValue PtrOff = DAG.getIntPtrConstant(TOCSaveOffset, dl);
7413     SDValue StackPtr = DAG.getRegister(StackPtrReg, PtrVT);
7414     SDValue AddPtr = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, PtrOff);
7415     Chain = DAG.getStore(
7416         Val.getValue(1), dl, Val, AddPtr,
7417         MachinePointerInfo::getStack(DAG.getMachineFunction(), TOCSaveOffset));
7418   }
7419 
7420   // Build a sequence of copy-to-reg nodes chained together with token chain
7421   // and flag operands which copy the outgoing args into the appropriate regs.
7422   SDValue InFlag;
7423   for (auto Reg : RegsToPass) {
7424     Chain = DAG.getCopyToReg(Chain, dl, Reg.first, Reg.second, InFlag);
7425     InFlag = Chain.getValue(1);
7426   }
7427 
7428   const int SPDiff = 0;
7429   return FinishCall(CFlags, dl, DAG, RegsToPass, InFlag, Chain, CallSeqStart,
7430                     Callee, SPDiff, NumBytes, Ins, InVals, CB);
7431 }
7432 
7433 bool
7434 PPCTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
7435                                   MachineFunction &MF, bool isVarArg,
7436                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
7437                                   LLVMContext &Context) const {
7438   SmallVector<CCValAssign, 16> RVLocs;
7439   CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
7440   return CCInfo.CheckReturn(
7441       Outs, (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7442                 ? RetCC_PPC_Cold
7443                 : RetCC_PPC);
7444 }
7445 
7446 SDValue
7447 PPCTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
7448                                bool isVarArg,
7449                                const SmallVectorImpl<ISD::OutputArg> &Outs,
7450                                const SmallVectorImpl<SDValue> &OutVals,
7451                                const SDLoc &dl, SelectionDAG &DAG) const {
7452   SmallVector<CCValAssign, 16> RVLocs;
7453   CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
7454                  *DAG.getContext());
7455   CCInfo.AnalyzeReturn(Outs,
7456                        (Subtarget.isSVR4ABI() && CallConv == CallingConv::Cold)
7457                            ? RetCC_PPC_Cold
7458                            : RetCC_PPC);
7459 
7460   SDValue Flag;
7461   SmallVector<SDValue, 4> RetOps(1, Chain);
7462 
7463   // Copy the result values into the output registers.
7464   for (unsigned i = 0, RealResIdx = 0; i != RVLocs.size(); ++i, ++RealResIdx) {
7465     CCValAssign &VA = RVLocs[i];
7466     assert(VA.isRegLoc() && "Can only return in registers!");
7467 
7468     SDValue Arg = OutVals[RealResIdx];
7469 
7470     switch (VA.getLocInfo()) {
7471     default: llvm_unreachable("Unknown loc info!");
7472     case CCValAssign::Full: break;
7473     case CCValAssign::AExt:
7474       Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
7475       break;
7476     case CCValAssign::ZExt:
7477       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
7478       break;
7479     case CCValAssign::SExt:
7480       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
7481       break;
7482     }
7483     if (Subtarget.hasSPE() && VA.getLocVT() == MVT::f64) {
7484       bool isLittleEndian = Subtarget.isLittleEndian();
7485       // Legalize ret f64 -> ret 2 x i32.
7486       SDValue SVal =
7487           DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7488                       DAG.getIntPtrConstant(isLittleEndian ? 0 : 1, dl));
7489       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7490       RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7491       SVal = DAG.getNode(PPCISD::EXTRACT_SPE, dl, MVT::i32, Arg,
7492                          DAG.getIntPtrConstant(isLittleEndian ? 1 : 0, dl));
7493       Flag = Chain.getValue(1);
7494       VA = RVLocs[++i]; // skip ahead to next loc
7495       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), SVal, Flag);
7496     } else
7497       Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
7498     Flag = Chain.getValue(1);
7499     RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
7500   }
7501 
7502   RetOps[0] = Chain;  // Update chain.
7503 
7504   // Add the flag if we have it.
7505   if (Flag.getNode())
7506     RetOps.push_back(Flag);
7507 
7508   return DAG.getNode(PPCISD::RET_FLAG, dl, MVT::Other, RetOps);
7509 }
7510 
7511 SDValue
7512 PPCTargetLowering::LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op,
7513                                                 SelectionDAG &DAG) const {
7514   SDLoc dl(Op);
7515 
7516   // Get the correct type for integers.
7517   EVT IntVT = Op.getValueType();
7518 
7519   // Get the inputs.
7520   SDValue Chain = Op.getOperand(0);
7521   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7522   // Build a DYNAREAOFFSET node.
7523   SDValue Ops[2] = {Chain, FPSIdx};
7524   SDVTList VTs = DAG.getVTList(IntVT);
7525   return DAG.getNode(PPCISD::DYNAREAOFFSET, dl, VTs, Ops);
7526 }
7527 
7528 SDValue PPCTargetLowering::LowerSTACKRESTORE(SDValue Op,
7529                                              SelectionDAG &DAG) const {
7530   // When we pop the dynamic allocation we need to restore the SP link.
7531   SDLoc dl(Op);
7532 
7533   // Get the correct type for pointers.
7534   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7535 
7536   // Construct the stack pointer operand.
7537   bool isPPC64 = Subtarget.isPPC64();
7538   unsigned SP = isPPC64 ? PPC::X1 : PPC::R1;
7539   SDValue StackPtr = DAG.getRegister(SP, PtrVT);
7540 
7541   // Get the operands for the STACKRESTORE.
7542   SDValue Chain = Op.getOperand(0);
7543   SDValue SaveSP = Op.getOperand(1);
7544 
7545   // Load the old link SP.
7546   SDValue LoadLinkSP =
7547       DAG.getLoad(PtrVT, dl, Chain, StackPtr, MachinePointerInfo());
7548 
7549   // Restore the stack pointer.
7550   Chain = DAG.getCopyToReg(LoadLinkSP.getValue(1), dl, SP, SaveSP);
7551 
7552   // Store the old link SP.
7553   return DAG.getStore(Chain, dl, LoadLinkSP, StackPtr, MachinePointerInfo());
7554 }
7555 
7556 SDValue PPCTargetLowering::getReturnAddrFrameIndex(SelectionDAG &DAG) const {
7557   MachineFunction &MF = DAG.getMachineFunction();
7558   bool isPPC64 = Subtarget.isPPC64();
7559   EVT PtrVT = getPointerTy(MF.getDataLayout());
7560 
7561   // Get the current return address save index.  The users of this index are
7562   // primarily the lowering of @llvm.returnaddress.
7563   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7564   int RASI = FI->getReturnAddrSaveIndex();
7565 
7566   // If the return address save index hasn't been defined yet, create it.
7567   if (!RASI) {
7568     // Find out the fixed offset of the link register save area.
7569     int LROffset = Subtarget.getFrameLowering()->getReturnSaveOffset();
7570     // Allocate the frame index for the link register save area.
7571     RASI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, LROffset, false);
7572     // Save the result.
7573     FI->setReturnAddrSaveIndex(RASI);
7574   }
7575   return DAG.getFrameIndex(RASI, PtrVT);
7576 }
7577 
7578 SDValue
7579 PPCTargetLowering::getFramePointerFrameIndex(SelectionDAG & DAG) const {
7580   MachineFunction &MF = DAG.getMachineFunction();
7581   bool isPPC64 = Subtarget.isPPC64();
7582   EVT PtrVT = getPointerTy(MF.getDataLayout());
7583 
7584   // Get current frame pointer save index.  The users of this index will be
7585   // primarily DYNALLOC instructions.
7586   PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
7587   int FPSI = FI->getFramePointerSaveIndex();
7588 
7589   // If the frame pointer save index hasn't been defined yet.
7590   if (!FPSI) {
7591     // Find out the fixed offset of the frame pointer save area.
7592     int FPOffset = Subtarget.getFrameLowering()->getFramePointerSaveOffset();
7593     // Allocate the frame index for frame pointer save area.
7594     FPSI = MF.getFrameInfo().CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
7595     // Save the result.
7596     FI->setFramePointerSaveIndex(FPSI);
7597   }
7598   return DAG.getFrameIndex(FPSI, PtrVT);
7599 }
7600 
7601 SDValue PPCTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
7602                                                    SelectionDAG &DAG) const {
7603   MachineFunction &MF = DAG.getMachineFunction();
7604   // Get the inputs.
7605   SDValue Chain = Op.getOperand(0);
7606   SDValue Size  = Op.getOperand(1);
7607   SDLoc dl(Op);
7608 
7609   // Get the correct type for pointers.
7610   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7611   // Negate the size.
7612   SDValue NegSize = DAG.getNode(ISD::SUB, dl, PtrVT,
7613                                 DAG.getConstant(0, dl, PtrVT), Size);
7614   // Construct a node for the frame pointer save index.
7615   SDValue FPSIdx = getFramePointerFrameIndex(DAG);
7616   SDValue Ops[3] = { Chain, NegSize, FPSIdx };
7617   SDVTList VTs = DAG.getVTList(PtrVT, MVT::Other);
7618   if (hasInlineStackProbe(MF))
7619     return DAG.getNode(PPCISD::PROBED_ALLOCA, dl, VTs, Ops);
7620   return DAG.getNode(PPCISD::DYNALLOC, dl, VTs, Ops);
7621 }
7622 
7623 SDValue PPCTargetLowering::LowerEH_DWARF_CFA(SDValue Op,
7624                                                      SelectionDAG &DAG) const {
7625   MachineFunction &MF = DAG.getMachineFunction();
7626 
7627   bool isPPC64 = Subtarget.isPPC64();
7628   EVT PtrVT = getPointerTy(DAG.getDataLayout());
7629 
7630   int FI = MF.getFrameInfo().CreateFixedObject(isPPC64 ? 8 : 4, 0, false);
7631   return DAG.getFrameIndex(FI, PtrVT);
7632 }
7633 
7634 SDValue PPCTargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
7635                                                SelectionDAG &DAG) const {
7636   SDLoc DL(Op);
7637   return DAG.getNode(PPCISD::EH_SJLJ_SETJMP, DL,
7638                      DAG.getVTList(MVT::i32, MVT::Other),
7639                      Op.getOperand(0), Op.getOperand(1));
7640 }
7641 
7642 SDValue PPCTargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
7643                                                 SelectionDAG &DAG) const {
7644   SDLoc DL(Op);
7645   return DAG.getNode(PPCISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
7646                      Op.getOperand(0), Op.getOperand(1));
7647 }
7648 
7649 SDValue PPCTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
7650   if (Op.getValueType().isVector())
7651     return LowerVectorLoad(Op, DAG);
7652 
7653   assert(Op.getValueType() == MVT::i1 &&
7654          "Custom lowering only for i1 loads");
7655 
7656   // First, load 8 bits into 32 bits, then truncate to 1 bit.
7657 
7658   SDLoc dl(Op);
7659   LoadSDNode *LD = cast<LoadSDNode>(Op);
7660 
7661   SDValue Chain = LD->getChain();
7662   SDValue BasePtr = LD->getBasePtr();
7663   MachineMemOperand *MMO = LD->getMemOperand();
7664 
7665   SDValue NewLD =
7666       DAG.getExtLoad(ISD::EXTLOAD, dl, getPointerTy(DAG.getDataLayout()), Chain,
7667                      BasePtr, MVT::i8, MMO);
7668   SDValue Result = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewLD);
7669 
7670   SDValue Ops[] = { Result, SDValue(NewLD.getNode(), 1) };
7671   return DAG.getMergeValues(Ops, dl);
7672 }
7673 
7674 SDValue PPCTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
7675   if (Op.getOperand(1).getValueType().isVector())
7676     return LowerVectorStore(Op, DAG);
7677 
7678   assert(Op.getOperand(1).getValueType() == MVT::i1 &&
7679          "Custom lowering only for i1 stores");
7680 
7681   // First, zero extend to 32 bits, then use a truncating store to 8 bits.
7682 
7683   SDLoc dl(Op);
7684   StoreSDNode *ST = cast<StoreSDNode>(Op);
7685 
7686   SDValue Chain = ST->getChain();
7687   SDValue BasePtr = ST->getBasePtr();
7688   SDValue Value = ST->getValue();
7689   MachineMemOperand *MMO = ST->getMemOperand();
7690 
7691   Value = DAG.getNode(ISD::ZERO_EXTEND, dl, getPointerTy(DAG.getDataLayout()),
7692                       Value);
7693   return DAG.getTruncStore(Chain, dl, Value, BasePtr, MVT::i8, MMO);
7694 }
7695 
7696 // FIXME: Remove this once the ANDI glue bug is fixed:
7697 SDValue PPCTargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
7698   assert(Op.getValueType() == MVT::i1 &&
7699          "Custom lowering only for i1 results");
7700 
7701   SDLoc DL(Op);
7702   return DAG.getNode(PPCISD::ANDI_rec_1_GT_BIT, DL, MVT::i1, Op.getOperand(0));
7703 }
7704 
7705 SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
7706                                                SelectionDAG &DAG) const {
7707 
7708   // Implements a vector truncate that fits in a vector register as a shuffle.
7709   // We want to legalize vector truncates down to where the source fits in
7710   // a vector register (and target is therefore smaller than vector register
7711   // size).  At that point legalization will try to custom lower the sub-legal
7712   // result and get here - where we can contain the truncate as a single target
7713   // operation.
7714 
7715   // For example a trunc <2 x i16> to <2 x i8> could be visualized as follows:
7716   //   <MSB1|LSB1, MSB2|LSB2> to <LSB1, LSB2>
7717   //
7718   // We will implement it for big-endian ordering as this (where x denotes
7719   // undefined):
7720   //   < MSB1|LSB1, MSB2|LSB2, uu, uu, uu, uu, uu, uu> to
7721   //   < LSB1, LSB2, u, u, u, u, u, u, u, u, u, u, u, u, u, u>
7722   //
7723   // The same operation in little-endian ordering will be:
7724   //   <uu, uu, uu, uu, uu, uu, LSB2|MSB2, LSB1|MSB1> to
7725   //   <u, u, u, u, u, u, u, u, u, u, u, u, u, u, LSB2, LSB1>
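  // For the <2 x i16> -> <2 x i8> example above, the wide type is v16i8 and
  // the mask built below keeps bytes {0, 2} on little-endian or {1, 3} on
  // big-endian, with the remaining mask slots pointing at don't-care elements.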
7726 
7727   EVT TrgVT = Op.getValueType();
7728   assert(TrgVT.isVector() && "Vector type expected.");
7729   unsigned TrgNumElts = TrgVT.getVectorNumElements();
7730   EVT EltVT = TrgVT.getVectorElementType();
7731   if (!isOperationCustom(Op.getOpcode(), TrgVT) ||
7732       TrgVT.getSizeInBits() > 128 || !isPowerOf2_32(TrgNumElts) ||
7733       !isPowerOf2_32(EltVT.getSizeInBits()))
7734     return SDValue();
7735 
7736   SDValue N1 = Op.getOperand(0);
7737   EVT SrcVT = N1.getValueType();
7738   unsigned SrcSize = SrcVT.getSizeInBits();
7739   if (SrcSize > 256 ||
7740       !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
7741       !isPowerOf2_32(SrcVT.getVectorElementType().getSizeInBits()))
7742     return SDValue();
7743   if (SrcSize == 256 && SrcVT.getVectorNumElements() < 2)
7744     return SDValue();
7745 
7746   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
7747   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
7748 
7749   SDLoc DL(Op);
7750   SDValue Op1, Op2;
7751   if (SrcSize == 256) {
7752     EVT VecIdxTy = getVectorIdxTy(DAG.getDataLayout());
7753     EVT SplitVT =
7754         N1.getValueType().getHalfNumVectorElementsVT(*DAG.getContext());
7755     unsigned SplitNumElts = SplitVT.getVectorNumElements();
7756     Op1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7757                       DAG.getConstant(0, DL, VecIdxTy));
7758     Op2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, N1,
7759                       DAG.getConstant(SplitNumElts, DL, VecIdxTy));
7760   }
7761   else {
7762     Op1 = SrcSize == 128 ? N1 : widenVec(DAG, N1, DL);
7763     Op2 = DAG.getUNDEF(WideVT);
7764   }
7765 
7766   // First list the elements we want to keep.
7767   unsigned SizeMult = SrcSize / TrgVT.getSizeInBits();
7768   SmallVector<int, 16> ShuffV;
7769   if (Subtarget.isLittleEndian())
7770     for (unsigned i = 0; i < TrgNumElts; ++i)
7771       ShuffV.push_back(i * SizeMult);
7772   else
7773     for (unsigned i = 1; i <= TrgNumElts; ++i)
7774       ShuffV.push_back(i * SizeMult - 1);
7775 
7776   // Populate the remaining elements with undefs.
7777   for (unsigned i = TrgNumElts; i < WideNumElts; ++i)
7778     // ShuffV.push_back(i + WideNumElts);
7779     ShuffV.push_back(WideNumElts + 1);
7780 
7781   Op1 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op1);
7782   Op2 = DAG.getNode(ISD::BITCAST, DL, WideVT, Op2);
7783   return DAG.getVectorShuffle(WideVT, DL, Op1, Op2, ShuffV);
7784 }
7785 
7786 /// LowerSELECT_CC - Lower floating-point select_cc's into an fsel instruction
7787 /// when possible.
7788 SDValue PPCTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
7789   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
7790   EVT ResVT = Op.getValueType();
7791   EVT CmpVT = Op.getOperand(0).getValueType();
7792   SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
7793   SDValue TV  = Op.getOperand(2), FV  = Op.getOperand(3);
7794   SDLoc dl(Op);
7795 
7796   // Without power9-vector, there is no native instruction for f128
7797   // comparisons, so the comparison is routed through a setcc libcall:
7798   // select_cc lhs, rhs, tv, fv, cc -> select_cc (setcc lhs, rhs, cc), 0, tv, fv, NE
7799   if (!Subtarget.hasP9Vector() && CmpVT == MVT::f128) {
7800     SDValue Z = DAG.getSetCC(
7801         dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT),
7802         LHS, RHS, CC);
7803     SDValue Zero = DAG.getConstant(0, dl, Z.getValueType());
7804     return DAG.getSelectCC(dl, Z, Zero, TV, FV, ISD::SETNE);
7805   }
7806 
7807   // Not FP, or using SPE? Not a fsel.
7808   if (!CmpVT.isFloatingPoint() || !TV.getValueType().isFloatingPoint() ||
7809       Subtarget.hasSPE())
7810     return Op;
7811 
7812   SDNodeFlags Flags = Op.getNode()->getFlags();
7813 
7814   // We have xsmaxcdp/xsmincdp which are OK to emit even in the
7815   // presence of infinities.
7816   if (Subtarget.hasP9Vector() && LHS == TV && RHS == FV) {
7817     switch (CC) {
7818     default:
7819       break;
7820     case ISD::SETOGT:
7821     case ISD::SETGT:
7822       return DAG.getNode(PPCISD::XSMAXCDP, dl, Op.getValueType(), LHS, RHS);
7823     case ISD::SETOLT:
7824     case ISD::SETLT:
7825       return DAG.getNode(PPCISD::XSMINCDP, dl, Op.getValueType(), LHS, RHS);
7826     }
7827   }
7828 
7829   // We might be able to do better than this under some circumstances, but in
7830   // general, fsel-based lowering of select is a finite-math-only optimization.
7831   // For more information, see section F.3 of the 2.06 ISA specification.
7832   // With ISA 3.0 (Power9), the xsmaxcdp/xsmincdp path above already covers
7833   // the GT/LT cases without these fast-math restrictions.
7833   if ((!DAG.getTarget().Options.NoInfsFPMath && !Flags.hasNoInfs()) ||
7834       (!DAG.getTarget().Options.NoNaNsFPMath && !Flags.hasNoNaNs()))
7835     return Op;
7836 
7837   // If the RHS of the comparison is a 0.0, we don't need to do the
7838   // subtraction at all.
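  // For example, (select_cc lhs, 0.0, tv, fv, setge) maps directly to
  // (fsel lhs, tv, fv); the setlt cases swap tv/fv first because fsel
  // natively selects tv only when its comparison operand is >= 0.0.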
7839   SDValue Sel1;
7840   if (isFloatingPointZero(RHS))
7841     switch (CC) {
7842     default: break;       // SETUO etc aren't handled by fsel.
7843     case ISD::SETNE:
7844       std::swap(TV, FV);
7845       LLVM_FALLTHROUGH;
7846     case ISD::SETEQ:
7847       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7848         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7849       Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7850       if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7851         Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7852       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7853                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), Sel1, FV);
7854     case ISD::SETULT:
7855     case ISD::SETLT:
7856       std::swap(TV, FV);  // fsel is natively setge, swap operands for setlt
7857       LLVM_FALLTHROUGH;
7858     case ISD::SETOGE:
7859     case ISD::SETGE:
7860       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7861         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7862       return DAG.getNode(PPCISD::FSEL, dl, ResVT, LHS, TV, FV);
7863     case ISD::SETUGT:
7864     case ISD::SETGT:
7865       std::swap(TV, FV);  // fsel is natively setge, swap to handle setgt as setle
7866       LLVM_FALLTHROUGH;
7867     case ISD::SETOLE:
7868     case ISD::SETLE:
7869       if (LHS.getValueType() == MVT::f32)   // Comparison is always 64-bits
7870         LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, LHS);
7871       return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7872                          DAG.getNode(ISD::FNEG, dl, MVT::f64, LHS), TV, FV);
7873     }
7874 
7875   SDValue Cmp;
7876   switch (CC) {
7877   default: break;       // SETUO etc aren't handled by fsel.
7878   case ISD::SETNE:
7879     std::swap(TV, FV);
7880     LLVM_FALLTHROUGH;
7881   case ISD::SETEQ:
7882     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7883     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7884       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7885     Sel1 = DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7886     if (Sel1.getValueType() == MVT::f32)   // Comparison is always 64-bits
7887       Sel1 = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Sel1);
7888     return DAG.getNode(PPCISD::FSEL, dl, ResVT,
7889                        DAG.getNode(ISD::FNEG, dl, MVT::f64, Cmp), Sel1, FV);
7890   case ISD::SETULT:
7891   case ISD::SETLT:
7892     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7893     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7894       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7895     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7896   case ISD::SETOGE:
7897   case ISD::SETGE:
7898     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, LHS, RHS, Flags);
7899     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7900       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7901     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7902   case ISD::SETUGT:
7903   case ISD::SETGT:
7904     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7905     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7906       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7907     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, FV, TV);
7908   case ISD::SETOLE:
7909   case ISD::SETLE:
7910     Cmp = DAG.getNode(ISD::FSUB, dl, CmpVT, RHS, LHS, Flags);
7911     if (Cmp.getValueType() == MVT::f32)   // Comparison is always 64-bits
7912       Cmp = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Cmp);
7913     return DAG.getNode(PPCISD::FSEL, dl, ResVT, Cmp, TV, FV);
7914   }
7915   return Op;
7916 }
7917 
7918 static unsigned getPPCStrictOpcode(unsigned Opc) {
7919   switch (Opc) {
7920   default:
7921     llvm_unreachable("No strict version of this opcode!");
7922   case PPCISD::FCTIDZ:
7923     return PPCISD::STRICT_FCTIDZ;
7924   case PPCISD::FCTIWZ:
7925     return PPCISD::STRICT_FCTIWZ;
7926   case PPCISD::FCTIDUZ:
7927     return PPCISD::STRICT_FCTIDUZ;
7928   case PPCISD::FCTIWUZ:
7929     return PPCISD::STRICT_FCTIWUZ;
7930   case PPCISD::FCFID:
7931     return PPCISD::STRICT_FCFID;
7932   case PPCISD::FCFIDU:
7933     return PPCISD::STRICT_FCFIDU;
7934   case PPCISD::FCFIDS:
7935     return PPCISD::STRICT_FCFIDS;
7936   case PPCISD::FCFIDUS:
7937     return PPCISD::STRICT_FCFIDUS;
7938   }
7939 }
7940 
7941 static SDValue convertFPToInt(SDValue Op, SelectionDAG &DAG,
7942                               const PPCSubtarget &Subtarget) {
7943   SDLoc dl(Op);
7944   bool IsStrict = Op->isStrictFPOpcode();
7945   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7946                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7947 
7948   // TODO: Any other flags to propagate?
7949   SDNodeFlags Flags;
7950   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
7951 
7952   // For strict nodes, source is the second operand.
7953   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
7954   SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
7955   assert(Src.getValueType().isFloatingPoint());
7956   if (Src.getValueType() == MVT::f32) {
7957     if (IsStrict) {
7958       Src =
7959           DAG.getNode(ISD::STRICT_FP_EXTEND, dl,
7960                       DAG.getVTList(MVT::f64, MVT::Other), {Chain, Src}, Flags);
7961       Chain = Src.getValue(1);
7962     } else
7963       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
7964   }
7965   SDValue Conv;
7966   unsigned Opc = ISD::DELETED_NODE;
7967   switch (Op.getSimpleValueType().SimpleTy) {
7968   default: llvm_unreachable("Unhandled FP_TO_INT type in custom expander!");
7969   case MVT::i32:
7970     Opc = IsSigned ? PPCISD::FCTIWZ
7971                    : (Subtarget.hasFPCVT() ? PPCISD::FCTIWUZ : PPCISD::FCTIDZ);
7972     break;
7973   case MVT::i64:
7974     assert((IsSigned || Subtarget.hasFPCVT()) &&
7975            "i64 FP_TO_UINT is supported only with FPCVT");
7976     Opc = IsSigned ? PPCISD::FCTIDZ : PPCISD::FCTIDUZ;
7977   }
7978   if (IsStrict) {
7979     Opc = getPPCStrictOpcode(Opc);
7980     Conv = DAG.getNode(Opc, dl, DAG.getVTList(MVT::f64, MVT::Other),
7981                        {Chain, Src}, Flags);
7982   } else {
7983     Conv = DAG.getNode(Opc, dl, MVT::f64, Src);
7984   }
7985   return Conv;
7986 }
7987 
7988 void PPCTargetLowering::LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
7989                                                SelectionDAG &DAG,
7990                                                const SDLoc &dl) const {
7991   SDValue Tmp = convertFPToInt(Op, DAG, Subtarget);
7992   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
7993                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
7994   bool IsStrict = Op->isStrictFPOpcode();
7995 
7996   // Convert the FP value to an int value through memory.
7997   bool i32Stack = Op.getValueType() == MVT::i32 && Subtarget.hasSTFIWX() &&
7998                   (IsSigned || Subtarget.hasFPCVT());
7999   SDValue FIPtr = DAG.CreateStackTemporary(i32Stack ? MVT::i32 : MVT::f64);
8000   int FI = cast<FrameIndexSDNode>(FIPtr)->getIndex();
8001   MachinePointerInfo MPI =
8002       MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
8003 
8004   // Emit a store to the stack slot.
8005   SDValue Chain = IsStrict ? Tmp.getValue(1) : DAG.getEntryNode();
8006   Align Alignment(DAG.getEVTAlign(Tmp.getValueType()));
8007   if (i32Stack) {
8008     MachineFunction &MF = DAG.getMachineFunction();
8009     Alignment = Align(4);
8010     MachineMemOperand *MMO =
8011         MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Alignment);
8012     SDValue Ops[] = { Chain, Tmp, FIPtr };
8013     Chain = DAG.getMemIntrinsicNode(PPCISD::STFIWX, dl,
8014               DAG.getVTList(MVT::Other), Ops, MVT::i32, MMO);
8015   } else
8016     Chain = DAG.getStore(Chain, dl, Tmp, FIPtr, MPI, Alignment);
8017 
8018   // Result is a load from the stack slot.  If loading 4 bytes, make sure to
8019   // add in a bias on big endian.
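  // For example, an i32 result produced via an 8-byte f64 temporary has its
  // meaningful low 32 bits at byte offset 4 of the slot on big-endian targets
  // and at offset 0 on little-endian targets, hence the bias below.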
8020   if (Op.getValueType() == MVT::i32 && !i32Stack) {
8021     FIPtr = DAG.getNode(ISD::ADD, dl, FIPtr.getValueType(), FIPtr,
8022                         DAG.getConstant(4, dl, FIPtr.getValueType()));
8023     MPI = MPI.getWithOffset(Subtarget.isLittleEndian() ? 0 : 4);
8024   }
8025 
8026   RLI.Chain = Chain;
8027   RLI.Ptr = FIPtr;
8028   RLI.MPI = MPI;
8029   RLI.Alignment = Alignment;
8030 }
8031 
8032 /// Custom lowers floating point to integer conversions to use
8033 /// the direct move instructions available in ISA 2.07 to avoid the
8034 /// need for load/store combinations.
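/// For example, a signed f64 -> i32 conversion can then be emitted as fctiwz
/// followed by a VSR-to-GPR move (mfvsrwz on ISA 2.07 targets) instead of a
/// store/reload through the stack.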
8035 SDValue PPCTargetLowering::LowerFP_TO_INTDirectMove(SDValue Op,
8036                                                     SelectionDAG &DAG,
8037                                                     const SDLoc &dl) const {
8038   SDValue Conv = convertFPToInt(Op, DAG, Subtarget);
8039   SDValue Mov = DAG.getNode(PPCISD::MFVSR, dl, Op.getValueType(), Conv);
8040   if (Op->isStrictFPOpcode())
8041     return DAG.getMergeValues({Mov, Conv.getValue(1)}, dl);
8042   else
8043     return Mov;
8044 }
8045 
8046 SDValue PPCTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
8047                                           const SDLoc &dl) const {
8048   bool IsStrict = Op->isStrictFPOpcode();
8049   bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
8050                   Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
8051   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8052   EVT SrcVT = Src.getValueType();
8053   EVT DstVT = Op.getValueType();
8054 
8055   // FP to INT conversions are legal for f128.
8056   if (SrcVT == MVT::f128)
8057     return Subtarget.hasP9Vector() ? Op : SDValue();
8058 
8059   // Expand ppcf128 to i32 by hand for the benefit of llvm-gcc bootstrap on
8060   // PPC (the libcall is not available).
8061   if (SrcVT == MVT::ppcf128) {
8062     if (DstVT == MVT::i32) {
8063       // TODO: Conservatively pass only nofpexcept flag here. Need to check and
8064       // set other fast-math flags to FP operations in both strict and
8065       // non-strict cases. (FP_TO_SINT, FSUB)
8066       SDNodeFlags Flags;
8067       Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8068 
8069       if (IsSigned) {
8070         SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8071                                  DAG.getIntPtrConstant(0, dl));
8072         SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::f64, Src,
8073                                  DAG.getIntPtrConstant(1, dl));
8074 
8075         // Add the two halves of the long double in round-to-zero mode, and use
8076         // a smaller FP_TO_SINT.
8077         if (IsStrict) {
8078           SDValue Res = DAG.getNode(PPCISD::STRICT_FADDRTZ, dl,
8079                                     DAG.getVTList(MVT::f64, MVT::Other),
8080                                     {Op.getOperand(0), Lo, Hi}, Flags);
8081           return DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8082                              DAG.getVTList(MVT::i32, MVT::Other),
8083                              {Res.getValue(1), Res}, Flags);
8084         } else {
8085           SDValue Res = DAG.getNode(PPCISD::FADDRTZ, dl, MVT::f64, Lo, Hi);
8086           return DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Res);
8087         }
8088       } else {
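        // 0x41e0000000000000 is 2^31 encoded as an IEEE-754 double:
        // sign 0, biased exponent 0x41e (1023 + 31), zero mantissa.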
8089         const uint64_t TwoE31[] = {0x41e0000000000000LL, 0};
8090         APFloat APF = APFloat(APFloat::PPCDoubleDouble(), APInt(128, TwoE31));
8091         SDValue Cst = DAG.getConstantFP(APF, dl, SrcVT);
8092         SDValue SignMask = DAG.getConstant(0x80000000, dl, DstVT);
8093         if (IsStrict) {
8094           // Sel = Src < 0x80000000
8095           // FltOfs = select Sel, 0.0, 0x80000000
8096           // IntOfs = select Sel, 0, 0x80000000
8097           // Result = fp_to_sint(Src - FltOfs) ^ IntOfs
8098           SDValue Chain = Op.getOperand(0);
8099           EVT SetCCVT =
8100               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), SrcVT);
8101           EVT DstSetCCVT =
8102               getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), DstVT);
8103           SDValue Sel = DAG.getSetCC(dl, SetCCVT, Src, Cst, ISD::SETLT,
8104                                      Chain, true);
8105           Chain = Sel.getValue(1);
8106 
8107           SDValue FltOfs = DAG.getSelect(
8108               dl, SrcVT, Sel, DAG.getConstantFP(0.0, dl, SrcVT), Cst);
8109           Sel = DAG.getBoolExtOrTrunc(Sel, dl, DstSetCCVT, DstVT);
8110 
8111           SDValue Val = DAG.getNode(ISD::STRICT_FSUB, dl,
8112                                     DAG.getVTList(SrcVT, MVT::Other),
8113                                     {Chain, Src, FltOfs}, Flags);
8114           Chain = Val.getValue(1);
8115           SDValue SInt = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl,
8116                                      DAG.getVTList(DstVT, MVT::Other),
8117                                      {Chain, Val}, Flags);
8118           Chain = SInt.getValue(1);
8119           SDValue IntOfs = DAG.getSelect(
8120               dl, DstVT, Sel, DAG.getConstant(0, dl, DstVT), SignMask);
8121           SDValue Result = DAG.getNode(ISD::XOR, dl, DstVT, SInt, IntOfs);
8122           return DAG.getMergeValues({Result, Chain}, dl);
8123         } else {
8124           // X>=2^31 ? (int)(X-2^31)+0x80000000 : (int)X
8125           // FIXME: generated code sucks.
8126           SDValue True = DAG.getNode(ISD::FSUB, dl, MVT::ppcf128, Src, Cst);
8127           True = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, True);
8128           True = DAG.getNode(ISD::ADD, dl, MVT::i32, True, SignMask);
8129           SDValue False = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
8130           return DAG.getSelectCC(dl, Src, Cst, True, False, ISD::SETGE);
8131         }
8132       }
8133     }
8134 
8135     return SDValue();
8136   }
8137 
8138   if (Subtarget.hasDirectMove() && Subtarget.isPPC64())
8139     return LowerFP_TO_INTDirectMove(Op, DAG, dl);
8140 
8141   ReuseLoadInfo RLI;
8142   LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8143 
8144   return DAG.getLoad(Op.getValueType(), dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8145                      RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8146 }
8147 
8148 // We're trying to insert a regular store, S, and then a load, L. If the
8149 // incoming value, O, is a load, we might just be able to have our load use the
8150 // address used by O. However, we don't know if anything else will store to
8151 // that address before we can load from it. To prevent this situation, we need
8152 // to insert our load, L, into the chain as a peer of O. To do this, we give L
8153 // the same chain operand as O, we create a token factor from the chain results
8154 // of O and L, and we replace all uses of O's chain result with that token
8155 // factor (see spliceIntoChain below for this last part).
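// As a small illustration: if O's chain result previously fed a store S2,
//   before: (O chain) -> S2
//   after:  TokenFactor(O chain, L chain) -> S2
// so S2 is now ordered after both the original load O and our new load L.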
8156 bool PPCTargetLowering::canReuseLoadAddress(SDValue Op, EVT MemVT,
8157                                             ReuseLoadInfo &RLI,
8158                                             SelectionDAG &DAG,
8159                                             ISD::LoadExtType ET) const {
8160   // Conservatively skip reusing for constrained FP nodes.
8161   if (Op->isStrictFPOpcode())
8162     return false;
8163 
8164   SDLoc dl(Op);
8165   bool ValidFPToUint = Op.getOpcode() == ISD::FP_TO_UINT &&
8166                        (Subtarget.hasFPCVT() || Op.getValueType() == MVT::i32);
8167   if (ET == ISD::NON_EXTLOAD &&
8168       (ValidFPToUint || Op.getOpcode() == ISD::FP_TO_SINT) &&
8169       isOperationLegalOrCustom(Op.getOpcode(),
8170                                Op.getOperand(0).getValueType())) {
8171 
8172     LowerFP_TO_INTForReuse(Op, RLI, DAG, dl);
8173     return true;
8174   }
8175 
8176   LoadSDNode *LD = dyn_cast<LoadSDNode>(Op);
8177   if (!LD || LD->getExtensionType() != ET || LD->isVolatile() ||
8178       LD->isNonTemporal())
8179     return false;
8180   if (LD->getMemoryVT() != MemVT)
8181     return false;
8182 
8183   // If the result of the load is an illegal type, then we can't build a
8184   // valid chain for reuse since the legalised loads and the token factor node
8185   // that ties them together use a different output chain than the
8186   // illegal load.
8187   if (!isTypeLegal(LD->getValueType(0)))
8188     return false;
8189 
8190   RLI.Ptr = LD->getBasePtr();
8191   if (LD->isIndexed() && !LD->getOffset().isUndef()) {
8192     assert(LD->getAddressingMode() == ISD::PRE_INC &&
8193            "Non-pre-inc AM on PPC?");
8194     RLI.Ptr = DAG.getNode(ISD::ADD, dl, RLI.Ptr.getValueType(), RLI.Ptr,
8195                           LD->getOffset());
8196   }
8197 
8198   RLI.Chain = LD->getChain();
8199   RLI.MPI = LD->getPointerInfo();
8200   RLI.IsDereferenceable = LD->isDereferenceable();
8201   RLI.IsInvariant = LD->isInvariant();
8202   RLI.Alignment = LD->getAlign();
8203   RLI.AAInfo = LD->getAAInfo();
8204   RLI.Ranges = LD->getRanges();
8205 
8206   RLI.ResChain = SDValue(LD, LD->isIndexed() ? 2 : 1);
8207   return true;
8208 }
8209 
8210 // Given the head of the old chain, ResChain, insert a token factor containing
8211 // it and NewResChain, and make users of ResChain now be users of that token
8212 // factor.
8213 // TODO: Remove and use DAG::makeEquivalentMemoryOrdering() instead.
8214 void PPCTargetLowering::spliceIntoChain(SDValue ResChain,
8215                                         SDValue NewResChain,
8216                                         SelectionDAG &DAG) const {
8217   if (!ResChain)
8218     return;
8219 
8220   SDLoc dl(NewResChain);
8221 
8222   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
8223                            NewResChain, DAG.getUNDEF(MVT::Other));
8224   assert(TF.getNode() != NewResChain.getNode() &&
8225          "A new TF really is required here");
8226 
8227   DAG.ReplaceAllUsesOfValueWith(ResChain, TF);
8228   DAG.UpdateNodeOperands(TF.getNode(), ResChain, NewResChain);
8229 }
8230 
8231 /// Analyze the profitability of a direct move: prefer a float load over an
8232 /// int load plus a direct move when the loaded integer value has no uses
8233 /// other than conversion to floating point.
8234 bool PPCTargetLowering::directMoveIsProfitable(const SDValue &Op) const {
8235   SDNode *Origin = Op.getOperand(0).getNode();
8236   if (Origin->getOpcode() != ISD::LOAD)
8237     return true;
8238 
8239   // If there is no LXSIBZX/LXSIHZX, like Power8,
8240   // prefer direct move if the memory size is 1 or 2 bytes.
8241   MachineMemOperand *MMO = cast<LoadSDNode>(Origin)->getMemOperand();
8242   if (!Subtarget.hasP9Vector() && MMO->getSize() <= 2)
8243     return true;
8244 
8245   for (SDNode::use_iterator UI = Origin->use_begin(),
8246                             UE = Origin->use_end();
8247        UI != UE; ++UI) {
8248 
8249     // Only look at the users of the loaded value.
8250     if (UI.getUse().get().getResNo() != 0)
8251       continue;
8252 
8253     if (UI->getOpcode() != ISD::SINT_TO_FP &&
8254         UI->getOpcode() != ISD::UINT_TO_FP &&
8255         UI->getOpcode() != ISD::STRICT_SINT_TO_FP &&
8256         UI->getOpcode() != ISD::STRICT_UINT_TO_FP)
8257       return true;
8258   }
8259 
8260   return false;
8261 }
8262 
8263 static SDValue convertIntToFP(SDValue Op, SDValue Src, SelectionDAG &DAG,
8264                               const PPCSubtarget &Subtarget,
8265                               SDValue Chain = SDValue()) {
8266   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8267                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8268   SDLoc dl(Op);
8269 
8270   // TODO: Any other flags to propagate?
8271   SDNodeFlags Flags;
8272   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8273 
8274   // If we have FCFIDS, then use it when converting to single-precision.
8275   // Otherwise, convert to double-precision and then round.
8276   bool IsSingle = Op.getValueType() == MVT::f32 && Subtarget.hasFPCVT();
8277   unsigned ConvOpc = IsSingle ? (IsSigned ? PPCISD::FCFIDS : PPCISD::FCFIDUS)
8278                               : (IsSigned ? PPCISD::FCFID : PPCISD::FCFIDU);
8279   EVT ConvTy = IsSingle ? MVT::f32 : MVT::f64;
8280   if (Op->isStrictFPOpcode()) {
8281     if (!Chain)
8282       Chain = Op.getOperand(0);
8283     return DAG.getNode(getPPCStrictOpcode(ConvOpc), dl,
8284                        DAG.getVTList(ConvTy, MVT::Other), {Chain, Src}, Flags);
8285   } else
8286     return DAG.getNode(ConvOpc, dl, ConvTy, Src);
8287 }
8288 
8289 /// Custom lowers integer to floating point conversions to use
8290 /// the direct move instructions available in ISA 2.07 to avoid the
8291 /// need for load/store combinations.
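/// For example, a signed i32 -> f64 conversion can then be emitted as a
/// GPR-to-VSR move (mtvsrwa on ISA 2.07 targets) followed by fcfid, rather
/// than a stack store/reload feeding the FP convert.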
8292 SDValue PPCTargetLowering::LowerINT_TO_FPDirectMove(SDValue Op,
8293                                                     SelectionDAG &DAG,
8294                                                     const SDLoc &dl) const {
8295   assert((Op.getValueType() == MVT::f32 ||
8296           Op.getValueType() == MVT::f64) &&
8297          "Invalid floating point type as target of conversion");
8298   assert(Subtarget.hasFPCVT() &&
8299          "Int to FP conversions with direct moves require FPCVT");
8300   SDValue Src = Op.getOperand(Op->isStrictFPOpcode() ? 1 : 0);
8301   bool WordInt = Src.getSimpleValueType().SimpleTy == MVT::i32;
8302   bool Signed = Op.getOpcode() == ISD::SINT_TO_FP ||
8303                 Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8304   unsigned MovOpc = (WordInt && !Signed) ? PPCISD::MTVSRZ : PPCISD::MTVSRA;
8305   SDValue Mov = DAG.getNode(MovOpc, dl, MVT::f64, Src);
8306   return convertIntToFP(Op, Mov, DAG, Subtarget);
8307 }
8308 
8309 static SDValue widenVec(SelectionDAG &DAG, SDValue Vec, const SDLoc &dl) {
8310 
8311   EVT VecVT = Vec.getValueType();
8312   assert(VecVT.isVector() && "Expected a vector type.");
8313   assert(VecVT.getSizeInBits() < 128 && "Vector is already full width.");
8314 
8315   EVT EltVT = VecVT.getVectorElementType();
8316   unsigned WideNumElts = 128 / EltVT.getSizeInBits();
8317   EVT WideVT = EVT::getVectorVT(*DAG.getContext(), EltVT, WideNumElts);
8318 
8319   unsigned NumConcat = WideNumElts / VecVT.getVectorNumElements();
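  // For example, a v4i16 input (64 bits) gives WideNumElts = 8, WideVT = v8i16
  // and NumConcat = 2, so the result below is CONCAT_VECTORS(Vec, undef) with
  // the original elements first and undef elements filling the rest.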
8320   SmallVector<SDValue, 16> Ops(NumConcat);
8321   Ops[0] = Vec;
8322   SDValue UndefVec = DAG.getUNDEF(VecVT);
8323   for (unsigned i = 1; i < NumConcat; ++i)
8324     Ops[i] = UndefVec;
8325 
8326   return DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Ops);
8327 }
8328 
8329 SDValue PPCTargetLowering::LowerINT_TO_FPVector(SDValue Op, SelectionDAG &DAG,
8330                                                 const SDLoc &dl) const {
8331   bool IsStrict = Op->isStrictFPOpcode();
8332   unsigned Opc = Op.getOpcode();
8333   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8334   assert((Opc == ISD::UINT_TO_FP || Opc == ISD::SINT_TO_FP ||
8335           Opc == ISD::STRICT_UINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP) &&
8336          "Unexpected conversion type");
8337   assert((Op.getValueType() == MVT::v2f64 || Op.getValueType() == MVT::v4f32) &&
8338          "Supports conversions to v2f64/v4f32 only.");
8339 
8340   // TODO: Any other flags to propagate?
8341   SDNodeFlags Flags;
8342   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8343 
8344   bool SignedConv = Opc == ISD::SINT_TO_FP || Opc == ISD::STRICT_SINT_TO_FP;
8345   bool FourEltRes = Op.getValueType() == MVT::v4f32;
8346 
8347   SDValue Wide = widenVec(DAG, Src, dl);
8348   EVT WideVT = Wide.getValueType();
8349   unsigned WideNumElts = WideVT.getVectorNumElements();
8350   MVT IntermediateVT = FourEltRes ? MVT::v4i32 : MVT::v2i64;
8351 
8352   SmallVector<int, 16> ShuffV;
8353   for (unsigned i = 0; i < WideNumElts; ++i)
8354     ShuffV.push_back(i + WideNumElts);
8355 
8356   int Stride = FourEltRes ? WideNumElts / 4 : WideNumElts / 2;
8357   int SaveElts = FourEltRes ? 4 : 2;
8358   if (Subtarget.isLittleEndian())
8359     for (int i = 0; i < SaveElts; i++)
8360       ShuffV[i * Stride] = i;
8361   else
8362     for (int i = 1; i <= SaveElts; i++)
8363       ShuffV[i * Stride - 1] = i - 1;
8364 
8365   SDValue ShuffleSrc2 =
8366       SignedConv ? DAG.getUNDEF(WideVT) : DAG.getConstant(0, dl, WideVT);
8367   SDValue Arrange = DAG.getVectorShuffle(WideVT, dl, Wide, ShuffleSrc2, ShuffV);
8368 
8369   SDValue Extend;
8370   if (SignedConv) {
8371     Arrange = DAG.getBitcast(IntermediateVT, Arrange);
8372     EVT ExtVT = Src.getValueType();
8373     if (Subtarget.hasP9Altivec())
8374       ExtVT = EVT::getVectorVT(*DAG.getContext(), WideVT.getVectorElementType(),
8375                                IntermediateVT.getVectorNumElements());
8376 
8377     Extend = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, IntermediateVT, Arrange,
8378                          DAG.getValueType(ExtVT));
8379   } else
8380     Extend = DAG.getNode(ISD::BITCAST, dl, IntermediateVT, Arrange);
8381 
8382   if (IsStrict)
8383     return DAG.getNode(Opc, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
8384                        {Op.getOperand(0), Extend}, Flags);
8385 
8386   return DAG.getNode(Opc, dl, Op.getValueType(), Extend);
8387 }
8388 
8389 SDValue PPCTargetLowering::LowerINT_TO_FP(SDValue Op,
8390                                           SelectionDAG &DAG) const {
8391   SDLoc dl(Op);
8392   bool IsSigned = Op.getOpcode() == ISD::SINT_TO_FP ||
8393                   Op.getOpcode() == ISD::STRICT_SINT_TO_FP;
8394   bool IsStrict = Op->isStrictFPOpcode();
8395   SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
8396   SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
8397 
8398   // TODO: Any other flags to propagate?
8399   SDNodeFlags Flags;
8400   Flags.setNoFPExcept(Op->getFlags().hasNoFPExcept());
8401 
8402   EVT InVT = Src.getValueType();
8403   EVT OutVT = Op.getValueType();
8404   if (OutVT.isVector() && OutVT.isFloatingPoint() &&
8405       isOperationCustom(Op.getOpcode(), InVT))
8406     return LowerINT_TO_FPVector(Op, DAG, dl);
8407 
8408   // Conversions to f128 are legal.
8409   if (Op.getValueType() == MVT::f128)
8410     return Subtarget.hasP9Vector() ? Op : SDValue();
8411 
8412   // Don't handle ppc_fp128 here; let it be lowered to a libcall.
8413   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
8414     return SDValue();
8415 
8416   if (Src.getValueType() == MVT::i1) {
8417     SDValue Sel = DAG.getNode(ISD::SELECT, dl, Op.getValueType(), Src,
8418                               DAG.getConstantFP(1.0, dl, Op.getValueType()),
8419                               DAG.getConstantFP(0.0, dl, Op.getValueType()));
8420     if (IsStrict)
8421       return DAG.getMergeValues({Sel, Chain}, dl);
8422     else
8423       return Sel;
8424   }
8425 
8426   // If we have direct moves, we can do the entire conversion and skip the
8427   // store/load sequence; however, without FPCVT we can't do most conversions.
8428   if (Subtarget.hasDirectMove() && directMoveIsProfitable(Op) &&
8429       Subtarget.isPPC64() && Subtarget.hasFPCVT())
8430     return LowerINT_TO_FPDirectMove(Op, DAG, dl);
8431 
8432   assert((IsSigned || Subtarget.hasFPCVT()) &&
8433          "UINT_TO_FP is supported only with FPCVT");
8434 
8435   if (Src.getValueType() == MVT::i64) {
8436     SDValue SINT = Src;
8437     // When converting to single-precision, we actually need to convert
8438     // to double-precision first and then round to single-precision.
8439     // To avoid double-rounding effects during that operation, we have
8440     // to prepare the input operand.  Bits that might be truncated when
8441     // converting to double-precision are replaced by a bit that won't
8442     // be lost at this stage, but is below the single-precision rounding
8443     // position.
8444     //
8445     // However, if -enable-unsafe-fp-math is in effect, accept double
8446     // rounding to avoid the extra overhead.
8447     if (Op.getValueType() == MVT::f32 &&
8448         !Subtarget.hasFPCVT() &&
8449         !DAG.getTarget().Options.UnsafeFPMath) {
8450 
8451       // Twiddle input to make sure the low 11 bits are zero.  (If this
8452       // is the case, we are guaranteed the value will fit into the 53 bit
8453       // mantissa of an IEEE double-precision value without rounding.)
8454       // If any of those low 11 bits were not zero originally, make sure
8455       // bit 12 (value 2048) is set instead, so that the final rounding
8456       // to single-precision gets the correct result.
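      // Worked example: for SINT = 0x1000000000000001 the steps below compute
      // 1 + 2047 = 2048, OR in SINT and mask with -2048, giving
      // 0x1000000000000800 -- low 11 bits cleared and the 2048 bit set as a
      // sticky marker.  If the low 11 bits are already zero (e.g.
      // 0x1000000000000800), the same steps leave the value unchanged.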
8457       SDValue Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8458                                   SINT, DAG.getConstant(2047, dl, MVT::i64));
8459       Round = DAG.getNode(ISD::ADD, dl, MVT::i64,
8460                           Round, DAG.getConstant(2047, dl, MVT::i64));
8461       Round = DAG.getNode(ISD::OR, dl, MVT::i64, Round, SINT);
8462       Round = DAG.getNode(ISD::AND, dl, MVT::i64,
8463                           Round, DAG.getConstant(-2048, dl, MVT::i64));
8464 
8465       // However, we cannot use that value unconditionally: if the magnitude
8466       // of the input value is small, the bit-twiddling we did above might
8467       // end up visibly changing the output.  Fortunately, in that case, we
8468       // don't need to twiddle bits since the original input will convert
8469       // exactly to double-precision floating-point already.  Therefore,
8470       // construct a conditional to use the original value if the top 11
8471       // bits are all sign-bit copies, and use the rounded value computed
8472       // above otherwise.
8473       SDValue Cond = DAG.getNode(ISD::SRA, dl, MVT::i64,
8474                                  SINT, DAG.getConstant(53, dl, MVT::i32));
8475       Cond = DAG.getNode(ISD::ADD, dl, MVT::i64,
8476                          Cond, DAG.getConstant(1, dl, MVT::i64));
8477       Cond = DAG.getSetCC(
8478           dl,
8479           getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
8480           Cond, DAG.getConstant(1, dl, MVT::i64), ISD::SETUGT);
8481 
8482       SINT = DAG.getNode(ISD::SELECT, dl, MVT::i64, Cond, Round, SINT);
8483     }
8484 
8485     ReuseLoadInfo RLI;
8486     SDValue Bits;
8487 
8488     MachineFunction &MF = DAG.getMachineFunction();
8489     if (canReuseLoadAddress(SINT, MVT::i64, RLI, DAG)) {
8490       Bits = DAG.getLoad(MVT::f64, dl, RLI.Chain, RLI.Ptr, RLI.MPI,
8491                          RLI.Alignment, RLI.MMOFlags(), RLI.AAInfo, RLI.Ranges);
8492       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8493     } else if (Subtarget.hasLFIWAX() &&
8494                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::SEXTLOAD)) {
8495       MachineMemOperand *MMO =
8496         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8497                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8498       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8499       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWAX, dl,
8500                                      DAG.getVTList(MVT::f64, MVT::Other),
8501                                      Ops, MVT::i32, MMO);
8502       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8503     } else if (Subtarget.hasFPCVT() &&
8504                canReuseLoadAddress(SINT, MVT::i32, RLI, DAG, ISD::ZEXTLOAD)) {
8505       MachineMemOperand *MMO =
8506         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8507                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8508       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8509       Bits = DAG.getMemIntrinsicNode(PPCISD::LFIWZX, dl,
8510                                      DAG.getVTList(MVT::f64, MVT::Other),
8511                                      Ops, MVT::i32, MMO);
8512       spliceIntoChain(RLI.ResChain, Bits.getValue(1), DAG);
8513     } else if (((Subtarget.hasLFIWAX() &&
8514                  SINT.getOpcode() == ISD::SIGN_EXTEND) ||
8515                 (Subtarget.hasFPCVT() &&
8516                  SINT.getOpcode() == ISD::ZERO_EXTEND)) &&
8517                SINT.getOperand(0).getValueType() == MVT::i32) {
8518       MachineFrameInfo &MFI = MF.getFrameInfo();
8519       EVT PtrVT = getPointerTy(DAG.getDataLayout());
8520 
8521       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8522       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8523 
8524       SDValue Store = DAG.getStore(Chain, dl, SINT.getOperand(0), FIdx,
8525                                    MachinePointerInfo::getFixedStack(
8526                                        DAG.getMachineFunction(), FrameIdx));
8527       Chain = Store;
8528 
8529       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8530              "Expected an i32 store");
8531 
8532       RLI.Ptr = FIdx;
8533       RLI.Chain = Chain;
8534       RLI.MPI =
8535           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8536       RLI.Alignment = Align(4);
8537 
8538       MachineMemOperand *MMO =
8539         MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8540                                 RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8541       SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8542       Bits = DAG.getMemIntrinsicNode(SINT.getOpcode() == ISD::ZERO_EXTEND ?
8543                                      PPCISD::LFIWZX : PPCISD::LFIWAX,
8544                                      dl, DAG.getVTList(MVT::f64, MVT::Other),
8545                                      Ops, MVT::i32, MMO);
8546       Chain = Bits.getValue(1);
8547     } else
8548       Bits = DAG.getNode(ISD::BITCAST, dl, MVT::f64, SINT);
8549 
8550     SDValue FP = convertIntToFP(Op, Bits, DAG, Subtarget, Chain);
8551     if (IsStrict)
8552       Chain = FP.getValue(1);
8553 
8554     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8555       if (IsStrict)
8556         FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8557                          DAG.getVTList(MVT::f32, MVT::Other),
8558                          {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8559       else
8560         FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8561                          DAG.getIntPtrConstant(0, dl));
8562     }
8563     return FP;
8564   }
8565 
8566   assert(Src.getValueType() == MVT::i32 &&
8567          "Unhandled INT_TO_FP type in custom expander!");
8568   // Since we only generate this in 64-bit mode, we can take advantage of
8569   // 64-bit registers.  In particular, sign extend the input value into a
8570   // 64-bit register with extsw, store the whole 64-bit value onto the stack,
8571   // then lfd it and fcfid it.
8572   MachineFunction &MF = DAG.getMachineFunction();
8573   MachineFrameInfo &MFI = MF.getFrameInfo();
8574   EVT PtrVT = getPointerTy(MF.getDataLayout());
8575 
8576   SDValue Ld;
8577   if (Subtarget.hasLFIWAX() || Subtarget.hasFPCVT()) {
8578     ReuseLoadInfo RLI;
8579     bool ReusingLoad;
8580     if (!(ReusingLoad = canReuseLoadAddress(Src, MVT::i32, RLI, DAG))) {
8581       int FrameIdx = MFI.CreateStackObject(4, Align(4), false);
8582       SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8583 
8584       SDValue Store = DAG.getStore(Chain, dl, Src, FIdx,
8585                                    MachinePointerInfo::getFixedStack(
8586                                        DAG.getMachineFunction(), FrameIdx));
8587       Chain = Store;
8588 
8589       assert(cast<StoreSDNode>(Store)->getMemoryVT() == MVT::i32 &&
8590              "Expected an i32 store");
8591 
8592       RLI.Ptr = FIdx;
8593       RLI.Chain = Chain;
8594       RLI.MPI =
8595           MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx);
8596       RLI.Alignment = Align(4);
8597     }
8598 
8599     MachineMemOperand *MMO =
8600       MF.getMachineMemOperand(RLI.MPI, MachineMemOperand::MOLoad, 4,
8601                               RLI.Alignment, RLI.AAInfo, RLI.Ranges);
8602     SDValue Ops[] = { RLI.Chain, RLI.Ptr };
8603     Ld = DAG.getMemIntrinsicNode(IsSigned ? PPCISD::LFIWAX : PPCISD::LFIWZX, dl,
8604                                  DAG.getVTList(MVT::f64, MVT::Other), Ops,
8605                                  MVT::i32, MMO);
8606     Chain = Ld.getValue(1);
8607     if (ReusingLoad)
8608       spliceIntoChain(RLI.ResChain, Ld.getValue(1), DAG);
8609   } else {
8610     assert(Subtarget.isPPC64() &&
8611            "i32->FP without LFIWAX supported only on PPC64");
8612 
8613     int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
8614     SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
8615 
8616     SDValue Ext64 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i64, Src);
8617 
8618     // STD the extended value into the stack slot.
8619     SDValue Store = DAG.getStore(
8620         Chain, dl, Ext64, FIdx,
8621         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8622     Chain = Store;
8623 
8624     // Load the value as a double.
8625     Ld = DAG.getLoad(
8626         MVT::f64, dl, Chain, FIdx,
8627         MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FrameIdx));
8628     Chain = Ld.getValue(1);
8629   }
8630 
8631   // FCFID it and return it.
8632   SDValue FP = convertIntToFP(Op, Ld, DAG, Subtarget, Chain);
8633   if (IsStrict)
8634     Chain = FP.getValue(1);
8635   if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
8636     if (IsStrict)
8637       FP = DAG.getNode(ISD::STRICT_FP_ROUND, dl,
8638                        DAG.getVTList(MVT::f32, MVT::Other),
8639                        {Chain, FP, DAG.getIntPtrConstant(0, dl)}, Flags);
8640     else
8641       FP = DAG.getNode(ISD::FP_ROUND, dl, MVT::f32, FP,
8642                        DAG.getIntPtrConstant(0, dl));
8643   }
8644   return FP;
8645 }
8646 
8647 SDValue PPCTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
8648                                             SelectionDAG &DAG) const {
8649   SDLoc dl(Op);
8650   /*
8651    The rounding mode is in bits 30:31 of FPSCR, and has the following
8652    settings:
8653      00 Round to nearest
8654      01 Round to 0
8655      10 Round to +inf
8656      11 Round to -inf
8657 
8658   FLT_ROUNDS, on the other hand, expects the following:
8659     -1 Undefined
8660      0 Round to 0
8661      1 Round to nearest
8662      2 Round to +inf
8663      3 Round to -inf
8664 
8665   To perform the conversion, we do:
8666     ((FPSCR & 0x3) ^ ((~FPSCR & 0x3) >> 1))
8667   */
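  // Worked out per rounding mode (FPSCR RN bits -> FLT_ROUNDS value):
  //   00: 0 ^ (3 >> 1) = 1  (round to nearest)
  //   01: 1 ^ (2 >> 1) = 0  (round to 0)
  //   10: 2 ^ (1 >> 1) = 2  (round to +inf)
  //   11: 3 ^ (0 >> 1) = 3  (round to -inf)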
8668 
8669   MachineFunction &MF = DAG.getMachineFunction();
8670   EVT VT = Op.getValueType();
8671   EVT PtrVT = getPointerTy(MF.getDataLayout());
8672 
8673   // Save FP Control Word to register
8674   SDValue Chain = Op.getOperand(0);
8675   SDValue MFFS = DAG.getNode(PPCISD::MFFS, dl, {MVT::f64, MVT::Other}, Chain);
8676   Chain = MFFS.getValue(1);
8677 
8678   SDValue CWD;
8679   if (isTypeLegal(MVT::i64)) {
8680     CWD = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
8681                       DAG.getNode(ISD::BITCAST, dl, MVT::i64, MFFS));
8682   } else {
8683     // Save FP register to stack slot
8684     int SSFI = MF.getFrameInfo().CreateStackObject(8, Align(8), false);
8685     SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
8686     Chain = DAG.getStore(Chain, dl, MFFS, StackSlot, MachinePointerInfo());
8687 
8688     // Load FP Control Word from low 32 bits of stack slot.
8689     assert(hasBigEndianPartOrdering(MVT::i64, MF.getDataLayout()) &&
8690            "Stack slot adjustment is valid only on big endian subtargets!");
8691     SDValue Four = DAG.getConstant(4, dl, PtrVT);
8692     SDValue Addr = DAG.getNode(ISD::ADD, dl, PtrVT, StackSlot, Four);
8693     CWD = DAG.getLoad(MVT::i32, dl, Chain, Addr, MachinePointerInfo());
8694     Chain = CWD.getValue(1);
8695   }
8696 
8697   // Transform as necessary
8698   SDValue CWD1 =
8699     DAG.getNode(ISD::AND, dl, MVT::i32,
8700                 CWD, DAG.getConstant(3, dl, MVT::i32));
8701   SDValue CWD2 =
8702     DAG.getNode(ISD::SRL, dl, MVT::i32,
8703                 DAG.getNode(ISD::AND, dl, MVT::i32,
8704                             DAG.getNode(ISD::XOR, dl, MVT::i32,
8705                                         CWD, DAG.getConstant(3, dl, MVT::i32)),
8706                             DAG.getConstant(3, dl, MVT::i32)),
8707                 DAG.getConstant(1, dl, MVT::i32));
8708 
8709   SDValue RetVal =
8710     DAG.getNode(ISD::XOR, dl, MVT::i32, CWD1, CWD2);
8711 
8712   RetVal =
8713       DAG.getNode((VT.getSizeInBits() < 16 ? ISD::TRUNCATE : ISD::ZERO_EXTEND),
8714                   dl, VT, RetVal);
8715 
8716   return DAG.getMergeValues({RetVal, Chain}, dl);
8717 }
8718 
8719 SDValue PPCTargetLowering::LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8720   EVT VT = Op.getValueType();
8721   unsigned BitWidth = VT.getSizeInBits();
8722   SDLoc dl(Op);
8723   assert(Op.getNumOperands() == 3 &&
8724          VT == Op.getOperand(1).getValueType() &&
8725          "Unexpected SHL!");
8726 
8727   // Expand into a bunch of logical ops.  Note that these ops
8728   // depend on the PPC behavior for oversized shift amounts.
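  // Schematically, OutLo = Lo << Amt and
  //   OutHi = (Hi << Amt) | (Lo >> (BW - Amt)) | (Lo << (Amt - BW)),
  // where a shift amount in [BW, 2*BW) yields zero, so at most one of the two
  // Lo terms contributes.  E.g. for BW = 32 and Amt = 4 this is
  // (Hi << 4) | (Lo >> 28); for Amt = 40 it is just Lo << 8 (and OutLo is 0).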
8729   SDValue Lo = Op.getOperand(0);
8730   SDValue Hi = Op.getOperand(1);
8731   SDValue Amt = Op.getOperand(2);
8732   EVT AmtVT = Amt.getValueType();
8733 
8734   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8735                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8736   SDValue Tmp2 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Amt);
8737   SDValue Tmp3 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Tmp1);
8738   SDValue Tmp4 = DAG.getNode(ISD::OR , dl, VT, Tmp2, Tmp3);
8739   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8740                              DAG.getConstant(-BitWidth, dl, AmtVT));
8741   SDValue Tmp6 = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Tmp5);
8742   SDValue OutHi = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8743   SDValue OutLo = DAG.getNode(PPCISD::SHL, dl, VT, Lo, Amt);
8744   SDValue OutOps[] = { OutLo, OutHi };
8745   return DAG.getMergeValues(OutOps, dl);
8746 }
8747 
8748 SDValue PPCTargetLowering::LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const {
8749   EVT VT = Op.getValueType();
8750   SDLoc dl(Op);
8751   unsigned BitWidth = VT.getSizeInBits();
8752   assert(Op.getNumOperands() == 3 &&
8753          VT == Op.getOperand(1).getValueType() &&
8754          "Unexpected SRL!");
8755 
8756   // Expand into a bunch of logical ops.  Note that these ops
8757   // depend on the PPC behavior for oversized shift amounts.
8758   SDValue Lo = Op.getOperand(0);
8759   SDValue Hi = Op.getOperand(1);
8760   SDValue Amt = Op.getOperand(2);
8761   EVT AmtVT = Amt.getValueType();
8762 
8763   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8764                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8765   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8766   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8767   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8768   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8769                              DAG.getConstant(-BitWidth, dl, AmtVT));
8770   SDValue Tmp6 = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Tmp5);
8771   SDValue OutLo = DAG.getNode(ISD::OR, dl, VT, Tmp4, Tmp6);
8772   SDValue OutHi = DAG.getNode(PPCISD::SRL, dl, VT, Hi, Amt);
8773   SDValue OutOps[] = { OutLo, OutHi };
8774   return DAG.getMergeValues(OutOps, dl);
8775 }
8776 
8777 SDValue PPCTargetLowering::LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const {
8778   SDLoc dl(Op);
8779   EVT VT = Op.getValueType();
8780   unsigned BitWidth = VT.getSizeInBits();
8781   assert(Op.getNumOperands() == 3 &&
8782          VT == Op.getOperand(1).getValueType() &&
8783          "Unexpected SRA!");
8784 
8785   // Expand into a bunch of logical ops, followed by a select_cc.
8786   SDValue Lo = Op.getOperand(0);
8787   SDValue Hi = Op.getOperand(1);
8788   SDValue Amt = Op.getOperand(2);
8789   EVT AmtVT = Amt.getValueType();
8790 
8791   SDValue Tmp1 = DAG.getNode(ISD::SUB, dl, AmtVT,
8792                              DAG.getConstant(BitWidth, dl, AmtVT), Amt);
8793   SDValue Tmp2 = DAG.getNode(PPCISD::SRL, dl, VT, Lo, Amt);
8794   SDValue Tmp3 = DAG.getNode(PPCISD::SHL, dl, VT, Hi, Tmp1);
8795   SDValue Tmp4 = DAG.getNode(ISD::OR, dl, VT, Tmp2, Tmp3);
8796   SDValue Tmp5 = DAG.getNode(ISD::ADD, dl, AmtVT, Amt,
8797                              DAG.getConstant(-BitWidth, dl, AmtVT));
8798   SDValue Tmp6 = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Tmp5);
8799   SDValue OutHi = DAG.getNode(PPCISD::SRA, dl, VT, Hi, Amt);
8800   SDValue OutLo = DAG.getSelectCC(dl, Tmp5, DAG.getConstant(0, dl, AmtVT),
8801                                   Tmp4, Tmp6, ISD::SETLE);
8802   SDValue OutOps[] = { OutLo, OutHi };
8803   return DAG.getMergeValues(OutOps, dl);
8804 }
8805 
8806 SDValue PPCTargetLowering::LowerFunnelShift(SDValue Op,
8807                                             SelectionDAG &DAG) const {
8808   SDLoc dl(Op);
8809   EVT VT = Op.getValueType();
8810   unsigned BitWidth = VT.getSizeInBits();
8811 
8812   bool IsFSHL = Op.getOpcode() == ISD::FSHL;
8813   SDValue X = Op.getOperand(0);
8814   SDValue Y = Op.getOperand(1);
8815   SDValue Z = Op.getOperand(2);
8816   EVT AmtVT = Z.getValueType();
8817 
8818   // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW)))
8819   // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW))
8820   // This is simpler than TargetLowering::expandFunnelShift because we can rely
8821   // on PowerPC shift by BW being well defined.
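  // For example, fshl on i64 with Z = 8 yields (X << 8) | (Y >> 56); with
  // Z = 0 it yields (X << 0) | (Y >> 64) = X, which is correct precisely
  // because the PPC shift nodes produce zero for a shift amount equal to BW.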
8822   Z = DAG.getNode(ISD::AND, dl, AmtVT, Z,
8823                   DAG.getConstant(BitWidth - 1, dl, AmtVT));
8824   SDValue SubZ =
8825       DAG.getNode(ISD::SUB, dl, AmtVT, DAG.getConstant(BitWidth, dl, AmtVT), Z);
8826   X = DAG.getNode(PPCISD::SHL, dl, VT, X, IsFSHL ? Z : SubZ);
8827   Y = DAG.getNode(PPCISD::SRL, dl, VT, Y, IsFSHL ? SubZ : Z);
8828   return DAG.getNode(ISD::OR, dl, VT, X, Y);
8829 }
8830 
8831 //===----------------------------------------------------------------------===//
8832 // Vector related lowering.
8833 //
8834 
8835 /// getCanonicalConstSplat - Build a canonical splat immediate of Val with an
8836 /// element size of SplatSize. Cast the result to VT.
8837 static SDValue getCanonicalConstSplat(uint64_t Val, unsigned SplatSize, EVT VT,
8838                                       SelectionDAG &DAG, const SDLoc &dl) {
8839   static const MVT VTys[] = { // canonical VT to use for each size.
8840     MVT::v16i8, MVT::v8i16, MVT::Other, MVT::v4i32
8841   };
8842 
8843   EVT ReqVT = VT != MVT::Other ? VT : VTys[SplatSize-1];
8844 
8845   // For a splat with all ones, turn it to vspltisb 0xFF to canonicalize.
8846   if (Val == ((1LLU << (SplatSize * 8)) - 1)) {
8847     SplatSize = 1;
8848     Val = 0xFF;
8849   }
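  // For instance, Val = 0xFFFF with SplatSize = 2 (an all-ones halfword splat)
  // is canonicalized above to a one-byte splat of 0xFF (i.e. vspltisb -1); the
  // constant built below is then bitcast to the originally requested type.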
8850 
8851   EVT CanonicalVT = VTys[SplatSize-1];
8852 
8853   // Build a canonical splat for this value.
8854   return DAG.getBitcast(ReqVT, DAG.getConstant(Val, dl, CanonicalVT));
8855 }
8856 
8857 /// BuildIntrinsicOp - Return a unary operator intrinsic node with the
8858 /// specified intrinsic ID.
8859 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op, SelectionDAG &DAG,
8860                                 const SDLoc &dl, EVT DestVT = MVT::Other) {
8861   if (DestVT == MVT::Other) DestVT = Op.getValueType();
8862   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8863                      DAG.getConstant(IID, dl, MVT::i32), Op);
8864 }
8865 
8866 /// BuildIntrinsicOp - Return a binary operator intrinsic node with the
8867 /// specified intrinsic ID.
8868 static SDValue BuildIntrinsicOp(unsigned IID, SDValue LHS, SDValue RHS,
8869                                 SelectionDAG &DAG, const SDLoc &dl,
8870                                 EVT DestVT = MVT::Other) {
8871   if (DestVT == MVT::Other) DestVT = LHS.getValueType();
8872   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8873                      DAG.getConstant(IID, dl, MVT::i32), LHS, RHS);
8874 }
8875 
8876 /// BuildIntrinsicOp - Return a ternary operator intrinsic node with the
8877 /// specified intrinsic ID.
8878 static SDValue BuildIntrinsicOp(unsigned IID, SDValue Op0, SDValue Op1,
8879                                 SDValue Op2, SelectionDAG &DAG, const SDLoc &dl,
8880                                 EVT DestVT = MVT::Other) {
8881   if (DestVT == MVT::Other) DestVT = Op0.getValueType();
8882   return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, DestVT,
8883                      DAG.getConstant(IID, dl, MVT::i32), Op0, Op1, Op2);
8884 }
8885 
8886 /// BuildVSLDOI - Return a VECTOR_SHUFFLE that is a vsldoi of the specified
8887 /// amount.  The result has the specified value type.
8888 static SDValue BuildVSLDOI(SDValue LHS, SDValue RHS, unsigned Amt, EVT VT,
8889                            SelectionDAG &DAG, const SDLoc &dl) {
8890   // Force LHS/RHS to be the right type.
8891   LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, LHS);
8892   RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, RHS);
8893 
8894   int Ops[16];
8895   for (unsigned i = 0; i != 16; ++i)
8896     Ops[i] = i + Amt;
8897   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, LHS, RHS, Ops);
8898   return DAG.getNode(ISD::BITCAST, dl, VT, T);
8899 }
8900 
8901 /// Do we have an efficient pattern in a .td file for this node?
8902 ///
8903 /// \param V - pointer to the BuildVectorSDNode being matched
8904 /// \param HasDirectMove - does this subtarget have VSR <-> GPR direct moves?
8905 ///
8906 /// There are some patterns where it is beneficial to keep a BUILD_VECTOR
8907 /// node as a BUILD_VECTOR node rather than expanding it. The patterns where
8908 /// the opposite is true (expansion is beneficial) are:
8909 /// - The node builds a vector out of integers that are not 32 or 64-bits
8910 /// - The node builds a vector out of constants
8911 /// - The node is a "load-and-splat"
8912 /// In all other cases, we will choose to keep the BUILD_VECTOR.
8913 static bool haveEfficientBuildVectorPattern(BuildVectorSDNode *V,
8914                                             bool HasDirectMove,
8915                                             bool HasP8Vector) {
8916   EVT VecVT = V->getValueType(0);
8917   bool RightType = VecVT == MVT::v2f64 ||
8918     (HasP8Vector && VecVT == MVT::v4f32) ||
8919     (HasDirectMove && (VecVT == MVT::v2i64 || VecVT == MVT::v4i32));
8920   if (!RightType)
8921     return false;
8922 
8923   bool IsSplat = true;
8924   bool IsLoad = false;
8925   SDValue Op0 = V->getOperand(0);
8926 
8927   // This function is called in a block that confirms the node is not a constant
8928   // splat. So a constant BUILD_VECTOR here means the vector is built out of
8929   // different constants.
8930   if (V->isConstant())
8931     return false;
8932   for (int i = 0, e = V->getNumOperands(); i < e; ++i) {
8933     if (V->getOperand(i).isUndef())
8934       return false;
8935     // We want to expand nodes that represent load-and-splat even if the
8936     // loaded value is a floating point truncation or conversion to int.
8937     if (V->getOperand(i).getOpcode() == ISD::LOAD ||
8938         (V->getOperand(i).getOpcode() == ISD::FP_ROUND &&
8939          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8940         (V->getOperand(i).getOpcode() == ISD::FP_TO_SINT &&
8941          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD) ||
8942         (V->getOperand(i).getOpcode() == ISD::FP_TO_UINT &&
8943          V->getOperand(i).getOperand(0).getOpcode() == ISD::LOAD))
8944       IsLoad = true;
8945     // If the operands are different or the input is not a load and has more
8946     // uses than just this BV node, then it isn't a splat.
8947     if (V->getOperand(i) != Op0 ||
8948         (!IsLoad && !V->isOnlyUserOf(V->getOperand(i).getNode())))
8949       IsSplat = false;
8950   }
8951   return !(IsSplat && IsLoad);
8952 }
8953 
8954 // Lower BITCAST(f128, (build_pair i64, i64)) to BUILD_FP128.
8955 SDValue PPCTargetLowering::LowerBITCAST(SDValue Op, SelectionDAG &DAG) const {
8956 
8957   SDLoc dl(Op);
8958   SDValue Op0 = Op->getOperand(0);
8959 
8960   if ((Op.getValueType() != MVT::f128) ||
8961       (Op0.getOpcode() != ISD::BUILD_PAIR) ||
8962       (Op0.getOperand(0).getValueType() != MVT::i64) ||
8963       (Op0.getOperand(1).getValueType() != MVT::i64))
8964     return SDValue();
8965 
8966   return DAG.getNode(PPCISD::BUILD_FP128, dl, MVT::f128, Op0.getOperand(0),
8967                      Op0.getOperand(1));
8968 }
8969 
8970 static const SDValue *getNormalLoadInput(const SDValue &Op, bool &IsPermuted) {
8971   const SDValue *InputLoad = &Op;
8972   if (InputLoad->getOpcode() == ISD::BITCAST)
8973     InputLoad = &InputLoad->getOperand(0);
8974   if (InputLoad->getOpcode() == ISD::SCALAR_TO_VECTOR ||
8975       InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED) {
8976     IsPermuted = InputLoad->getOpcode() == PPCISD::SCALAR_TO_VECTOR_PERMUTED;
8977     InputLoad = &InputLoad->getOperand(0);
8978   }
8979   if (InputLoad->getOpcode() != ISD::LOAD)
8980     return nullptr;
8981   LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
8982   return ISD::isNormalLoad(LD) ? InputLoad : nullptr;
8983 }
8984 
8985 // Convert the argument APFloat to a single precision APFloat if there is no
8986 // loss in information during the conversion to single precision APFloat and the
8987 // resulting number is not a denormal number. Return true if successful.
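// For example, 2.5 converts to single precision exactly and is not a denormal,
// so it is rewritten in place and true is returned; 0.1 loses information in
// the conversion, so false is returned and the argument is left unchanged.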
8988 bool llvm::convertToNonDenormSingle(APFloat &ArgAPFloat) {
8989   APFloat APFloatToConvert = ArgAPFloat;
8990   bool LosesInfo = true;
8991   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
8992                            &LosesInfo);
8993   bool Success = (!LosesInfo && !APFloatToConvert.isDenormal());
8994   if (Success)
8995     ArgAPFloat = APFloatToConvert;
8996   return Success;
8997 }
8998 
8999 // Bitcast the argument APInt to a double and convert it to a single precision
9000 // APFloat, bitcast the APFloat to an APInt and assign it to the original
9001 // argument if there is no loss in information during the conversion from
9002 // double to single precision APFloat and the resulting number is not a denormal
9003 // number. Return true if successful.
9004 bool llvm::convertToNonDenormSingle(APInt &ArgAPInt) {
9005   double DpValue = ArgAPInt.bitsToDouble();
9006   APFloat APFloatDp(DpValue);
9007   bool Success = convertToNonDenormSingle(APFloatDp);
9008   if (Success)
9009     ArgAPInt = APFloatDp.bitcastToAPInt();
9010   return Success;
9011 }
9012 
9013 // Nondestructive check for convertToNonDenormSingle.
9014 bool llvm::checkConvertToNonDenormSingle(APFloat &ArgAPFloat) {
9015   // Only convert if it loses info, since XXSPLTIDP should
9016   // handle the other case.
9017   APFloat APFloatToConvert = ArgAPFloat;
9018   bool LosesInfo = true;
9019   APFloatToConvert.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
9020                            &LosesInfo);
9021 
9022   return (!LosesInfo && !APFloatToConvert.isDenormal());
9023 }
9024 
9025 // If this is a case we can't handle, return null and let the default
9026 // expansion code take care of it.  If we CAN select this case, and if it
9027 // selects to a single instruction, return Op.  Otherwise, if we can codegen
9028 // this case more efficiently than a constant pool load, lower it to the
9029 // sequence of ops that should be used.
9030 SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
9031                                              SelectionDAG &DAG) const {
9032   SDLoc dl(Op);
9033   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
9034   assert(BVN && "Expected a BuildVectorSDNode in LowerBUILD_VECTOR");
9035 
9036   // Check if this is a splat of a constant value.
9037   APInt APSplatBits, APSplatUndef;
9038   unsigned SplatBitSize;
9039   bool HasAnyUndefs;
9040   bool BVNIsConstantSplat =
9041       BVN->isConstantSplat(APSplatBits, APSplatUndef, SplatBitSize,
9042                            HasAnyUndefs, 0, !Subtarget.isLittleEndian());
9043 
9044   // If it is a splat of a double, check if we can shrink it to a 32 bit
9045   // non-denormal float which when converted back to double gives us the same
9046   // double. This is to exploit the XXSPLTIDP instruction.
9047   // If we lose precision, we use XXSPLTI32DX.
9048   if (BVNIsConstantSplat && (SplatBitSize == 64) &&
9049       Subtarget.hasPrefixInstrs()) {
9050     // Check the type first to short-circuit so we don't modify APSplatBits if
9051     // this block isn't executed.
9052     if ((Op->getValueType(0) == MVT::v2f64) &&
9053         convertToNonDenormSingle(APSplatBits)) {
9054       SDValue SplatNode = DAG.getNode(
9055           PPCISD::XXSPLTI_SP_TO_DP, dl, MVT::v2f64,
9056           DAG.getTargetConstant(APSplatBits.getZExtValue(), dl, MVT::i32));
9057       return DAG.getBitcast(Op.getValueType(), SplatNode);
9058     } else {
9059       // We may lose precision, so we have to use XXSPLTI32DX.
9060 
9061       uint32_t Hi =
9062           (uint32_t)((APSplatBits.getZExtValue() & 0xFFFFFFFF00000000LL) >> 32);
9063       uint32_t Lo =
9064           (uint32_t)(APSplatBits.getZExtValue() & 0xFFFFFFFF);
9065       SDValue SplatNode = DAG.getUNDEF(MVT::v2i64);
9066 
9067       if (!Hi || !Lo)
9068         // If either half is 0, then we should generate XXLXOR to set to 0.
9069         SplatNode = DAG.getTargetConstant(0, dl, MVT::v2i64);
9070 
9071       if (Hi)
9072         SplatNode = DAG.getNode(
9073             PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
9074             DAG.getTargetConstant(0, dl, MVT::i32),
9075             DAG.getTargetConstant(Hi, dl, MVT::i32));
9076 
9077       if (Lo)
9078         SplatNode =
9079             DAG.getNode(PPCISD::XXSPLTI32DX, dl, MVT::v2i64, SplatNode,
9080                         DAG.getTargetConstant(1, dl, MVT::i32),
9081                         DAG.getTargetConstant(Lo, dl, MVT::i32));
9082 
9083       return DAG.getBitcast(Op.getValueType(), SplatNode);
9084     }
9085   }
9086 
9087   if (!BVNIsConstantSplat || SplatBitSize > 32) {
9088 
9089     bool IsPermutedLoad = false;
9090     const SDValue *InputLoad =
9091         getNormalLoadInput(Op.getOperand(0), IsPermutedLoad);
9092     // Handle load-and-splat patterns as we have instructions that will do this
9093     // in one go.
9094     if (InputLoad && DAG.isSplatValue(Op, true)) {
9095       LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9096 
9097       // We have handling for 4 and 8 byte elements.
9098       unsigned ElementSize = LD->getMemoryVT().getScalarSizeInBits();
9099 
9100       // To check for a single use of this load, we have to count vector
9101       // width (128 bits) / ElementSize uses (since each operand of the
9102       // BUILD_VECTOR is a separate use of the value).
9103       unsigned NumUsesOfInputLD = 128 / ElementSize;
9104       for (SDValue BVInOp : Op->ops())
9105         if (BVInOp.isUndef())
9106           NumUsesOfInputLD--;
9107       assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
9108       if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
9109           ((Subtarget.hasVSX() && ElementSize == 64) ||
9110            (Subtarget.hasP9Vector() && ElementSize == 32))) {
9111         SDValue Ops[] = {
9112           LD->getChain(),    // Chain
9113           LD->getBasePtr(),  // Ptr
9114           DAG.getValueType(Op.getValueType()) // VT
9115         };
9116         SDValue LdSplt = DAG.getMemIntrinsicNode(
9117             PPCISD::LD_SPLAT, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
9118             Ops, LD->getMemoryVT(), LD->getMemOperand());
9119         // Replace all uses of the output chain of the original load with the
9120         // output chain of the new load.
9121         DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
9122                                       LdSplt.getValue(1));
9123         return LdSplt;
9124       }
9125     }
9126 
9127     // In 64-bit mode, BUILD_VECTOR nodes that are not constant splats of up to
9128     // 32 bits can be lowered to VSX instructions under certain conditions.
9129     // Without VSX, there is no pattern more efficient than expanding the node.
9130     if (Subtarget.hasVSX() && Subtarget.isPPC64() &&
9131         haveEfficientBuildVectorPattern(BVN, Subtarget.hasDirectMove(),
9132                                         Subtarget.hasP8Vector()))
9133       return Op;
9134     return SDValue();
9135   }
9136 
9137   uint64_t SplatBits = APSplatBits.getZExtValue();
9138   uint64_t SplatUndef = APSplatUndef.getZExtValue();
9139   unsigned SplatSize = SplatBitSize / 8;
9140 
9141   // First, handle single instruction cases.
9142 
9143   // All zeros?
9144   if (SplatBits == 0) {
9145     // Canonicalize all zero vectors to be v4i32.
9146     if (Op.getValueType() != MVT::v4i32 || HasAnyUndefs) {
9147       SDValue Z = DAG.getConstant(0, dl, MVT::v4i32);
9148       Op = DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Z);
9149     }
9150     return Op;
9151   }
9152 
9153   // We have XXSPLTIW for constant splats four bytes wide.
9154   // Given vector length is a multiple of 4, 2-byte splats can be replaced
9155   // with 4-byte splats. We replicate the SplatBits in case of 2-byte splat to
9156   // make a 4-byte splat element. For example: 2-byte splat of 0xABAB can be
9157   // turned into a 4-byte splat of 0xABABABAB.
9158   if (Subtarget.hasPrefixInstrs() && SplatSize == 2)
9159     return getCanonicalConstSplat(SplatBits | (SplatBits << 16), SplatSize * 2,
9160                                   Op.getValueType(), DAG, dl);
9161 
9162   if (Subtarget.hasPrefixInstrs() && SplatSize == 4)
9163     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9164                                   dl);
9165 
9166   // We have XXSPLTIB for constant splats one byte wide.
9167   if (Subtarget.hasP9Vector() && SplatSize == 1)
9168     return getCanonicalConstSplat(SplatBits, SplatSize, Op.getValueType(), DAG,
9169                                   dl);
9170 
9171   // If the sign extended value is in the range [-16,15], use VSPLTI[bhw].
9172   int32_t SextVal= (int32_t(SplatBits << (32-SplatBitSize)) >>
9173                     (32-SplatBitSize));
9174   if (SextVal >= -16 && SextVal <= 15)
9175     return getCanonicalConstSplat(SextVal, SplatSize, Op.getValueType(), DAG,
9176                                   dl);
9177 
9178   // Two instruction sequences.
9179 
9180   // If this value is in the range [-32,30] and is even, use:
9181   //     VSPLTI[bhw](val/2) + VSPLTI[bhw](val/2)
9182   // If this value is in the range [17,31] and is odd, use:
9183   //     VSPLTI[bhw](val-16) - VSPLTI[bhw](-16)
9184   // If this value is in the range [-31,-17] and is odd, use:
9185   //     VSPLTI[bhw](val+16) + VSPLTI[bhw](-16)
9186   // Note the last two are three-instruction sequences.
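  // For example, a splat of 20 across i32 elements satisfies the first form:
  // it can be built as vspltisw 10 followed by an add of the result to itself
  // (the VADD_SPLAT pseudo below is expanded into such a sequence later).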
9187   if (SextVal >= -32 && SextVal <= 31) {
9188     // To avoid having these optimizations undone by constant folding,
9189     // we convert to a pseudo that will be expanded later into one of
9190     // the above forms.
9191     SDValue Elt = DAG.getConstant(SextVal, dl, MVT::i32);
9192     EVT VT = (SplatSize == 1 ? MVT::v16i8 :
9193               (SplatSize == 2 ? MVT::v8i16 : MVT::v4i32));
9194     SDValue EltSize = DAG.getConstant(SplatSize, dl, MVT::i32);
9195     SDValue RetVal = DAG.getNode(PPCISD::VADD_SPLAT, dl, VT, Elt, EltSize);
9196     if (VT == Op.getValueType())
9197       return RetVal;
9198     else
9199       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), RetVal);
9200   }
9201 
9202   // If this is 0x8000_0000 x 4, turn into vspltisw + vslw.  If it is
9203   // 0x7FFF_FFFF x 4, turn it into not(0x8000_0000).  This is important
9204   // for fneg/fabs.
9205   if (SplatSize == 4 && SplatBits == (0x7FFFFFFF&~SplatUndef)) {
9206     // Make -1 and vspltisw -1:
9207     SDValue OnesV = getCanonicalConstSplat(-1, 4, MVT::v4i32, DAG, dl);
9208 
9209     // Make the VSLW intrinsic, computing 0x8000_0000.
9210     SDValue Res = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, OnesV,
9211                                    OnesV, DAG, dl);
9212 
9213     // xor by OnesV to invert it.
9214     Res = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Res, OnesV);
9215     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9216   }
9217 
9218   // Check to see if this is a wide variety of vsplti*, binop self cases.
9219   static const signed char SplatCsts[] = {
9220     -1, 1, -2, 2, -3, 3, -4, 4, -5, 5, -6, 6, -7, 7,
9221     -8, 8, -9, 9, -10, 10, -11, 11, -12, 12, -13, 13, 14, -14, 15, -15, -16
9222   };
9223 
9224   for (unsigned idx = 0; idx < array_lengthof(SplatCsts); ++idx) {
9225     // Indirect through the SplatCsts array so that we favor 'vsplti -1' for
9226     // cases which are ambiguous (e.g. formation of 0x8000_0000).  'vsplti -1'
9227     int i = SplatCsts[idx];
9228 
9229     // Figure out what shift amount will be used by altivec if shifted by i in
9230     // this splat size.
9231     unsigned TypeShiftAmt = i & (SplatBitSize-1);
9232 
9233     // vsplti + shl self.
9234     if (SextVal == (int)((unsigned)i << TypeShiftAmt)) {
9235       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9236       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9237         Intrinsic::ppc_altivec_vslb, Intrinsic::ppc_altivec_vslh, 0,
9238         Intrinsic::ppc_altivec_vslw
9239       };
9240       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9241       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9242     }
9243 
9244     // vsplti + srl self.
9245     if (SextVal == (int)((unsigned)i >> TypeShiftAmt)) {
9246       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9247       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9248         Intrinsic::ppc_altivec_vsrb, Intrinsic::ppc_altivec_vsrh, 0,
9249         Intrinsic::ppc_altivec_vsrw
9250       };
9251       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9252       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9253     }
9254 
9255     // vsplti + rol self.
9256     if (SextVal == (int)(((unsigned)i << TypeShiftAmt) |
9257                          ((unsigned)i >> (SplatBitSize-TypeShiftAmt)))) {
9258       SDValue Res = getCanonicalConstSplat(i, SplatSize, MVT::Other, DAG, dl);
9259       static const unsigned IIDs[] = { // Intrinsic to use for each size.
9260         Intrinsic::ppc_altivec_vrlb, Intrinsic::ppc_altivec_vrlh, 0,
9261         Intrinsic::ppc_altivec_vrlw
9262       };
9263       Res = BuildIntrinsicOp(IIDs[SplatSize-1], Res, Res, DAG, dl);
9264       return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
9265     }
9266 
9267     // t = vsplti c, result = vsldoi t, t, 1
9268     if (SextVal == (int)(((unsigned)i << 8) | (i < 0 ? 0xFF : 0))) {
9269       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9270       unsigned Amt = Subtarget.isLittleEndian() ? 15 : 1;
9271       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9272     }
9273     // t = vsplti c, result = vsldoi t, t, 2
9274     if (SextVal == (int)(((unsigned)i << 16) | (i < 0 ? 0xFFFF : 0))) {
9275       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9276       unsigned Amt = Subtarget.isLittleEndian() ? 14 : 2;
9277       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9278     }
9279     // t = vsplti c, result = vsldoi t, t, 3
9280     if (SextVal == (int)(((unsigned)i << 24) | (i < 0 ? 0xFFFFFF : 0))) {
9281       SDValue T = getCanonicalConstSplat(i, SplatSize, MVT::v16i8, DAG, dl);
9282       unsigned Amt = Subtarget.isLittleEndian() ? 13 : 3;
9283       return BuildVSLDOI(T, T, Amt, Op.getValueType(), DAG, dl);
9284     }
9285   }
9286 
9287   return SDValue();
9288 }
9289 
9290 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
9291 /// the specified operations to build the shuffle.
9292 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
9293                                       SDValue RHS, SelectionDAG &DAG,
9294                                       const SDLoc &dl) {
9295   unsigned OpNum = (PFEntry >> 26) & 0x0F;
9296   unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
9297   unsigned RHSID = (PFEntry >>  0) & ((1 << 13)-1);
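  // Each PFEntry packs a 4-bit opcode with two 13-bit operand IDs; an ID is
  // the base-9 encoding of the four output lane indices, e.g. 102 encodes
  // <0,1,2,3> (LHS unchanged) and 3382 encodes <4,5,6,7> (RHS unchanged),
  // which is what the OP_COPY handling below compares against.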
9298 
9299   enum {
9300     OP_COPY = 0,  // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
9301     OP_VMRGHW,
9302     OP_VMRGLW,
9303     OP_VSPLTISW0,
9304     OP_VSPLTISW1,
9305     OP_VSPLTISW2,
9306     OP_VSPLTISW3,
9307     OP_VSLDOI4,
9308     OP_VSLDOI8,
9309     OP_VSLDOI12
9310   };
9311 
9312   if (OpNum == OP_COPY) {
9313     if (LHSID == (1*9+2)*9+3) return LHS;
9314     assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
9315     return RHS;
9316   }
9317 
9318   SDValue OpLHS, OpRHS;
9319   OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
9320   OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
9321 
9322   int ShufIdxs[16];
9323   switch (OpNum) {
9324   default: llvm_unreachable("Unknown i32 permute!");
9325   case OP_VMRGHW:
9326     ShufIdxs[ 0] =  0; ShufIdxs[ 1] =  1; ShufIdxs[ 2] =  2; ShufIdxs[ 3] =  3;
9327     ShufIdxs[ 4] = 16; ShufIdxs[ 5] = 17; ShufIdxs[ 6] = 18; ShufIdxs[ 7] = 19;
9328     ShufIdxs[ 8] =  4; ShufIdxs[ 9] =  5; ShufIdxs[10] =  6; ShufIdxs[11] =  7;
9329     ShufIdxs[12] = 20; ShufIdxs[13] = 21; ShufIdxs[14] = 22; ShufIdxs[15] = 23;
9330     break;
9331   case OP_VMRGLW:
9332     ShufIdxs[ 0] =  8; ShufIdxs[ 1] =  9; ShufIdxs[ 2] = 10; ShufIdxs[ 3] = 11;
9333     ShufIdxs[ 4] = 24; ShufIdxs[ 5] = 25; ShufIdxs[ 6] = 26; ShufIdxs[ 7] = 27;
9334     ShufIdxs[ 8] = 12; ShufIdxs[ 9] = 13; ShufIdxs[10] = 14; ShufIdxs[11] = 15;
9335     ShufIdxs[12] = 28; ShufIdxs[13] = 29; ShufIdxs[14] = 30; ShufIdxs[15] = 31;
9336     break;
9337   case OP_VSPLTISW0:
9338     for (unsigned i = 0; i != 16; ++i)
9339       ShufIdxs[i] = (i&3)+0;
9340     break;
9341   case OP_VSPLTISW1:
9342     for (unsigned i = 0; i != 16; ++i)
9343       ShufIdxs[i] = (i&3)+4;
9344     break;
9345   case OP_VSPLTISW2:
9346     for (unsigned i = 0; i != 16; ++i)
9347       ShufIdxs[i] = (i&3)+8;
9348     break;
9349   case OP_VSPLTISW3:
9350     for (unsigned i = 0; i != 16; ++i)
9351       ShufIdxs[i] = (i&3)+12;
9352     break;
9353   case OP_VSLDOI4:
9354     return BuildVSLDOI(OpLHS, OpRHS, 4, OpLHS.getValueType(), DAG, dl);
9355   case OP_VSLDOI8:
9356     return BuildVSLDOI(OpLHS, OpRHS, 8, OpLHS.getValueType(), DAG, dl);
9357   case OP_VSLDOI12:
9358     return BuildVSLDOI(OpLHS, OpRHS, 12, OpLHS.getValueType(), DAG, dl);
9359   }
9360   EVT VT = OpLHS.getValueType();
9361   OpLHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpLHS);
9362   OpRHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OpRHS);
9363   SDValue T = DAG.getVectorShuffle(MVT::v16i8, dl, OpLHS, OpRHS, ShufIdxs);
9364   return DAG.getNode(ISD::BITCAST, dl, VT, T);
9365 }
9366 
9367 /// lowerToVINSERTB - Return the SDValue if this VECTOR_SHUFFLE can be handled
9368 /// by the VINSERTB instruction introduced in ISA 3.0, else just return default
9369 /// SDValue.
9370 SDValue PPCTargetLowering::lowerToVINSERTB(ShuffleVectorSDNode *N,
9371                                            SelectionDAG &DAG) const {
9372   const unsigned BytesInVector = 16;
9373   bool IsLE = Subtarget.isLittleEndian();
9374   SDLoc dl(N);
9375   SDValue V1 = N->getOperand(0);
9376   SDValue V2 = N->getOperand(1);
9377   unsigned ShiftElts = 0, InsertAtByte = 0;
9378   bool Swap = false;
9379 
9380   // Shifts required to get the byte we want at element 7.
9381   unsigned LittleEndianShifts[] = {8, 7,  6,  5,  4,  3,  2,  1,
9382                                    0, 15, 14, 13, 12, 11, 10, 9};
9383   unsigned BigEndianShifts[] = {9, 10, 11, 12, 13, 14, 15, 0,
9384                                 1, 2,  3,  4,  5,  6,  7,  8};
9385 
9386   ArrayRef<int> Mask = N->getMask();
9387   int OriginalOrder[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
9388 
9389   // For each mask element, find out if we're just inserting something
9390   // from V2 into V1 or vice versa.
9391   // Possible permutations inserting an element from V2 into V1:
9392   //   X, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9393   //   0, X, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
9394   //   ...
9395   //   0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, X
9396   // Inserting from V1 into V2 will be similar, except mask range will be
9397   // [16,31].
9398 
9399   bool FoundCandidate = false;
9400   // If both vector operands for the shuffle are the same vector, the mask
9401   // will contain only elements from the first one and the second one will be
9402   // undef.
9403   unsigned VINSERTBSrcElem = IsLE ? 8 : 7;
9404   // Go through the mask of half-words to find an element that's being moved
9405   // from one vector to the other.
9406   for (unsigned i = 0; i < BytesInVector; ++i) {
9407     unsigned CurrentElement = Mask[i];
9408     // If 2nd operand is undefined, we should only look for element 7 in the
9409     // Mask.
9410     if (V2.isUndef() && CurrentElement != VINSERTBSrcElem)
9411       continue;
9412 
9413     bool OtherElementsInOrder = true;
9414     // Examine the other elements in the Mask to see if they're in original
9415     // order.
9416     for (unsigned j = 0; j < BytesInVector; ++j) {
9417       if (j == i)
9418         continue;
9419       // If CurrentElement is from V1 [0,15], we expect the rest of the Mask to
9420       // be from V2 [16,31] and vice versa, unless the 2nd operand is undefined,
9421       // in which case we always assume we're picking from the 1st operand.
9422       int MaskOffset =
9423           (!V2.isUndef() && CurrentElement < BytesInVector) ? BytesInVector : 0;
9424       if (Mask[j] != OriginalOrder[j] + MaskOffset) {
9425         OtherElementsInOrder = false;
9426         break;
9427       }
9428     }
9429     // If other elements are in original order, we record the number of shifts
9430     // we need to get the element we want into element 7. Also record which byte
9431     // in the vector we should insert into.
9432     if (OtherElementsInOrder) {
9433       // If 2nd operand is undefined, we assume no shifts and no swapping.
9434       if (V2.isUndef()) {
9435         ShiftElts = 0;
9436         Swap = false;
9437       } else {
9438         // Only need the last 4-bits for shifts because operands will be swapped if CurrentElement is >= 2^4.
9439         ShiftElts = IsLE ? LittleEndianShifts[CurrentElement & 0xF]
9440                          : BigEndianShifts[CurrentElement & 0xF];
9441         Swap = CurrentElement < BytesInVector;
9442       }
9443       InsertAtByte = IsLE ? BytesInVector - (i + 1) : i;
9444       FoundCandidate = true;
9445       break;
9446     }
9447   }
9448 
9449   if (!FoundCandidate)
9450     return SDValue();
9451 
9452   // Candidate found, construct the proper SDAG sequence with VINSERTB,
9453   // optionally with VECSHL if shift is required.
9454   if (Swap)
9455     std::swap(V1, V2);
9456   if (V2.isUndef())
9457     V2 = V1;
9458   if (ShiftElts) {
9459     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9460                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9461     return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, Shl,
9462                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
9463   }
9464   return DAG.getNode(PPCISD::VECINSERT, dl, MVT::v16i8, V1, V2,
9465                      DAG.getConstant(InsertAtByte, dl, MVT::i32));
9466 }
9467 
9468 /// lowerToVINSERTH - Return the SDValue if this VECTOR_SHUFFLE can be handled
9469 /// by the VINSERTH instruction introduced in ISA 3.0, else just return default
9470 /// SDValue.
9471 SDValue PPCTargetLowering::lowerToVINSERTH(ShuffleVectorSDNode *N,
9472                                            SelectionDAG &DAG) const {
9473   const unsigned NumHalfWords = 8;
9474   const unsigned BytesInVector = NumHalfWords * 2;
9475   // Check that the shuffle is on half-words.
9476   if (!isNByteElemShuffleMask(N, 2, 1))
9477     return SDValue();
9478 
9479   bool IsLE = Subtarget.isLittleEndian();
9480   SDLoc dl(N);
9481   SDValue V1 = N->getOperand(0);
9482   SDValue V2 = N->getOperand(1);
9483   unsigned ShiftElts = 0, InsertAtByte = 0;
9484   bool Swap = false;
9485 
9486   // Shifts required to get the half-word we want at element 3.
9487   unsigned LittleEndianShifts[] = {4, 3, 2, 1, 0, 7, 6, 5};
9488   unsigned BigEndianShifts[] = {5, 6, 7, 0, 1, 2, 3, 4};
9489 
9490   uint32_t Mask = 0;
9491   uint32_t OriginalOrderLow = 0x1234567;
9492   uint32_t OriginalOrderHigh = 0x89ABCDEF;
9493   // Now we look at mask elements 0,2,4,6,8,10,12,14.  Pack the mask into a
9494   // 32-bit space, only need 4-bit nibbles per element.
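  // For example, the identity byte mask <0, 1, ..., 15> has half-word indices
  // 0..7 and packs to 0x01234567, i.e. OriginalOrderLow.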
9495   for (unsigned i = 0; i < NumHalfWords; ++i) {
9496     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9497     Mask |= ((uint32_t)(N->getMaskElt(i * 2) / 2) << MaskShift);
9498   }
9499 
9500   // For each mask element, find out if we're just inserting something
9501   // from V2 into V1 or vice versa.  Possible permutations inserting an element
9502   // from V2 into V1:
9503   //   X, 1, 2, 3, 4, 5, 6, 7
9504   //   0, X, 2, 3, 4, 5, 6, 7
9505   //   0, 1, X, 3, 4, 5, 6, 7
9506   //   0, 1, 2, X, 4, 5, 6, 7
9507   //   0, 1, 2, 3, X, 5, 6, 7
9508   //   0, 1, 2, 3, 4, X, 6, 7
9509   //   0, 1, 2, 3, 4, 5, X, 7
9510   //   0, 1, 2, 3, 4, 5, 6, X
9511   //   Inserting from V1 into V2 will be similar, except the mask range will be [8,15].
9512 
9513   bool FoundCandidate = false;
9514   // Go through the mask of half-words to find an element that's being moved
9515   // from one vector to the other.
9516   for (unsigned i = 0; i < NumHalfWords; ++i) {
9517     unsigned MaskShift = (NumHalfWords - 1 - i) * 4;
9518     uint32_t MaskOneElt = (Mask >> MaskShift) & 0xF;
9519     uint32_t MaskOtherElts = ~(0xF << MaskShift);
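    // MaskOneElt is the nibble for half-word i; MaskOtherElts clears that
    // nibble so the remaining elements can be checked against the expected
    // order.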
9520     uint32_t TargetOrder = 0x0;
9521 
9522     // If both vector operands for the shuffle are the same vector, the mask
9523     // will contain only elements from the first one and the second one will be
9524     // undef.
9525     if (V2.isUndef()) {
9526       ShiftElts = 0;
9527       unsigned VINSERTHSrcElem = IsLE ? 4 : 3;
9528       TargetOrder = OriginalOrderLow;
9529       Swap = false;
9530       // Skip if this is not the correct element or the mask of the other
9531       // elements doesn't match our expected order.
9532       if (MaskOneElt == VINSERTHSrcElem &&
9533           (Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9534         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9535         FoundCandidate = true;
9536         break;
9537       }
9538     } else { // If both operands are defined.
9539       // Target order is [8,15] if the current mask is between [0,7].
9540       TargetOrder =
9541           (MaskOneElt < NumHalfWords) ? OriginalOrderHigh : OriginalOrderLow;
9542       // Skip if the mask of the other elements doesn't match our expected order.
9543       if ((Mask & MaskOtherElts) == (TargetOrder & MaskOtherElts)) {
9544         // We only need the last 3 bits for the number of shifts.
9545         ShiftElts = IsLE ? LittleEndianShifts[MaskOneElt & 0x7]
9546                          : BigEndianShifts[MaskOneElt & 0x7];
9547         InsertAtByte = IsLE ? BytesInVector - (i + 1) * 2 : i * 2;
9548         Swap = MaskOneElt < NumHalfWords;
9549         FoundCandidate = true;
9550         break;
9551       }
9552     }
9553   }
9554 
9555   if (!FoundCandidate)
9556     return SDValue();
9557 
9558   // Candidate found, construct the proper SDAG sequence with VINSERTH,
9559   // optionally with VECSHL if shift is required.
9560   if (Swap)
9561     std::swap(V1, V2);
9562   if (V2.isUndef())
9563     V2 = V1;
9564   SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9565   if (ShiftElts) {
9566     // Double ShiftElts because we're left shifting on v16i8 type.
9567     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v16i8, V2, V2,
9568                               DAG.getConstant(2 * ShiftElts, dl, MVT::i32));
9569     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, Shl);
9570     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9571                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9572     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9573   }
9574   SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
9575   SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v8i16, Conv1, Conv2,
9576                             DAG.getConstant(InsertAtByte, dl, MVT::i32));
9577   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9578 }
9579 
9580 /// lowerToXXSPLTI32DX - Return the SDValue if this VECTOR_SHUFFLE can be
9581 /// handled by the XXSPLTI32DX instruction introduced in ISA 3.1, otherwise
9582 /// return the default SDValue.
9583 SDValue PPCTargetLowering::lowerToXXSPLTI32DX(ShuffleVectorSDNode *SVN,
9584                                               SelectionDAG &DAG) const {
9585   // The LHS and RHS may be bitcasts to v16i8 as we canonicalize shuffles
9586   // to v16i8. Peek through the bitcasts to get the actual operands.
9587   SDValue LHS = peekThroughBitcasts(SVN->getOperand(0));
9588   SDValue RHS = peekThroughBitcasts(SVN->getOperand(1));
9589 
9590   auto ShuffleMask = SVN->getMask();
9591   SDValue VecShuffle(SVN, 0);
9592   SDLoc DL(SVN);
9593 
9594   // Check that we have a four byte shuffle.
9595   if (!isNByteElemShuffleMask(SVN, 4, 1))
9596     return SDValue();
9597 
9598   // Canonicalize so that the RHS is a BUILD_VECTOR when lowering to xxsplti32dx.
9599   if (RHS->getOpcode() != ISD::BUILD_VECTOR) {
9600     std::swap(LHS, RHS);
9601     VecShuffle = DAG.getCommutedVectorShuffle(*SVN);
9602     ShuffleMask = cast<ShuffleVectorSDNode>(VecShuffle)->getMask();
9603   }
9604 
9605   // Ensure that the RHS is a vector of constants.
9606   BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
9607   if (!BVN)
9608     return SDValue();
9609 
9610   // Check if RHS is a splat of 4-bytes (or smaller).
9611   APInt APSplatValue, APSplatUndef;
9612   unsigned SplatBitSize;
9613   bool HasAnyUndefs;
9614   if (!BVN->isConstantSplat(APSplatValue, APSplatUndef, SplatBitSize,
9615                             HasAnyUndefs, 0, !Subtarget.isLittleEndian()) ||
9616       SplatBitSize > 32)
9617     return SDValue();
9618 
9619   // Check that the shuffle mask matches the semantics of XXSPLTI32DX.
9620   // The instruction splats a constant C into two words of the source vector
9621   // producing { C, Unchanged, C, Unchanged } or { Unchanged, C, Unchanged, C }.
9622   // Thus we check that the shuffle mask is the equivalent of
9623   // <0, [4-7], 2, [4-7]> or <[4-7], 1, [4-7], 3> respectively.
9624   // Note: the check above of isNByteElemShuffleMask() ensures that the bytes
9625   // within each word are consecutive, so we only need to check the first byte.
9626   SDValue Index;
9627   bool IsLE = Subtarget.isLittleEndian();
9628   if ((ShuffleMask[0] == 0 && ShuffleMask[8] == 8) &&
9629       (ShuffleMask[4] % 4 == 0 && ShuffleMask[12] % 4 == 0 &&
9630        ShuffleMask[4] > 15 && ShuffleMask[12] > 15))
9631     Index = DAG.getTargetConstant(IsLE ? 0 : 1, DL, MVT::i32);
9632   else if ((ShuffleMask[4] == 4 && ShuffleMask[12] == 12) &&
9633            (ShuffleMask[0] % 4 == 0 && ShuffleMask[8] % 4 == 0 &&
9634             ShuffleMask[0] > 15 && ShuffleMask[8] > 15))
9635     Index = DAG.getTargetConstant(IsLE ? 1 : 0, DL, MVT::i32);
9636   else
9637     return SDValue();
9638 
9639   // If the splat is narrower than 32 bits, we need to get the 32-bit value
9640   // for XXSPLTI32DX.
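  // For example, an 8-bit splat value of 0xAB widens to 0xABAB and then to
  // 0xABABABAB.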
9641   unsigned SplatVal = APSplatValue.getZExtValue();
9642   for (; SplatBitSize < 32; SplatBitSize <<= 1)
9643     SplatVal |= (SplatVal << SplatBitSize);
9644 
9645   SDValue SplatNode = DAG.getNode(
9646       PPCISD::XXSPLTI32DX, DL, MVT::v2i64, DAG.getBitcast(MVT::v2i64, LHS),
9647       Index, DAG.getTargetConstant(SplatVal, DL, MVT::i32));
9648   return DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, SplatNode);
9649 }
9650 
9651 /// LowerROTL - Custom lowering for ROTL(v1i128) to vector_shuffle(v16i8).
9652 /// We lower ROTL(v1i128) to vector_shuffle(v16i8) only if the shift amount is
9653 /// a multiple of 8. Otherwise we convert it to a scalar i128 rotation,
9654 /// i.e. (or (shl x, C1), (srl x, 128-C1)).
9655 SDValue PPCTargetLowering::LowerROTL(SDValue Op, SelectionDAG &DAG) const {
9656   assert(Op.getOpcode() == ISD::ROTL && "Should only be called for ISD::ROTL");
9657   assert(Op.getValueType() == MVT::v1i128 &&
9658          "Only set v1i128 as custom, other type shouldn't reach here!");
9659   SDLoc dl(Op);
9660   SDValue N0 = peekThroughBitcasts(Op.getOperand(0));
9661   SDValue N1 = peekThroughBitcasts(Op.getOperand(1));
9662   unsigned SHLAmt = N1.getConstantOperandVal(0);
9663   if (SHLAmt % 8 == 0) {
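    // Build the equivalent byte-rotation mask; e.g. SHLAmt == 16 rotates the
    // identity mask to <2, 3, ..., 15, 0, 1>.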
9664     SmallVector<int, 16> Mask(16, 0);
9665     std::iota(Mask.begin(), Mask.end(), 0);
9666     std::rotate(Mask.begin(), Mask.begin() + SHLAmt / 8, Mask.end());
9667     if (SDValue Shuffle =
9668             DAG.getVectorShuffle(MVT::v16i8, dl, DAG.getBitcast(MVT::v16i8, N0),
9669                                  DAG.getUNDEF(MVT::v16i8), Mask))
9670       return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, Shuffle);
9671   }
9672   SDValue ArgVal = DAG.getBitcast(MVT::i128, N0);
9673   SDValue SHLOp = DAG.getNode(ISD::SHL, dl, MVT::i128, ArgVal,
9674                               DAG.getConstant(SHLAmt, dl, MVT::i32));
9675   SDValue SRLOp = DAG.getNode(ISD::SRL, dl, MVT::i128, ArgVal,
9676                               DAG.getConstant(128 - SHLAmt, dl, MVT::i32));
9677   SDValue OROp = DAG.getNode(ISD::OR, dl, MVT::i128, SHLOp, SRLOp);
9678   return DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, OROp);
9679 }
9680 
9681 /// LowerVECTOR_SHUFFLE - Return the code we lower for VECTOR_SHUFFLE.  If this
9682 /// is a shuffle we can handle in a single instruction, return it.  Otherwise,
9683 /// return the code it can be lowered into.  Worst case, it can always be
9684 /// lowered into a vperm.
9685 SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
9686                                                SelectionDAG &DAG) const {
9687   SDLoc dl(Op);
9688   SDValue V1 = Op.getOperand(0);
9689   SDValue V2 = Op.getOperand(1);
9690   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9691 
9692   // Any nodes that were combined in the target-independent combiner prior
9693   // to vector legalization will not be sent to the target combine. Try to
9694   // combine it here.
9695   if (SDValue NewShuffle = combineVectorShuffle(SVOp, DAG)) {
9696     if (!isa<ShuffleVectorSDNode>(NewShuffle))
9697       return NewShuffle;
9698     Op = NewShuffle;
9699     SVOp = cast<ShuffleVectorSDNode>(Op);
9700     V1 = Op.getOperand(0);
9701     V2 = Op.getOperand(1);
9702   }
9703   EVT VT = Op.getValueType();
9704   bool isLittleEndian = Subtarget.isLittleEndian();
9705 
9706   unsigned ShiftElts, InsertAtByte;
9707   bool Swap = false;
9708 
9709   // If this is a load-and-splat, we can do that with a single instruction
9710   // in some cases. However if the load has multiple uses, we don't want to
9711   // combine it because that will just produce multiple loads.
9712   bool IsPermutedLoad = false;
9713   const SDValue *InputLoad = getNormalLoadInput(V1, IsPermutedLoad);
9714   if (InputLoad && Subtarget.hasVSX() && V2.isUndef() &&
9715       (PPC::isSplatShuffleMask(SVOp, 4) || PPC::isSplatShuffleMask(SVOp, 8)) &&
9716       InputLoad->hasOneUse()) {
9717     bool IsFourByte = PPC::isSplatShuffleMask(SVOp, 4);
9718     int SplatIdx =
9719       PPC::getSplatIdxForPPCMnemonics(SVOp, IsFourByte ? 4 : 8, DAG);
9720 
9721     // The splat index for permuted loads will be in the left half of the vector
9722     // which is strictly wider than the loaded value by 8 bytes. So we need to
9723     // adjust the splat index to point to the correct address in memory.
9724     if (IsPermutedLoad) {
9725       assert((isLittleEndian || IsFourByte) &&
9726              "Unexpected size for permuted load on big endian target");
9727       SplatIdx += IsFourByte ? 2 : 1;
9728       assert((SplatIdx < (IsFourByte ? 4 : 2)) &&
9729              "Splat of a value outside of the loaded memory");
9730     }
9731 
9732     LoadSDNode *LD = cast<LoadSDNode>(*InputLoad);
9733     // For 4-byte load-and-splat, we need Power9.
9734     if ((IsFourByte && Subtarget.hasP9Vector()) || !IsFourByte) {
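      // Compute the byte offset of the element to load and splat; e.g. for a
      // 4-byte splat with SplatIdx == 1 the offset is 4 on big-endian and 8 on
      // little-endian.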
9735       uint64_t Offset = 0;
9736       if (IsFourByte)
9737         Offset = isLittleEndian ? (3 - SplatIdx) * 4 : SplatIdx * 4;
9738       else
9739         Offset = isLittleEndian ? (1 - SplatIdx) * 8 : SplatIdx * 8;
9740 
9741       // If the width of the load is the same as the width of the splat,
9742       // loading with an offset would load the wrong memory.
9743       if (LD->getValueType(0).getSizeInBits() == (IsFourByte ? 32 : 64))
9744         Offset = 0;
9745 
9746       SDValue BasePtr = LD->getBasePtr();
9747       if (Offset != 0)
9748         BasePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
9749                               BasePtr, DAG.getIntPtrConstant(Offset, dl));
9750       SDValue Ops[] = {
9751         LD->getChain(),    // Chain
9752         BasePtr,           // BasePtr
9753         DAG.getValueType(Op.getValueType()) // VT
9754       };
9755       SDVTList VTL =
9756         DAG.getVTList(IsFourByte ? MVT::v4i32 : MVT::v2i64, MVT::Other);
9757       SDValue LdSplt =
9758         DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
9759                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
9760       DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
9761       if (LdSplt.getValueType() != SVOp->getValueType(0))
9762         LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
9763       return LdSplt;
9764     }
9765   }
9766   if (Subtarget.hasP9Vector() &&
9767       PPC::isXXINSERTWMask(SVOp, ShiftElts, InsertAtByte, Swap,
9768                            isLittleEndian)) {
9769     if (Swap)
9770       std::swap(V1, V2);
9771     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9772     SDValue Conv2 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2);
9773     if (ShiftElts) {
9774       SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv2, Conv2,
9775                                 DAG.getConstant(ShiftElts, dl, MVT::i32));
9776       SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Shl,
9777                                 DAG.getConstant(InsertAtByte, dl, MVT::i32));
9778       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9779     }
9780     SDValue Ins = DAG.getNode(PPCISD::VECINSERT, dl, MVT::v4i32, Conv1, Conv2,
9781                               DAG.getConstant(InsertAtByte, dl, MVT::i32));
9782     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Ins);
9783   }
9784 
9785   if (Subtarget.hasPrefixInstrs()) {
9786     SDValue SplatInsertNode;
9787     if ((SplatInsertNode = lowerToXXSPLTI32DX(SVOp, DAG)))
9788       return SplatInsertNode;
9789   }
9790 
9791   if (Subtarget.hasP9Altivec()) {
9792     SDValue NewISDNode;
9793     if ((NewISDNode = lowerToVINSERTH(SVOp, DAG)))
9794       return NewISDNode;
9795 
9796     if ((NewISDNode = lowerToVINSERTB(SVOp, DAG)))
9797       return NewISDNode;
9798   }
9799 
9800   if (Subtarget.hasVSX() &&
9801       PPC::isXXSLDWIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9802     if (Swap)
9803       std::swap(V1, V2);
9804     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9805     SDValue Conv2 =
9806         DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V2.isUndef() ? V1 : V2);
9807 
9808     SDValue Shl = DAG.getNode(PPCISD::VECSHL, dl, MVT::v4i32, Conv1, Conv2,
9809                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9810     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Shl);
9811   }
9812 
9813   if (Subtarget.hasVSX() &&
9814     PPC::isXXPERMDIShuffleMask(SVOp, ShiftElts, Swap, isLittleEndian)) {
9815     if (Swap)
9816       std::swap(V1, V2);
9817     SDValue Conv1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9818     SDValue Conv2 =
9819         DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2.isUndef() ? V1 : V2);
9820 
9821     SDValue PermDI = DAG.getNode(PPCISD::XXPERMDI, dl, MVT::v2i64, Conv1, Conv2,
9822                               DAG.getConstant(ShiftElts, dl, MVT::i32));
9823     return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, PermDI);
9824   }
9825 
9826   if (Subtarget.hasP9Vector()) {
9827     if (PPC::isXXBRHShuffleMask(SVOp)) {
9828       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
9829       SDValue ReveHWord = DAG.getNode(ISD::BSWAP, dl, MVT::v8i16, Conv);
9830       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveHWord);
9831     } else if (PPC::isXXBRWShuffleMask(SVOp)) {
9832       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9833       SDValue ReveWord = DAG.getNode(ISD::BSWAP, dl, MVT::v4i32, Conv);
9834       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveWord);
9835     } else if (PPC::isXXBRDShuffleMask(SVOp)) {
9836       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1);
9837       SDValue ReveDWord = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Conv);
9838       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveDWord);
9839     } else if (PPC::isXXBRQShuffleMask(SVOp)) {
9840       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v1i128, V1);
9841       SDValue ReveQWord = DAG.getNode(ISD::BSWAP, dl, MVT::v1i128, Conv);
9842       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, ReveQWord);
9843     }
9844   }
9845 
9846   if (Subtarget.hasVSX()) {
9847     if (V2.isUndef() && PPC::isSplatShuffleMask(SVOp, 4)) {
9848       int SplatIdx = PPC::getSplatIdxForPPCMnemonics(SVOp, 4, DAG);
9849 
9850       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, V1);
9851       SDValue Splat = DAG.getNode(PPCISD::XXSPLT, dl, MVT::v4i32, Conv,
9852                                   DAG.getConstant(SplatIdx, dl, MVT::i32));
9853       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Splat);
9854     }
9855 
9856     // Left shifts of 8 bytes are actually swaps. Convert accordingly.
9857     if (V2.isUndef() && PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) == 8) {
9858       SDValue Conv = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
9859       SDValue Swap = DAG.getNode(PPCISD::SWAP_NO_CHAIN, dl, MVT::v2f64, Conv);
9860       return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, Swap);
9861     }
9862   }
9863 
9864   // Cases that are handled by instructions that take permute immediates
9865   // (such as vsplt*) should be left as VECTOR_SHUFFLE nodes so they can be
9866   // selected by the instruction selector.
9867   if (V2.isUndef()) {
9868     if (PPC::isSplatShuffleMask(SVOp, 1) ||
9869         PPC::isSplatShuffleMask(SVOp, 2) ||
9870         PPC::isSplatShuffleMask(SVOp, 4) ||
9871         PPC::isVPKUWUMShuffleMask(SVOp, 1, DAG) ||
9872         PPC::isVPKUHUMShuffleMask(SVOp, 1, DAG) ||
9873         PPC::isVSLDOIShuffleMask(SVOp, 1, DAG) != -1 ||
9874         PPC::isVMRGLShuffleMask(SVOp, 1, 1, DAG) ||
9875         PPC::isVMRGLShuffleMask(SVOp, 2, 1, DAG) ||
9876         PPC::isVMRGLShuffleMask(SVOp, 4, 1, DAG) ||
9877         PPC::isVMRGHShuffleMask(SVOp, 1, 1, DAG) ||
9878         PPC::isVMRGHShuffleMask(SVOp, 2, 1, DAG) ||
9879         PPC::isVMRGHShuffleMask(SVOp, 4, 1, DAG) ||
9880         (Subtarget.hasP8Altivec() && (
9881          PPC::isVPKUDUMShuffleMask(SVOp, 1, DAG) ||
9882          PPC::isVMRGEOShuffleMask(SVOp, true, 1, DAG) ||
9883          PPC::isVMRGEOShuffleMask(SVOp, false, 1, DAG)))) {
9884       return Op;
9885     }
9886   }
9887 
9888   // Altivec has a variety of "shuffle immediates" that take two vector inputs
9889   // and produce a fixed permutation.  If any of these match, do not lower to
9890   // VPERM.
9891   unsigned int ShuffleKind = isLittleEndian ? 2 : 0;
9892   if (PPC::isVPKUWUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9893       PPC::isVPKUHUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9894       PPC::isVSLDOIShuffleMask(SVOp, ShuffleKind, DAG) != -1 ||
9895       PPC::isVMRGLShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9896       PPC::isVMRGLShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9897       PPC::isVMRGLShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9898       PPC::isVMRGHShuffleMask(SVOp, 1, ShuffleKind, DAG) ||
9899       PPC::isVMRGHShuffleMask(SVOp, 2, ShuffleKind, DAG) ||
9900       PPC::isVMRGHShuffleMask(SVOp, 4, ShuffleKind, DAG) ||
9901       (Subtarget.hasP8Altivec() && (
9902        PPC::isVPKUDUMShuffleMask(SVOp, ShuffleKind, DAG) ||
9903        PPC::isVMRGEOShuffleMask(SVOp, true, ShuffleKind, DAG) ||
9904        PPC::isVMRGEOShuffleMask(SVOp, false, ShuffleKind, DAG))))
9905     return Op;
9906 
9907   // Check to see if this is a shuffle of 4-byte values.  If so, we can use our
9908   // perfect shuffle table to emit an optimal matching sequence.
9909   ArrayRef<int> PermMask = SVOp->getMask();
9910 
9911   unsigned PFIndexes[4];
9912   bool isFourElementShuffle = true;
9913   for (unsigned i = 0; i != 4 && isFourElementShuffle; ++i) { // Element number
9914     unsigned EltNo = 8;   // Start out undef.
9915     for (unsigned j = 0; j != 4; ++j) {  // Intra-element byte.
9916       if (PermMask[i*4+j] < 0)
9917         continue;   // Undef, ignore it.
9918 
9919       unsigned ByteSource = PermMask[i*4+j];
9920       if ((ByteSource & 3) != j) {
9921         isFourElementShuffle = false;
9922         break;
9923       }
9924 
9925       if (EltNo == 8) {
9926         EltNo = ByteSource/4;
9927       } else if (EltNo != ByteSource/4) {
9928         isFourElementShuffle = false;
9929         break;
9930       }
9931     }
9932     PFIndexes[i] = EltNo;
9933   }
9934 
9935   // If this shuffle can be expressed as a shuffle of 4-byte elements, use the
9936   // perfect shuffle vector to determine if it is cost effective to do this as
9937   // discrete instructions, or whether we should use a vperm.
9938   // For now, we skip this for little endian until such time as we have a
9939   // little-endian perfect shuffle table.
9940   if (isFourElementShuffle && !isLittleEndian) {
9941     // Compute the index in the perfect shuffle table.
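    // Each PFIndex is in the range [0, 8] (8 means undef), so the four indexes
    // pack into a base-9 number.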
9942     unsigned PFTableIndex =
9943       PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
9944 
9945     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
9946     unsigned Cost  = (PFEntry >> 30);
9947 
9948     // Determining when to avoid vperm is tricky.  Many things affect the cost
9949     // of vperm, particularly how many times the perm mask needs to be computed.
9950     // For example, if the perm mask can be hoisted out of a loop or is already
9951     // used (perhaps because there are multiple permutes with the same shuffle
9952     // mask?) the vperm has a cost of 1.  OTOH, hoisting the permute mask out of
9953     // the loop requires an extra register.
9954     //
9955     // As a compromise, we only emit discrete instructions if the shuffle can be
9956     // generated in 3 or fewer operations.  When we have loop information
9957     // available, if this block is within a loop, we should avoid using vperm
9958     // for 3-operation perms and use a constant pool load instead.
9959     if (Cost < 3)
9960       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
9961   }
9962 
9963   // Lower this to a VPERM(V1, V2, V3) expression, where V3 is a constant
9964   // vector that will get spilled to the constant pool.
9965   if (V2.isUndef()) V2 = V1;
9966 
9967   // The SHUFFLE_VECTOR mask is almost exactly what we want for vperm, except
9968   // that it is in input element units, not in bytes.  Convert now.
9969 
9970   // For little endian, the order of the input vectors is reversed, and
9971   // the permutation mask is complemented with respect to 31.  This is
9972   // necessary to produce proper semantics with the big-endian-biased vperm
9973   // instruction.
9974   EVT EltVT = V1.getValueType().getVectorElementType();
9975   unsigned BytesPerElement = EltVT.getSizeInBits()/8;
9976 
9977   SmallVector<SDValue, 16> ResultMask;
9978   for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
9979     unsigned SrcElt = PermMask[i] < 0 ? 0 : PermMask[i];
9980 
9981     for (unsigned j = 0; j != BytesPerElement; ++j)
9982       if (isLittleEndian)
9983         ResultMask.push_back(DAG.getConstant(31 - (SrcElt*BytesPerElement + j),
9984                                              dl, MVT::i32));
9985       else
9986         ResultMask.push_back(DAG.getConstant(SrcElt*BytesPerElement + j, dl,
9987                                              MVT::i32));
9988   }
9989 
9990   ShufflesHandledWithVPERM++;
9991   SDValue VPermMask = DAG.getBuildVector(MVT::v16i8, dl, ResultMask);
9992   LLVM_DEBUG(dbgs() << "Emitting a VPERM for the following shuffle:\n");
9993   LLVM_DEBUG(SVOp->dump());
9994   LLVM_DEBUG(dbgs() << "With the following permute control vector:\n");
9995   LLVM_DEBUG(VPermMask.dump());
9996 
9997   if (isLittleEndian)
9998     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
9999                        V2, V1, VPermMask);
10000   else
10001     return DAG.getNode(PPCISD::VPERM, dl, V1.getValueType(),
10002                        V1, V2, VPermMask);
10003 }
10004 
10005 /// getVectorCompareInfo - Given an intrinsic, return false if it is not a
10006 /// vector comparison.  If it is, return true and fill in Opc/isDot with
10007 /// information about the intrinsic.
10008 static bool getVectorCompareInfo(SDValue Intrin, int &CompareOpc,
10009                                  bool &isDot, const PPCSubtarget &Subtarget) {
10010   unsigned IntrinsicID =
10011       cast<ConstantSDNode>(Intrin.getOperand(0))->getZExtValue();
10012   CompareOpc = -1;
10013   isDot = false;
10014   switch (IntrinsicID) {
10015   default:
10016     return false;
10017   // Comparison predicates.
10018   case Intrinsic::ppc_altivec_vcmpbfp_p:
10019     CompareOpc = 966;
10020     isDot = true;
10021     break;
10022   case Intrinsic::ppc_altivec_vcmpeqfp_p:
10023     CompareOpc = 198;
10024     isDot = true;
10025     break;
10026   case Intrinsic::ppc_altivec_vcmpequb_p:
10027     CompareOpc = 6;
10028     isDot = true;
10029     break;
10030   case Intrinsic::ppc_altivec_vcmpequh_p:
10031     CompareOpc = 70;
10032     isDot = true;
10033     break;
10034   case Intrinsic::ppc_altivec_vcmpequw_p:
10035     CompareOpc = 134;
10036     isDot = true;
10037     break;
10038   case Intrinsic::ppc_altivec_vcmpequd_p:
10039     if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10040       CompareOpc = 199;
10041       isDot = true;
10042     } else
10043       return false;
10044     break;
10045   case Intrinsic::ppc_altivec_vcmpneb_p:
10046   case Intrinsic::ppc_altivec_vcmpneh_p:
10047   case Intrinsic::ppc_altivec_vcmpnew_p:
10048   case Intrinsic::ppc_altivec_vcmpnezb_p:
10049   case Intrinsic::ppc_altivec_vcmpnezh_p:
10050   case Intrinsic::ppc_altivec_vcmpnezw_p:
10051     if (Subtarget.hasP9Altivec()) {
10052       switch (IntrinsicID) {
10053       default:
10054         llvm_unreachable("Unknown comparison intrinsic.");
10055       case Intrinsic::ppc_altivec_vcmpneb_p:
10056         CompareOpc = 7;
10057         break;
10058       case Intrinsic::ppc_altivec_vcmpneh_p:
10059         CompareOpc = 71;
10060         break;
10061       case Intrinsic::ppc_altivec_vcmpnew_p:
10062         CompareOpc = 135;
10063         break;
10064       case Intrinsic::ppc_altivec_vcmpnezb_p:
10065         CompareOpc = 263;
10066         break;
10067       case Intrinsic::ppc_altivec_vcmpnezh_p:
10068         CompareOpc = 327;
10069         break;
10070       case Intrinsic::ppc_altivec_vcmpnezw_p:
10071         CompareOpc = 391;
10072         break;
10073       }
10074       isDot = true;
10075     } else
10076       return false;
10077     break;
10078   case Intrinsic::ppc_altivec_vcmpgefp_p:
10079     CompareOpc = 454;
10080     isDot = true;
10081     break;
10082   case Intrinsic::ppc_altivec_vcmpgtfp_p:
10083     CompareOpc = 710;
10084     isDot = true;
10085     break;
10086   case Intrinsic::ppc_altivec_vcmpgtsb_p:
10087     CompareOpc = 774;
10088     isDot = true;
10089     break;
10090   case Intrinsic::ppc_altivec_vcmpgtsh_p:
10091     CompareOpc = 838;
10092     isDot = true;
10093     break;
10094   case Intrinsic::ppc_altivec_vcmpgtsw_p:
10095     CompareOpc = 902;
10096     isDot = true;
10097     break;
10098   case Intrinsic::ppc_altivec_vcmpgtsd_p:
10099     if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10100       CompareOpc = 967;
10101       isDot = true;
10102     } else
10103       return false;
10104     break;
10105   case Intrinsic::ppc_altivec_vcmpgtub_p:
10106     CompareOpc = 518;
10107     isDot = true;
10108     break;
10109   case Intrinsic::ppc_altivec_vcmpgtuh_p:
10110     CompareOpc = 582;
10111     isDot = true;
10112     break;
10113   case Intrinsic::ppc_altivec_vcmpgtuw_p:
10114     CompareOpc = 646;
10115     isDot = true;
10116     break;
10117   case Intrinsic::ppc_altivec_vcmpgtud_p:
10118     if (Subtarget.hasVSX() || Subtarget.hasP8Altivec()) {
10119       CompareOpc = 711;
10120       isDot = true;
10121     } else
10122       return false;
10123     break;
10124 
10125   case Intrinsic::ppc_altivec_vcmpequq:
10126   case Intrinsic::ppc_altivec_vcmpgtsq:
10127   case Intrinsic::ppc_altivec_vcmpgtuq:
10128     if (!Subtarget.isISA3_1())
10129       return false;
10130     switch (IntrinsicID) {
10131     default:
10132       llvm_unreachable("Unknown comparison intrinsic.");
10133     case Intrinsic::ppc_altivec_vcmpequq:
10134       CompareOpc = 455;
10135       break;
10136     case Intrinsic::ppc_altivec_vcmpgtsq:
10137       CompareOpc = 903;
10138       break;
10139     case Intrinsic::ppc_altivec_vcmpgtuq:
10140       CompareOpc = 647;
10141       break;
10142     }
10143     break;
10144 
10145   // VSX predicate comparisons use the same infrastructure
10146   case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10147   case Intrinsic::ppc_vsx_xvcmpgedp_p:
10148   case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10149   case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10150   case Intrinsic::ppc_vsx_xvcmpgesp_p:
10151   case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10152     if (Subtarget.hasVSX()) {
10153       switch (IntrinsicID) {
10154       case Intrinsic::ppc_vsx_xvcmpeqdp_p:
10155         CompareOpc = 99;
10156         break;
10157       case Intrinsic::ppc_vsx_xvcmpgedp_p:
10158         CompareOpc = 115;
10159         break;
10160       case Intrinsic::ppc_vsx_xvcmpgtdp_p:
10161         CompareOpc = 107;
10162         break;
10163       case Intrinsic::ppc_vsx_xvcmpeqsp_p:
10164         CompareOpc = 67;
10165         break;
10166       case Intrinsic::ppc_vsx_xvcmpgesp_p:
10167         CompareOpc = 83;
10168         break;
10169       case Intrinsic::ppc_vsx_xvcmpgtsp_p:
10170         CompareOpc = 75;
10171         break;
10172       }
10173       isDot = true;
10174     } else
10175       return false;
10176     break;
10177 
10178   // Normal Comparisons.
10179   case Intrinsic::ppc_altivec_vcmpbfp:
10180     CompareOpc = 966;
10181     break;
10182   case Intrinsic::ppc_altivec_vcmpeqfp:
10183     CompareOpc = 198;
10184     break;
10185   case Intrinsic::ppc_altivec_vcmpequb:
10186     CompareOpc = 6;
10187     break;
10188   case Intrinsic::ppc_altivec_vcmpequh:
10189     CompareOpc = 70;
10190     break;
10191   case Intrinsic::ppc_altivec_vcmpequw:
10192     CompareOpc = 134;
10193     break;
10194   case Intrinsic::ppc_altivec_vcmpequd:
10195     if (Subtarget.hasP8Altivec())
10196       CompareOpc = 199;
10197     else
10198       return false;
10199     break;
10200   case Intrinsic::ppc_altivec_vcmpneb:
10201   case Intrinsic::ppc_altivec_vcmpneh:
10202   case Intrinsic::ppc_altivec_vcmpnew:
10203   case Intrinsic::ppc_altivec_vcmpnezb:
10204   case Intrinsic::ppc_altivec_vcmpnezh:
10205   case Intrinsic::ppc_altivec_vcmpnezw:
10206     if (Subtarget.hasP9Altivec())
10207       switch (IntrinsicID) {
10208       default:
10209         llvm_unreachable("Unknown comparison intrinsic.");
10210       case Intrinsic::ppc_altivec_vcmpneb:
10211         CompareOpc = 7;
10212         break;
10213       case Intrinsic::ppc_altivec_vcmpneh:
10214         CompareOpc = 71;
10215         break;
10216       case Intrinsic::ppc_altivec_vcmpnew:
10217         CompareOpc = 135;
10218         break;
10219       case Intrinsic::ppc_altivec_vcmpnezb:
10220         CompareOpc = 263;
10221         break;
10222       case Intrinsic::ppc_altivec_vcmpnezh:
10223         CompareOpc = 327;
10224         break;
10225       case Intrinsic::ppc_altivec_vcmpnezw:
10226         CompareOpc = 391;
10227         break;
10228       }
10229     else
10230       return false;
10231     break;
10232   case Intrinsic::ppc_altivec_vcmpgefp:
10233     CompareOpc = 454;
10234     break;
10235   case Intrinsic::ppc_altivec_vcmpgtfp:
10236     CompareOpc = 710;
10237     break;
10238   case Intrinsic::ppc_altivec_vcmpgtsb:
10239     CompareOpc = 774;
10240     break;
10241   case Intrinsic::ppc_altivec_vcmpgtsh:
10242     CompareOpc = 838;
10243     break;
10244   case Intrinsic::ppc_altivec_vcmpgtsw:
10245     CompareOpc = 902;
10246     break;
10247   case Intrinsic::ppc_altivec_vcmpgtsd:
10248     if (Subtarget.hasP8Altivec())
10249       CompareOpc = 967;
10250     else
10251       return false;
10252     break;
10253   case Intrinsic::ppc_altivec_vcmpgtub:
10254     CompareOpc = 518;
10255     break;
10256   case Intrinsic::ppc_altivec_vcmpgtuh:
10257     CompareOpc = 582;
10258     break;
10259   case Intrinsic::ppc_altivec_vcmpgtuw:
10260     CompareOpc = 646;
10261     break;
10262   case Intrinsic::ppc_altivec_vcmpgtud:
10263     if (Subtarget.hasP8Altivec())
10264       CompareOpc = 711;
10265     else
10266       return false;
10267     break;
10268   case Intrinsic::ppc_altivec_vcmpequq_p:
10269   case Intrinsic::ppc_altivec_vcmpgtsq_p:
10270   case Intrinsic::ppc_altivec_vcmpgtuq_p:
10271     if (!Subtarget.isISA3_1())
10272       return false;
10273     switch (IntrinsicID) {
10274     default:
10275       llvm_unreachable("Unknown comparison intrinsic.");
10276     case Intrinsic::ppc_altivec_vcmpequq_p:
10277       CompareOpc = 455;
10278       break;
10279     case Intrinsic::ppc_altivec_vcmpgtsq_p:
10280       CompareOpc = 903;
10281       break;
10282     case Intrinsic::ppc_altivec_vcmpgtuq_p:
10283       CompareOpc = 647;
10284       break;
10285     }
10286     isDot = true;
10287     break;
10288   }
10289   return true;
10290 }
10291 
10292 /// LowerINTRINSIC_WO_CHAIN - If this is an intrinsic that we want to custom
10293 /// lower, do it, otherwise return null.
10294 SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
10295                                                    SelectionDAG &DAG) const {
10296   unsigned IntrinsicID =
10297     cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10298 
10299   SDLoc dl(Op);
10300 
10301   switch (IntrinsicID) {
10302   case Intrinsic::thread_pointer:
10303     // Reads the thread pointer register, used for __builtin_thread_pointer.
10304     if (Subtarget.isPPC64())
10305       return DAG.getRegister(PPC::X13, MVT::i64);
10306     return DAG.getRegister(PPC::R2, MVT::i32);
10307 
10308   case Intrinsic::ppc_mma_disassemble_acc:
10309   case Intrinsic::ppc_vsx_disassemble_pair: {
10310     int NumVecs = 2;
10311     SDValue WideVec = Op.getOperand(1);
10312     if (IntrinsicID == Intrinsic::ppc_mma_disassemble_acc) {
10313       NumVecs = 4;
10314       WideVec = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, WideVec);
10315     }
10316     SmallVector<SDValue, 4> RetOps;
10317     for (int VecNo = 0; VecNo < NumVecs; VecNo++) {
10318       SDValue Extract = DAG.getNode(
10319           PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, WideVec,
10320           DAG.getConstant(Subtarget.isLittleEndian() ? NumVecs - 1 - VecNo
10321                                                      : VecNo,
10322                           dl, getPointerTy(DAG.getDataLayout())));
10323       RetOps.push_back(Extract);
10324     }
10325     return DAG.getMergeValues(RetOps, dl);
10326   }
10327   }
10328 
10329   // If this is a lowered altivec predicate compare, CompareOpc is set to the
10330   // opcode number of the comparison.
10331   int CompareOpc;
10332   bool isDot;
10333   if (!getVectorCompareInfo(Op, CompareOpc, isDot, Subtarget))
10334     return SDValue();    // Don't custom lower most intrinsics.
10335 
10336   // If this is a non-dot comparison, make the VCMP node and we are done.
10337   if (!isDot) {
10338     SDValue Tmp = DAG.getNode(PPCISD::VCMP, dl, Op.getOperand(2).getValueType(),
10339                               Op.getOperand(1), Op.getOperand(2),
10340                               DAG.getConstant(CompareOpc, dl, MVT::i32));
10341     return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Tmp);
10342   }
10343 
10344   // Create the PPCISD altivec 'dot' comparison node.
10345   SDValue Ops[] = {
10346     Op.getOperand(2),  // LHS
10347     Op.getOperand(3),  // RHS
10348     DAG.getConstant(CompareOpc, dl, MVT::i32)
10349   };
10350   EVT VTs[] = { Op.getOperand(2).getValueType(), MVT::Glue };
10351   SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
10352 
10353   // Now that we have the comparison, emit a copy from the CR to a GPR.
10354   // This is flagged to the above dot comparison.
10355   SDValue Flags = DAG.getNode(PPCISD::MFOCRF, dl, MVT::i32,
10356                                 DAG.getRegister(PPC::CR6, MVT::i32),
10357                                 CompNode.getValue(1));
10358 
10359   // Unpack the result based on how the target uses it.
10360   unsigned BitNo;   // Bit # of CR6.
10361   bool InvertBit;   // Invert result?
10362   switch (cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue()) {
10363   default:  // Can't happen, don't crash on invalid number though.
10364   case 0:   // Return the value of the EQ bit of CR6.
10365     BitNo = 0; InvertBit = false;
10366     break;
10367   case 1:   // Return the inverted value of the EQ bit of CR6.
10368     BitNo = 0; InvertBit = true;
10369     break;
10370   case 2:   // Return the value of the LT bit of CR6.
10371     BitNo = 2; InvertBit = false;
10372     break;
10373   case 3:   // Return the inverted value of the LT bit of CR6.
10374     BitNo = 2; InvertBit = true;
10375     break;
10376   }
10377 
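  // MFOCRF places CR6 in bits 24-27 of the 32-bit result (LT, GT, EQ, SO),
  // i.e. at bit positions 7, 6, 5 and 4 counting from the LSB, which is what
  // the shift amount below accounts for.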
10378   // Shift the bit into the low position.
10379   Flags = DAG.getNode(ISD::SRL, dl, MVT::i32, Flags,
10380                       DAG.getConstant(8 - (3 - BitNo), dl, MVT::i32));
10381   // Isolate the bit.
10382   Flags = DAG.getNode(ISD::AND, dl, MVT::i32, Flags,
10383                       DAG.getConstant(1, dl, MVT::i32));
10384 
10385   // If we are supposed to, toggle the bit.
10386   if (InvertBit)
10387     Flags = DAG.getNode(ISD::XOR, dl, MVT::i32, Flags,
10388                         DAG.getConstant(1, dl, MVT::i32));
10389   return Flags;
10390 }
10391 
10392 SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
10393                                                SelectionDAG &DAG) const {
10394   // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain at
10395   // the beginning of the argument list.
10396   int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1;
10397   SDLoc DL(Op);
10398   switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) {
10399   case Intrinsic::ppc_cfence: {
10400     assert(ArgStart == 1 && "llvm.ppc.cfence must carry a chain argument.");
10401     assert(Subtarget.isPPC64() && "Only 64-bit is supported for now.");
10402     return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other,
10403                                       DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64,
10404                                                   Op.getOperand(ArgStart + 1)),
10405                                       Op.getOperand(0)),
10406                    0);
10407   }
10408   default:
10409     break;
10410   }
10411   return SDValue();
10412 }
10413 
10414 // Lower scalar BSWAP64 to xxbrd.
10415 SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
10416   SDLoc dl(Op);
10417   if (!Subtarget.isPPC64())
10418     return Op;
10419   // MTVSRDD
10420   Op = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64, Op.getOperand(0),
10421                    Op.getOperand(0));
10422   // XXBRD
10423   Op = DAG.getNode(ISD::BSWAP, dl, MVT::v2i64, Op);
10424   // MFVSRD
10425   int VectorIndex = 0;
10426   if (Subtarget.isLittleEndian())
10427     VectorIndex = 1;
10428   Op = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Op,
10429                    DAG.getTargetConstant(VectorIndex, dl, MVT::i32));
10430   return Op;
10431 }
10432 
10433 // ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
10434 // compared to a value that is atomically loaded (atomic loads zero-extend).
10435 SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
10436                                                 SelectionDAG &DAG) const {
10437   assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
10438          "Expecting an atomic compare-and-swap here.");
10439   SDLoc dl(Op);
10440   auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
10441   EVT MemVT = AtomicNode->getMemoryVT();
10442   if (MemVT.getSizeInBits() >= 32)
10443     return Op;
10444 
10445   SDValue CmpOp = Op.getOperand(2);
10446   // If this is already correctly zero-extended, leave it alone.
10447   auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
10448   if (DAG.MaskedValueIsZero(CmpOp, HighBits))
10449     return Op;
10450 
10451   // Clear the high bits of the compare operand.
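  // The mask is 0xFF for an i8 operation and 0xFFFF for an i16 operation.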
10452   unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
10453   SDValue NewCmpOp =
10454     DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
10455                 DAG.getConstant(MaskVal, dl, MVT::i32));
10456 
10457   // Replace the existing compare operand with the properly zero-extended one.
10458   SmallVector<SDValue, 4> Ops;
10459   for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
10460     Ops.push_back(AtomicNode->getOperand(i));
10461   Ops[2] = NewCmpOp;
10462   MachineMemOperand *MMO = AtomicNode->getMemOperand();
10463   SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
10464   auto NodeTy =
10465     (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
10466   return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
10467 }
10468 
10469 SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
10470                                                  SelectionDAG &DAG) const {
10471   SDLoc dl(Op);
10472   // Create a stack slot that is 16-byte aligned.
10473   MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
10474   int FrameIdx = MFI.CreateStackObject(16, Align(16), false);
10475   EVT PtrVT = getPointerTy(DAG.getDataLayout());
10476   SDValue FIdx = DAG.getFrameIndex(FrameIdx, PtrVT);
10477 
10478   // Store the input value into Value#0 of the stack slot.
10479   SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0), FIdx,
10480                                MachinePointerInfo());
10481   // Load it out.
10482   return DAG.getLoad(Op.getValueType(), dl, Store, FIdx, MachinePointerInfo());
10483 }
10484 
10485 SDValue PPCTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
10486                                                   SelectionDAG &DAG) const {
10487   assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT &&
10488          "Should only be called for ISD::INSERT_VECTOR_ELT");
10489 
10490   ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(2));
10491 
10492   EVT VT = Op.getValueType();
10493   SDLoc dl(Op);
10494   SDValue V1 = Op.getOperand(0);
10495   SDValue V2 = Op.getOperand(1);
10496   SDValue V3 = Op.getOperand(2);
10497 
10498   if (VT == MVT::v2f64 && C)
10499     return Op;
10500 
10501   if (Subtarget.isISA3_1()) {
10502     if ((VT == MVT::v2i64 || VT == MVT::v2f64) && !Subtarget.isPPC64())
10503       return SDValue();
10504     // On P10, we have legal lowering for constant and variable indices for
10505     // integer vectors.
10506     if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
10507         VT == MVT::v2i64)
10508       return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, V2, V3);
10509     // For f32 and f64 vectors, we have legal lowering for variable indices.
10510     // For f32 we also have legal lowering when the element is loaded from
10511     // memory.
10512     if (VT == MVT::v4f32 || VT == MVT::v2f64) {
10513       if (!C || (VT == MVT::v4f32 && dyn_cast<LoadSDNode>(V2)))
10514         return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, V2, V3);
10515       return Op;
10516     }
10517   }
10518 
10519   // Before P10, we have legal lowering for constant indices but not for
10520   // variable ones.
10521   if (!C)
10522     return SDValue();
10523 
10524   // We can use MTVSRZ + VECINSERT for v8i16 and v16i8 types.
10525   if (VT == MVT::v8i16 || VT == MVT::v16i8) {
10526     SDValue Mtvsrz = DAG.getNode(PPCISD::MTVSRZ, dl, VT, V2);
10527     unsigned BytesInEachElement = VT.getVectorElementType().getSizeInBits() / 8;
10528     unsigned InsertAtElement = C->getZExtValue();
10529     unsigned InsertAtByte = InsertAtElement * BytesInEachElement;
10530     if (Subtarget.isLittleEndian()) {
10531       InsertAtByte = (16 - BytesInEachElement) - InsertAtByte;
10532     }
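    // For example, inserting element 2 of a v8i16 gives InsertAtByte 4 on
    // big-endian and 10 on little-endian.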
10533     return DAG.getNode(PPCISD::VECINSERT, dl, VT, V1, Mtvsrz,
10534                        DAG.getConstant(InsertAtByte, dl, MVT::i32));
10535   }
10536   return Op;
10537 }
10538 
10539 SDValue PPCTargetLowering::LowerVectorLoad(SDValue Op,
10540                                            SelectionDAG &DAG) const {
10541   SDLoc dl(Op);
10542   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
10543   SDValue LoadChain = LN->getChain();
10544   SDValue BasePtr = LN->getBasePtr();
10545   EVT VT = Op.getValueType();
10546 
10547   if (VT != MVT::v256i1 && VT != MVT::v512i1)
10548     return Op;
10549 
10550   // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10551   // Here we create 2 or 4 v16i8 loads to load the pair or accumulator value in
10552   // 2 or 4 vsx registers.
10553   assert((VT != MVT::v512i1 || Subtarget.hasMMA()) &&
10554          "Type unsupported without MMA");
10555   assert((VT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10556          "Type unsupported without paired vector support");
10557   Align Alignment = LN->getAlign();
10558   SmallVector<SDValue, 4> Loads;
10559   SmallVector<SDValue, 4> LoadChains;
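  // NumVecs is 2 for a v256i1 pair and 4 for a v512i1 accumulator.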
10560   unsigned NumVecs = VT.getSizeInBits() / 128;
10561   for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10562     SDValue Load =
10563         DAG.getLoad(MVT::v16i8, dl, LoadChain, BasePtr,
10564                     LN->getPointerInfo().getWithOffset(Idx * 16),
10565                     commonAlignment(Alignment, Idx * 16),
10566                     LN->getMemOperand()->getFlags(), LN->getAAInfo());
10567     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10568                           DAG.getConstant(16, dl, BasePtr.getValueType()));
10569     Loads.push_back(Load);
10570     LoadChains.push_back(Load.getValue(1));
10571   }
10572   if (Subtarget.isLittleEndian()) {
10573     std::reverse(Loads.begin(), Loads.end());
10574     std::reverse(LoadChains.begin(), LoadChains.end());
10575   }
10576   SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, LoadChains);
10577   SDValue Value =
10578       DAG.getNode(VT == MVT::v512i1 ? PPCISD::ACC_BUILD : PPCISD::PAIR_BUILD,
10579                   dl, VT, Loads);
10580   SDValue RetOps[] = {Value, TF};
10581   return DAG.getMergeValues(RetOps, dl);
10582 }
10583 
10584 SDValue PPCTargetLowering::LowerVectorStore(SDValue Op,
10585                                             SelectionDAG &DAG) const {
10586   SDLoc dl(Op);
10587   StoreSDNode *SN = cast<StoreSDNode>(Op.getNode());
10588   SDValue StoreChain = SN->getChain();
10589   SDValue BasePtr = SN->getBasePtr();
10590   SDValue Value = SN->getValue();
10591   EVT StoreVT = Value.getValueType();
10592 
10593   if (StoreVT != MVT::v256i1 && StoreVT != MVT::v512i1)
10594     return Op;
10595 
10596   // Type v256i1 is used for pairs and v512i1 is used for accumulators.
10597   // Here we create 2 or 4 v16i8 stores to store the underlying registers of
10598   // the pair or accumulator individually.
10599   assert((StoreVT != MVT::v512i1 || Subtarget.hasMMA()) &&
10600          "Type unsupported without MMA");
10601   assert((StoreVT != MVT::v256i1 || Subtarget.pairedVectorMemops()) &&
10602          "Type unsupported without paired vector support");
10603   Align Alignment = SN->getAlign();
10604   SmallVector<SDValue, 4> Stores;
10605   unsigned NumVecs = 2;
10606   if (StoreVT == MVT::v512i1) {
10607     Value = DAG.getNode(PPCISD::XXMFACC, dl, MVT::v512i1, Value);
10608     NumVecs = 4;
10609   }
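  // On little-endian targets the registers are extracted in reverse order
  // below, mirroring the reversal done when loading these types.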
10610   for (unsigned Idx = 0; Idx < NumVecs; ++Idx) {
10611     unsigned VecNum = Subtarget.isLittleEndian() ? NumVecs - 1 - Idx : Idx;
10612     SDValue Elt = DAG.getNode(PPCISD::EXTRACT_VSX_REG, dl, MVT::v16i8, Value,
10613                               DAG.getConstant(VecNum, dl, getPointerTy(DAG.getDataLayout())));
10614     SDValue Store =
10615         DAG.getStore(StoreChain, dl, Elt, BasePtr,
10616                      SN->getPointerInfo().getWithOffset(Idx * 16),
10617                      commonAlignment(Alignment, Idx * 16),
10618                      SN->getMemOperand()->getFlags(), SN->getAAInfo());
10619     BasePtr = DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
10620                           DAG.getConstant(16, dl, BasePtr.getValueType()));
10621     Stores.push_back(Store);
10622   }
10623   SDValue TF = DAG.getTokenFactor(dl, Stores);
10624   return TF;
10625 }
10626 
10627 SDValue PPCTargetLowering::LowerMUL(SDValue Op, SelectionDAG &DAG) const {
10628   SDLoc dl(Op);
10629   if (Op.getValueType() == MVT::v4i32) {
10630     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10631 
10632     SDValue Zero = getCanonicalConstSplat(0, 1, MVT::v4i32, DAG, dl);
10633     // +16 as shift amt.
10634     SDValue Neg16 = getCanonicalConstSplat(-16, 4, MVT::v4i32, DAG, dl);
10635     SDValue RHSSwap =   // = vrlw RHS, 16
10636       BuildIntrinsicOp(Intrinsic::ppc_altivec_vrlw, RHS, Neg16, DAG, dl);
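    // With each 32-bit element written as a = aH*2^16 + aL and
    // b = bH*2^16 + bL, the low 32 bits of a*b are
    // aL*bL + ((aL*bH + aH*bL) << 16).  vmulouh below produces the first term
    // and vmsumuhm on the rotated RHS produces the sum inside the shift.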
10637 
10638     // Shrinkify inputs to v8i16.
10639     LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, LHS);
10640     RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHS);
10641     RHSSwap = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, RHSSwap);
10642 
10643     // Low parts multiplied together, generating 32-bit results (we ignore the
10644     // top parts).
10645     SDValue LoProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmulouh,
10646                                         LHS, RHS, DAG, dl, MVT::v4i32);
10647 
10648     SDValue HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmsumuhm,
10649                                       LHS, RHSSwap, Zero, DAG, dl, MVT::v4i32);
10650     // Shift the high parts up 16 bits.
10651     HiProd = BuildIntrinsicOp(Intrinsic::ppc_altivec_vslw, HiProd,
10652                               Neg16, DAG, dl);
10653     return DAG.getNode(ISD::ADD, dl, MVT::v4i32, LoProd, HiProd);
10654   } else if (Op.getValueType() == MVT::v16i8) {
10655     SDValue LHS = Op.getOperand(0), RHS = Op.getOperand(1);
10656     bool isLittleEndian = Subtarget.isLittleEndian();
10657 
10658     // Multiply the even 8-bit parts, producing 16-bit sums.
10659     SDValue EvenParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuleub,
10660                                            LHS, RHS, DAG, dl, MVT::v8i16);
10661     EvenParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, EvenParts);
10662 
10663     // Multiply the odd 8-bit parts, producing 16-bit sums.
10664     SDValue OddParts = BuildIntrinsicOp(Intrinsic::ppc_altivec_vmuloub,
10665                                           LHS, RHS, DAG, dl, MVT::v8i16);
10666     OddParts = DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, OddParts);
10667 
10668     // Merge the results together.  Because vmuleub and vmuloub are
10669     // instructions with a big-endian bias, we must reverse the
10670     // element numbering and reverse the meaning of "odd" and "even"
10671     // when generating little endian code.
10672     int Ops[16];
10673     for (unsigned i = 0; i != 8; ++i) {
10674       if (isLittleEndian) {
10675         Ops[i*2  ] = 2*i;
10676         Ops[i*2+1] = 2*i+16;
10677       } else {
10678         Ops[i*2  ] = 2*i+1;
10679         Ops[i*2+1] = 2*i+1+16;
10680       }
10681     }
10682     if (isLittleEndian)
10683       return DAG.getVectorShuffle(MVT::v16i8, dl, OddParts, EvenParts, Ops);
10684     else
10685       return DAG.getVectorShuffle(MVT::v16i8, dl, EvenParts, OddParts, Ops);
10686   } else {
10687     llvm_unreachable("Unknown mul to lower!");
10688   }
10689 }
10690 
10691 SDValue PPCTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
10692   bool IsStrict = Op->isStrictFPOpcode();
10693   if (Op.getOperand(IsStrict ? 1 : 0).getValueType() == MVT::f128 &&
10694       !Subtarget.hasP9Vector())
10695     return SDValue();
10696 
10697   return Op;
10698 }
10699 
10700 // Custom lowering for fpext v2f32 to v2f64
10701 SDValue PPCTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
10702 
10703   assert(Op.getOpcode() == ISD::FP_EXTEND &&
10704          "Should only be called for ISD::FP_EXTEND");
10705 
10706   // FIXME: handle extends from half precision float vectors on P9.
10707   // We only want to custom lower an extend from v2f32 to v2f64.
10708   if (Op.getValueType() != MVT::v2f64 ||
10709       Op.getOperand(0).getValueType() != MVT::v2f32)
10710     return SDValue();
10711 
10712   SDLoc dl(Op);
10713   SDValue Op0 = Op.getOperand(0);
10714 
10715   switch (Op0.getOpcode()) {
10716   default:
10717     return SDValue();
10718   case ISD::EXTRACT_SUBVECTOR: {
10719     assert(Op0.getNumOperands() == 2 &&
10720            isa<ConstantSDNode>(Op0->getOperand(1)) &&
10721            "Node should have 2 operands with second one being a constant!");
10722 
10723     if (Op0.getOperand(0).getValueType() != MVT::v4f32)
10724       return SDValue();
10725 
10726     // Custom lower is only done for high or low doubleword.
10727     int Idx = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
10728     if (Idx % 2 != 0)
10729       return SDValue();
10730 
10731     // Since input is v4f32, at this point Idx is either 0 or 2.
10732     // Shift to get the doubleword position we want.
10733     int DWord = Idx >> 1;
10734 
10735     // High and low word positions are different on little endian.
10736     if (Subtarget.isLittleEndian())
10737       DWord ^= 0x1;
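    // For example, Idx == 2 selects doubleword 1 on big-endian but doubleword
    // 0 on little-endian.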
10738 
10739     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64,
10740                        Op0.getOperand(0), DAG.getConstant(DWord, dl, MVT::i32));
10741   }
10742   case ISD::FADD:
10743   case ISD::FMUL:
10744   case ISD::FSUB: {
10745     SDValue NewLoad[2];
10746     for (unsigned i = 0, ie = Op0.getNumOperands(); i != ie; ++i) {
10747       // Ensure both inputs are loads.
10748       SDValue LdOp = Op0.getOperand(i);
10749       if (LdOp.getOpcode() != ISD::LOAD)
10750         return SDValue();
10751       // Generate new load node.
10752       LoadSDNode *LD = cast<LoadSDNode>(LdOp);
10753       SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10754       NewLoad[i] = DAG.getMemIntrinsicNode(
10755           PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10756           LD->getMemoryVT(), LD->getMemOperand());
10757     }
10758     SDValue NewOp =
10759         DAG.getNode(Op0.getOpcode(), SDLoc(Op0), MVT::v4f32, NewLoad[0],
10760                     NewLoad[1], Op0.getNode()->getFlags());
10761     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewOp,
10762                        DAG.getConstant(0, dl, MVT::i32));
10763   }
10764   case ISD::LOAD: {
10765     LoadSDNode *LD = cast<LoadSDNode>(Op0);
10766     SDValue LoadOps[] = {LD->getChain(), LD->getBasePtr()};
10767     SDValue NewLd = DAG.getMemIntrinsicNode(
10768         PPCISD::LD_VSX_LH, dl, DAG.getVTList(MVT::v4f32, MVT::Other), LoadOps,
10769         LD->getMemoryVT(), LD->getMemOperand());
10770     return DAG.getNode(PPCISD::FP_EXTEND_HALF, dl, MVT::v2f64, NewLd,
10771                        DAG.getConstant(0, dl, MVT::i32));
10772   }
10773   }
10774   llvm_unreachable("Should return for all cases within switch.");
10775 }
10776 
10777 /// LowerOperation - Provide custom lowering hooks for some operations.
10778 ///
10779 SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
10780   switch (Op.getOpcode()) {
10781   default: llvm_unreachable("Wasn't expecting to be able to lower this!");
10782   case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
10783   case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
10784   case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
10785   case ISD::GlobalTLSAddress:   return LowerGlobalTLSAddress(Op, DAG);
10786   case ISD::JumpTable:          return LowerJumpTable(Op, DAG);
10787   case ISD::STRICT_FSETCC:
10788   case ISD::STRICT_FSETCCS:
10789   case ISD::SETCC:              return LowerSETCC(Op, DAG);
10790   case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
10791   case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
10792 
10793   case ISD::INLINEASM:
10794   case ISD::INLINEASM_BR:       return LowerINLINEASM(Op, DAG);
10795   // Variable argument lowering.
10796   case ISD::VASTART:            return LowerVASTART(Op, DAG);
10797   case ISD::VAARG:              return LowerVAARG(Op, DAG);
10798   case ISD::VACOPY:             return LowerVACOPY(Op, DAG);
10799 
10800   case ISD::STACKRESTORE:       return LowerSTACKRESTORE(Op, DAG);
10801   case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
10802   case ISD::GET_DYNAMIC_AREA_OFFSET:
10803     return LowerGET_DYNAMIC_AREA_OFFSET(Op, DAG);
10804 
10805   // Exception handling lowering.
10806   case ISD::EH_DWARF_CFA:       return LowerEH_DWARF_CFA(Op, DAG);
10807   case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
10808   case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
10809 
10810   case ISD::LOAD:               return LowerLOAD(Op, DAG);
10811   case ISD::STORE:              return LowerSTORE(Op, DAG);
10812   case ISD::TRUNCATE:           return LowerTRUNCATE(Op, DAG);
10813   case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
10814   case ISD::STRICT_FP_TO_UINT:
10815   case ISD::STRICT_FP_TO_SINT:
10816   case ISD::FP_TO_UINT:
10817   case ISD::FP_TO_SINT:         return LowerFP_TO_INT(Op, DAG, SDLoc(Op));
10818   case ISD::STRICT_UINT_TO_FP:
10819   case ISD::STRICT_SINT_TO_FP:
10820   case ISD::UINT_TO_FP:
10821   case ISD::SINT_TO_FP:         return LowerINT_TO_FP(Op, DAG);
10822   case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);
10823 
10824   // Lower 64-bit shifts.
10825   case ISD::SHL_PARTS:          return LowerSHL_PARTS(Op, DAG);
10826   case ISD::SRL_PARTS:          return LowerSRL_PARTS(Op, DAG);
10827   case ISD::SRA_PARTS:          return LowerSRA_PARTS(Op, DAG);
10828 
10829   case ISD::FSHL:               return LowerFunnelShift(Op, DAG);
10830   case ISD::FSHR:               return LowerFunnelShift(Op, DAG);
10831 
10832   // Vector-related lowering.
10833   case ISD::BUILD_VECTOR:       return LowerBUILD_VECTOR(Op, DAG);
10834   case ISD::VECTOR_SHUFFLE:     return LowerVECTOR_SHUFFLE(Op, DAG);
10835   case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
10836   case ISD::SCALAR_TO_VECTOR:   return LowerSCALAR_TO_VECTOR(Op, DAG);
10837   case ISD::INSERT_VECTOR_ELT:  return LowerINSERT_VECTOR_ELT(Op, DAG);
10838   case ISD::MUL:                return LowerMUL(Op, DAG);
10839   case ISD::FP_EXTEND:          return LowerFP_EXTEND(Op, DAG);
10840   case ISD::STRICT_FP_ROUND:
10841   case ISD::FP_ROUND:
10842     return LowerFP_ROUND(Op, DAG);
10843   case ISD::ROTL:               return LowerROTL(Op, DAG);
10844 
10845   // For counter-based loop handling.
10846   case ISD::INTRINSIC_W_CHAIN:  return SDValue();
10847 
10848   case ISD::BITCAST:            return LowerBITCAST(Op, DAG);
10849 
10850   // Frame & Return address.
10851   case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
10852   case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
10853 
10854   case ISD::INTRINSIC_VOID:
10855     return LowerINTRINSIC_VOID(Op, DAG);
10856   case ISD::BSWAP:
10857     return LowerBSWAP(Op, DAG);
10858   case ISD::ATOMIC_CMP_SWAP:
10859     return LowerATOMIC_CMP_SWAP(Op, DAG);
10860   }
10861 }
10862 
10863 void PPCTargetLowering::ReplaceNodeResults(SDNode *N,
10864                                            SmallVectorImpl<SDValue>&Results,
10865                                            SelectionDAG &DAG) const {
10866   SDLoc dl(N);
10867   switch (N->getOpcode()) {
10868   default:
10869     llvm_unreachable("Do not know how to custom type legalize this operation!");
10870   case ISD::READCYCLECOUNTER: {
10871     SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
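    // READ_TIME_BASE returns the time base as two i32 halves plus a chain;
    // glue the halves back into the expected i64 result with BUILD_PAIR.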
10872     SDValue RTB = DAG.getNode(PPCISD::READ_TIME_BASE, dl, VTs, N->getOperand(0));
10873 
10874     Results.push_back(
10875         DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, RTB, RTB.getValue(1)));
10876     Results.push_back(RTB.getValue(2));
10877     break;
10878   }
10879   case ISD::INTRINSIC_W_CHAIN: {
10880     if (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue() !=
10881         Intrinsic::loop_decrement)
10882       break;
10883 
10884     assert(N->getValueType(0) == MVT::i1 &&
10885            "Unexpected result type for CTR decrement intrinsic");
10886     EVT SVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
10887                                  N->getValueType(0));
10888     SDVTList VTs = DAG.getVTList(SVT, MVT::Other);
10889     SDValue NewInt = DAG.getNode(N->getOpcode(), dl, VTs, N->getOperand(0),
10890                                  N->getOperand(1));
10891 
10892     Results.push_back(DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewInt));
10893     Results.push_back(NewInt.getValue(1));
10894     break;
10895   }
10896   case ISD::VAARG: {
10897     if (!Subtarget.isSVR4ABI() || Subtarget.isPPC64())
10898       return;
10899 
10900     EVT VT = N->getValueType(0);
10901 
10902     if (VT == MVT::i64) {
10903       SDValue NewNode = LowerVAARG(SDValue(N, 1), DAG);
10904 
10905       Results.push_back(NewNode);
10906       Results.push_back(NewNode.getValue(1));
10907     }
10908     return;
10909   }
10910   case ISD::STRICT_FP_TO_SINT:
10911   case ISD::STRICT_FP_TO_UINT:
10912   case ISD::FP_TO_SINT:
10913   case ISD::FP_TO_UINT:
10914     // LowerFP_TO_INT() can only handle f32 and f64.
10915     if (N->getOperand(N->isStrictFPOpcode() ? 1 : 0).getValueType() ==
10916         MVT::ppcf128)
10917       return;
10918     Results.push_back(LowerFP_TO_INT(SDValue(N, 0), DAG, dl));
10919     return;
10920   case ISD::TRUNCATE: {
10921     if (!N->getValueType(0).isVector())
10922       return;
10923     SDValue Lowered = LowerTRUNCATEVector(SDValue(N, 0), DAG);
10924     if (Lowered)
10925       Results.push_back(Lowered);
10926     return;
10927   }
10928   case ISD::FSHL:
10929   case ISD::FSHR:
10930     // Don't handle funnel shifts here.
10931     return;
10932   case ISD::BITCAST:
10933     // Don't handle bitcast here.
10934     return;
10935   case ISD::FP_EXTEND:
10936     SDValue Lowered = LowerFP_EXTEND(SDValue(N, 0), DAG);
10937     if (Lowered)
10938       Results.push_back(Lowered);
10939     return;
10940   }
10941 }
10942 
10943 //===----------------------------------------------------------------------===//
10944 //  Other Lowering Code
10945 //===----------------------------------------------------------------------===//
10946 
10947 static Instruction *callIntrinsic(IRBuilder<> &Builder, Intrinsic::ID Id) {
10948   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
10949   Function *Func = Intrinsic::getDeclaration(M, Id);
10950   return Builder.CreateCall(Func, {});
10951 }
10952 
10953 // The mappings for emitLeading/TrailingFence are taken from
10954 // http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
10955 Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
10956                                                  Instruction *Inst,
10957                                                  AtomicOrdering Ord) const {
10958   if (Ord == AtomicOrdering::SequentiallyConsistent)
10959     return callIntrinsic(Builder, Intrinsic::ppc_sync);
10960   if (isReleaseOrStronger(Ord))
10961     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10962   return nullptr;
10963 }
10964 
10965 Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
10966                                                   Instruction *Inst,
10967                                                   AtomicOrdering Ord) const {
10968   if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) {
10969     // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and
10970     // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html
10971     // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
10972     if (isa<LoadInst>(Inst) && Subtarget.isPPC64())
10973       return Builder.CreateCall(
10974           Intrinsic::getDeclaration(
10975               Builder.GetInsertBlock()->getParent()->getParent(),
10976               Intrinsic::ppc_cfence, {Inst->getType()}),
10977           {Inst});
10978     // FIXME: Can use isync for rmw operation.
10979     return callIntrinsic(Builder, Intrinsic::ppc_lwsync);
10980   }
10981   return nullptr;
10982 }
10983 
10984 MachineBasicBlock *
10985 PPCTargetLowering::EmitAtomicBinary(MachineInstr &MI, MachineBasicBlock *BB,
10986                                     unsigned AtomicSize,
10987                                     unsigned BinOpcode,
10988                                     unsigned CmpOpcode,
10989                                     unsigned CmpPred) const {
10990   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
10991   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
10992 
10993   auto LoadMnemonic = PPC::LDARX;
10994   auto StoreMnemonic = PPC::STDCX;
10995   switch (AtomicSize) {
10996   default:
10997     llvm_unreachable("Unexpected size of atomic entity");
10998   case 1:
10999     LoadMnemonic = PPC::LBARX;
11000     StoreMnemonic = PPC::STBCX;
11001     assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
11002     break;
11003   case 2:
11004     LoadMnemonic = PPC::LHARX;
11005     StoreMnemonic = PPC::STHCX;
11006     assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
11007     break;
11008   case 4:
11009     LoadMnemonic = PPC::LWARX;
11010     StoreMnemonic = PPC::STWCX;
11011     break;
11012   case 8:
11013     LoadMnemonic = PPC::LDARX;
11014     StoreMnemonic = PPC::STDCX;
11015     break;
11016   }
11017 
11018   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11019   MachineFunction *F = BB->getParent();
11020   MachineFunction::iterator It = ++BB->getIterator();
11021 
11022   Register dest = MI.getOperand(0).getReg();
11023   Register ptrA = MI.getOperand(1).getReg();
11024   Register ptrB = MI.getOperand(2).getReg();
11025   Register incr = MI.getOperand(3).getReg();
11026   DebugLoc dl = MI.getDebugLoc();
11027 
11028   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11029   MachineBasicBlock *loop2MBB =
11030     CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11031   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11032   F->insert(It, loopMBB);
11033   if (CmpOpcode)
11034     F->insert(It, loop2MBB);
11035   F->insert(It, exitMBB);
11036   exitMBB->splice(exitMBB->begin(), BB,
11037                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11038   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11039 
11040   MachineRegisterInfo &RegInfo = F->getRegInfo();
11041   Register TmpReg = (!BinOpcode) ? incr :
11042     RegInfo.createVirtualRegister( AtomicSize == 8 ? &PPC::G8RCRegClass
11043                                            : &PPC::GPRCRegClass);
11044 
11045   //  thisMBB:
11046   //   ...
11047   //   fallthrough --> loopMBB
11048   BB->addSuccessor(loopMBB);
11049 
11050   //  loopMBB:
11051   //   l[wd]arx dest, ptr
11052   //   add r0, dest, incr
11053   //   st[wd]cx. r0, ptr
11054   //   bne- loopMBB
11055   //   fallthrough --> exitMBB
11056 
11057   // For max/min...
11058   //  loopMBB:
11059   //   l[wd]arx dest, ptr
11060   //   cmpl?[wd] incr, dest
11061   //   bgt exitMBB
11062   //  loop2MBB:
11063   //   st[wd]cx. dest, ptr
11064   //   bne- loopMBB
11065   //   fallthrough --> exitMBB
11066 
11067   BB = loopMBB;
11068   BuildMI(BB, dl, TII->get(LoadMnemonic), dest)
11069     .addReg(ptrA).addReg(ptrB);
11070   if (BinOpcode)
11071     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg).addReg(incr).addReg(dest);
11072   if (CmpOpcode) {
11073     // Signed comparisons of byte or halfword values must be sign-extended.
11074     if (CmpOpcode == PPC::CMPW && AtomicSize < 4) {
11075       Register ExtReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11076       BuildMI(BB, dl, TII->get(AtomicSize == 1 ? PPC::EXTSB : PPC::EXTSH),
11077               ExtReg).addReg(dest);
11078       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11079         .addReg(incr).addReg(ExtReg);
11080     } else
11081       BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11082         .addReg(incr).addReg(dest);
11083 
11084     BuildMI(BB, dl, TII->get(PPC::BCC))
11085       .addImm(CmpPred).addReg(PPC::CR0).addMBB(exitMBB);
11086     BB->addSuccessor(loop2MBB);
11087     BB->addSuccessor(exitMBB);
11088     BB = loop2MBB;
11089   }
11090   BuildMI(BB, dl, TII->get(StoreMnemonic))
11091     .addReg(TmpReg).addReg(ptrA).addReg(ptrB);
11092   BuildMI(BB, dl, TII->get(PPC::BCC))
11093     .addImm(PPC::PRED_NE).addReg(PPC::CR0).addMBB(loopMBB);
11094   BB->addSuccessor(loopMBB);
11095   BB->addSuccessor(exitMBB);
11096 
11097   //  exitMBB:
11098   //   ...
11099   BB = exitMBB;
11100   return BB;
11101 }
11102 
11103 static bool isSignExtended(MachineInstr &MI, const PPCInstrInfo *TII) {
11104   switch(MI.getOpcode()) {
11105   default:
11106     return false;
11107   case PPC::COPY:
11108     return TII->isSignExtended(MI);
11109   case PPC::LHA:
11110   case PPC::LHA8:
11111   case PPC::LHAU:
11112   case PPC::LHAU8:
11113   case PPC::LHAUX:
11114   case PPC::LHAUX8:
11115   case PPC::LHAX:
11116   case PPC::LHAX8:
11117   case PPC::LWA:
11118   case PPC::LWAUX:
11119   case PPC::LWAX:
11120   case PPC::LWAX_32:
11121   case PPC::LWA_32:
11122   case PPC::PLHA:
11123   case PPC::PLHA8:
11124   case PPC::PLHA8pc:
11125   case PPC::PLHApc:
11126   case PPC::PLWA:
11127   case PPC::PLWA8:
11128   case PPC::PLWA8pc:
11129   case PPC::PLWApc:
11130   case PPC::EXTSB:
11131   case PPC::EXTSB8:
11132   case PPC::EXTSB8_32_64:
11133   case PPC::EXTSB8_rec:
11134   case PPC::EXTSB_rec:
11135   case PPC::EXTSH:
11136   case PPC::EXTSH8:
11137   case PPC::EXTSH8_32_64:
11138   case PPC::EXTSH8_rec:
11139   case PPC::EXTSH_rec:
11140   case PPC::EXTSW:
11141   case PPC::EXTSWSLI:
11142   case PPC::EXTSWSLI_32_64:
11143   case PPC::EXTSWSLI_32_64_rec:
11144   case PPC::EXTSWSLI_rec:
11145   case PPC::EXTSW_32:
11146   case PPC::EXTSW_32_64:
11147   case PPC::EXTSW_32_64_rec:
11148   case PPC::EXTSW_rec:
11149   case PPC::SRAW:
11150   case PPC::SRAWI:
11151   case PPC::SRAWI_rec:
11152   case PPC::SRAW_rec:
11153     return true;
11154   }
11155   return false;
11156 }
11157 
11158 MachineBasicBlock *PPCTargetLowering::EmitPartwordAtomicBinary(
11159     MachineInstr &MI, MachineBasicBlock *BB,
11160     bool is8bit, // true for a byte operation, false for halfword
11161     unsigned BinOpcode, unsigned CmpOpcode, unsigned CmpPred) const {
11162   // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
11163   const PPCInstrInfo *TII = Subtarget.getInstrInfo();
11164 
11165   // If this is a signed comparison and the value being compared is not known
11166   // to be sign extended, sign extend it here.
11167   DebugLoc dl = MI.getDebugLoc();
11168   MachineFunction *F = BB->getParent();
11169   MachineRegisterInfo &RegInfo = F->getRegInfo();
11170   Register incr = MI.getOperand(3).getReg();
11171   bool IsSignExtended = Register::isVirtualRegister(incr) &&
11172     isSignExtended(*RegInfo.getVRegDef(incr), TII);
11173 
11174   if (CmpOpcode == PPC::CMPW && !IsSignExtended) {
11175     Register ValueReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11176     BuildMI(*BB, MI, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueReg)
11177         .addReg(MI.getOperand(3).getReg());
11178     MI.getOperand(3).setReg(ValueReg);
11179   }
11180   // If we support part-word atomic mnemonics, just use them
11181   if (Subtarget.hasPartwordAtomics())
11182     return EmitAtomicBinary(MI, BB, is8bit ? 1 : 2, BinOpcode, CmpOpcode,
11183                             CmpPred);
11184 
11185   // In 64 bit mode we have to use 64 bits for addresses, even though the
11186   // lwarx/stwcx are 32 bits.  With the 32-bit atomics we can use address
11187   // registers without caring whether they're 32 or 64, but here we're
11188   // doing actual arithmetic on the addresses.
11189   bool is64bit = Subtarget.isPPC64();
11190   bool isLittleEndian = Subtarget.isLittleEndian();
11191   unsigned ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
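  // ZERO/ZERO8 denote r0 in the RA position, which indexed loads and stores
  // read as a literal 0.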
11192 
11193   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11194   MachineFunction::iterator It = ++BB->getIterator();
11195 
11196   Register dest = MI.getOperand(0).getReg();
11197   Register ptrA = MI.getOperand(1).getReg();
11198   Register ptrB = MI.getOperand(2).getReg();
11199 
11200   MachineBasicBlock *loopMBB = F->CreateMachineBasicBlock(LLVM_BB);
11201   MachineBasicBlock *loop2MBB =
11202       CmpOpcode ? F->CreateMachineBasicBlock(LLVM_BB) : nullptr;
11203   MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
11204   F->insert(It, loopMBB);
11205   if (CmpOpcode)
11206     F->insert(It, loop2MBB);
11207   F->insert(It, exitMBB);
11208   exitMBB->splice(exitMBB->begin(), BB,
11209                   std::next(MachineBasicBlock::iterator(MI)), BB->end());
11210   exitMBB->transferSuccessorsAndUpdatePHIs(BB);
11211 
11212   const TargetRegisterClass *RC =
11213       is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11214   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11215 
11216   Register PtrReg = RegInfo.createVirtualRegister(RC);
11217   Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
11218   Register ShiftReg =
11219       isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
11220   Register Incr2Reg = RegInfo.createVirtualRegister(GPRC);
11221   Register MaskReg = RegInfo.createVirtualRegister(GPRC);
11222   Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
11223   Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
11224   Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
11225   Register Tmp3Reg = RegInfo.createVirtualRegister(GPRC);
11226   Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
11227   Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
11228   Register SrwDestReg = RegInfo.createVirtualRegister(GPRC);
11229   Register Ptr1Reg;
11230   Register TmpReg =
11231       (!BinOpcode) ? Incr2Reg : RegInfo.createVirtualRegister(GPRC);
11232 
11233   //  thisMBB:
11234   //   ...
11235   //   fallthrough --> loopMBB
11236   BB->addSuccessor(loopMBB);
11237 
11238   // The 4-byte load must be aligned, while a char or short may be
11239   // anywhere in the word.  Hence all this nasty bookkeeping code.
11240   //   add ptr1, ptrA, ptrB [copy if ptrA==0]
11241   //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
11242   //   xori shift, shift1, 24 [16]
11243   //   rlwinm ptr, ptr1, 0, 0, 29
11244   //   slw incr2, incr, shift
11245   //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
11246   //   slw mask, mask2, shift
11247   //  loopMBB:
11248   //   lwarx tmpDest, ptr
11249   //   add tmp, tmpDest, incr2
11250   //   andc tmp2, tmpDest, mask
11251   //   and tmp3, tmp, mask
11252   //   or tmp4, tmp3, tmp2
11253   //   stwcx. tmp4, ptr
11254   //   bne- loopMBB
11255   //   fallthrough --> exitMBB
11256   //   srw SrwDest, tmpDest, shift
11257   //   rlwinm SrwDest, SrwDest, 0, 24 [16], 31
11258   if (ptrA != ZeroReg) {
11259     Ptr1Reg = RegInfo.createVirtualRegister(RC);
11260     BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
11261         .addReg(ptrA)
11262         .addReg(ptrB);
11263   } else {
11264     Ptr1Reg = ptrB;
11265   }
11266   // We need to use a 32-bit subregister here to avoid a register class
11267   // mismatch in 64-bit mode.
11268   BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
11269       .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
11270       .addImm(3)
11271       .addImm(27)
11272       .addImm(is8bit ? 28 : 27);
11273   if (!isLittleEndian)
11274     BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
11275         .addReg(Shift1Reg)
11276         .addImm(is8bit ? 24 : 16);
11277   if (is64bit)
11278     BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
11279         .addReg(Ptr1Reg)
11280         .addImm(0)
11281         .addImm(61);
11282   else
11283     BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
11284         .addReg(Ptr1Reg)
11285         .addImm(0)
11286         .addImm(0)
11287         .addImm(29);
11288   BuildMI(BB, dl, TII->get(PPC::SLW), Incr2Reg).addReg(incr).addReg(ShiftReg);
11289   if (is8bit)
11290     BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
11291   else {
11292     BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
11293     BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
11294         .addReg(Mask3Reg)
11295         .addImm(65535);
11296   }
11297   BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
11298       .addReg(Mask2Reg)
11299       .addReg(ShiftReg);
11300 
11301   BB = loopMBB;
11302   BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
11303       .addReg(ZeroReg)
11304       .addReg(PtrReg);
11305   if (BinOpcode)
11306     BuildMI(BB, dl, TII->get(BinOpcode), TmpReg)
11307         .addReg(Incr2Reg)
11308         .addReg(TmpDestReg);
11309   BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
11310       .addReg(TmpDestReg)
11311       .addReg(MaskReg);
11312   BuildMI(BB, dl, TII->get(PPC::AND), Tmp3Reg).addReg(TmpReg).addReg(MaskReg);
11313   if (CmpOpcode) {
11314     // For unsigned comparisons, we can directly compare the shifted values.
11315     // For signed comparisons we shift and sign extend.
11316     Register SReg = RegInfo.createVirtualRegister(GPRC);
11317     BuildMI(BB, dl, TII->get(PPC::AND), SReg)
11318         .addReg(TmpDestReg)
11319         .addReg(MaskReg);
11320     unsigned ValueReg = SReg;
11321     unsigned CmpReg = Incr2Reg;
11322     if (CmpOpcode == PPC::CMPW) {
11323       ValueReg = RegInfo.createVirtualRegister(GPRC);
11324       BuildMI(BB, dl, TII->get(PPC::SRW), ValueReg)
11325           .addReg(SReg)
11326           .addReg(ShiftReg);
11327       Register ValueSReg = RegInfo.createVirtualRegister(GPRC);
11328       BuildMI(BB, dl, TII->get(is8bit ? PPC::EXTSB : PPC::EXTSH), ValueSReg)
11329           .addReg(ValueReg);
11330       ValueReg = ValueSReg;
11331       CmpReg = incr;
11332     }
11333     BuildMI(BB, dl, TII->get(CmpOpcode), PPC::CR0)
11334         .addReg(CmpReg)
11335         .addReg(ValueReg);
11336     BuildMI(BB, dl, TII->get(PPC::BCC))
11337         .addImm(CmpPred)
11338         .addReg(PPC::CR0)
11339         .addMBB(exitMBB);
11340     BB->addSuccessor(loop2MBB);
11341     BB->addSuccessor(exitMBB);
11342     BB = loop2MBB;
11343   }
11344   BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg).addReg(Tmp3Reg).addReg(Tmp2Reg);
11345   BuildMI(BB, dl, TII->get(PPC::STWCX))
11346       .addReg(Tmp4Reg)
11347       .addReg(ZeroReg)
11348       .addReg(PtrReg);
11349   BuildMI(BB, dl, TII->get(PPC::BCC))
11350       .addImm(PPC::PRED_NE)
11351       .addReg(PPC::CR0)
11352       .addMBB(loopMBB);
11353   BB->addSuccessor(loopMBB);
11354   BB->addSuccessor(exitMBB);
11355 
11356   //  exitMBB:
11357   //   ...
11358   BB = exitMBB;
11359   // Since the shift amount is not a constant, we need to clear
11360   // the upper bits with a separate RLWINM.
11361   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::RLWINM), dest)
11362       .addReg(SrwDestReg)
11363       .addImm(0)
11364       .addImm(is8bit ? 24 : 16)
11365       .addImm(31);
11366   BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), SrwDestReg)
11367       .addReg(TmpDestReg)
11368       .addReg(ShiftReg);
11369   return BB;
11370 }
11371 
11372 llvm::MachineBasicBlock *
11373 PPCTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
11374                                     MachineBasicBlock *MBB) const {
11375   DebugLoc DL = MI.getDebugLoc();
11376   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11377   const PPCRegisterInfo *TRI = Subtarget.getRegisterInfo();
11378 
11379   MachineFunction *MF = MBB->getParent();
11380   MachineRegisterInfo &MRI = MF->getRegInfo();
11381 
11382   const BasicBlock *BB = MBB->getBasicBlock();
11383   MachineFunction::iterator I = ++MBB->getIterator();
11384 
11385   Register DstReg = MI.getOperand(0).getReg();
11386   const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
11387   assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
11388   Register mainDstReg = MRI.createVirtualRegister(RC);
11389   Register restoreDstReg = MRI.createVirtualRegister(RC);
11390 
11391   MVT PVT = getPointerTy(MF->getDataLayout());
11392   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11393          "Invalid Pointer Size!");
11394   // For v = setjmp(buf), we generate
11395   //
11396   // thisMBB:
11397   //  SjLjSetup mainMBB
11398   //  bl mainMBB
11399   //  v_restore = 1
11400   //  b sinkMBB
11401   //
11402   // mainMBB:
11403   //  buf[LabelOffset] = LR
11404   //  v_main = 0
11405   //
11406   // sinkMBB:
11407   //  v = phi(main, restore)
11408   //
11409 
11410   MachineBasicBlock *thisMBB = MBB;
11411   MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
11412   MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
11413   MF->insert(I, mainMBB);
11414   MF->insert(I, sinkMBB);
11415 
11416   MachineInstrBuilder MIB;
11417 
11418   // Transfer the remainder of BB and its successor edges to sinkMBB.
11419   sinkMBB->splice(sinkMBB->begin(), MBB,
11420                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11421   sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
11422 
11423   // Note that the structure of the jmp_buf used here is not compatible
11424   // with that used by libc, and is not designed to be. Specifically, it
11425   // stores only those 'reserved' registers that LLVM does not otherwise
11426   // understand how to spill. Also, by convention, by the time this
11427   // intrinsic is called, Clang has already stored the frame address in the
11428   // first slot of the buffer and stack address in the third. Following the
11429   // X86 target code, we'll store the jump address in the second slot. We also
11430   // need to save the TOC pointer (R2) to handle jumps between shared
11431   // libraries, and that will be stored in the fourth slot. The thread
11432   // identifier (R13) is not affected.
11433 
11434   // thisMBB:
11435   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11436   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11437   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11438 
11439   // Prepare the IP in a register.
11440   const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
11441   Register LabelReg = MRI.createVirtualRegister(PtrRC);
11442   Register BufReg = MI.getOperand(1).getReg();
11443 
11444   if (Subtarget.is64BitELFABI()) {
11445     setUsesTOCBasePtr(*MBB->getParent());
11446     MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::STD))
11447               .addReg(PPC::X2)
11448               .addImm(TOCOffset)
11449               .addReg(BufReg)
11450               .cloneMemRefs(MI);
11451   }
11452 
11453   // Naked functions never have a base pointer, and so we use r1. For all
11454   // other functions, this decision must be delayed until PEI (prologue/epilogue insertion).
11455   unsigned BaseReg;
11456   if (MF->getFunction().hasFnAttribute(Attribute::Naked))
11457     BaseReg = Subtarget.isPPC64() ? PPC::X1 : PPC::R1;
11458   else
11459     BaseReg = Subtarget.isPPC64() ? PPC::BP8 : PPC::BP;
11460 
11461   MIB = BuildMI(*thisMBB, MI, DL,
11462                 TII->get(Subtarget.isPPC64() ? PPC::STD : PPC::STW))
11463             .addReg(BaseReg)
11464             .addImm(BPOffset)
11465             .addReg(BufReg)
11466             .cloneMemRefs(MI);
11467 
11468   // Setup
11469   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::BCLalways)).addMBB(mainMBB);
11470   MIB.addRegMask(TRI->getNoPreservedMask());
11471 
11472   BuildMI(*thisMBB, MI, DL, TII->get(PPC::LI), restoreDstReg).addImm(1);
11473 
11474   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::EH_SjLj_Setup))
11475           .addMBB(mainMBB);
11476   MIB = BuildMI(*thisMBB, MI, DL, TII->get(PPC::B)).addMBB(sinkMBB);
11477 
11478   thisMBB->addSuccessor(mainMBB, BranchProbability::getZero());
11479   thisMBB->addSuccessor(sinkMBB, BranchProbability::getOne());
11480 
11481   // mainMBB:
11482   //  mainDstReg = 0
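  // The BCLalways emitted in thisMBB set LR to the address of the
  // fall-through (restore) path; capture it here as the longjmp target.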
11483   MIB =
11484       BuildMI(mainMBB, DL,
11485               TII->get(Subtarget.isPPC64() ? PPC::MFLR8 : PPC::MFLR), LabelReg);
11486 
11487   // Store IP
11488   if (Subtarget.isPPC64()) {
11489     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STD))
11490             .addReg(LabelReg)
11491             .addImm(LabelOffset)
11492             .addReg(BufReg);
11493   } else {
11494     MIB = BuildMI(mainMBB, DL, TII->get(PPC::STW))
11495             .addReg(LabelReg)
11496             .addImm(LabelOffset)
11497             .addReg(BufReg);
11498   }
11499   MIB.cloneMemRefs(MI);
11500 
11501   BuildMI(mainMBB, DL, TII->get(PPC::LI), mainDstReg).addImm(0);
11502   mainMBB->addSuccessor(sinkMBB);
11503 
11504   // sinkMBB:
11505   BuildMI(*sinkMBB, sinkMBB->begin(), DL,
11506           TII->get(PPC::PHI), DstReg)
11507     .addReg(mainDstReg).addMBB(mainMBB)
11508     .addReg(restoreDstReg).addMBB(thisMBB);
11509 
11510   MI.eraseFromParent();
11511   return sinkMBB;
11512 }
11513 
11514 MachineBasicBlock *
11515 PPCTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
11516                                      MachineBasicBlock *MBB) const {
11517   DebugLoc DL = MI.getDebugLoc();
11518   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11519 
11520   MachineFunction *MF = MBB->getParent();
11521   MachineRegisterInfo &MRI = MF->getRegInfo();
11522 
11523   MVT PVT = getPointerTy(MF->getDataLayout());
11524   assert((PVT == MVT::i64 || PVT == MVT::i32) &&
11525          "Invalid Pointer Size!");
11526 
11527   const TargetRegisterClass *RC =
11528     (PVT == MVT::i64) ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
11529   Register Tmp = MRI.createVirtualRegister(RC);
11530   // Since FP is only updated here but NOT referenced, it's treated as GPR.
11531   unsigned FP  = (PVT == MVT::i64) ? PPC::X31 : PPC::R31;
11532   unsigned SP  = (PVT == MVT::i64) ? PPC::X1 : PPC::R1;
11533   unsigned BP =
11534       (PVT == MVT::i64)
11535           ? PPC::X30
11536           : (Subtarget.isSVR4ABI() && isPositionIndependent() ? PPC::R29
11537                                                               : PPC::R30);
11538 
11539   MachineInstrBuilder MIB;
11540 
11541   const int64_t LabelOffset = 1 * PVT.getStoreSize();
11542   const int64_t SPOffset    = 2 * PVT.getStoreSize();
11543   const int64_t TOCOffset   = 3 * PVT.getStoreSize();
11544   const int64_t BPOffset    = 4 * PVT.getStoreSize();
11545 
11546   Register BufReg = MI.getOperand(0).getReg();
11547 
11548   // Reload FP (the jumped-to function may not have had a
11549   // frame pointer, and if so, then its r31 will be restored
11550   // as necessary).
11551   if (PVT == MVT::i64) {
11552     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), FP)
11553             .addImm(0)
11554             .addReg(BufReg);
11555   } else {
11556     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), FP)
11557             .addImm(0)
11558             .addReg(BufReg);
11559   }
11560   MIB.cloneMemRefs(MI);
11561 
11562   // Reload IP
11563   if (PVT == MVT::i64) {
11564     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), Tmp)
11565             .addImm(LabelOffset)
11566             .addReg(BufReg);
11567   } else {
11568     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), Tmp)
11569             .addImm(LabelOffset)
11570             .addReg(BufReg);
11571   }
11572   MIB.cloneMemRefs(MI);
11573 
11574   // Reload SP
11575   if (PVT == MVT::i64) {
11576     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), SP)
11577             .addImm(SPOffset)
11578             .addReg(BufReg);
11579   } else {
11580     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), SP)
11581             .addImm(SPOffset)
11582             .addReg(BufReg);
11583   }
11584   MIB.cloneMemRefs(MI);
11585 
11586   // Reload BP
11587   if (PVT == MVT::i64) {
11588     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), BP)
11589             .addImm(BPOffset)
11590             .addReg(BufReg);
11591   } else {
11592     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LWZ), BP)
11593             .addImm(BPOffset)
11594             .addReg(BufReg);
11595   }
11596   MIB.cloneMemRefs(MI);
11597 
11598   // Reload TOC
11599   if (PVT == MVT::i64 && Subtarget.isSVR4ABI()) {
11600     setUsesTOCBasePtr(*MBB->getParent());
11601     MIB = BuildMI(*MBB, MI, DL, TII->get(PPC::LD), PPC::X2)
11602               .addImm(TOCOffset)
11603               .addReg(BufReg)
11604               .cloneMemRefs(MI);
11605   }
11606 
11607   // Jump
11608   BuildMI(*MBB, MI, DL,
11609           TII->get(PVT == MVT::i64 ? PPC::MTCTR8 : PPC::MTCTR)).addReg(Tmp);
11610   BuildMI(*MBB, MI, DL, TII->get(PVT == MVT::i64 ? PPC::BCTR8 : PPC::BCTR));
11611 
11612   MI.eraseFromParent();
11613   return MBB;
11614 }
11615 
11616 bool PPCTargetLowering::hasInlineStackProbe(MachineFunction &MF) const {
11617   // If the function specifically requests inline stack probes, emit them.
11618   if (MF.getFunction().hasFnAttribute("probe-stack"))
11619     return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
11620            "inline-asm";
11621   return false;
11622 }
11623 
11624 unsigned PPCTargetLowering::getStackProbeSize(MachineFunction &MF) const {
11625   const TargetFrameLowering *TFI = Subtarget.getFrameLowering();
11626   unsigned StackAlign = TFI->getStackAlignment();
11627   assert(StackAlign >= 1 && isPowerOf2_32(StackAlign) &&
11628          "Unexpected stack alignment");
11629   // The default stack probe size is 4096 if the function has no
11630   // stack-probe-size attribute.
11631   unsigned StackProbeSize = 4096;
11632   const Function &Fn = MF.getFunction();
11633   if (Fn.hasFnAttribute("stack-probe-size"))
11634     Fn.getFnAttribute("stack-probe-size")
11635         .getValueAsString()
11636         .getAsInteger(0, StackProbeSize);
11637   // Round down to the stack alignment.
11638   StackProbeSize &= ~(StackAlign - 1);
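  // If rounding down produced zero, probe at least one stack-alignment unit.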
11639   return StackProbeSize ? StackProbeSize : StackAlign;
11640 }
11641 
11642 // Lower dynamic stack allocation with probing. `emitProbedAlloca` is split
11643 // into three phases. In the first phase, it uses the pseudo instruction
11644 // PREPARE_PROBED_ALLOCA to get the future results of the actual FramePointer
11645 // and FinalStackPtr. In the second phase, it generates a loop that probes blocks.
11646 // Finally, it uses the pseudo instruction DYNAREAOFFSET to get the future result
11647 // of MaxCallFrameSize so that it can calculate the correct data area pointer.
11648 MachineBasicBlock *
11649 PPCTargetLowering::emitProbedAlloca(MachineInstr &MI,
11650                                     MachineBasicBlock *MBB) const {
11651   const bool isPPC64 = Subtarget.isPPC64();
11652   MachineFunction *MF = MBB->getParent();
11653   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11654   DebugLoc DL = MI.getDebugLoc();
11655   const unsigned ProbeSize = getStackProbeSize(*MF);
11656   const BasicBlock *ProbedBB = MBB->getBasicBlock();
11657   MachineRegisterInfo &MRI = MF->getRegInfo();
11658   // The CFG of probing stack looks as
11659   //         +-----+
11660   //         | MBB |
11661   //         +--+--+
11662   //            |
11663   //       +----v----+
11664   //  +--->+ TestMBB +---+
11665   //  |    +----+----+   |
11666   //  |         |        |
11667   //  |   +-----v----+   |
11668   //  +---+ BlockMBB |   |
11669   //      +----------+   |
11670   //                     |
11671   //       +---------+   |
11672   //       | TailMBB +<--+
11673   //       +---------+
11674   // In MBB, calculate previous frame pointer and final stack pointer.
11675   // In TestMBB, test if sp is equal to final stack pointer, if so, jump to
11676   // TailMBB. In BlockMBB, update the sp atomically and jump back to TestMBB.
11677   // TailMBB is spliced via \p MI.
11678   MachineBasicBlock *TestMBB = MF->CreateMachineBasicBlock(ProbedBB);
11679   MachineBasicBlock *TailMBB = MF->CreateMachineBasicBlock(ProbedBB);
11680   MachineBasicBlock *BlockMBB = MF->CreateMachineBasicBlock(ProbedBB);
11681 
11682   MachineFunction::iterator MBBIter = ++MBB->getIterator();
11683   MF->insert(MBBIter, TestMBB);
11684   MF->insert(MBBIter, BlockMBB);
11685   MF->insert(MBBIter, TailMBB);
11686 
11687   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
11688   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
11689 
11690   Register DstReg = MI.getOperand(0).getReg();
11691   Register NegSizeReg = MI.getOperand(1).getReg();
11692   Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
11693   Register FinalStackPtr = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11694   Register FramePointer = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11695   Register ActualNegSizeReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11696 
11697   // Since the value of NegSizeReg might be realigned during prologue/epilogue
11698   // insertion, insert a PREPARE_PROBED_ALLOCA pseudo instruction to get the
11699   // actual FramePointer and NegSize.
11700   unsigned ProbeOpc;
11701   if (!MRI.hasOneNonDBGUse(NegSizeReg))
11702     ProbeOpc =
11703         isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_64 : PPC::PREPARE_PROBED_ALLOCA_32;
11704   else
11705     // By using PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG, ActualNegSizeReg and
11706     // NegSizeReg will be allocated to the same physical register, avoiding a
11707     // redundant copy when NegSizeReg has only one use, namely the current MI,
11708     // which will then be replaced by PREPARE_PROBED_ALLOCA.
11709     ProbeOpc = isPPC64 ? PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64
11710                        : PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32;
11711   BuildMI(*MBB, {MI}, DL, TII->get(ProbeOpc), FramePointer)
11712       .addDef(ActualNegSizeReg)
11713       .addReg(NegSizeReg)
11714       .add(MI.getOperand(2))
11715       .add(MI.getOperand(3));
11716 
11717   // Calculate the final stack pointer, which equals SP + ActualNegSize.
11718   BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4),
11719           FinalStackPtr)
11720       .addReg(SPReg)
11721       .addReg(ActualNegSizeReg);
11722 
11723   // Materialize a scratch register for update.
11724   int64_t NegProbeSize = -(int64_t)ProbeSize;
11725   assert(isInt<32>(NegProbeSize) && "Unhandled probe size!");
11726   Register ScratchReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11727   if (!isInt<16>(NegProbeSize)) {
11728     Register TempReg = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11729     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
11730         .addImm(NegProbeSize >> 16);
11731     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::ORI8 : PPC::ORI),
11732             ScratchReg)
11733         .addReg(TempReg)
11734         .addImm(NegProbeSize & 0xFFFF);
11735   } else
11736     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::LI8 : PPC::LI), ScratchReg)
11737         .addImm(NegProbeSize);
11738 
11739   {
11740     // Probing leading residual part.
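    // NegMod = ActualNegSize - (ActualNegSize / NegProbeSize) * NegProbeSize,
    // i.e. the (non-positive) residual; probe it with one store-with-update.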
11741     Register Div = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11742     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::DIVD : PPC::DIVW), Div)
11743         .addReg(ActualNegSizeReg)
11744         .addReg(ScratchReg);
11745     Register Mul = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11746     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::MULLD : PPC::MULLW), Mul)
11747         .addReg(Div)
11748         .addReg(ScratchReg);
11749     Register NegMod = MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11750     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::SUBF8 : PPC::SUBF), NegMod)
11751         .addReg(Mul)
11752         .addReg(ActualNegSizeReg);
11753     BuildMI(*MBB, {MI}, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11754         .addReg(FramePointer)
11755         .addReg(SPReg)
11756         .addReg(NegMod);
11757   }
11758 
11759   {
11760     // Remaining part should be multiple of ProbeSize.
11761     Register CmpResult = MRI.createVirtualRegister(&PPC::CRRCRegClass);
11762     BuildMI(TestMBB, DL, TII->get(isPPC64 ? PPC::CMPD : PPC::CMPW), CmpResult)
11763         .addReg(SPReg)
11764         .addReg(FinalStackPtr);
11765     BuildMI(TestMBB, DL, TII->get(PPC::BCC))
11766         .addImm(PPC::PRED_EQ)
11767         .addReg(CmpResult)
11768         .addMBB(TailMBB);
11769     TestMBB->addSuccessor(BlockMBB);
11770     TestMBB->addSuccessor(TailMBB);
11771   }
11772 
11773   {
11774     // Touch the block.
11775     // |P...|P...|P...
11776     BuildMI(BlockMBB, DL, TII->get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
11777         .addReg(FramePointer)
11778         .addReg(SPReg)
11779         .addReg(ScratchReg);
11780     BuildMI(BlockMBB, DL, TII->get(PPC::B)).addMBB(TestMBB);
11781     BlockMBB->addSuccessor(TestMBB);
11782   }
11783 
11784   // Calculation of MaxCallFrameSize is deferred to prologue/epilogue insertion;
11785   // use the DYNAREAOFFSET pseudo instruction to get the future result.
11786   Register MaxCallFrameSizeReg =
11787       MRI.createVirtualRegister(isPPC64 ? G8RC : GPRC);
11788   BuildMI(TailMBB, DL,
11789           TII->get(isPPC64 ? PPC::DYNAREAOFFSET8 : PPC::DYNAREAOFFSET),
11790           MaxCallFrameSizeReg)
11791       .add(MI.getOperand(2))
11792       .add(MI.getOperand(3));
11793   BuildMI(TailMBB, DL, TII->get(isPPC64 ? PPC::ADD8 : PPC::ADD4), DstReg)
11794       .addReg(SPReg)
11795       .addReg(MaxCallFrameSizeReg);
11796 
11797   // Splice instructions after MI to TailMBB.
11798   TailMBB->splice(TailMBB->end(), MBB,
11799                   std::next(MachineBasicBlock::iterator(MI)), MBB->end());
11800   TailMBB->transferSuccessorsAndUpdatePHIs(MBB);
11801   MBB->addSuccessor(TestMBB);
11802 
11803   // Delete the pseudo instruction.
11804   MI.eraseFromParent();
11805 
11806   ++NumDynamicAllocaProbed;
11807   return TailMBB;
11808 }
11809 
11810 MachineBasicBlock *
11811 PPCTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
11812                                                MachineBasicBlock *BB) const {
11813   if (MI.getOpcode() == TargetOpcode::STACKMAP ||
11814       MI.getOpcode() == TargetOpcode::PATCHPOINT) {
11815     if (Subtarget.is64BitELFABI() &&
11816         MI.getOpcode() == TargetOpcode::PATCHPOINT &&
11817         !Subtarget.isUsingPCRelativeCalls()) {
11818       // Call lowering should have added an r2 operand to indicate a dependence
11819       // on the TOC base pointer value. It can't, however, because there is no
11820       // way to mark the dependence as implicit there, and so the stackmap code
11821       // will confuse it with a regular operand. Instead, add the dependence
11822       // here.
11823       MI.addOperand(MachineOperand::CreateReg(PPC::X2, false, true));
11824     }
11825 
11826     return emitPatchPoint(MI, BB);
11827   }
11828 
11829   if (MI.getOpcode() == PPC::EH_SjLj_SetJmp32 ||
11830       MI.getOpcode() == PPC::EH_SjLj_SetJmp64) {
11831     return emitEHSjLjSetJmp(MI, BB);
11832   } else if (MI.getOpcode() == PPC::EH_SjLj_LongJmp32 ||
11833              MI.getOpcode() == PPC::EH_SjLj_LongJmp64) {
11834     return emitEHSjLjLongJmp(MI, BB);
11835   }
11836 
11837   const TargetInstrInfo *TII = Subtarget.getInstrInfo();
11838 
11839   // To "insert" these instructions we actually have to insert their
11840   // control-flow patterns.
11841   const BasicBlock *LLVM_BB = BB->getBasicBlock();
11842   MachineFunction::iterator It = ++BB->getIterator();
11843 
11844   MachineFunction *F = BB->getParent();
11845 
11846   if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11847       MI.getOpcode() == PPC::SELECT_CC_I8 || MI.getOpcode() == PPC::SELECT_I4 ||
11848       MI.getOpcode() == PPC::SELECT_I8) {
11849     SmallVector<MachineOperand, 2> Cond;
11850     if (MI.getOpcode() == PPC::SELECT_CC_I4 ||
11851         MI.getOpcode() == PPC::SELECT_CC_I8)
11852       Cond.push_back(MI.getOperand(4));
11853     else
11854       Cond.push_back(MachineOperand::CreateImm(PPC::PRED_BIT_SET));
11855     Cond.push_back(MI.getOperand(1));
11856 
11857     DebugLoc dl = MI.getDebugLoc();
11858     TII->insertSelect(*BB, MI, dl, MI.getOperand(0).getReg(), Cond,
11859                       MI.getOperand(2).getReg(), MI.getOperand(3).getReg());
11860   } else if (MI.getOpcode() == PPC::SELECT_CC_F4 ||
11861              MI.getOpcode() == PPC::SELECT_CC_F8 ||
11862              MI.getOpcode() == PPC::SELECT_CC_F16 ||
11863              MI.getOpcode() == PPC::SELECT_CC_VRRC ||
11864              MI.getOpcode() == PPC::SELECT_CC_VSFRC ||
11865              MI.getOpcode() == PPC::SELECT_CC_VSSRC ||
11866              MI.getOpcode() == PPC::SELECT_CC_VSRC ||
11867              MI.getOpcode() == PPC::SELECT_CC_SPE4 ||
11868              MI.getOpcode() == PPC::SELECT_CC_SPE ||
11869              MI.getOpcode() == PPC::SELECT_F4 ||
11870              MI.getOpcode() == PPC::SELECT_F8 ||
11871              MI.getOpcode() == PPC::SELECT_F16 ||
11872              MI.getOpcode() == PPC::SELECT_SPE ||
11873              MI.getOpcode() == PPC::SELECT_SPE4 ||
11874              MI.getOpcode() == PPC::SELECT_VRRC ||
11875              MI.getOpcode() == PPC::SELECT_VSFRC ||
11876              MI.getOpcode() == PPC::SELECT_VSSRC ||
11877              MI.getOpcode() == PPC::SELECT_VSRC) {
11878     // The incoming instruction knows the destination vreg to set, the
11879     // condition code register to branch on, the true/false values to
11880     // select between, and a branch opcode to use.
11881 
11882     //  thisMBB:
11883     //  ...
11884     //   TrueVal = ...
11885     //   cmpTY ccX, r1, r2
11886     //   bCC copy1MBB
11887     //   fallthrough --> copy0MBB
11888     MachineBasicBlock *thisMBB = BB;
11889     MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11890     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11891     DebugLoc dl = MI.getDebugLoc();
11892     F->insert(It, copy0MBB);
11893     F->insert(It, sinkMBB);
11894 
11895     // Transfer the remainder of BB and its successor edges to sinkMBB.
11896     sinkMBB->splice(sinkMBB->begin(), BB,
11897                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11898     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11899 
11900     // Next, add the true and fallthrough blocks as its successors.
11901     BB->addSuccessor(copy0MBB);
11902     BB->addSuccessor(sinkMBB);
11903 
11904     if (MI.getOpcode() == PPC::SELECT_I4 || MI.getOpcode() == PPC::SELECT_I8 ||
11905         MI.getOpcode() == PPC::SELECT_F4 || MI.getOpcode() == PPC::SELECT_F8 ||
11906         MI.getOpcode() == PPC::SELECT_F16 ||
11907         MI.getOpcode() == PPC::SELECT_SPE4 ||
11908         MI.getOpcode() == PPC::SELECT_SPE ||
11909         MI.getOpcode() == PPC::SELECT_VRRC ||
11910         MI.getOpcode() == PPC::SELECT_VSFRC ||
11911         MI.getOpcode() == PPC::SELECT_VSSRC ||
11912         MI.getOpcode() == PPC::SELECT_VSRC) {
11913       BuildMI(BB, dl, TII->get(PPC::BC))
11914           .addReg(MI.getOperand(1).getReg())
11915           .addMBB(sinkMBB);
11916     } else {
11917       unsigned SelectPred = MI.getOperand(4).getImm();
11918       BuildMI(BB, dl, TII->get(PPC::BCC))
11919           .addImm(SelectPred)
11920           .addReg(MI.getOperand(1).getReg())
11921           .addMBB(sinkMBB);
11922     }
11923 
11924     //  copy0MBB:
11925     //   %FalseValue = ...
11926     //   # fallthrough to sinkMBB
11927     BB = copy0MBB;
11928 
11929     // Update machine-CFG edges
11930     BB->addSuccessor(sinkMBB);
11931 
11932     //  sinkMBB:
11933     //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11934     //  ...
11935     BB = sinkMBB;
11936     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::PHI), MI.getOperand(0).getReg())
11937         .addReg(MI.getOperand(3).getReg())
11938         .addMBB(copy0MBB)
11939         .addReg(MI.getOperand(2).getReg())
11940         .addMBB(thisMBB);
11941   } else if (MI.getOpcode() == PPC::ReadTB) {
11942     // To read the 64-bit time-base register on a 32-bit target, we read the
11943     // two halves. Should the counter have wrapped while it was being read, we
11944     // need to try again.
11945     // ...
11946     // readLoop:
11947     // mfspr Rx,TBU # load from TBU
11948     // mfspr Ry,TB  # load from TB
11949     // mfspr Rz,TBU # load from TBU
11950     // cmpw crX,Rx,Rz # check if 'old'='new'
11951     // bne readLoop   # branch if they're not equal
11952     // ...
11953 
11954     MachineBasicBlock *readMBB = F->CreateMachineBasicBlock(LLVM_BB);
11955     MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11956     DebugLoc dl = MI.getDebugLoc();
11957     F->insert(It, readMBB);
11958     F->insert(It, sinkMBB);
11959 
11960     // Transfer the remainder of BB and its successor edges to sinkMBB.
11961     sinkMBB->splice(sinkMBB->begin(), BB,
11962                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
11963     sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11964 
11965     BB->addSuccessor(readMBB);
11966     BB = readMBB;
11967 
11968     MachineRegisterInfo &RegInfo = F->getRegInfo();
11969     Register ReadAgainReg = RegInfo.createVirtualRegister(&PPC::GPRCRegClass);
11970     Register LoReg = MI.getOperand(0).getReg();
11971     Register HiReg = MI.getOperand(1).getReg();
11972 
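    // SPR 269 is TBU (time base, upper 32 bits); SPR 268 is TBL (lower 32 bits).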
11973     BuildMI(BB, dl, TII->get(PPC::MFSPR), HiReg).addImm(269);
11974     BuildMI(BB, dl, TII->get(PPC::MFSPR), LoReg).addImm(268);
11975     BuildMI(BB, dl, TII->get(PPC::MFSPR), ReadAgainReg).addImm(269);
11976 
11977     Register CmpReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
11978 
11979     BuildMI(BB, dl, TII->get(PPC::CMPW), CmpReg)
11980         .addReg(HiReg)
11981         .addReg(ReadAgainReg);
11982     BuildMI(BB, dl, TII->get(PPC::BCC))
11983         .addImm(PPC::PRED_NE)
11984         .addReg(CmpReg)
11985         .addMBB(readMBB);
11986 
11987     BB->addSuccessor(readMBB);
11988     BB->addSuccessor(sinkMBB);
11989   } else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I8)
11990     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4);
11991   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I16)
11992     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4);
11993   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I32)
11994     BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4);
11995   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_ADD_I64)
11996     BB = EmitAtomicBinary(MI, BB, 8, PPC::ADD8);
11997 
11998   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I8)
11999     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::AND);
12000   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I16)
12001     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::AND);
12002   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I32)
12003     BB = EmitAtomicBinary(MI, BB, 4, PPC::AND);
12004   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_AND_I64)
12005     BB = EmitAtomicBinary(MI, BB, 8, PPC::AND8);
12006 
12007   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I8)
12008     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::OR);
12009   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I16)
12010     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::OR);
12011   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I32)
12012     BB = EmitAtomicBinary(MI, BB, 4, PPC::OR);
12013   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_OR_I64)
12014     BB = EmitAtomicBinary(MI, BB, 8, PPC::OR8);
12015 
12016   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I8)
12017     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::XOR);
12018   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I16)
12019     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::XOR);
12020   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I32)
12021     BB = EmitAtomicBinary(MI, BB, 4, PPC::XOR);
12022   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_XOR_I64)
12023     BB = EmitAtomicBinary(MI, BB, 8, PPC::XOR8);
12024 
12025   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I8)
12026     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::NAND);
12027   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I16)
12028     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::NAND);
12029   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I32)
12030     BB = EmitAtomicBinary(MI, BB, 4, PPC::NAND);
12031   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_NAND_I64)
12032     BB = EmitAtomicBinary(MI, BB, 8, PPC::NAND8);
12033 
12034   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I8)
12035     BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::SUBF);
12036   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I16)
12037     BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::SUBF);
12038   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I32)
12039     BB = EmitAtomicBinary(MI, BB, 4, PPC::SUBF);
12040   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_SUB_I64)
12041     BB = EmitAtomicBinary(MI, BB, 8, PPC::SUBF8);
12042 
12043   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I8)
12044     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_GE);
12045   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I16)
12046     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_GE);
12047   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I32)
12048     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_GE);
12049   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MIN_I64)
12050     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_GE);
12051 
12052   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I8)
12053     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPW, PPC::PRED_LE);
12054   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I16)
12055     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPW, PPC::PRED_LE);
12056   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I32)
12057     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPW, PPC::PRED_LE);
12058   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_MAX_I64)
12059     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPD, PPC::PRED_LE);
12060 
12061   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I8)
12062     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_GE);
12063   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I16)
12064     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_GE);
12065   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I32)
12066     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_GE);
12067   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMIN_I64)
12068     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_GE);
12069 
12070   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I8)
12071     BB = EmitPartwordAtomicBinary(MI, BB, true, 0, PPC::CMPLW, PPC::PRED_LE);
12072   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I16)
12073     BB = EmitPartwordAtomicBinary(MI, BB, false, 0, PPC::CMPLW, PPC::PRED_LE);
12074   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I32)
12075     BB = EmitAtomicBinary(MI, BB, 4, 0, PPC::CMPLW, PPC::PRED_LE);
12076   else if (MI.getOpcode() == PPC::ATOMIC_LOAD_UMAX_I64)
12077     BB = EmitAtomicBinary(MI, BB, 8, 0, PPC::CMPLD, PPC::PRED_LE);
12078 
12079   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I8)
12080     BB = EmitPartwordAtomicBinary(MI, BB, true, 0);
12081   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I16)
12082     BB = EmitPartwordAtomicBinary(MI, BB, false, 0);
12083   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I32)
12084     BB = EmitAtomicBinary(MI, BB, 4, 0);
12085   else if (MI.getOpcode() == PPC::ATOMIC_SWAP_I64)
12086     BB = EmitAtomicBinary(MI, BB, 8, 0);
12087   else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I32 ||
12088            MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64 ||
12089            (Subtarget.hasPartwordAtomics() &&
12090             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8) ||
12091            (Subtarget.hasPartwordAtomics() &&
12092             MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16)) {
12093     bool is64bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I64;
12094 
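    // Default to the doubleword reservation pair (ldarx/stdcx.); the switch
    // below selects the byte/halfword/word forms for narrower accesses.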
12095     auto LoadMnemonic = PPC::LDARX;
12096     auto StoreMnemonic = PPC::STDCX;
12097     switch (MI.getOpcode()) {
12098     default:
12099       llvm_unreachable("Compare and swap of unknown size");
12100     case PPC::ATOMIC_CMP_SWAP_I8:
12101       LoadMnemonic = PPC::LBARX;
12102       StoreMnemonic = PPC::STBCX;
12103       assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
12104       break;
12105     case PPC::ATOMIC_CMP_SWAP_I16:
12106       LoadMnemonic = PPC::LHARX;
12107       StoreMnemonic = PPC::STHCX;
12108       assert(Subtarget.hasPartwordAtomics() && "No support for partword atomics.");
12109       break;
12110     case PPC::ATOMIC_CMP_SWAP_I32:
12111       LoadMnemonic = PPC::LWARX;
12112       StoreMnemonic = PPC::STWCX;
12113       break;
12114     case PPC::ATOMIC_CMP_SWAP_I64:
12115       LoadMnemonic = PPC::LDARX;
12116       StoreMnemonic = PPC::STDCX;
12117       break;
12118     }
12119     Register dest = MI.getOperand(0).getReg();
12120     Register ptrA = MI.getOperand(1).getReg();
12121     Register ptrB = MI.getOperand(2).getReg();
12122     Register oldval = MI.getOperand(3).getReg();
12123     Register newval = MI.getOperand(4).getReg();
12124     DebugLoc dl = MI.getDebugLoc();
12125 
12126     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12127     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12128     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12129     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12130     F->insert(It, loop1MBB);
12131     F->insert(It, loop2MBB);
12132     F->insert(It, midMBB);
12133     F->insert(It, exitMBB);
12134     exitMBB->splice(exitMBB->begin(), BB,
12135                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12136     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12137 
12138     //  thisMBB:
12139     //   ...
12140     //   fallthrough --> loop1MBB
12141     BB->addSuccessor(loop1MBB);
12142 
12143     // loop1MBB:
12144     //   l[bhwd]arx dest, ptr
12145     //   cmp[wd] dest, oldval
12146     //   bne- midMBB
12147     // loop2MBB:
12148     //   st[bhwd]cx. newval, ptr
12149     //   bne- loop1MBB
12150     //   b exitBB
12151     // midMBB:
12152     //   st[bhwd]cx. dest, ptr
12153     // exitBB:
12154     BB = loop1MBB;
12155     BuildMI(BB, dl, TII->get(LoadMnemonic), dest).addReg(ptrA).addReg(ptrB);
12156     BuildMI(BB, dl, TII->get(is64bit ? PPC::CMPD : PPC::CMPW), PPC::CR0)
12157         .addReg(oldval)
12158         .addReg(dest);
12159     BuildMI(BB, dl, TII->get(PPC::BCC))
12160         .addImm(PPC::PRED_NE)
12161         .addReg(PPC::CR0)
12162         .addMBB(midMBB);
12163     BB->addSuccessor(loop2MBB);
12164     BB->addSuccessor(midMBB);
12165 
12166     BB = loop2MBB;
12167     BuildMI(BB, dl, TII->get(StoreMnemonic))
12168         .addReg(newval)
12169         .addReg(ptrA)
12170         .addReg(ptrB);
12171     BuildMI(BB, dl, TII->get(PPC::BCC))
12172         .addImm(PPC::PRED_NE)
12173         .addReg(PPC::CR0)
12174         .addMBB(loop1MBB);
12175     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12176     BB->addSuccessor(loop1MBB);
12177     BB->addSuccessor(exitMBB);
12178 
12179     BB = midMBB;
12180     BuildMI(BB, dl, TII->get(StoreMnemonic))
12181         .addReg(dest)
12182         .addReg(ptrA)
12183         .addReg(ptrB);
12184     BB->addSuccessor(exitMBB);
12185 
12186     //  exitMBB:
12187     //   ...
12188     BB = exitMBB;
12189   } else if (MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8 ||
12190              MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I16) {
12191     // We must use 64-bit registers for addresses when targeting 64-bit,
12192     // since we're actually doing arithmetic on them.  Other registers
12193     // can be 32-bit.
12194     bool is64bit = Subtarget.isPPC64();
12195     bool isLittleEndian = Subtarget.isLittleEndian();
12196     bool is8bit = MI.getOpcode() == PPC::ATOMIC_CMP_SWAP_I8;
12197 
12198     Register dest = MI.getOperand(0).getReg();
12199     Register ptrA = MI.getOperand(1).getReg();
12200     Register ptrB = MI.getOperand(2).getReg();
12201     Register oldval = MI.getOperand(3).getReg();
12202     Register newval = MI.getOperand(4).getReg();
12203     DebugLoc dl = MI.getDebugLoc();
12204 
12205     MachineBasicBlock *loop1MBB = F->CreateMachineBasicBlock(LLVM_BB);
12206     MachineBasicBlock *loop2MBB = F->CreateMachineBasicBlock(LLVM_BB);
12207     MachineBasicBlock *midMBB = F->CreateMachineBasicBlock(LLVM_BB);
12208     MachineBasicBlock *exitMBB = F->CreateMachineBasicBlock(LLVM_BB);
12209     F->insert(It, loop1MBB);
12210     F->insert(It, loop2MBB);
12211     F->insert(It, midMBB);
12212     F->insert(It, exitMBB);
12213     exitMBB->splice(exitMBB->begin(), BB,
12214                     std::next(MachineBasicBlock::iterator(MI)), BB->end());
12215     exitMBB->transferSuccessorsAndUpdatePHIs(BB);
12216 
12217     MachineRegisterInfo &RegInfo = F->getRegInfo();
12218     const TargetRegisterClass *RC =
12219         is64bit ? &PPC::G8RCRegClass : &PPC::GPRCRegClass;
12220     const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
12221 
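    // Only the pointer arithmetic may need 64-bit registers (RC); all of the
    // shift and mask bookkeeping below is done in 32-bit GPRs.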
12222     Register PtrReg = RegInfo.createVirtualRegister(RC);
12223     Register Shift1Reg = RegInfo.createVirtualRegister(GPRC);
12224     Register ShiftReg =
12225         isLittleEndian ? Shift1Reg : RegInfo.createVirtualRegister(GPRC);
12226     Register NewVal2Reg = RegInfo.createVirtualRegister(GPRC);
12227     Register NewVal3Reg = RegInfo.createVirtualRegister(GPRC);
12228     Register OldVal2Reg = RegInfo.createVirtualRegister(GPRC);
12229     Register OldVal3Reg = RegInfo.createVirtualRegister(GPRC);
12230     Register MaskReg = RegInfo.createVirtualRegister(GPRC);
12231     Register Mask2Reg = RegInfo.createVirtualRegister(GPRC);
12232     Register Mask3Reg = RegInfo.createVirtualRegister(GPRC);
12233     Register Tmp2Reg = RegInfo.createVirtualRegister(GPRC);
12234     Register Tmp4Reg = RegInfo.createVirtualRegister(GPRC);
12235     Register TmpDestReg = RegInfo.createVirtualRegister(GPRC);
12236     Register Ptr1Reg;
12237     Register TmpReg = RegInfo.createVirtualRegister(GPRC);
12238     Register ZeroReg = is64bit ? PPC::ZERO8 : PPC::ZERO;
12239     //  thisMBB:
12240     //   ...
12241     //   fallthrough --> loop1MBB
12242     BB->addSuccessor(loop1MBB);
12243 
12244     // The 4-byte load must be aligned, while a char or short may be
12245     // anywhere in the word.  Hence all this nasty bookkeeping code.
12246     //   add ptr1, ptrA, ptrB [copy if ptrA==0]
12247     //   rlwinm shift1, ptr1, 3, 27, 28 [3, 27, 27]
12248     //   xori shift, shift1, 24 [16]
12249     //   rlwinm ptr, ptr1, 0, 0, 29
12250     //   slw newval2, newval, shift
12251     //   slw oldval2, oldval, shift
12252     //   li mask2, 255 [li mask3, 0; ori mask2, mask3, 65535]
12253     //   slw mask, mask2, shift
12254     //   and newval3, newval2, mask
12255     //   and oldval3, oldval2, mask
12256     // loop1MBB:
12257     //   lwarx tmpDest, ptr
12258     //   and tmp, tmpDest, mask
12259     //   cmpw tmp, oldval3
12260     //   bne- midMBB
12261     // loop2MBB:
12262     //   andc tmp2, tmpDest, mask
12263     //   or tmp4, tmp2, newval3
12264     //   stwcx. tmp4, ptr
12265     //   bne- loop1MBB
12266     //   b exitBB
12267     // midMBB:
12268     //   stwcx. tmpDest, ptr
12269     // exitBB:
12270     //   srw dest, tmpDest, shift
12271     if (ptrA != ZeroReg) {
12272       Ptr1Reg = RegInfo.createVirtualRegister(RC);
12273       BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg)
12274           .addReg(ptrA)
12275           .addReg(ptrB);
12276     } else {
12277       Ptr1Reg = ptrB;
12278     }
12279 
12280     // We need to use a 32-bit subregister here to avoid a register class
12281     // mismatch in 64-bit mode.
12282     BuildMI(BB, dl, TII->get(PPC::RLWINM), Shift1Reg)
12283         .addReg(Ptr1Reg, 0, is64bit ? PPC::sub_32 : 0)
12284         .addImm(3)
12285         .addImm(27)
12286         .addImm(is8bit ? 28 : 27);
12287     if (!isLittleEndian)
12288       BuildMI(BB, dl, TII->get(PPC::XORI), ShiftReg)
12289           .addReg(Shift1Reg)
12290           .addImm(is8bit ? 24 : 16);
12291     if (is64bit)
12292       BuildMI(BB, dl, TII->get(PPC::RLDICR), PtrReg)
12293           .addReg(Ptr1Reg)
12294           .addImm(0)
12295           .addImm(61);
12296     else
12297       BuildMI(BB, dl, TII->get(PPC::RLWINM), PtrReg)
12298           .addReg(Ptr1Reg)
12299           .addImm(0)
12300           .addImm(0)
12301           .addImm(29);
12302     BuildMI(BB, dl, TII->get(PPC::SLW), NewVal2Reg)
12303         .addReg(newval)
12304         .addReg(ShiftReg);
12305     BuildMI(BB, dl, TII->get(PPC::SLW), OldVal2Reg)
12306         .addReg(oldval)
12307         .addReg(ShiftReg);
12308     if (is8bit)
12309       BuildMI(BB, dl, TII->get(PPC::LI), Mask2Reg).addImm(255);
12310     else {
12311       BuildMI(BB, dl, TII->get(PPC::LI), Mask3Reg).addImm(0);
12312       BuildMI(BB, dl, TII->get(PPC::ORI), Mask2Reg)
12313           .addReg(Mask3Reg)
12314           .addImm(65535);
12315     }
12316     BuildMI(BB, dl, TII->get(PPC::SLW), MaskReg)
12317         .addReg(Mask2Reg)
12318         .addReg(ShiftReg);
12319     BuildMI(BB, dl, TII->get(PPC::AND), NewVal3Reg)
12320         .addReg(NewVal2Reg)
12321         .addReg(MaskReg);
12322     BuildMI(BB, dl, TII->get(PPC::AND), OldVal3Reg)
12323         .addReg(OldVal2Reg)
12324         .addReg(MaskReg);
12325 
12326     BB = loop1MBB;
12327     BuildMI(BB, dl, TII->get(PPC::LWARX), TmpDestReg)
12328         .addReg(ZeroReg)
12329         .addReg(PtrReg);
12330     BuildMI(BB, dl, TII->get(PPC::AND), TmpReg)
12331         .addReg(TmpDestReg)
12332         .addReg(MaskReg);
12333     BuildMI(BB, dl, TII->get(PPC::CMPW), PPC::CR0)
12334         .addReg(TmpReg)
12335         .addReg(OldVal3Reg);
12336     BuildMI(BB, dl, TII->get(PPC::BCC))
12337         .addImm(PPC::PRED_NE)
12338         .addReg(PPC::CR0)
12339         .addMBB(midMBB);
12340     BB->addSuccessor(loop2MBB);
12341     BB->addSuccessor(midMBB);
12342 
12343     BB = loop2MBB;
12344     BuildMI(BB, dl, TII->get(PPC::ANDC), Tmp2Reg)
12345         .addReg(TmpDestReg)
12346         .addReg(MaskReg);
12347     BuildMI(BB, dl, TII->get(PPC::OR), Tmp4Reg)
12348         .addReg(Tmp2Reg)
12349         .addReg(NewVal3Reg);
12350     BuildMI(BB, dl, TII->get(PPC::STWCX))
12351         .addReg(Tmp4Reg)
12352         .addReg(ZeroReg)
12353         .addReg(PtrReg);
12354     BuildMI(BB, dl, TII->get(PPC::BCC))
12355         .addImm(PPC::PRED_NE)
12356         .addReg(PPC::CR0)
12357         .addMBB(loop1MBB);
12358     BuildMI(BB, dl, TII->get(PPC::B)).addMBB(exitMBB);
12359     BB->addSuccessor(loop1MBB);
12360     BB->addSuccessor(exitMBB);
12361 
12362     BB = midMBB;
12363     BuildMI(BB, dl, TII->get(PPC::STWCX))
12364         .addReg(TmpDestReg)
12365         .addReg(ZeroReg)
12366         .addReg(PtrReg);
12367     BB->addSuccessor(exitMBB);
12368 
12369     //  exitMBB:
12370     //   ...
12371     BB = exitMBB;
12372     BuildMI(*BB, BB->begin(), dl, TII->get(PPC::SRW), dest)
12373         .addReg(TmpReg)
12374         .addReg(ShiftReg);
12375   } else if (MI.getOpcode() == PPC::FADDrtz) {
12376     // This pseudo performs an FADD with rounding mode temporarily forced
12377     // to round-to-zero.  We emit this via custom inserter since the FPSCR
12378     // is not modeled at the SelectionDAG level.
12379     Register Dest = MI.getOperand(0).getReg();
12380     Register Src1 = MI.getOperand(1).getReg();
12381     Register Src2 = MI.getOperand(2).getReg();
12382     DebugLoc dl = MI.getDebugLoc();
12383 
12384     MachineRegisterInfo &RegInfo = F->getRegInfo();
12385     Register MFFSReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12386 
12387     // Save FPSCR value.
12388     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), MFFSReg);
12389 
12390     // Set rounding mode to round-to-zero.
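    // FPSCR bits 30:31 (as numbered by mtfsb[01]) form the RN field; setting
    // bit 31 and clearing bit 30 selects RN = 0b01, round toward zero.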
12391     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB1))
12392         .addImm(31)
12393         .addReg(PPC::RM, RegState::ImplicitDefine);
12394 
12395     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSB0))
12396         .addImm(30)
12397         .addReg(PPC::RM, RegState::ImplicitDefine);
12398 
12399     // Perform addition.
12400     auto MIB = BuildMI(*BB, MI, dl, TII->get(PPC::FADD), Dest)
12401                    .addReg(Src1)
12402                    .addReg(Src2);
12403     if (MI.getFlag(MachineInstr::NoFPExcept))
12404       MIB.setMIFlag(MachineInstr::NoFPExcept);
12405 
12406     // Restore FPSCR value.
12407     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSFb)).addImm(1).addReg(MFFSReg);
12408   } else if (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12409              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT ||
12410              MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12411              MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8) {
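    // These pseudos test bit 0 of a GPR: andi. (andi8. for the 64-bit forms)
    // with an immediate of 1 sets CR0, and the EQ or GT bit of CR0 is then
    // copied out as the i1 result.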
12412     unsigned Opcode = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8 ||
12413                        MI.getOpcode() == PPC::ANDI_rec_1_GT_BIT8)
12414                           ? PPC::ANDI8_rec
12415                           : PPC::ANDI_rec;
12416     bool IsEQ = (MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT ||
12417                  MI.getOpcode() == PPC::ANDI_rec_1_EQ_BIT8);
12418 
12419     MachineRegisterInfo &RegInfo = F->getRegInfo();
12420     Register Dest = RegInfo.createVirtualRegister(
12421         Opcode == PPC::ANDI_rec ? &PPC::GPRCRegClass : &PPC::G8RCRegClass);
12422 
12423     DebugLoc Dl = MI.getDebugLoc();
12424     BuildMI(*BB, MI, Dl, TII->get(Opcode), Dest)
12425         .addReg(MI.getOperand(1).getReg())
12426         .addImm(1);
12427     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12428             MI.getOperand(0).getReg())
12429         .addReg(IsEQ ? PPC::CR0EQ : PPC::CR0GT);
12430   } else if (MI.getOpcode() == PPC::TCHECK_RET) {
12431     DebugLoc Dl = MI.getDebugLoc();
12432     MachineRegisterInfo &RegInfo = F->getRegInfo();
12433     Register CRReg = RegInfo.createVirtualRegister(&PPC::CRRCRegClass);
12434     BuildMI(*BB, MI, Dl, TII->get(PPC::TCHECK), CRReg);
12435     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12436             MI.getOperand(0).getReg())
12437         .addReg(CRReg);
12438   } else if (MI.getOpcode() == PPC::TBEGIN_RET) {
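    // tbegin. reports the transaction outcome in CR0; the pseudo's result is
    // CR0's EQ bit.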
12439     DebugLoc Dl = MI.getDebugLoc();
12440     unsigned Imm = MI.getOperand(1).getImm();
12441     BuildMI(*BB, MI, Dl, TII->get(PPC::TBEGIN)).addImm(Imm);
12442     BuildMI(*BB, MI, Dl, TII->get(TargetOpcode::COPY),
12443             MI.getOperand(0).getReg())
12444         .addReg(PPC::CR0EQ);
12445   } else if (MI.getOpcode() == PPC::SETRNDi) {
12446     DebugLoc dl = MI.getDebugLoc();
12447     Register OldFPSCRReg = MI.getOperand(0).getReg();
12448 
12449     // Save FPSCR value.
12450     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12451 
12452     // The floating point rounding mode is in bits 62:63 of the FPSCR, and has
12453     // the following settings:
12454     //   00 Round to nearest
12455     //   01 Round to 0
12456     //   10 Round to +inf
12457     //   11 Round to -inf
12458 
12459     // When the operand is an immediate, use its two least significant bits to
12460     // set bits 62:63 of the FPSCR.
12461     unsigned Mode = MI.getOperand(1).getImm();
12462     BuildMI(*BB, MI, dl, TII->get((Mode & 1) ? PPC::MTFSB1 : PPC::MTFSB0))
12463         .addImm(31)
12464         .addReg(PPC::RM, RegState::ImplicitDefine);
12465 
12466     BuildMI(*BB, MI, dl, TII->get((Mode & 2) ? PPC::MTFSB1 : PPC::MTFSB0))
12467         .addImm(30)
12468         .addReg(PPC::RM, RegState::ImplicitDefine);
12469   } else if (MI.getOpcode() == PPC::SETRND) {
12470     DebugLoc dl = MI.getDebugLoc();
12471 
12472     // Copy register from F8RCRegClass::SrcReg to G8RCRegClass::DestReg
12473     // or copy register from G8RCRegClass::SrcReg to F8RCRegClass::DestReg.
12474     // If the target doesn't have DirectMove, we go through the stack to do the
12475     // conversion, because the target lacks instructions such as mtvsrd or
12476     // mfvsrd that could move the value directly.
12477     auto copyRegFromG8RCOrF8RC = [&] (unsigned DestReg, unsigned SrcReg) {
12478       if (Subtarget.hasDirectMove()) {
12479         BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), DestReg)
12480           .addReg(SrcReg);
12481       } else {
12482         // Use stack to do the register copy.
12483         unsigned StoreOp = PPC::STD, LoadOp = PPC::LFD;
12484         MachineRegisterInfo &RegInfo = F->getRegInfo();
12485         const TargetRegisterClass *RC = RegInfo.getRegClass(SrcReg);
12486         if (RC == &PPC::F8RCRegClass) {
12487           // Copy register from F8RCRegClass to G8RCRegClass.
12488           assert((RegInfo.getRegClass(DestReg) == &PPC::G8RCRegClass) &&
12489                  "Unsupported RegClass.");
12490 
12491           StoreOp = PPC::STFD;
12492           LoadOp = PPC::LD;
12493         } else {
12494           // Copy register from G8RCRegClass to F8RCRegClass.
12495           assert((RegInfo.getRegClass(SrcReg) == &PPC::G8RCRegClass) &&
12496                  (RegInfo.getRegClass(DestReg) == &PPC::F8RCRegClass) &&
12497                  "Unsupported RegClass.");
12498         }
12499 
12500         MachineFrameInfo &MFI = F->getFrameInfo();
12501         int FrameIdx = MFI.CreateStackObject(8, Align(8), false);
12502 
12503         MachineMemOperand *MMOStore = F->getMachineMemOperand(
12504             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12505             MachineMemOperand::MOStore, MFI.getObjectSize(FrameIdx),
12506             MFI.getObjectAlign(FrameIdx));
12507 
12508         // Store the SrcReg into the stack.
12509         BuildMI(*BB, MI, dl, TII->get(StoreOp))
12510           .addReg(SrcReg)
12511           .addImm(0)
12512           .addFrameIndex(FrameIdx)
12513           .addMemOperand(MMOStore);
12514 
12515         MachineMemOperand *MMOLoad = F->getMachineMemOperand(
12516             MachinePointerInfo::getFixedStack(*F, FrameIdx, 0),
12517             MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIdx),
12518             MFI.getObjectAlign(FrameIdx));
12519 
12520         // Load from the stack where SrcReg is stored, and save to DestReg,
12521         // so we have done the RegClass conversion from RegClass::SrcReg to
12522         // RegClass::DestReg.
12523         BuildMI(*BB, MI, dl, TII->get(LoadOp), DestReg)
12524           .addImm(0)
12525           .addFrameIndex(FrameIdx)
12526           .addMemOperand(MMOLoad);
12527       }
12528     };
12529 
12530     Register OldFPSCRReg = MI.getOperand(0).getReg();
12531 
12532     // Save FPSCR value.
12533     BuildMI(*BB, MI, dl, TII->get(PPC::MFFS), OldFPSCRReg);
12534 
12535     // When the operand is a GPRC register, use its two least significant bits
12536     // and the mtfsf instruction to set bits 62:63 of the FPSCR.
12537     //
12538     // copy OldFPSCRTmpReg, OldFPSCRReg
12539     // (INSERT_SUBREG ExtSrcReg, (IMPLICIT_DEF ImDefReg), SrcOp, 1)
12540     // rldimi NewFPSCRTmpReg, ExtSrcReg, OldFPSCRReg, 0, 62
12541     // copy NewFPSCRReg, NewFPSCRTmpReg
12542     // mtfsf 255, NewFPSCRReg
12543     MachineOperand SrcOp = MI.getOperand(1);
12544     MachineRegisterInfo &RegInfo = F->getRegInfo();
12545     Register OldFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12546 
12547     copyRegFromG8RCOrF8RC(OldFPSCRTmpReg, OldFPSCRReg);
12548 
12549     Register ImDefReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12550     Register ExtSrcReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12551 
12552     // The first operand of INSERT_SUBREG should be a register that has
12553     // subregisters. Since we only care about its register class, an
12554     // IMPLICIT_DEF register is sufficient.
12555     BuildMI(*BB, MI, dl, TII->get(TargetOpcode::IMPLICIT_DEF), ImDefReg);
12556     BuildMI(*BB, MI, dl, TII->get(PPC::INSERT_SUBREG), ExtSrcReg)
12557       .addReg(ImDefReg)
12558       .add(SrcOp)
12559       .addImm(1);
12560 
12561     Register NewFPSCRTmpReg = RegInfo.createVirtualRegister(&PPC::G8RCRegClass);
12562     BuildMI(*BB, MI, dl, TII->get(PPC::RLDIMI), NewFPSCRTmpReg)
12563       .addReg(OldFPSCRTmpReg)
12564       .addReg(ExtSrcReg)
12565       .addImm(0)
12566       .addImm(62);
12567 
12568     Register NewFPSCRReg = RegInfo.createVirtualRegister(&PPC::F8RCRegClass);
12569     copyRegFromG8RCOrF8RC(NewFPSCRReg, NewFPSCRTmpReg);
12570 
12571     // The mask 255 means that bits 32:63 of NewFPSCRReg are written to bits
12572     // 32:63 of the FPSCR.
12573     BuildMI(*BB, MI, dl, TII->get(PPC::MTFSF))
12574       .addImm(255)
12575       .addReg(NewFPSCRReg)
12576       .addImm(0)
12577       .addImm(0);
12578   } else if (MI.getOpcode() == PPC::SETFLM) {
12579     DebugLoc Dl = MI.getDebugLoc();
12580 
12581     // The result of setflm is the previous FPSCR content, so save it first.
12582     Register OldFPSCRReg = MI.getOperand(0).getReg();
12583     BuildMI(*BB, MI, Dl, TII->get(PPC::MFFS), OldFPSCRReg);
12584 
12585     // Put bits 32:63 of the new value into the FPSCR.
12586     Register NewFPSCRReg = MI.getOperand(1).getReg();
12587     BuildMI(*BB, MI, Dl, TII->get(PPC::MTFSF))
12588         .addImm(255)
12589         .addReg(NewFPSCRReg)
12590         .addImm(0)
12591         .addImm(0);
12592   } else if (MI.getOpcode() == PPC::PROBED_ALLOCA_32 ||
12593              MI.getOpcode() == PPC::PROBED_ALLOCA_64) {
12594     return emitProbedAlloca(MI, BB);
12595   } else {
12596     llvm_unreachable("Unexpected instr type to insert");
12597   }
12598 
12599   MI.eraseFromParent(); // The pseudo instruction is gone now.
12600   return BB;
12601 }
12602 
12603 //===----------------------------------------------------------------------===//
12604 // Target Optimization Hooks
12605 //===----------------------------------------------------------------------===//
12606 
12607 static int getEstimateRefinementSteps(EVT VT, const PPCSubtarget &Subtarget) {
12608   // For the estimates, convergence is quadratic, so we essentially double the
12609   // number of digits correct after every iteration. For both FRE and FRSQRTE,
12610   // the minimum architected relative accuracy is 2^-5. When hasRecipPrec(),
12611   // this is 2^-14. IEEE float has 23 digits and double has 52 digits.
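  // For example, with 2^-14 initial accuracy one step yields ~28 correct bits,
  // covering f32's 23-bit fraction, and the extra step added below for f64
  // yields ~56 bits, covering its 52-bit fraction.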
12612   int RefinementSteps = Subtarget.hasRecipPrec() ? 1 : 3;
12613   if (VT.getScalarType() == MVT::f64)
12614     RefinementSteps++;
12615   return RefinementSteps;
12616 }
12617 
12618 SDValue PPCTargetLowering::getSqrtInputTest(SDValue Op, SelectionDAG &DAG,
12619                                             const DenormalMode &Mode) const {
12620   // We only have VSX Vector Test for software Square Root.
12621   EVT VT = Op.getValueType();
12622   if (!isTypeLegal(MVT::i1) ||
12623       (VT != MVT::f64 &&
12624        ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX())))
12625     return TargetLowering::getSqrtInputTest(Op, DAG, Mode);
12626 
12627   SDLoc DL(Op);
12628   // The output register of FTSQRT is a CR field.
12629   SDValue FTSQRT = DAG.getNode(PPCISD::FTSQRT, DL, MVT::i32, Op);
12630   // ftsqrt BF,FRB
12631   // Let e_b be the unbiased exponent of the double-precision
12632   // floating-point operand in register FRB.
12633   // fe_flag is set to 1 if either of the following conditions occurs.
12634   //   - The double-precision floating-point operand in register FRB is a zero,
12635   //     a NaN, an infinity, or a negative value.
12636   //   - e_b is less than or equal to -970.
12637   // Otherwise fe_flag is set to 0.
12638   // Both VSX and non-VSX versions would set EQ bit in the CR if the number is
12639   // not eligible for iteration. (zero/negative/infinity/nan or unbiased
12640   // exponent is less than -970)
12641   SDValue SRIdxVal = DAG.getTargetConstant(PPC::sub_eq, DL, MVT::i32);
12642   return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::i1,
12643                                     FTSQRT, SRIdxVal),
12644                  0);
12645 }
12646 
12647 SDValue
12648 PPCTargetLowering::getSqrtResultForDenormInput(SDValue Op,
12649                                                SelectionDAG &DAG) const {
12650   // We only have VSX Vector Square Root.
12651   EVT VT = Op.getValueType();
12652   if (VT != MVT::f64 &&
12653       ((VT != MVT::v2f64 && VT != MVT::v4f32) || !Subtarget.hasVSX()))
12654     return TargetLowering::getSqrtResultForDenormInput(Op, DAG);
12655 
12656   return DAG.getNode(PPCISD::FSQRT, SDLoc(Op), VT, Op);
12657 }
12658 
12659 SDValue PPCTargetLowering::getSqrtEstimate(SDValue Operand, SelectionDAG &DAG,
12660                                            int Enabled, int &RefinementSteps,
12661                                            bool &UseOneConstNR,
12662                                            bool Reciprocal) const {
12663   EVT VT = Operand.getValueType();
12664   if ((VT == MVT::f32 && Subtarget.hasFRSQRTES()) ||
12665       (VT == MVT::f64 && Subtarget.hasFRSQRTE()) ||
12666       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12667       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12668     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12669       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12670 
12671     // The Newton-Raphson computation with a single constant does not provide
12672     // enough accuracy on some CPUs.
12673     UseOneConstNR = !Subtarget.needsTwoConstNR();
12674     return DAG.getNode(PPCISD::FRSQRTE, SDLoc(Operand), VT, Operand);
12675   }
12676   return SDValue();
12677 }
12678 
12679 SDValue PPCTargetLowering::getRecipEstimate(SDValue Operand, SelectionDAG &DAG,
12680                                             int Enabled,
12681                                             int &RefinementSteps) const {
12682   EVT VT = Operand.getValueType();
12683   if ((VT == MVT::f32 && Subtarget.hasFRES()) ||
12684       (VT == MVT::f64 && Subtarget.hasFRE()) ||
12685       (VT == MVT::v4f32 && Subtarget.hasAltivec()) ||
12686       (VT == MVT::v2f64 && Subtarget.hasVSX())) {
12687     if (RefinementSteps == ReciprocalEstimate::Unspecified)
12688       RefinementSteps = getEstimateRefinementSteps(VT, Subtarget);
12689     return DAG.getNode(PPCISD::FRE, SDLoc(Operand), VT, Operand);
12690   }
12691   return SDValue();
12692 }
12693 
12694 unsigned PPCTargetLowering::combineRepeatedFPDivisors() const {
12695   // Note: This functionality is used only when unsafe-fp-math is enabled, and
12696   // on cores with reciprocal estimates (which are used when unsafe-fp-math is
12697   // enabled for division), this functionality is redundant with the default
12698   // combiner logic (once the division -> reciprocal/multiply transformation
12699   // has taken place). As a result, this matters more for older cores than for
12700   // newer ones.
12701 
12702   // Combine multiple FDIVs with the same divisor into multiple FMULs by the
12703   // reciprocal if there are two or more FDIVs (for embedded cores with only
12704   // one FP pipeline) or three or more FDIVs (for generic OOO cores).
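  // For example, with a threshold of three, the combiner turns x/d, y/d, z/d
  // into r = 1.0/d; x*r, y*r, z*r, trading three divides for one divide and
  // three multiplies (under the unsafe-fp-math conditions noted above).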
12705   switch (Subtarget.getCPUDirective()) {
12706   default:
12707     return 3;
12708   case PPC::DIR_440:
12709   case PPC::DIR_A2:
12710   case PPC::DIR_E500:
12711   case PPC::DIR_E500mc:
12712   case PPC::DIR_E5500:
12713     return 2;
12714   }
12715 }
12716 
12717 // isConsecutiveLSLoc needs to work even if all adds have not yet been
12718 // collapsed, and so we need to look through chains of them.
12719 static void getBaseWithConstantOffset(SDValue Loc, SDValue &Base,
12720                                      int64_t& Offset, SelectionDAG &DAG) {
12721   if (DAG.isBaseWithConstantOffset(Loc)) {
12722     Base = Loc.getOperand(0);
12723     Offset += cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue();
12724 
12725     // The base might itself be a base plus an offset, and if so, accumulate
12726     // that as well.
12727     getBaseWithConstantOffset(Loc.getOperand(0), Base, Offset, DAG);
12728   }
12729 }
12730 
12731 static bool isConsecutiveLSLoc(SDValue Loc, EVT VT, LSBaseSDNode *Base,
12732                             unsigned Bytes, int Dist,
12733                             SelectionDAG &DAG) {
12734   if (VT.getSizeInBits() / 8 != Bytes)
12735     return false;
12736 
12737   SDValue BaseLoc = Base->getBasePtr();
12738   if (Loc.getOpcode() == ISD::FrameIndex) {
12739     if (BaseLoc.getOpcode() != ISD::FrameIndex)
12740       return false;
12741     const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
12742     int FI  = cast<FrameIndexSDNode>(Loc)->getIndex();
12743     int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
12744     int FS  = MFI.getObjectSize(FI);
12745     int BFS = MFI.getObjectSize(BFI);
12746     if (FS != BFS || FS != (int)Bytes) return false;
12747     return MFI.getObjectOffset(FI) == (MFI.getObjectOffset(BFI) + Dist*Bytes);
12748   }
12749 
12750   SDValue Base1 = Loc, Base2 = BaseLoc;
12751   int64_t Offset1 = 0, Offset2 = 0;
12752   getBaseWithConstantOffset(Loc, Base1, Offset1, DAG);
12753   getBaseWithConstantOffset(BaseLoc, Base2, Offset2, DAG);
12754   if (Base1 == Base2 && Offset1 == (Offset2 + Dist * Bytes))
12755     return true;
12756 
12757   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12758   const GlobalValue *GV1 = nullptr;
12759   const GlobalValue *GV2 = nullptr;
12760   Offset1 = 0;
12761   Offset2 = 0;
12762   bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
12763   bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
12764   if (isGA1 && isGA2 && GV1 == GV2)
12765     return Offset1 == (Offset2 + Dist*Bytes);
12766   return false;
12767 }
12768 
12769 // Like SelectionDAG::isConsecutiveLoad, but also works for stores, and does
12770 // not enforce equality of the chain operands.
12771 static bool isConsecutiveLS(SDNode *N, LSBaseSDNode *Base,
12772                             unsigned Bytes, int Dist,
12773                             SelectionDAG &DAG) {
12774   if (LSBaseSDNode *LS = dyn_cast<LSBaseSDNode>(N)) {
12775     EVT VT = LS->getMemoryVT();
12776     SDValue Loc = LS->getBasePtr();
12777     return isConsecutiveLSLoc(Loc, VT, Base, Bytes, Dist, DAG);
12778   }
12779 
12780   if (N->getOpcode() == ISD::INTRINSIC_W_CHAIN) {
12781     EVT VT;
12782     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12783     default: return false;
12784     case Intrinsic::ppc_altivec_lvx:
12785     case Intrinsic::ppc_altivec_lvxl:
12786     case Intrinsic::ppc_vsx_lxvw4x:
12787     case Intrinsic::ppc_vsx_lxvw4x_be:
12788       VT = MVT::v4i32;
12789       break;
12790     case Intrinsic::ppc_vsx_lxvd2x:
12791     case Intrinsic::ppc_vsx_lxvd2x_be:
12792       VT = MVT::v2f64;
12793       break;
12794     case Intrinsic::ppc_altivec_lvebx:
12795       VT = MVT::i8;
12796       break;
12797     case Intrinsic::ppc_altivec_lvehx:
12798       VT = MVT::i16;
12799       break;
12800     case Intrinsic::ppc_altivec_lvewx:
12801       VT = MVT::i32;
12802       break;
12803     }
12804 
12805     return isConsecutiveLSLoc(N->getOperand(2), VT, Base, Bytes, Dist, DAG);
12806   }
12807 
12808   if (N->getOpcode() == ISD::INTRINSIC_VOID) {
12809     EVT VT;
12810     switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
12811     default: return false;
12812     case Intrinsic::ppc_altivec_stvx:
12813     case Intrinsic::ppc_altivec_stvxl:
12814     case Intrinsic::ppc_vsx_stxvw4x:
12815       VT = MVT::v4i32;
12816       break;
12817     case Intrinsic::ppc_vsx_stxvd2x:
12818       VT = MVT::v2f64;
12819       break;
12820     case Intrinsic::ppc_vsx_stxvw4x_be:
12821       VT = MVT::v4i32;
12822       break;
12823     case Intrinsic::ppc_vsx_stxvd2x_be:
12824       VT = MVT::v2f64;
12825       break;
12826     case Intrinsic::ppc_altivec_stvebx:
12827       VT = MVT::i8;
12828       break;
12829     case Intrinsic::ppc_altivec_stvehx:
12830       VT = MVT::i16;
12831       break;
12832     case Intrinsic::ppc_altivec_stvewx:
12833       VT = MVT::i32;
12834       break;
12835     }
12836 
12837     return isConsecutiveLSLoc(N->getOperand(3), VT, Base, Bytes, Dist, DAG);
12838   }
12839 
12840   return false;
12841 }
12842 
12843 // Return true if there is a nearby consecutive load to the one provided
12844 // (regardless of alignment). We search up and down the chain, looking through
12845 // token factors and other loads (but nothing else). As a result, a true result
12846 // indicates that it is safe to create a new consecutive load adjacent to the
12847 // load provided.
12848 static bool findConsecutiveLoad(LoadSDNode *LD, SelectionDAG &DAG) {
12849   SDValue Chain = LD->getChain();
12850   EVT VT = LD->getMemoryVT();
12851 
12852   SmallSet<SDNode *, 16> LoadRoots;
12853   SmallVector<SDNode *, 8> Queue(1, Chain.getNode());
12854   SmallSet<SDNode *, 16> Visited;
12855 
12856   // First, search up the chain, branching to follow all token-factor operands.
12857   // If we find a consecutive load, then we're done, otherwise, record all
12858   // nodes just above the top-level loads and token factors.
12859   while (!Queue.empty()) {
12860     SDNode *ChainNext = Queue.pop_back_val();
12861     if (!Visited.insert(ChainNext).second)
12862       continue;
12863 
12864     if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(ChainNext)) {
12865       if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12866         return true;
12867 
12868       if (!Visited.count(ChainLD->getChain().getNode()))
12869         Queue.push_back(ChainLD->getChain().getNode());
12870     } else if (ChainNext->getOpcode() == ISD::TokenFactor) {
12871       for (const SDUse &O : ChainNext->ops())
12872         if (!Visited.count(O.getNode()))
12873           Queue.push_back(O.getNode());
12874     } else
12875       LoadRoots.insert(ChainNext);
12876   }
12877 
12878   // Second, search down the chain, starting from the top-level nodes recorded
12879   // in the first phase. These top-level nodes are the nodes just above all
12880   // loads and token factors. Starting with their uses, recursively look through
12881   // all loads (just the chain uses) and token factors to find a consecutive
12882   // load.
12883   Visited.clear();
12884   Queue.clear();
12885 
12886   for (SmallSet<SDNode *, 16>::iterator I = LoadRoots.begin(),
12887        IE = LoadRoots.end(); I != IE; ++I) {
12888     Queue.push_back(*I);
12889 
12890     while (!Queue.empty()) {
12891       SDNode *LoadRoot = Queue.pop_back_val();
12892       if (!Visited.insert(LoadRoot).second)
12893         continue;
12894 
12895       if (MemSDNode *ChainLD = dyn_cast<MemSDNode>(LoadRoot))
12896         if (isConsecutiveLS(ChainLD, LD, VT.getStoreSize(), 1, DAG))
12897           return true;
12898 
12899       for (SDNode::use_iterator UI = LoadRoot->use_begin(),
12900            UE = LoadRoot->use_end(); UI != UE; ++UI)
12901         if (((isa<MemSDNode>(*UI) &&
12902             cast<MemSDNode>(*UI)->getChain().getNode() == LoadRoot) ||
12903             UI->getOpcode() == ISD::TokenFactor) && !Visited.count(*UI))
12904           Queue.push_back(*UI);
12905     }
12906   }
12907 
12908   return false;
12909 }
12910 
12911 /// This function is called when we have proved that a SETCC node can be replaced
12912 /// by subtraction (and other supporting instructions) so that the result of
12913 /// the comparison is kept in a GPR instead of a CR. This function is purely for
12914 /// codegen purposes and has some flags to guide the codegen process.
12915 static SDValue generateEquivalentSub(SDNode *N, int Size, bool Complement,
12916                                      bool Swap, SDLoc &DL, SelectionDAG &DAG) {
12917   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12918 
12919   // Zero extend the operands to the largest legal integer. The original
12920   // operands must be of a strictly smaller size.
12921   auto Op0 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(0),
12922                          DAG.getConstant(Size, DL, MVT::i32));
12923   auto Op1 = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1),
12924                          DAG.getConstant(Size, DL, MVT::i32));
12925 
12926   // Swap the operands if needed, depending on the condition code.
12927   if (Swap)
12928     std::swap(Op0, Op1);
12929 
12930   // Subtract extended integers.
12931   auto SubNode = DAG.getNode(ISD::SUB, DL, MVT::i64, Op0, Op1);
12932 
12933   // Move the sign bit to the least significant position and zero out the rest.
12934   // Now the least significant bit carries the result of original comparison.
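  // For example, for an unsigned a < b on operands zero-extended into i64,
  // a - b is negative exactly when a < b, so bit Size-1 of the difference
  // (shifted down to bit 0 here) is the desired i1 value.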
12935   auto Shifted = DAG.getNode(ISD::SRL, DL, MVT::i64, SubNode,
12936                              DAG.getConstant(Size - 1, DL, MVT::i32));
12937   auto Final = Shifted;
12938 
12939   // Complement the result if needed, based on the condition code.
12940   if (Complement)
12941     Final = DAG.getNode(ISD::XOR, DL, MVT::i64, Shifted,
12942                         DAG.getConstant(1, DL, MVT::i64));
12943 
12944   return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Final);
12945 }
12946 
12947 SDValue PPCTargetLowering::ConvertSETCCToSubtract(SDNode *N,
12948                                                   DAGCombinerInfo &DCI) const {
12949   assert(N->getOpcode() == ISD::SETCC && "ISD::SETCC Expected.");
12950 
12951   SelectionDAG &DAG = DCI.DAG;
12952   SDLoc DL(N);
12953 
12954   // The size of the integers being compared has a critical role in the following
12955   // analysis, so we prefer to do this when all types are legal.
12956   if (!DCI.isAfterLegalizeDAG())
12957     return SDValue();
12958 
12959   // If all users of the SETCC extend its value to a legal integer type,
12960   // then we replace the SETCC with a subtraction.
12961   for (SDNode::use_iterator UI = N->use_begin(),
12962        UE = N->use_end(); UI != UE; ++UI) {
12963     if (UI->getOpcode() != ISD::ZERO_EXTEND)
12964       return SDValue();
12965   }
12966 
12967   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
12968   auto OpSize = N->getOperand(0).getValueSizeInBits();
12969 
12970   unsigned Size = DAG.getDataLayout().getLargestLegalIntTypeSizeInBits();
12971 
12972   if (OpSize < Size) {
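    // ULT is just the sign of Op0 - Op1; UGT swaps the operands; UGE
    // complements the bit; ULE both swaps and complements.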
12973     switch (CC) {
12974     default: break;
12975     case ISD::SETULT:
12976       return generateEquivalentSub(N, Size, false, false, DL, DAG);
12977     case ISD::SETULE:
12978       return generateEquivalentSub(N, Size, true, true, DL, DAG);
12979     case ISD::SETUGT:
12980       return generateEquivalentSub(N, Size, false, true, DL, DAG);
12981     case ISD::SETUGE:
12982       return generateEquivalentSub(N, Size, true, false, DL, DAG);
12983     }
12984   }
12985 
12986   return SDValue();
12987 }
12988 
12989 SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
12990                                                   DAGCombinerInfo &DCI) const {
12991   SelectionDAG &DAG = DCI.DAG;
12992   SDLoc dl(N);
12993 
12994   assert(Subtarget.useCRBits() && "Expecting to be tracking CR bits");
12995   // If we're tracking CR bits, we need to be careful that we don't have:
12996   //   trunc(binary-ops(zext(x), zext(y)))
12997   // or
12998   //   trunc(binary-ops(binary-ops(zext(x), zext(y)), ...)
12999   // such that we're unnecessarily moving things into GPRs when it would be
13000   // better to keep them in CR bits.
13001 
13002   // Note that trunc here can be an actual i1 trunc, or can be the effective
13003   // truncation that comes from a setcc or select_cc.
13004   if (N->getOpcode() == ISD::TRUNCATE &&
13005       N->getValueType(0) != MVT::i1)
13006     return SDValue();
13007 
13008   if (N->getOperand(0).getValueType() != MVT::i32 &&
13009       N->getOperand(0).getValueType() != MVT::i64)
13010     return SDValue();
13011 
13012   if (N->getOpcode() == ISD::SETCC ||
13013       N->getOpcode() == ISD::SELECT_CC) {
13014     // If we're looking at a comparison, then we need to make sure that the
13015     // high bits (all except for the first) don't affect the result.
13016     ISD::CondCode CC =
13017       cast<CondCodeSDNode>(N->getOperand(
13018         N->getOpcode() == ISD::SETCC ? 2 : 4))->get();
13019     unsigned OpBits = N->getOperand(0).getValueSizeInBits();
13020 
13021     if (ISD::isSignedIntSetCC(CC)) {
13022       if (DAG.ComputeNumSignBits(N->getOperand(0)) != OpBits ||
13023           DAG.ComputeNumSignBits(N->getOperand(1)) != OpBits)
13024         return SDValue();
13025     } else if (ISD::isUnsignedIntSetCC(CC)) {
13026       if (!DAG.MaskedValueIsZero(N->getOperand(0),
13027                                  APInt::getHighBitsSet(OpBits, OpBits-1)) ||
13028           !DAG.MaskedValueIsZero(N->getOperand(1),
13029                                  APInt::getHighBitsSet(OpBits, OpBits-1)))
13030         return (N->getOpcode() == ISD::SETCC ? ConvertSETCCToSubtract(N, DCI)
13031                                              : SDValue());
13032     } else {
13033       // This is neither a signed nor an unsigned comparison; just make sure
13034       // that the high bits are equal.
13035       KnownBits Op1Known = DAG.computeKnownBits(N->getOperand(0));
13036       KnownBits Op2Known = DAG.computeKnownBits(N->getOperand(1));
13037 
13038       // We don't really care about what is known about the first bit (if
13039       // anything), so pretend that it is known zero for both to ensure they can
13040       // be compared as constants.
13041       Op1Known.Zero.setBit(0); Op1Known.One.clearBit(0);
13042       Op2Known.Zero.setBit(0); Op2Known.One.clearBit(0);
13043 
13044       if (!Op1Known.isConstant() || !Op2Known.isConstant() ||
13045           Op1Known.getConstant() != Op2Known.getConstant())
13046         return SDValue();
13047     }
13048   }
13049 
13050   // We now know that the higher-order bits are irrelevant, we just need to
13051   // make sure that all of the intermediate operations are bit operations, and
13052   // all inputs are extensions.
13053   if (N->getOperand(0).getOpcode() != ISD::AND &&
13054       N->getOperand(0).getOpcode() != ISD::OR  &&
13055       N->getOperand(0).getOpcode() != ISD::XOR &&
13056       N->getOperand(0).getOpcode() != ISD::SELECT &&
13057       N->getOperand(0).getOpcode() != ISD::SELECT_CC &&
13058       N->getOperand(0).getOpcode() != ISD::TRUNCATE &&
13059       N->getOperand(0).getOpcode() != ISD::SIGN_EXTEND &&
13060       N->getOperand(0).getOpcode() != ISD::ZERO_EXTEND &&
13061       N->getOperand(0).getOpcode() != ISD::ANY_EXTEND)
13062     return SDValue();
13063 
13064   if ((N->getOpcode() == ISD::SETCC || N->getOpcode() == ISD::SELECT_CC) &&
13065       N->getOperand(1).getOpcode() != ISD::AND &&
13066       N->getOperand(1).getOpcode() != ISD::OR  &&
13067       N->getOperand(1).getOpcode() != ISD::XOR &&
13068       N->getOperand(1).getOpcode() != ISD::SELECT &&
13069       N->getOperand(1).getOpcode() != ISD::SELECT_CC &&
13070       N->getOperand(1).getOpcode() != ISD::TRUNCATE &&
13071       N->getOperand(1).getOpcode() != ISD::SIGN_EXTEND &&
13072       N->getOperand(1).getOpcode() != ISD::ZERO_EXTEND &&
13073       N->getOperand(1).getOpcode() != ISD::ANY_EXTEND)
13074     return SDValue();
13075 
13076   SmallVector<SDValue, 4> Inputs;
13077   SmallVector<SDValue, 8> BinOps, PromOps;
13078   SmallPtrSet<SDNode *, 16> Visited;
13079 
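  // Seed the worklist with the operands of the truncation or comparison: a
  // TRUNCATE contributes only its single operand, while SETCC/SELECT_CC
  // contribute the two values being compared.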
13080   for (unsigned i = 0; i < 2; ++i) {
13081     if (((N->getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13082           N->getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13083           N->getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13084           N->getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13085         isa<ConstantSDNode>(N->getOperand(i)))
13086       Inputs.push_back(N->getOperand(i));
13087     else
13088       BinOps.push_back(N->getOperand(i));
13089 
13090     if (N->getOpcode() == ISD::TRUNCATE)
13091       break;
13092   }
13093 
13094   // Visit all inputs, collect all binary operations (and, or, xor and
13095   // select) that are all fed by extensions.
13096   while (!BinOps.empty()) {
13097     SDValue BinOp = BinOps.pop_back_val();
13098 
13099     if (!Visited.insert(BinOp.getNode()).second)
13100       continue;
13101 
13102     PromOps.push_back(BinOp);
13103 
13104     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13105       // The condition of the select is not promoted.
13106       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13107         continue;
13108       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13109         continue;
13110 
13111       if (((BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13112             BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13113             BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) &&
13114            BinOp.getOperand(i).getOperand(0).getValueType() == MVT::i1) ||
13115           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13116         Inputs.push_back(BinOp.getOperand(i));
13117       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13118                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13119                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13120                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13121                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC ||
13122                  BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13123                  BinOp.getOperand(i).getOpcode() == ISD::SIGN_EXTEND ||
13124                  BinOp.getOperand(i).getOpcode() == ISD::ZERO_EXTEND ||
13125                  BinOp.getOperand(i).getOpcode() == ISD::ANY_EXTEND) {
13126         BinOps.push_back(BinOp.getOperand(i));
13127       } else {
13128         // We have an input that is not an extension or another binary
13129         // operation; we'll abort this transformation.
13130         return SDValue();
13131       }
13132     }
13133   }
13134 
13135   // Make sure that this is a self-contained cluster of operations (which
13136   // is not quite the same thing as saying that everything has only one
13137   // use).
13138   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13139     if (isa<ConstantSDNode>(Inputs[i]))
13140       continue;
13141 
13142     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13143                               UE = Inputs[i].getNode()->use_end();
13144          UI != UE; ++UI) {
13145       SDNode *User = *UI;
13146       if (User != N && !Visited.count(User))
13147         return SDValue();
13148 
13149       // Make sure that we're not going to promote the non-output-value
13150       // operand(s) of SELECT or SELECT_CC.
13151       // FIXME: Although we could sometimes handle this, and it does occur in
13152       // practice that one of the condition inputs to the select is also one of
13153       // the outputs, we currently can't deal with this.
13154       if (User->getOpcode() == ISD::SELECT) {
13155         if (User->getOperand(0) == Inputs[i])
13156           return SDValue();
13157       } else if (User->getOpcode() == ISD::SELECT_CC) {
13158         if (User->getOperand(0) == Inputs[i] ||
13159             User->getOperand(1) == Inputs[i])
13160           return SDValue();
13161       }
13162     }
13163   }
13164 
13165   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13166     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13167                               UE = PromOps[i].getNode()->use_end();
13168          UI != UE; ++UI) {
13169       SDNode *User = *UI;
13170       if (User != N && !Visited.count(User))
13171         return SDValue();
13172 
13173       // Make sure that we're not going to promote the non-output-value
13174       // operand(s) of SELECT or SELECT_CC.
13175       // FIXME: Although we could sometimes handle this, and it does occur in
13176       // practice that one of the condition inputs to the select is also one of
13177       // the outputs, we currently can't deal with this.
13178       if (User->getOpcode() == ISD::SELECT) {
13179         if (User->getOperand(0) == PromOps[i])
13180           return SDValue();
13181       } else if (User->getOpcode() == ISD::SELECT_CC) {
13182         if (User->getOperand(0) == PromOps[i] ||
13183             User->getOperand(1) == PromOps[i])
13184           return SDValue();
13185       }
13186     }
13187   }
13188 
13189   // Replace all inputs with the extension operand.
13190   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13191     // Constants may have users outside the cluster of to-be-promoted nodes,
13192     // and so we need to replace those as we do the promotions.
13193     if (isa<ConstantSDNode>(Inputs[i]))
13194       continue;
13195     else
13196       DAG.ReplaceAllUsesOfValueWith(Inputs[i], Inputs[i].getOperand(0));
13197   }
13198 
13199   std::list<HandleSDNode> PromOpHandles;
13200   for (auto &PromOp : PromOps)
13201     PromOpHandles.emplace_back(PromOp);
13202 
13203   // Replace all operations (these are all the same, but have a different
13204   // (i1) return type). DAG.getNode will validate that the types of
13205   // a binary operator match, so go through the list in reverse so that
13206   // we've likely promoted both operands first. Any intermediate truncations or
13207   // extensions disappear.
13208   while (!PromOpHandles.empty()) {
13209     SDValue PromOp = PromOpHandles.back().getValue();
13210     PromOpHandles.pop_back();
13211 
13212     if (PromOp.getOpcode() == ISD::TRUNCATE ||
13213         PromOp.getOpcode() == ISD::SIGN_EXTEND ||
13214         PromOp.getOpcode() == ISD::ZERO_EXTEND ||
13215         PromOp.getOpcode() == ISD::ANY_EXTEND) {
13216       if (!isa<ConstantSDNode>(PromOp.getOperand(0)) &&
13217           PromOp.getOperand(0).getValueType() != MVT::i1) {
13218         // The operand is not yet ready (see comment below).
13219         PromOpHandles.emplace_front(PromOp);
13220         continue;
13221       }
13222 
13223       SDValue RepValue = PromOp.getOperand(0);
13224       if (isa<ConstantSDNode>(RepValue))
13225         RepValue = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, RepValue);
13226 
13227       DAG.ReplaceAllUsesOfValueWith(PromOp, RepValue);
13228       continue;
13229     }
13230 
13231     unsigned C;
13232     switch (PromOp.getOpcode()) {
13233     default:             C = 0; break;
13234     case ISD::SELECT:    C = 1; break;
13235     case ISD::SELECT_CC: C = 2; break;
13236     }
13237 
13238     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13239          PromOp.getOperand(C).getValueType() != MVT::i1) ||
13240         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13241          PromOp.getOperand(C+1).getValueType() != MVT::i1)) {
13242       // The to-be-promoted operands of this node have not yet been
13243       // promoted (this should be rare because we're going through the
13244       // list backward, but if one of the operands has several users in
13245       // this cluster of to-be-promoted nodes, it is possible).
13246       PromOpHandles.emplace_front(PromOp);
13247       continue;
13248     }
13249 
13250     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13251                                 PromOp.getNode()->op_end());
13252 
13253     // If there are any constant inputs, make sure they're replaced now.
13254     for (unsigned i = 0; i < 2; ++i)
13255       if (isa<ConstantSDNode>(Ops[C+i]))
13256         Ops[C+i] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Ops[C+i]);
13257 
13258     DAG.ReplaceAllUsesOfValueWith(PromOp,
13259       DAG.getNode(PromOp.getOpcode(), dl, MVT::i1, Ops));
13260   }
13261 
13262   // Now we're left with the initial truncation itself.
13263   if (N->getOpcode() == ISD::TRUNCATE)
13264     return N->getOperand(0);
13265 
13266   // Otherwise, this is a comparison. The operands to be compared have just
13267   // changed type (to i1), but everything else is the same.
13268   return SDValue(N, 0);
13269 }
13270 
13271 SDValue PPCTargetLowering::DAGCombineExtBoolTrunc(SDNode *N,
13272                                                   DAGCombinerInfo &DCI) const {
13273   SelectionDAG &DAG = DCI.DAG;
13274   SDLoc dl(N);
13275 
13276   // If we're tracking CR bits, we need to be careful that we don't have:
13277   //   zext(binary-ops(trunc(x), trunc(y)))
13278   // or
13279   //   zext(binary-ops(binary-ops(trunc(x), trunc(y)), ...)
13280   // such that we're unnecessarily moving things into CR bits that can more
13281   // efficiently stay in GPRs. Note that if we're not certain that the high
13282   // bits are set as required by the final extension, we still may need to do
13283   // some masking to get the proper behavior.
13284 
13285   // This same functionality is important on PPC64 when dealing with
13286   // 32-to-64-bit extensions; these occur often when 32-bit values are used as
13287   // the return values of functions. Because it is so similar, it is handled
13288   // here as well.
13289 
13290   if (N->getValueType(0) != MVT::i32 &&
13291       N->getValueType(0) != MVT::i64)
13292     return SDValue();
13293 
13294   if (!((N->getOperand(0).getValueType() == MVT::i1 && Subtarget.useCRBits()) ||
13295         (N->getOperand(0).getValueType() == MVT::i32 && Subtarget.isPPC64())))
13296     return SDValue();
13297 
13298   if (N->getOperand(0).getOpcode() != ISD::AND &&
13299       N->getOperand(0).getOpcode() != ISD::OR  &&
13300       N->getOperand(0).getOpcode() != ISD::XOR &&
13301       N->getOperand(0).getOpcode() != ISD::SELECT &&
13302       N->getOperand(0).getOpcode() != ISD::SELECT_CC)
13303     return SDValue();
13304 
13305   SmallVector<SDValue, 4> Inputs;
13306   SmallVector<SDValue, 8> BinOps(1, N->getOperand(0)), PromOps;
13307   SmallPtrSet<SDNode *, 16> Visited;
13308 
13309   // Visit all inputs, collect all binary operations (and, or, xor and
13310   // select) that are all fed by truncations.
13311   while (!BinOps.empty()) {
13312     SDValue BinOp = BinOps.pop_back_val();
13313 
13314     if (!Visited.insert(BinOp.getNode()).second)
13315       continue;
13316 
13317     PromOps.push_back(BinOp);
13318 
13319     for (unsigned i = 0, ie = BinOp.getNumOperands(); i != ie; ++i) {
13320       // The condition of the select is not promoted.
13321       if (BinOp.getOpcode() == ISD::SELECT && i == 0)
13322         continue;
13323       if (BinOp.getOpcode() == ISD::SELECT_CC && i != 2 && i != 3)
13324         continue;
13325 
13326       if (BinOp.getOperand(i).getOpcode() == ISD::TRUNCATE ||
13327           isa<ConstantSDNode>(BinOp.getOperand(i))) {
13328         Inputs.push_back(BinOp.getOperand(i));
13329       } else if (BinOp.getOperand(i).getOpcode() == ISD::AND ||
13330                  BinOp.getOperand(i).getOpcode() == ISD::OR  ||
13331                  BinOp.getOperand(i).getOpcode() == ISD::XOR ||
13332                  BinOp.getOperand(i).getOpcode() == ISD::SELECT ||
13333                  BinOp.getOperand(i).getOpcode() == ISD::SELECT_CC) {
13334         BinOps.push_back(BinOp.getOperand(i));
13335       } else {
13336         // We have an input that is not a truncation or another binary
13337         // operation; we'll abort this transformation.
13338         return SDValue();
13339       }
13340     }
13341   }
13342 
13343   // The operands of a select that must be truncated when the select is
13344   // promoted because the operand is actually part of the to-be-promoted set.
13345   DenseMap<SDNode *, EVT> SelectTruncOp[2];
13346 
13347   // Make sure that this is a self-contained cluster of operations (which
13348   // is not quite the same thing as saying that everything has only one
13349   // use).
13350   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13351     if (isa<ConstantSDNode>(Inputs[i]))
13352       continue;
13353 
13354     for (SDNode::use_iterator UI = Inputs[i].getNode()->use_begin(),
13355                               UE = Inputs[i].getNode()->use_end();
13356          UI != UE; ++UI) {
13357       SDNode *User = *UI;
13358       if (User != N && !Visited.count(User))
13359         return SDValue();
13360 
13361       // If we're going to promote the non-output-value operand(s) of SELECT or
13362       // SELECT_CC, record them for truncation.
13363       if (User->getOpcode() == ISD::SELECT) {
13364         if (User->getOperand(0) == Inputs[i])
13365           SelectTruncOp[0].insert(std::make_pair(User,
13366                                     User->getOperand(0).getValueType()));
13367       } else if (User->getOpcode() == ISD::SELECT_CC) {
13368         if (User->getOperand(0) == Inputs[i])
13369           SelectTruncOp[0].insert(std::make_pair(User,
13370                                     User->getOperand(0).getValueType()));
13371         if (User->getOperand(1) == Inputs[i])
13372           SelectTruncOp[1].insert(std::make_pair(User,
13373                                     User->getOperand(1).getValueType()));
13374       }
13375     }
13376   }
13377 
13378   for (unsigned i = 0, ie = PromOps.size(); i != ie; ++i) {
13379     for (SDNode::use_iterator UI = PromOps[i].getNode()->use_begin(),
13380                               UE = PromOps[i].getNode()->use_end();
13381          UI != UE; ++UI) {
13382       SDNode *User = *UI;
13383       if (User != N && !Visited.count(User))
13384         return SDValue();
13385 
13386       // If we're going to promote the non-output-value operand(s) of SELECT or
13387       // SELECT_CC, record them for truncation.
13388       if (User->getOpcode() == ISD::SELECT) {
13389         if (User->getOperand(0) == PromOps[i])
13390           SelectTruncOp[0].insert(std::make_pair(User,
13391                                     User->getOperand(0).getValueType()));
13392       } else if (User->getOpcode() == ISD::SELECT_CC) {
13393         if (User->getOperand(0) == PromOps[i])
13394           SelectTruncOp[0].insert(std::make_pair(User,
13395                                     User->getOperand(0).getValueType()));
13396         if (User->getOperand(1) == PromOps[i])
13397           SelectTruncOp[1].insert(std::make_pair(User,
13398                                     User->getOperand(1).getValueType()));
13399       }
13400     }
13401   }
13402 
13403   unsigned PromBits = N->getOperand(0).getValueSizeInBits();
13404   bool ReallyNeedsExt = false;
13405   if (N->getOpcode() != ISD::ANY_EXTEND) {
13406     // If any of the inputs are not already sign/zero extended, then
13407     // we'll still need to do that at the end.
13408     for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13409       if (isa<ConstantSDNode>(Inputs[i]))
13410         continue;
13411 
13412       unsigned OpBits =
13413         Inputs[i].getOperand(0).getValueSizeInBits();
13414       assert(PromBits < OpBits && "Truncation not to a smaller bit count?");
13415 
13416       if ((N->getOpcode() == ISD::ZERO_EXTEND &&
13417            !DAG.MaskedValueIsZero(Inputs[i].getOperand(0),
13418                                   APInt::getHighBitsSet(OpBits,
13419                                                         OpBits-PromBits))) ||
13420           (N->getOpcode() == ISD::SIGN_EXTEND &&
13421            DAG.ComputeNumSignBits(Inputs[i].getOperand(0)) <
13422              (OpBits-(PromBits-1)))) {
13423         ReallyNeedsExt = true;
13424         break;
13425       }
13426     }
13427   }
13428 
13429   // Replace all inputs, either with the truncation operand, or a
13430   // truncation or extension to the final output type.
13431   for (unsigned i = 0, ie = Inputs.size(); i != ie; ++i) {
13432     // Constant inputs need to be replaced with the to-be-promoted nodes that
13433     // use them because they might have users outside of the cluster of
13434     // promoted nodes.
13435     if (isa<ConstantSDNode>(Inputs[i]))
13436       continue;
13437 
13438     SDValue InSrc = Inputs[i].getOperand(0);
13439     if (Inputs[i].getValueType() == N->getValueType(0))
13440       DAG.ReplaceAllUsesOfValueWith(Inputs[i], InSrc);
13441     else if (N->getOpcode() == ISD::SIGN_EXTEND)
13442       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13443         DAG.getSExtOrTrunc(InSrc, dl, N->getValueType(0)));
13444     else if (N->getOpcode() == ISD::ZERO_EXTEND)
13445       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13446         DAG.getZExtOrTrunc(InSrc, dl, N->getValueType(0)));
13447     else
13448       DAG.ReplaceAllUsesOfValueWith(Inputs[i],
13449         DAG.getAnyExtOrTrunc(InSrc, dl, N->getValueType(0)));
13450   }
13451 
13452   std::list<HandleSDNode> PromOpHandles;
13453   for (auto &PromOp : PromOps)
13454     PromOpHandles.emplace_back(PromOp);
13455 
13456   // Replace all operations (these are all the same, but have a different
13457   // (promoted) return type). DAG.getNode will validate that the types of
13458   // a binary operator match, so go through the list in reverse so that
13459   // we've likely promoted both operands first.
13460   while (!PromOpHandles.empty()) {
13461     SDValue PromOp = PromOpHandles.back().getValue();
13462     PromOpHandles.pop_back();
13463 
13464     unsigned C;
13465     switch (PromOp.getOpcode()) {
13466     default:             C = 0; break;
13467     case ISD::SELECT:    C = 1; break;
13468     case ISD::SELECT_CC: C = 2; break;
13469     }
13470 
13471     if ((!isa<ConstantSDNode>(PromOp.getOperand(C)) &&
13472          PromOp.getOperand(C).getValueType() != N->getValueType(0)) ||
13473         (!isa<ConstantSDNode>(PromOp.getOperand(C+1)) &&
13474          PromOp.getOperand(C+1).getValueType() != N->getValueType(0))) {
13475       // The to-be-promoted operands of this node have not yet been
13476       // promoted (this should be rare because we're going through the
13477       // list backward, but if one of the operands has several users in
13478       // this cluster of to-be-promoted nodes, it is possible).
13479       PromOpHandles.emplace_front(PromOp);
13480       continue;
13481     }
13482 
13483     // For SELECT and SELECT_CC nodes, we do a similar check for any
13484     // to-be-promoted comparison inputs.
13485     if (PromOp.getOpcode() == ISD::SELECT ||
13486         PromOp.getOpcode() == ISD::SELECT_CC) {
13487       if ((SelectTruncOp[0].count(PromOp.getNode()) &&
13488            PromOp.getOperand(0).getValueType() != N->getValueType(0)) ||
13489           (SelectTruncOp[1].count(PromOp.getNode()) &&
13490            PromOp.getOperand(1).getValueType() != N->getValueType(0))) {
13491         PromOpHandles.emplace_front(PromOp);
13492         continue;
13493       }
13494     }
13495 
13496     SmallVector<SDValue, 3> Ops(PromOp.getNode()->op_begin(),
13497                                 PromOp.getNode()->op_end());
13498 
13499     // If this node has constant inputs, then they'll need to be promoted here.
13500     for (unsigned i = 0; i < 2; ++i) {
13501       if (!isa<ConstantSDNode>(Ops[C+i]))
13502         continue;
13503       if (Ops[C+i].getValueType() == N->getValueType(0))
13504         continue;
13505 
13506       if (N->getOpcode() == ISD::SIGN_EXTEND)
13507         Ops[C+i] = DAG.getSExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13508       else if (N->getOpcode() == ISD::ZERO_EXTEND)
13509         Ops[C+i] = DAG.getZExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13510       else
13511         Ops[C+i] = DAG.getAnyExtOrTrunc(Ops[C+i], dl, N->getValueType(0));
13512     }
13513 
13514     // If we've promoted the comparison inputs of a SELECT or SELECT_CC,
13515     // truncate them again to the original value type.
13516     if (PromOp.getOpcode() == ISD::SELECT ||
13517         PromOp.getOpcode() == ISD::SELECT_CC) {
13518       auto SI0 = SelectTruncOp[0].find(PromOp.getNode());
13519       if (SI0 != SelectTruncOp[0].end())
13520         Ops[0] = DAG.getNode(ISD::TRUNCATE, dl, SI0->second, Ops[0]);
13521       auto SI1 = SelectTruncOp[1].find(PromOp.getNode());
13522       if (SI1 != SelectTruncOp[1].end())
13523         Ops[1] = DAG.getNode(ISD::TRUNCATE, dl, SI1->second, Ops[1]);
13524     }
13525 
13526     DAG.ReplaceAllUsesOfValueWith(PromOp,
13527       DAG.getNode(PromOp.getOpcode(), dl, N->getValueType(0), Ops));
13528   }
13529 
13530   // Now we're left with the initial extension itself.
13531   if (!ReallyNeedsExt)
13532     return N->getOperand(0);
13533 
13534   // To zero extend, just mask off everything except for the first bit (in the
13535   // i1 case).
13536   if (N->getOpcode() == ISD::ZERO_EXTEND)
13537     return DAG.getNode(ISD::AND, dl, N->getValueType(0), N->getOperand(0),
13538                        DAG.getConstant(APInt::getLowBitsSet(
13539                                          N->getValueSizeInBits(0), PromBits),
13540                                        dl, N->getValueType(0)));
13541 
13542   assert(N->getOpcode() == ISD::SIGN_EXTEND &&
13543          "Invalid extension type");
13544   EVT ShiftAmountTy = getShiftAmountTy(N->getValueType(0), DAG.getDataLayout());
13545   SDValue ShiftCst =
13546       DAG.getConstant(N->getValueSizeInBits(0) - PromBits, dl, ShiftAmountTy);
13547   return DAG.getNode(
13548       ISD::SRA, dl, N->getValueType(0),
13549       DAG.getNode(ISD::SHL, dl, N->getValueType(0), N->getOperand(0), ShiftCst),
13550       ShiftCst);
13551 }
13552 
13553 SDValue PPCTargetLowering::combineSetCC(SDNode *N,
13554                                         DAGCombinerInfo &DCI) const {
13555   assert(N->getOpcode() == ISD::SETCC &&
13556          "Should be called with a SETCC node");
13557 
13558   ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
13559   if (CC == ISD::SETNE || CC == ISD::SETEQ) {
13560     SDValue LHS = N->getOperand(0);
13561     SDValue RHS = N->getOperand(1);
13562 
13563     // If there is a '0 - y' pattern, canonicalize the pattern to the RHS.
13564     if (LHS.getOpcode() == ISD::SUB && isNullConstant(LHS.getOperand(0)) &&
13565         LHS.hasOneUse())
13566       std::swap(LHS, RHS);
13567 
13568     // x == 0-y --> x+y == 0
13569     // x != 0-y --> x+y != 0
13570     if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
13571         RHS.hasOneUse()) {
13572       SDLoc DL(N);
13573       SelectionDAG &DAG = DCI.DAG;
13574       EVT VT = N->getValueType(0);
13575       EVT OpVT = LHS.getValueType();
13576       SDValue Add = DAG.getNode(ISD::ADD, DL, OpVT, LHS, RHS.getOperand(1));
13577       return DAG.getSetCC(DL, VT, Add, DAG.getConstant(0, DL, OpVT), CC);
13578     }
13579   }
13580 
13581   return DAGCombineTruncBoolExt(N, DCI);
13582 }
13583 
13584 // Is this an extending load from an f32 to an f64?
13585 static bool isFPExtLoad(SDValue Op) {
13586   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode()))
13587     return LD->getExtensionType() == ISD::EXTLOAD &&
13588       Op.getValueType() == MVT::f64;
13589   return false;
13590 }
13591 
13592 /// Reduces the number of fp-to-int conversion when building a vector.
13593 ///
13594 /// If this vector is built out of floating to integer conversions,
13595 /// transform it to a vector built out of floating point values followed by a
13596 /// single floating to integer conversion of the vector.
13597 /// Namely  (build_vector (fptosi $A), (fptosi $B), ...)
13598 /// becomes (fptosi (build_vector ($A, $B, ...)))
13599 SDValue PPCTargetLowering::
13600 combineElementTruncationToVectorTruncation(SDNode *N,
13601                                            DAGCombinerInfo &DCI) const {
13602   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13603          "Should be called with a BUILD_VECTOR node");
13604 
13605   SelectionDAG &DAG = DCI.DAG;
13606   SDLoc dl(N);
13607 
13608   SDValue FirstInput = N->getOperand(0);
13609   assert(FirstInput.getOpcode() == PPCISD::MFVSR &&
13610          "The input operand must be an fp-to-int conversion.");
13611 
13612   // This combine happens after legalization so the fp_to_[su]i nodes are
13613   // already converted to PPCISD nodes.
13614   unsigned FirstConversion = FirstInput.getOperand(0).getOpcode();
13615   if (FirstConversion == PPCISD::FCTIDZ ||
13616       FirstConversion == PPCISD::FCTIDUZ ||
13617       FirstConversion == PPCISD::FCTIWZ ||
13618       FirstConversion == PPCISD::FCTIWUZ) {
13619     bool IsSplat = true;
13620     bool Is32Bit = FirstConversion == PPCISD::FCTIWZ ||
13621       FirstConversion == PPCISD::FCTIWUZ;
13622     EVT SrcVT = FirstInput.getOperand(0).getValueType();
13623     SmallVector<SDValue, 4> Ops;
13624     EVT TargetVT = N->getValueType(0);
13625     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13626       SDValue NextOp = N->getOperand(i);
13627       if (NextOp.getOpcode() != PPCISD::MFVSR)
13628         return SDValue();
13629       unsigned NextConversion = NextOp.getOperand(0).getOpcode();
13630       if (NextConversion != FirstConversion)
13631         return SDValue();
13632       // If we are converting to 32-bit integers, we need to add an FP_ROUND.
13633       // This is not valid if the input was originally double precision. It is
13634       // also not profitable to do unless this is an extending load in which
13635       // case doing this combine will allow us to combine consecutive loads.
13636       if (Is32Bit && !isFPExtLoad(NextOp.getOperand(0).getOperand(0)))
13637         return SDValue();
13638       if (N->getOperand(i) != FirstInput)
13639         IsSplat = false;
13640     }
13641 
13642     // If this is a splat, we leave it as-is since there will be only a single
13643     // fp-to-int conversion followed by a splat of the integer. This is better
13644     // for 32-bit and smaller ints and neutral for 64-bit ints.
13645     if (IsSplat)
13646       return SDValue();
13647 
13648     // Now that we know we have the right type of node, get its operands
13649     for (int i = 0, e = N->getNumOperands(); i < e; ++i) {
13650       SDValue In = N->getOperand(i).getOperand(0);
13651       if (Is32Bit) {
13652         // For 32-bit values, we need to add an FP_ROUND node (if we made it
13653         // here, we know that all inputs are extending loads so this is safe).
13654         if (In.isUndef())
13655           Ops.push_back(DAG.getUNDEF(SrcVT));
13656         else {
13657           SDValue Trunc = DAG.getNode(ISD::FP_ROUND, dl,
13658                                       MVT::f32, In.getOperand(0),
13659                                       DAG.getIntPtrConstant(1, dl));
13660           Ops.push_back(Trunc);
13661         }
13662       } else
13663         Ops.push_back(In.isUndef() ? DAG.getUNDEF(SrcVT) : In.getOperand(0));
13664     }
13665 
13666     unsigned Opcode;
13667     if (FirstConversion == PPCISD::FCTIDZ ||
13668         FirstConversion == PPCISD::FCTIWZ)
13669       Opcode = ISD::FP_TO_SINT;
13670     else
13671       Opcode = ISD::FP_TO_UINT;
13672 
13673     EVT NewVT = TargetVT == MVT::v2i64 ? MVT::v2f64 : MVT::v4f32;
13674     SDValue BV = DAG.getBuildVector(NewVT, dl, Ops);
13675     return DAG.getNode(Opcode, dl, TargetVT, BV);
13676   }
13677   return SDValue();
13678 }
13679 
13680 /// Reduce the number of loads when building a vector.
13681 ///
13682 /// Building a vector out of multiple loads can be converted to a load
13683 /// of the vector type if the loads are consecutive. If the loads are
13684 /// consecutive but in descending order, a shuffle is added at the end
13685 /// to reorder the vector.
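// For example, a v4i32 build vector of four i32 loads from %p, %p+4, %p+8
// and %p+12 becomes a single v4i32 load from %p; if the loads instead run
// from %p+12 down to %p, the wide load is followed by a reversing shuffle.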
13686 static SDValue combineBVOfConsecutiveLoads(SDNode *N, SelectionDAG &DAG) {
13687   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13688          "Should be called with a BUILD_VECTOR node");
13689 
13690   SDLoc dl(N);
13691 
13692   // Return early for non-byte-sized types, as they can't be consecutive.
13693   if (!N->getValueType(0).getVectorElementType().isByteSized())
13694     return SDValue();
13695 
13696   bool InputsAreConsecutiveLoads = true;
13697   bool InputsAreReverseConsecutive = true;
13698   unsigned ElemSize = N->getValueType(0).getScalarType().getStoreSize();
13699   SDValue FirstInput = N->getOperand(0);
13700   bool IsRoundOfExtLoad = false;
13701 
13702   if (FirstInput.getOpcode() == ISD::FP_ROUND &&
13703       FirstInput.getOperand(0).getOpcode() == ISD::LOAD) {
13704     LoadSDNode *LD = dyn_cast<LoadSDNode>(FirstInput.getOperand(0));
13705     IsRoundOfExtLoad = LD->getExtensionType() == ISD::EXTLOAD;
13706   }
13707   // Not a build vector of (possibly fp_rounded) loads.
13708   if ((!IsRoundOfExtLoad && FirstInput.getOpcode() != ISD::LOAD) ||
13709       N->getNumOperands() == 1)
13710     return SDValue();
13711 
13712   for (int i = 1, e = N->getNumOperands(); i < e; ++i) {
13713     // If any inputs are fp_round(extload), they all must be.
13714     if (IsRoundOfExtLoad && N->getOperand(i).getOpcode() != ISD::FP_ROUND)
13715       return SDValue();
13716 
13717     SDValue NextInput = IsRoundOfExtLoad ? N->getOperand(i).getOperand(0) :
13718       N->getOperand(i);
13719     if (NextInput.getOpcode() != ISD::LOAD)
13720       return SDValue();
13721 
13722     SDValue PreviousInput =
13723       IsRoundOfExtLoad ? N->getOperand(i-1).getOperand(0) : N->getOperand(i-1);
13724     LoadSDNode *LD1 = dyn_cast<LoadSDNode>(PreviousInput);
13725     LoadSDNode *LD2 = dyn_cast<LoadSDNode>(NextInput);
13726 
13727     // If any inputs are fp_round(extload), they all must be.
13728     if (IsRoundOfExtLoad && LD2->getExtensionType() != ISD::EXTLOAD)
13729       return SDValue();
13730 
13731     if (!isConsecutiveLS(LD2, LD1, ElemSize, 1, DAG))
13732       InputsAreConsecutiveLoads = false;
13733     if (!isConsecutiveLS(LD1, LD2, ElemSize, 1, DAG))
13734       InputsAreReverseConsecutive = false;
13735 
13736     // Exit early if the loads are neither consecutive nor reverse consecutive.
13737     if (!InputsAreConsecutiveLoads && !InputsAreReverseConsecutive)
13738       return SDValue();
13739   }
13740 
13741   assert(!(InputsAreConsecutiveLoads && InputsAreReverseConsecutive) &&
13742          "The loads cannot be both consecutive and reverse consecutive.");
13743 
13744   SDValue FirstLoadOp =
13745     IsRoundOfExtLoad ? FirstInput.getOperand(0) : FirstInput;
13746   SDValue LastLoadOp =
13747     IsRoundOfExtLoad ? N->getOperand(N->getNumOperands()-1).getOperand(0) :
13748                        N->getOperand(N->getNumOperands()-1);
13749 
13750   LoadSDNode *LD1 = dyn_cast<LoadSDNode>(FirstLoadOp);
13751   LoadSDNode *LDL = dyn_cast<LoadSDNode>(LastLoadOp);
13752   if (InputsAreConsecutiveLoads) {
13753     assert(LD1 && "Input needs to be a LoadSDNode.");
13754     return DAG.getLoad(N->getValueType(0), dl, LD1->getChain(),
13755                        LD1->getBasePtr(), LD1->getPointerInfo(),
13756                        LD1->getAlignment());
13757   }
13758   if (InputsAreReverseConsecutive) {
13759     assert(LDL && "Input needs to be a LoadSDNode.");
13760     SDValue Load = DAG.getLoad(N->getValueType(0), dl, LDL->getChain(),
13761                                LDL->getBasePtr(), LDL->getPointerInfo(),
13762                                LDL->getAlignment());
13763     SmallVector<int, 16> Ops;
13764     for (int i = N->getNumOperands() - 1; i >= 0; i--)
13765       Ops.push_back(i);
13766 
13767     return DAG.getVectorShuffle(N->getValueType(0), dl, Load,
13768                                 DAG.getUNDEF(N->getValueType(0)), Ops);
13769   }
13770   return SDValue();
13771 }
13772 
13773 // This function adds the vector_shuffle needed to get the elements of the
13774 // vector extract into the correct positions, as specified by the
13775 // CorrectElems encoding.
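// For example, for a byte-to-word extend on little endian where the build
// vector extracted bytes 1, 5, 9 and 13, the shuffle moves those bytes into
// positions 0, 4, 8 and 12, the lanes read by the vector extend instruction.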
13776 static SDValue addShuffleForVecExtend(SDNode *N, SelectionDAG &DAG,
13777                                       SDValue Input, uint64_t Elems,
13778                                       uint64_t CorrectElems) {
13779   SDLoc dl(N);
13780 
13781   unsigned NumElems = Input.getValueType().getVectorNumElements();
13782   SmallVector<int, 16> ShuffleMask(NumElems, -1);
13783 
13784   // Knowing the element indices being extracted from the original
13785   // vector and the order in which they're being inserted, just put
13786   // them at element indices required for the instruction.
13787   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13788     if (DAG.getDataLayout().isLittleEndian())
13789       ShuffleMask[CorrectElems & 0xF] = Elems & 0xF;
13790     else
13791       ShuffleMask[(CorrectElems & 0xF0) >> 4] = (Elems & 0xF0) >> 4;
13792     CorrectElems = CorrectElems >> 8;
13793     Elems = Elems >> 8;
13794   }
13795 
13796   SDValue Shuffle =
13797       DAG.getVectorShuffle(Input.getValueType(), dl, Input,
13798                            DAG.getUNDEF(Input.getValueType()), ShuffleMask);
13799 
13800   EVT VT = N->getValueType(0);
13801   SDValue Conv = DAG.getBitcast(VT, Shuffle);
13802 
13803   EVT ExtVT = EVT::getVectorVT(*DAG.getContext(),
13804                                Input.getValueType().getVectorElementType(),
13805                                VT.getVectorNumElements());
13806   return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Conv,
13807                      DAG.getValueType(ExtVT));
13808 }
13809 
13810 // Look for build vector patterns where input operands come from sign
13811 // extended vector_extract elements of specific indices. If the correct indices
13812 // aren't used, add a vector shuffle to fix up the indices and create
13813 // aren't used, add a vector shuffle to fix up the indices and create a
13814 // during instruction selection.
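// For example (little endian, building a v4i32 from a v16i8 input):
//   (build_vector (sext (extract_elt %v, 0)), (sext (extract_elt %v, 4)),
//                 (sext (extract_elt %v, 8)), (sext (extract_elt %v, 12)))
// matches the byte-to-word extend directly; any other index set gets a
// vector_shuffle first to move the bytes into those positions.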
13815 static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
13816   // This array encodes the indices that the vector sign extend instructions
13817   // extract from when extending from one type to another for both BE and LE.
13818   // The right nibble of each byte corresponds to the LE indices,
13819   // and the left nibble of each byte corresponds to the BE indices.
13820   // For example: 0x3074B8FC  byte->word
13821   // For LE: the allowed indices are: 0x0,0x4,0x8,0xC
13822   // For BE: the allowed indices are: 0x3,0x7,0xB,0xF
13823   // For example: 0x000070F8  byte->double word
13824   // For LE: the allowed indices are: 0x0,0x8
13825   // For BE: the allowed indices are: 0x7,0xF
13826   uint64_t TargetElems[] = {
13827       0x3074B8FC, // b->w
13828       0x000070F8, // b->d
13829       0x10325476, // h->w
13830       0x00003074, // h->d
13831       0x00001032, // w->d
13832   };
13833 
13834   uint64_t Elems = 0;
13835   int Index;
13836   SDValue Input;
13837 
13838   auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
13839     if (!Op)
13840       return false;
13841     if (Op.getOpcode() != ISD::SIGN_EXTEND &&
13842         Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
13843       return false;
13844 
13845     // A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
13846     // of the right width.
13847     SDValue Extract = Op.getOperand(0);
13848     if (Extract.getOpcode() == ISD::ANY_EXTEND)
13849       Extract = Extract.getOperand(0);
13850     if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
13851       return false;
13852 
13853     ConstantSDNode *ExtOp = dyn_cast<ConstantSDNode>(Extract.getOperand(1));
13854     if (!ExtOp)
13855       return false;
13856 
13857     Index = ExtOp->getZExtValue();
13858     if (Input && Input != Extract.getOperand(0))
13859       return false;
13860 
13861     if (!Input)
13862       Input = Extract.getOperand(0);
13863 
13864     Elems = Elems << 8;
13865     Index = DAG.getDataLayout().isLittleEndian() ? Index : Index << 4;
13866     Elems |= Index;
13867 
13868     return true;
13869   };
13870 
13871   // If the build vector operands aren't sign-extended vector extracts
13872   // of the same input vector, then return.
13873   for (unsigned i = 0; i < N->getNumOperands(); i++) {
13874     if (!isSExtOfVecExtract(N->getOperand(i))) {
13875       return SDValue();
13876     }
13877   }
13878 
13879   // If the vector extract indices are not correct, add the appropriate
13880   // vector_shuffle.
13881   int TgtElemArrayIdx;
13882   int InputSize = Input.getValueType().getScalarSizeInBits();
13883   int OutputSize = N->getValueType(0).getScalarSizeInBits();
13884   if (InputSize + OutputSize == 40)
13885     TgtElemArrayIdx = 0;
13886   else if (InputSize + OutputSize == 72)
13887     TgtElemArrayIdx = 1;
13888   else if (InputSize + OutputSize == 48)
13889     TgtElemArrayIdx = 2;
13890   else if (InputSize + OutputSize == 80)
13891     TgtElemArrayIdx = 3;
13892   else if (InputSize + OutputSize == 96)
13893     TgtElemArrayIdx = 4;
13894   else
13895     return SDValue();
13896 
13897   uint64_t CorrectElems = TargetElems[TgtElemArrayIdx];
13898   CorrectElems = DAG.getDataLayout().isLittleEndian()
13899                      ? CorrectElems & 0x0F0F0F0F0F0F0F0F
13900                      : CorrectElems & 0xF0F0F0F0F0F0F0F0;
13901   if (Elems != CorrectElems) {
13902     return addShuffleForVecExtend(N, DAG, Input, Elems, CorrectElems);
13903   }
13904 
13905   // Regular lowering will catch cases where a shuffle is not needed.
13906   return SDValue();
13907 }
13908 
13909 // Look for the pattern of a load from a narrow width to i128, feeding
13910 // into a BUILD_VECTOR of v1i128. Replace this sequence with a PPCISD node
13911 // (LXVRZX). This node represents a zero extending load that will be matched
13912 // to the Load VSX Vector Rightmost instructions.
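// For example:
//   (v1i128 (build_vector (i128 (zextload i32 %p))))
// becomes (v1i128 (PPCISD::LXVRZX %chain, %p, 32)).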
13913 static SDValue combineBVZEXTLOAD(SDNode *N, SelectionDAG &DAG) {
13914   SDLoc DL(N);
13915 
13916   // This combine is only eligible for a BUILD_VECTOR of v1i128.
13917   if (N->getValueType(0) != MVT::v1i128)
13918     return SDValue();
13919 
13920   SDValue Operand = N->getOperand(0);
13921   // Proceed with the transformation if the operand to the BUILD_VECTOR
13922   // is a load instruction.
13923   if (Operand.getOpcode() != ISD::LOAD)
13924     return SDValue();
13925 
13926   LoadSDNode *LD = dyn_cast<LoadSDNode>(Operand);
13927   EVT MemoryType = LD->getMemoryVT();
13928 
13929   // This transformation is only valid if we are loading either a byte,
13930   // halfword, word, or doubleword.
13931   bool ValidLDType = MemoryType == MVT::i8 || MemoryType == MVT::i16 ||
13932                      MemoryType == MVT::i32 || MemoryType == MVT::i64;
13933 
13934   // Ensure that the load from the narrow width is being zero extended to i128.
13935   if (!ValidLDType ||
13936       (LD->getExtensionType() != ISD::ZEXTLOAD &&
13937        LD->getExtensionType() != ISD::EXTLOAD))
13938     return SDValue();
13939 
13940   SDValue LoadOps[] = {
13941       LD->getChain(), LD->getBasePtr(),
13942       DAG.getIntPtrConstant(MemoryType.getScalarSizeInBits(), DL)};
13943 
13944   return DAG.getMemIntrinsicNode(PPCISD::LXVRZX, DL,
13945                                  DAG.getVTList(MVT::v1i128, MVT::Other),
13946                                  LoadOps, MemoryType, LD->getMemOperand());
13947 }
13948 
13949 SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
13950                                                  DAGCombinerInfo &DCI) const {
13951   assert(N->getOpcode() == ISD::BUILD_VECTOR &&
13952          "Should be called with a BUILD_VECTOR node");
13953 
13954   SelectionDAG &DAG = DCI.DAG;
13955   SDLoc dl(N);
13956 
13957   if (!Subtarget.hasVSX())
13958     return SDValue();
13959 
13960   // The target independent DAG combiner will leave a build_vector of
13961   // float-to-int conversions intact. We can generate MUCH better code for
13962   // a float-to-int conversion of a vector of floats.
13963   SDValue FirstInput = N->getOperand(0);
13964   if (FirstInput.getOpcode() == PPCISD::MFVSR) {
13965     SDValue Reduced = combineElementTruncationToVectorTruncation(N, DCI);
13966     if (Reduced)
13967       return Reduced;
13968   }
13969 
13970   // If we're building a vector out of consecutive loads, just load that
13971   // vector type.
13972   SDValue Reduced = combineBVOfConsecutiveLoads(N, DAG);
13973   if (Reduced)
13974     return Reduced;
13975 
13976   // If we're building a vector out of extended elements from another vector
13977   // we have P9 vector integer extend instructions. The code assumes legal
13978   // input types (i.e. it can't handle things like v4i16) so do not run before
13979   // legalization.
13980   if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
13981     Reduced = combineBVOfVecSExt(N, DAG);
13982     if (Reduced)
13983       return Reduced;
13984   }
13985 
13986   // On Power10, the Load VSX Vector Rightmost instructions can be utilized
13987   // if this is a BUILD_VECTOR of v1i128, and if the operand to the BUILD_VECTOR
13988   // is a load from <valid narrow width> to i128.
13989   if (Subtarget.isISA3_1()) {
13990     SDValue BVOfZLoad = combineBVZEXTLOAD(N, DAG);
13991     if (BVOfZLoad)
13992       return BVOfZLoad;
13993   }
13994 
13995   if (N->getValueType(0) != MVT::v2f64)
13996     return SDValue();
13997 
13998   // Looking for:
13999   // (build_vector ([su]int_to_fp (extractelt 0)), ([su]int_to_fp (extractelt 1)))
14000   if (FirstInput.getOpcode() != ISD::SINT_TO_FP &&
14001       FirstInput.getOpcode() != ISD::UINT_TO_FP)
14002     return SDValue();
14003   if (N->getOperand(1).getOpcode() != ISD::SINT_TO_FP &&
14004       N->getOperand(1).getOpcode() != ISD::UINT_TO_FP)
14005     return SDValue();
14006   if (FirstInput.getOpcode() != N->getOperand(1).getOpcode())
14007     return SDValue();
14008 
14009   SDValue Ext1 = FirstInput.getOperand(0);
14010   SDValue Ext2 = N->getOperand(1).getOperand(0);
14011   if (Ext1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
14012      Ext2.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14013     return SDValue();
14014 
14015   ConstantSDNode *Ext1Op = dyn_cast<ConstantSDNode>(Ext1.getOperand(1));
14016   ConstantSDNode *Ext2Op = dyn_cast<ConstantSDNode>(Ext2.getOperand(1));
14017   if (!Ext1Op || !Ext2Op)
14018     return SDValue();
14019   if (Ext1.getOperand(0).getValueType() != MVT::v4i32 ||
14020       Ext1.getOperand(0) != Ext2.getOperand(0))
14021     return SDValue();
14022 
14023   int FirstElem = Ext1Op->getZExtValue();
14024   int SecondElem = Ext2Op->getZExtValue();
14025   int SubvecIdx;
14026   if (FirstElem == 0 && SecondElem == 1)
14027     SubvecIdx = Subtarget.isLittleEndian() ? 1 : 0;
14028   else if (FirstElem == 2 && SecondElem == 3)
14029     SubvecIdx = Subtarget.isLittleEndian() ? 0 : 1;
14030   else
14031     return SDValue();
14032 
14033   SDValue SrcVec = Ext1.getOperand(0);
14034   auto NodeType = (N->getOperand(1).getOpcode() == ISD::SINT_TO_FP) ?
14035     PPCISD::SINT_VEC_TO_FP : PPCISD::UINT_VEC_TO_FP;
14036   return DAG.getNode(NodeType, dl, MVT::v2f64,
14037                      SrcVec, DAG.getIntPtrConstant(SubvecIdx, dl));
14038 }
14039 
14040 SDValue PPCTargetLowering::combineFPToIntToFP(SDNode *N,
14041                                               DAGCombinerInfo &DCI) const {
14042   assert((N->getOpcode() == ISD::SINT_TO_FP ||
14043           N->getOpcode() == ISD::UINT_TO_FP) &&
14044          "Need an int -> FP conversion node here");
14045 
14046   if (useSoftFloat() || !Subtarget.has64BitSupport())
14047     return SDValue();
14048 
14049   SelectionDAG &DAG = DCI.DAG;
14050   SDLoc dl(N);
14051   SDValue Op(N, 0);
14052 
14053   // Don't handle ppc_fp128 here or conversions that are out-of-range capable
14054   // from the hardware.
14055   if (Op.getValueType() != MVT::f32 && Op.getValueType() != MVT::f64)
14056     return SDValue();
14057   if (!Op.getOperand(0).getValueType().isSimple())
14058     return SDValue();
14059   if (Op.getOperand(0).getValueType().getSimpleVT() <= MVT(MVT::i1) ||
14060       Op.getOperand(0).getValueType().getSimpleVT() > MVT(MVT::i64))
14061     return SDValue();
14062 
14063   SDValue FirstOperand(Op.getOperand(0));
14064   bool SubWordLoad = FirstOperand.getOpcode() == ISD::LOAD &&
14065     (FirstOperand.getValueType() == MVT::i8 ||
14066      FirstOperand.getValueType() == MVT::i16);
14067   if (Subtarget.hasP9Vector() && Subtarget.hasP9Altivec() && SubWordLoad) {
14068     bool Signed = N->getOpcode() == ISD::SINT_TO_FP;
14069     bool DstDouble = Op.getValueType() == MVT::f64;
14070     unsigned ConvOp = Signed ?
14071       (DstDouble ? PPCISD::FCFID  : PPCISD::FCFIDS) :
14072       (DstDouble ? PPCISD::FCFIDU : PPCISD::FCFIDUS);
14073     SDValue WidthConst =
14074       DAG.getIntPtrConstant(FirstOperand.getValueType() == MVT::i8 ? 1 : 2,
14075                             dl, false);
14076     LoadSDNode *LDN = cast<LoadSDNode>(FirstOperand.getNode());
14077     SDValue Ops[] = { LDN->getChain(), LDN->getBasePtr(), WidthConst };
14078     SDValue Ld = DAG.getMemIntrinsicNode(PPCISD::LXSIZX, dl,
14079                                          DAG.getVTList(MVT::f64, MVT::Other),
14080                                          Ops, MVT::i8, LDN->getMemOperand());
14081 
14082     // For signed conversion, we need to sign-extend the value in the VSR
14083     if (Signed) {
14084       SDValue ExtOps[] = { Ld, WidthConst };
14085       SDValue Ext = DAG.getNode(PPCISD::VEXTS, dl, MVT::f64, ExtOps);
14086       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ext);
14087     } else
14088       return DAG.getNode(ConvOp, dl, DstDouble ? MVT::f64 : MVT::f32, Ld);
14089   }
14090 
14091 
14092   // For i32 intermediate values, unfortunately, the conversion functions
14093   // leave the upper 32 bits of the value undefined. Within the set of
14094   // scalar instructions, we have no method for zero- or sign-extending the
14095   // value. Thus, we cannot handle i32 intermediate values here.
14096   if (Op.getOperand(0).getValueType() == MVT::i32)
14097     return SDValue();
14098 
14099   assert((Op.getOpcode() == ISD::SINT_TO_FP || Subtarget.hasFPCVT()) &&
14100          "UINT_TO_FP is supported only with FPCVT");
14101 
14102   // If we have FCFIDS, then use it when converting to single-precision.
14103   // Otherwise, convert to double-precision and then round.
14104   unsigned FCFOp = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14105                        ? (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDUS
14106                                                             : PPCISD::FCFIDS)
14107                        : (Op.getOpcode() == ISD::UINT_TO_FP ? PPCISD::FCFIDU
14108                                                             : PPCISD::FCFID);
14109   MVT FCFTy = (Subtarget.hasFPCVT() && Op.getValueType() == MVT::f32)
14110                   ? MVT::f32
14111                   : MVT::f64;
14112 
14113   // If we're converting from a float to an int and back to a float again,
14114   // then we don't need the store/load pair at all.
14115   if ((Op.getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
14116        Subtarget.hasFPCVT()) ||
14117       (Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT)) {
14118     SDValue Src = Op.getOperand(0).getOperand(0);
14119     if (Src.getValueType() == MVT::f32) {
14120       Src = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Src);
14121       DCI.AddToWorklist(Src.getNode());
14122     } else if (Src.getValueType() != MVT::f64) {
14123       // Make sure that we don't pick up a ppc_fp128 source value.
14124       return SDValue();
14125     }
14126 
14127     unsigned FCTOp =
14128       Op.getOperand(0).getOpcode() == ISD::FP_TO_SINT ? PPCISD::FCTIDZ :
14129                                                         PPCISD::FCTIDUZ;
14130 
14131     SDValue Tmp = DAG.getNode(FCTOp, dl, MVT::f64, Src);
14132     SDValue FP = DAG.getNode(FCFOp, dl, FCFTy, Tmp);
14133 
14134     if (Op.getValueType() == MVT::f32 && !Subtarget.hasFPCVT()) {
14135       FP = DAG.getNode(ISD::FP_ROUND, dl,
14136                        MVT::f32, FP, DAG.getIntPtrConstant(0, dl));
14137       DCI.AddToWorklist(FP.getNode());
14138     }
14139 
14140     return FP;
14141   }
14142 
14143   return SDValue();
14144 }
14145 
14146 // expandVSXLoadForLE - Convert VSX loads (which may be intrinsics for
14147 // builtins) into loads with swaps.
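// For example, on little endian:
//   (v2f64 (load %p))
// becomes (PPCISD::XXSWAPD (PPCISD::LXVD2X %p)), and other vector types are
// bitcast from the swapped v2f64 result.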
14148 SDValue PPCTargetLowering::expandVSXLoadForLE(SDNode *N,
14149                                               DAGCombinerInfo &DCI) const {
14150   SelectionDAG &DAG = DCI.DAG;
14151   SDLoc dl(N);
14152   SDValue Chain;
14153   SDValue Base;
14154   MachineMemOperand *MMO;
14155 
14156   switch (N->getOpcode()) {
14157   default:
14158     llvm_unreachable("Unexpected opcode for little endian VSX load");
14159   case ISD::LOAD: {
14160     LoadSDNode *LD = cast<LoadSDNode>(N);
14161     Chain = LD->getChain();
14162     Base = LD->getBasePtr();
14163     MMO = LD->getMemOperand();
14164     // If the MMO suggests this isn't a load of a full vector, leave
14165     // things alone.  For a built-in, we have to make the change for
14166     // correctness, so if there is a size problem that will be a bug.
14167     if (MMO->getSize() < 16)
14168       return SDValue();
14169     break;
14170   }
14171   case ISD::INTRINSIC_W_CHAIN: {
14172     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14173     Chain = Intrin->getChain();
14174     // Similarly to the store case below, Intrin->getBasePtr() doesn't get
14175     // us what we want. Get operand 2 instead.
14176     Base = Intrin->getOperand(2);
14177     MMO = Intrin->getMemOperand();
14178     break;
14179   }
14180   }
14181 
14182   MVT VecTy = N->getValueType(0).getSimpleVT();
14183 
14184   // Do not expand to PPCISD::LXVD2X + PPCISD::XXSWAPD when the load is
14185   // aligned and the type is a vector with elements up to 4 bytes.
14186   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14187       VecTy.getScalarSizeInBits() <= 32) {
14188     return SDValue();
14189   }
14190 
14191   SDValue LoadOps[] = { Chain, Base };
14192   SDValue Load = DAG.getMemIntrinsicNode(PPCISD::LXVD2X, dl,
14193                                          DAG.getVTList(MVT::v2f64, MVT::Other),
14194                                          LoadOps, MVT::v2f64, MMO);
14195 
14196   DCI.AddToWorklist(Load.getNode());
14197   Chain = Load.getValue(1);
14198   SDValue Swap = DAG.getNode(
14199       PPCISD::XXSWAPD, dl, DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Load);
14200   DCI.AddToWorklist(Swap.getNode());
14201 
14202   // Add a bitcast if the resulting load type doesn't match v2f64.
14203   if (VecTy != MVT::v2f64) {
14204     SDValue N = DAG.getNode(ISD::BITCAST, dl, VecTy, Swap);
14205     DCI.AddToWorklist(N.getNode());
14206     // Package {bitcast value, swap's chain} to match Load's shape.
14207     return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VecTy, MVT::Other),
14208                        N, Swap.getValue(1));
14209   }
14210 
14211   return Swap;
14212 }
14213 
14214 // expandVSXStoreForLE - Convert VSX stores (which may be intrinsics for
14215 // builtins) into stores with swaps.
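// For example, on little endian:
//   (store v2f64:%v, %p)
// becomes (PPCISD::STXVD2X (PPCISD::XXSWAPD %v), %p), with other vector
// types bitcast to v2f64 before the swap.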
14216 SDValue PPCTargetLowering::expandVSXStoreForLE(SDNode *N,
14217                                                DAGCombinerInfo &DCI) const {
14218   SelectionDAG &DAG = DCI.DAG;
14219   SDLoc dl(N);
14220   SDValue Chain;
14221   SDValue Base;
14222   unsigned SrcOpnd;
14223   MachineMemOperand *MMO;
14224 
14225   switch (N->getOpcode()) {
14226   default:
14227     llvm_unreachable("Unexpected opcode for little endian VSX store");
14228   case ISD::STORE: {
14229     StoreSDNode *ST = cast<StoreSDNode>(N);
14230     Chain = ST->getChain();
14231     Base = ST->getBasePtr();
14232     MMO = ST->getMemOperand();
14233     SrcOpnd = 1;
14234     // If the MMO suggests this isn't a store of a full vector, leave
14235     // things alone.  For a built-in, we have to make the change for
14236     // correctness, so if there is a size problem that will be a bug.
14237     if (MMO->getSize() < 16)
14238       return SDValue();
14239     break;
14240   }
14241   case ISD::INTRINSIC_VOID: {
14242     MemIntrinsicSDNode *Intrin = cast<MemIntrinsicSDNode>(N);
14243     Chain = Intrin->getChain();
14244     // Intrin->getBasePtr() oddly does not get what we want.
14245     Base = Intrin->getOperand(3);
14246     MMO = Intrin->getMemOperand();
14247     SrcOpnd = 2;
14248     break;
14249   }
14250   }
14251 
14252   SDValue Src = N->getOperand(SrcOpnd);
14253   MVT VecTy = Src.getValueType().getSimpleVT();
14254 
14255   // Do not expand to PPCISD::XXSWAPD and PPCISD::STXVD2X when the store is
14256   // aligned and the type is a vector with elements up to 4 bytes.
14257   if (Subtarget.needsSwapsForVSXMemOps() && MMO->getAlign() >= Align(16) &&
14258       VecTy.getScalarSizeInBits() <= 32) {
14259     return SDValue();
14260   }
14261 
14262   // All stores are done as v2f64 and possible bit cast.
14263   if (VecTy != MVT::v2f64) {
14264     Src = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Src);
14265     DCI.AddToWorklist(Src.getNode());
14266   }
14267 
14268   SDValue Swap = DAG.getNode(PPCISD::XXSWAPD, dl,
14269                              DAG.getVTList(MVT::v2f64, MVT::Other), Chain, Src);
14270   DCI.AddToWorklist(Swap.getNode());
14271   Chain = Swap.getValue(1);
14272   SDValue StoreOps[] = { Chain, Swap, Base };
14273   SDValue Store = DAG.getMemIntrinsicNode(PPCISD::STXVD2X, dl,
14274                                           DAG.getVTList(MVT::Other),
14275                                           StoreOps, VecTy, MMO);
14276   DCI.AddToWorklist(Store.getNode());
14277   return Store;
14278 }
14279 
14280 // Handle DAG combine for STORE (FP_TO_INT F).
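// For example:
//   (store (i32 (fp_to_sint f64:%f)), %p)
// becomes a PPCISD::ST_VSR_SCAL_INT of (PPCISD::FP_TO_SINT_IN_VSR %f),
// storing the converted value directly from a VSR instead of moving it
// into a GPR first.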
14281 SDValue PPCTargetLowering::combineStoreFPToInt(SDNode *N,
14282                                                DAGCombinerInfo &DCI) const {
14283 
14284   SelectionDAG &DAG = DCI.DAG;
14285   SDLoc dl(N);
14286   unsigned Opcode = N->getOperand(1).getOpcode();
14287 
14288   assert((Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT)
14289          && "Not a FP_TO_INT Instruction!");
14290 
14291   SDValue Val = N->getOperand(1).getOperand(0);
14292   EVT Op1VT = N->getOperand(1).getValueType();
14293   EVT ResVT = Val.getValueType();
14294 
14295   if (!isTypeLegal(ResVT))
14296     return SDValue();
14297 
14298   // Only perform combine for conversion to i64/i32 or power9 i16/i8.
14299   bool ValidTypeForStoreFltAsInt =
14300         (Op1VT == MVT::i32 || Op1VT == MVT::i64 ||
14301          (Subtarget.hasP9Vector() && (Op1VT == MVT::i16 || Op1VT == MVT::i8)));
14302 
14303   if (ResVT == MVT::f128 && !Subtarget.hasP9Vector())
14304     return SDValue();
14305 
14306   if (ResVT == MVT::ppcf128 || !Subtarget.hasP8Vector() ||
14307       cast<StoreSDNode>(N)->isTruncatingStore() || !ValidTypeForStoreFltAsInt)
14308     return SDValue();
14309 
14310   // Extend f32 values to f64
14311   if (ResVT.getScalarSizeInBits() == 32) {
14312     Val = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f64, Val);
14313     DCI.AddToWorklist(Val.getNode());
14314   }
14315 
14316   // Set signed or unsigned conversion opcode.
14317   unsigned ConvOpcode = (Opcode == ISD::FP_TO_SINT) ?
14318                           PPCISD::FP_TO_SINT_IN_VSR :
14319                           PPCISD::FP_TO_UINT_IN_VSR;
14320 
14321   Val = DAG.getNode(ConvOpcode,
14322                     dl, ResVT == MVT::f128 ? MVT::f128 : MVT::f64, Val);
14323   DCI.AddToWorklist(Val.getNode());
14324 
14325   // Set number of bytes being converted.
14326   unsigned ByteSize = Op1VT.getScalarSizeInBits() / 8;
14327   SDValue Ops[] = { N->getOperand(0), Val, N->getOperand(2),
14328                     DAG.getIntPtrConstant(ByteSize, dl, false),
14329                     DAG.getValueType(Op1VT) };
14330 
14331   Val = DAG.getMemIntrinsicNode(PPCISD::ST_VSR_SCAL_INT, dl,
14332           DAG.getVTList(MVT::Other), Ops,
14333           cast<StoreSDNode>(N)->getMemoryVT(),
14334           cast<StoreSDNode>(N)->getMemOperand());
14335 
14336   DCI.AddToWorklist(Val.getNode());
14337   return Val;
14338 }
14339 
14340 static bool isAlternatingShuffMask(const ArrayRef<int> &Mask, int NumElts) {
14341   // Check that the source of the element keeps flipping
14342   // (i.e. Mask[i] < NumElts -> Mask[i+1] >= NumElts).
14343   bool PrevElemFromFirstVec = Mask[0] < NumElts;
14344   for (int i = 1, e = Mask.size(); i < e; i++) {
14345     if (PrevElemFromFirstVec && Mask[i] < NumElts)
14346       return false;
14347     if (!PrevElemFromFirstVec && Mask[i] >= NumElts)
14348       return false;
14349     PrevElemFromFirstVec = !PrevElemFromFirstVec;
14350   }
14351   return true;
14352 }
14353 
14354 static bool isSplatBV(SDValue Op) {
14355   if (Op.getOpcode() != ISD::BUILD_VECTOR)
14356     return false;
14357   SDValue FirstOp;
14358 
14359   // Find first non-undef input.
14360   for (int i = 0, e = Op.getNumOperands(); i < e; i++) {
14361     FirstOp = Op.getOperand(i);
14362     if (!FirstOp.isUndef())
14363       break;
14364   }
14365 
14366   // All inputs are undef or the same as the first non-undef input.
14367   for (int i = 1, e = Op.getNumOperands(); i < e; i++)
14368     if (Op.getOperand(i) != FirstOp && !Op.getOperand(i).isUndef())
14369       return false;
14370   return true;
14371 }
14372 
14373 static SDValue isScalarToVec(SDValue Op) {
14374   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14375     return Op;
14376   if (Op.getOpcode() != ISD::BITCAST)
14377     return SDValue();
14378   Op = Op.getOperand(0);
14379   if (Op.getOpcode() == ISD::SCALAR_TO_VECTOR)
14380     return Op;
14381   return SDValue();
14382 }
14383 
14384 // Fix up the shuffle mask to account for the fact that the result of
14385 // scalar_to_vector is not in lane zero. This just takes all values in
14386 // the ranges specified by the min/max indices and adds the number of
14387 // elements required to ensure each element comes from the respective
14388 // position in the valid lane.
14389 // On little endian, that's just the corresponding element in the other
14390 // half of the vector. On big endian, it is in the same half but right
14391 // justified rather than left justified in that half.
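// For example, with a v4i32 shuffle on little endian (HalfVec == 2), a mask
// entry of 0 that refers to a permuted scalar_to_vector operand becomes 2,
// since the scalar actually lives in the other half of the register.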
14392 static void fixupShuffleMaskForPermutedSToV(SmallVectorImpl<int> &ShuffV,
14393                                             int LHSMaxIdx, int RHSMinIdx,
14394                                             int RHSMaxIdx, int HalfVec,
14395                                             unsigned ValidLaneWidth,
14396                                             const PPCSubtarget &Subtarget) {
14397   for (int i = 0, e = ShuffV.size(); i < e; i++) {
14398     int Idx = ShuffV[i];
14399     if ((Idx >= 0 && Idx < LHSMaxIdx) || (Idx >= RHSMinIdx && Idx < RHSMaxIdx))
14400       ShuffV[i] +=
14401           Subtarget.isLittleEndian() ? HalfVec : HalfVec - ValidLaneWidth;
14402   }
14403 }
14404 
14405 // Replace a SCALAR_TO_VECTOR with a SCALAR_TO_VECTOR_PERMUTED except if
14406 // the original is:
14407 // (<n x Ty> (scalar_to_vector (Ty (extract_elt <n x Ty> %a, C))))
14408 // In such a case, just change the shuffle mask to extract the element
14409 // from the permuted index.
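// For example:
//   (v4i32 (scalar_to_vector (i32 (extract_elt v4i32:%a, 1))))
// is turned into a shuffle of %a with itself that leaves element 1 in the
// lane a permuted scalar_to_vector would use, avoiding the extract and
// re-insert entirely.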
14410 static SDValue getSToVPermuted(SDValue OrigSToV, SelectionDAG &DAG,
14411                                const PPCSubtarget &Subtarget) {
14412   SDLoc dl(OrigSToV);
14413   EVT VT = OrigSToV.getValueType();
14414   assert(OrigSToV.getOpcode() == ISD::SCALAR_TO_VECTOR &&
14415          "Expecting a SCALAR_TO_VECTOR here");
14416   SDValue Input = OrigSToV.getOperand(0);
14417 
14418   if (Input.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14419     ConstantSDNode *Idx = dyn_cast<ConstantSDNode>(Input.getOperand(1));
14420     SDValue OrigVector = Input.getOperand(0);
14421 
14422     // Can't handle non-const element indices or different vector types
14423     // for the input to the extract and the output of the scalar_to_vector.
14424     if (Idx && VT == OrigVector.getValueType()) {
14425       unsigned NumElts = VT.getVectorNumElements();
14426       assert(
14427           NumElts > 1 &&
14428           "Cannot produce a permuted scalar_to_vector for one element vector");
14429       SmallVector<int, 16> NewMask(NumElts, -1);
14430       unsigned ResultInElt = NumElts / 2;
14431       ResultInElt -= Subtarget.isLittleEndian() ? 0 : 1;
14432       NewMask[ResultInElt] = Idx->getZExtValue();
14433       return DAG.getVectorShuffle(VT, dl, OrigVector, OrigVector, NewMask);
14434     }
14435   }
14436   return DAG.getNode(PPCISD::SCALAR_TO_VECTOR_PERMUTED, dl, VT,
14437                      OrigSToV.getOperand(0));
14438 }
14439 
14440 // On little endian subtargets, combine shuffles such as:
14441 // vector_shuffle<16,1,17,3,18,5,19,7,20,9,21,11,22,13,23,15>, <zero>, %b
14442 // into:
14443 // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7>, <zero>, %b
14444 // because the latter can be matched to a single instruction merge.
14445 // Furthermore, SCALAR_TO_VECTOR on little endian always involves a permute
14446 // to put the value into element zero. Adjust the shuffle mask so that the
14447 // vector can remain in permuted form (to prevent a swap prior to a shuffle).
14448 // On big endian targets, this is still useful for SCALAR_TO_VECTOR
14449 // nodes with elements smaller than doubleword because all the ways
14450 // of getting scalar data into a vector register put the value in the
14451 // rightmost element of the left half of the vector.
14452 SDValue PPCTargetLowering::combineVectorShuffle(ShuffleVectorSDNode *SVN,
14453                                                 SelectionDAG &DAG) const {
14454   SDValue LHS = SVN->getOperand(0);
14455   SDValue RHS = SVN->getOperand(1);
14456   auto Mask = SVN->getMask();
14457   int NumElts = LHS.getValueType().getVectorNumElements();
14458   SDValue Res(SVN, 0);
14459   SDLoc dl(SVN);
14460   bool IsLittleEndian = Subtarget.isLittleEndian();
14461 
14462   // On little endian targets, do these combines on all VSX targets since
14463   // canonical shuffles match efficient permutes. On big endian targets,
14464   // this is only useful for targets with direct moves.
14465   if (!Subtarget.hasDirectMove() && !(IsLittleEndian && Subtarget.hasVSX()))
14466     return Res;
14467 
14468   // If this is not a shuffle of a shuffle and the first element comes from
14469   // the second vector, canonicalize to the commuted form. This will make it
14470   // more likely to match one of the single instruction patterns.
14471   if (Mask[0] >= NumElts && LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
14472       RHS.getOpcode() != ISD::VECTOR_SHUFFLE) {
14473     std::swap(LHS, RHS);
14474     Res = DAG.getCommutedVectorShuffle(*SVN);
14475     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14476   }
14477 
14478   // Adjust the shuffle mask if either input vector comes from a
14479   // SCALAR_TO_VECTOR and keep the respective input vector in permuted
14480   // form (to prevent the need for a swap).
14481   SmallVector<int, 16> ShuffV(Mask.begin(), Mask.end());
14482   SDValue SToVLHS = isScalarToVec(LHS);
14483   SDValue SToVRHS = isScalarToVec(RHS);
14484   if (SToVLHS || SToVRHS) {
14485     int NumEltsIn = SToVLHS ? SToVLHS.getValueType().getVectorNumElements()
14486                             : SToVRHS.getValueType().getVectorNumElements();
14487     int NumEltsOut = ShuffV.size();
14488     unsigned InElemSizeInBits =
14489         SToVLHS ? SToVLHS.getValueType().getScalarSizeInBits()
14490                 : SToVRHS.getValueType().getScalarSizeInBits();
14491     unsigned OutElemSizeInBits = SToVLHS
14492                                      ? LHS.getValueType().getScalarSizeInBits()
14493                                      : RHS.getValueType().getScalarSizeInBits();
14494 
14495     // The width of the "valid lane" (i.e. the lane that contains the value that
14496     // is vectorized) needs to be expressed in terms of the number of elements
14497     // of the shuffle. It is thereby the ratio of the element sizes before and
14498     // after any bitcast.
14499     unsigned ValidLaneWidth = InElemSizeInBits / OutElemSizeInBits;
14500 
14501     // Initially assume that neither input is permuted. These will be adjusted
14502     // accordingly if either input is.
14503     int LHSMaxIdx = -1;
14504     int RHSMinIdx = -1;
14505     int RHSMaxIdx = -1;
14506     int HalfVec = LHS.getValueType().getVectorNumElements() / 2;
14507 
14508     // Get the permuted scalar to vector nodes for the source(s) that come from
14509     // ISD::SCALAR_TO_VECTOR.
14510     // On big endian systems, this only makes sense for element sizes smaller
14511     // than 64 bits since for 64-bit elements, all instructions already put
14512     // the value into element zero.
14513     if (SToVLHS) {
14514       if (!IsLittleEndian && InElemSizeInBits >= 64)
14515         return Res;
14516       // Set up the values for the shuffle vector fixup.
14517       LHSMaxIdx = NumEltsOut / NumEltsIn;
14518       SToVLHS = getSToVPermuted(SToVLHS, DAG, Subtarget);
14519       if (SToVLHS.getValueType() != LHS.getValueType())
14520         SToVLHS = DAG.getBitcast(LHS.getValueType(), SToVLHS);
14521       LHS = SToVLHS;
14522     }
14523     if (SToVRHS) {
14524       if (!IsLittleEndian && InElemSizeInBits >= 64)
14525         return Res;
14526       RHSMinIdx = NumEltsOut;
14527       RHSMaxIdx = NumEltsOut / NumEltsIn + RHSMinIdx;
14528       SToVRHS = getSToVPermuted(SToVRHS, DAG, Subtarget);
14529       if (SToVRHS.getValueType() != RHS.getValueType())
14530         SToVRHS = DAG.getBitcast(RHS.getValueType(), SToVRHS);
14531       RHS = SToVRHS;
14532     }
14533 
14534     // Fix up the shuffle mask to reflect where the desired element actually is.
14535     // The minimum and maximum indices that correspond to element zero for both
14536     // the LHS and RHS are computed and will control which shuffle mask entries
14537     // are to be changed. For example, if the RHS is permuted, any shuffle mask
14538     // entries in the range [RHSMinIdx,RHSMaxIdx) will be adjusted.
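    // For instance, for a v4i32 shuffle whose RHS is a permuted v2i64
    // SCALAR_TO_VECTOR, RHSMinIdx is 4 and RHSMaxIdx is 4 / 2 + 4 = 6, so only
    // mask entries with values in [4, 6) are rewritten.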
14539     fixupShuffleMaskForPermutedSToV(ShuffV, LHSMaxIdx, RHSMinIdx, RHSMaxIdx,
14540                                     HalfVec, ValidLaneWidth, Subtarget);
14541     Res = DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14542 
14543     // We may have simplified away the shuffle. We won't be able to do anything
14544     // further with it here.
14545     if (!isa<ShuffleVectorSDNode>(Res))
14546       return Res;
14547     Mask = cast<ShuffleVectorSDNode>(Res)->getMask();
14548   }
14549 
14550   SDValue TheSplat = IsLittleEndian ? RHS : LHS;
14551   // The common case after we commuted the shuffle is that the RHS is a splat
14552   // and we have elements coming in from the splat at indices that are not
14553   // conducive to using a merge.
14554   // Example:
14555   // vector_shuffle<0,17,1,19,2,21,3,23,4,25,5,27,6,29,7,31> t1, <zero>
14556   if (!isSplatBV(TheSplat))
14557     return Res;
14558 
14559   // We are looking for a mask such that all even elements are from
14560   // one vector and all odd elements from the other.
14561   if (!isAlternatingShuffMask(Mask, NumElts))
14562     return Res;
14563 
14564   // Adjust the mask so we are pulling in the same index from the splat
14565   // as the index from the interesting vector in consecutive elements.
14566   if (IsLittleEndian) {
14567     // Example (even elements from first vector):
14568     // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> t1, <zero>
14569     if (Mask[0] < NumElts)
14570       for (int i = 1, e = Mask.size(); i < e; i += 2)
14571         ShuffV[i] = (ShuffV[i - 1] + NumElts);
14572     // Example (odd elements from first vector):
14573     // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> t1, <zero>
14574     else
14575       for (int i = 0, e = Mask.size(); i < e; i += 2)
14576         ShuffV[i] = (ShuffV[i + 1] + NumElts);
14577   } else {
14578     // Example (even elements from first vector):
14579     // vector_shuffle<0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23> <zero>, t1
14580     if (Mask[0] < NumElts)
14581       for (int i = 0, e = Mask.size(); i < e; i += 2)
14582         ShuffV[i] = ShuffV[i + 1] - NumElts;
14583     // Example (odd elements from first vector):
14584     // vector_shuffle<16,0,17,1,18,2,19,3,20,4,21,5,22,6,23,7> <zero>, t1
14585     else
14586       for (int i = 1, e = Mask.size(); i < e; i += 2)
14587         ShuffV[i] = ShuffV[i - 1] - NumElts;
14588   }
14589 
14590   // If the RHS has undefs, we need to remove them since we may have created
14591   // a shuffle that adds those instead of the splat value.
14592   SDValue SplatVal =
14593       cast<BuildVectorSDNode>(TheSplat.getNode())->getSplatValue();
14594   TheSplat = DAG.getSplatBuildVector(TheSplat.getValueType(), dl, SplatVal);
14595 
14596   if (IsLittleEndian)
14597     RHS = TheSplat;
14598   else
14599     LHS = TheSplat;
14600   return DAG.getVectorShuffle(SVN->getValueType(0), dl, LHS, RHS, ShuffV);
14601 }
14602 
14603 SDValue PPCTargetLowering::combineVReverseMemOP(ShuffleVectorSDNode *SVN,
14604                                                 LSBaseSDNode *LSBase,
14605                                                 DAGCombinerInfo &DCI) const {
14606   assert((ISD::isNormalLoad(LSBase) || ISD::isNormalStore(LSBase)) &&
14607         "Not a reverse memop pattern!");
14608 
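  // Returns true iff the shuffle mask reverses the element order, e.g. a
  // v4i32 shuffle with mask <3,2,1,0>.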
14609   auto IsElementReverse = [](const ShuffleVectorSDNode *SVN) -> bool {
14610     auto Mask = SVN->getMask();
14611     int i = 0;
14612     auto I = Mask.rbegin();
14613     auto E = Mask.rend();
14614 
14615     for (; I != E; ++I) {
14616       if (*I != i)
14617         return false;
14618       i++;
14619     }
14620     return true;
14621   };
14622 
14623   SelectionDAG &DAG = DCI.DAG;
14624   EVT VT = SVN->getValueType(0);
14625 
14626   if (!isTypeLegal(VT) || !Subtarget.isLittleEndian() || !Subtarget.hasVSX())
14627     return SDValue();
14628 
14629   // Before Power9, the PPCVSXSwapRemoval pass adjusts the element order;
14630   // see the comment in PPCVSXSwapRemoval.cpp.
14631   // This combine conflicts with that optimization, so we skip it there.
14632   if (!Subtarget.hasP9Vector())
14633     return SDValue();
14634 
14635   if (!IsElementReverse(SVN))
14636     return SDValue();
14637 
14638   if (LSBase->getOpcode() == ISD::LOAD) {
14639     // If result value 0 of the load has any user other than the
14640     // shuffle-vector node, it is not profitable to replace the
14641     // shuffle with a reverse load.
14642     for (SDNode::use_iterator UI = LSBase->use_begin(), UE = LSBase->use_end();
14643          UI != UE; ++UI)
14644       if (UI.getUse().getResNo() == 0 && UI->getOpcode() != ISD::VECTOR_SHUFFLE)
14645         return SDValue();
14646 
14647     SDLoc dl(LSBase);
14648     SDValue LoadOps[] = {LSBase->getChain(), LSBase->getBasePtr()};
14649     return DAG.getMemIntrinsicNode(
14650         PPCISD::LOAD_VEC_BE, dl, DAG.getVTList(VT, MVT::Other), LoadOps,
14651         LSBase->getMemoryVT(), LSBase->getMemOperand());
14652   }
14653 
14654   if (LSBase->getOpcode() == ISD::STORE) {
14655     // If there are other uses of the shuffle, the swap cannot be avoided.
14656     // Forcing the use of an X-Form (since swapped stores only have
14657     // X-Forms) without removing the swap is unprofitable.
14658     if (!SVN->hasOneUse())
14659       return SDValue();
14660 
14661     SDLoc dl(LSBase);
14662     SDValue StoreOps[] = {LSBase->getChain(), SVN->getOperand(0),
14663                           LSBase->getBasePtr()};
14664     return DAG.getMemIntrinsicNode(
14665         PPCISD::STORE_VEC_BE, dl, DAG.getVTList(MVT::Other), StoreOps,
14666         LSBase->getMemoryVT(), LSBase->getMemOperand());
14667   }
14668 
14669   llvm_unreachable("Expected a load or store node here");
14670 }
14671 
PerformDAGCombine(SDNode * N,DAGCombinerInfo & DCI) const14672 SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
14673                                              DAGCombinerInfo &DCI) const {
14674   SelectionDAG &DAG = DCI.DAG;
14675   SDLoc dl(N);
14676   switch (N->getOpcode()) {
14677   default: break;
14678   case ISD::ADD:
14679     return combineADD(N, DCI);
14680   case ISD::SHL:
14681     return combineSHL(N, DCI);
14682   case ISD::SRA:
14683     return combineSRA(N, DCI);
14684   case ISD::SRL:
14685     return combineSRL(N, DCI);
14686   case ISD::MUL:
14687     return combineMUL(N, DCI);
14688   case ISD::FMA:
14689   case PPCISD::FNMSUB:
14690     return combineFMALike(N, DCI);
14691   case PPCISD::SHL:
14692     if (isNullConstant(N->getOperand(0))) // 0 << V -> 0.
14693       return N->getOperand(0);
14694     break;
14695   case PPCISD::SRL:
14696     if (isNullConstant(N->getOperand(0))) // 0 >>u V -> 0.
14697       return N->getOperand(0);
14698     break;
14699   case PPCISD::SRA:
14700     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
14701       if (C->isNullValue() ||   //  0 >>s V -> 0.
14702           C->isAllOnesValue())    // -1 >>s V -> -1.
14703         return N->getOperand(0);
14704     }
14705     break;
14706   case ISD::SIGN_EXTEND:
14707   case ISD::ZERO_EXTEND:
14708   case ISD::ANY_EXTEND:
14709     return DAGCombineExtBoolTrunc(N, DCI);
14710   case ISD::TRUNCATE:
14711     return combineTRUNCATE(N, DCI);
14712   case ISD::SETCC:
14713     if (SDValue CSCC = combineSetCC(N, DCI))
14714       return CSCC;
14715     LLVM_FALLTHROUGH;
14716   case ISD::SELECT_CC:
14717     return DAGCombineTruncBoolExt(N, DCI);
14718   case ISD::SINT_TO_FP:
14719   case ISD::UINT_TO_FP:
14720     return combineFPToIntToFP(N, DCI);
14721   case ISD::VECTOR_SHUFFLE:
14722     if (ISD::isNormalLoad(N->getOperand(0).getNode())) {
14723       LSBaseSDNode* LSBase = cast<LSBaseSDNode>(N->getOperand(0));
14724       return combineVReverseMemOP(cast<ShuffleVectorSDNode>(N), LSBase, DCI);
14725     }
14726     return combineVectorShuffle(cast<ShuffleVectorSDNode>(N), DCI.DAG);
14727   case ISD::STORE: {
14728 
14729     EVT Op1VT = N->getOperand(1).getValueType();
14730     unsigned Opcode = N->getOperand(1).getOpcode();
14731 
14732     if (Opcode == ISD::FP_TO_SINT || Opcode == ISD::FP_TO_UINT) {
14733       SDValue Val = combineStoreFPToInt(N, DCI);
14734       if (Val)
14735         return Val;
14736     }
14737 
14738     if (Opcode == ISD::VECTOR_SHUFFLE && ISD::isNormalStore(N)) {
14739       ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N->getOperand(1));
14740       SDValue Val = combineVReverseMemOP(SVN, cast<LSBaseSDNode>(N), DCI);
14741       if (Val)
14742         return Val;
14743     }
14744 
14745     // Turn STORE (BSWAP) -> sthbrx/stwbrx.
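    // For example, a store of (bswap i32 x) becomes a single stwbrx of x.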
14746     if (cast<StoreSDNode>(N)->isUnindexed() && Opcode == ISD::BSWAP &&
14747         N->getOperand(1).getNode()->hasOneUse() &&
14748         (Op1VT == MVT::i32 || Op1VT == MVT::i16 ||
14749          (Subtarget.hasLDBRX() && Subtarget.isPPC64() && Op1VT == MVT::i64))) {
14750 
14751       // STBRX can only handle simple types, and it makes no sense to store
14752       // fewer than two bytes in byte-reversed order.
14753       EVT mVT = cast<StoreSDNode>(N)->getMemoryVT();
14754       if (mVT.isExtended() || mVT.getSizeInBits() < 16)
14755         break;
14756 
14757       SDValue BSwapOp = N->getOperand(1).getOperand(0);
14758       // Do an any-extend to 32-bits if this is a half-word input.
14759       if (BSwapOp.getValueType() == MVT::i16)
14760         BSwapOp = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, BSwapOp);
14761 
14762       // If the type of the BSWAP operand is wider than the stored memory
14763       // width, it needs to be shifted right before the STBRX.
14764       if (Op1VT.bitsGT(mVT)) {
14765         int Shift = Op1VT.getSizeInBits() - mVT.getSizeInBits();
14766         BSwapOp = DAG.getNode(ISD::SRL, dl, Op1VT, BSwapOp,
14767                               DAG.getConstant(Shift, dl, MVT::i32));
14768         // Need to truncate if this is a bswap of i64 stored as i32/i16.
14769         if (Op1VT == MVT::i64)
14770           BSwapOp = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BSwapOp);
14771       }
14772 
14773       SDValue Ops[] = {
14774         N->getOperand(0), BSwapOp, N->getOperand(2), DAG.getValueType(mVT)
14775       };
14776       return
14777         DAG.getMemIntrinsicNode(PPCISD::STBRX, dl, DAG.getVTList(MVT::Other),
14778                                 Ops, cast<StoreSDNode>(N)->getMemoryVT(),
14779                                 cast<StoreSDNode>(N)->getMemOperand());
14780     }
14781 
14782     // STORE Constant:i32<0>  ->  STORE<trunc to i32> Constant:i64<0>
14783     // This increases the chance of CSE for constant materialization.
14784     if (Subtarget.isPPC64() && !DCI.isBeforeLegalize() &&
14785         isa<ConstantSDNode>(N->getOperand(1)) && Op1VT == MVT::i32) {
14786       // Need to sign-extend to 64 bits to handle negative values.
14787       EVT MemVT = cast<StoreSDNode>(N)->getMemoryVT();
14788       uint64_t Val64 = SignExtend64(N->getConstantOperandVal(1),
14789                                     MemVT.getSizeInBits());
14790       SDValue Const64 = DAG.getConstant(Val64, dl, MVT::i64);
14791 
14792       // DAG.getTruncStore() can't be used here because it doesn't accept
14793       // the general (base + offset) addressing mode.
14794       // So we use UpdateNodeOperands and setTruncatingStore instead.
14795       DAG.UpdateNodeOperands(N, N->getOperand(0), Const64, N->getOperand(2),
14796                              N->getOperand(3));
14797       cast<StoreSDNode>(N)->setTruncatingStore(true);
14798       return SDValue(N, 0);
14799     }
14800 
14801     // For little endian, VSX stores require generating xxswapd/stxvd2x.
14802     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
14803     if (Op1VT.isSimple()) {
14804       MVT StoreVT = Op1VT.getSimpleVT();
14805       if (Subtarget.needsSwapsForVSXMemOps() &&
14806           (StoreVT == MVT::v2f64 || StoreVT == MVT::v2i64 ||
14807            StoreVT == MVT::v4f32 || StoreVT == MVT::v4i32))
14808         return expandVSXStoreForLE(N, DCI);
14809     }
14810     break;
14811   }
14812   case ISD::LOAD: {
14813     LoadSDNode *LD = cast<LoadSDNode>(N);
14814     EVT VT = LD->getValueType(0);
14815 
14816     // For little endian, VSX loads require generating lxvd2x/xxswapd.
14817     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
14818     if (VT.isSimple()) {
14819       MVT LoadVT = VT.getSimpleVT();
14820       if (Subtarget.needsSwapsForVSXMemOps() &&
14821           (LoadVT == MVT::v2f64 || LoadVT == MVT::v2i64 ||
14822            LoadVT == MVT::v4f32 || LoadVT == MVT::v4i32))
14823         return expandVSXLoadForLE(N, DCI);
14824     }
14825 
14826     // We sometimes end up with a 64-bit integer load, from which we extract
14827     // two single-precision floating-point numbers. This happens with
14828     // std::complex<float>, and other similar structures, because of the way we
14829     // canonicalize structure copies. However, if we lack direct moves,
14830     // then the final bitcasts from the extracted integer values to the
14831     // floating-point numbers turn into store/load pairs. Even with direct moves,
14832     // just loading the two floating-point numbers is likely better.
14833     auto ReplaceTwoFloatLoad = [&]() {
14834       if (VT != MVT::i64)
14835         return false;
14836 
14837       if (LD->getExtensionType() != ISD::NON_EXTLOAD ||
14838           LD->isVolatile())
14839         return false;
14840 
14841       //  We're looking for a sequence like this:
14842       //  t13: i64,ch = load<LD8[%ref.tmp]> t0, t6, undef:i64
14843       //      t16: i64 = srl t13, Constant:i32<32>
14844       //    t17: i32 = truncate t16
14845       //  t18: f32 = bitcast t17
14846       //    t19: i32 = truncate t13
14847       //  t20: f32 = bitcast t19
14848 
14849       if (!LD->hasNUsesOfValue(2, 0))
14850         return false;
14851 
14852       auto UI = LD->use_begin();
14853       while (UI.getUse().getResNo() != 0) ++UI;
14854       SDNode *Trunc = *UI++;
14855       while (UI.getUse().getResNo() != 0) ++UI;
14856       SDNode *RightShift = *UI;
14857       if (Trunc->getOpcode() != ISD::TRUNCATE)
14858         std::swap(Trunc, RightShift);
14859 
14860       if (Trunc->getOpcode() != ISD::TRUNCATE ||
14861           Trunc->getValueType(0) != MVT::i32 ||
14862           !Trunc->hasOneUse())
14863         return false;
14864       if (RightShift->getOpcode() != ISD::SRL ||
14865           !isa<ConstantSDNode>(RightShift->getOperand(1)) ||
14866           RightShift->getConstantOperandVal(1) != 32 ||
14867           !RightShift->hasOneUse())
14868         return false;
14869 
14870       SDNode *Trunc2 = *RightShift->use_begin();
14871       if (Trunc2->getOpcode() != ISD::TRUNCATE ||
14872           Trunc2->getValueType(0) != MVT::i32 ||
14873           !Trunc2->hasOneUse())
14874         return false;
14875 
14876       SDNode *Bitcast = *Trunc->use_begin();
14877       SDNode *Bitcast2 = *Trunc2->use_begin();
14878 
14879       if (Bitcast->getOpcode() != ISD::BITCAST ||
14880           Bitcast->getValueType(0) != MVT::f32)
14881         return false;
14882       if (Bitcast2->getOpcode() != ISD::BITCAST ||
14883           Bitcast2->getValueType(0) != MVT::f32)
14884         return false;
14885 
14886       if (Subtarget.isLittleEndian())
14887         std::swap(Bitcast, Bitcast2);
14888 
14889       // Bitcast has the second float (in memory-layout order) and Bitcast2
14890       // has the first one.
14891 
14892       SDValue BasePtr = LD->getBasePtr();
14893       if (LD->isIndexed()) {
14894         assert(LD->getAddressingMode() == ISD::PRE_INC &&
14895                "Non-pre-inc AM on PPC?");
14896         BasePtr =
14897           DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(), BasePtr,
14898                       LD->getOffset());
14899       }
14900 
14901       auto MMOFlags =
14902           LD->getMemOperand()->getFlags() & ~MachineMemOperand::MOVolatile;
14903       SDValue FloatLoad = DAG.getLoad(MVT::f32, dl, LD->getChain(), BasePtr,
14904                                       LD->getPointerInfo(), LD->getAlignment(),
14905                                       MMOFlags, LD->getAAInfo());
14906       SDValue AddPtr =
14907         DAG.getNode(ISD::ADD, dl, BasePtr.getValueType(),
14908                     BasePtr, DAG.getIntPtrConstant(4, dl));
14909       SDValue FloatLoad2 = DAG.getLoad(
14910           MVT::f32, dl, SDValue(FloatLoad.getNode(), 1), AddPtr,
14911           LD->getPointerInfo().getWithOffset(4),
14912           MinAlign(LD->getAlignment(), 4), MMOFlags, LD->getAAInfo());
14913 
14914       if (LD->isIndexed()) {
14915         // Note that DAGCombine should re-form any pre-increment load(s) from
14916         // what is produced here if that makes sense.
14917         DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), BasePtr);
14918       }
14919 
14920       DCI.CombineTo(Bitcast2, FloatLoad);
14921       DCI.CombineTo(Bitcast, FloatLoad2);
14922 
14923       DAG.ReplaceAllUsesOfValueWith(SDValue(LD, LD->isIndexed() ? 2 : 1),
14924                                     SDValue(FloatLoad2.getNode(), 1));
14925       return true;
14926     };
14927 
14928     if (ReplaceTwoFloatLoad())
14929       return SDValue(N, 0);
14930 
14931     EVT MemVT = LD->getMemoryVT();
14932     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
14933     Align ABIAlignment = DAG.getDataLayout().getABITypeAlign(Ty);
14934     if (LD->isUnindexed() && VT.isVector() &&
14935         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
14936           // P8 and later hardware should just use LOAD.
14937           !Subtarget.hasP8Vector() &&
14938           (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 ||
14939            VT == MVT::v4f32))) &&
14940         LD->getAlign() < ABIAlignment) {
14941       // This is a type-legal unaligned Altivec load.
14942       SDValue Chain = LD->getChain();
14943       SDValue Ptr = LD->getBasePtr();
14944       bool isLittleEndian = Subtarget.isLittleEndian();
14945 
14946       // This implements the loading of unaligned vectors as described in
14947       // the venerable Apple Velocity Engine overview. Specifically:
14948       // https://developer.apple.com/hardwaredrivers/ve/alignment.html
14949       // https://developer.apple.com/hardwaredrivers/ve/code_optimization.html
14950       //
14951       // The general idea is to expand a sequence of one or more unaligned
14952       // loads into an alignment-based permutation-control instruction (lvsl
14953       // or lvsr), a series of regular vector loads (which always truncate
14954       // their input address to an aligned address), and a series of
14955       // permutations.  The results of these permutations are the requested
14956       // loaded values.  The trick is that the last "extra" load is not taken
14957       // from the address you might suspect (sizeof(vector) bytes after the
14958       // last requested load), but rather sizeof(vector) - 1 bytes after the
14959       // last requested vector. The point of this is to avoid a page fault if
14960       // the base address happened to be aligned. This works because if the
14961       // base address is aligned, then adding less than a full vector length
14962       // will cause the last vector in the sequence to be (re)loaded.
14963       // Otherwise, the next vector is fetched, as one would expect to be
14964       // necessary.
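      // As an illustration (big endian, no consecutive load found below):
      // loading a v4i32 from address 0x1003 does an lvx from 0x1000 (the
      // address is truncated), an lvx from 0x1003 + 15 = 0x1012 (truncated to
      // 0x1010), and a vperm whose lvsl control selects the 16 bytes starting
      // at offset 3 of the concatenated results.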
14965 
14966       // We might be able to reuse the permutation generation from
14967       // a different base address offset from this one by an aligned amount.
14968       // The INTRINSIC_WO_CHAIN DAG combine will attempt to perform this
14969       // optimization later.
14970       Intrinsic::ID Intr, IntrLD, IntrPerm;
14971       MVT PermCntlTy, PermTy, LDTy;
14972       Intr = isLittleEndian ? Intrinsic::ppc_altivec_lvsr
14973                             : Intrinsic::ppc_altivec_lvsl;
14974       IntrLD = Intrinsic::ppc_altivec_lvx;
14975       IntrPerm = Intrinsic::ppc_altivec_vperm;
14976       PermCntlTy = MVT::v16i8;
14977       PermTy = MVT::v4i32;
14978       LDTy = MVT::v4i32;
14979 
14980       SDValue PermCntl = BuildIntrinsicOp(Intr, Ptr, DAG, dl, PermCntlTy);
14981 
14982       // Create the new MMO for the new base load. It is like the original MMO,
14983       // but represents an area in memory almost twice the vector size centered
14984       // on the original address. If the address is unaligned, we might start
14985       // reading up to (sizeof(vector)-1) bytes below the address of the
14986       // original unaligned load.
14987       MachineFunction &MF = DAG.getMachineFunction();
14988       MachineMemOperand *BaseMMO =
14989         MF.getMachineMemOperand(LD->getMemOperand(),
14990                                 -(long)MemVT.getStoreSize()+1,
14991                                 2*MemVT.getStoreSize()-1);
14992 
14993       // Create the new base load.
14994       SDValue LDXIntID =
14995           DAG.getTargetConstant(IntrLD, dl, getPointerTy(MF.getDataLayout()));
14996       SDValue BaseLoadOps[] = { Chain, LDXIntID, Ptr };
14997       SDValue BaseLoad =
14998         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
14999                                 DAG.getVTList(PermTy, MVT::Other),
15000                                 BaseLoadOps, LDTy, BaseMMO);
15001 
15002       // Note that the value of IncOffset (which is provided to the next
15003       // load's pointer info offset value, and thus used to calculate the
15004       // alignment), and the value of IncValue (which is actually used to
15005       // increment the pointer value) are different! This is because we
15006       // require the next load to appear to be aligned, even though it
15007       // is actually offset from the base pointer by a lesser amount.
15008       int IncOffset = VT.getSizeInBits() / 8;
15009       int IncValue = IncOffset;
15010 
15011       // Walk (both up and down) the chain looking for another load at the real
15012       // (aligned) offset (the alignment of the other load does not matter in
15013       // this case). If found, then do not use the offset reduction trick, as
15014       // that will prevent the loads from being later combined (as they would
15015       // otherwise be duplicates).
15016       if (!findConsecutiveLoad(LD, DAG))
15017         --IncValue;
15018 
15019       SDValue Increment =
15020           DAG.getConstant(IncValue, dl, getPointerTy(MF.getDataLayout()));
15021       Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
15022 
15023       MachineMemOperand *ExtraMMO =
15024         MF.getMachineMemOperand(LD->getMemOperand(),
15025                                 1, 2*MemVT.getStoreSize()-1);
15026       SDValue ExtraLoadOps[] = { Chain, LDXIntID, Ptr };
15027       SDValue ExtraLoad =
15028         DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, dl,
15029                                 DAG.getVTList(PermTy, MVT::Other),
15030                                 ExtraLoadOps, LDTy, ExtraMMO);
15031 
15032       SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
15033         BaseLoad.getValue(1), ExtraLoad.getValue(1));
15034 
15035       // Because vperm has a big-endian bias, we must reverse the order
15036       // of the input vectors and complement the permute control vector
15037       // when generating little endian code.  We have already handled the
15038       // latter by using lvsr instead of lvsl, so just reverse BaseLoad
15039       // and ExtraLoad here.
15040       SDValue Perm;
15041       if (isLittleEndian)
15042         Perm = BuildIntrinsicOp(IntrPerm,
15043                                 ExtraLoad, BaseLoad, PermCntl, DAG, dl);
15044       else
15045         Perm = BuildIntrinsicOp(IntrPerm,
15046                                 BaseLoad, ExtraLoad, PermCntl, DAG, dl);
15047 
15048       if (VT != PermTy)
15049         Perm = Subtarget.hasAltivec()
15050                    ? DAG.getNode(ISD::BITCAST, dl, VT, Perm)
15051                    : DAG.getNode(ISD::FP_ROUND, dl, VT, Perm,
15052                                  DAG.getTargetConstant(1, dl, MVT::i64));
15053                                // second argument is 1 because this rounding
15054                                // is always exact.
15055 
15056       // The output of the permutation is our loaded result, the TokenFactor is
15057       // our new chain.
15058       DCI.CombineTo(N, Perm, TF);
15059       return SDValue(N, 0);
15060     }
15061     }
15062     break;
15063     case ISD::INTRINSIC_WO_CHAIN: {
15064       bool isLittleEndian = Subtarget.isLittleEndian();
15065       unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
15066       Intrinsic::ID Intr = (isLittleEndian ? Intrinsic::ppc_altivec_lvsr
15067                                            : Intrinsic::ppc_altivec_lvsl);
15068       if (IID == Intr && N->getOperand(1)->getOpcode() == ISD::ADD) {
15069         SDValue Add = N->getOperand(1);
15070 
15071         int Bits = 4 /* 16 byte alignment */;
15072 
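        // If the added offset is known to be a multiple of 16, lvsl/lvsr of
        // (base + offset) produces the same control vector as lvsl/lvsr of
        // base alone, so an existing result can be reused.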
15073         if (DAG.MaskedValueIsZero(Add->getOperand(1),
15074                                   APInt::getAllOnesValue(Bits /* alignment */)
15075                                       .zext(Add.getScalarValueSizeInBits()))) {
15076           SDNode *BasePtr = Add->getOperand(0).getNode();
15077           for (SDNode::use_iterator UI = BasePtr->use_begin(),
15078                                     UE = BasePtr->use_end();
15079                UI != UE; ++UI) {
15080             if (UI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15081                 cast<ConstantSDNode>(UI->getOperand(0))->getZExtValue() ==
15082                     IID) {
15083               // We've found another LVSL/LVSR, and this address is an aligned
15084               // multiple of that one. The results will be the same, so use the
15085               // one we've just found instead.
15086 
15087               return SDValue(*UI, 0);
15088             }
15089           }
15090         }
15091 
15092         if (isa<ConstantSDNode>(Add->getOperand(1))) {
15093           SDNode *BasePtr = Add->getOperand(0).getNode();
15094           for (SDNode::use_iterator UI = BasePtr->use_begin(),
15095                UE = BasePtr->use_end(); UI != UE; ++UI) {
15096             if (UI->getOpcode() == ISD::ADD &&
15097                 isa<ConstantSDNode>(UI->getOperand(1)) &&
15098                 (cast<ConstantSDNode>(Add->getOperand(1))->getZExtValue() -
15099                  cast<ConstantSDNode>(UI->getOperand(1))->getZExtValue()) %
15100                 (1ULL << Bits) == 0) {
15101               SDNode *OtherAdd = *UI;
15102               for (SDNode::use_iterator VI = OtherAdd->use_begin(),
15103                    VE = OtherAdd->use_end(); VI != VE; ++VI) {
15104                 if (VI->getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15105                     cast<ConstantSDNode>(VI->getOperand(0))->getZExtValue() == IID) {
15106                   return SDValue(*VI, 0);
15107                 }
15108               }
15109             }
15110           }
15111         }
15112       }
15113 
15114       // Combine vmaxsw/h/b(a, negation of a) into abs(a) to expose the
15115       // vabsduw/h/b opportunity for downstream combines.
15116       if (!DCI.isAfterLegalizeDAG() && Subtarget.hasP9Altivec() &&
15117           (IID == Intrinsic::ppc_altivec_vmaxsw ||
15118            IID == Intrinsic::ppc_altivec_vmaxsh ||
15119            IID == Intrinsic::ppc_altivec_vmaxsb)) {
15120         SDValue V1 = N->getOperand(1);
15121         SDValue V2 = N->getOperand(2);
15122         if ((V1.getSimpleValueType() == MVT::v4i32 ||
15123              V1.getSimpleValueType() == MVT::v8i16 ||
15124              V1.getSimpleValueType() == MVT::v16i8) &&
15125             V1.getSimpleValueType() == V2.getSimpleValueType()) {
15126           // (0-a, a)
15127           if (V1.getOpcode() == ISD::SUB &&
15128               ISD::isBuildVectorAllZeros(V1.getOperand(0).getNode()) &&
15129               V1.getOperand(1) == V2) {
15130             return DAG.getNode(ISD::ABS, dl, V2.getValueType(), V2);
15131           }
15132           // (a, 0-a)
15133           if (V2.getOpcode() == ISD::SUB &&
15134               ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()) &&
15135               V2.getOperand(1) == V1) {
15136             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
15137           }
15138           // (x-y, y-x)
15139           if (V1.getOpcode() == ISD::SUB && V2.getOpcode() == ISD::SUB &&
15140               V1.getOperand(0) == V2.getOperand(1) &&
15141               V1.getOperand(1) == V2.getOperand(0)) {
15142             return DAG.getNode(ISD::ABS, dl, V1.getValueType(), V1);
15143           }
15144         }
15145       }
15146     }
15147 
15148     break;
15149   case ISD::INTRINSIC_W_CHAIN:
15150     // For little endian, VSX loads require generating lxvd2x/xxswapd.
15151     // Not needed on ISA 3.0 based CPUs since we have a non-permuting load.
15152     if (Subtarget.needsSwapsForVSXMemOps()) {
15153       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15154       default:
15155         break;
15156       case Intrinsic::ppc_vsx_lxvw4x:
15157       case Intrinsic::ppc_vsx_lxvd2x:
15158         return expandVSXLoadForLE(N, DCI);
15159       }
15160     }
15161     break;
15162   case ISD::INTRINSIC_VOID:
15163     // For little endian, VSX stores require generating xxswapd/stxvd2x.
15164     // Not needed on ISA 3.0 based CPUs since we have a non-permuting store.
15165     if (Subtarget.needsSwapsForVSXMemOps()) {
15166       switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
15167       default:
15168         break;
15169       case Intrinsic::ppc_vsx_stxvw4x:
15170       case Intrinsic::ppc_vsx_stxvd2x:
15171         return expandVSXStoreForLE(N, DCI);
15172       }
15173     }
15174     break;
15175   case ISD::BSWAP:
15176     // Turn BSWAP (LOAD) -> lhbrx/lwbrx.
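    // For example, (bswap (load i32 ptr)) becomes a single lwbrx from ptr.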
15177     if (ISD::isNON_EXTLoad(N->getOperand(0).getNode()) &&
15178         N->getOperand(0).hasOneUse() &&
15179         (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i16 ||
15180          (Subtarget.hasLDBRX() && Subtarget.isPPC64() &&
15181           N->getValueType(0) == MVT::i64))) {
15182       SDValue Load = N->getOperand(0);
15183       LoadSDNode *LD = cast<LoadSDNode>(Load);
15184       // Create the byte-swapping load.
15185       SDValue Ops[] = {
15186         LD->getChain(),    // Chain
15187         LD->getBasePtr(),  // Ptr
15188         DAG.getValueType(N->getValueType(0)) // VT
15189       };
15190       SDValue BSLoad =
15191         DAG.getMemIntrinsicNode(PPCISD::LBRX, dl,
15192                                 DAG.getVTList(N->getValueType(0) == MVT::i64 ?
15193                                               MVT::i64 : MVT::i32, MVT::Other),
15194                                 Ops, LD->getMemoryVT(), LD->getMemOperand());
15195 
15196       // If this is an i16 load, insert the truncate.
15197       SDValue ResVal = BSLoad;
15198       if (N->getValueType(0) == MVT::i16)
15199         ResVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, BSLoad);
15200 
15201       // First, combine the bswap away.  This makes the value produced by the
15202       // load dead.
15203       DCI.CombineTo(N, ResVal);
15204 
15205       // Next, combine the load away, we give it a bogus result value but a real
15206       // chain result.  The result value is dead because the bswap is dead.
15207       DCI.CombineTo(Load.getNode(), ResVal, BSLoad.getValue(1));
15208 
15209       // Return N so it doesn't get rechecked!
15210       return SDValue(N, 0);
15211     }
15212     break;
15213   case PPCISD::VCMP:
15214     // If a VCMP_rec node already exists with exactly the same operands as this
15215     // node, use its result instead of this node (VCMP_rec computes both a CR6
15216     // and a normal output).
15217     //
15218     if (!N->getOperand(0).hasOneUse() &&
15219         !N->getOperand(1).hasOneUse() &&
15220         !N->getOperand(2).hasOneUse()) {
15221 
15222       // Scan all of the users of the LHS, looking for VCMP_rec's that match.
15223       SDNode *VCMPrecNode = nullptr;
15224 
15225       SDNode *LHSN = N->getOperand(0).getNode();
15226       for (SDNode::use_iterator UI = LHSN->use_begin(), E = LHSN->use_end();
15227            UI != E; ++UI)
15228         if (UI->getOpcode() == PPCISD::VCMP_rec &&
15229             UI->getOperand(1) == N->getOperand(1) &&
15230             UI->getOperand(2) == N->getOperand(2) &&
15231             UI->getOperand(0) == N->getOperand(0)) {
15232           VCMPrecNode = *UI;
15233           break;
15234         }
15235 
15236       // If there is no VCMP_rec node, or if its flag result is unused,
15237       // don't transform this.
15238       if (!VCMPrecNode || VCMPrecNode->hasNUsesOfValue(0, 1))
15239         break;
15240 
15241       // Look at the (necessarily single) use of the flag value.  If it has a
15242       // chain, this transformation is more complex.  Note that multiple things
15243       // could use the value result, which we should ignore.
15244       SDNode *FlagUser = nullptr;
15245       for (SDNode::use_iterator UI = VCMPrecNode->use_begin();
15246            FlagUser == nullptr; ++UI) {
15247         assert(UI != VCMPrecNode->use_end() && "Didn't find user!");
15248         SDNode *User = *UI;
15249         for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
15250           if (User->getOperand(i) == SDValue(VCMPrecNode, 1)) {
15251             FlagUser = User;
15252             break;
15253           }
15254         }
15255       }
15256 
15257       // If the user is a MFOCRF instruction, we know this is safe.
15258       // Otherwise we give up for right now.
15259       if (FlagUser->getOpcode() == PPCISD::MFOCRF)
15260         return SDValue(VCMPrecNode, 0);
15261     }
15262     break;
15263   case ISD::BRCOND: {
15264     SDValue Cond = N->getOperand(1);
15265     SDValue Target = N->getOperand(2);
15266 
15267     if (Cond.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15268         cast<ConstantSDNode>(Cond.getOperand(1))->getZExtValue() ==
15269           Intrinsic::loop_decrement) {
15270 
15271       // We now need to make the intrinsic dead (it cannot be instruction
15272       // selected).
15273       DAG.ReplaceAllUsesOfValueWith(Cond.getValue(1), Cond.getOperand(0));
15274       assert(Cond.getNode()->hasOneUse() &&
15275              "Counter decrement has more than one use");
15276 
15277       return DAG.getNode(PPCISD::BDNZ, dl, MVT::Other,
15278                          N->getOperand(0), Target);
15279     }
15280   }
15281   break;
15282   case ISD::BR_CC: {
15283     // If this is a branch on an altivec predicate comparison, lower this so
15284     // that we don't have to do a MFOCRF: instead, branch directly on CR6.  This
15285     // lowering is done pre-legalize, because the legalizer lowers the predicate
15286     // compare down to code that is difficult to reassemble.
15287     ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
15288     SDValue LHS = N->getOperand(2), RHS = N->getOperand(3);
15289 
15290     // Sometimes the promoted value of the intrinsic is ANDed with some non-zero
15291     // value. If so, look through the AND to get to the intrinsic.
15292     if (LHS.getOpcode() == ISD::AND &&
15293         LHS.getOperand(0).getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15294         cast<ConstantSDNode>(LHS.getOperand(0).getOperand(1))->getZExtValue() ==
15295           Intrinsic::loop_decrement &&
15296         isa<ConstantSDNode>(LHS.getOperand(1)) &&
15297         !isNullConstant(LHS.getOperand(1)))
15298       LHS = LHS.getOperand(0);
15299 
15300     if (LHS.getOpcode() == ISD::INTRINSIC_W_CHAIN &&
15301         cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() ==
15302           Intrinsic::loop_decrement &&
15303         isa<ConstantSDNode>(RHS)) {
15304       assert((CC == ISD::SETEQ || CC == ISD::SETNE) &&
15305              "Counter decrement comparison is not EQ or NE");
15306 
15307       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15308       bool isBDNZ = (CC == ISD::SETEQ && Val) ||
15309                     (CC == ISD::SETNE && !Val);
15310 
15311       // We now need to make the intrinsic dead (it cannot be instruction
15312       // selected).
15313       DAG.ReplaceAllUsesOfValueWith(LHS.getValue(1), LHS.getOperand(0));
15314       assert(LHS.getNode()->hasOneUse() &&
15315              "Counter decrement has more than one use");
15316 
15317       return DAG.getNode(isBDNZ ? PPCISD::BDNZ : PPCISD::BDZ, dl, MVT::Other,
15318                          N->getOperand(0), N->getOperand(4));
15319     }
15320 
15321     int CompareOpc;
15322     bool isDot;
15323 
15324     if (LHS.getOpcode() == ISD::INTRINSIC_WO_CHAIN &&
15325         isa<ConstantSDNode>(RHS) && (CC == ISD::SETEQ || CC == ISD::SETNE) &&
15326         getVectorCompareInfo(LHS, CompareOpc, isDot, Subtarget)) {
15327       assert(isDot && "Can't compare against a vector result!");
15328 
15329       // If this is a comparison against something other than 0/1, then we know
15330       // that the condition is never/always true.
15331       unsigned Val = cast<ConstantSDNode>(RHS)->getZExtValue();
15332       if (Val != 0 && Val != 1) {
15333         if (CC == ISD::SETEQ)      // Cond never true, remove branch.
15334           return N->getOperand(0);
15335         // Always !=, turn it into an unconditional branch.
15336         return DAG.getNode(ISD::BR, dl, MVT::Other,
15337                            N->getOperand(0), N->getOperand(4));
15338       }
15339 
15340       bool BranchOnWhenPredTrue = (CC == ISD::SETEQ) ^ (Val == 0);
15341 
15342       // Create the PPCISD altivec 'dot' comparison node.
15343       SDValue Ops[] = {
15344         LHS.getOperand(2),  // LHS of compare
15345         LHS.getOperand(3),  // RHS of compare
15346         DAG.getConstant(CompareOpc, dl, MVT::i32)
15347       };
15348       EVT VTs[] = { LHS.getOperand(2).getValueType(), MVT::Glue };
15349       SDValue CompNode = DAG.getNode(PPCISD::VCMP_rec, dl, VTs, Ops);
15350 
15351       // Unpack the result based on how the target uses it.
15352       PPC::Predicate CompOpc;
15353       switch (cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue()) {
15354       default:  // Can't happen, don't crash on invalid number though.
15355       case 0:   // Branch on the value of the EQ bit of CR6.
15356         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_EQ : PPC::PRED_NE;
15357         break;
15358       case 1:   // Branch on the inverted value of the EQ bit of CR6.
15359         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_NE : PPC::PRED_EQ;
15360         break;
15361       case 2:   // Branch on the value of the LT bit of CR6.
15362         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_LT : PPC::PRED_GE;
15363         break;
15364       case 3:   // Branch on the inverted value of the LT bit of CR6.
15365         CompOpc = BranchOnWhenPredTrue ? PPC::PRED_GE : PPC::PRED_LT;
15366         break;
15367       }
15368 
15369       return DAG.getNode(PPCISD::COND_BRANCH, dl, MVT::Other, N->getOperand(0),
15370                          DAG.getConstant(CompOpc, dl, MVT::i32),
15371                          DAG.getRegister(PPC::CR6, MVT::i32),
15372                          N->getOperand(4), CompNode.getValue(1));
15373     }
15374     break;
15375   }
15376   case ISD::BUILD_VECTOR:
15377     return DAGCombineBuildVector(N, DCI);
15378   case ISD::ABS:
15379     return combineABS(N, DCI);
15380   case ISD::VSELECT:
15381     return combineVSelect(N, DCI);
15382   }
15383 
15384   return SDValue();
15385 }
15386 
15387 SDValue
15388 PPCTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
15389                                  SelectionDAG &DAG,
15390                                  SmallVectorImpl<SDNode *> &Created) const {
15391   // fold (sdiv X, pow2)
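  // For example, (sdiv x, 4) becomes the shift-right-algebraic/addze pair
  // represented by PPCISD::SRA_ADDZE with a shift amount of 2; for a divisor
  // of -4 the result is additionally negated.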
15392   EVT VT = N->getValueType(0);
15393   if (VT == MVT::i64 && !Subtarget.isPPC64())
15394     return SDValue();
15395   if ((VT != MVT::i32 && VT != MVT::i64) ||
15396       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
15397     return SDValue();
15398 
15399   SDLoc DL(N);
15400   SDValue N0 = N->getOperand(0);
15401 
15402   bool IsNegPow2 = (-Divisor).isPowerOf2();
15403   unsigned Lg2 = (IsNegPow2 ? -Divisor : Divisor).countTrailingZeros();
15404   SDValue ShiftAmt = DAG.getConstant(Lg2, DL, VT);
15405 
15406   SDValue Op = DAG.getNode(PPCISD::SRA_ADDZE, DL, VT, N0, ShiftAmt);
15407   Created.push_back(Op.getNode());
15408 
15409   if (IsNegPow2) {
15410     Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
15411     Created.push_back(Op.getNode());
15412   }
15413 
15414   return Op;
15415 }
15416 
15417 //===----------------------------------------------------------------------===//
15418 // Inline Assembly Support
15419 //===----------------------------------------------------------------------===//
15420 
15421 void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
15422                                                       KnownBits &Known,
15423                                                       const APInt &DemandedElts,
15424                                                       const SelectionDAG &DAG,
15425                                                       unsigned Depth) const {
15426   Known.resetAll();
15427   switch (Op.getOpcode()) {
15428   default: break;
15429   case PPCISD::LBRX: {
15430     // lhbrx is known to have the top bits cleared out.
15431     if (cast<VTSDNode>(Op.getOperand(2))->getVT() == MVT::i16)
15432       Known.Zero = 0xFFFF0000;
15433     break;
15434   }
15435   case ISD::INTRINSIC_WO_CHAIN: {
15436     switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
15437     default: break;
15438     case Intrinsic::ppc_altivec_vcmpbfp_p:
15439     case Intrinsic::ppc_altivec_vcmpeqfp_p:
15440     case Intrinsic::ppc_altivec_vcmpequb_p:
15441     case Intrinsic::ppc_altivec_vcmpequh_p:
15442     case Intrinsic::ppc_altivec_vcmpequw_p:
15443     case Intrinsic::ppc_altivec_vcmpequd_p:
15444     case Intrinsic::ppc_altivec_vcmpequq_p:
15445     case Intrinsic::ppc_altivec_vcmpgefp_p:
15446     case Intrinsic::ppc_altivec_vcmpgtfp_p:
15447     case Intrinsic::ppc_altivec_vcmpgtsb_p:
15448     case Intrinsic::ppc_altivec_vcmpgtsh_p:
15449     case Intrinsic::ppc_altivec_vcmpgtsw_p:
15450     case Intrinsic::ppc_altivec_vcmpgtsd_p:
15451     case Intrinsic::ppc_altivec_vcmpgtsq_p:
15452     case Intrinsic::ppc_altivec_vcmpgtub_p:
15453     case Intrinsic::ppc_altivec_vcmpgtuh_p:
15454     case Intrinsic::ppc_altivec_vcmpgtuw_p:
15455     case Intrinsic::ppc_altivec_vcmpgtud_p:
15456     case Intrinsic::ppc_altivec_vcmpgtuq_p:
15457       Known.Zero = ~1U;  // All bits but the low one are known to be zero.
15458       break;
15459     }
15460   }
15461   }
15462 }
15463 
15464 Align PPCTargetLowering::getPrefLoopAlignment(MachineLoop *ML) const {
15465   switch (Subtarget.getCPUDirective()) {
15466   default: break;
15467   case PPC::DIR_970:
15468   case PPC::DIR_PWR4:
15469   case PPC::DIR_PWR5:
15470   case PPC::DIR_PWR5X:
15471   case PPC::DIR_PWR6:
15472   case PPC::DIR_PWR6X:
15473   case PPC::DIR_PWR7:
15474   case PPC::DIR_PWR8:
15475   case PPC::DIR_PWR9:
15476   case PPC::DIR_PWR10:
15477   case PPC::DIR_PWR_FUTURE: {
15478     if (!ML)
15479       break;
15480 
15481     if (!DisableInnermostLoopAlign32) {
15482       // If the nested loop is an innermost loop, prefer a 32-byte alignment
15483       // so that we can decrease cache misses and branch-prediction misses.
15484       // The actual alignment of the loop will depend on the hotness check and
15485       // other logic in alignBlocks.
15486       if (ML->getLoopDepth() > 1 && ML->getSubLoops().empty())
15487         return Align(32);
15488     }
15489 
15490     const PPCInstrInfo *TII = Subtarget.getInstrInfo();
15491 
15492     // For small loops (between 5 and 8 instructions), align to a 32-byte
15493     // boundary so that the entire loop fits in one instruction-cache line.
15494     uint64_t LoopSize = 0;
15495     for (auto I = ML->block_begin(), IE = ML->block_end(); I != IE; ++I)
15496       for (auto J = (*I)->begin(), JE = (*I)->end(); J != JE; ++J) {
15497         LoopSize += TII->getInstSizeInBytes(*J);
15498         if (LoopSize > 32)
15499           break;
15500       }
15501 
15502     if (LoopSize > 16 && LoopSize <= 32)
15503       return Align(32);
15504 
15505     break;
15506   }
15507   }
15508 
15509   return TargetLowering::getPrefLoopAlignment(ML);
15510 }
15511 
15512 /// getConstraintType - Given a constraint, return the type of
15513 /// constraint it is for this target.
15514 PPCTargetLowering::ConstraintType
15515 PPCTargetLowering::getConstraintType(StringRef Constraint) const {
15516   if (Constraint.size() == 1) {
15517     switch (Constraint[0]) {
15518     default: break;
15519     case 'b':
15520     case 'r':
15521     case 'f':
15522     case 'd':
15523     case 'v':
15524     case 'y':
15525       return C_RegisterClass;
15526     case 'Z':
15527       // FIXME: While Z does indicate a memory constraint, it specifically
15528       // indicates an r+r address (used in conjunction with the 'y' modifier
15529       // in the replacement string). Currently, we're forcing the base
15530       // register to be r0 in the asm printer (which is interpreted as zero)
15531       // and forming the complete address in the second register. This is
15532       // suboptimal.
15533       return C_Memory;
15534     }
15535   } else if (Constraint == "wc") { // individual CR bits.
15536     return C_RegisterClass;
15537   } else if (Constraint == "wa" || Constraint == "wd" ||
15538              Constraint == "wf" || Constraint == "ws" ||
15539              Constraint == "wi" || Constraint == "ww") {
15540     return C_RegisterClass; // VSX registers.
15541   }
15542   return TargetLowering::getConstraintType(Constraint);
15543 }
15544 
15545 /// Examine constraint type and operand type and determine a weight value.
15546 /// This object must already have been set up with the operand type
15547 /// and the current alternative constraint selected.
15548 TargetLowering::ConstraintWeight
15549 PPCTargetLowering::getSingleConstraintMatchWeight(
15550     AsmOperandInfo &info, const char *constraint) const {
15551   ConstraintWeight weight = CW_Invalid;
15552   Value *CallOperandVal = info.CallOperandVal;
15553   // If we don't have a value, we can't do a match,
15554   // but allow it at the lowest weight.
15555   if (!CallOperandVal)
15556     return CW_Default;
15557   Type *type = CallOperandVal->getType();
15558 
15559   // Look at the constraint type.
15560   if (StringRef(constraint) == "wc" && type->isIntegerTy(1))
15561     return CW_Register; // an individual CR bit.
15562   else if ((StringRef(constraint) == "wa" ||
15563             StringRef(constraint) == "wd" ||
15564             StringRef(constraint) == "wf") &&
15565            type->isVectorTy())
15566     return CW_Register;
15567   else if (StringRef(constraint) == "wi" && type->isIntegerTy(64))
15568     return CW_Register; // registers that just hold 64-bit integer data.
15569   else if (StringRef(constraint) == "ws" && type->isDoubleTy())
15570     return CW_Register;
15571   else if (StringRef(constraint) == "ww" && type->isFloatTy())
15572     return CW_Register;
15573 
15574   switch (*constraint) {
15575   default:
15576     weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
15577     break;
15578   case 'b':
15579     if (type->isIntegerTy())
15580       weight = CW_Register;
15581     break;
15582   case 'f':
15583     if (type->isFloatTy())
15584       weight = CW_Register;
15585     break;
15586   case 'd':
15587     if (type->isDoubleTy())
15588       weight = CW_Register;
15589     break;
15590   case 'v':
15591     if (type->isVectorTy())
15592       weight = CW_Register;
15593     break;
15594   case 'y':
15595     weight = CW_Register;
15596     break;
15597   case 'Z':
15598     weight = CW_Memory;
15599     break;
15600   }
15601   return weight;
15602 }
15603 
15604 std::pair<unsigned, const TargetRegisterClass *>
15605 PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
15606                                                 StringRef Constraint,
15607                                                 MVT VT) const {
15608   if (Constraint.size() == 1) {
15609     // GCC RS6000 Constraint Letters
15610     switch (Constraint[0]) {
15611     case 'b':   // R1-R31
15612       if (VT == MVT::i64 && Subtarget.isPPC64())
15613         return std::make_pair(0U, &PPC::G8RC_NOX0RegClass);
15614       return std::make_pair(0U, &PPC::GPRC_NOR0RegClass);
15615     case 'r':   // R0-R31
15616       if (VT == MVT::i64 && Subtarget.isPPC64())
15617         return std::make_pair(0U, &PPC::G8RCRegClass);
15618       return std::make_pair(0U, &PPC::GPRCRegClass);
15619     // 'd' and 'f' constraints are both defined to be "the floating point
15620     // registers", where one is for 32-bit and the other for 64-bit. We don't
15621     // care much here, so just give them all the same register classes.
15622     case 'd':
15623     case 'f':
15624       if (Subtarget.hasSPE()) {
15625         if (VT == MVT::f32 || VT == MVT::i32)
15626           return std::make_pair(0U, &PPC::GPRCRegClass);
15627         if (VT == MVT::f64 || VT == MVT::i64)
15628           return std::make_pair(0U, &PPC::SPERCRegClass);
15629       } else {
15630         if (VT == MVT::f32 || VT == MVT::i32)
15631           return std::make_pair(0U, &PPC::F4RCRegClass);
15632         if (VT == MVT::f64 || VT == MVT::i64)
15633           return std::make_pair(0U, &PPC::F8RCRegClass);
15634       }
15635       break;
15636     case 'v':
15637       if (Subtarget.hasAltivec())
15638         return std::make_pair(0U, &PPC::VRRCRegClass);
15639       break;
15640     case 'y':   // crrc
15641       return std::make_pair(0U, &PPC::CRRCRegClass);
15642     }
15643   } else if (Constraint == "wc" && Subtarget.useCRBits()) {
15644     // An individual CR bit.
15645     return std::make_pair(0U, &PPC::CRBITRCRegClass);
15646   } else if ((Constraint == "wa" || Constraint == "wd" ||
15647              Constraint == "wf" || Constraint == "wi") &&
15648              Subtarget.hasVSX()) {
15649     return std::make_pair(0U, &PPC::VSRCRegClass);
15650   } else if ((Constraint == "ws" || Constraint == "ww") && Subtarget.hasVSX()) {
15651     if (VT == MVT::f32 && Subtarget.hasP8Vector())
15652       return std::make_pair(0U, &PPC::VSSRCRegClass);
15653     else
15654       return std::make_pair(0U, &PPC::VSFRCRegClass);
15655   } else if (Constraint == "lr") {
15656     if (VT == MVT::i64)
15657       return std::make_pair(0U, &PPC::LR8RCRegClass);
15658     else
15659       return std::make_pair(0U, &PPC::LRRCRegClass);
15660   }
15661 
15662   // Handle special cases of physical registers that are not properly handled
15663   // by the base class.
15664   if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') {
15665     // If we name a VSX register, we can't defer to the base class because it
15666     // will not recognize the correct register (their names will be VSL{0-31}
15667     // and V{0-31} so they won't match). So we match them here.
15668     if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
15669       int VSNum = atoi(Constraint.data() + 3);
15670       assert(VSNum >= 0 && VSNum <= 63 &&
15671              "Attempted to access a vsr out of range");
15672       if (VSNum < 32)
15673         return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
15674       return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
15675     }
15676 
15677     // For float registers, we can't defer to the base class as it will match
15678     // the SPILLTOVSRRC class.
15679     if (Constraint.size() > 3 && Constraint[1] == 'f') {
15680       int RegNum = atoi(Constraint.data() + 2);
15681       if (RegNum > 31 || RegNum < 0)
15682         report_fatal_error("Invalid floating point register number");
15683       if (VT == MVT::f32 || VT == MVT::i32)
15684         return Subtarget.hasSPE()
15685                    ? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass)
15686                    : std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass);
15687       if (VT == MVT::f64 || VT == MVT::i64)
15688         return Subtarget.hasSPE()
15689                    ? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass)
15690                    : std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass);
15691     }
15692   }
15693 
15694   std::pair<unsigned, const TargetRegisterClass *> R =
15695       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
15696 
15697   // r[0-9]+ are used, on PPC64, to refer to the corresponding 64-bit registers
15698   // (which we call X[0-9]+). If a 64-bit value has been requested, and a
15699   // 32-bit GPR has been selected, then 'upgrade' it to the 64-bit parent
15700   // register.
15701   // FIXME: If TargetLowering::getRegForInlineAsmConstraint could somehow use
15702   // the AsmName field from *RegisterInfo.td, then this would not be necessary.
15703   if (R.first && VT == MVT::i64 && Subtarget.isPPC64() &&
15704       PPC::GPRCRegClass.contains(R.first))
15705     return std::make_pair(TRI->getMatchingSuperReg(R.first,
15706                             PPC::sub_32, &PPC::G8RCRegClass),
15707                           &PPC::G8RCRegClass);
15708 
15709   // GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
15710   if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
15711     R.first = PPC::CR0;
15712     R.second = &PPC::CRRCRegClass;
15713   }
15714   // FIXME: This warning should ideally be emitted in the front end.
15715   const auto &TM = getTargetMachine();
15716   if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI()) {
15717     if (((R.first >= PPC::V20 && R.first <= PPC::V31) ||
15718          (R.first >= PPC::VF20 && R.first <= PPC::VF31)) &&
15719         (R.second == &PPC::VSRCRegClass || R.second == &PPC::VSFRCRegClass))
15720       errs() << "warning: vector registers 20 to 32 are reserved in the "
15721                 "default AIX AltiVec ABI and cannot be used\n";
15722   }
15723 
15724   return R;
15725 }
15726 
15727 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
15728 /// vector.  If it is invalid, don't add anything to Ops.
15729 void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
15730                                                      std::string &Constraint,
15731                                                      std::vector<SDValue>&Ops,
15732                                                      SelectionDAG &DAG) const {
15733   SDValue Result;
15734 
15735   // Only support length 1 constraints.
15736   if (Constraint.length() > 1) return;
15737 
15738   char Letter = Constraint[0];
15739   switch (Letter) {
15740   default: break;
15741   case 'I':
15742   case 'J':
15743   case 'K':
15744   case 'L':
15745   case 'M':
15746   case 'N':
15747   case 'O':
15748   case 'P': {
15749     ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
15750     if (!CST) return; // Must be an immediate to match.
15751     SDLoc dl(Op);
15752     int64_t Value = CST->getSExtValue();
15753     EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
15754                          // numbers are printed as such.
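    // For example (illustrative values): "I" accepts immediates such as 100 or
    // -200, "K" accepts 0xFFFF, and "L" accepts 0x10000, i.e. a signed 16-bit
    // value shifted left by 16 bits.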
15755     switch (Letter) {
15756     default: llvm_unreachable("Unknown constraint letter!");
15757     case 'I':  // "I" is a signed 16-bit constant.
15758       if (isInt<16>(Value))
15759         Result = DAG.getTargetConstant(Value, dl, TCVT);
15760       break;
15761     case 'J':  // "J" is a constant with only the high-order 16 bits nonzero.
15762       if (isShiftedUInt<16, 16>(Value))
15763         Result = DAG.getTargetConstant(Value, dl, TCVT);
15764       break;
15765     case 'L':  // "L" is a signed 16-bit constant shifted left 16 bits.
15766       if (isShiftedInt<16, 16>(Value))
15767         Result = DAG.getTargetConstant(Value, dl, TCVT);
15768       break;
15769     case 'K':  // "K" is a constant with only the low-order 16 bits nonzero.
15770       if (isUInt<16>(Value))
15771         Result = DAG.getTargetConstant(Value, dl, TCVT);
15772       break;
15773     case 'M':  // "M" is a constant that is greater than 31.
15774       if (Value > 31)
15775         Result = DAG.getTargetConstant(Value, dl, TCVT);
15776       break;
15777     case 'N':  // "N" is a positive constant that is an exact power of two.
15778       if (Value > 0 && isPowerOf2_64(Value))
15779         Result = DAG.getTargetConstant(Value, dl, TCVT);
15780       break;
15781     case 'O':  // "O" is the constant zero.
15782       if (Value == 0)
15783         Result = DAG.getTargetConstant(Value, dl, TCVT);
15784       break;
15785     case 'P':  // "P" is a constant whose negation is a signed 16-bit constant.
15786       if (isInt<16>(-Value))
15787         Result = DAG.getTargetConstant(Value, dl, TCVT);
15788       break;
15789     }
15790     break;
15791   }
15792   }
15793 
15794   if (Result.getNode()) {
15795     Ops.push_back(Result);
15796     return;
15797   }
15798 
15799   // Handle standard constraint letters.
15800   TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
15801 }
15802 
15803 // isLegalAddressingMode - Return true if the addressing mode represented
15804 // by AM is legal for this target, for a load/store of the specified type.
15805 bool PPCTargetLowering::isLegalAddressingMode(const DataLayout &DL,
15806                                               const AddrMode &AM, Type *Ty,
15807                                               unsigned AS,
15808                                               Instruction *I) const {
15809   // Vector type r+i form is supported since power9 as DQ form. We don't check
15810   // the offset against the DQ form requirement (off % 16 == 0) because, on
15811   // PowerPC, the imm form is preferred and the offset can be adjusted to fit
15812   // it later in the PPCLoopInstrFormPrep pass. Also, since LSR checks legal
15813   // addressing modes with the min and max offsets of an LSRUse, we should be
15814   // a little aggressive here so such an LSRUse can cover its other offsets.
15815   if (Ty->isVectorTy() && AM.BaseOffs != 0 && !Subtarget.hasP9Vector())
15816     return false;
15817 
15818   // PPC allows a sign-extended 16-bit immediate field.
15819   if (AM.BaseOffs <= -(1LL << 16) || AM.BaseOffs >= (1LL << 16)-1)
15820     return false;
15821 
15822   // No global is ever allowed as a base.
15823   if (AM.BaseGV)
15824     return false;
15825 
15826   // PPC only supports r+r and r+i addressing.
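  // For example, "r+i" with a 16-bit offset (Scale == 0) and plain "r+r"
  // (Scale == 1 with no offset) are accepted, while "r+r+i" and scaled forms
  // such as "2*r+i" are rejected below.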
15827   switch (AM.Scale) {
15828   case 0:  // "r+i" or just "i", depending on HasBaseReg.
15829     break;
15830   case 1:
15831     if (AM.HasBaseReg && AM.BaseOffs)  // "r+r+i" is not allowed.
15832       return false;
15833     // Otherwise we have r+r or r+i.
15834     break;
15835   case 2:
15836     if (AM.HasBaseReg || AM.BaseOffs)  // 2*r+r  or  2*r+i is not allowed.
15837       return false;
15838     // Allow 2*r as r+r.
15839     break;
15840   default:
15841     // No other scales are supported.
15842     return false;
15843   }
15844 
15845   return true;
15846 }
15847 
15848 SDValue PPCTargetLowering::LowerRETURNADDR(SDValue Op,
15849                                            SelectionDAG &DAG) const {
15850   MachineFunction &MF = DAG.getMachineFunction();
15851   MachineFrameInfo &MFI = MF.getFrameInfo();
15852   MFI.setReturnAddressIsTaken(true);
15853 
15854   if (verifyReturnAddressArgumentIsConstant(Op, DAG))
15855     return SDValue();
15856 
15857   SDLoc dl(Op);
15858   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15859 
15860   // Make sure the function does not optimize away the store of the RA to
15861   // the stack.
15862   PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
15863   FuncInfo->setLRStoreRequired();
15864   bool isPPC64 = Subtarget.isPPC64();
15865   auto PtrVT = getPointerTy(MF.getDataLayout());
15866 
15867   if (Depth > 0) {
15868     SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
15869     SDValue Offset =
15870         DAG.getConstant(Subtarget.getFrameLowering()->getReturnSaveOffset(), dl,
15871                         isPPC64 ? MVT::i64 : MVT::i32);
15872     return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
15873                        DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
15874                        MachinePointerInfo());
15875   }
15876 
15877   // Just load the return address off the stack.
15878   SDValue RetAddrFI = getReturnAddrFrameIndex(DAG);
15879   return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
15880                      MachinePointerInfo());
15881 }
15882 
15883 SDValue PPCTargetLowering::LowerFRAMEADDR(SDValue Op,
15884                                           SelectionDAG &DAG) const {
15885   SDLoc dl(Op);
15886   unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
15887 
15888   MachineFunction &MF = DAG.getMachineFunction();
15889   MachineFrameInfo &MFI = MF.getFrameInfo();
15890   MFI.setFrameAddressIsTaken(true);
15891 
15892   EVT PtrVT = getPointerTy(MF.getDataLayout());
15893   bool isPPC64 = PtrVT == MVT::i64;
15894 
15895   // Naked functions never have a frame pointer, and so we use r1. For all
15896   // other functions, this decision must be delayed until PEI.
15897   unsigned FrameReg;
15898   if (MF.getFunction().hasFnAttribute(Attribute::Naked))
15899     FrameReg = isPPC64 ? PPC::X1 : PPC::R1;
15900   else
15901     FrameReg = isPPC64 ? PPC::FP8 : PPC::FP;
15902 
15903   SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg,
15904                                          PtrVT);
15905   while (Depth--)
15906     FrameAddr = DAG.getLoad(Op.getValueType(), dl, DAG.getEntryNode(),
15907                             FrameAddr, MachinePointerInfo());
15908   return FrameAddr;
15909 }
15910 
15911 // FIXME? Maybe this could be a TableGen attribute on some registers and
15912 // this table could be generated automatically from RegInfo.
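// For example (illustrative), a global register variable declared in C as
//   register unsigned long GPtr asm("r13");
// reaches this hook with RegName == "r13" and maps to X13 on 64-bit targets.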
15913 Register PPCTargetLowering::getRegisterByName(const char* RegName, LLT VT,
15914                                               const MachineFunction &MF) const {
15915   bool isPPC64 = Subtarget.isPPC64();
15916 
15917   bool is64Bit = isPPC64 && VT == LLT::scalar(64);
15918   if (!is64Bit && VT != LLT::scalar(32))
15919     report_fatal_error("Invalid register global variable type");
15920 
15921   Register Reg = StringSwitch<Register>(RegName)
15922                      .Case("r1", is64Bit ? PPC::X1 : PPC::R1)
15923                      .Case("r2", isPPC64 ? Register() : PPC::R2)
15924                      .Case("r13", (is64Bit ? PPC::X13 : PPC::R13))
15925                      .Default(Register());
15926 
15927   if (Reg)
15928     return Reg;
15929   report_fatal_error("Invalid register name global variable");
15930 }
15931 
15932 bool PPCTargetLowering::isAccessedAsGotIndirect(SDValue GA) const {
15933   // The 32-bit SVR4 ABI accesses everything as got-indirect.
15934   if (Subtarget.is32BitELFABI())
15935     return true;
15936 
15937   // AIX accesses everything indirectly through the TOC, which is similar to
15938   // the GOT.
15939   if (Subtarget.isAIXABI())
15940     return true;
15941 
15942   CodeModel::Model CModel = getTargetMachine().getCodeModel();
15943   // If it is small or large code model, module locals are accessed
15944   // indirectly by loading their address from .toc/.got.
15945   if (CModel == CodeModel::Small || CModel == CodeModel::Large)
15946     return true;
15947 
15948   // JumpTable and BlockAddress are accessed as got-indirect.
15949   if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA))
15950     return true;
15951 
15952   if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(GA))
15953     return Subtarget.isGVIndirectSymbol(G->getGlobal());
15954 
15955   return false;
15956 }
15957 
15958 bool
15959 PPCTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
15960   // The PowerPC target isn't yet aware of offsets.
15961   return false;
15962 }
15963 
15964 bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
15965                                            const CallInst &I,
15966                                            MachineFunction &MF,
15967                                            unsigned Intrinsic) const {
15968   switch (Intrinsic) {
15969   case Intrinsic::ppc_altivec_lvx:
15970   case Intrinsic::ppc_altivec_lvxl:
15971   case Intrinsic::ppc_altivec_lvebx:
15972   case Intrinsic::ppc_altivec_lvehx:
15973   case Intrinsic::ppc_altivec_lvewx:
15974   case Intrinsic::ppc_vsx_lxvd2x:
15975   case Intrinsic::ppc_vsx_lxvw4x:
15976   case Intrinsic::ppc_vsx_lxvd2x_be:
15977   case Intrinsic::ppc_vsx_lxvw4x_be:
15978   case Intrinsic::ppc_vsx_lxvl:
15979   case Intrinsic::ppc_vsx_lxvll: {
15980     EVT VT;
15981     switch (Intrinsic) {
15982     case Intrinsic::ppc_altivec_lvebx:
15983       VT = MVT::i8;
15984       break;
15985     case Intrinsic::ppc_altivec_lvehx:
15986       VT = MVT::i16;
15987       break;
15988     case Intrinsic::ppc_altivec_lvewx:
15989       VT = MVT::i32;
15990       break;
15991     case Intrinsic::ppc_vsx_lxvd2x:
15992     case Intrinsic::ppc_vsx_lxvd2x_be:
15993       VT = MVT::v2f64;
15994       break;
15995     default:
15996       VT = MVT::v4i32;
15997       break;
15998     }
15999 
16000     Info.opc = ISD::INTRINSIC_W_CHAIN;
16001     Info.memVT = VT;
16002     Info.ptrVal = I.getArgOperand(0);
16003     Info.offset = -VT.getStoreSize()+1;
16004     Info.size = 2*VT.getStoreSize()-1;
16005     Info.align = Align(1);
16006     Info.flags = MachineMemOperand::MOLoad;
16007     return true;
16008   }
16009   case Intrinsic::ppc_altivec_stvx:
16010   case Intrinsic::ppc_altivec_stvxl:
16011   case Intrinsic::ppc_altivec_stvebx:
16012   case Intrinsic::ppc_altivec_stvehx:
16013   case Intrinsic::ppc_altivec_stvewx:
16014   case Intrinsic::ppc_vsx_stxvd2x:
16015   case Intrinsic::ppc_vsx_stxvw4x:
16016   case Intrinsic::ppc_vsx_stxvd2x_be:
16017   case Intrinsic::ppc_vsx_stxvw4x_be:
16018   case Intrinsic::ppc_vsx_stxvl:
16019   case Intrinsic::ppc_vsx_stxvll: {
16020     EVT VT;
16021     switch (Intrinsic) {
16022     case Intrinsic::ppc_altivec_stvebx:
16023       VT = MVT::i8;
16024       break;
16025     case Intrinsic::ppc_altivec_stvehx:
16026       VT = MVT::i16;
16027       break;
16028     case Intrinsic::ppc_altivec_stvewx:
16029       VT = MVT::i32;
16030       break;
16031     case Intrinsic::ppc_vsx_stxvd2x:
16032     case Intrinsic::ppc_vsx_stxvd2x_be:
16033       VT = MVT::v2f64;
16034       break;
16035     default:
16036       VT = MVT::v4i32;
16037       break;
16038     }
16039 
16040     Info.opc = ISD::INTRINSIC_VOID;
16041     Info.memVT = VT;
16042     Info.ptrVal = I.getArgOperand(1);
16043     Info.offset = -VT.getStoreSize()+1;
16044     Info.size = 2*VT.getStoreSize()-1;
16045     Info.align = Align(1);
16046     Info.flags = MachineMemOperand::MOStore;
16047     return true;
16048   }
16049   default:
16050     break;
16051   }
16052 
16053   return false;
16054 }
16055 
16056 /// It returns EVT::Other if the type should be determined using generic
16057 /// target-independent logic.
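/// For example, a 32-byte memcpy on a subtarget with P8 vector support is
/// widened using MVT::v4i32 chunks, while copies that cannot use Altivec/VSX
/// fall back to MVT::i64 (64-bit) or MVT::i32 (32-bit) below.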
16058 EVT PPCTargetLowering::getOptimalMemOpType(
16059     const MemOp &Op, const AttributeList &FuncAttributes) const {
16060   if (getTargetMachine().getOptLevel() != CodeGenOpt::None) {
16061     // We should use Altivec/VSX loads and stores when available. For unaligned
16062     // addresses, unaligned VSX loads are only fast starting with the P8.
16063     if (Subtarget.hasAltivec() && Op.size() >= 16 &&
16064         (Op.isAligned(Align(16)) ||
16065          ((Op.isMemset() && Subtarget.hasVSX()) || Subtarget.hasP8Vector())))
16066       return MVT::v4i32;
16067   }
16068 
16069   if (Subtarget.isPPC64()) {
16070     return MVT::i64;
16071   }
16072 
16073   return MVT::i32;
16074 }
16075 
16076 /// Returns true if it is beneficial to convert a load of a constant
16077 /// to just the constant itself.
16078 bool PPCTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
16079                                                           Type *Ty) const {
16080   assert(Ty->isIntegerTy());
16081 
16082   unsigned BitSize = Ty->getPrimitiveSizeInBits();
16083   return !(BitSize == 0 || BitSize > 64);
16084 }
16085 
16086 bool PPCTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
16087   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
16088     return false;
16089   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
16090   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
16091   return NumBits1 == 64 && NumBits2 == 32;
16092 }
16093 
16094 bool PPCTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
16095   if (!VT1.isInteger() || !VT2.isInteger())
16096     return false;
16097   unsigned NumBits1 = VT1.getSizeInBits();
16098   unsigned NumBits2 = VT2.getSizeInBits();
16099   return NumBits1 == 64 && NumBits2 == 32;
16100 }
16101 
16102 bool PPCTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
16103   // Generally speaking, zexts are not free, but they are free when they can be
16104   // folded with other operations.
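  // For example, a zext of an i16 load is folded into the load itself (lhz
  // already zero-extends), so the zext costs nothing.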
16105   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Val)) {
16106     EVT MemVT = LD->getMemoryVT();
16107     if ((MemVT == MVT::i1 || MemVT == MVT::i8 || MemVT == MVT::i16 ||
16108          (Subtarget.isPPC64() && MemVT == MVT::i32)) &&
16109         (LD->getExtensionType() == ISD::NON_EXTLOAD ||
16110          LD->getExtensionType() == ISD::ZEXTLOAD))
16111       return true;
16112   }
16113 
16114   // FIXME: Add other cases...
16115   //  - 32-bit shifts with a zext to i64
16116   //  - zext after ctlz, bswap, etc.
16117   //  - zext after and by a constant mask
16118 
16119   return TargetLowering::isZExtFree(Val, VT2);
16120 }
16121 
16122 bool PPCTargetLowering::isFPExtFree(EVT DestVT, EVT SrcVT) const {
16123   assert(DestVT.isFloatingPoint() && SrcVT.isFloatingPoint() &&
16124          "invalid fpext types");
16125   // Extending to float128 is not free.
16126   if (DestVT == MVT::f128)
16127     return false;
16128   return true;
16129 }
16130 
16131 bool PPCTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
16132   return isInt<16>(Imm) || isUInt<16>(Imm);
16133 }
16134 
16135 bool PPCTargetLowering::isLegalAddImmediate(int64_t Imm) const {
16136   return isInt<16>(Imm) || isUInt<16>(Imm);
16137 }
16138 
16139 bool PPCTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
16140                                                        MachineMemOperand::Flags,
16141                                                        bool *Fast) const {
16142   if (DisablePPCUnaligned)
16143     return false;
16144 
16145   // PowerPC supports unaligned memory access for simple non-vector types.
16146   // Although accessing unaligned addresses is not as efficient as accessing
16147   // aligned addresses, it is generally more efficient than manual expansion,
16148   // and generally only traps for software emulation when crossing page
16149   // boundaries.
16150 
16151   if (!VT.isSimple())
16152     return false;
16153 
16154   if (VT.isFloatingPoint() && !VT.isVector() &&
16155       !Subtarget.allowsUnalignedFPAccess())
16156     return false;
16157 
16158   if (VT.getSimpleVT().isVector()) {
16159     if (Subtarget.hasVSX()) {
16160       if (VT != MVT::v2f64 && VT != MVT::v2i64 &&
16161           VT != MVT::v4f32 && VT != MVT::v4i32)
16162         return false;
16163     } else {
16164       return false;
16165     }
16166   }
16167 
16168   if (VT == MVT::ppcf128)
16169     return false;
16170 
16171   if (Fast)
16172     *Fast = true;
16173 
16174   return true;
16175 }
16176 
16177 bool PPCTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
16178                                                SDValue C) const {
16179   // Check integral scalar types.
16180   if (!VT.isScalarInteger())
16181     return false;
16182   if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
16183     if (!ConstNode->getAPIntValue().isSignedIntN(64))
16184       return false;
16185     // This transformation will generate >= 2 operations. But the following
16186     // cases will generate <= 2 instructions during ISEL, so exclude them:
16187     // 1. If the constant multiplier fits in 16 bits, it can be handled by a
16188     // single HW instruction, i.e. MULLI.
16189     // 2. If the multiplier fits in 16 bits after shifting out trailing zeros,
16190     // only one extra shift is needed compared to case 1, i.e. MULLI and RLDICR.
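    // For example, multipliers such as 10 (a single MULLI) or 40 (MULLI plus
    // RLDICR after stripping trailing zeros) are kept as multiplies, while a
    // value like 0x1000000010 (16 * (2^32 + 1)) is worth decomposing.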
16191     int64_t Imm = ConstNode->getSExtValue();
16192     unsigned Shift = countTrailingZeros<uint64_t>(Imm);
16193     Imm >>= Shift;
16194     if (isInt<16>(Imm))
16195       return false;
16196     uint64_t UImm = static_cast<uint64_t>(Imm);
16197     if (isPowerOf2_64(UImm + 1) || isPowerOf2_64(UImm - 1) ||
16198         isPowerOf2_64(1 - UImm) || isPowerOf2_64(-1 - UImm))
16199       return true;
16200   }
16201   return false;
16202 }
16203 
16204 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
16205                                                    EVT VT) const {
16206   return isFMAFasterThanFMulAndFAdd(
16207       MF.getFunction(), VT.getTypeForEVT(MF.getFunction().getContext()));
16208 }
16209 
16210 bool PPCTargetLowering::isFMAFasterThanFMulAndFAdd(const Function &F,
16211                                                    Type *Ty) const {
16212   switch (Ty->getScalarType()->getTypeID()) {
16213   case Type::FloatTyID:
16214   case Type::DoubleTyID:
16215     return true;
16216   case Type::FP128TyID:
16217     return Subtarget.hasP9Vector();
16218   default:
16219     return false;
16220   }
16221 }
16222 
16223 // FIXME: add more patterns which are not profitable to hoist.
16224 bool PPCTargetLowering::isProfitableToHoist(Instruction *I) const {
16225   if (!I->hasOneUse())
16226     return true;
16227 
16228   Instruction *User = I->user_back();
16229   assert(User && "A single use instruction with no uses.");
16230 
16231   switch (I->getOpcode()) {
16232   case Instruction::FMul: {
16233     // Don't break FMA, PowerPC prefers FMA.
16234     if (User->getOpcode() != Instruction::FSub &&
16235         User->getOpcode() != Instruction::FAdd)
16236       return true;
16237 
16238     const TargetOptions &Options = getTargetMachine().Options;
16239     const Function *F = I->getFunction();
16240     const DataLayout &DL = F->getParent()->getDataLayout();
16241     Type *Ty = User->getOperand(0)->getType();
16242 
16243     return !(
16244         isFMAFasterThanFMulAndFAdd(*F, Ty) &&
16245         isOperationLegalOrCustom(ISD::FMA, getValueType(DL, Ty)) &&
16246         (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath));
16247   }
16248   case Instruction::Load: {
16249     // Don't break the "store (load float*)" pattern; it will be combined to
16250     // "store (load int32)" by a later InstCombine pass (see the function
16251     // combineLoadToOperationType). On PowerPC, loading a floating-point value
16252     // takes more cycles than loading a 32-bit integer.
16253     LoadInst *LI = cast<LoadInst>(I);
16254     // For loads that combineLoadToOperationType leaves alone, such as
16255     // ordered loads, it should be profitable to hoist them.
16256     // A swifterror load can only have pointer-to-pointer type, so the type
16257     // check below gets rid of that case.
16258     if (!LI->isUnordered())
16259       return true;
16260 
16261     if (User->getOpcode() != Instruction::Store)
16262       return true;
16263 
16264     if (I->getType()->getTypeID() != Type::FloatTyID)
16265       return true;
16266 
16267     return false;
16268   }
16269   default:
16270     return true;
16271   }
16272   return true;
16273 }
16274 
16275 const MCPhysReg *
16276 PPCTargetLowering::getScratchRegisters(CallingConv::ID) const {
16277   // LR is a callee-save register, but we must treat it as clobbered by any call
16278   // site. Hence we include LR in the scratch registers, which are in turn added
16279   // as implicit-defs for stackmaps and patchpoints. The same reasoning applies
16280   // to CTR, which is used by any indirect call.
16281   static const MCPhysReg ScratchRegs[] = {
16282     PPC::X12, PPC::LR8, PPC::CTR8, 0
16283   };
16284 
16285   return ScratchRegs;
16286 }
16287 
16288 Register PPCTargetLowering::getExceptionPointerRegister(
16289     const Constant *PersonalityFn) const {
16290   return Subtarget.isPPC64() ? PPC::X3 : PPC::R3;
16291 }
16292 
16293 Register PPCTargetLowering::getExceptionSelectorRegister(
16294     const Constant *PersonalityFn) const {
16295   return Subtarget.isPPC64() ? PPC::X4 : PPC::R4;
16296 }
16297 
16298 bool
16299 PPCTargetLowering::shouldExpandBuildVectorWithShuffles(
16300                      EVT VT , unsigned DefinedValues) const {
16301   if (VT == MVT::v2i64)
16302     return Subtarget.hasDirectMove(); // Don't need stack ops with direct moves
16303 
16304   if (Subtarget.hasVSX())
16305     return true;
16306 
16307   return TargetLowering::shouldExpandBuildVectorWithShuffles(VT, DefinedValues);
16308 }
16309 
16310 Sched::Preference PPCTargetLowering::getSchedulingPreference(SDNode *N) const {
16311   if (DisableILPPref || Subtarget.enableMachineScheduler())
16312     return TargetLowering::getSchedulingPreference(N);
16313 
16314   return Sched::ILP;
16315 }
16316 
16317 // Create a fast isel object.
16318 FastISel *
16319 PPCTargetLowering::createFastISel(FunctionLoweringInfo &FuncInfo,
16320                                   const TargetLibraryInfo *LibInfo) const {
16321   return PPC::createFastISel(FuncInfo, LibInfo);
16322 }
16323 
16324 // 'Inverted' means the FMA opcode after negating one multiplicand.
16325 // For example, (fma -a b c) = (fnmsub a b c)
16326 static unsigned invertFMAOpcode(unsigned Opc) {
16327   switch (Opc) {
16328   default:
16329     llvm_unreachable("Invalid FMA opcode for PowerPC!");
16330   case ISD::FMA:
16331     return PPCISD::FNMSUB;
16332   case PPCISD::FNMSUB:
16333     return ISD::FMA;
16334   }
16335 }
16336 
16337 SDValue PPCTargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
16338                                                 bool LegalOps, bool OptForSize,
16339                                                 NegatibleCost &Cost,
16340                                                 unsigned Depth) const {
16341   if (Depth > SelectionDAG::MaxRecursionDepth)
16342     return SDValue();
16343 
16344   unsigned Opc = Op.getOpcode();
16345   EVT VT = Op.getValueType();
16346   SDNodeFlags Flags = Op.getNode()->getFlags();
16347 
16348   switch (Opc) {
16349   case PPCISD::FNMSUB:
16350     if (!Op.hasOneUse() || !isTypeLegal(VT))
16351       break;
16352 
16353     const TargetOptions &Options = getTargetMachine().Options;
16354     SDValue N0 = Op.getOperand(0);
16355     SDValue N1 = Op.getOperand(1);
16356     SDValue N2 = Op.getOperand(2);
16357     SDLoc Loc(Op);
16358 
16359     NegatibleCost N2Cost = NegatibleCost::Expensive;
16360     SDValue NegN2 =
16361         getNegatedExpression(N2, DAG, LegalOps, OptForSize, N2Cost, Depth + 1);
16362 
16363     if (!NegN2)
16364       return SDValue();
16365 
16366     // (fneg (fnmsub a b c)) => (fnmsub (fneg a) b (fneg c))
16367     // (fneg (fnmsub a b c)) => (fnmsub a (fneg b) (fneg c))
16368     // These transformations may change sign of zeroes. For example,
16369     // -(-ab-(-c))=-0 while -(-(ab-c))=+0 when a=b=c=1.
16370     if (Flags.hasNoSignedZeros() || Options.NoSignedZerosFPMath) {
16371       // Try and choose the cheaper one to negate.
16372       NegatibleCost N0Cost = NegatibleCost::Expensive;
16373       SDValue NegN0 = getNegatedExpression(N0, DAG, LegalOps, OptForSize,
16374                                            N0Cost, Depth + 1);
16375 
16376       NegatibleCost N1Cost = NegatibleCost::Expensive;
16377       SDValue NegN1 = getNegatedExpression(N1, DAG, LegalOps, OptForSize,
16378                                            N1Cost, Depth + 1);
16379 
16380       if (NegN0 && N0Cost <= N1Cost) {
16381         Cost = std::min(N0Cost, N2Cost);
16382         return DAG.getNode(Opc, Loc, VT, NegN0, N1, NegN2, Flags);
16383       } else if (NegN1) {
16384         Cost = std::min(N1Cost, N2Cost);
16385         return DAG.getNode(Opc, Loc, VT, N0, NegN1, NegN2, Flags);
16386       }
16387     }
16388 
16389     // (fneg (fnmsub a b c)) => (fma a b (fneg c))
16390     if (isOperationLegal(ISD::FMA, VT)) {
16391       Cost = N2Cost;
16392       return DAG.getNode(ISD::FMA, Loc, VT, N0, N1, NegN2, Flags);
16393     }
16394 
16395     break;
16396   }
16397 
16398   return TargetLowering::getNegatedExpression(Op, DAG, LegalOps, OptForSize,
16399                                               Cost, Depth);
16400 }
16401 
16402 // Override to enable LOAD_STACK_GUARD lowering on Linux.
16403 bool PPCTargetLowering::useLoadStackGuardNode() const {
16404   if (!Subtarget.isTargetLinux())
16405     return TargetLowering::useLoadStackGuardNode();
16406   return true;
16407 }
16408 
16409 // Override to disable global variable loading on Linux.
16410 void PPCTargetLowering::insertSSPDeclarations(Module &M) const {
16411   if (!Subtarget.isTargetLinux())
16412     return TargetLowering::insertSSPDeclarations(M);
16413 }
16414 
16415 bool PPCTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
16416                                      bool ForCodeSize) const {
16417   if (!VT.isSimple() || !Subtarget.hasVSX())
16418     return false;
16419 
16420   switch(VT.getSimpleVT().SimpleTy) {
16421   default:
16422     // For FP types that are currently not supported by the PPC backend,
16423     // return false. Examples: f16, f80.
16424     return false;
16425   case MVT::f32:
16426   case MVT::f64:
16427     if (Subtarget.hasPrefixInstrs()) {
16428       // We can materialize all immediates via XXSPLTI32DX and XXSPLTIDP.
16429       return true;
16430     }
16431     LLVM_FALLTHROUGH;
16432   case MVT::ppcf128:
16433     return Imm.isPosZero();
16434   }
16435 }
16436 
16437 // For vector shift operation op, fold
16438 // (op x, (and y, ((1 << numbits(x)) - 1))) -> (target op x, y)
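// For example, (shl v4i32:x, (and y, 31)) becomes (PPCISD::SHL x, y), since
// the vector shift instructions only use the low log2(element-width) bits of
// the shift amount anyway.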
16439 static SDValue stripModuloOnShift(const TargetLowering &TLI, SDNode *N,
16440                                   SelectionDAG &DAG) {
16441   SDValue N0 = N->getOperand(0);
16442   SDValue N1 = N->getOperand(1);
16443   EVT VT = N0.getValueType();
16444   unsigned OpSizeInBits = VT.getScalarSizeInBits();
16445   unsigned Opcode = N->getOpcode();
16446   unsigned TargetOpcode;
16447 
16448   switch (Opcode) {
16449   default:
16450     llvm_unreachable("Unexpected shift operation");
16451   case ISD::SHL:
16452     TargetOpcode = PPCISD::SHL;
16453     break;
16454   case ISD::SRL:
16455     TargetOpcode = PPCISD::SRL;
16456     break;
16457   case ISD::SRA:
16458     TargetOpcode = PPCISD::SRA;
16459     break;
16460   }
16461 
16462   if (VT.isVector() && TLI.isOperationLegal(Opcode, VT) &&
16463       N1->getOpcode() == ISD::AND)
16464     if (ConstantSDNode *Mask = isConstOrConstSplat(N1->getOperand(1)))
16465       if (Mask->getZExtValue() == OpSizeInBits - 1)
16466         return DAG.getNode(TargetOpcode, SDLoc(N), VT, N0, N1->getOperand(0));
16467 
16468   return SDValue();
16469 }
16470 
16471 SDValue PPCTargetLowering::combineSHL(SDNode *N, DAGCombinerInfo &DCI) const {
16472   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16473     return Value;
16474 
16475   SDValue N0 = N->getOperand(0);
16476   ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1));
16477   if (!Subtarget.isISA3_0() || !Subtarget.isPPC64() ||
16478       N0.getOpcode() != ISD::SIGN_EXTEND ||
16479       N0.getOperand(0).getValueType() != MVT::i32 || CN1 == nullptr ||
16480       N->getValueType(0) != MVT::i64)
16481     return SDValue();
16482 
16483   // We can't save an operation here if the value is already extended, and
16484   // the existing shift is easier to combine.
16485   SDValue ExtsSrc = N0.getOperand(0);
16486   if (ExtsSrc.getOpcode() == ISD::TRUNCATE &&
16487       ExtsSrc.getOperand(0).getOpcode() == ISD::AssertSext)
16488     return SDValue();
16489 
16490   SDLoc DL(N0);
16491   SDValue ShiftBy = SDValue(CN1, 0);
16492   // We want the shift amount to be i32 on the extswli, but the shift amount
16493   // node could be i64.
16494   if (ShiftBy.getValueType() == MVT::i64)
16495     ShiftBy = DCI.DAG.getConstant(CN1->getZExtValue(), DL, MVT::i32);
16496 
16497   return DCI.DAG.getNode(PPCISD::EXTSWSLI, DL, MVT::i64, N0->getOperand(0),
16498                          ShiftBy);
16499 }
16500 
16501 SDValue PPCTargetLowering::combineSRA(SDNode *N, DAGCombinerInfo &DCI) const {
16502   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16503     return Value;
16504 
16505   return SDValue();
16506 }
16507 
16508 SDValue PPCTargetLowering::combineSRL(SDNode *N, DAGCombinerInfo &DCI) const {
16509   if (auto Value = stripModuloOnShift(*this, N, DCI.DAG))
16510     return Value;
16511 
16512   return SDValue();
16513 }
16514 
16515 // Transform (add X, (zext(setne Z, C))) -> (addze X, (addic (addi Z, -C), -1))
16516 // Transform (add X, (zext(sete  Z, C))) -> (addze X, (subfic (addi Z, -C), 0))
16517 // When C is zero, the equation (addi Z, -C) can be simplified to Z
16518 // Requirement: -C in [-32768, 32767], X and Z are MVT::i64 types
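// For example, with C == 0, (add X, (zext (setne Z, 0))) becomes
// (addze X, (addic Z, -1).carry), i.e. X is incremented exactly when Z != 0.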
16519 static SDValue combineADDToADDZE(SDNode *N, SelectionDAG &DAG,
16520                                  const PPCSubtarget &Subtarget) {
16521   if (!Subtarget.isPPC64())
16522     return SDValue();
16523 
16524   SDValue LHS = N->getOperand(0);
16525   SDValue RHS = N->getOperand(1);
16526 
16527   auto isZextOfCompareWithConstant = [](SDValue Op) {
16528     if (Op.getOpcode() != ISD::ZERO_EXTEND || !Op.hasOneUse() ||
16529         Op.getValueType() != MVT::i64)
16530       return false;
16531 
16532     SDValue Cmp = Op.getOperand(0);
16533     if (Cmp.getOpcode() != ISD::SETCC || !Cmp.hasOneUse() ||
16534         Cmp.getOperand(0).getValueType() != MVT::i64)
16535       return false;
16536 
16537     if (auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1))) {
16538       int64_t NegConstant = 0 - Constant->getSExtValue();
16539       // Due to the limitations of the addi instruction,
16540       // -C is required to be in [-32768, 32767].
16541       return isInt<16>(NegConstant);
16542     }
16543 
16544     return false;
16545   };
16546 
16547   bool LHSHasPattern = isZextOfCompareWithConstant(LHS);
16548   bool RHSHasPattern = isZextOfCompareWithConstant(RHS);
16549 
16550   // If there is a pattern, canonicalize a zext operand to the RHS.
16551   if (LHSHasPattern && !RHSHasPattern)
16552     std::swap(LHS, RHS);
16553   else if (!LHSHasPattern && !RHSHasPattern)
16554     return SDValue();
16555 
16556   SDLoc DL(N);
16557   SDVTList VTs = DAG.getVTList(MVT::i64, MVT::Glue);
16558   SDValue Cmp = RHS.getOperand(0);
16559   SDValue Z = Cmp.getOperand(0);
16560   auto *Constant = dyn_cast<ConstantSDNode>(Cmp.getOperand(1));
16561 
16562   assert(Constant && "Constant Should not be a null pointer.");
16563   int64_t NegConstant = 0 - Constant->getSExtValue();
16564 
16565   switch(cast<CondCodeSDNode>(Cmp.getOperand(2))->get()) {
16566   default: break;
16567   case ISD::SETNE: {
16568     //                                 when C == 0
16569     //                             --> addze X, (addic Z, -1).carry
16570     //                            /
16571     // add X, (zext(setne Z, C))--
16572     //                            \    when -32768 <= -C <= 32767 && C != 0
16573     //                             --> addze X, (addic (addi Z, -C), -1).carry
16574     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16575                               DAG.getConstant(NegConstant, DL, MVT::i64));
16576     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16577     SDValue Addc = DAG.getNode(ISD::ADDC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16578                                AddOrZ, DAG.getConstant(-1ULL, DL, MVT::i64));
16579     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16580                        SDValue(Addc.getNode(), 1));
16581     }
16582   case ISD::SETEQ: {
16583     //                                 when C == 0
16584     //                             --> addze X, (subfic Z, 0).carry
16585     //                            /
16586     // add X, (zext(sete  Z, C))--
16587     //                            \    when -32768 <= -C <= 32767 && C != 0
16588     //                             --> addze X, (subfic (addi Z, -C), 0).carry
16589     SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Z,
16590                               DAG.getConstant(NegConstant, DL, MVT::i64));
16591     SDValue AddOrZ = NegConstant != 0 ? Add : Z;
16592     SDValue Subc = DAG.getNode(ISD::SUBC, DL, DAG.getVTList(MVT::i64, MVT::Glue),
16593                                DAG.getConstant(0, DL, MVT::i64), AddOrZ);
16594     return DAG.getNode(ISD::ADDE, DL, VTs, LHS, DAG.getConstant(0, DL, MVT::i64),
16595                        SDValue(Subc.getNode(), 1));
16596     }
16597   }
16598 
16599   return SDValue();
16600 }
16601 
16602 // Transform
16603 // (add C1, (MAT_PCREL_ADDR GlobalAddr+C2)) to
16604 // (MAT_PCREL_ADDR GlobalAddr+(C1+C2))
16605 // In this case both C1 and C2 must be known constants.
16606 // C1+C2 must fit into a 34 bit signed integer.
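// For example, (add (MAT_PCREL_ADDR GlobalAddr+8), 16) becomes
// (MAT_PCREL_ADDR GlobalAddr+24), provided 24 fits in a signed 34-bit field.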
16607 static SDValue combineADDToMAT_PCREL_ADDR(SDNode *N, SelectionDAG &DAG,
16608                                           const PPCSubtarget &Subtarget) {
16609   if (!Subtarget.isUsingPCRelativeCalls())
16610     return SDValue();
16611 
16612   // Check both Operand 0 and Operand 1 of the ADD node for the PCRel node.
16613   // If we find that node, try to cast the Global Address and the Constant.
16614   SDValue LHS = N->getOperand(0);
16615   SDValue RHS = N->getOperand(1);
16616 
16617   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16618     std::swap(LHS, RHS);
16619 
16620   if (LHS.getOpcode() != PPCISD::MAT_PCREL_ADDR)
16621     return SDValue();
16622 
16623   // Operand zero of PPCISD::MAT_PCREL_ADDR is the GA node.
16624   GlobalAddressSDNode *GSDN = dyn_cast<GlobalAddressSDNode>(LHS.getOperand(0));
16625   ConstantSDNode* ConstNode = dyn_cast<ConstantSDNode>(RHS);
16626 
16627   // Check that both casts succeeded.
16628   if (!GSDN || !ConstNode)
16629     return SDValue();
16630 
16631   int64_t NewOffset = GSDN->getOffset() + ConstNode->getSExtValue();
16632   SDLoc DL(GSDN);
16633 
16634   // The signed int offset needs to fit in 34 bits.
16635   if (!isInt<34>(NewOffset))
16636     return SDValue();
16637 
16638   // The new global address is a copy of the old global address except
16639   // that it has the updated Offset.
16640   SDValue GA =
16641       DAG.getTargetGlobalAddress(GSDN->getGlobal(), DL, GSDN->getValueType(0),
16642                                  NewOffset, GSDN->getTargetFlags());
16643   SDValue MatPCRel =
16644       DAG.getNode(PPCISD::MAT_PCREL_ADDR, DL, GSDN->getValueType(0), GA);
16645   return MatPCRel;
16646 }
16647 
16648 SDValue PPCTargetLowering::combineADD(SDNode *N, DAGCombinerInfo &DCI) const {
16649   if (auto Value = combineADDToADDZE(N, DCI.DAG, Subtarget))
16650     return Value;
16651 
16652   if (auto Value = combineADDToMAT_PCREL_ADDR(N, DCI.DAG, Subtarget))
16653     return Value;
16654 
16655   return SDValue();
16656 }
16657 
16658 // Detect TRUNCATE operations on bitcasts of float128 values.
16659 // What we are looking for here is the situation where we extract a subset
16660 // of bits from a 128-bit float.
16661 // This can be of two forms:
16662 // 1) BITCAST of f128 feeding TRUNCATE
16663 // 2) BITCAST of f128 feeding SRL (a shift) feeding TRUNCATE
16664 // The reason this is required is because we do not have a legal i128 type
16665 // and so we want to prevent having to store the f128 and then reload part
16666 // of it.
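// For example, (truncate (srl (bitcast f128 to i128), 64)) becomes an
// EXTRACT_VECTOR_ELT from the f128 value bitcast to v2i64, selecting whichever
// doubleword holds the requested bits.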
16667 SDValue PPCTargetLowering::combineTRUNCATE(SDNode *N,
16668                                            DAGCombinerInfo &DCI) const {
16669   // If we are using CRBits then try that first.
16670   if (Subtarget.useCRBits()) {
16671     // Check if CRBits did anything and return that if it did.
16672     if (SDValue CRTruncValue = DAGCombineTruncBoolExt(N, DCI))
16673       return CRTruncValue;
16674   }
16675 
16676   SDLoc dl(N);
16677   SDValue Op0 = N->getOperand(0);
16678 
16679   // fold (truncate (abs (sub (zext a), (zext b)))) -> (vabsd a, b)
16680   if (Subtarget.hasP9Altivec() && Op0.getOpcode() == ISD::ABS) {
16681     EVT VT = N->getValueType(0);
16682     if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16683       return SDValue();
16684     SDValue Sub = Op0.getOperand(0);
16685     if (Sub.getOpcode() == ISD::SUB) {
16686       SDValue SubOp0 = Sub.getOperand(0);
16687       SDValue SubOp1 = Sub.getOperand(1);
16688       if ((SubOp0.getOpcode() == ISD::ZERO_EXTEND) &&
16689           (SubOp1.getOpcode() == ISD::ZERO_EXTEND)) {
16690         return DCI.DAG.getNode(PPCISD::VABSD, dl, VT, SubOp0.getOperand(0),
16691                                SubOp1.getOperand(0),
16692                                DCI.DAG.getTargetConstant(0, dl, MVT::i32));
16693       }
16694     }
16695   }
16696 
16697   // Looking for a truncate of i128 to i64.
16698   if (Op0.getValueType() != MVT::i128 || N->getValueType(0) != MVT::i64)
16699     return SDValue();
16700 
16701   int EltToExtract = DCI.DAG.getDataLayout().isBigEndian() ? 1 : 0;
16702 
16703   // SRL feeding TRUNCATE.
16704   if (Op0.getOpcode() == ISD::SRL) {
16705     ConstantSDNode *ConstNode = dyn_cast<ConstantSDNode>(Op0.getOperand(1));
16706     // The right shift has to be by 64 bits.
16707     if (!ConstNode || ConstNode->getZExtValue() != 64)
16708       return SDValue();
16709 
16710     // Switch the element number to extract.
16711     EltToExtract = EltToExtract ? 0 : 1;
16712     // Update Op0 past the SRL.
16713     Op0 = Op0.getOperand(0);
16714   }
16715 
16716   // BITCAST feeding a TRUNCATE possibly via SRL.
16717   if (Op0.getOpcode() == ISD::BITCAST &&
16718       Op0.getValueType() == MVT::i128 &&
16719       Op0.getOperand(0).getValueType() == MVT::f128) {
16720     SDValue Bitcast = DCI.DAG.getBitcast(MVT::v2i64, Op0.getOperand(0));
16721     return DCI.DAG.getNode(
16722         ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Bitcast,
16723         DCI.DAG.getTargetConstant(EltToExtract, dl, MVT::i32));
16724   }
16725   return SDValue();
16726 }
16727 
16728 SDValue PPCTargetLowering::combineMUL(SDNode *N, DAGCombinerInfo &DCI) const {
16729   SelectionDAG &DAG = DCI.DAG;
16730 
16731   ConstantSDNode *ConstOpOrElement = isConstOrConstSplat(N->getOperand(1));
16732   if (!ConstOpOrElement)
16733     return SDValue();
16734 
16735   // An imul is usually smaller than the alternative sequence for a legal type.
16736   if (DAG.getMachineFunction().getFunction().hasMinSize() &&
16737       isOperationLegal(ISD::MUL, N->getValueType(0)))
16738     return SDValue();
16739 
16740   auto IsProfitable = [this](bool IsNeg, bool IsAddOne, EVT VT) -> bool {
16741     switch (this->Subtarget.getCPUDirective()) {
16742     default:
16743       // TODO: enhance the condition for subtargets before pwr8.
16744       return false;
16745     case PPC::DIR_PWR8:
16746       //  type        mul     add    shl
16747       // scalar        4       1      1
16748       // vector        7       2      2
16749       return true;
16750     case PPC::DIR_PWR9:
16751     case PPC::DIR_PWR10:
16752     case PPC::DIR_PWR_FUTURE:
16753       //  type        mul     add    shl
16754       // scalar        5       2      2
16755       // vector        7       2      2
16756 
16757       // The cycle ratios of the related operations are shown in the table
16758       // above. Because mul costs 5 (scalar) / 7 (vector) while add/sub/shl all
16759       // cost 2 for both scalar and vector types, the 2-instruction patterns
16760       // (add/sub + shl) cost 4 and are always profitable; but the 3-instruction
16761       // pattern (mul x, -(2^N + 1)) => -(add (shl x, N), x) costs 6
16762       // (sub + add + shl), so we should only do it for vector types.
16763       return IsAddOne && IsNeg ? VT.isVector() : true;
16764     }
16765   };
16766 
16767   EVT VT = N->getValueType(0);
16768   SDLoc DL(N);
16769 
16770   const APInt &MulAmt = ConstOpOrElement->getAPIntValue();
16771   bool IsNeg = MulAmt.isNegative();
16772   APInt MulAmtAbs = MulAmt.abs();
16773 
16774   if ((MulAmtAbs - 1).isPowerOf2()) {
16775     // (mul x, 2^N + 1) => (add (shl x, N), x)
16776     // (mul x, -(2^N + 1)) => -(add (shl x, N), x)
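    // For example, (mul x, 5) becomes (add (shl x, 2), x), and
    // (mul x, -5) becomes (sub 0, (add (shl x, 2), x)).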
16777 
16778     if (!IsProfitable(IsNeg, true, VT))
16779       return SDValue();
16780 
16781     SDValue Op0 = N->getOperand(0);
16782     SDValue Op1 =
16783         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16784                     DAG.getConstant((MulAmtAbs - 1).logBase2(), DL, VT));
16785     SDValue Res = DAG.getNode(ISD::ADD, DL, VT, Op0, Op1);
16786 
16787     if (!IsNeg)
16788       return Res;
16789 
16790     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
16791   } else if ((MulAmtAbs + 1).isPowerOf2()) {
16792     // (mul x, 2^N - 1) => (sub (shl x, N), x)
16793     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
16794 
16795     if (!IsProfitable(IsNeg, false, VT))
16796       return SDValue();
16797 
16798     SDValue Op0 = N->getOperand(0);
16799     SDValue Op1 =
16800         DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
16801                     DAG.getConstant((MulAmtAbs + 1).logBase2(), DL, VT));
16802 
16803     if (!IsNeg)
16804       return DAG.getNode(ISD::SUB, DL, VT, Op1, Op0);
16805     else
16806       return DAG.getNode(ISD::SUB, DL, VT, Op0, Op1);
16807 
16808   } else {
16809     return SDValue();
16810   }
16811 }
16812 
16813 // Combine fma-like op (like fnmsub) with fnegs to appropriate op. Do this
16814 // in combiner since we need to check SD flags and other subtarget features.
16815 SDValue PPCTargetLowering::combineFMALike(SDNode *N,
16816                                           DAGCombinerInfo &DCI) const {
16817   SDValue N0 = N->getOperand(0);
16818   SDValue N1 = N->getOperand(1);
16819   SDValue N2 = N->getOperand(2);
16820   SDNodeFlags Flags = N->getFlags();
16821   EVT VT = N->getValueType(0);
16822   SelectionDAG &DAG = DCI.DAG;
16823   const TargetOptions &Options = getTargetMachine().Options;
16824   unsigned Opc = N->getOpcode();
16825   bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
16826   bool LegalOps = !DCI.isBeforeLegalizeOps();
16827   SDLoc Loc(N);
16828 
16829   if (!isOperationLegal(ISD::FMA, VT))
16830     return SDValue();
16831 
16832   // Allowing transformation to FNMSUB may change sign of zeroes when ab-c=0
16833   // since (fnmsub a b c)=-0 while c-ab=+0.
16834   if (!Flags.hasNoSignedZeros() && !Options.NoSignedZerosFPMath)
16835     return SDValue();
16836 
16837   // (fma (fneg a) b c) => (fnmsub a b c)
16838   // (fnmsub (fneg a) b c) => (fma a b c)
16839   if (SDValue NegN0 = getCheaperNegatedExpression(N0, DAG, LegalOps, CodeSize))
16840     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, NegN0, N1, N2, Flags);
16841 
16842   // (fma a (fneg b) c) => (fnmsub a b c)
16843   // (fnmsub a (fneg b) c) => (fma a b c)
16844   if (SDValue NegN1 = getCheaperNegatedExpression(N1, DAG, LegalOps, CodeSize))
16845     return DAG.getNode(invertFMAOpcode(Opc), Loc, VT, N0, NegN1, N2, Flags);
16846 
16847   return SDValue();
16848 }
16849 
16850 bool PPCTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
16851   // Only duplicate to increase tail calls for the 64-bit SysV ABIs.
16852   if (!Subtarget.is64BitELFABI())
16853     return false;
16854 
16855   // If not a tail call then no need to proceed.
16856   if (!CI->isTailCall())
16857     return false;
16858 
16859   // If sibling calls have been disabled and tail calls aren't guaranteed,
16860   // there is no reason to duplicate.
16861   auto &TM = getTargetMachine();
16862   if (!TM.Options.GuaranteedTailCallOpt && DisableSCO)
16863     return false;
16864 
16865   // Can't tail call a function called indirectly, or if it has variadic args.
16866   const Function *Callee = CI->getCalledFunction();
16867   if (!Callee || Callee->isVarArg())
16868     return false;
16869 
16870   // Make sure the callee and caller calling conventions are eligible for tco.
16871   const Function *Caller = CI->getParent()->getParent();
16872   if (!areCallingConvEligibleForTCO_64SVR4(Caller->getCallingConv(),
16873                                            CI->getCallingConv()))
16874       return false;
16875 
16876   // If the function is local then we have a good chance at tail-calling it.
16877   return getTargetMachine().shouldAssumeDSOLocal(*Caller->getParent(), Callee);
16878 }
16879 
16880 bool PPCTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
16881   if (!Subtarget.hasVSX())
16882     return false;
16883   if (Subtarget.hasP9Vector() && VT == MVT::f128)
16884     return true;
16885   return VT == MVT::f32 || VT == MVT::f64 ||
16886     VT == MVT::v4f32 || VT == MVT::v2f64;
16887 }
16888 
16889 bool PPCTargetLowering::
16890 isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const {
16891   const Value *Mask = AndI.getOperand(1);
16892   // If the mask is suitable for andi. or andis. we should sink the and.
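  // For example, masks such as 0xFFFF (andi.) or 0xFFFF0000 (andis.) qualify,
  // while 0x00FF00FF fits neither record-form immediate and does not qualify.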
16893   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Mask)) {
16894     // Can't handle constants wider than 64-bits.
16895     if (CI->getBitWidth() > 64)
16896       return false;
16897     int64_t ConstVal = CI->getZExtValue();
16898     return isUInt<16>(ConstVal) ||
16899       (isUInt<16>(ConstVal >> 16) && !(ConstVal & 0xFFFF));
16900   }
16901 
16902   // For non-constant masks, we can always use the record-form and.
16903   return true;
16904 }
16905 
16906 // Transform (abs (sub (zext a), (zext b))) to (vabsd a b 0)
16907 // Transform (abs (sub (zext a), (zext_invec b))) to (vabsd a b 0)
16908 // Transform (abs (sub (zext_invec a), (zext_invec b))) to (vabsd a b 0)
16909 // Transform (abs (sub (zext_invec a), (zext b))) to (vabsd a b 0)
16910 // Transform (abs (sub a, b)) to (vabsd a b 1) if a and b are of type v4i32
16911 SDValue PPCTargetLowering::combineABS(SDNode *N, DAGCombinerInfo &DCI) const {
16912   assert((N->getOpcode() == ISD::ABS) && "Need ABS node here");
16913   assert(Subtarget.hasP9Altivec() &&
16914          "Only combine this when P9 altivec supported!");
16915   EVT VT = N->getValueType(0);
16916   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16917     return SDValue();
16918 
16919   SelectionDAG &DAG = DCI.DAG;
16920   SDLoc dl(N);
16921   if (N->getOperand(0).getOpcode() == ISD::SUB) {
16922     // Even for signed integers this is safe: the zero-extended inputs are
16923     // non-negative (as signed integers), so VABSD gives the right result.
16924     unsigned SubOpcd0 = N->getOperand(0)->getOperand(0).getOpcode();
16925     unsigned SubOpcd1 = N->getOperand(0)->getOperand(1).getOpcode();
16926     if ((SubOpcd0 == ISD::ZERO_EXTEND ||
16927          SubOpcd0 == ISD::ZERO_EXTEND_VECTOR_INREG) &&
16928         (SubOpcd1 == ISD::ZERO_EXTEND ||
16929          SubOpcd1 == ISD::ZERO_EXTEND_VECTOR_INREG)) {
16930       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16931                          N->getOperand(0)->getOperand(0),
16932                          N->getOperand(0)->getOperand(1),
16933                          DAG.getTargetConstant(0, dl, MVT::i32));
16934     }
16935 
16936     // For type v4i32, it can be optimized with xvnegsp + vabsduw
16937     if (N->getOperand(0).getValueType() == MVT::v4i32 &&
16938         N->getOperand(0).hasOneUse()) {
16939       return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(0).getValueType(),
16940                          N->getOperand(0)->getOperand(0),
16941                          N->getOperand(0)->getOperand(1),
16942                          DAG.getTargetConstant(1, dl, MVT::i32));
16943     }
16944   }
16945 
16946   return SDValue();
16947 }
16948 
16949 // For type v4i32/v8i16/v16i8, transform
16950 // from (vselect (setcc a, b, setugt), (sub a, b), (sub b, a)) to (vabsd a, b)
16951 // from (vselect (setcc a, b, setuge), (sub a, b), (sub b, a)) to (vabsd a, b)
16952 // from (vselect (setcc a, b, setult), (sub b, a), (sub a, b)) to (vabsd a, b)
16953 // from (vselect (setcc a, b, setule), (sub b, a), (sub a, b)) to (vabsd a, b)
16954 SDValue PPCTargetLowering::combineVSelect(SDNode *N,
16955                                           DAGCombinerInfo &DCI) const {
16956   assert((N->getOpcode() == ISD::VSELECT) && "Need VSELECT node here");
16957   assert(Subtarget.hasP9Altivec() &&
16958          "Only combine this when P9 altivec supported!");
16959 
16960   SelectionDAG &DAG = DCI.DAG;
16961   SDLoc dl(N);
16962   SDValue Cond = N->getOperand(0);
16963   SDValue TrueOpnd = N->getOperand(1);
16964   SDValue FalseOpnd = N->getOperand(2);
16965   EVT VT = N->getOperand(1).getValueType();
16966 
16967   if (Cond.getOpcode() != ISD::SETCC || TrueOpnd.getOpcode() != ISD::SUB ||
16968       FalseOpnd.getOpcode() != ISD::SUB)
16969     return SDValue();
16970 
16971   // ABSD is only available for types v4i32/v8i16/v16i8.
16972   if (VT != MVT::v4i32 && VT != MVT::v8i16 && VT != MVT::v16i8)
16973     return SDValue();
16974 
16975   // Require a single-use operand so we save at least one dependent computation.
16976   if (!(Cond.hasOneUse() || TrueOpnd.hasOneUse() || FalseOpnd.hasOneUse()))
16977     return SDValue();
16978 
16979   ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
16980 
16981   // Can only handle unsigned comparison here
16982   switch (CC) {
16983   default:
16984     return SDValue();
16985   case ISD::SETUGT:
16986   case ISD::SETUGE:
16987     break;
16988   case ISD::SETULT:
16989   case ISD::SETULE:
16990     std::swap(TrueOpnd, FalseOpnd);
16991     break;
16992   }
16993 
16994   SDValue CmpOpnd1 = Cond.getOperand(0);
16995   SDValue CmpOpnd2 = Cond.getOperand(1);
16996 
16997   // SETCC CmpOpnd1 CmpOpnd2 cond
16998   // TrueOpnd = CmpOpnd1 - CmpOpnd2
16999   // FalseOpnd = CmpOpnd2 - CmpOpnd1
17000   if (TrueOpnd.getOperand(0) == CmpOpnd1 &&
17001       TrueOpnd.getOperand(1) == CmpOpnd2 &&
17002       FalseOpnd.getOperand(0) == CmpOpnd2 &&
17003       FalseOpnd.getOperand(1) == CmpOpnd1) {
17004     return DAG.getNode(PPCISD::VABSD, dl, N->getOperand(1).getValueType(),
17005                        CmpOpnd1, CmpOpnd2,
17006                        DAG.getTargetConstant(0, dl, MVT::i32));
17007   }
17008 
17009   return SDValue();
17010 }
17011 
17012 /// getAddrModeForFlags - Based on the set of address flags, select the
17013 /// optimal instruction format to match.
17014 PPC::AddrMode PPCTargetLowering::getAddrModeForFlags(unsigned Flags) const {
17015   // This is not a node we should be handling here.
17016   if (Flags == PPC::MOF_None)
17017     return PPC::AM_None;
17018   // Unaligned D-Forms are tried first, followed by the aligned D-Forms.
17019   for (auto FlagSet : AddrModesMap.at(PPC::AM_DForm))
17020     if ((Flags & FlagSet) == FlagSet)
17021       return PPC::AM_DForm;
17022   for (auto FlagSet : AddrModesMap.at(PPC::AM_DSForm))
17023     if ((Flags & FlagSet) == FlagSet)
17024       return PPC::AM_DSForm;
17025   for (auto FlagSet : AddrModesMap.at(PPC::AM_DQForm))
17026     if ((Flags & FlagSet) == FlagSet)
17027       return PPC::AM_DQForm;
17028   // If no other forms are selected, return an X-Form as it is the most
17029   // general addressing mode.
17030   return PPC::AM_XForm;
17031 }
17032 
17033 /// Set alignment flags based on whether or not the Frame Index is aligned.
17034 /// Utilized when computing flags for address computation when selecting
17035 /// load and store instructions.
17036 static void setAlignFlagsForFI(SDValue N, unsigned &FlagSet,
17037                                SelectionDAG &DAG) {
17038   bool IsAdd = ((N.getOpcode() == ISD::ADD) || (N.getOpcode() == ISD::OR));
17039   FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(IsAdd ? N.getOperand(0) : N);
17040   if (!FI)
17041     return;
17042   const MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
17043   unsigned FrameIndexAlign = MFI.getObjectAlign(FI->getIndex()).value();
17044   // If this is (add $FI, $S16Imm), the alignment flags are already set
17045   // based on the immediate. We just need to clear the alignment flags
17046   // if the FI alignment is weaker.
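  // For example, an 8-byte aligned frame index keeps MOF_RPlusSImm16Mult4
  // but loses MOF_RPlusSImm16Mult16.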
  if ((FrameIndexAlign % 4) != 0)
    FlagSet &= ~PPC::MOF_RPlusSImm16Mult4;
  if ((FrameIndexAlign % 16) != 0)
    FlagSet &= ~PPC::MOF_RPlusSImm16Mult16;
  // If the address is a plain FrameIndex, set alignment flags based on
  // FI alignment.
  if (!IsAdd) {
    if ((FrameIndexAlign % 4) == 0)
      FlagSet |= PPC::MOF_RPlusSImm16Mult4;
    if ((FrameIndexAlign % 16) == 0)
      FlagSet |= PPC::MOF_RPlusSImm16Mult16;
  }
}

/// Given a node, compute flags that are used for address computation when
/// selecting load and store instructions. The flags computed are stored in
/// FlagSet. This function takes into account whether the node is a constant,
/// an ADD or OR, or neither, and computes the address flags accordingly.
static void computeFlagsForAddressComputation(SDValue N, unsigned &FlagSet,
                                              SelectionDAG &DAG) {
  // Set the alignment flags for the node depending on whether the immediate
  // is 4-byte or 16-byte aligned.
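  // For example, Imm = 48 sets both Mult4 and Mult16, while Imm = 20 sets
  // only Mult4.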
  auto SetAlignFlagsForImm = [&](uint64_t Imm) {
    if ((Imm & 0x3) == 0)
      FlagSet |= PPC::MOF_RPlusSImm16Mult4;
    if ((Imm & 0xf) == 0)
      FlagSet |= PPC::MOF_RPlusSImm16Mult16;
  };

  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N)) {
    // All 32-bit constants can be computed as LIS + Disp.
    const APInt &ConstImm = CN->getAPIntValue();
    if (ConstImm.isSignedIntN(32)) { // Flag to handle 32-bit constants.
      FlagSet |= PPC::MOF_AddrIsSImm32;
      SetAlignFlagsForImm(ConstImm.getZExtValue());
      setAlignFlagsForFI(N, FlagSet, DAG);
    }
    if (ConstImm.isSignedIntN(34)) // Flag to handle 34-bit constants.
      FlagSet |= PPC::MOF_RPlusSImm34;
    else // Let constant materialization handle large constants.
      FlagSet |= PPC::MOF_NotAddNorCst;
  } else if (N.getOpcode() == ISD::ADD || provablyDisjointOr(DAG, N)) {
    // This address can be represented as an addition of:
    // - Register + Imm16 (possibly a multiple of 4/16)
    // - Register + Imm34
    // - Register + PPCISD::Lo
    // - Register + Register
    // In any case, we won't have to match this as Base + Zero.
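    // For example, (add %reg, 24) sets MOF_RPlusSImm16, MOF_RPlusSImm16Mult4
    // and MOF_RPlusSImm34 below.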
    SDValue RHS = N.getOperand(1);
    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(RHS)) {
      const APInt &ConstImm = CN->getAPIntValue();
      if (ConstImm.isSignedIntN(16)) {
        FlagSet |= PPC::MOF_RPlusSImm16; // Signed 16-bit immediates.
        SetAlignFlagsForImm(ConstImm.getZExtValue());
        setAlignFlagsForFI(N, FlagSet, DAG);
      }
      if (ConstImm.isSignedIntN(34))
        FlagSet |= PPC::MOF_RPlusSImm34; // Signed 34-bit immediates.
      else
        FlagSet |= PPC::MOF_RPlusR; // Register.
    } else if (RHS.getOpcode() == PPCISD::Lo &&
               !cast<ConstantSDNode>(RHS.getOperand(1))->getZExtValue())
      FlagSet |= PPC::MOF_RPlusLo; // PPCISD::Lo.
    else
      FlagSet |= PPC::MOF_RPlusR;
  } else { // The address computation is not a constant or an addition.
    setAlignFlagsForFI(N, FlagSet, DAG);
    FlagSet |= PPC::MOF_NotAddNorCst;
  }
}

/// computeMOFlags - Given a node N and its Parent (a MemSDNode), compute
/// the address flags of the load/store instruction that is to be matched.
unsigned PPCTargetLowering::computeMOFlags(const SDNode *Parent, SDValue N,
                                           SelectionDAG &DAG) const {
  unsigned FlagSet = PPC::MOF_None;

  // Compute subtarget flags.
  if (!Subtarget.hasP9Vector())
    FlagSet |= PPC::MOF_SubtargetBeforeP9;
  else {
    FlagSet |= PPC::MOF_SubtargetP9;
    if (Subtarget.hasPrefixInstrs())
      FlagSet |= PPC::MOF_SubtargetP10;
  }
  if (Subtarget.hasSPE())
    FlagSet |= PPC::MOF_SubtargetSPE;

  // Mark this as something we don't want to handle here if it is an atomic
  // or a pre-increment instruction.
  if (const LSBaseSDNode *LSB = dyn_cast<LSBaseSDNode>(Parent))
    if (LSB->isIndexed())
      return PPC::MOF_None;

  // Compute in-memory type flags. This is based on whether the in-memory
  // type is a scalar integer, a float, or a vector.
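  // For example, an i16 access sets MOF_SubWordInt, while a v4i32 access
  // sets MOF_Vector.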
  const MemSDNode *MN = dyn_cast<MemSDNode>(Parent);
  assert(MN && "Parent should be a MemSDNode!");
  EVT MemVT = MN->getMemoryVT();
  unsigned Size = MemVT.getSizeInBits();
  if (MemVT.isScalarInteger()) {
    assert(Size <= 64 && "Not expecting scalar integers larger than 8 bytes!");
    if (Size < 32)
      FlagSet |= PPC::MOF_SubWordInt;
    else if (Size == 32)
      FlagSet |= PPC::MOF_WordInt;
    else
      FlagSet |= PPC::MOF_DoubleWordInt;
  } else if (MemVT.isVector() && !MemVT.isFloatingPoint()) { // Integer vectors.
    if (Size == 128)
      FlagSet |= PPC::MOF_Vector;
    else if (Size == 256)
      FlagSet |= PPC::MOF_Vector256;
    else
      llvm_unreachable("Not expecting illegal vectors!");
  } else { // Floating point type: can be scalar, f128 or vector types.
    if (Size == 32 || Size == 64)
      FlagSet |= PPC::MOF_ScalarFloat;
    else if (MemVT == MVT::f128 || MemVT.isVector())
      FlagSet |= PPC::MOF_Vector;
    else
      llvm_unreachable("Not expecting illegal scalar floats!");
  }

  // Compute flags for address computation.
  computeFlagsForAddressComputation(N, FlagSet, DAG);

  // Compute type extension flags.
  if (const LoadSDNode *LN = dyn_cast<LoadSDNode>(Parent)) {
    switch (LN->getExtensionType()) {
    case ISD::SEXTLOAD:
      FlagSet |= PPC::MOF_SExt;
      break;
    case ISD::EXTLOAD:
    case ISD::ZEXTLOAD:
      FlagSet |= PPC::MOF_ZExt;
      break;
    case ISD::NON_EXTLOAD:
      FlagSet |= PPC::MOF_NoExt;
      break;
    }
  } else
    FlagSet |= PPC::MOF_NoExt;

  // For integers, no extension is the same as zero extension.
  // We set the extension mode to zero extension so we don't have
  // to add separate entries in AddrModesMap for loads and stores.
  if (MemVT.isScalarInteger() && (FlagSet & PPC::MOF_NoExt)) {
    FlagSet |= PPC::MOF_ZExt;
    FlagSet &= ~PPC::MOF_NoExt;
  }

  // If we don't have prefixed instructions, 34-bit constants should be
  // treated as PPC::MOF_NotAddNorCst so they can match D-Forms.
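  // The mask below checks that MOF_RPlusSImm34 is set while both
  // MOF_AddrIsSImm32 and MOF_SubtargetP10 are clear.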
  bool IsNonP1034BitConst =
      ((PPC::MOF_RPlusSImm34 | PPC::MOF_AddrIsSImm32 | PPC::MOF_SubtargetP10) &
       FlagSet) == PPC::MOF_RPlusSImm34;
  if (N.getOpcode() != ISD::ADD && N.getOpcode() != ISD::OR &&
      IsNonP1034BitConst)
    FlagSet |= PPC::MOF_NotAddNorCst;

  return FlagSet;
}

/// SelectForceXFormMode - Given the specified address, force it to be
/// represented as an indexed [r+r] operation (an XForm instruction).
PPC::AddrMode PPCTargetLowering::SelectForceXFormMode(SDValue N, SDValue &Disp,
                                                      SDValue &Base,
                                                      SelectionDAG &DAG) const {

  PPC::AddrMode Mode = PPC::AM_XForm;
  int16_t ForceXFormImm = 0;
  if (provablyDisjointOr(DAG, N) &&
      !isIntS16Immediate(N.getOperand(1), ForceXFormImm)) {
    Disp = N.getOperand(0);
    Base = N.getOperand(1);
    return Mode;
  }

  // If the address is the result of an add, we will utilize the fact that the
  // address calculation includes an implicit add.  However, we can reduce
  // register pressure if we do not materialize a constant just for use as the
  // index register.  We only get rid of the add when one of its operands is
  // not a 16-bit signed constant, or when either operand has more than one
  // use.
  if (N.getOpcode() == ISD::ADD &&
      (!isIntS16Immediate(N.getOperand(1), ForceXFormImm) ||
       !N.getOperand(1).hasOneUse() || !N.getOperand(0).hasOneUse())) {
    Disp = N.getOperand(0);
    Base = N.getOperand(1);
    return Mode;
  }

  // Otherwise, use R0 as the base register.
  Disp = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                         N.getValueType());
  Base = N;

  return Mode;
}

/// SelectOptimalAddrMode - Based on a node N and its Parent (a MemSDNode),
/// compute the address flags of the node, get the optimal address mode based
/// on the flags, and set the Base and Disp based on the address mode.
PPC::AddrMode PPCTargetLowering::SelectOptimalAddrMode(const SDNode *Parent,
                                                       SDValue N, SDValue &Disp,
                                                       SDValue &Base,
                                                       SelectionDAG &DAG,
                                                       MaybeAlign Align) const {
  SDLoc DL(Parent);

  // Compute the address flags.
  unsigned Flags = computeMOFlags(Parent, N, DAG);

  // Get the optimal address mode based on the Flags.
  PPC::AddrMode Mode = getAddrModeForFlags(Flags);

  // Set Base and Disp accordingly depending on the address mode.
  switch (Mode) {
  case PPC::AM_DForm:
  case PPC::AM_DSForm:
  case PPC::AM_DQForm: {
    // This is a register plus a 16-bit immediate. The base will be the
    // register and the displacement will be the immediate unless it
    // isn't sufficiently aligned.
    if (Flags & PPC::MOF_RPlusSImm16) {
      SDValue Op0 = N.getOperand(0);
      SDValue Op1 = N.getOperand(1);
      ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Op1);
      int16_t Imm = CN->getAPIntValue().getZExtValue();
      if (!Align || isAligned(*Align, Imm)) {
        Disp = DAG.getTargetConstant(Imm, DL, N.getValueType());
        Base = Op0;
        if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Op0)) {
          Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
          fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
        }
        break;
      }
    }
    // This is a register plus the @lo relocation. The base is the register
    // and the displacement is the global address.
    else if (Flags & PPC::MOF_RPlusLo) {
      Disp = N.getOperand(1).getOperand(0); // The global address.
      assert(Disp.getOpcode() == ISD::TargetGlobalAddress ||
             Disp.getOpcode() == ISD::TargetGlobalTLSAddress ||
             Disp.getOpcode() == ISD::TargetConstantPool ||
             Disp.getOpcode() == ISD::TargetJumpTable);
      Base = N.getOperand(0);
      break;
    }
    // This is a constant address at most 32 bits. The base will be
    // zero or load-immediate-shifted and the displacement will be
    // the low 16 bits of the address.
    else if (Flags & PPC::MOF_AddrIsSImm32) {
      ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
      EVT CNType = CN->getValueType(0);
      uint64_t CNImm = CN->getZExtValue();
      // If this address fits entirely in a 16-bit sext immediate field, codegen
      // this as "d, 0".
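      // With a base register of zero, the hardware substitutes the value 0
      // rather than the contents of R0, so the address is just the immediate.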
      int16_t Imm;
      if (isIntS16Immediate(CN, Imm) && (!Align || isAligned(*Align, Imm))) {
        Disp = DAG.getTargetConstant(Imm, DL, CNType);
        Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                               CNType);
        break;
      }
      // Handle 32-bit sext immediate with LIS + Addr mode.
      if ((CNType == MVT::i32 || isInt<32>(CNImm)) &&
          (!Align || isAligned(*Align, CNImm))) {
        int32_t Addr = (int32_t)CNImm;
        // Otherwise, break this down into LIS + Disp.
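        // Subtracting the sign-extended low half accounts for the borrow when
        // the low 16 bits are negative as a signed value; e.g. 0x00018000
        // becomes (LIS 0x2) + (-0x8000).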
        Disp = DAG.getTargetConstant((int16_t)Addr, DL, MVT::i32);
        Base =
            DAG.getTargetConstant((Addr - (int16_t)Addr) >> 16, DL, MVT::i32);
        uint32_t LIS = CNType == MVT::i32 ? PPC::LIS : PPC::LIS8;
        Base = SDValue(DAG.getMachineNode(LIS, DL, CNType, Base), 0);
        break;
      }
    }
    // Otherwise, the PPC::MOF_NotAddNorCst flag is set. The load/store is
    // non-foldable.
    Disp = DAG.getTargetConstant(0, DL, getPointerTy(DAG.getDataLayout()));
    if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N)) {
      Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());
      fixupFuncForFI(DAG, FI->getIndex(), N.getValueType());
    } else
      Base = N;
    break;
  }
  case PPC::AM_None:
    break;
  default: { // By default, X-Form is always available to be selected.
    // When a frame index is not aligned, we also match by XForm.
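    // A plain frame index is used directly as the base, with R0/X0 (which
    // reads as zero) in the other register operand; otherwise operand 0 and
    // operand 1 of the node fill Disp and Base respectively.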
    FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N);
    Base = FI ? N : N.getOperand(1);
    Disp = FI ? DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                                N.getValueType())
              : N.getOperand(0);
    break;
  }
  }
  return Mode;
}