1 //===-- PPCRegisterInfo.cpp - PowerPC Register Information ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the PowerPC implementation of the TargetRegisterInfo
10 // class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "PPCRegisterInfo.h"
15 #include "PPCFrameLowering.h"
16 #include "PPCInstrBuilder.h"
17 #include "PPCMachineFunctionInfo.h"
18 #include "PPCSubtarget.h"
19 #include "PPCTargetMachine.h"
20 #include "llvm/ADT/BitVector.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineModuleInfo.h"
27 #include "llvm/CodeGen/MachineRegisterInfo.h"
28 #include "llvm/CodeGen/RegisterScavenging.h"
29 #include "llvm/CodeGen/TargetFrameLowering.h"
30 #include "llvm/CodeGen/TargetInstrInfo.h"
31 #include "llvm/IR/CallingConv.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/Type.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/MathExtras.h"
39 #include "llvm/Support/raw_ostream.h"
40 #include "llvm/Target/TargetMachine.h"
41 #include "llvm/Target/TargetOptions.h"
42 #include <cstdlib>
43 
44 using namespace llvm;
45 
46 #define DEBUG_TYPE "reginfo"
47 
48 #define GET_REGINFO_TARGET_DESC
49 #include "PPCGenRegisterInfo.inc"
50 
51 STATISTIC(InflateGPRC, "Number of gprc inputs for getLargestLegalClass");
52 STATISTIC(InflateGP8RC, "Number of g8rc inputs for getLargestLegalClass");
53 
54 static cl::opt<bool>
55 EnableBasePointer("ppc-use-base-pointer", cl::Hidden, cl::init(true),
56          cl::desc("Enable use of a base pointer for complex stack frames"));
57 
58 static cl::opt<bool>
59 AlwaysBasePointer("ppc-always-use-base-pointer", cl::Hidden, cl::init(false),
60          cl::desc("Force the use of a base pointer in every function"));
61 
62 static cl::opt<bool>
63 EnableGPRToVecSpills("ppc-enable-gpr-to-vsr-spills", cl::Hidden, cl::init(false),
64          cl::desc("Enable spills from gpr to vsr rather than stack"));
65 
66 static cl::opt<bool>
67 StackPtrConst("ppc-stack-ptr-caller-preserved",
68                 cl::desc("Consider R1 caller preserved so stack saves of "
69                          "caller preserved registers can be LICM candidates"),
70                 cl::init(true), cl::Hidden);
71 
72 static cl::opt<unsigned>
73 MaxCRBitSpillDist("ppc-max-crbit-spill-dist",
74                   cl::desc("Maximum search distance for definition of CR bit "
75                            "spill on ppc"),
76                   cl::Hidden, cl::init(100));
77 
78 // Copies/moves of physical accumulators are expensive operations
79 // that should be avoided whenever possible. MMA instructions are
80 // meant to be used in performance-sensitive computational kernels.
81 // This option is provided, at least for the time being, to give the
82 // user a tool to detect this expensive operation and either rework
83 // their code or report a compiler bug if that turns out to be the
84 // cause.
85 #ifndef NDEBUG
86 static cl::opt<bool>
87 ReportAccMoves("ppc-report-acc-moves",
88                cl::desc("Emit information about accumulator register spills "
89                         "and copies"),
90                cl::Hidden, cl::init(false));
91 #endif
92 
93 static unsigned offsetMinAlignForOpcode(unsigned OpC);
94 
PPCRegisterInfo(const PPCTargetMachine & TM)95 PPCRegisterInfo::PPCRegisterInfo(const PPCTargetMachine &TM)
96   : PPCGenRegisterInfo(TM.isPPC64() ? PPC::LR8 : PPC::LR,
97                        TM.isPPC64() ? 0 : 1,
98                        TM.isPPC64() ? 0 : 1),
99     TM(TM) {
100   ImmToIdxMap[PPC::LD]   = PPC::LDX;    ImmToIdxMap[PPC::STD]  = PPC::STDX;
101   ImmToIdxMap[PPC::LBZ]  = PPC::LBZX;   ImmToIdxMap[PPC::STB]  = PPC::STBX;
102   ImmToIdxMap[PPC::LHZ]  = PPC::LHZX;   ImmToIdxMap[PPC::LHA]  = PPC::LHAX;
103   ImmToIdxMap[PPC::LWZ]  = PPC::LWZX;   ImmToIdxMap[PPC::LWA]  = PPC::LWAX;
104   ImmToIdxMap[PPC::LFS]  = PPC::LFSX;   ImmToIdxMap[PPC::LFD]  = PPC::LFDX;
105   ImmToIdxMap[PPC::STH]  = PPC::STHX;   ImmToIdxMap[PPC::STW]  = PPC::STWX;
106   ImmToIdxMap[PPC::STFS] = PPC::STFSX;  ImmToIdxMap[PPC::STFD] = PPC::STFDX;
107   ImmToIdxMap[PPC::ADDI] = PPC::ADD4;
108   ImmToIdxMap[PPC::LWA_32] = PPC::LWAX_32;
109 
110   // 64-bit
111   ImmToIdxMap[PPC::LHA8] = PPC::LHAX8; ImmToIdxMap[PPC::LBZ8] = PPC::LBZX8;
112   ImmToIdxMap[PPC::LHZ8] = PPC::LHZX8; ImmToIdxMap[PPC::LWZ8] = PPC::LWZX8;
113   ImmToIdxMap[PPC::STB8] = PPC::STBX8; ImmToIdxMap[PPC::STH8] = PPC::STHX8;
114   ImmToIdxMap[PPC::STW8] = PPC::STWX8; ImmToIdxMap[PPC::STDU] = PPC::STDUX;
115   ImmToIdxMap[PPC::ADDI8] = PPC::ADD8;
116 
117   // VSX
118   ImmToIdxMap[PPC::DFLOADf32] = PPC::LXSSPX;
119   ImmToIdxMap[PPC::DFLOADf64] = PPC::LXSDX;
120   ImmToIdxMap[PPC::SPILLTOVSR_LD] = PPC::SPILLTOVSR_LDX;
121   ImmToIdxMap[PPC::SPILLTOVSR_ST] = PPC::SPILLTOVSR_STX;
122   ImmToIdxMap[PPC::DFSTOREf32] = PPC::STXSSPX;
123   ImmToIdxMap[PPC::DFSTOREf64] = PPC::STXSDX;
124   ImmToIdxMap[PPC::LXV] = PPC::LXVX;
125   ImmToIdxMap[PPC::LXSD] = PPC::LXSDX;
126   ImmToIdxMap[PPC::LXSSP] = PPC::LXSSPX;
127   ImmToIdxMap[PPC::STXV] = PPC::STXVX;
128   ImmToIdxMap[PPC::STXSD] = PPC::STXSDX;
129   ImmToIdxMap[PPC::STXSSP] = PPC::STXSSPX;
130 
131   // SPE
132   ImmToIdxMap[PPC::EVLDD] = PPC::EVLDDX;
133   ImmToIdxMap[PPC::EVSTDD] = PPC::EVSTDDX;
134   ImmToIdxMap[PPC::SPESTW] = PPC::SPESTWX;
135   ImmToIdxMap[PPC::SPELWZ] = PPC::SPELWZX;
136 
137   // Power10
138   ImmToIdxMap[PPC::PLBZ]   = PPC::LBZX; ImmToIdxMap[PPC::PLBZ8]   = PPC::LBZX8;
139   ImmToIdxMap[PPC::PLHZ]   = PPC::LHZX; ImmToIdxMap[PPC::PLHZ8]   = PPC::LHZX8;
140   ImmToIdxMap[PPC::PLHA]   = PPC::LHAX; ImmToIdxMap[PPC::PLHA8]   = PPC::LHAX8;
141   ImmToIdxMap[PPC::PLWZ]   = PPC::LWZX; ImmToIdxMap[PPC::PLWZ8]   = PPC::LWZX8;
142   ImmToIdxMap[PPC::PLWA]   = PPC::LWAX; ImmToIdxMap[PPC::PLWA8]   = PPC::LWAX;
143   ImmToIdxMap[PPC::PLD]    = PPC::LDX;  ImmToIdxMap[PPC::PSTD]   = PPC::STDX;
144 
145   ImmToIdxMap[PPC::PSTB]   = PPC::STBX; ImmToIdxMap[PPC::PSTB8]   = PPC::STBX8;
146   ImmToIdxMap[PPC::PSTH]   = PPC::STHX; ImmToIdxMap[PPC::PSTH8]   = PPC::STHX8;
147   ImmToIdxMap[PPC::PSTW]   = PPC::STWX; ImmToIdxMap[PPC::PSTW8]   = PPC::STWX8;
148 
149   ImmToIdxMap[PPC::PLFS]   = PPC::LFSX; ImmToIdxMap[PPC::PSTFS]   = PPC::STFSX;
150   ImmToIdxMap[PPC::PLFD]   = PPC::LFDX; ImmToIdxMap[PPC::PSTFD]   = PPC::STFDX;
151   ImmToIdxMap[PPC::PLXSSP] = PPC::LXSSPX; ImmToIdxMap[PPC::PSTXSSP] = PPC::STXSSPX;
152   ImmToIdxMap[PPC::PLXSD]  = PPC::LXSDX; ImmToIdxMap[PPC::PSTXSD]  = PPC::STXSDX;
153   ImmToIdxMap[PPC::PLXV]   = PPC::LXVX; ImmToIdxMap[PPC::PSTXV]  = PPC::STXVX;
154 
155   ImmToIdxMap[PPC::LXVP]   = PPC::LXVPX;
156   ImmToIdxMap[PPC::STXVP]  = PPC::STXVPX;
157   ImmToIdxMap[PPC::PLXVP]  = PPC::LXVPX;
158   ImmToIdxMap[PPC::PSTXVP] = PPC::STXVPX;
159 }
160 
161 /// getPointerRegClass - Return the register class to use to hold pointers.
162 /// This is used for addressing modes.
163 const TargetRegisterClass *
getPointerRegClass(const MachineFunction & MF,unsigned Kind) const164 PPCRegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
165                                                                        const {
166   // Note that PPCInstrInfo::FoldImmediate also directly uses this Kind value
167   // when it checks for ZERO folding.
168   if (Kind == 1) {
169     if (TM.isPPC64())
170       return &PPC::G8RC_NOX0RegClass;
171     return &PPC::GPRC_NOR0RegClass;
172   }
173 
174   if (TM.isPPC64())
175     return &PPC::G8RCRegClass;
176   return &PPC::GPRCRegClass;
177 }
178 
179 const MCPhysReg*
getCalleeSavedRegs(const MachineFunction * MF) const180 PPCRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
181   const PPCSubtarget &Subtarget = MF->getSubtarget<PPCSubtarget>();
182   if (MF->getFunction().getCallingConv() == CallingConv::AnyReg) {
183     if (!TM.isPPC64() && Subtarget.isAIXABI())
184       report_fatal_error("AnyReg unimplemented on 32-bit AIX.");
185     if (Subtarget.hasVSX()) {
186       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
187         return CSR_64_AllRegs_AIX_Dflt_VSX_SaveList;
188       return CSR_64_AllRegs_VSX_SaveList;
189     }
190     if (Subtarget.hasAltivec()) {
191       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
192         return CSR_64_AllRegs_AIX_Dflt_Altivec_SaveList;
193       return CSR_64_AllRegs_Altivec_SaveList;
194     }
195     return CSR_64_AllRegs_SaveList;
196   }
197 
198   // On PPC64, we might need to save r2 (but only if it is not reserved).
199   // We do not need to treat R2 as callee-saved when using PC-Relative calls
200   // because any direct uses of R2 will cause it to be reserved. If the function
201   // is a leaf or the only uses of R2 are implicit uses for calls, the calls
202   // will use the @notoc relocation which will cause this function to set the
203   // st_other bit to 1, thereby communicating to its caller that it arbitrarily
204   // clobbers the TOC.
205   bool SaveR2 = MF->getRegInfo().isAllocatable(PPC::X2) &&
206                 !Subtarget.isUsingPCRelativeCalls();
207 
208   // Cold calling convention CSRs.
209   if (MF->getFunction().getCallingConv() == CallingConv::Cold) {
210     if (Subtarget.isAIXABI())
211       report_fatal_error("Cold calling unimplemented on AIX.");
212     if (TM.isPPC64()) {
213       if (Subtarget.hasAltivec())
214         return SaveR2 ? CSR_SVR64_ColdCC_R2_Altivec_SaveList
215                       : CSR_SVR64_ColdCC_Altivec_SaveList;
216       return SaveR2 ? CSR_SVR64_ColdCC_R2_SaveList
217                     : CSR_SVR64_ColdCC_SaveList;
218     }
219     // 32-bit targets.
220     if (Subtarget.hasAltivec())
221       return CSR_SVR32_ColdCC_Altivec_SaveList;
222     else if (Subtarget.hasSPE())
223       return CSR_SVR32_ColdCC_SPE_SaveList;
224     return CSR_SVR32_ColdCC_SaveList;
225   }
226   // Standard calling convention CSRs.
227   if (TM.isPPC64()) {
228     if (Subtarget.hasAltivec() &&
229         (!Subtarget.isAIXABI() || TM.getAIXExtendedAltivecABI())) {
230       return SaveR2 ? CSR_PPC64_R2_Altivec_SaveList
231                     : CSR_PPC64_Altivec_SaveList;
232     }
233     return SaveR2 ? CSR_PPC64_R2_SaveList : CSR_PPC64_SaveList;
234   }
235   // 32-bit targets.
236   if (Subtarget.isAIXABI()) {
237     if (Subtarget.hasAltivec())
238       return TM.getAIXExtendedAltivecABI() ? CSR_AIX32_Altivec_SaveList
239                                            : CSR_AIX32_SaveList;
240     return CSR_AIX32_SaveList;
241   }
242   if (Subtarget.hasAltivec())
243     return CSR_SVR432_Altivec_SaveList;
244   else if (Subtarget.hasSPE())
245     return CSR_SVR432_SPE_SaveList;
246   return CSR_SVR432_SaveList;
247 }
248 
249 const uint32_t *
getCallPreservedMask(const MachineFunction & MF,CallingConv::ID CC) const250 PPCRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
251                                       CallingConv::ID CC) const {
252   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
253   if (CC == CallingConv::AnyReg) {
254     if (Subtarget.hasVSX()) {
255       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
256         return CSR_64_AllRegs_AIX_Dflt_VSX_RegMask;
257       return CSR_64_AllRegs_VSX_RegMask;
258     }
259     if (Subtarget.hasAltivec()) {
260       if (Subtarget.isAIXABI() && !TM.getAIXExtendedAltivecABI())
261         return CSR_64_AllRegs_AIX_Dflt_Altivec_RegMask;
262       return CSR_64_AllRegs_Altivec_RegMask;
263     }
264     return CSR_64_AllRegs_RegMask;
265   }
266 
267   if (Subtarget.isAIXABI()) {
268     return TM.isPPC64()
269                ? ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
270                       ? CSR_PPC64_Altivec_RegMask
271                       : CSR_PPC64_RegMask)
272                : ((Subtarget.hasAltivec() && TM.getAIXExtendedAltivecABI())
273                       ? CSR_AIX32_Altivec_RegMask
274                       : CSR_AIX32_RegMask);
275   }
276 
277   if (CC == CallingConv::Cold) {
278     return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_SVR64_ColdCC_Altivec_RegMask
279                                                   : CSR_SVR64_ColdCC_RegMask)
280                         : (Subtarget.hasAltivec() ? CSR_SVR32_ColdCC_Altivec_RegMask
281                                                   : (Subtarget.hasSPE()
282                                                   ? CSR_SVR32_ColdCC_SPE_RegMask
283                                                   : CSR_SVR32_ColdCC_RegMask));
284   }
285 
286   return TM.isPPC64() ? (Subtarget.hasAltivec() ? CSR_PPC64_Altivec_RegMask
287                                                 : CSR_PPC64_RegMask)
288                       : (Subtarget.hasAltivec()
289                              ? CSR_SVR432_Altivec_RegMask
290                              : (Subtarget.hasSPE() ? CSR_SVR432_SPE_RegMask
291                                                    : CSR_SVR432_RegMask));
292 }
293 
294 const uint32_t*
getNoPreservedMask() const295 PPCRegisterInfo::getNoPreservedMask() const {
296   return CSR_NoRegs_RegMask;
297 }
298 
adjustStackMapLiveOutMask(uint32_t * Mask) const299 void PPCRegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
300   for (unsigned PseudoReg : {PPC::ZERO, PPC::ZERO8, PPC::RM})
301     Mask[PseudoReg / 32] &= ~(1u << (PseudoReg % 32));
302 }
303 
getReservedRegs(const MachineFunction & MF) const304 BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
305   BitVector Reserved(getNumRegs());
306   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
307   const PPCFrameLowering *TFI = getFrameLowering(MF);
308 
309   // The ZERO register is not really a register, but the representation of r0
310   // when used in instructions that treat r0 as the constant 0.
311   markSuperRegs(Reserved, PPC::ZERO);
312 
313   // The FP register is also not really a register, but is the representation
314   // of the frame pointer register used by ISD::FRAMEADDR.
315   markSuperRegs(Reserved, PPC::FP);
316 
317   // The BP register is also not really a register, but is the representation
318   // of the base pointer register used by setjmp.
319   markSuperRegs(Reserved, PPC::BP);
320 
321   // The counter registers must be reserved so that counter-based loops can
322   // be correctly formed (and the mtctr instructions are not DCE'd).
323   markSuperRegs(Reserved, PPC::CTR);
324   markSuperRegs(Reserved, PPC::CTR8);
325 
326   markSuperRegs(Reserved, PPC::R1);
327   markSuperRegs(Reserved, PPC::LR);
328   markSuperRegs(Reserved, PPC::LR8);
329   markSuperRegs(Reserved, PPC::RM);
330 
331   markSuperRegs(Reserved, PPC::VRSAVE);
332 
333   // The SVR4 ABI reserves r2 and r13
334   if (Subtarget.isSVR4ABI()) {
335     // We only reserve r2 if we need to use the TOC pointer. If we have no
336     // explicit uses of the TOC pointer (meaning we're a leaf function with
337     // no constant-pool loads, etc.) and we have no potential uses inside an
338     // inline asm block, then we can treat r2 has an ordinary callee-saved
339     // register.
340     const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
341     if (!TM.isPPC64() || FuncInfo->usesTOCBasePtr() || MF.hasInlineAsm())
342       markSuperRegs(Reserved, PPC::R2);  // System-reserved register
343     markSuperRegs(Reserved, PPC::R13); // Small Data Area pointer register
344   }
345 
346   // Always reserve r2 on AIX for now.
347   // TODO: Make r2 allocatable on AIX/XCOFF for some leaf functions.
348   if (Subtarget.isAIXABI())
349     markSuperRegs(Reserved, PPC::R2);  // System-reserved register
350 
351   // On PPC64, r13 is the thread pointer. Never allocate this register.
352   if (TM.isPPC64())
353     markSuperRegs(Reserved, PPC::R13);
354 
355   if (TFI->needsFP(MF))
356     markSuperRegs(Reserved, PPC::R31);
357 
358   bool IsPositionIndependent = TM.isPositionIndependent();
359   if (hasBasePointer(MF)) {
360     if (Subtarget.is32BitELFABI() && IsPositionIndependent)
361       markSuperRegs(Reserved, PPC::R29);
362     else
363       markSuperRegs(Reserved, PPC::R30);
364   }
365 
366   if (Subtarget.is32BitELFABI() && IsPositionIndependent)
367     markSuperRegs(Reserved, PPC::R30);
368 
369   // Reserve Altivec registers when Altivec is unavailable.
370   if (!Subtarget.hasAltivec())
371     for (TargetRegisterClass::iterator I = PPC::VRRCRegClass.begin(),
372          IE = PPC::VRRCRegClass.end(); I != IE; ++I)
373       markSuperRegs(Reserved, *I);
374 
375   if (Subtarget.isAIXABI() && Subtarget.hasAltivec() &&
376       !TM.getAIXExtendedAltivecABI()) {
377     //  In the AIX default Altivec ABI, vector registers VR20-VR31 are reserved
378     //  and cannot be used.
379     for (auto Reg : CSR_Altivec_SaveList) {
380       if (Reg == 0)
381         break;
382       markSuperRegs(Reserved, Reg);
383       for (MCRegAliasIterator AS(Reg, this, true); AS.isValid(); ++AS) {
384         Reserved.set(*AS);
385       }
386     }
387   }
388 
389   assert(checkAllSuperRegsMarked(Reserved));
390   return Reserved;
391 }
392 
requiresFrameIndexScavenging(const MachineFunction & MF) const393 bool PPCRegisterInfo::requiresFrameIndexScavenging(const MachineFunction &MF) const {
394   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
395   const PPCInstrInfo *InstrInfo =  Subtarget.getInstrInfo();
396   const MachineFrameInfo &MFI = MF.getFrameInfo();
397   const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();
398 
399   LLVM_DEBUG(dbgs() << "requiresFrameIndexScavenging for " << MF.getName()
400                     << ".\n");
401   // If the callee saved info is invalid we have to default to true for safety.
402   if (!MFI.isCalleeSavedInfoValid()) {
403     LLVM_DEBUG(dbgs() << "TRUE - Invalid callee saved info.\n");
404     return true;
405   }
406 
407   // We will require the use of X-Forms because the frame is larger than what
408   // can be represented in signed 16 bits that fit in the immediate of a D-Form.
409   // If we need an X-Form then we need a register to store the address offset.
410   unsigned FrameSize = MFI.getStackSize();
411   // Signed 16 bits means that the FrameSize cannot be more than 15 bits.
412   if (FrameSize & ~0x7FFF) {
413     LLVM_DEBUG(dbgs() << "TRUE - Frame size is too large for D-Form.\n");
414     return true;
415   }
416 
417   // The callee saved info is valid so it can be traversed.
418   // Checking for registers that need saving that do not have load or store
419   // forms where the address offset is an immediate.
420   for (unsigned i = 0; i < Info.size(); i++) {
421     // If the spill is to a register no scavenging is required.
422     if (Info[i].isSpilledToReg())
423       continue;
424 
425     int FrIdx = Info[i].getFrameIdx();
426     unsigned Reg = Info[i].getReg();
427 
428     const TargetRegisterClass *RC = getMinimalPhysRegClass(Reg);
429     unsigned Opcode = InstrInfo->getStoreOpcodeForSpill(RC);
430     if (!MFI.isFixedObjectIndex(FrIdx)) {
431       // This is not a fixed object. If it requires alignment then we may still
432       // need to use the XForm.
433       if (offsetMinAlignForOpcode(Opcode) > 1) {
434         LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
435                           << " for register " << printReg(Reg, this) << ".\n");
436         LLVM_DEBUG(dbgs() << "TRUE - Not fixed frame object that requires "
437                           << "alignment.\n");
438         return true;
439       }
440     }
441 
442     // This is eiher:
443     // 1) A fixed frame index object which we know are aligned so
444     // as long as we have a valid DForm/DSForm/DQForm (non XForm) we don't
445     // need to consider the alignment here.
446     // 2) A not fixed object but in that case we now know that the min required
447     // alignment is no more than 1 based on the previous check.
448     if (InstrInfo->isXFormMemOp(Opcode)) {
449       LLVM_DEBUG(dbgs() << "Memory Operand: " << InstrInfo->getName(Opcode)
450                         << " for register " << printReg(Reg, this) << ".\n");
451       LLVM_DEBUG(dbgs() << "TRUE - Memory operand is X-Form.\n");
452       return true;
453     }
454   }
455   LLVM_DEBUG(dbgs() << "FALSE - Scavenging is not required.\n");
456   return false;
457 }
458 
requiresVirtualBaseRegisters(const MachineFunction & MF) const459 bool PPCRegisterInfo::requiresVirtualBaseRegisters(
460     const MachineFunction &MF) const {
461   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
462   // Do not use virtual base registers when ROP protection is turned on.
463   // Virtual base registers break the layout of the local variable space and may
464   // push the ROP Hash location past the 512 byte range of the ROP store
465   // instruction.
466   return !Subtarget.hasROPProtect();
467 }
468 
isCallerPreservedPhysReg(MCRegister PhysReg,const MachineFunction & MF) const469 bool PPCRegisterInfo::isCallerPreservedPhysReg(MCRegister PhysReg,
470                                                const MachineFunction &MF) const {
471   assert(Register::isPhysicalRegister(PhysReg));
472   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
473   const MachineFrameInfo &MFI = MF.getFrameInfo();
474 
475   if (!Subtarget.is64BitELFABI() && !Subtarget.isAIXABI())
476     return false;
477   if (PhysReg == Subtarget.getTOCPointerRegister())
478     // X2/R2 is guaranteed to be preserved within a function if it is reserved.
479     // The reason it's reserved is that it's the TOC pointer (and the function
480     // uses the TOC). In functions where it isn't reserved (i.e. leaf functions
481     // with no TOC access), we can't claim that it is preserved.
482     return (getReservedRegs(MF).test(PhysReg));
483   if (StackPtrConst && PhysReg == Subtarget.getStackPointerRegister() &&
484       !MFI.hasVarSizedObjects() && !MFI.hasOpaqueSPAdjustment())
485     // The value of the stack pointer does not change within a function after
486     // the prologue and before the epilogue if there are no dynamic allocations
487     // and no inline asm which clobbers X1/R1.
488     return true;
489   return false;
490 }
491 
getRegAllocationHints(Register VirtReg,ArrayRef<MCPhysReg> Order,SmallVectorImpl<MCPhysReg> & Hints,const MachineFunction & MF,const VirtRegMap * VRM,const LiveRegMatrix * Matrix) const492 bool PPCRegisterInfo::getRegAllocationHints(Register VirtReg,
493                                             ArrayRef<MCPhysReg> Order,
494                                             SmallVectorImpl<MCPhysReg> &Hints,
495                                             const MachineFunction &MF,
496                                             const VirtRegMap *VRM,
497                                             const LiveRegMatrix *Matrix) const {
498   const MachineRegisterInfo *MRI = &MF.getRegInfo();
499 
500   // Call the base implementation first to set any hints based on the usual
501   // heuristics and decide what the return value should be. We want to return
502   // the same value returned by the base implementation. If the base
503   // implementation decides to return true and force the allocation then we
504   // will leave it as such. On the other hand if the base implementation
505   // decides to return false the following code will not force the allocation
506   // as we are just looking to provide a hint.
507   bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
508       VirtReg, Order, Hints, MF, VRM, Matrix);
509   // We are interested in instructions that copy values to ACC/UACC.
510   // The copy into UACC will be simply a COPY to a subreg so we
511   // want to allocate the corresponding physical subreg for the source.
512   // The copy into ACC will be a BUILD_UACC so we want to allocate
513   // the same number UACC for the source.
514   for (MachineInstr &Use : MRI->reg_nodbg_instructions(VirtReg)) {
515     const MachineOperand *ResultOp = nullptr;
516     Register ResultReg;
517     switch (Use.getOpcode()) {
518     case TargetOpcode::COPY: {
519       ResultOp = &Use.getOperand(0);
520       ResultReg = ResultOp->getReg();
521       if (Register::isVirtualRegister(ResultReg) &&
522           MRI->getRegClass(ResultReg)->contains(PPC::UACC0) &&
523           VRM->hasPhys(ResultReg)) {
524         Register UACCPhys = VRM->getPhys(ResultReg);
525         Register HintReg = getSubReg(UACCPhys, ResultOp->getSubReg());
526         // Ensure that the hint is a VSRp register.
527         if (HintReg >= PPC::VSRp0 && HintReg <= PPC::VSRp31)
528           Hints.push_back(HintReg);
529       }
530       break;
531     }
532     case PPC::BUILD_UACC: {
533       ResultOp = &Use.getOperand(0);
534       ResultReg = ResultOp->getReg();
535       if (MRI->getRegClass(ResultReg)->contains(PPC::ACC0) &&
536           VRM->hasPhys(ResultReg)) {
537         Register ACCPhys = VRM->getPhys(ResultReg);
538         assert((ACCPhys >= PPC::ACC0 && ACCPhys <= PPC::ACC7) &&
539                "Expecting an ACC register for BUILD_UACC.");
540         Register HintReg = PPC::UACC0 + (ACCPhys - PPC::ACC0);
541         Hints.push_back(HintReg);
542       }
543       break;
544     }
545     }
546   }
547   return BaseImplRetVal;
548 }
549 
getRegPressureLimit(const TargetRegisterClass * RC,MachineFunction & MF) const550 unsigned PPCRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
551                                               MachineFunction &MF) const {
552   const PPCFrameLowering *TFI = getFrameLowering(MF);
553   const unsigned DefaultSafety = 1;
554 
555   switch (RC->getID()) {
556   default:
557     return 0;
558   case PPC::G8RC_NOX0RegClassID:
559   case PPC::GPRC_NOR0RegClassID:
560   case PPC::SPERCRegClassID:
561   case PPC::G8RCRegClassID:
562   case PPC::GPRCRegClassID: {
563     unsigned FP = TFI->hasFP(MF) ? 1 : 0;
564     return 32 - FP - DefaultSafety;
565   }
566   case PPC::F4RCRegClassID:
567   case PPC::F8RCRegClassID:
568   case PPC::VSLRCRegClassID:
569     return 32 - DefaultSafety;
570   case PPC::VFRCRegClassID:
571   case PPC::VRRCRegClassID: {
572     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
573     // Vector registers VR20-VR31 are reserved and cannot be used in the default
574     // Altivec ABI on AIX.
575     if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
576       return 20 - DefaultSafety;
577   }
578     return 32 - DefaultSafety;
579   case PPC::VSFRCRegClassID:
580   case PPC::VSSRCRegClassID:
581   case PPC::VSRCRegClassID: {
582     const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
583     if (!TM.getAIXExtendedAltivecABI() && Subtarget.isAIXABI())
584       // Vector registers VR20-VR31 are reserved and cannot be used in the
585       // default Altivec ABI on AIX.
586       return 52 - DefaultSafety;
587   }
588     return 64 - DefaultSafety;
589   case PPC::CRRCRegClassID:
590     return 8 - DefaultSafety;
591   }
592 }
593 
594 const TargetRegisterClass *
getLargestLegalSuperClass(const TargetRegisterClass * RC,const MachineFunction & MF) const595 PPCRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
596                                            const MachineFunction &MF) const {
597   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
598   const auto *DefaultSuperclass =
599       TargetRegisterInfo::getLargestLegalSuperClass(RC, MF);
600   if (Subtarget.hasVSX()) {
601     // With VSX, we can inflate various sub-register classes to the full VSX
602     // register set.
603 
604     // For Power9 we allow the user to enable GPR to vector spills.
605     // FIXME: Currently limited to spilling GP8RC. A follow on patch will add
606     // support to spill GPRC.
607     if (TM.isELFv2ABI() || Subtarget.isAIXABI()) {
608       if (Subtarget.hasP9Vector() && EnableGPRToVecSpills &&
609           RC == &PPC::G8RCRegClass) {
610         InflateGP8RC++;
611         return &PPC::SPILLTOVSRRCRegClass;
612       }
613       if (RC == &PPC::GPRCRegClass && EnableGPRToVecSpills)
614         InflateGPRC++;
615     }
616 
617     for (const auto *I = RC->getSuperClasses(); *I; ++I) {
618       if (getRegSizeInBits(**I) != getRegSizeInBits(*RC))
619         continue;
620 
621       switch ((*I)->getID()) {
622       case PPC::VSSRCRegClassID:
623         return Subtarget.hasP8Vector() ? *I : DefaultSuperclass;
624       case PPC::VSFRCRegClassID:
625       case PPC::VSRCRegClassID:
626         return *I;
627       case PPC::VSRpRCRegClassID:
628         return Subtarget.pairedVectorMemops() ? *I : DefaultSuperclass;
629       case PPC::ACCRCRegClassID:
630       case PPC::UACCRCRegClassID:
631         return Subtarget.hasMMA() ? *I : DefaultSuperclass;
632       }
633     }
634   }
635 
636   return DefaultSuperclass;
637 }
638 
639 //===----------------------------------------------------------------------===//
640 // Stack Frame Processing methods
641 //===----------------------------------------------------------------------===//
642 
643 /// lowerDynamicAlloc - Generate the code for allocating an object in the
644 /// current frame.  The sequence of code will be in the general form
645 ///
646 ///   addi   R0, SP, \#frameSize ; get the address of the previous frame
647 ///   stwxu  R0, SP, Rnegsize   ; add and update the SP with the negated size
648 ///   addi   Rnew, SP, \#maxCalFrameSize ; get the top of the allocation
649 ///
lowerDynamicAlloc(MachineBasicBlock::iterator II) const650 void PPCRegisterInfo::lowerDynamicAlloc(MachineBasicBlock::iterator II) const {
651   // Get the instruction.
652   MachineInstr &MI = *II;
653   // Get the instruction's basic block.
654   MachineBasicBlock &MBB = *MI.getParent();
655   // Get the basic block's function.
656   MachineFunction &MF = *MBB.getParent();
657   // Get the frame info.
658   MachineFrameInfo &MFI = MF.getFrameInfo();
659   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
660   // Get the instruction info.
661   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
662   // Determine whether 64-bit pointers are used.
663   bool LP64 = TM.isPPC64();
664   DebugLoc dl = MI.getDebugLoc();
665 
666   // Get the maximum call stack size.
667   unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
668   Align MaxAlign = MFI.getMaxAlign();
669   assert(isAligned(MaxAlign, maxCallFrameSize) &&
670          "Maximum call-frame size not sufficiently aligned");
671   (void)MaxAlign;
672 
673   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
674   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
675   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
676   bool KillNegSizeReg = MI.getOperand(1).isKill();
677   Register NegSizeReg = MI.getOperand(1).getReg();
678 
679   prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, Reg);
680   // Grow the stack and update the stack pointer link, then determine the
681   // address of new allocated space.
682   if (LP64) {
683     BuildMI(MBB, II, dl, TII.get(PPC::STDUX), PPC::X1)
684         .addReg(Reg, RegState::Kill)
685         .addReg(PPC::X1)
686         .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
687     BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), MI.getOperand(0).getReg())
688         .addReg(PPC::X1)
689         .addImm(maxCallFrameSize);
690   } else {
691     BuildMI(MBB, II, dl, TII.get(PPC::STWUX), PPC::R1)
692         .addReg(Reg, RegState::Kill)
693         .addReg(PPC::R1)
694         .addReg(NegSizeReg, getKillRegState(KillNegSizeReg));
695     BuildMI(MBB, II, dl, TII.get(PPC::ADDI), MI.getOperand(0).getReg())
696         .addReg(PPC::R1)
697         .addImm(maxCallFrameSize);
698   }
699 
700   // Discard the DYNALLOC instruction.
701   MBB.erase(II);
702 }
703 
704 /// To accomplish dynamic stack allocation, we have to calculate exact size
705 /// subtracted from the stack pointer according alignment information and get
706 /// previous frame pointer.
prepareDynamicAlloca(MachineBasicBlock::iterator II,Register & NegSizeReg,bool & KillNegSizeReg,Register & FramePointer) const707 void PPCRegisterInfo::prepareDynamicAlloca(MachineBasicBlock::iterator II,
708                                            Register &NegSizeReg,
709                                            bool &KillNegSizeReg,
710                                            Register &FramePointer) const {
711   // Get the instruction.
712   MachineInstr &MI = *II;
713   // Get the instruction's basic block.
714   MachineBasicBlock &MBB = *MI.getParent();
715   // Get the basic block's function.
716   MachineFunction &MF = *MBB.getParent();
717   // Get the frame info.
718   MachineFrameInfo &MFI = MF.getFrameInfo();
719   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
720   // Get the instruction info.
721   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
722   // Determine whether 64-bit pointers are used.
723   bool LP64 = TM.isPPC64();
724   DebugLoc dl = MI.getDebugLoc();
725   // Get the total frame size.
726   unsigned FrameSize = MFI.getStackSize();
727 
728   // Get stack alignments.
729   const PPCFrameLowering *TFI = getFrameLowering(MF);
730   Align TargetAlign = TFI->getStackAlign();
731   Align MaxAlign = MFI.getMaxAlign();
732 
733   // Determine the previous frame's address.  If FrameSize can't be
734   // represented as 16 bits or we need special alignment, then we load the
735   // previous frame's address from 0(SP).  Why not do an addis of the hi?
736   // Because R0 is our only safe tmp register and addi/addis treat R0 as zero.
737   // Constructing the constant and adding would take 3 instructions.
738   // Fortunately, a frame greater than 32K is rare.
739   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
740   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
741 
742   if (MaxAlign < TargetAlign && isInt<16>(FrameSize)) {
743     if (LP64)
744       BuildMI(MBB, II, dl, TII.get(PPC::ADDI8), FramePointer)
745           .addReg(PPC::X31)
746           .addImm(FrameSize);
747     else
748       BuildMI(MBB, II, dl, TII.get(PPC::ADDI), FramePointer)
749           .addReg(PPC::R31)
750           .addImm(FrameSize);
751   } else if (LP64) {
752     BuildMI(MBB, II, dl, TII.get(PPC::LD), FramePointer)
753         .addImm(0)
754         .addReg(PPC::X1);
755   } else {
756     BuildMI(MBB, II, dl, TII.get(PPC::LWZ), FramePointer)
757         .addImm(0)
758         .addReg(PPC::R1);
759   }
760   // Determine the actual NegSizeReg according to alignment info.
761   if (LP64) {
762     if (MaxAlign > TargetAlign) {
763       unsigned UnalNegSizeReg = NegSizeReg;
764       NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
765 
766       // Unfortunately, there is no andi, only andi., and we can't insert that
767       // here because we might clobber cr0 while it is live.
768       BuildMI(MBB, II, dl, TII.get(PPC::LI8), NegSizeReg)
769           .addImm(~(MaxAlign.value() - 1));
770 
771       unsigned NegSizeReg1 = NegSizeReg;
772       NegSizeReg = MF.getRegInfo().createVirtualRegister(G8RC);
773       BuildMI(MBB, II, dl, TII.get(PPC::AND8), NegSizeReg)
774           .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
775           .addReg(NegSizeReg1, RegState::Kill);
776       KillNegSizeReg = true;
777     }
778   } else {
779     if (MaxAlign > TargetAlign) {
780       unsigned UnalNegSizeReg = NegSizeReg;
781       NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
782 
783       // Unfortunately, there is no andi, only andi., and we can't insert that
784       // here because we might clobber cr0 while it is live.
785       BuildMI(MBB, II, dl, TII.get(PPC::LI), NegSizeReg)
786           .addImm(~(MaxAlign.value() - 1));
787 
788       unsigned NegSizeReg1 = NegSizeReg;
789       NegSizeReg = MF.getRegInfo().createVirtualRegister(GPRC);
790       BuildMI(MBB, II, dl, TII.get(PPC::AND), NegSizeReg)
791           .addReg(UnalNegSizeReg, getKillRegState(KillNegSizeReg))
792           .addReg(NegSizeReg1, RegState::Kill);
793       KillNegSizeReg = true;
794     }
795   }
796 }
797 
lowerPrepareProbedAlloca(MachineBasicBlock::iterator II) const798 void PPCRegisterInfo::lowerPrepareProbedAlloca(
799     MachineBasicBlock::iterator II) const {
800   MachineInstr &MI = *II;
801   // Get the instruction's basic block.
802   MachineBasicBlock &MBB = *MI.getParent();
803   // Get the basic block's function.
804   MachineFunction &MF = *MBB.getParent();
805   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
806   // Get the instruction info.
807   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
808   // Determine whether 64-bit pointers are used.
809   bool LP64 = TM.isPPC64();
810   DebugLoc dl = MI.getDebugLoc();
811   Register FramePointer = MI.getOperand(0).getReg();
812   const Register ActualNegSizeReg = MI.getOperand(1).getReg();
813   bool KillNegSizeReg = MI.getOperand(2).isKill();
814   Register NegSizeReg = MI.getOperand(2).getReg();
815   const MCInstrDesc &CopyInst = TII.get(LP64 ? PPC::OR8 : PPC::OR);
816   // RegAllocator might allocate FramePointer and NegSizeReg in the same phyreg.
817   if (FramePointer == NegSizeReg) {
818     assert(KillNegSizeReg && "FramePointer is a def and NegSizeReg is an use, "
819                              "NegSizeReg should be killed");
820     // FramePointer is clobbered earlier than the use of NegSizeReg in
821     // prepareDynamicAlloca, save NegSizeReg in ActualNegSizeReg to avoid
822     // misuse.
823     BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
824         .addReg(NegSizeReg)
825         .addReg(NegSizeReg);
826     NegSizeReg = ActualNegSizeReg;
827     KillNegSizeReg = false;
828   }
829   prepareDynamicAlloca(II, NegSizeReg, KillNegSizeReg, FramePointer);
830   // NegSizeReg might be updated in prepareDynamicAlloca if MaxAlign >
831   // TargetAlign.
832   if (NegSizeReg != ActualNegSizeReg)
833     BuildMI(MBB, II, dl, CopyInst, ActualNegSizeReg)
834         .addReg(NegSizeReg)
835         .addReg(NegSizeReg);
836   MBB.erase(II);
837 }
838 
lowerDynamicAreaOffset(MachineBasicBlock::iterator II) const839 void PPCRegisterInfo::lowerDynamicAreaOffset(
840     MachineBasicBlock::iterator II) const {
841   // Get the instruction.
842   MachineInstr &MI = *II;
843   // Get the instruction's basic block.
844   MachineBasicBlock &MBB = *MI.getParent();
845   // Get the basic block's function.
846   MachineFunction &MF = *MBB.getParent();
847   // Get the frame info.
848   MachineFrameInfo &MFI = MF.getFrameInfo();
849   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
850   // Get the instruction info.
851   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
852 
853   unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();
854   bool is64Bit = TM.isPPC64();
855   DebugLoc dl = MI.getDebugLoc();
856   BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI),
857           MI.getOperand(0).getReg())
858       .addImm(maxCallFrameSize);
859   MBB.erase(II);
860 }
861 
862 /// lowerCRSpilling - Generate the code for spilling a CR register. Instead of
863 /// reserving a whole register (R0), we scrounge for one here. This generates
864 /// code like this:
865 ///
866 ///   mfcr rA                  ; Move the conditional register into GPR rA.
867 ///   rlwinm rA, rA, SB, 0, 31 ; Shift the bits left so they are in CR0's slot.
868 ///   stw rA, FI               ; Store rA to the frame.
869 ///
lowerCRSpilling(MachineBasicBlock::iterator II,unsigned FrameIndex) const870 void PPCRegisterInfo::lowerCRSpilling(MachineBasicBlock::iterator II,
871                                       unsigned FrameIndex) const {
872   // Get the instruction.
873   MachineInstr &MI = *II;       // ; SPILL_CR <SrcReg>, <offset>
874   // Get the instruction's basic block.
875   MachineBasicBlock &MBB = *MI.getParent();
876   MachineFunction &MF = *MBB.getParent();
877   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
878   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
879   DebugLoc dl = MI.getDebugLoc();
880 
881   bool LP64 = TM.isPPC64();
882   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
883   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
884 
885   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
886   Register SrcReg = MI.getOperand(0).getReg();
887 
888   // We need to store the CR in the low 4-bits of the saved value. First, issue
889   // an MFOCRF to save all of the CRBits and, if needed, kill the SrcReg.
890   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
891       .addReg(SrcReg, getKillRegState(MI.getOperand(0).isKill()));
892 
893   // If the saved register wasn't CR0, shift the bits left so that they are in
894   // CR0's slot.
895   if (SrcReg != PPC::CR0) {
896     Register Reg1 = Reg;
897     Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
898 
899     // rlwinm rA, rA, ShiftBits, 0, 31.
900     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
901       .addReg(Reg1, RegState::Kill)
902       .addImm(getEncodingValue(SrcReg) * 4)
903       .addImm(0)
904       .addImm(31);
905   }
906 
907   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
908                     .addReg(Reg, RegState::Kill),
909                     FrameIndex);
910 
911   // Discard the pseudo instruction.
912   MBB.erase(II);
913 }
914 
lowerCRRestore(MachineBasicBlock::iterator II,unsigned FrameIndex) const915 void PPCRegisterInfo::lowerCRRestore(MachineBasicBlock::iterator II,
916                                       unsigned FrameIndex) const {
917   // Get the instruction.
918   MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_CR <offset>
919   // Get the instruction's basic block.
920   MachineBasicBlock &MBB = *MI.getParent();
921   MachineFunction &MF = *MBB.getParent();
922   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
923   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
924   DebugLoc dl = MI.getDebugLoc();
925 
926   bool LP64 = TM.isPPC64();
927   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
928   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
929 
930   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
931   Register DestReg = MI.getOperand(0).getReg();
932   assert(MI.definesRegister(DestReg) &&
933     "RESTORE_CR does not define its destination");
934 
935   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
936                               Reg), FrameIndex);
937 
938   // If the reloaded register isn't CR0, shift the bits right so that they are
939   // in the right CR's slot.
940   if (DestReg != PPC::CR0) {
941     Register Reg1 = Reg;
942     Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
943 
944     unsigned ShiftBits = getEncodingValue(DestReg)*4;
945     // rlwinm r11, r11, 32-ShiftBits, 0, 31.
946     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
947              .addReg(Reg1, RegState::Kill).addImm(32-ShiftBits).addImm(0)
948              .addImm(31);
949   }
950 
951   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF), DestReg)
952              .addReg(Reg, RegState::Kill);
953 
954   // Discard the pseudo instruction.
955   MBB.erase(II);
956 }
957 
lowerCRBitSpilling(MachineBasicBlock::iterator II,unsigned FrameIndex) const958 void PPCRegisterInfo::lowerCRBitSpilling(MachineBasicBlock::iterator II,
959                                          unsigned FrameIndex) const {
960   // Get the instruction.
961   MachineInstr &MI = *II;       // ; SPILL_CRBIT <SrcReg>, <offset>
962   // Get the instruction's basic block.
963   MachineBasicBlock &MBB = *MI.getParent();
964   MachineFunction &MF = *MBB.getParent();
965   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
966   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
967   const TargetRegisterInfo* TRI = Subtarget.getRegisterInfo();
968   DebugLoc dl = MI.getDebugLoc();
969 
970   bool LP64 = TM.isPPC64();
971   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
972   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
973 
974   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
975   Register SrcReg = MI.getOperand(0).getReg();
976 
977   // Search up the BB to find the definition of the CR bit.
978   MachineBasicBlock::reverse_iterator Ins = MI;
979   MachineBasicBlock::reverse_iterator Rend = MBB.rend();
980   ++Ins;
981   unsigned CRBitSpillDistance = 0;
982   bool SeenUse = false;
983   for (; Ins != Rend; ++Ins) {
984     // Definition found.
985     if (Ins->modifiesRegister(SrcReg, TRI))
986       break;
987     // Use found.
988     if (Ins->readsRegister(SrcReg, TRI))
989       SeenUse = true;
990     // Unable to find CR bit definition within maximum search distance.
991     if (CRBitSpillDistance == MaxCRBitSpillDist) {
992       Ins = MI;
993       break;
994     }
995     // Skip debug instructions when counting CR bit spill distance.
996     if (!Ins->isDebugInstr())
997       CRBitSpillDistance++;
998   }
999 
1000   // Unable to find the definition of the CR bit in the MBB.
1001   if (Ins == MBB.rend())
1002     Ins = MI;
1003 
1004   bool SpillsKnownBit = false;
1005   // There is no need to extract the CR bit if its value is already known.
1006   switch (Ins->getOpcode()) {
1007   case PPC::CRUNSET:
1008     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LI8 : PPC::LI), Reg)
1009       .addImm(0);
1010     SpillsKnownBit = true;
1011     break;
1012   case PPC::CRSET:
1013     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LIS8 : PPC::LIS), Reg)
1014       .addImm(-32768);
1015     SpillsKnownBit = true;
1016     break;
1017   default:
1018     // On Power10, we can use SETNBC to spill all CR bits. SETNBC will set all
1019     // bits (specifically, it produces a -1 if the CR bit is set). Ultimately,
1020     // the bit that is of importance to us is bit 32 (bit 0 of a 32-bit
1021     // register), and SETNBC will set this.
1022     if (Subtarget.isISA3_1()) {
1023       BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETNBC8 : PPC::SETNBC), Reg)
1024           .addReg(SrcReg, RegState::Undef);
1025       break;
1026     }
1027 
1028     // On Power9, we can use SETB to extract the LT bit. This only works for
1029     // the LT bit since SETB produces -1/1/0 for LT/GT/<neither>. So the value
1030     // of the bit we care about (32-bit sign bit) will be set to the value of
1031     // the LT bit (regardless of the other bits in the CR field).
1032     if (Subtarget.isISA3_0()) {
1033       if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR1LT ||
1034           SrcReg == PPC::CR2LT || SrcReg == PPC::CR3LT ||
1035           SrcReg == PPC::CR4LT || SrcReg == PPC::CR5LT ||
1036           SrcReg == PPC::CR6LT || SrcReg == PPC::CR7LT) {
1037         BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::SETB8 : PPC::SETB), Reg)
1038           .addReg(getCRFromCRBit(SrcReg), RegState::Undef);
1039         break;
1040       }
1041     }
1042 
1043     // We need to move the CR field that contains the CR bit we are spilling.
1044     // The super register may not be explicitly defined (i.e. it can be defined
1045     // by a CR-logical that only defines the subreg) so we state that the CR
1046     // field is undef. Also, in order to preserve the kill flag on the CR bit,
1047     // we add it as an implicit use.
1048     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), Reg)
1049       .addReg(getCRFromCRBit(SrcReg), RegState::Undef)
1050       .addReg(SrcReg,
1051               RegState::Implicit | getKillRegState(MI.getOperand(0).isKill()));
1052 
1053     // If the saved register wasn't CR0LT, shift the bits left so that the bit
1054     // to store is the first one. Mask all but that bit.
1055     Register Reg1 = Reg;
1056     Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1057 
1058     // rlwinm rA, rA, ShiftBits, 0, 0.
1059     BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWINM8 : PPC::RLWINM), Reg)
1060       .addReg(Reg1, RegState::Kill)
1061       .addImm(getEncodingValue(SrcReg))
1062       .addImm(0).addImm(0);
1063   }
1064   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::STW8 : PPC::STW))
1065                     .addReg(Reg, RegState::Kill),
1066                     FrameIndex);
1067 
1068   bool KillsCRBit = MI.killsRegister(SrcReg, TRI);
1069   // Discard the pseudo instruction.
1070   MBB.erase(II);
1071   if (SpillsKnownBit && KillsCRBit && !SeenUse) {
1072     Ins->setDesc(TII.get(PPC::UNENCODED_NOP));
1073     Ins->RemoveOperand(0);
1074   }
1075 }
1076 
lowerCRBitRestore(MachineBasicBlock::iterator II,unsigned FrameIndex) const1077 void PPCRegisterInfo::lowerCRBitRestore(MachineBasicBlock::iterator II,
1078                                       unsigned FrameIndex) const {
1079   // Get the instruction.
1080   MachineInstr &MI = *II;       // ; <DestReg> = RESTORE_CRBIT <offset>
1081   // Get the instruction's basic block.
1082   MachineBasicBlock &MBB = *MI.getParent();
1083   MachineFunction &MF = *MBB.getParent();
1084   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1085   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1086   DebugLoc dl = MI.getDebugLoc();
1087 
1088   bool LP64 = TM.isPPC64();
1089   const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
1090   const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
1091 
1092   Register Reg = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1093   Register DestReg = MI.getOperand(0).getReg();
1094   assert(MI.definesRegister(DestReg) &&
1095     "RESTORE_CRBIT does not define its destination");
1096 
1097   addFrameReference(BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::LWZ8 : PPC::LWZ),
1098                               Reg), FrameIndex);
1099 
1100   BuildMI(MBB, II, dl, TII.get(TargetOpcode::IMPLICIT_DEF), DestReg);
1101 
1102   Register RegO = MF.getRegInfo().createVirtualRegister(LP64 ? G8RC : GPRC);
1103   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MFOCRF8 : PPC::MFOCRF), RegO)
1104           .addReg(getCRFromCRBit(DestReg));
1105 
1106   unsigned ShiftBits = getEncodingValue(DestReg);
1107   // rlwimi r11, r10, 32-ShiftBits, ..., ...
1108   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::RLWIMI8 : PPC::RLWIMI), RegO)
1109       .addReg(RegO, RegState::Kill)
1110       .addReg(Reg, RegState::Kill)
1111       .addImm(ShiftBits ? 32 - ShiftBits : 0)
1112       .addImm(ShiftBits)
1113       .addImm(ShiftBits);
1114 
1115   BuildMI(MBB, II, dl, TII.get(LP64 ? PPC::MTOCRF8 : PPC::MTOCRF),
1116           getCRFromCRBit(DestReg))
1117       .addReg(RegO, RegState::Kill)
1118       // Make sure we have a use dependency all the way through this
1119       // sequence of instructions. We can't have the other bits in the CR
1120       // modified in between the mfocrf and the mtocrf.
1121       .addReg(getCRFromCRBit(DestReg), RegState::Implicit);
1122 
1123   // Discard the pseudo instruction.
1124   MBB.erase(II);
1125 }
1126 
emitAccCopyInfo(MachineBasicBlock & MBB,MCRegister DestReg,MCRegister SrcReg)1127 void PPCRegisterInfo::emitAccCopyInfo(MachineBasicBlock &MBB,
1128                                       MCRegister DestReg, MCRegister SrcReg) {
1129 #ifdef NDEBUG
1130   return;
1131 #else
1132   if (ReportAccMoves) {
1133     std::string Dest = PPC::ACCRCRegClass.contains(DestReg) ? "acc" : "uacc";
1134     std::string Src = PPC::ACCRCRegClass.contains(SrcReg) ? "acc" : "uacc";
1135     dbgs() << "Emitting copy from " << Src << " to " << Dest << ":\n";
1136     MBB.dump();
1137   }
1138 #endif
1139 }
1140 
emitAccSpillRestoreInfo(MachineBasicBlock & MBB,bool IsPrimed,bool IsRestore)1141 static void emitAccSpillRestoreInfo(MachineBasicBlock &MBB, bool IsPrimed,
1142                                     bool IsRestore) {
1143 #ifdef NDEBUG
1144   return;
1145 #else
1146   if (ReportAccMoves) {
1147     dbgs() << "Emitting " << (IsPrimed ? "acc" : "uacc") << " register "
1148            << (IsRestore ? "restore" : "spill") << ":\n";
1149     MBB.dump();
1150   }
1151 #endif
1152 }
1153 
1154 /// lowerACCSpilling - Generate the code for spilling the accumulator register.
1155 /// Similarly to other spills/reloads that use pseudo-ops, we do not actually
1156 /// eliminate the FrameIndex here nor compute the stack offset. We simply
1157 /// create a real instruction with an FI and rely on eliminateFrameIndex to
1158 /// handle the FI elimination.
lowerACCSpilling(MachineBasicBlock::iterator II,unsigned FrameIndex) const1159 void PPCRegisterInfo::lowerACCSpilling(MachineBasicBlock::iterator II,
1160                                        unsigned FrameIndex) const {
1161   MachineInstr &MI = *II; // SPILL_ACC <SrcReg>, <offset>
1162   MachineBasicBlock &MBB = *MI.getParent();
1163   MachineFunction &MF = *MBB.getParent();
1164   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1165   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1166   DebugLoc DL = MI.getDebugLoc();
1167   Register SrcReg = MI.getOperand(0).getReg();
1168   bool IsKilled = MI.getOperand(0).isKill();
1169 
1170   bool IsPrimed = PPC::ACCRCRegClass.contains(SrcReg);
1171   Register Reg =
1172       PPC::VSRp0 + (SrcReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
1173   bool IsLittleEndian = Subtarget.isLittleEndian();
1174 
1175   emitAccSpillRestoreInfo(MBB, IsPrimed, false);
1176 
1177   // De-prime the register being spilled, create two stores for the pair
1178   // subregisters accounting for endianness and then re-prime the register if
1179   // it isn't killed.  This uses the Offset parameter to addFrameReference() to
1180   // adjust the offset of the store that is within the 64-byte stack slot.
1181   if (IsPrimed)
1182     BuildMI(MBB, II, DL, TII.get(PPC::XXMFACC), SrcReg).addReg(SrcReg);
1183   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
1184                         .addReg(Reg, getKillRegState(IsKilled)),
1185                     FrameIndex, IsLittleEndian ? 32 : 0);
1186   addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STXVP))
1187                         .addReg(Reg + 1, getKillRegState(IsKilled)),
1188                     FrameIndex, IsLittleEndian ? 0 : 32);
1189   if (IsPrimed && !IsKilled)
1190     BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), SrcReg).addReg(SrcReg);
1191 
1192   // Discard the pseudo instruction.
1193   MBB.erase(II);
1194 }
1195 
1196 /// lowerACCRestore - Generate the code to restore the accumulator register.
lowerACCRestore(MachineBasicBlock::iterator II,unsigned FrameIndex) const1197 void PPCRegisterInfo::lowerACCRestore(MachineBasicBlock::iterator II,
1198                                       unsigned FrameIndex) const {
1199   MachineInstr &MI = *II; // <DestReg> = RESTORE_ACC <offset>
1200   MachineBasicBlock &MBB = *MI.getParent();
1201   MachineFunction &MF = *MBB.getParent();
1202   const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
1203   const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
1204   DebugLoc DL = MI.getDebugLoc();
1205 
1206   Register DestReg = MI.getOperand(0).getReg();
1207   assert(MI.definesRegister(DestReg) &&
1208          "RESTORE_ACC does not define its destination");
1209 
1210   bool IsPrimed = PPC::ACCRCRegClass.contains(DestReg);
  Register Reg =
      PPC::VSRp0 + (DestReg - (IsPrimed ? PPC::ACC0 : PPC::UACC0)) * 2;
  bool IsLittleEndian = Subtarget.isLittleEndian();

  emitAccSpillRestoreInfo(MBB, IsPrimed, true);

  // Create two loads for the pair subregisters accounting for endianness and
  // then prime the accumulator register being restored.
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), Reg),
                    FrameIndex, IsLittleEndian ? 32 : 0);
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LXVP), Reg + 1),
                    FrameIndex, IsLittleEndian ? 0 : 32);
  if (IsPrimed)
    BuildMI(MBB, II, DL, TII.get(PPC::XXMTACC), DestReg).addReg(DestReg);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

/// lowerQuadwordSpilling - Generate code to spill a paired general register.
void PPCRegisterInfo::lowerQuadwordSpilling(MachineBasicBlock::iterator II,
                                            unsigned FrameIndex) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register SrcReg = MI.getOperand(0).getReg();
  bool IsKilled = MI.getOperand(0).isKill();

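  // A G8p pair register aliases two consecutive 64-bit GPRs; compute the
  // first GPR of the pair.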
  Register Reg = PPC::X0 + (SrcReg - PPC::G8p0) * 2;
  bool IsLittleEndian = Subtarget.isLittleEndian();

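  // Store each 64-bit half of the pair into the 16-byte slot, swapping the
  // in-slot offsets on little-endian targets.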
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STD))
                        .addReg(Reg, getKillRegState(IsKilled)),
                    FrameIndex, IsLittleEndian ? 8 : 0);
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::STD))
                        .addReg(Reg + 1, getKillRegState(IsKilled)),
                    FrameIndex, IsLittleEndian ? 0 : 8);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

/// lowerQuadwordRestore - Generate code to restore a paired general register.
void PPCRegisterInfo::lowerQuadwordRestore(MachineBasicBlock::iterator II,
                                           unsigned FrameIndex) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register DestReg = MI.getOperand(0).getReg();
  assert(MI.definesRegister(DestReg) &&
         "RESTORE_QUADWORD does not define its destination");

  Register Reg = PPC::X0 + (DestReg - PPC::G8p0) * 2;
  bool IsLittleEndian = Subtarget.isLittleEndian();

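  // Reload both halves of the pair, mirroring the endian-dependent in-slot
  // offsets used when spilling.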
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LD), Reg), FrameIndex,
                    IsLittleEndian ? 8 : 0);
  addFrameReference(BuildMI(MBB, II, DL, TII.get(PPC::LD), Reg + 1), FrameIndex,
                    IsLittleEndian ? 0 : 8);

  // Discard the pseudo instruction.
  MBB.erase(II);
}

bool PPCRegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           Register Reg, int &FrameIdx) const {
  // For the nonvolatile condition registers (CR2, CR3, CR4) return true to
  // prevent allocating an additional frame slot.
  // For 64-bit ELF and AIX, the CR save area is in the linkage area at SP+8;
  // for 32-bit AIX the CR save area is in the linkage area at SP+4.
  // We have created a FrameIndex to that spill slot to keep the CalleeSaveInfos
  // valid.
  // For 32-bit ELF, we have previously created the stack slot if needed, so
  // return its FrameIdx.
  if (PPC::CR2 <= Reg && Reg <= PPC::CR4) {
    FrameIdx = MF.getInfo<PPCFunctionInfo>()->getCRSpillFrameIndex();
    return true;
  }
  return false;
}

// If the offset must be a multiple of some value, return what that value is.
static unsigned offsetMinAlignForOpcode(unsigned OpC) {
  switch (OpC) {
  default:
    return 1;
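  // DS-form loads/stores (and pseudos that may expand to them) encode a
  // displacement whose low two bits must be zero, so the offset has to be a
  // multiple of 4.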
  case PPC::LWA:
  case PPC::LWA_32:
  case PPC::LD:
  case PPC::LDU:
  case PPC::STD:
  case PPC::STDU:
  case PPC::DFLOADf32:
  case PPC::DFLOADf64:
  case PPC::DFSTOREf32:
  case PPC::DFSTOREf64:
  case PPC::LXSD:
  case PPC::LXSSP:
  case PPC::STXSD:
  case PPC::STXSSP:
  case PPC::STQ:
    return 4;
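  // SPE doubleword loads/stores scale their unsigned displacement by 8.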
  case PPC::EVLDD:
  case PPC::EVSTDD:
    return 8;
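  // DQ-form memory operations encode the displacement scaled by 16, so the
  // offset must be 16-byte aligned.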
  case PPC::LXV:
  case PPC::STXV:
  case PPC::LQ:
  case PPC::LXVP:
  case PPC::STXVP:
    return 16;
  }
}

// If the offset must be a multiple of some value, return what that value is.
static unsigned offsetMinAlign(const MachineInstr &MI) {
  unsigned OpC = MI.getOpcode();
  return offsetMinAlignForOpcode(OpC);
}

// Return the OffsetOperandNo given the FIOperandNum (and the instruction).
static unsigned getOffsetONFromFION(const MachineInstr &MI,
                                    unsigned FIOperandNum) {
  // Take into account whether it's an add or mem instruction: memory forms
  // carry the frame index as operand 2 and the offset as operand 1, while the
  // add (ADDI) form has them reversed.
  unsigned OffsetOperandNo = (FIOperandNum == 2) ? 1 : 2;
  if (MI.isInlineAsm())
    OffsetOperandNo = FIOperandNum - 1;
  else if (MI.getOpcode() == TargetOpcode::STACKMAP ||
           MI.getOpcode() == TargetOpcode::PATCHPOINT)
    OffsetOperandNo = FIOperandNum + 1;

  return OffsetOperandNo;
}

void
PPCRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  // Get the instruction.
  MachineInstr &MI = *II;
  // Get the instruction's basic block.
  MachineBasicBlock &MBB = *MI.getParent();
  // Get the basic block's function.
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  // Get the instruction info.
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  // Get the frame info.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  DebugLoc dl = MI.getDebugLoc();

  unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);

  // Get the frame index.
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

  // Get the frame pointer save index.  Users of this index are primarily
  // DYNALLOC instructions.
  PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
  int FPSI = FI->getFramePointerSaveIndex();
  // Get the instruction opcode.
  unsigned OpC = MI.getOpcode();

  if (OpC == PPC::DYNAREAOFFSET || OpC == PPC::DYNAREAOFFSET8) {
    lowerDynamicAreaOffset(II);
    return;
  }

  // Special case for dynamic alloca.
  if (FPSI && FrameIndex == FPSI &&
      (OpC == PPC::DYNALLOC || OpC == PPC::DYNALLOC8)) {
    lowerDynamicAlloc(II);
    return;
  }

  if (FPSI && FrameIndex == FPSI &&
      (OpC == PPC::PREPARE_PROBED_ALLOCA_64 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_32 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_64 ||
       OpC == PPC::PREPARE_PROBED_ALLOCA_NEGSIZE_SAME_REG_32)) {
    lowerPrepareProbedAlloca(II);
    return;
  }

  // Special case for pseudo-ops SPILL_CR and RESTORE_CR, etc.
  if (OpC == PPC::SPILL_CR) {
    lowerCRSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_CR) {
    lowerCRRestore(II, FrameIndex);
    return;
  } else if (OpC == PPC::SPILL_CRBIT) {
    lowerCRBitSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_CRBIT) {
    lowerCRBitRestore(II, FrameIndex);
    return;
  } else if (OpC == PPC::SPILL_ACC || OpC == PPC::SPILL_UACC) {
    lowerACCSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_ACC || OpC == PPC::RESTORE_UACC) {
    lowerACCRestore(II, FrameIndex);
    return;
  } else if (OpC == PPC::SPILL_QUADWORD) {
    lowerQuadwordSpilling(II, FrameIndex);
    return;
  } else if (OpC == PPC::RESTORE_QUADWORD) {
    lowerQuadwordRestore(II, FrameIndex);
    return;
  }

  // Replace the frame index operand with a base register: the base pointer
  // for fixed objects (negative frame indices), otherwise the frame register,
  // i.e. GPR1 (SP) or GPR31 (FP).
  MI.getOperand(FIOperandNum).ChangeToRegister(
    FrameIndex < 0 ? getBaseRegister(MF) : getFrameRegister(MF), false);

  // If the instruction is not present in ImmToIdxMap, then it has no immediate
  // form (and must be r+r).
  bool noImmForm = !MI.isInlineAsm() && OpC != TargetOpcode::STACKMAP &&
                   OpC != TargetOpcode::PATCHPOINT && !ImmToIdxMap.count(OpC);

  // Now add the frame object offset to the offset from r1.
  int Offset = MFI.getObjectOffset(FrameIndex);
  Offset += MI.getOperand(OffsetOperandNo).getImm();

  // If we're not using a Frame Pointer that has been set to the value of the
  // SP before having the stack size subtracted from it, then add the stack size
  // to Offset to get the correct offset.
  // Naked functions have stack size 0, although getStackSize may not reflect
  // that because we didn't call all the pieces that compute it for naked
  // functions.
  if (!MF.getFunction().hasFnAttribute(Attribute::Naked)) {
    if (!(hasBasePointer(MF) && FrameIndex < 0))
      Offset += MFI.getStackSize();
  }

  // If we encounter an LXVP/STXVP with an offset that doesn't fit, we can
  // transform it to the prefixed version so we don't have to use the XForm.
  if ((OpC == PPC::LXVP || OpC == PPC::STXVP) &&
      (!isInt<16>(Offset) || (Offset % offsetMinAlign(MI)) != 0) &&
      Subtarget.hasPrefixInstrs()) {
    unsigned NewOpc = OpC == PPC::LXVP ? PPC::PLXVP : PPC::PSTXVP;
    MI.setDesc(TII.get(NewOpc));
    OpC = NewOpc;
  }

  // If we can, encode the offset directly into the instruction.  If this is a
  // normal PPC "ri" instruction, any 16-bit value can be safely encoded.  If
  // this is a PPC64 "ix" instruction, only a 16-bit value with the low two bits
  // clear can be encoded.  This is extremely uncommon, because normally you
  // only "std" to a stack slot that is at least 4-byte aligned, but it can
  // happen in invalid code.
  assert(OpC != PPC::DBG_VALUE &&
         "This should be handled in a target-independent way");
  // FIXME: This should be factored out to a separate function as prefixed
  // instructions add a number of opcodes for which we can use 34-bit imm.
  bool OffsetFitsMnemonic = (OpC == PPC::EVSTDD || OpC == PPC::EVLDD) ?
                            isUInt<8>(Offset) :
                            isInt<16>(Offset);
  if (TII.isPrefixed(MI.getOpcode()))
    OffsetFitsMnemonic = isInt<34>(Offset);
  if (!noImmForm && ((OffsetFitsMnemonic &&
                      ((Offset % offsetMinAlign(MI)) == 0)) ||
                     OpC == TargetOpcode::STACKMAP ||
                     OpC == TargetOpcode::PATCHPOINT)) {
    MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);
    return;
  }

  // The offset doesn't fit in the instruction's immediate field, so build the
  // full offset in a register (a virtual register here; the register
  // scavenger will assign a physical one) and use the indexed form of the
  // instruction instead.

  bool is64Bit = TM.isPPC64();
  const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
  const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
  const TargetRegisterClass *RC = is64Bit ? G8RC : GPRC;
  Register SRegHi = MF.getRegInfo().createVirtualRegister(RC),
           SReg = MF.getRegInfo().createVirtualRegister(RC);

  // Insert a set of rA with the full offset value before the ld, st, or add.
  if (isInt<16>(Offset))
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LI8 : PPC::LI), SReg)
      .addImm(Offset);
  else {
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::LIS8 : PPC::LIS), SRegHi)
      .addImm(Offset >> 16);
    BuildMI(MBB, II, dl, TII.get(is64Bit ? PPC::ORI8 : PPC::ORI), SReg)
      .addReg(SRegHi, RegState::Kill)
      .addImm(Offset);
  }

  // Convert into indexed form of the instruction:
  //
  //   sth 0:rA, 1:imm 2:(rB) ==> sthx 0:rA, 2:rB, 1:r0
  //   addi 0:rA 1:rB, 2:imm  ==> add  0:rA, 1:rB, 2:r0
  unsigned OperandBase;

  if (noImmForm)
    OperandBase = 1;
  else if (OpC != TargetOpcode::INLINEASM &&
           OpC != TargetOpcode::INLINEASM_BR) {
    assert(ImmToIdxMap.count(OpC) &&
           "No indexed form of load or store available!");
    unsigned NewOpcode = ImmToIdxMap.find(OpC)->second;
    MI.setDesc(TII.get(NewOpcode));
    OperandBase = 1;
  } else {
    OperandBase = OffsetOperandNo;
  }

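  // Rewrite the operands for the indexed form: the frame base register
  // becomes the RA operand and the register holding the materialized offset
  // becomes the RB operand, marked as killed.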
  Register StackReg = MI.getOperand(FIOperandNum).getReg();
  MI.getOperand(OperandBase).ChangeToRegister(StackReg, false);
  MI.getOperand(OperandBase + 1).ChangeToRegister(SReg, false, false, true);
}

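// The frame register is R31/X31 when the function maintains a frame pointer;
// otherwise the stack pointer R1/X1 is used directly.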
Register PPCRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const PPCFrameLowering *TFI = getFrameLowering(MF);

  if (!TM.isPPC64())
    return TFI->hasFP(MF) ? PPC::R31 : PPC::R1;
  else
    return TFI->hasFP(MF) ? PPC::X31 : PPC::X1;
}

Register PPCRegisterInfo::getBaseRegister(const MachineFunction &MF) const {
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  if (!hasBasePointer(MF))
    return getFrameRegister(MF);

  if (TM.isPPC64())
    return PPC::X30;

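  // On 32-bit SVR4 with position-independent code, R30 is reserved as the
  // PIC/GOT base, so fall back to R29 for the base pointer.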
  if (Subtarget.isSVR4ABI() && TM.isPositionIndependent())
    return PPC::R29;

  return PPC::R30;
}

bool PPCRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  if (!EnableBasePointer)
    return false;
  if (AlwaysBasePointer)
    return true;

  // If we need to realign the stack, then the stack pointer can no longer
  // serve as an offset into the caller's stack space. As a result, we need a
  // base pointer.
  return hasStackRealignment(MF);
}

/// Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackSlotAllocation to determine which frame index
/// references it should create new base registers for.
bool PPCRegisterInfo::
needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
  assert(Offset < 0 && "Local offset must be negative");

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores that have
  // an r+i form. Return false for everything else.
  unsigned OpC = MI->getOpcode();
  if (!ImmToIdxMap.count(OpC))
    return false;

  // Don't generate a new virtual base register just to add zero to it.
  if ((OpC == PPC::ADDI || OpC == PPC::ADDI8) &&
      MI->getOperand(2).getImm() == 0)
    return false;

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCFrameLowering *TFI = getFrameLowering(MF);
  unsigned StackEst = TFI->determineFrameLayout(MF, true);

  // If we likely don't need a stack frame, then we probably don't need a
  // virtual base register either.
  if (!StackEst)
    return false;

  // Estimate an offset from the stack pointer.
  // The incoming offset is relative to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += StackEst;

  // The frame pointer will point to the end of the stack, so estimate the
  // offset as the difference between the object offset and the FP location.
  return !isFrameOffsetLegal(MI, getBaseRegister(MF), Offset);
}

/// Insert defining instruction(s) for BaseReg to
/// be a pointer to FrameIdx at the beginning of the basic block.
Register PPCRegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB,
                                                       int FrameIdx,
                                                       int64_t Offset) const {
  unsigned ADDriOpc = TM.isPPC64() ? PPC::ADDI8 : PPC::ADDI;

  MachineBasicBlock::iterator Ins = MBB->begin();
  DebugLoc DL;                  // Defaults to "unknown"
  if (Ins != MBB->end())
    DL = Ins->getDebugLoc();

  const MachineFunction &MF = *MBB->getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const MCInstrDesc &MCID = TII.get(ADDriOpc);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetRegisterClass *RC = getPointerRegClass(MF);
  Register BaseReg = MRI.createVirtualRegister(RC);
  MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this, MF));

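  // Materialize BaseReg = FrameIdx + Offset with an ADDI/ADDI8 at the top of
  // the block; the frame index operand is rewritten later during frame index
  // elimination.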
  BuildMI(*MBB, Ins, DL, MCID, BaseReg)
    .addFrameIndex(FrameIdx).addImm(Offset);

  return BaseReg;
}

void PPCRegisterInfo::resolveFrameIndex(MachineInstr &MI, Register BaseReg,
                                        int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI.getOperand(FIOperandNum).isFI()) {
    ++FIOperandNum;
    assert(FIOperandNum < MI.getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

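  // Point the instruction at the new base register and fold the extra delta
  // into its existing immediate offset.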
  MI.getOperand(FIOperandNum).ChangeToRegister(BaseReg, false);
  unsigned OffsetOperandNo = getOffsetONFromFION(MI, FIOperandNum);
  Offset += MI.getOperand(OffsetOperandNo).getImm();
  MI.getOperand(OffsetOperandNo).ChangeToImmediate(Offset);

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const PPCSubtarget &Subtarget = MF.getSubtarget<PPCSubtarget>();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  const MCInstrDesc &MCID = MI.getDesc();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MRI.constrainRegClass(BaseReg,
                        TII.getRegClass(MCID, FIOperandNum, this, MF));
}

bool PPCRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
                                         Register BaseReg,
                                         int64_t Offset) const {
  unsigned FIOperandNum = 0;
  while (!MI->getOperand(FIOperandNum).isFI()) {
    ++FIOperandNum;
    assert(FIOperandNum < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");
  }

  unsigned OffsetOperandNo = getOffsetONFromFION(*MI, FIOperandNum);
  Offset += MI->getOperand(OffsetOperandNo).getImm();

  return MI->getOpcode() == PPC::DBG_VALUE || // DBG_VALUE is always Reg+Imm
         MI->getOpcode() == TargetOpcode::STACKMAP ||
         MI->getOpcode() == TargetOpcode::PATCHPOINT ||
         (isInt<16>(Offset) && (Offset % offsetMinAlign(*MI)) == 0);
}