1 //===-- PPCFrameLowering.cpp - PPC Frame Information ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the PPC implementation of TargetFrameLowering class.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "MCTargetDesc/PPCPredicates.h"
14 #include "PPCFrameLowering.h"
15 #include "PPCInstrBuilder.h"
16 #include "PPCInstrInfo.h"
17 #include "PPCMachineFunctionInfo.h"
18 #include "PPCSubtarget.h"
19 #include "PPCTargetMachine.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineModuleInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/RegisterScavenging.h"
27 #include "llvm/IR/Function.h"
28 #include "llvm/Target/TargetOptions.h"
29
using namespace llvm;

#define DEBUG_TYPE "framelowering"
// Counters reported by -stats for the prologue/epilogue vector-spill and
// stack-probing paths.
STATISTIC(NumPESpillVSR, "Number of spills to vector in prologue");
STATISTIC(NumPEReloadVSR, "Number of reloads from vector in epilogue");
STATISTIC(NumPrologProbed, "Number of prologues probed");

// Hidden, off-by-default knob: allow the prologue to spill callee-saved GPRs
// into vector registers instead of stack slots.
static cl::opt<bool>
EnablePEVectorSpills("ppc-enable-pe-vector-spills",
                     cl::desc("Enable spills in prologue to vector registers."),
                     cl::init(false), cl::Hidden);
41
computeReturnSaveOffset(const PPCSubtarget & STI)42 static unsigned computeReturnSaveOffset(const PPCSubtarget &STI) {
43 if (STI.isAIXABI())
44 return STI.isPPC64() ? 16 : 8;
45 // SVR4 ABI:
46 return STI.isPPC64() ? 16 : 4;
47 }
48
computeTOCSaveOffset(const PPCSubtarget & STI)49 static unsigned computeTOCSaveOffset(const PPCSubtarget &STI) {
50 if (STI.isAIXABI())
51 return STI.isPPC64() ? 40 : 20;
52 return STI.isELFv2ABI() ? 24 : 40;
53 }
54
computeFramePointerSaveOffset(const PPCSubtarget & STI)55 static unsigned computeFramePointerSaveOffset(const PPCSubtarget &STI) {
56 // First slot in the general register save area.
57 return STI.isPPC64() ? -8U : -4U;
58 }
59
computeLinkageSize(const PPCSubtarget & STI)60 static unsigned computeLinkageSize(const PPCSubtarget &STI) {
61 if (STI.isAIXABI() || STI.isPPC64())
62 return (STI.isELFv2ABI() ? 4 : 6) * (STI.isPPC64() ? 8 : 4);
63
64 // 32-bit SVR4 ABI:
65 return 8;
66 }
67
computeBasePointerSaveOffset(const PPCSubtarget & STI)68 static unsigned computeBasePointerSaveOffset(const PPCSubtarget &STI) {
69 // Third slot in the general purpose register save area.
70 if (STI.is32BitELFABI() && STI.getTargetMachine().isPositionIndependent())
71 return -12U;
72
73 // Second slot in the general purpose register save area.
74 return STI.isPPC64() ? -16U : -8U;
75 }
76
computeCRSaveOffset(const PPCSubtarget & STI)77 static unsigned computeCRSaveOffset(const PPCSubtarget &STI) {
78 return (STI.isAIXABI() && !STI.isPPC64()) ? 4 : 8;
79 }
80
// Construct the PPC frame lowering object. The PPC stack grows down with the
// platform's required stack alignment and no local-area offset. All
// ABI-dependent save-slot offsets and the linkage-area size are computed
// once here from the subtarget and cached for the lifetime of the object.
PPCFrameLowering::PPCFrameLowering(const PPCSubtarget &STI)
    : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
                          STI.getPlatformStackAlignment(), 0),
      Subtarget(STI), ReturnSaveOffset(computeReturnSaveOffset(Subtarget)),
      TOCSaveOffset(computeTOCSaveOffset(Subtarget)),
      FramePointerSaveOffset(computeFramePointerSaveOffset(Subtarget)),
      LinkageSize(computeLinkageSize(Subtarget)),
      BasePointerSaveOffset(computeBasePointerSaveOffset(Subtarget)),
      CRSaveOffset(computeCRSaveOffset(Subtarget)) {}
90
// With the SVR4 ABI, callee-saved registers have fixed offsets on the stack.
// Returns the ABI-specific table of (register, offset) spill slots, where
// offsets are negative displacements from the top of the frame, and sets
// NumEntries to the table length.
const PPCFrameLowering::SpillSlot *PPCFrameLowering::getCalleeSavedSpillSlots(
    unsigned &NumEntries) const {

// Floating-point register save area offsets.
#define CALLEE_SAVED_FPRS \
      {PPC::F31, -8},     \
      {PPC::F30, -16},    \
      {PPC::F29, -24},    \
      {PPC::F28, -32},    \
      {PPC::F27, -40},    \
      {PPC::F26, -48},    \
      {PPC::F25, -56},    \
      {PPC::F24, -64},    \
      {PPC::F23, -72},    \
      {PPC::F22, -80},    \
      {PPC::F21, -88},    \
      {PPC::F20, -96},    \
      {PPC::F19, -104},   \
      {PPC::F18, -112},   \
      {PPC::F17, -120},   \
      {PPC::F16, -128},   \
      {PPC::F15, -136},   \
      {PPC::F14, -144}

// 32-bit general purpose register save area offsets shared by ELF and
// AIX. AIX has an extra CSR with r13.
#define CALLEE_SAVED_GPRS32 \
      {PPC::R31, -4},       \
      {PPC::R30, -8},       \
      {PPC::R29, -12},      \
      {PPC::R28, -16},      \
      {PPC::R27, -20},      \
      {PPC::R26, -24},      \
      {PPC::R25, -28},      \
      {PPC::R24, -32},      \
      {PPC::R23, -36},      \
      {PPC::R22, -40},      \
      {PPC::R21, -44},      \
      {PPC::R20, -48},      \
      {PPC::R19, -52},      \
      {PPC::R18, -56},      \
      {PPC::R17, -60},      \
      {PPC::R16, -64},      \
      {PPC::R15, -68},      \
      {PPC::R14, -72}

// 64-bit general purpose register save area offsets.
#define CALLEE_SAVED_GPRS64 \
      {PPC::X31, -8},       \
      {PPC::X30, -16},      \
      {PPC::X29, -24},      \
      {PPC::X28, -32},      \
      {PPC::X27, -40},      \
      {PPC::X26, -48},      \
      {PPC::X25, -56},      \
      {PPC::X24, -64},      \
      {PPC::X23, -72},      \
      {PPC::X22, -80},      \
      {PPC::X21, -88},      \
      {PPC::X20, -96},      \
      {PPC::X19, -104},     \
      {PPC::X18, -112},     \
      {PPC::X17, -120},     \
      {PPC::X16, -128},     \
      {PPC::X15, -136},     \
      {PPC::X14, -144}

// Vector register save area offsets (16-byte slots).
#define CALLEE_SAVED_VRS \
      {PPC::V31, -16},   \
      {PPC::V30, -32},   \
      {PPC::V29, -48},   \
      {PPC::V28, -64},   \
      {PPC::V27, -80},   \
      {PPC::V26, -96},   \
      {PPC::V25, -112},  \
      {PPC::V24, -128},  \
      {PPC::V23, -144},  \
      {PPC::V22, -160},  \
      {PPC::V21, -176},  \
      {PPC::V20, -192}

  // Note that the offsets here overlap, but this is fixed up in
  // processFunctionBeforeFrameFinalized.

  static const SpillSlot ELFOffsets32[] = {
      CALLEE_SAVED_FPRS,
      CALLEE_SAVED_GPRS32,

      // CR save area offset.  We map each of the nonvolatile CR fields
      // to the slot for CR2, which is the first of the nonvolatile CR
      // fields to be assigned, so that we only allocate one save slot.
      // See PPCRegisterInfo::hasReservedSpillSlot() for more information.
      {PPC::CR2, -4},

      // VRSAVE save area offset.
      {PPC::VRSAVE, -4},

      CALLEE_SAVED_VRS,

      // SPE register save area (overlaps Vector save area).
      {PPC::S31, -8},
      {PPC::S30, -16},
      {PPC::S29, -24},
      {PPC::S28, -32},
      {PPC::S27, -40},
      {PPC::S26, -48},
      {PPC::S25, -56},
      {PPC::S24, -64},
      {PPC::S23, -72},
      {PPC::S22, -80},
      {PPC::S21, -88},
      {PPC::S20, -96},
      {PPC::S19, -104},
      {PPC::S18, -112},
      {PPC::S17, -120},
      {PPC::S16, -128},
      {PPC::S15, -136},
      {PPC::S14, -144}};

  static const SpillSlot ELFOffsets64[] = {
      CALLEE_SAVED_FPRS,
      CALLEE_SAVED_GPRS64,

      // VRSAVE save area offset.
      {PPC::VRSAVE, -4},
      CALLEE_SAVED_VRS
  };

  static const SpillSlot AIXOffsets32[] = {CALLEE_SAVED_FPRS,
                                           CALLEE_SAVED_GPRS32,
                                           // Add AIX's extra CSR.
                                           {PPC::R13, -76},
                                           CALLEE_SAVED_VRS};

  static const SpillSlot AIXOffsets64[] = {
      CALLEE_SAVED_FPRS, CALLEE_SAVED_GPRS64, CALLEE_SAVED_VRS};

  // Select the table for the current ABI.
  if (Subtarget.is64BitELFABI()) {
    NumEntries = array_lengthof(ELFOffsets64);
    return ELFOffsets64;
  }

  if (Subtarget.is32BitELFABI()) {
    NumEntries = array_lengthof(ELFOffsets32);
    return ELFOffsets32;
  }

  assert(Subtarget.isAIXABI() && "Unexpected ABI.");

  if (Subtarget.isPPC64()) {
    NumEntries = array_lengthof(AIXOffsets64);
    return AIXOffsets64;
  }

  NumEntries = array_lengthof(AIXOffsets32);
  return AIXOffsets32;
}
250
spillsCR(const MachineFunction & MF)251 static bool spillsCR(const MachineFunction &MF) {
252 const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
253 return FuncInfo->isCRSpilled();
254 }
255
hasSpills(const MachineFunction & MF)256 static bool hasSpills(const MachineFunction &MF) {
257 const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
258 return FuncInfo->hasSpills();
259 }
260
hasNonRISpills(const MachineFunction & MF)261 static bool hasNonRISpills(const MachineFunction &MF) {
262 const PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
263 return FuncInfo->hasNonRISpills();
264 }
265
266 /// MustSaveLR - Return true if this function requires that we save the LR
267 /// register onto the stack in the prolog and restore it in the epilog of the
268 /// function.
MustSaveLR(const MachineFunction & MF,unsigned LR)269 static bool MustSaveLR(const MachineFunction &MF, unsigned LR) {
270 const PPCFunctionInfo *MFI = MF.getInfo<PPCFunctionInfo>();
271
272 // We need a save/restore of LR if there is any def of LR (which is
273 // defined by calls, including the PIC setup sequence), or if there is
274 // some use of the LR stack slot (e.g. for builtin_return_address).
275 // (LR comes in 32 and 64 bit versions.)
276 MachineRegisterInfo::def_iterator RI = MF.getRegInfo().def_begin(LR);
277 return RI !=MF.getRegInfo().def_end() || MFI->isLRStoreRequired();
278 }
279
280 /// determineFrameLayoutAndUpdate - Determine the size of the frame and maximum
281 /// call frame size. Update the MachineFunction object with the stack size.
282 unsigned
determineFrameLayoutAndUpdate(MachineFunction & MF,bool UseEstimate) const283 PPCFrameLowering::determineFrameLayoutAndUpdate(MachineFunction &MF,
284 bool UseEstimate) const {
285 unsigned NewMaxCallFrameSize = 0;
286 unsigned FrameSize = determineFrameLayout(MF, UseEstimate,
287 &NewMaxCallFrameSize);
288 MF.getFrameInfo().setStackSize(FrameSize);
289 MF.getFrameInfo().setMaxCallFrameSize(NewMaxCallFrameSize);
290 return FrameSize;
291 }
292
/// determineFrameLayout - Determine the size of the frame and maximum call
/// frame size.
///
/// \param MF the function being laid out.
/// \param UseEstimate use MFI's stack-size estimate instead of the final
///        stack size (used before frame finalization).
/// \param NewMaxCallFrameSize if non-null, receives the (possibly aligned)
///        maximum call frame size.
/// \return the total aligned frame size, or 0 when the red zone suffices
///         and no stack adjustment is needed.
unsigned
PPCFrameLowering::determineFrameLayout(const MachineFunction &MF,
                                       bool UseEstimate,
                                       unsigned *NewMaxCallFrameSize) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();

  // Get the number of bytes to allocate from the FrameInfo
  unsigned FrameSize =
      UseEstimate ? MFI.estimateStackSize(MF) : MFI.getStackSize();

  // Get stack alignments. The frame must be aligned to the greatest of these:
  Align TargetAlign = getStackAlign(); // alignment required per the ABI
  Align MaxAlign = MFI.getMaxAlign();  // algmt required by data in frame
  Align Alignment = std::max(TargetAlign, MaxAlign);

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();

  // A function may skip the stack adjustment entirely (frame size 0) when
  // everything it needs fits in the red zone and nothing forces a frame.
  unsigned LR = RegInfo->getRARegister();
  bool DisableRedZone = MF.getFunction().hasFnAttribute(Attribute::NoRedZone);
  bool CanUseRedZone = !MFI.hasVarSizedObjects() && // No dynamic alloca.
                       !MFI.adjustsStack() &&       // No calls.
                       !MustSaveLR(MF, LR) &&       // No need to save LR.
                       !FI->mustSaveTOC() &&        // No need to save TOC.
                       !RegInfo->hasBasePointer(MF); // No special alignment.

  // Note: for PPC32 SVR4ABI, we can still generate stackless
  // code if all local vars are reg-allocated.
  bool FitsInRedZone = FrameSize <= Subtarget.getRedZoneSize();

  // Check whether we can skip adjusting the stack pointer (by using red zone)
  if (!DisableRedZone && CanUseRedZone && FitsInRedZone) {
    // No need for frame
    return 0;
  }

  // Get the maximum call frame size of all the calls.
  unsigned maxCallFrameSize = MFI.getMaxCallFrameSize();

  // Maximum call frame needs to be at least big enough for linkage area.
  unsigned minCallFrameSize = getLinkageSize();
  maxCallFrameSize = std::max(maxCallFrameSize, minCallFrameSize);

  // If we have dynamic alloca then maxCallFrameSize needs to be aligned so
  // that allocations will be aligned.
  if (MFI.hasVarSizedObjects())
    maxCallFrameSize = alignTo(maxCallFrameSize, Alignment);

  // Update the new max call frame size if the caller passes in a valid pointer.
  if (NewMaxCallFrameSize)
    *NewMaxCallFrameSize = maxCallFrameSize;

  // Include call frame size in total.
  FrameSize += maxCallFrameSize;

  // Make sure the frame is aligned.
  FrameSize = alignTo(FrameSize, Alignment);

  return FrameSize;
}
355
356 // hasFP - Return true if the specified function actually has a dedicated frame
357 // pointer register.
hasFP(const MachineFunction & MF) const358 bool PPCFrameLowering::hasFP(const MachineFunction &MF) const {
359 const MachineFrameInfo &MFI = MF.getFrameInfo();
360 // FIXME: This is pretty much broken by design: hasFP() might be called really
361 // early, before the stack layout was calculated and thus hasFP() might return
362 // true or false here depending on the time of call.
363 return (MFI.getStackSize()) && needsFP(MF);
364 }
365
366 // needsFP - Return true if the specified function should have a dedicated frame
367 // pointer register. This is true if the function has variable sized allocas or
368 // if frame pointer elimination is disabled.
needsFP(const MachineFunction & MF) const369 bool PPCFrameLowering::needsFP(const MachineFunction &MF) const {
370 const MachineFrameInfo &MFI = MF.getFrameInfo();
371
372 // Naked functions have no stack frame pushed, so we don't have a frame
373 // pointer.
374 if (MF.getFunction().hasFnAttribute(Attribute::Naked))
375 return false;
376
377 return MF.getTarget().Options.DisableFramePointerElim(MF) ||
378 MFI.hasVarSizedObjects() || MFI.hasStackMap() || MFI.hasPatchPoint() ||
379 MF.exposesReturnsTwice() ||
380 (MF.getTarget().Options.GuaranteedTailCallOpt &&
381 MF.getInfo<PPCFunctionInfo>()->hasFastCall());
382 }
383
// Rewrite every use of the virtual frame-pointer placeholders (PPC::FP /
// PPC::FP8) and base-pointer placeholders (PPC::BP / PPC::BP8) in the
// function with the concrete physical registers chosen for this function:
// R31/X31 when a dedicated frame pointer is needed, otherwise the stack
// pointer R1/X1; the register-info-selected base register (X30 in 64-bit
// mode) when a base pointer is in use, otherwise the FP choice.
void PPCFrameLowering::replaceFPWithRealFP(MachineFunction &MF) const {
  bool is31 = needsFP(MF);
  unsigned FPReg = is31 ? PPC::R31 : PPC::R1;
  unsigned FP8Reg = is31 ? PPC::X31 : PPC::X1;

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  bool HasBP = RegInfo->hasBasePointer(MF);
  unsigned BPReg = HasBP ? (unsigned) RegInfo->getBaseRegister(MF) : FPReg;
  unsigned BP8Reg = HasBP ? (unsigned) PPC::X30 : FP8Reg;

  // Walk every operand of every instruction (blocks forward, instructions
  // in reverse) and patch register operands in place.
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI)
    for (MachineBasicBlock::iterator MBBI = BI->end(); MBBI != BI->begin(); ) {
      --MBBI;
      for (unsigned I = 0, E = MBBI->getNumOperands(); I != E; ++I) {
        MachineOperand &MO = MBBI->getOperand(I);
        if (!MO.isReg())
          continue;

        switch (MO.getReg()) {
        case PPC::FP:
          MO.setReg(FPReg);
          break;
        case PPC::FP8:
          MO.setReg(FP8Reg);
          break;
        case PPC::BP:
          MO.setReg(BPReg);
          break;
        case PPC::BP8:
          MO.setReg(BP8Reg);
          break;

        }
      }
    }
}
421
/* This function will do the following:
   - If MBB is an entry or exit block, set SR1 and SR2 to R0 and R12
     respectively (defaults recommended by the ABI) and return true
   - If MBB is not an entry block, initialize the register scavenger and look
     for available registers.
   - If the defaults (R0/R12) are available, return true
   - If TwoUniqueRegsRequired is set to true, it looks for two unique
     registers. Otherwise, look for a single available register.
   - If the required registers are found, set SR1 and SR2 and return true.
   - If the required registers are not found, set SR2 or both SR1 and SR2 to
     PPC::NoRegister and return false.

   Note that if both SR1 and SR2 are valid parameters and TwoUniqueRegsRequired
   is not set, this function will attempt to find two different registers, but
   still return true if only one register is available (and set SR1 == SR2).
*/
bool
PPCFrameLowering::findScratchRegister(MachineBasicBlock *MBB,
                                      bool UseAtEnd,
                                      bool TwoUniqueRegsRequired,
                                      Register *SR1,
                                      Register *SR2) const {
  RegScavenger RS;
  // ABI-recommended scratch registers, width-selected for the subtarget.
  Register R0 = Subtarget.isPPC64() ? PPC::X0 : PPC::R0;
  Register R12 = Subtarget.isPPC64() ? PPC::X12 : PPC::R12;

  // Set the defaults for the two scratch registers.
  if (SR1)
    *SR1 = R0;

  if (SR2) {
    assert (SR1 && "Asking for the second scratch register but not the first?");
    *SR2 = R12;
  }

  // If MBB is an entry or exit block, use R0 and R12 as the scratch registers.
  // (R0/R12 are free at function boundaries per the ABI.)
  if ((UseAtEnd && MBB->isReturnBlock()) ||
      (!UseAtEnd && (&MBB->getParent()->front() == MBB)))
    return true;

  // Otherwise scavenge: track liveness through the block to find free regs.
  RS.enterBasicBlock(*MBB);

  if (UseAtEnd && !MBB->empty()) {
    // The scratch register will be used at the end of the block, so must
    // consider all registers used within the block

    MachineBasicBlock::iterator MBBI = MBB->getFirstTerminator();
    // If no terminator, back iterator up to previous instruction.
    if (MBBI == MBB->end())
      MBBI = std::prev(MBBI);

    if (MBBI != MBB->begin())
      RS.forward(MBBI);
  }

  // If the two registers are available, we're all good.
  // Note that we only return here if both R0 and R12 are available because
  // although the function may not require two unique registers, it may benefit
  // from having two so we should try to provide them.
  if (!RS.isRegUsed(R0) && !RS.isRegUsed(R12))
    return true;

  // Get the list of callee-saved registers for the target.
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(MBB->getParent());

  // Get all the available registers in the block.
  BitVector BV = RS.getRegsAvailable(Subtarget.isPPC64() ? &PPC::G8RCRegClass :
                                     &PPC::GPRCRegClass);

  // We shouldn't use callee-saved registers as scratch registers as they may be
  // available when looking for a candidate block for shrink wrapping but not
  // available when the actual prologue/epilogue is being emitted because they
  // were added as live-in to the prologue block by PrologueEpilogueInserter.
  for (int i = 0; CSRegs[i]; ++i)
    BV.reset(CSRegs[i]);

  // Set the first scratch register to the first available one.
  if (SR1) {
    int FirstScratchReg = BV.find_first();
    *SR1 = FirstScratchReg == -1 ? (unsigned)PPC::NoRegister : FirstScratchReg;
  }

  // If there is another one available, set the second scratch register to that.
  // Otherwise, set it to either PPC::NoRegister if this function requires two
  // or to whatever SR1 is set to if this function doesn't require two.
  if (SR2) {
    int SecondScratchReg = BV.find_next(*SR1);
    if (SecondScratchReg != -1)
      *SR2 = SecondScratchReg;
    else
      *SR2 = TwoUniqueRegsRequired ? Register() : *SR1;
  }

  // Now that we've done our best to provide both registers, double check
  // whether we were unable to provide enough.
  if (BV.count() < (TwoUniqueRegsRequired ? 2U : 1U))
    return false;

  return true;
}
523
524 // We need a scratch register for spilling LR and for spilling CR. By default,
525 // we use two scratch registers to hide latency. However, if only one scratch
526 // register is available, we can adjust for that by not overlapping the spill
527 // code. However, if we need to realign the stack (i.e. have a base pointer)
528 // and the stack frame is large, we need two scratch registers.
529 // Also, stack probe requires two scratch registers, one for old sp, one for
530 // large frame and large probe size.
531 bool
twoUniqueScratchRegsRequired(MachineBasicBlock * MBB) const532 PPCFrameLowering::twoUniqueScratchRegsRequired(MachineBasicBlock *MBB) const {
533 const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
534 MachineFunction &MF = *(MBB->getParent());
535 bool HasBP = RegInfo->hasBasePointer(MF);
536 unsigned FrameSize = determineFrameLayout(MF);
537 int NegFrameSize = -FrameSize;
538 bool IsLargeFrame = !isInt<16>(NegFrameSize);
539 MachineFrameInfo &MFI = MF.getFrameInfo();
540 Align MaxAlign = MFI.getMaxAlign();
541 bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI();
542 const PPCTargetLowering &TLI = *Subtarget.getTargetLowering();
543
544 return ((IsLargeFrame || !HasRedZone) && HasBP && MaxAlign > 1) ||
545 TLI.hasInlineStackProbe(MF);
546 }
547
canUseAsPrologue(const MachineBasicBlock & MBB) const548 bool PPCFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
549 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
550
551 return findScratchRegister(TmpMBB, false,
552 twoUniqueScratchRegsRequired(TmpMBB));
553 }
554
canUseAsEpilogue(const MachineBasicBlock & MBB) const555 bool PPCFrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
556 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
557
558 return findScratchRegister(TmpMBB, true);
559 }
560
stackUpdateCanBeMoved(MachineFunction & MF) const561 bool PPCFrameLowering::stackUpdateCanBeMoved(MachineFunction &MF) const {
562 const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
563 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
564
565 // Abort if there is no register info or function info.
566 if (!RegInfo || !FI)
567 return false;
568
569 // Only move the stack update on ELFv2 ABI and PPC64.
570 if (!Subtarget.isELFv2ABI() || !Subtarget.isPPC64())
571 return false;
572
573 // Check the frame size first and return false if it does not fit the
574 // requirements.
575 // We need a non-zero frame size as well as a frame that will fit in the red
576 // zone. This is because by moving the stack pointer update we are now storing
577 // to the red zone until the stack pointer is updated. If we get an interrupt
578 // inside the prologue but before the stack update we now have a number of
579 // stores to the red zone and those stores must all fit.
580 MachineFrameInfo &MFI = MF.getFrameInfo();
581 unsigned FrameSize = MFI.getStackSize();
582 if (!FrameSize || FrameSize > Subtarget.getRedZoneSize())
583 return false;
584
585 // Frame pointers and base pointers complicate matters so don't do anything
586 // if we have them. For example having a frame pointer will sometimes require
587 // a copy of r1 into r31 and that makes keeping track of updates to r1 more
588 // difficult. Similar situation exists with setjmp.
589 if (hasFP(MF) || RegInfo->hasBasePointer(MF) || MF.exposesReturnsTwice())
590 return false;
591
592 // Calls to fast_cc functions use different rules for passing parameters on
593 // the stack from the ABI and using PIC base in the function imposes
594 // similar restrictions to using the base pointer. It is not generally safe
595 // to move the stack pointer update in these situations.
596 if (FI->hasFastCall() || FI->usesPICBase())
597 return false;
598
599 // Finally we can move the stack update if we do not require register
600 // scavenging. Register scavenging can introduce more spills and so
601 // may make the frame size larger than we have computed.
602 return !RegInfo->requiresFrameIndexScavenging(MF);
603 }
604
emitPrologue(MachineFunction & MF,MachineBasicBlock & MBB) const605 void PPCFrameLowering::emitPrologue(MachineFunction &MF,
606 MachineBasicBlock &MBB) const {
607 MachineBasicBlock::iterator MBBI = MBB.begin();
608 MachineFrameInfo &MFI = MF.getFrameInfo();
609 const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
610 const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
611 const PPCTargetLowering &TLI = *Subtarget.getTargetLowering();
612
613 MachineModuleInfo &MMI = MF.getMMI();
614 const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
615 DebugLoc dl;
616 // AIX assembler does not support cfi directives.
617 const bool needsCFI = MF.needsFrameMoves() && !Subtarget.isAIXABI();
618
619 // Get processor type.
620 bool isPPC64 = Subtarget.isPPC64();
621 // Get the ABI.
622 bool isSVR4ABI = Subtarget.isSVR4ABI();
623 bool isELFv2ABI = Subtarget.isELFv2ABI();
624 assert((isSVR4ABI || Subtarget.isAIXABI()) && "Unsupported PPC ABI.");
625
626 // Work out frame sizes.
627 unsigned FrameSize = determineFrameLayoutAndUpdate(MF);
628 int NegFrameSize = -FrameSize;
629 if (!isInt<32>(NegFrameSize))
630 llvm_unreachable("Unhandled stack size!");
631
632 if (MFI.isFrameAddressTaken())
633 replaceFPWithRealFP(MF);
634
635 // Check if the link register (LR) must be saved.
636 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
637 bool MustSaveLR = FI->mustSaveLR();
638 bool MustSaveTOC = FI->mustSaveTOC();
639 const SmallVectorImpl<Register> &MustSaveCRs = FI->getMustSaveCRs();
640 bool MustSaveCR = !MustSaveCRs.empty();
641 // Do we have a frame pointer and/or base pointer for this function?
642 bool HasFP = hasFP(MF);
643 bool HasBP = RegInfo->hasBasePointer(MF);
644 bool HasRedZone = isPPC64 || !isSVR4ABI;
645 bool HasROPProtect = Subtarget.hasROPProtect();
646 bool HasPrivileged = Subtarget.hasPrivileged();
647
648 Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
649 Register BPReg = RegInfo->getBaseRegister(MF);
650 Register FPReg = isPPC64 ? PPC::X31 : PPC::R31;
651 Register LRReg = isPPC64 ? PPC::LR8 : PPC::LR;
652 Register TOCReg = isPPC64 ? PPC::X2 : PPC::R2;
653 Register ScratchReg;
654 Register TempReg = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg
655 // ...(R12/X12 is volatile in both Darwin & SVR4, & can't be a function arg.)
656 const MCInstrDesc& MFLRInst = TII.get(isPPC64 ? PPC::MFLR8
657 : PPC::MFLR );
658 const MCInstrDesc& StoreInst = TII.get(isPPC64 ? PPC::STD
659 : PPC::STW );
660 const MCInstrDesc& StoreUpdtInst = TII.get(isPPC64 ? PPC::STDU
661 : PPC::STWU );
662 const MCInstrDesc& StoreUpdtIdxInst = TII.get(isPPC64 ? PPC::STDUX
663 : PPC::STWUX);
664 const MCInstrDesc& LoadImmShiftedInst = TII.get(isPPC64 ? PPC::LIS8
665 : PPC::LIS );
666 const MCInstrDesc& OrImmInst = TII.get(isPPC64 ? PPC::ORI8
667 : PPC::ORI );
668 const MCInstrDesc& OrInst = TII.get(isPPC64 ? PPC::OR8
669 : PPC::OR );
670 const MCInstrDesc& SubtractCarryingInst = TII.get(isPPC64 ? PPC::SUBFC8
671 : PPC::SUBFC);
672 const MCInstrDesc& SubtractImmCarryingInst = TII.get(isPPC64 ? PPC::SUBFIC8
673 : PPC::SUBFIC);
674 const MCInstrDesc &MoveFromCondRegInst = TII.get(isPPC64 ? PPC::MFCR8
675 : PPC::MFCR);
676 const MCInstrDesc &StoreWordInst = TII.get(isPPC64 ? PPC::STW8 : PPC::STW);
677 const MCInstrDesc &HashST =
678 TII.get(HasPrivileged ? PPC::HASHSTP : PPC::HASHST);
679
680 // Regarding this assert: Even though LR is saved in the caller's frame (i.e.,
681 // LROffset is positive), that slot is callee-owned. Because PPC32 SVR4 has no
682 // Red Zone, an asynchronous event (a form of "callee") could claim a frame &
683 // overwrite it, so PPC32 SVR4 must claim at least a minimal frame to save LR.
684 assert((isPPC64 || !isSVR4ABI || !(!FrameSize && (MustSaveLR || HasFP))) &&
685 "FrameSize must be >0 to save/restore the FP or LR for 32-bit SVR4.");
686
687 // Using the same bool variable as below to suppress compiler warnings.
688 bool SingleScratchReg = findScratchRegister(
689 &MBB, false, twoUniqueScratchRegsRequired(&MBB), &ScratchReg, &TempReg);
690 assert(SingleScratchReg &&
691 "Required number of registers not available in this block");
692
693 SingleScratchReg = ScratchReg == TempReg;
694
695 int LROffset = getReturnSaveOffset();
696
697 int FPOffset = 0;
698 if (HasFP) {
699 MachineFrameInfo &MFI = MF.getFrameInfo();
700 int FPIndex = FI->getFramePointerSaveIndex();
701 assert(FPIndex && "No Frame Pointer Save Slot!");
702 FPOffset = MFI.getObjectOffset(FPIndex);
703 }
704
705 int BPOffset = 0;
706 if (HasBP) {
707 MachineFrameInfo &MFI = MF.getFrameInfo();
708 int BPIndex = FI->getBasePointerSaveIndex();
709 assert(BPIndex && "No Base Pointer Save Slot!");
710 BPOffset = MFI.getObjectOffset(BPIndex);
711 }
712
713 int PBPOffset = 0;
714 if (FI->usesPICBase()) {
715 MachineFrameInfo &MFI = MF.getFrameInfo();
716 int PBPIndex = FI->getPICBasePointerSaveIndex();
717 assert(PBPIndex && "No PIC Base Pointer Save Slot!");
718 PBPOffset = MFI.getObjectOffset(PBPIndex);
719 }
720
721 // Get stack alignments.
722 Align MaxAlign = MFI.getMaxAlign();
723 if (HasBP && MaxAlign > 1)
724 assert(Log2(MaxAlign) < 16 && "Invalid alignment!");
725
726 // Frames of 32KB & larger require special handling because they cannot be
727 // indexed into with a simple STDU/STWU/STD/STW immediate offset operand.
728 bool isLargeFrame = !isInt<16>(NegFrameSize);
729
730 // Check if we can move the stack update instruction (stdu) down the prologue
731 // past the callee saves. Hopefully this will avoid the situation where the
732 // saves are waiting for the update on the store with update to complete.
733 MachineBasicBlock::iterator StackUpdateLoc = MBBI;
734 bool MovingStackUpdateDown = false;
735
736 // Check if we can move the stack update.
737 if (stackUpdateCanBeMoved(MF)) {
738 const std::vector<CalleeSavedInfo> &Info = MFI.getCalleeSavedInfo();
739 for (CalleeSavedInfo CSI : Info) {
740 // If the callee saved register is spilled to a register instead of the
741 // stack then the spill no longer uses the stack pointer.
742 // This can lead to two consequences:
743 // 1) We no longer need to update the stack because the function does not
744 // spill any callee saved registers to stack.
745 // 2) We have a situation where we still have to update the stack pointer
746 // even though some registers are spilled to other registers. In
747 // this case the current code moves the stack update to an incorrect
748 // position.
749 // In either case we should abort moving the stack update operation.
750 if (CSI.isSpilledToReg()) {
751 StackUpdateLoc = MBBI;
752 MovingStackUpdateDown = false;
753 break;
754 }
755
756 int FrIdx = CSI.getFrameIdx();
757 // If the frame index is not negative the callee saved info belongs to a
758 // stack object that is not a fixed stack object. We ignore non-fixed
759 // stack objects because we won't move the stack update pointer past them.
760 if (FrIdx >= 0)
761 continue;
762
763 if (MFI.isFixedObjectIndex(FrIdx) && MFI.getObjectOffset(FrIdx) < 0) {
764 StackUpdateLoc++;
765 MovingStackUpdateDown = true;
766 } else {
767 // We need all of the Frame Indices to meet these conditions.
768 // If they do not, abort the whole operation.
769 StackUpdateLoc = MBBI;
770 MovingStackUpdateDown = false;
771 break;
772 }
773 }
774
775 // If the operation was not aborted then update the object offset.
776 if (MovingStackUpdateDown) {
777 for (CalleeSavedInfo CSI : Info) {
778 int FrIdx = CSI.getFrameIdx();
779 if (FrIdx < 0)
780 MFI.setObjectOffset(FrIdx, MFI.getObjectOffset(FrIdx) + NegFrameSize);
781 }
782 }
783 }
784
785 // Where in the prologue we move the CR fields depends on how many scratch
786 // registers we have, and if we need to save the link register or not. This
787 // lambda is to avoid duplicating the logic in 2 places.
788 auto BuildMoveFromCR = [&]() {
789 if (isELFv2ABI && MustSaveCRs.size() == 1) {
790 // In the ELFv2 ABI, we are not required to save all CR fields.
791 // If only one CR field is clobbered, it is more efficient to use
792 // mfocrf to selectively save just that field, because mfocrf has short
793 // latency compares to mfcr.
794 assert(isPPC64 && "V2 ABI is 64-bit only.");
795 MachineInstrBuilder MIB =
796 BuildMI(MBB, MBBI, dl, TII.get(PPC::MFOCRF8), TempReg);
797 MIB.addReg(MustSaveCRs[0], RegState::Kill);
798 } else {
799 MachineInstrBuilder MIB =
800 BuildMI(MBB, MBBI, dl, MoveFromCondRegInst, TempReg);
801 for (unsigned CRfield : MustSaveCRs)
802 MIB.addReg(CRfield, RegState::ImplicitKill);
803 }
804 };
805
806 // If we need to spill the CR and the LR but we don't have two separate
807 // registers available, we must spill them one at a time
808 if (MustSaveCR && SingleScratchReg && MustSaveLR) {
809 BuildMoveFromCR();
810 BuildMI(MBB, MBBI, dl, StoreWordInst)
811 .addReg(TempReg, getKillRegState(true))
812 .addImm(CRSaveOffset)
813 .addReg(SPReg);
814 }
815
816 if (MustSaveLR)
817 BuildMI(MBB, MBBI, dl, MFLRInst, ScratchReg);
818
819 if (MustSaveCR && !(SingleScratchReg && MustSaveLR))
820 BuildMoveFromCR();
821
822 if (HasRedZone) {
823 if (HasFP)
824 BuildMI(MBB, MBBI, dl, StoreInst)
825 .addReg(FPReg)
826 .addImm(FPOffset)
827 .addReg(SPReg);
828 if (FI->usesPICBase())
829 BuildMI(MBB, MBBI, dl, StoreInst)
830 .addReg(PPC::R30)
831 .addImm(PBPOffset)
832 .addReg(SPReg);
833 if (HasBP)
834 BuildMI(MBB, MBBI, dl, StoreInst)
835 .addReg(BPReg)
836 .addImm(BPOffset)
837 .addReg(SPReg);
838 }
839
840 // Generate the instruction to store the LR. In the case where ROP protection
841 // is required the register holding the LR should not be killed as it will be
842 // used by the hash store instruction.
843 if (MustSaveLR) {
844 BuildMI(MBB, StackUpdateLoc, dl, StoreInst)
845 .addReg(ScratchReg, getKillRegState(!HasROPProtect))
846 .addImm(LROffset)
847 .addReg(SPReg);
848
849 // Add the ROP protection Hash Store instruction.
850 // NOTE: This is technically a violation of the ABI. The hash can be saved
851 // up to 512 bytes into the Protected Zone. This can be outside of the
852 // initial 288 byte volatile program storage region in the Protected Zone.
853 // However, this restriction will be removed in an upcoming revision of the
854 // ABI.
855 if (HasROPProtect) {
856 const int SaveIndex = FI->getROPProtectionHashSaveIndex();
857 const int ImmOffset = MFI.getObjectOffset(SaveIndex);
858 assert((ImmOffset <= -8 && ImmOffset >= -512) &&
859 "ROP hash save offset out of range.");
860 assert(((ImmOffset & 0x7) == 0) &&
861 "ROP hash save offset must be 8 byte aligned.");
862 BuildMI(MBB, StackUpdateLoc, dl, HashST)
863 .addReg(ScratchReg, getKillRegState(true))
864 .addImm(ImmOffset)
865 .addReg(SPReg);
866 }
867 }
868
869 if (MustSaveCR &&
870 !(SingleScratchReg && MustSaveLR)) {
871 assert(HasRedZone && "A red zone is always available on PPC64");
872 BuildMI(MBB, MBBI, dl, StoreWordInst)
873 .addReg(TempReg, getKillRegState(true))
874 .addImm(CRSaveOffset)
875 .addReg(SPReg);
876 }
877
878 // Skip the rest if this is a leaf function & all spills fit in the Red Zone.
879 if (!FrameSize)
880 return;
881
882 // Adjust stack pointer: r1 += NegFrameSize.
883 // If there is a preferred stack alignment, align R1 now
884
885 if (HasBP && HasRedZone) {
886 // Save a copy of r1 as the base pointer.
887 BuildMI(MBB, MBBI, dl, OrInst, BPReg)
888 .addReg(SPReg)
889 .addReg(SPReg);
890 }
891
892 // Have we generated a STUX instruction to claim stack frame? If so,
893 // the negated frame size will be placed in ScratchReg.
894 bool HasSTUX = false;
895
896 // If FrameSize <= TLI.getStackProbeSize(MF), as POWER ABI requires backchain
897 // pointer is always stored at SP, we will get a free probe due to an essential
898 // STU(X) instruction.
899 if (TLI.hasInlineStackProbe(MF) && FrameSize > TLI.getStackProbeSize(MF)) {
900 // To be consistent with other targets, a pseudo instruction is emitted and
901 // will be later expanded in `inlineStackProbe`.
902 BuildMI(MBB, MBBI, dl,
903 TII.get(isPPC64 ? PPC::PROBED_STACKALLOC_64
904 : PPC::PROBED_STACKALLOC_32))
905 .addDef(ScratchReg)
906 .addDef(TempReg) // TempReg stores the old sp.
907 .addImm(NegFrameSize);
908 // FIXME: HasSTUX is only read if HasRedZone is not set, in such case, we
909 // update the ScratchReg to meet the assumption that ScratchReg contains
910 // the NegFrameSize. This solution is rather tricky.
911 if (!HasRedZone) {
912 BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBF), ScratchReg)
913 .addReg(TempReg)
914 .addReg(SPReg);
915 HasSTUX = true;
916 }
917 } else {
918 // This condition must be kept in sync with canUseAsPrologue.
919 if (HasBP && MaxAlign > 1) {
920 if (isPPC64)
921 BuildMI(MBB, MBBI, dl, TII.get(PPC::RLDICL), ScratchReg)
922 .addReg(SPReg)
923 .addImm(0)
924 .addImm(64 - Log2(MaxAlign));
925 else // PPC32...
926 BuildMI(MBB, MBBI, dl, TII.get(PPC::RLWINM), ScratchReg)
927 .addReg(SPReg)
928 .addImm(0)
929 .addImm(32 - Log2(MaxAlign))
930 .addImm(31);
931 if (!isLargeFrame) {
932 BuildMI(MBB, MBBI, dl, SubtractImmCarryingInst, ScratchReg)
933 .addReg(ScratchReg, RegState::Kill)
934 .addImm(NegFrameSize);
935 } else {
936 assert(!SingleScratchReg && "Only a single scratch reg available");
937 BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, TempReg)
938 .addImm(NegFrameSize >> 16);
939 BuildMI(MBB, MBBI, dl, OrImmInst, TempReg)
940 .addReg(TempReg, RegState::Kill)
941 .addImm(NegFrameSize & 0xFFFF);
942 BuildMI(MBB, MBBI, dl, SubtractCarryingInst, ScratchReg)
943 .addReg(ScratchReg, RegState::Kill)
944 .addReg(TempReg, RegState::Kill);
945 }
946
947 BuildMI(MBB, MBBI, dl, StoreUpdtIdxInst, SPReg)
948 .addReg(SPReg, RegState::Kill)
949 .addReg(SPReg)
950 .addReg(ScratchReg);
951 HasSTUX = true;
952
953 } else if (!isLargeFrame) {
954 BuildMI(MBB, StackUpdateLoc, dl, StoreUpdtInst, SPReg)
955 .addReg(SPReg)
956 .addImm(NegFrameSize)
957 .addReg(SPReg);
958
959 } else {
960 BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
961 .addImm(NegFrameSize >> 16);
962 BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
963 .addReg(ScratchReg, RegState::Kill)
964 .addImm(NegFrameSize & 0xFFFF);
965 BuildMI(MBB, MBBI, dl, StoreUpdtIdxInst, SPReg)
966 .addReg(SPReg, RegState::Kill)
967 .addReg(SPReg)
968 .addReg(ScratchReg);
969 HasSTUX = true;
970 }
971 }
972
973 // Save the TOC register after the stack pointer update if a prologue TOC
974 // save is required for the function.
975 if (MustSaveTOC) {
976 assert(isELFv2ABI && "TOC saves in the prologue only supported on ELFv2");
977 BuildMI(MBB, StackUpdateLoc, dl, TII.get(PPC::STD))
978 .addReg(TOCReg, getKillRegState(true))
979 .addImm(TOCSaveOffset)
980 .addReg(SPReg);
981 }
982
983 if (!HasRedZone) {
984 assert(!isPPC64 && "A red zone is always available on PPC64");
985 if (HasSTUX) {
986 // The negated frame size is in ScratchReg, and the SPReg has been
987 // decremented by the frame size: SPReg = old SPReg + ScratchReg.
988 // Since FPOffset, PBPOffset, etc. are relative to the beginning of
989 // the stack frame (i.e. the old SP), ideally, we would put the old
990 // SP into a register and use it as the base for the stores. The
991 // problem is that the only available register may be ScratchReg,
992 // which could be R0, and R0 cannot be used as a base address.
993
994 // First, set ScratchReg to the old SP. This may need to be modified
995 // later.
996 BuildMI(MBB, MBBI, dl, TII.get(PPC::SUBF), ScratchReg)
997 .addReg(ScratchReg, RegState::Kill)
998 .addReg(SPReg);
999
1000 if (ScratchReg == PPC::R0) {
1001 // R0 cannot be used as a base register, but it can be used as an
1002 // index in a store-indexed.
1003 int LastOffset = 0;
1004 if (HasFP) {
1005 // R0 += (FPOffset-LastOffset).
1006 // Need addic, since addi treats R0 as 0.
1007 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg)
1008 .addReg(ScratchReg)
1009 .addImm(FPOffset-LastOffset);
1010 LastOffset = FPOffset;
1011 // Store FP into *R0.
1012 BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX))
1013 .addReg(FPReg, RegState::Kill) // Save FP.
1014 .addReg(PPC::ZERO)
1015 .addReg(ScratchReg); // This will be the index (R0 is ok here).
1016 }
1017 if (FI->usesPICBase()) {
1018 // R0 += (PBPOffset-LastOffset).
1019 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg)
1020 .addReg(ScratchReg)
1021 .addImm(PBPOffset-LastOffset);
1022 LastOffset = PBPOffset;
1023 BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX))
1024 .addReg(PPC::R30, RegState::Kill) // Save PIC base pointer.
1025 .addReg(PPC::ZERO)
1026 .addReg(ScratchReg); // This will be the index (R0 is ok here).
1027 }
1028 if (HasBP) {
1029 // R0 += (BPOffset-LastOffset).
1030 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), ScratchReg)
1031 .addReg(ScratchReg)
1032 .addImm(BPOffset-LastOffset);
1033 LastOffset = BPOffset;
1034 BuildMI(MBB, MBBI, dl, TII.get(PPC::STWX))
1035 .addReg(BPReg, RegState::Kill) // Save BP.
1036 .addReg(PPC::ZERO)
1037 .addReg(ScratchReg); // This will be the index (R0 is ok here).
1038 // BP = R0-LastOffset
1039 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDIC), BPReg)
1040 .addReg(ScratchReg, RegState::Kill)
1041 .addImm(-LastOffset);
1042 }
1043 } else {
1044 // ScratchReg is not R0, so use it as the base register. It is
1045 // already set to the old SP, so we can use the offsets directly.
1046
1047 // Now that the stack frame has been allocated, save all the necessary
1048 // registers using ScratchReg as the base address.
1049 if (HasFP)
1050 BuildMI(MBB, MBBI, dl, StoreInst)
1051 .addReg(FPReg)
1052 .addImm(FPOffset)
1053 .addReg(ScratchReg);
1054 if (FI->usesPICBase())
1055 BuildMI(MBB, MBBI, dl, StoreInst)
1056 .addReg(PPC::R30)
1057 .addImm(PBPOffset)
1058 .addReg(ScratchReg);
1059 if (HasBP) {
1060 BuildMI(MBB, MBBI, dl, StoreInst)
1061 .addReg(BPReg)
1062 .addImm(BPOffset)
1063 .addReg(ScratchReg);
1064 BuildMI(MBB, MBBI, dl, OrInst, BPReg)
1065 .addReg(ScratchReg, RegState::Kill)
1066 .addReg(ScratchReg);
1067 }
1068 }
1069 } else {
1070 // The frame size is a known 16-bit constant (fitting in the immediate
1071 // field of STWU). To be here we have to be compiling for PPC32.
1072 // Since the SPReg has been decreased by FrameSize, add it back to each
1073 // offset.
1074 if (HasFP)
1075 BuildMI(MBB, MBBI, dl, StoreInst)
1076 .addReg(FPReg)
1077 .addImm(FrameSize + FPOffset)
1078 .addReg(SPReg);
1079 if (FI->usesPICBase())
1080 BuildMI(MBB, MBBI, dl, StoreInst)
1081 .addReg(PPC::R30)
1082 .addImm(FrameSize + PBPOffset)
1083 .addReg(SPReg);
1084 if (HasBP) {
1085 BuildMI(MBB, MBBI, dl, StoreInst)
1086 .addReg(BPReg)
1087 .addImm(FrameSize + BPOffset)
1088 .addReg(SPReg);
1089 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADDI), BPReg)
1090 .addReg(SPReg)
1091 .addImm(FrameSize);
1092 }
1093 }
1094 }
1095
1096 // Add Call Frame Information for the instructions we generated above.
1097 if (needsCFI) {
1098 unsigned CFIIndex;
1099
1100 if (HasBP) {
1101 // Define CFA in terms of BP. Do this in preference to using FP/SP,
1102 // because if the stack needed aligning then CFA won't be at a fixed
1103 // offset from FP/SP.
1104 unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
1105 CFIIndex = MF.addFrameInst(
1106 MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
1107 } else {
1108 // Adjust the definition of CFA to account for the change in SP.
1109 assert(NegFrameSize);
1110 CFIIndex = MF.addFrameInst(
1111 MCCFIInstruction::cfiDefCfaOffset(nullptr, -NegFrameSize));
1112 }
1113 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1114 .addCFIIndex(CFIIndex);
1115
1116 if (HasFP) {
1117 // Describe where FP was saved, at a fixed offset from CFA.
1118 unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
1119 CFIIndex = MF.addFrameInst(
1120 MCCFIInstruction::createOffset(nullptr, Reg, FPOffset));
1121 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1122 .addCFIIndex(CFIIndex);
1123 }
1124
1125 if (FI->usesPICBase()) {
1126 // Describe where FP was saved, at a fixed offset from CFA.
1127 unsigned Reg = MRI->getDwarfRegNum(PPC::R30, true);
1128 CFIIndex = MF.addFrameInst(
1129 MCCFIInstruction::createOffset(nullptr, Reg, PBPOffset));
1130 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1131 .addCFIIndex(CFIIndex);
1132 }
1133
1134 if (HasBP) {
1135 // Describe where BP was saved, at a fixed offset from CFA.
1136 unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
1137 CFIIndex = MF.addFrameInst(
1138 MCCFIInstruction::createOffset(nullptr, Reg, BPOffset));
1139 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1140 .addCFIIndex(CFIIndex);
1141 }
1142
1143 if (MustSaveLR) {
1144 // Describe where LR was saved, at a fixed offset from CFA.
1145 unsigned Reg = MRI->getDwarfRegNum(LRReg, true);
1146 CFIIndex = MF.addFrameInst(
1147 MCCFIInstruction::createOffset(nullptr, Reg, LROffset));
1148 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1149 .addCFIIndex(CFIIndex);
1150 }
1151 }
1152
1153 // If there is a frame pointer, copy R1 into R31
1154 if (HasFP) {
1155 BuildMI(MBB, MBBI, dl, OrInst, FPReg)
1156 .addReg(SPReg)
1157 .addReg(SPReg);
1158
1159 if (!HasBP && needsCFI) {
1160 // Change the definition of CFA from SP+offset to FP+offset, because SP
1161 // will change at every alloca.
1162 unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
1163 unsigned CFIIndex = MF.addFrameInst(
1164 MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
1165
1166 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1167 .addCFIIndex(CFIIndex);
1168 }
1169 }
1170
1171 if (needsCFI) {
1172 // Describe where callee saved registers were saved, at fixed offsets from
1173 // CFA.
1174 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
1175 for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
1176 unsigned Reg = CSI[I].getReg();
1177 if (Reg == PPC::LR || Reg == PPC::LR8 || Reg == PPC::RM) continue;
1178
1179 // This is a bit of a hack: CR2LT, CR2GT, CR2EQ and CR2UN are just
1180 // subregisters of CR2. We just need to emit a move of CR2.
1181 if (PPC::CRBITRCRegClass.contains(Reg))
1182 continue;
1183
1184 if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
1185 continue;
1186
1187 // For SVR4, don't emit a move for the CR spill slot if we haven't
1188 // spilled CRs.
1189 if (isSVR4ABI && (PPC::CR2 <= Reg && Reg <= PPC::CR4)
1190 && !MustSaveCR)
1191 continue;
1192
1193 // For 64-bit SVR4 when we have spilled CRs, the spill location
1194 // is SP+8, not a frame-relative slot.
1195 if (isSVR4ABI && isPPC64 && (PPC::CR2 <= Reg && Reg <= PPC::CR4)) {
1196 // In the ELFv1 ABI, only CR2 is noted in CFI and stands in for
1197 // the whole CR word. In the ELFv2 ABI, every CR that was
1198 // actually saved gets its own CFI record.
1199 unsigned CRReg = isELFv2ABI? Reg : (unsigned) PPC::CR2;
1200 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
1201 nullptr, MRI->getDwarfRegNum(CRReg, true), CRSaveOffset));
1202 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1203 .addCFIIndex(CFIIndex);
1204 continue;
1205 }
1206
1207 if (CSI[I].isSpilledToReg()) {
1208 unsigned SpilledReg = CSI[I].getDstReg();
1209 unsigned CFIRegister = MF.addFrameInst(MCCFIInstruction::createRegister(
1210 nullptr, MRI->getDwarfRegNum(Reg, true),
1211 MRI->getDwarfRegNum(SpilledReg, true)));
1212 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1213 .addCFIIndex(CFIRegister);
1214 } else {
1215 int Offset = MFI.getObjectOffset(CSI[I].getFrameIdx());
1216 // We have changed the object offset above but we do not want to change
1217 // the actual offsets in the CFI instruction so we have to undo the
1218 // offset change here.
1219 if (MovingStackUpdateDown)
1220 Offset -= NegFrameSize;
1221
1222 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createOffset(
1223 nullptr, MRI->getDwarfRegNum(Reg, true), Offset));
1224 BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
1225 .addCFIIndex(CFIIndex);
1226 }
1227 }
1228 }
1229 }
1230
// Expand the PROBED_STACKALLOC_64 / PROBED_STACKALLOC_32 pseudo instruction
// (placed in the prologue by emitPrologue when inline stack probing is
// requested and the frame exceeds the probe size) into real code that lowers
// SP in probe-size chunks, storing through SP at each step so every chunk of
// the newly-allocated frame is touched. Per the pseudo's definition, operand
// 0 is the scratch register, operand 1 is the register that holds the old SP
// (named FPReg below), and operand 2 is the negated total frame size.
void PPCFrameLowering::inlineStackProbe(MachineFunction &MF,
                                        MachineBasicBlock &PrologMBB) const {
  // TODO: Generate CFI instructions.
  bool isPPC64 = Subtarget.isPPC64();
  const PPCTargetLowering &TLI = *Subtarget.getTargetLowering();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  // AIX assembler does not support cfi directives.
  const bool needsCFI = MF.needsFrameMoves() && !Subtarget.isAIXABI();
  // Locate the pseudo emitted by emitPrologue; if this prologue did not need
  // probing, there is nothing to expand.
  auto StackAllocMIPos = llvm::find_if(PrologMBB, [](MachineInstr &MI) {
    int Opc = MI.getOpcode();
    return Opc == PPC::PROBED_STACKALLOC_64 || Opc == PPC::PROBED_STACKALLOC_32;
  });
  if (StackAllocMIPos == PrologMBB.end())
    return;
  const BasicBlock *ProbedBB = PrologMBB.getBasicBlock();
  MachineBasicBlock *CurrentMBB = &PrologMBB;
  DebugLoc DL = PrologMBB.findDebugLoc(StackAllocMIPos);
  MachineInstr &MI = *StackAllocMIPos;
  int64_t NegFrameSize = MI.getOperand(2).getImm();
  unsigned ProbeSize = TLI.getStackProbeSize(MF);
  int64_t NegProbeSize = -(int64_t)ProbeSize;
  assert(isInt<32>(NegProbeSize) && "Unhandled probe size");
  // Split the allocation into NumBlocks full probe-size chunks plus one
  // residual chunk strictly smaller than the probe size.
  int64_t NumBlocks = NegFrameSize / NegProbeSize;
  int64_t NegResidualSize = NegFrameSize % NegProbeSize;
  Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
  Register ScratchReg = MI.getOperand(0).getReg();
  Register FPReg = MI.getOperand(1).getReg();
  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  bool HasBP = RegInfo->hasBasePointer(MF);
  Register BPReg = RegInfo->getBaseRegister(MF);
  Align MaxAlign = MFI.getMaxAlign();
  const MCInstrDesc &CopyInst = TII.get(isPPC64 ? PPC::OR8 : PPC::OR);
  // Subroutines to generate .cfi_* directives.
  auto buildDefCFAReg = [&](MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, Register Reg) {
    unsigned RegNum = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MF.addFrameInst(
        MCCFIInstruction::createDefCfaRegister(nullptr, RegNum));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  };
  auto buildDefCFA = [&](MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI, Register Reg,
                         int Offset) {
    unsigned RegNum = MRI->getDwarfRegNum(Reg, true);
    unsigned CFIIndex = MBB.getParent()->addFrameInst(
        MCCFIInstruction::cfiDefCfa(nullptr, RegNum, Offset));
    BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
        .addCFIIndex(CFIIndex);
  };
  // Subroutine to determine if we can use the Imm as part of d-form.
  // D-form store-with-update requires a 16-bit immediate that is a multiple
  // of 4; otherwise the immediate must be materialized into a register and
  // the x-form (indexed) store-with-update used instead.
  auto CanUseDForm = [](int64_t Imm) { return isInt<16>(Imm) && Imm % 4 == 0; };
  // Subroutine to materialize the Imm into TempReg.
  auto MaterializeImm = [&](MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI, int64_t Imm,
                            Register &TempReg) {
    assert(isInt<32>(Imm) && "Unhandled imm");
    if (isInt<16>(Imm))
      BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::LI8 : PPC::LI), TempReg)
          .addImm(Imm);
    else {
      // 32-bit immediate: load the high half shifted, then OR in the low half.
      BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::LIS8 : PPC::LIS), TempReg)
          .addImm(Imm >> 16);
      BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::ORI8 : PPC::ORI), TempReg)
          .addReg(TempReg)
          .addImm(Imm & 0xFFFF);
    }
  };
  // Subroutine to store frame pointer and decrease stack pointer by probe size.
  // The store-with-update both writes the backchain word and performs the SP
  // decrement, so each call is one combined allocate-and-probe step.
  auto allocateAndProbe = [&](MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI, int64_t NegSize,
                              Register NegSizeReg, bool UseDForm,
                              Register StoreReg) {
    if (UseDForm)
      BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::STDU : PPC::STWU), SPReg)
          .addReg(StoreReg)
          .addImm(NegSize)
          .addReg(SPReg);
    else
      BuildMI(MBB, MBBI, DL, TII.get(isPPC64 ? PPC::STDUX : PPC::STWUX), SPReg)
          .addReg(StoreReg)
          .addReg(SPReg)
          .addReg(NegSizeReg);
  };
  // Used to probe realignment gap [stackptr - (stackptr % align), stackptr)
  // when HasBP && isPPC64. In such scenario, normally we have r0, r1, r12, r30
  // available and r1 is already copied to r30 which is BPReg. So BPReg stores
  // the value of stackptr.
  // First we have to probe tail interval whose size is less than probesize,
  // i.e., [stackptr - (stackptr % align) % probesize, stackptr). At this stage,
  // ScratchReg stores the value of ((stackptr % align) % probesize). Then we
  // probe each block sized probesize until stackptr meets
  // (stackptr - (stackptr % align)). At this stage, ScratchReg is materialized
  // as negprobesize. At both stages, TempReg stores the value of
  // (stackptr - (stackptr % align)).
  // Returns the new fall-through block so the caller can keep inserting there.
  auto dynamicProbe = [&](MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator MBBI, Register ScratchReg,
                          Register TempReg) {
    assert(HasBP && isPPC64 && "Probe alignment part not available");
    assert(isPowerOf2_64(ProbeSize) && "Probe size should be power of 2");
    // ScratchReg = stackptr % align
    BuildMI(MBB, MBBI, DL, TII.get(PPC::RLDICL), ScratchReg)
        .addReg(BPReg)
        .addImm(0)
        .addImm(64 - Log2(MaxAlign));
    // TempReg = stackptr - (stackptr % align)
    BuildMI(MBB, MBBI, DL, TII.get(PPC::SUBFC8), TempReg)
        .addReg(ScratchReg)
        .addReg(BPReg);
    // ScratchReg = (stackptr % align) % probesize
    BuildMI(MBB, MBBI, DL, TII.get(PPC::RLDICL), ScratchReg)
        .addReg(ScratchReg)
        .addImm(0)
        .addImm(64 - Log2(ProbeSize));
    Register CRReg = PPC::CR0;
    // If (stackptr % align) % probesize == 0, we should not generate probe
    // code. Layout of output assembly kinda like:
    // bb.0:
    //   ...
    //   cmpldi $scratchreg, 0
    //   beq bb.2
    // bb.1: # Probe tail interval
    //   neg $scratchreg, $scratchreg
    //   stdux $bpreg, r1, $scratchreg
    // bb.2:
    //   <materialize negprobesize into $scratchreg>
    //   cmpd r1, $tempreg
    //   beq bb.4
    // bb.3: # Loop to probe each block
    //   stdux $bpreg, r1, $scratchreg
    //   cmpd r1, $tempreg
    //   bne bb.3
    // bb.4:
    //   ...
    MachineFunction::iterator MBBInsertPoint = std::next(MBB.getIterator());
    MachineBasicBlock *ProbeResidualMBB = MF.CreateMachineBasicBlock(ProbedBB);
    MF.insert(MBBInsertPoint, ProbeResidualMBB);
    MachineBasicBlock *ProbeLoopPreHeaderMBB =
        MF.CreateMachineBasicBlock(ProbedBB);
    MF.insert(MBBInsertPoint, ProbeLoopPreHeaderMBB);
    MachineBasicBlock *ProbeLoopBodyMBB = MF.CreateMachineBasicBlock(ProbedBB);
    MF.insert(MBBInsertPoint, ProbeLoopBodyMBB);
    MachineBasicBlock *ProbeExitMBB = MF.CreateMachineBasicBlock(ProbedBB);
    MF.insert(MBBInsertPoint, ProbeExitMBB);
    // bb.4: everything after the insertion point moves to the exit block,
    // which inherits MBB's successors.
    ProbeExitMBB->splice(ProbeExitMBB->end(), &MBB, MBBI, MBB.end());
    ProbeExitMBB->transferSuccessorsAndUpdatePHIs(&MBB);
    // bb.0
    BuildMI(&MBB, DL, TII.get(PPC::CMPDI), CRReg).addReg(ScratchReg).addImm(0);
    BuildMI(&MBB, DL, TII.get(PPC::BCC))
        .addImm(PPC::PRED_EQ)
        .addReg(CRReg)
        .addMBB(ProbeLoopPreHeaderMBB);
    MBB.addSuccessor(ProbeResidualMBB);
    MBB.addSuccessor(ProbeLoopPreHeaderMBB);
    // bb.1: probe the sub-probesize tail; negate the remainder so the
    // store-with-update decrements SP by it.
    BuildMI(ProbeResidualMBB, DL, TII.get(PPC::NEG8), ScratchReg)
        .addReg(ScratchReg);
    allocateAndProbe(*ProbeResidualMBB, ProbeResidualMBB->end(), 0, ScratchReg,
                     false, BPReg);
    ProbeResidualMBB->addSuccessor(ProbeLoopPreHeaderMBB);
    // bb.2: skip the loop entirely if SP already reached the aligned target.
    MaterializeImm(*ProbeLoopPreHeaderMBB, ProbeLoopPreHeaderMBB->end(),
                   NegProbeSize, ScratchReg);
    BuildMI(ProbeLoopPreHeaderMBB, DL, TII.get(PPC::CMPD), CRReg)
        .addReg(SPReg)
        .addReg(TempReg);
    BuildMI(ProbeLoopPreHeaderMBB, DL, TII.get(PPC::BCC))
        .addImm(PPC::PRED_EQ)
        .addReg(CRReg)
        .addMBB(ProbeExitMBB);
    ProbeLoopPreHeaderMBB->addSuccessor(ProbeLoopBodyMBB);
    ProbeLoopPreHeaderMBB->addSuccessor(ProbeExitMBB);
    // bb.3: probe one probe-size block per iteration until SP == TempReg.
    allocateAndProbe(*ProbeLoopBodyMBB, ProbeLoopBodyMBB->end(), 0, ScratchReg,
                     false, BPReg);
    BuildMI(ProbeLoopBodyMBB, DL, TII.get(PPC::CMPD), CRReg)
        .addReg(SPReg)
        .addReg(TempReg);
    BuildMI(ProbeLoopBodyMBB, DL, TII.get(PPC::BCC))
        .addImm(PPC::PRED_NE)
        .addReg(CRReg)
        .addMBB(ProbeLoopBodyMBB);
    ProbeLoopBodyMBB->addSuccessor(ProbeExitMBB);
    ProbeLoopBodyMBB->addSuccessor(ProbeLoopBodyMBB);
    // Update liveins.
    recomputeLiveIns(*ProbeResidualMBB);
    recomputeLiveIns(*ProbeLoopPreHeaderMBB);
    recomputeLiveIns(*ProbeLoopBodyMBB);
    recomputeLiveIns(*ProbeExitMBB);
    return ProbeExitMBB;
  };
  // For case HasBP && MaxAlign > 1, we have to realign the SP by performing
  // SP = SP - SP % MaxAlign.
  if (HasBP && MaxAlign > 1) {
    // FIXME: Currently only probe the gap [stackptr & alignmask, stackptr) in
    // 64-bit mode.
    if (isPPC64) {
      // Use BPReg to calculate CFA.
      if (needsCFI)
        buildDefCFA(*CurrentMBB, {MI}, BPReg, 0);
      // Since we have SPReg copied to BPReg at the moment, FPReg can be used as
      // TempReg.
      Register TempReg = FPReg;
      CurrentMBB = dynamicProbe(*CurrentMBB, {MI}, ScratchReg, TempReg);
      // Copy BPReg to FPReg to meet the definition of PROBED_STACKALLOC_64.
      BuildMI(*CurrentMBB, {MI}, DL, CopyInst, FPReg)
          .addReg(BPReg)
          .addReg(BPReg);
    } else {
      // 32-bit: realign SP without probing the gap (see FIXME above).
      // Initialize current frame pointer.
      BuildMI(*CurrentMBB, {MI}, DL, CopyInst, FPReg)
          .addReg(SPReg)
          .addReg(SPReg);
      // Use FPReg to calculate CFA.
      if (needsCFI)
        buildDefCFA(*CurrentMBB, {MI}, FPReg, 0);
      // ScratchReg = SP % MaxAlign.
      BuildMI(*CurrentMBB, {MI}, DL, TII.get(PPC::RLWINM), ScratchReg)
          .addReg(FPReg)
          .addImm(0)
          .addImm(32 - Log2(MaxAlign))
          .addImm(31);
      // SP = SP - (SP % MaxAlign).
      BuildMI(*CurrentMBB, {MI}, DL, TII.get(PPC::SUBFC), SPReg)
          .addReg(ScratchReg)
          .addReg(SPReg);
    }
  } else {
    // Initialize current frame pointer.
    BuildMI(*CurrentMBB, {MI}, DL, CopyInst, FPReg).addReg(SPReg).addReg(SPReg);
    // Use FPReg to calculate CFA.
    if (needsCFI)
      buildDefCFA(*CurrentMBB, {MI}, FPReg, 0);
  }
  // Probe residual part.
  if (NegResidualSize) {
    bool ResidualUseDForm = CanUseDForm(NegResidualSize);
    if (!ResidualUseDForm)
      MaterializeImm(*CurrentMBB, {MI}, NegResidualSize, ScratchReg);
    allocateAndProbe(*CurrentMBB, {MI}, NegResidualSize, ScratchReg,
                     ResidualUseDForm, FPReg);
  }
  bool UseDForm = CanUseDForm(NegProbeSize);
  // If number of blocks is small, just probe them directly.
  if (NumBlocks < 3) {
    if (!UseDForm)
      MaterializeImm(*CurrentMBB, {MI}, NegProbeSize, ScratchReg);
    for (int i = 0; i < NumBlocks; ++i)
      allocateAndProbe(*CurrentMBB, {MI}, NegProbeSize, ScratchReg, UseDForm,
                       FPReg);
    if (needsCFI) {
      // Restore using SPReg to calculate CFA.
      buildDefCFAReg(*CurrentMBB, {MI}, SPReg);
    }
  } else {
    // Since CTR is a volatile register and current shrinkwrap implementation
    // won't choose an MBB in a loop as the PrologMBB, it's safe to synthesize a
    // CTR loop to probe.
    // Calculate trip count and stores it in CTRReg.
    MaterializeImm(*CurrentMBB, {MI}, NumBlocks, ScratchReg);
    BuildMI(*CurrentMBB, {MI}, DL, TII.get(isPPC64 ? PPC::MTCTR8 : PPC::MTCTR))
        .addReg(ScratchReg, RegState::Kill);
    if (!UseDForm)
      MaterializeImm(*CurrentMBB, {MI}, NegProbeSize, ScratchReg);
    // Create MBBs of the loop.
    MachineFunction::iterator MBBInsertPoint =
        std::next(CurrentMBB->getIterator());
    MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(ProbedBB);
    MF.insert(MBBInsertPoint, LoopMBB);
    MachineBasicBlock *ExitMBB = MF.CreateMachineBasicBlock(ProbedBB);
    MF.insert(MBBInsertPoint, ExitMBB);
    // Synthesize the loop body: one probe per iteration, decrement CTR and
    // branch back while it is non-zero.
    allocateAndProbe(*LoopMBB, LoopMBB->end(), NegProbeSize, ScratchReg,
                     UseDForm, FPReg);
    BuildMI(LoopMBB, DL, TII.get(isPPC64 ? PPC::BDNZ8 : PPC::BDNZ))
        .addMBB(LoopMBB);
    LoopMBB->addSuccessor(ExitMBB);
    LoopMBB->addSuccessor(LoopMBB);
    // Synthesize the exit MBB: instructions after the pseudo move there, and
    // it inherits the current block's successors.
    ExitMBB->splice(ExitMBB->end(), CurrentMBB,
                    std::next(MachineBasicBlock::iterator(MI)),
                    CurrentMBB->end());
    ExitMBB->transferSuccessorsAndUpdatePHIs(CurrentMBB);
    CurrentMBB->addSuccessor(LoopMBB);
    if (needsCFI) {
      // Restore using SPReg to calculate CFA.
      buildDefCFAReg(*ExitMBB, ExitMBB->begin(), SPReg);
    }
    // Update liveins.
    recomputeLiveIns(*LoopMBB);
    recomputeLiveIns(*ExitMBB);
  }
  ++NumPrologProbed;
  // The pseudo has been fully expanded; remove it.
  MI.eraseFromParent();
}
1528
emitEpilogue(MachineFunction & MF,MachineBasicBlock & MBB) const1529 void PPCFrameLowering::emitEpilogue(MachineFunction &MF,
1530 MachineBasicBlock &MBB) const {
1531 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
1532 DebugLoc dl;
1533
1534 if (MBBI != MBB.end())
1535 dl = MBBI->getDebugLoc();
1536
1537 const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
1538 const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
1539
1540 // Get alignment info so we know how to restore the SP.
1541 const MachineFrameInfo &MFI = MF.getFrameInfo();
1542
1543 // Get the number of bytes allocated from the FrameInfo.
1544 int FrameSize = MFI.getStackSize();
1545
1546 // Get processor type.
1547 bool isPPC64 = Subtarget.isPPC64();
1548
1549 // Check if the link register (LR) has been saved.
1550 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1551 bool MustSaveLR = FI->mustSaveLR();
1552 const SmallVectorImpl<Register> &MustSaveCRs = FI->getMustSaveCRs();
1553 bool MustSaveCR = !MustSaveCRs.empty();
1554 // Do we have a frame pointer and/or base pointer for this function?
1555 bool HasFP = hasFP(MF);
1556 bool HasBP = RegInfo->hasBasePointer(MF);
1557 bool HasRedZone = Subtarget.isPPC64() || !Subtarget.isSVR4ABI();
1558 bool HasROPProtect = Subtarget.hasROPProtect();
1559 bool HasPrivileged = Subtarget.hasPrivileged();
1560
1561 Register SPReg = isPPC64 ? PPC::X1 : PPC::R1;
1562 Register BPReg = RegInfo->getBaseRegister(MF);
1563 Register FPReg = isPPC64 ? PPC::X31 : PPC::R31;
1564 Register ScratchReg;
1565 Register TempReg = isPPC64 ? PPC::X12 : PPC::R12; // another scratch reg
1566 const MCInstrDesc& MTLRInst = TII.get( isPPC64 ? PPC::MTLR8
1567 : PPC::MTLR );
1568 const MCInstrDesc& LoadInst = TII.get( isPPC64 ? PPC::LD
1569 : PPC::LWZ );
1570 const MCInstrDesc& LoadImmShiftedInst = TII.get( isPPC64 ? PPC::LIS8
1571 : PPC::LIS );
1572 const MCInstrDesc& OrInst = TII.get(isPPC64 ? PPC::OR8
1573 : PPC::OR );
1574 const MCInstrDesc& OrImmInst = TII.get( isPPC64 ? PPC::ORI8
1575 : PPC::ORI );
1576 const MCInstrDesc& AddImmInst = TII.get( isPPC64 ? PPC::ADDI8
1577 : PPC::ADDI );
1578 const MCInstrDesc& AddInst = TII.get( isPPC64 ? PPC::ADD8
1579 : PPC::ADD4 );
1580 const MCInstrDesc& LoadWordInst = TII.get( isPPC64 ? PPC::LWZ8
1581 : PPC::LWZ);
1582 const MCInstrDesc& MoveToCRInst = TII.get( isPPC64 ? PPC::MTOCRF8
1583 : PPC::MTOCRF);
1584 const MCInstrDesc &HashChk =
1585 TII.get(HasPrivileged ? PPC::HASHCHKP : PPC::HASHCHK);
1586 int LROffset = getReturnSaveOffset();
1587
1588 int FPOffset = 0;
1589
1590 // Using the same bool variable as below to suppress compiler warnings.
1591 bool SingleScratchReg = findScratchRegister(&MBB, true, false, &ScratchReg,
1592 &TempReg);
1593 assert(SingleScratchReg &&
1594 "Could not find an available scratch register");
1595
1596 SingleScratchReg = ScratchReg == TempReg;
1597
1598 if (HasFP) {
1599 int FPIndex = FI->getFramePointerSaveIndex();
1600 assert(FPIndex && "No Frame Pointer Save Slot!");
1601 FPOffset = MFI.getObjectOffset(FPIndex);
1602 }
1603
1604 int BPOffset = 0;
1605 if (HasBP) {
1606 int BPIndex = FI->getBasePointerSaveIndex();
1607 assert(BPIndex && "No Base Pointer Save Slot!");
1608 BPOffset = MFI.getObjectOffset(BPIndex);
1609 }
1610
1611 int PBPOffset = 0;
1612 if (FI->usesPICBase()) {
1613 int PBPIndex = FI->getPICBasePointerSaveIndex();
1614 assert(PBPIndex && "No PIC Base Pointer Save Slot!");
1615 PBPOffset = MFI.getObjectOffset(PBPIndex);
1616 }
1617
1618 bool IsReturnBlock = (MBBI != MBB.end() && MBBI->isReturn());
1619
1620 if (IsReturnBlock) {
1621 unsigned RetOpcode = MBBI->getOpcode();
1622 bool UsesTCRet = RetOpcode == PPC::TCRETURNri ||
1623 RetOpcode == PPC::TCRETURNdi ||
1624 RetOpcode == PPC::TCRETURNai ||
1625 RetOpcode == PPC::TCRETURNri8 ||
1626 RetOpcode == PPC::TCRETURNdi8 ||
1627 RetOpcode == PPC::TCRETURNai8;
1628
1629 if (UsesTCRet) {
1630 int MaxTCRetDelta = FI->getTailCallSPDelta();
1631 MachineOperand &StackAdjust = MBBI->getOperand(1);
1632 assert(StackAdjust.isImm() && "Expecting immediate value.");
1633 // Adjust stack pointer.
1634 int StackAdj = StackAdjust.getImm();
1635 int Delta = StackAdj - MaxTCRetDelta;
1636 assert((Delta >= 0) && "Delta must be positive");
1637 if (MaxTCRetDelta>0)
1638 FrameSize += (StackAdj +Delta);
1639 else
1640 FrameSize += StackAdj;
1641 }
1642 }
1643
1644 // Frames of 32KB & larger require special handling because they cannot be
1645 // indexed into with a simple LD/LWZ immediate offset operand.
1646 bool isLargeFrame = !isInt<16>(FrameSize);
1647
1648 // On targets without red zone, the SP needs to be restored last, so that
1649 // all live contents of the stack frame are upwards of the SP. This means
1650 // that we cannot restore SP just now, since there may be more registers
1651 // to restore from the stack frame (e.g. R31). If the frame size is not
1652 // a simple immediate value, we will need a spare register to hold the
1653 // restored SP. If the frame size is known and small, we can simply adjust
1654 // the offsets of the registers to be restored, and still use SP to restore
1655 // them. In such case, the final update of SP will be to add the frame
1656 // size to it.
1657 // To simplify the code, set RBReg to the base register used to restore
1658 // values from the stack, and set SPAdd to the value that needs to be added
1659 // to the SP at the end. The default values are as if red zone was present.
1660 unsigned RBReg = SPReg;
1661 unsigned SPAdd = 0;
1662
1663 // Check if we can move the stack update instruction up the epilogue
1664 // past the callee saves. This will allow the move to LR instruction
1665 // to be executed before the restores of the callee saves which means
1666 // that the callee saves can hide the latency from the MTLR instrcution.
1667 MachineBasicBlock::iterator StackUpdateLoc = MBBI;
1668 if (stackUpdateCanBeMoved(MF)) {
1669 const std::vector<CalleeSavedInfo> & Info = MFI.getCalleeSavedInfo();
1670 for (CalleeSavedInfo CSI : Info) {
1671 // If the callee saved register is spilled to another register abort the
1672 // stack update movement.
1673 if (CSI.isSpilledToReg()) {
1674 StackUpdateLoc = MBBI;
1675 break;
1676 }
1677 int FrIdx = CSI.getFrameIdx();
1678 // If the frame index is not negative the callee saved info belongs to a
1679 // stack object that is not a fixed stack object. We ignore non-fixed
1680 // stack objects because we won't move the update of the stack pointer
1681 // past them.
1682 if (FrIdx >= 0)
1683 continue;
1684
1685 if (MFI.isFixedObjectIndex(FrIdx) && MFI.getObjectOffset(FrIdx) < 0)
1686 StackUpdateLoc--;
1687 else {
1688 // Abort the operation as we can't update all CSR restores.
1689 StackUpdateLoc = MBBI;
1690 break;
1691 }
1692 }
1693 }
1694
1695 if (FrameSize) {
1696 // In the prologue, the loaded (or persistent) stack pointer value is
1697 // offset by the STDU/STDUX/STWU/STWUX instruction. For targets with red
1698 // zone add this offset back now.
1699
1700 // If the function has a base pointer, the stack pointer has been copied
1701 // to it so we can restore it by copying in the other direction.
1702 if (HasRedZone && HasBP) {
1703 BuildMI(MBB, MBBI, dl, OrInst, RBReg).
1704 addReg(BPReg).
1705 addReg(BPReg);
1706 }
1707 // If this function contained a fastcc call and GuaranteedTailCallOpt is
1708 // enabled (=> hasFastCall()==true) the fastcc call might contain a tail
1709 // call which invalidates the stack pointer value in SP(0). So we use the
1710 // value of R31 in this case. Similar situation exists with setjmp.
1711 else if (FI->hasFastCall() || MF.exposesReturnsTwice()) {
1712 assert(HasFP && "Expecting a valid frame pointer.");
1713 if (!HasRedZone)
1714 RBReg = FPReg;
1715 if (!isLargeFrame) {
1716 BuildMI(MBB, MBBI, dl, AddImmInst, RBReg)
1717 .addReg(FPReg).addImm(FrameSize);
1718 } else {
1719 BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
1720 .addImm(FrameSize >> 16);
1721 BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
1722 .addReg(ScratchReg, RegState::Kill)
1723 .addImm(FrameSize & 0xFFFF);
1724 BuildMI(MBB, MBBI, dl, AddInst)
1725 .addReg(RBReg)
1726 .addReg(FPReg)
1727 .addReg(ScratchReg);
1728 }
1729 } else if (!isLargeFrame && !HasBP && !MFI.hasVarSizedObjects()) {
1730 if (HasRedZone) {
1731 BuildMI(MBB, StackUpdateLoc, dl, AddImmInst, SPReg)
1732 .addReg(SPReg)
1733 .addImm(FrameSize);
1734 } else {
1735 // Make sure that adding FrameSize will not overflow the max offset
1736 // size.
1737 assert(FPOffset <= 0 && BPOffset <= 0 && PBPOffset <= 0 &&
1738 "Local offsets should be negative");
1739 SPAdd = FrameSize;
1740 FPOffset += FrameSize;
1741 BPOffset += FrameSize;
1742 PBPOffset += FrameSize;
1743 }
1744 } else {
1745 // We don't want to use ScratchReg as a base register, because it
1746 // could happen to be R0. Use FP instead, but make sure to preserve it.
1747 if (!HasRedZone) {
1748 // If FP is not saved, copy it to ScratchReg.
1749 if (!HasFP)
1750 BuildMI(MBB, MBBI, dl, OrInst, ScratchReg)
1751 .addReg(FPReg)
1752 .addReg(FPReg);
1753 RBReg = FPReg;
1754 }
1755 BuildMI(MBB, StackUpdateLoc, dl, LoadInst, RBReg)
1756 .addImm(0)
1757 .addReg(SPReg);
1758 }
1759 }
1760 assert(RBReg != ScratchReg && "Should have avoided ScratchReg");
1761 // If there is no red zone, ScratchReg may be needed for holding a useful
1762 // value (although not the base register). Make sure it is not overwritten
1763 // too early.
1764
1765 // If we need to restore both the LR and the CR and we only have one
1766 // available scratch register, we must do them one at a time.
1767 if (MustSaveCR && SingleScratchReg && MustSaveLR) {
1768 // Here TempReg == ScratchReg, and in the absence of red zone ScratchReg
1769 // is live here.
1770 assert(HasRedZone && "Expecting red zone");
1771 BuildMI(MBB, MBBI, dl, LoadWordInst, TempReg)
1772 .addImm(CRSaveOffset)
1773 .addReg(SPReg);
1774 for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
1775 BuildMI(MBB, MBBI, dl, MoveToCRInst, MustSaveCRs[i])
1776 .addReg(TempReg, getKillRegState(i == e-1));
1777 }
1778
1779 // Delay restoring of the LR if ScratchReg is needed. This is ok, since
1780 // LR is stored in the caller's stack frame. ScratchReg will be needed
1781 // if RBReg is anything other than SP. We shouldn't use ScratchReg as
1782 // a base register anyway, because it may happen to be R0.
1783 bool LoadedLR = false;
1784 if (MustSaveLR && RBReg == SPReg && isInt<16>(LROffset+SPAdd)) {
1785 BuildMI(MBB, StackUpdateLoc, dl, LoadInst, ScratchReg)
1786 .addImm(LROffset+SPAdd)
1787 .addReg(RBReg);
1788 LoadedLR = true;
1789 }
1790
1791 if (MustSaveCR && !(SingleScratchReg && MustSaveLR)) {
1792 assert(RBReg == SPReg && "Should be using SP as a base register");
1793 BuildMI(MBB, MBBI, dl, LoadWordInst, TempReg)
1794 .addImm(CRSaveOffset)
1795 .addReg(RBReg);
1796 }
1797
1798 if (HasFP) {
1799 // If there is red zone, restore FP directly, since SP has already been
1800 // restored. Otherwise, restore the value of FP into ScratchReg.
1801 if (HasRedZone || RBReg == SPReg)
1802 BuildMI(MBB, MBBI, dl, LoadInst, FPReg)
1803 .addImm(FPOffset)
1804 .addReg(SPReg);
1805 else
1806 BuildMI(MBB, MBBI, dl, LoadInst, ScratchReg)
1807 .addImm(FPOffset)
1808 .addReg(RBReg);
1809 }
1810
1811 if (FI->usesPICBase())
1812 BuildMI(MBB, MBBI, dl, LoadInst, PPC::R30)
1813 .addImm(PBPOffset)
1814 .addReg(RBReg);
1815
1816 if (HasBP)
1817 BuildMI(MBB, MBBI, dl, LoadInst, BPReg)
1818 .addImm(BPOffset)
1819 .addReg(RBReg);
1820
1821 // There is nothing more to be loaded from the stack, so now we can
1822 // restore SP: SP = RBReg + SPAdd.
1823 if (RBReg != SPReg || SPAdd != 0) {
1824 assert(!HasRedZone && "This should not happen with red zone");
1825 // If SPAdd is 0, generate a copy.
1826 if (SPAdd == 0)
1827 BuildMI(MBB, MBBI, dl, OrInst, SPReg)
1828 .addReg(RBReg)
1829 .addReg(RBReg);
1830 else
1831 BuildMI(MBB, MBBI, dl, AddImmInst, SPReg)
1832 .addReg(RBReg)
1833 .addImm(SPAdd);
1834
1835 assert(RBReg != ScratchReg && "Should be using FP or SP as base register");
1836 if (RBReg == FPReg)
1837 BuildMI(MBB, MBBI, dl, OrInst, FPReg)
1838 .addReg(ScratchReg)
1839 .addReg(ScratchReg);
1840
1841 // Now load the LR from the caller's stack frame.
1842 if (MustSaveLR && !LoadedLR)
1843 BuildMI(MBB, MBBI, dl, LoadInst, ScratchReg)
1844 .addImm(LROffset)
1845 .addReg(SPReg);
1846 }
1847
1848 if (MustSaveCR &&
1849 !(SingleScratchReg && MustSaveLR))
1850 for (unsigned i = 0, e = MustSaveCRs.size(); i != e; ++i)
1851 BuildMI(MBB, MBBI, dl, MoveToCRInst, MustSaveCRs[i])
1852 .addReg(TempReg, getKillRegState(i == e-1));
1853
1854 if (MustSaveLR) {
1855 // If ROP protection is required, an extra instruction is added to compute a
1856 // hash and then compare it to the hash stored in the prologue.
1857 if (HasROPProtect) {
1858 const int SaveIndex = FI->getROPProtectionHashSaveIndex();
1859 const int ImmOffset = MFI.getObjectOffset(SaveIndex);
1860 assert((ImmOffset <= -8 && ImmOffset >= -512) &&
1861 "ROP hash check location offset out of range.");
1862 assert(((ImmOffset & 0x7) == 0) &&
1863 "ROP hash check location offset must be 8 byte aligned.");
1864 BuildMI(MBB, StackUpdateLoc, dl, HashChk)
1865 .addReg(ScratchReg)
1866 .addImm(ImmOffset)
1867 .addReg(SPReg);
1868 }
1869 BuildMI(MBB, StackUpdateLoc, dl, MTLRInst).addReg(ScratchReg);
1870 }
1871
1872 // Callee pop calling convention. Pop parameter/linkage area. Used for tail
1873 // call optimization
1874 if (IsReturnBlock) {
1875 unsigned RetOpcode = MBBI->getOpcode();
1876 if (MF.getTarget().Options.GuaranteedTailCallOpt &&
1877 (RetOpcode == PPC::BLR || RetOpcode == PPC::BLR8) &&
1878 MF.getFunction().getCallingConv() == CallingConv::Fast) {
1879 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1880 unsigned CallerAllocatedAmt = FI->getMinReservedArea();
1881
1882 if (CallerAllocatedAmt && isInt<16>(CallerAllocatedAmt)) {
1883 BuildMI(MBB, MBBI, dl, AddImmInst, SPReg)
1884 .addReg(SPReg).addImm(CallerAllocatedAmt);
1885 } else {
1886 BuildMI(MBB, MBBI, dl, LoadImmShiftedInst, ScratchReg)
1887 .addImm(CallerAllocatedAmt >> 16);
1888 BuildMI(MBB, MBBI, dl, OrImmInst, ScratchReg)
1889 .addReg(ScratchReg, RegState::Kill)
1890 .addImm(CallerAllocatedAmt & 0xFFFF);
1891 BuildMI(MBB, MBBI, dl, AddInst)
1892 .addReg(SPReg)
1893 .addReg(FPReg)
1894 .addReg(ScratchReg);
1895 }
1896 } else {
1897 createTailCallBranchInstr(MBB);
1898 }
1899 }
1900 }
1901
createTailCallBranchInstr(MachineBasicBlock & MBB) const1902 void PPCFrameLowering::createTailCallBranchInstr(MachineBasicBlock &MBB) const {
1903 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
1904
1905 // If we got this far a first terminator should exist.
1906 assert(MBBI != MBB.end() && "Failed to find the first terminator.");
1907
1908 DebugLoc dl = MBBI->getDebugLoc();
1909 const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
1910
1911 // Create branch instruction for pseudo tail call return instruction.
1912 // The TCRETURNdi variants are direct calls. Valid targets for those are
1913 // MO_GlobalAddress operands as well as MO_ExternalSymbol with PC-Rel
1914 // since we can tail call external functions with PC-Rel (i.e. we don't need
1915 // to worry about different TOC pointers). Some of the external functions will
1916 // be MO_GlobalAddress while others like memcpy for example, are going to
1917 // be MO_ExternalSymbol.
1918 unsigned RetOpcode = MBBI->getOpcode();
1919 if (RetOpcode == PPC::TCRETURNdi) {
1920 MBBI = MBB.getLastNonDebugInstr();
1921 MachineOperand &JumpTarget = MBBI->getOperand(0);
1922 if (JumpTarget.isGlobal())
1923 BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB)).
1924 addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
1925 else if (JumpTarget.isSymbol())
1926 BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB)).
1927 addExternalSymbol(JumpTarget.getSymbolName());
1928 else
1929 llvm_unreachable("Expecting Global or External Symbol");
1930 } else if (RetOpcode == PPC::TCRETURNri) {
1931 MBBI = MBB.getLastNonDebugInstr();
1932 assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
1933 BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR));
1934 } else if (RetOpcode == PPC::TCRETURNai) {
1935 MBBI = MBB.getLastNonDebugInstr();
1936 MachineOperand &JumpTarget = MBBI->getOperand(0);
1937 BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA)).addImm(JumpTarget.getImm());
1938 } else if (RetOpcode == PPC::TCRETURNdi8) {
1939 MBBI = MBB.getLastNonDebugInstr();
1940 MachineOperand &JumpTarget = MBBI->getOperand(0);
1941 if (JumpTarget.isGlobal())
1942 BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8)).
1943 addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
1944 else if (JumpTarget.isSymbol())
1945 BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILB8)).
1946 addExternalSymbol(JumpTarget.getSymbolName());
1947 else
1948 llvm_unreachable("Expecting Global or External Symbol");
1949 } else if (RetOpcode == PPC::TCRETURNri8) {
1950 MBBI = MBB.getLastNonDebugInstr();
1951 assert(MBBI->getOperand(0).isReg() && "Expecting register operand.");
1952 BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBCTR8));
1953 } else if (RetOpcode == PPC::TCRETURNai8) {
1954 MBBI = MBB.getLastNonDebugInstr();
1955 MachineOperand &JumpTarget = MBBI->getOperand(0);
1956 BuildMI(MBB, MBBI, dl, TII.get(PPC::TAILBA8)).addImm(JumpTarget.getImm());
1957 }
1958 }
1959
determineCalleeSaves(MachineFunction & MF,BitVector & SavedRegs,RegScavenger * RS) const1960 void PPCFrameLowering::determineCalleeSaves(MachineFunction &MF,
1961 BitVector &SavedRegs,
1962 RegScavenger *RS) const {
1963 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
1964
1965 const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
1966
1967 // Save and clear the LR state.
1968 PPCFunctionInfo *FI = MF.getInfo<PPCFunctionInfo>();
1969 unsigned LR = RegInfo->getRARegister();
1970 FI->setMustSaveLR(MustSaveLR(MF, LR));
1971 SavedRegs.reset(LR);
1972
1973 // Save R31 if necessary
1974 int FPSI = FI->getFramePointerSaveIndex();
1975 const bool isPPC64 = Subtarget.isPPC64();
1976 MachineFrameInfo &MFI = MF.getFrameInfo();
1977
1978 // If the frame pointer save index hasn't been defined yet.
1979 if (!FPSI && needsFP(MF)) {
1980 // Find out what the fix offset of the frame pointer save area.
1981 int FPOffset = getFramePointerSaveOffset();
1982 // Allocate the frame index for frame pointer save area.
1983 FPSI = MFI.CreateFixedObject(isPPC64? 8 : 4, FPOffset, true);
1984 // Save the result.
1985 FI->setFramePointerSaveIndex(FPSI);
1986 }
1987
1988 int BPSI = FI->getBasePointerSaveIndex();
1989 if (!BPSI && RegInfo->hasBasePointer(MF)) {
1990 int BPOffset = getBasePointerSaveOffset();
1991 // Allocate the frame index for the base pointer save area.
1992 BPSI = MFI.CreateFixedObject(isPPC64? 8 : 4, BPOffset, true);
1993 // Save the result.
1994 FI->setBasePointerSaveIndex(BPSI);
1995 }
1996
1997 // Reserve stack space for the PIC Base register (R30).
1998 // Only used in SVR4 32-bit.
1999 if (FI->usesPICBase()) {
2000 int PBPSI = MFI.CreateFixedObject(4, -8, true);
2001 FI->setPICBasePointerSaveIndex(PBPSI);
2002 }
2003
2004 // Make sure we don't explicitly spill r31, because, for example, we have
2005 // some inline asm which explicitly clobbers it, when we otherwise have a
2006 // frame pointer and are using r31's spill slot for the prologue/epilogue
2007 // code. Same goes for the base pointer and the PIC base register.
2008 if (needsFP(MF))
2009 SavedRegs.reset(isPPC64 ? PPC::X31 : PPC::R31);
2010 if (RegInfo->hasBasePointer(MF))
2011 SavedRegs.reset(RegInfo->getBaseRegister(MF));
2012 if (FI->usesPICBase())
2013 SavedRegs.reset(PPC::R30);
2014
2015 // Reserve stack space to move the linkage area to in case of a tail call.
2016 int TCSPDelta = 0;
2017 if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2018 (TCSPDelta = FI->getTailCallSPDelta()) < 0) {
2019 MFI.CreateFixedObject(-1 * TCSPDelta, TCSPDelta, true);
2020 }
2021
2022 // Allocate the nonvolatile CR spill slot iff the function uses CR 2, 3, or 4.
2023 // For 64-bit SVR4, and all flavors of AIX we create a FixedStack
2024 // object at the offset of the CR-save slot in the linkage area. The actual
2025 // save and restore of the condition register will be created as part of the
2026 // prologue and epilogue insertion, but the FixedStack object is needed to
2027 // keep the CalleSavedInfo valid.
2028 if ((SavedRegs.test(PPC::CR2) || SavedRegs.test(PPC::CR3) ||
2029 SavedRegs.test(PPC::CR4))) {
2030 const uint64_t SpillSize = 4; // Condition register is always 4 bytes.
2031 const int64_t SpillOffset =
2032 Subtarget.isPPC64() ? 8 : Subtarget.isAIXABI() ? 4 : -4;
2033 int FrameIdx =
2034 MFI.CreateFixedObject(SpillSize, SpillOffset,
2035 /* IsImmutable */ true, /* IsAliased */ false);
2036 FI->setCRSpillFrameIndex(FrameIdx);
2037 }
2038 }
2039
// Assign final frame offsets to the callee-saved spill slots, laying out the
// FPR, GPR/G8R, CR and VR/SPE save areas downwards from the top of the frame.
// LowerBound tracks the (negative) bottom of everything laid out so far,
// relative to the incoming stack pointer.
void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF,
                                                           RegScavenger *RS) const {
  // Get callee saved register information.
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  // If the function is shrink-wrapped, and if the function has a tail call, the
  // tail call might not be in the new RestoreBlock, so real branch instruction
  // won't be generated by emitEpilogue(), because shrink-wrap has chosen new
  // RestoreBlock. So we handle this case here.
  if (MFI.getSavePoint() && MFI.hasTailCall()) {
    MachineBasicBlock *RestoreBlock = MFI.getRestorePoint();
    for (MachineBasicBlock &MBB : MF) {
      if (MBB.isReturnBlock() && (&MBB) != RestoreBlock)
        createTailCallBranchInstr(MBB);
    }
  }

  // Early exit if no callee saved registers are modified!
  if (CSI.empty() && !needsFP(MF)) {
    addScavengingSpillSlot(MF, RS);
    return;
  }

  // Lowest-numbered register saved in each class; each save area spans from
  // that register up to 31, so these determine the areas' sizes below.
  unsigned MinGPR = PPC::R31;
  unsigned MinG8R = PPC::X31;
  unsigned MinFPR = PPC::F31;
  unsigned MinVR = Subtarget.hasSPE() ? PPC::S31 : PPC::V31;

  bool HasGPSaveArea = false;
  bool HasG8SaveArea = false;
  bool HasFPSaveArea = false;
  bool HasVRSaveArea = false;

  SmallVector<CalleeSavedInfo, 18> GPRegs;
  SmallVector<CalleeSavedInfo, 18> G8Regs;
  SmallVector<CalleeSavedInfo, 18> FPRegs;
  SmallVector<CalleeSavedInfo, 18> VRegs;

  // Partition the callee-saved registers by register class (i.e. by save
  // area) and track the minimum register of each class.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    assert((!MF.getInfo<PPCFunctionInfo>()->mustSaveTOC() ||
            (Reg != PPC::X2 && Reg != PPC::R2)) &&
           "Not expecting to try to spill R2 in a function that must save TOC");
    if (PPC::GPRCRegClass.contains(Reg)) {
      HasGPSaveArea = true;

      GPRegs.push_back(CSI[i]);

      if (Reg < MinGPR) {
        MinGPR = Reg;
      }
    } else if (PPC::G8RCRegClass.contains(Reg)) {
      HasG8SaveArea = true;

      G8Regs.push_back(CSI[i]);

      if (Reg < MinG8R) {
        MinG8R = Reg;
      }
    } else if (PPC::F8RCRegClass.contains(Reg)) {
      HasFPSaveArea = true;

      FPRegs.push_back(CSI[i]);

      if (Reg < MinFPR) {
        MinFPR = Reg;
      }
    } else if (PPC::CRBITRCRegClass.contains(Reg) ||
               PPC::CRRCRegClass.contains(Reg)) {
      ; // do nothing, as we already know whether CRs are spilled
    } else if (PPC::VRRCRegClass.contains(Reg) ||
               PPC::SPERCRegClass.contains(Reg)) {
      // Altivec and SPE are mutually exclusive, but have the same stack
      // alignment requirements, so overload the save area for both cases.
      HasVRSaveArea = true;

      VRegs.push_back(CSI[i]);

      if (Reg < MinVR) {
        MinVR = Reg;
      }
    } else {
      llvm_unreachable("Unknown RegisterClass!");
    }
  }

  PPCFunctionInfo *PFI = MF.getInfo<PPCFunctionInfo>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  int64_t LowerBound = 0;

  // Take into account stack space reserved for tail calls.
  int TCSPDelta = 0;
  if (MF.getTarget().Options.GuaranteedTailCallOpt &&
      (TCSPDelta = PFI->getTailCallSPDelta()) < 0) {
    LowerBound = TCSPDelta;
  }

  // The Floating-point register save area is right below the back chain word
  // of the previous stack frame.
  if (HasFPSaveArea) {
    for (unsigned i = 0, e = FPRegs.size(); i != e; ++i) {
      int FI = FPRegs[i].getFrameIdx();

      MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
    }

    // FPRs are 8 bytes each; the area covers MinFPR..F31.
    LowerBound -= (31 - TRI->getEncodingValue(MinFPR) + 1) * 8;
  }

  // Check whether the frame pointer register is allocated. If so, make sure it
  // is spilled to the correct offset.
  if (needsFP(MF)) {
    int FI = PFI->getFramePointerSaveIndex();
    assert(FI && "No Frame Pointer Save Slot!");
    MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
    // FP is R31/X31, so no need to update MinGPR/MinG8R.
    HasGPSaveArea = true;
  }

  if (PFI->usesPICBase()) {
    int FI = PFI->getPICBasePointerSaveIndex();
    assert(FI && "No PIC Base Pointer Save Slot!");
    MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));

    MinGPR = std::min<unsigned>(MinGPR, PPC::R30);
    HasGPSaveArea = true;
  }

  const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  if (RegInfo->hasBasePointer(MF)) {
    int FI = PFI->getBasePointerSaveIndex();
    assert(FI && "No Base Pointer Save Slot!");
    MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));

    // The base pointer may be either 32-bit or 64-bit; widen the save area
    // of the matching class to include it.
    Register BP = RegInfo->getBaseRegister(MF);
    if (PPC::G8RCRegClass.contains(BP)) {
      MinG8R = std::min<unsigned>(MinG8R, BP);
      HasG8SaveArea = true;
    } else if (PPC::GPRCRegClass.contains(BP)) {
      MinGPR = std::min<unsigned>(MinGPR, BP);
      HasGPSaveArea = true;
    }
  }

  // General register save area starts right below the Floating-point
  // register save area.
  if (HasGPSaveArea || HasG8SaveArea) {
    // Move general register save area spill slots down, taking into account
    // the size of the Floating-point register save area.
    // GPRs spilled to a VSR (isSpilledToReg) have no stack slot to adjust.
    for (unsigned i = 0, e = GPRegs.size(); i != e; ++i) {
      if (!GPRegs[i].isSpilledToReg()) {
        int FI = GPRegs[i].getFrameIdx();
        MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
      }
    }

    // Move general register save area spill slots down, taking into account
    // the size of the Floating-point register save area.
    for (unsigned i = 0, e = G8Regs.size(); i != e; ++i) {
      if (!G8Regs[i].isSpilledToReg()) {
        int FI = G8Regs[i].getFrameIdx();
        MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
      }
    }

    // 32-bit and 64-bit GPRs share one save area; size it from the lowest
    // encoding of either class.
    unsigned MinReg =
      std::min<unsigned>(TRI->getEncodingValue(MinGPR),
                         TRI->getEncodingValue(MinG8R));

    const unsigned GPRegSize = Subtarget.isPPC64() ? 8 : 4;
    LowerBound -= (31 - MinReg + 1) * GPRegSize;
  }

  // For 32-bit only, the CR save area is below the general register
  // save area. For 64-bit SVR4, the CR save area is addressed relative
  // to the stack pointer and hence does not need an adjustment here.
  // Only CR2 (the first nonvolatile spilled) has an associated frame
  // index so that we have a single uniform save area.
  if (spillsCR(MF) && Subtarget.is32BitELFABI()) {
    // Adjust the frame index of the CR spill slot.
    for (const auto &CSInfo : CSI) {
      if (CSInfo.getReg() == PPC::CR2) {
        int FI = CSInfo.getFrameIdx();
        MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
        break;
      }
    }

    LowerBound -= 4; // The CR save area is always 4 bytes long.
  }

  // Both Altivec and SPE have the same alignment and padding requirements
  // within the stack frame.
  if (HasVRSaveArea) {
    // Insert alignment padding, we need 16-byte alignment. Note: for positive
    // number the alignment formula is : y = (x + (n-1)) & (~(n-1)). But since
    // we are using negative number here (the stack grows downward). We should
    // use formula : y = x & (~(n-1)). Where x is the size before aligning, n
    // is the alignment size ( n = 16 here) and y is the size after aligning.
    assert(LowerBound <= 0 && "Expect LowerBound have a non-positive value!");
    LowerBound &= ~(15);

    for (unsigned i = 0, e = VRegs.size(); i != e; ++i) {
      int FI = VRegs[i].getFrameIdx();

      MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI));
    }
  }

  addScavengingSpillSlot(MF, RS);
}
2253
2254 void
addScavengingSpillSlot(MachineFunction & MF,RegScavenger * RS) const2255 PPCFrameLowering::addScavengingSpillSlot(MachineFunction &MF,
2256 RegScavenger *RS) const {
2257 // Reserve a slot closest to SP or frame pointer if we have a dynalloc or
2258 // a large stack, which will require scavenging a register to materialize a
2259 // large offset.
2260
2261 // We need to have a scavenger spill slot for spills if the frame size is
2262 // large. In case there is no free register for large-offset addressing,
2263 // this slot is used for the necessary emergency spill. Also, we need the
2264 // slot for dynamic stack allocations.
2265
2266 // The scavenger might be invoked if the frame offset does not fit into
2267 // the 16-bit immediate. We don't know the complete frame size here
2268 // because we've not yet computed callee-saved register spills or the
2269 // needed alignment padding.
2270 unsigned StackSize = determineFrameLayout(MF, true);
2271 MachineFrameInfo &MFI = MF.getFrameInfo();
2272 if (MFI.hasVarSizedObjects() || spillsCR(MF) || hasNonRISpills(MF) ||
2273 (hasSpills(MF) && !isInt<16>(StackSize))) {
2274 const TargetRegisterClass &GPRC = PPC::GPRCRegClass;
2275 const TargetRegisterClass &G8RC = PPC::G8RCRegClass;
2276 const TargetRegisterClass &RC = Subtarget.isPPC64() ? G8RC : GPRC;
2277 const TargetRegisterInfo &TRI = *Subtarget.getRegisterInfo();
2278 unsigned Size = TRI.getSpillSize(RC);
2279 Align Alignment = TRI.getSpillAlign(RC);
2280 RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Alignment, false));
2281
2282 // Might we have over-aligned allocas?
2283 bool HasAlVars =
2284 MFI.hasVarSizedObjects() && MFI.getMaxAlign() > getStackAlign();
2285
2286 // These kinds of spills might need two registers.
2287 if (spillsCR(MF) || HasAlVars)
2288 RS->addScavengingFrameIndex(
2289 MFI.CreateStackObject(Size, Alignment, false));
2290 }
2291 }
2292
2293 // This function checks if a callee saved gpr can be spilled to a volatile
2294 // vector register. This occurs for leaf functions when the option
2295 // ppc-enable-pe-vector-spills is enabled. If there are any remaining registers
2296 // which were not spilled to vectors, return false so the target independent
2297 // code can handle them by assigning a FrameIdx to a stack slot.
assignCalleeSavedSpillSlots(MachineFunction & MF,const TargetRegisterInfo * TRI,std::vector<CalleeSavedInfo> & CSI) const2298 bool PPCFrameLowering::assignCalleeSavedSpillSlots(
2299 MachineFunction &MF, const TargetRegisterInfo *TRI,
2300 std::vector<CalleeSavedInfo> &CSI) const {
2301
2302 if (CSI.empty())
2303 return true; // Early exit if no callee saved registers are modified!
2304
2305 // Early exit if cannot spill gprs to volatile vector registers.
2306 MachineFrameInfo &MFI = MF.getFrameInfo();
2307 if (!EnablePEVectorSpills || MFI.hasCalls() || !Subtarget.hasP9Vector())
2308 return false;
2309
2310 // Build a BitVector of VSRs that can be used for spilling GPRs.
2311 BitVector BVAllocatable = TRI->getAllocatableSet(MF);
2312 BitVector BVCalleeSaved(TRI->getNumRegs());
2313 const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
2314 const MCPhysReg *CSRegs = RegInfo->getCalleeSavedRegs(&MF);
2315 for (unsigned i = 0; CSRegs[i]; ++i)
2316 BVCalleeSaved.set(CSRegs[i]);
2317
2318 for (unsigned Reg : BVAllocatable.set_bits()) {
2319 // Set to 0 if the register is not a volatile VSX register, or if it is
2320 // used in the function.
2321 if (BVCalleeSaved[Reg] || !PPC::VSRCRegClass.contains(Reg) ||
2322 MF.getRegInfo().isPhysRegUsed(Reg))
2323 BVAllocatable.reset(Reg);
2324 }
2325
2326 bool AllSpilledToReg = true;
2327 unsigned LastVSRUsedForSpill = 0;
2328 for (auto &CS : CSI) {
2329 if (BVAllocatable.none())
2330 return false;
2331
2332 unsigned Reg = CS.getReg();
2333
2334 if (!PPC::G8RCRegClass.contains(Reg)) {
2335 AllSpilledToReg = false;
2336 continue;
2337 }
2338
2339 // For P9, we can reuse LastVSRUsedForSpill to spill two GPRs
2340 // into one VSR using the mtvsrdd instruction.
2341 if (LastVSRUsedForSpill != 0) {
2342 CS.setDstReg(LastVSRUsedForSpill);
2343 BVAllocatable.reset(LastVSRUsedForSpill);
2344 LastVSRUsedForSpill = 0;
2345 continue;
2346 }
2347
2348 unsigned VolatileVFReg = BVAllocatable.find_first();
2349 if (VolatileVFReg < BVAllocatable.size()) {
2350 CS.setDstReg(VolatileVFReg);
2351 LastVSRUsedForSpill = VolatileVFReg;
2352 } else {
2353 AllSpilledToReg = false;
2354 }
2355 }
2356 return AllSpilledToReg;
2357 }
2358
// Emit the spill code for the callee-saved registers in CSI before MI.
// CR2-CR4 on 32-bit ELF are merged into a single MFCR+STW sequence; other
// CR fields and the TOC register are only recorded here and spilled later by
// the prologue; GPRs marked isSpilledToReg() are moved into volatile VSRs
// (one or two GPRs per VSR). Always returns true, signalling that the
// target-independent spiller is not needed.
bool PPCFrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {

  MachineFunction *MF = MBB.getParent();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  PPCFunctionInfo *FI = MF->getInfo<PPCFunctionInfo>();
  bool MustSaveTOC = FI->mustSaveTOC();
  DebugLoc DL;
  // CRSpilled/CRMIB carry state across iterations: once the first CR field
  // is spilled, later CR fields are folded into the same MFCR instruction.
  bool CRSpilled = false;
  MachineInstrBuilder CRMIB;
  // VSRs that have already received their GPR(s); used to avoid emitting a
  // second move for the same destination VSR.
  BitVector Spilled(TRI->getNumRegs());

  VSRContainingGPRs.clear();

  // Map each VSR to GPRs to be spilled with into it. Single VSR can contain one
  // or two GPRs, so we need table to record information for later save/restore.
  llvm::for_each(CSI, [&](const CalleeSavedInfo &Info) {
    if (Info.isSpilledToReg()) {
      auto &SpilledVSR =
          VSRContainingGPRs.FindAndConstruct(Info.getDstReg()).second;
      assert(SpilledVSR.second == 0 &&
             "Can't spill more than two GPRs into VSR!");
      if (SpilledVSR.first == 0)
        SpilledVSR.first = Info.getReg();
      else
        SpilledVSR.second = Info.getReg();
    }
  });

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();

    // CR2 through CR4 are the nonvolatile CR fields.
    bool IsCRField = PPC::CR2 <= Reg && Reg <= PPC::CR4;

    // Add the callee-saved register as live-in; it's killed at the spill.
    // Do not do this for callee-saved registers that are live-in to the
    // function because they will already be marked live-in and this will be
    // adding it for a second time. It is an error to add the same register
    // to the set more than once.
    const MachineRegisterInfo &MRI = MF->getRegInfo();
    bool IsLiveIn = MRI.isLiveIn(Reg);
    if (!IsLiveIn)
      MBB.addLiveIn(Reg);

    // A CR field after the first one: fold it into the already-built MFCR
    // as an implicit kill instead of emitting another instruction.
    if (CRSpilled && IsCRField) {
      CRMIB.addReg(Reg, RegState::ImplicitKill);
      continue;
    }

    // The actual spill will happen in the prologue.
    if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
      continue;

    // Insert the spill to the stack frame.
    if (IsCRField) {
      PPCFunctionInfo *FuncInfo = MF->getInfo<PPCFunctionInfo>();
      if (!Subtarget.is32BitELFABI()) {
        // The actual spill will happen at the start of the prologue.
        FuncInfo->addMustSaveCR(Reg);
      } else {
        CRSpilled = true;
        FuncInfo->setSpillsCR();

        // 32-bit:  FP-relative.  Note that we made sure CR2-CR4 all have
        // the same frame index in PPCRegisterInfo::hasReservedSpillSlot.
        CRMIB = BuildMI(*MF, DL, TII.get(PPC::MFCR), PPC::R12)
                  .addReg(Reg, RegState::ImplicitKill);

        MBB.insert(MI, CRMIB);
        MBB.insert(MI, addFrameReference(BuildMI(*MF, DL, TII.get(PPC::STW))
                                         .addReg(PPC::R12,
                                                 getKillRegState(true)),
                                         CSI[i].getFrameIdx()));
      }
    } else {
      if (CSI[i].isSpilledToReg()) {
        unsigned Dst = CSI[i].getDstReg();

        // Both GPRs of a paired VSR are moved by one instruction; skip the
        // second GPR once its destination VSR has been filled.
        if (Spilled[Dst])
          continue;

        if (VSRContainingGPRs[Dst].second != 0) {
          assert(Subtarget.hasP9Vector() &&
                 "mtvsrdd is unavailable on pre-P9 targets.");

          NumPESpillVSR += 2;
          BuildMI(MBB, MI, DL, TII.get(PPC::MTVSRDD), Dst)
              .addReg(VSRContainingGPRs[Dst].first, getKillRegState(true))
              .addReg(VSRContainingGPRs[Dst].second, getKillRegState(true));
        } else if (VSRContainingGPRs[Dst].second == 0) {
          assert(Subtarget.hasP8Vector() &&
                 "Can't move GPR to VSR on pre-P8 targets.");

          ++NumPESpillVSR;
          BuildMI(MBB, MI, DL, TII.get(PPC::MTVSRD),
                  TRI->getSubReg(Dst, PPC::sub_64))
              .addReg(VSRContainingGPRs[Dst].first, getKillRegState(true));
        } else {
          llvm_unreachable("More than two GPRs spilled to a VSR!");
        }
        Spilled.set(Dst);
      } else {
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
        // Use !IsLiveIn for the kill flag.
        // We do not want to kill registers that are live in this function
        // before their use because they will become undefined registers.
        // Functions without NoUnwind need to preserve the order of elements in
        // saved vector registers.
        if (Subtarget.needsSwapsForVSXMemOps() &&
            !MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
          TII.storeRegToStackSlotNoUpd(MBB, MI, Reg, !IsLiveIn,
                                       CSI[i].getFrameIdx(), RC, TRI);
        else
          TII.storeRegToStackSlot(MBB, MI, Reg, !IsLiveIn, CSI[i].getFrameIdx(),
                                  RC, TRI);
      }
    }
  }
  return true;
}
2481
restoreCRs(bool is31,bool CR2Spilled,bool CR3Spilled,bool CR4Spilled,MachineBasicBlock & MBB,MachineBasicBlock::iterator MI,ArrayRef<CalleeSavedInfo> CSI,unsigned CSIIndex)2482 static void restoreCRs(bool is31, bool CR2Spilled, bool CR3Spilled,
2483 bool CR4Spilled, MachineBasicBlock &MBB,
2484 MachineBasicBlock::iterator MI,
2485 ArrayRef<CalleeSavedInfo> CSI, unsigned CSIIndex) {
2486
2487 MachineFunction *MF = MBB.getParent();
2488 const PPCInstrInfo &TII = *MF->getSubtarget<PPCSubtarget>().getInstrInfo();
2489 DebugLoc DL;
2490 unsigned MoveReg = PPC::R12;
2491
2492 // 32-bit: FP-relative
2493 MBB.insert(MI,
2494 addFrameReference(BuildMI(*MF, DL, TII.get(PPC::LWZ), MoveReg),
2495 CSI[CSIIndex].getFrameIdx()));
2496
2497 unsigned RestoreOp = PPC::MTOCRF;
2498 if (CR2Spilled)
2499 MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR2)
2500 .addReg(MoveReg, getKillRegState(!CR3Spilled && !CR4Spilled)));
2501
2502 if (CR3Spilled)
2503 MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR3)
2504 .addReg(MoveReg, getKillRegState(!CR4Spilled)));
2505
2506 if (CR4Spilled)
2507 MBB.insert(MI, BuildMI(*MF, DL, TII.get(RestoreOp), PPC::CR4)
2508 .addReg(MoveReg, getKillRegState(true)));
2509 }
2510
2511 MachineBasicBlock::iterator PPCFrameLowering::
eliminateCallFramePseudoInstr(MachineFunction & MF,MachineBasicBlock & MBB,MachineBasicBlock::iterator I) const2512 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
2513 MachineBasicBlock::iterator I) const {
2514 const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
2515 if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2516 I->getOpcode() == PPC::ADJCALLSTACKUP) {
2517 // Add (actually subtract) back the amount the callee popped on return.
2518 if (int CalleeAmt = I->getOperand(1).getImm()) {
2519 bool is64Bit = Subtarget.isPPC64();
2520 CalleeAmt *= -1;
2521 unsigned StackReg = is64Bit ? PPC::X1 : PPC::R1;
2522 unsigned TmpReg = is64Bit ? PPC::X0 : PPC::R0;
2523 unsigned ADDIInstr = is64Bit ? PPC::ADDI8 : PPC::ADDI;
2524 unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;
2525 unsigned LISInstr = is64Bit ? PPC::LIS8 : PPC::LIS;
2526 unsigned ORIInstr = is64Bit ? PPC::ORI8 : PPC::ORI;
2527 const DebugLoc &dl = I->getDebugLoc();
2528
2529 if (isInt<16>(CalleeAmt)) {
2530 BuildMI(MBB, I, dl, TII.get(ADDIInstr), StackReg)
2531 .addReg(StackReg, RegState::Kill)
2532 .addImm(CalleeAmt);
2533 } else {
2534 MachineBasicBlock::iterator MBBI = I;
2535 BuildMI(MBB, MBBI, dl, TII.get(LISInstr), TmpReg)
2536 .addImm(CalleeAmt >> 16);
2537 BuildMI(MBB, MBBI, dl, TII.get(ORIInstr), TmpReg)
2538 .addReg(TmpReg, RegState::Kill)
2539 .addImm(CalleeAmt & 0xFFFF);
2540 BuildMI(MBB, MBBI, dl, TII.get(ADDInstr), StackReg)
2541 .addReg(StackReg, RegState::Kill)
2542 .addReg(TmpReg);
2543 }
2544 }
2545 }
2546 // Simply discard ADJCALLSTACKDOWN, ADJCALLSTACKUP instructions.
2547 return MBB.erase(I);
2548 }
2549
isCalleeSavedCR(unsigned Reg)2550 static bool isCalleeSavedCR(unsigned Reg) {
2551 return PPC::CR2 == Reg || Reg == PPC::CR3 || Reg == PPC::CR4;
2552 }
2553
// Restore the callee-saved registers listed in CSI, inserting reloads before
// MI.  On 32-bit ELF, spilled CR fields (CR2-CR4) are accumulated and then
// restored together from their shared slot via restoreCRs; GPRs that were
// spilled into VSRs are recovered with mfvsrd/mfvsrld; everything else is
// reloaded from its own frame slot.  Reloads are emitted in reverse order
// of the spills.
bool PPCFrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const PPCInstrInfo &TII = *Subtarget.getInstrInfo();
  PPCFunctionInfo *FI = MF->getInfo<PPCFunctionInfo>();
  bool MustSaveTOC = FI->mustSaveTOC();
  bool CR2Spilled = false;
  bool CR3Spilled = false;
  bool CR4Spilled = false;
  unsigned CSIIndex = 0;
  // Tracks destination VSRs already unpacked, so a VSR holding two GPRs is
  // only restored once even though two CSI entries name it.
  BitVector Restored(TRI->getNumRegs());

  // Initialize insertion-point logic; we will be restoring in reverse
  // order of spill.
  MachineBasicBlock::iterator I = MI, BeforeI = I;
  bool AtStart = I == MBB.begin();

  if (!AtStart)
    --BeforeI;

  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();

    // The TOC register's restore is handled elsewhere when it must be saved.
    if ((Reg == PPC::X2 || Reg == PPC::R2) && MustSaveTOC)
      continue;

    // Restore of callee saved condition register field is handled during
    // epilogue insertion.
    if (isCalleeSavedCR(Reg) && !Subtarget.is32BitELFABI())
      continue;

    if (Reg == PPC::CR2) {
      CR2Spilled = true;
      // The spill slot is associated only with CR2, which is the
      // first nonvolatile spilled. Save it here.
      CSIIndex = i;
      continue;
    } else if (Reg == PPC::CR3) {
      CR3Spilled = true;
      continue;
    } else if (Reg == PPC::CR4) {
      CR4Spilled = true;
      continue;
    } else {
      // On 32-bit ELF when we first encounter a non-CR register after seeing at
      // least one CR register, restore all spilled CRs together.
      if (CR2Spilled || CR3Spilled || CR4Spilled) {
        bool is31 = needsFP(*MF);
        restoreCRs(is31, CR2Spilled, CR3Spilled, CR4Spilled, MBB, I, CSI,
                   CSIIndex);
        CR2Spilled = CR3Spilled = CR4Spilled = false;
      }

      if (CSI[i].isSpilledToReg()) {
        DebugLoc DL;
        unsigned Dst = CSI[i].getDstReg();

        if (Restored[Dst])
          continue;

        // NOTE(review): VSRContainingGPRs is not declared in this function;
        // presumably it is the Dst-VSR -> (first GPR, second GPR) mapping
        // built by the spill code -- confirm it is in scope here.
        if (VSRContainingGPRs[Dst].second != 0) {
          // Two GPRs packed into one VSR: extract the low doubleword with
          // mfvsrld and the high doubleword with mfvsrd.
          assert(Subtarget.hasP9Vector());
          NumPEReloadVSR += 2;
          BuildMI(MBB, I, DL, TII.get(PPC::MFVSRLD),
                  VSRContainingGPRs[Dst].second)
              .addReg(Dst);
          BuildMI(MBB, I, DL, TII.get(PPC::MFVSRD),
                  VSRContainingGPRs[Dst].first)
              .addReg(TRI->getSubReg(Dst, PPC::sub_64), getKillRegState(true));
        } else if (VSRContainingGPRs[Dst].second == 0) {
          // Only one GPR in the VSR: a single mfvsrd recovers it.
          assert(Subtarget.hasP8Vector());
          ++NumPEReloadVSR;
          BuildMI(MBB, I, DL, TII.get(PPC::MFVSRD),
                  VSRContainingGPRs[Dst].first)
              .addReg(TRI->getSubReg(Dst, PPC::sub_64), getKillRegState(true));
        } else {
          llvm_unreachable("More than two GPRs spilled to a VSR!");
        }

        Restored.set(Dst);

      } else {
        // Default behavior for non-CR saves.
        const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);

        // Functions without NoUnwind need to preserve the order of elements in
        // saved vector registers.
        if (Subtarget.needsSwapsForVSXMemOps() &&
            !MF->getFunction().hasFnAttribute(Attribute::NoUnwind))
          TII.loadRegFromStackSlotNoUpd(MBB, I, Reg, CSI[i].getFrameIdx(), RC,
                                        TRI);
        else
          TII.loadRegFromStackSlot(MBB, I, Reg, CSI[i].getFrameIdx(), RC, TRI);

        assert(I != MBB.begin() &&
               "loadRegFromStackSlot didn't insert any code!");
      }
    }

    // Insert in reverse order: reset the insertion point so the next reload
    // is emitted before the code just inserted.
    if (AtStart)
      I = MBB.begin();
    else {
      I = BeforeI;
      ++I;
    }
  }

  // If we haven't yet restored the CRs, do so now.
  if (CR2Spilled || CR3Spilled || CR4Spilled) {
    assert(Subtarget.is32BitELFABI() &&
           "Only set CR[2|3|4]Spilled on 32-bit SVR4.");
    bool is31 = needsFP(*MF);
    restoreCRs(is31, CR2Spilled, CR3Spilled, CR4Spilled, MBB, I, CSI, CSIIndex);
  }

  return true;
}
2673
/// Return the previously computed offset of the TOC save slot.
unsigned PPCFrameLowering::getTOCSaveOffset() const {
  return TOCSaveOffset;
}
2677
/// Return the previously computed offset of the frame pointer save slot.
unsigned PPCFrameLowering::getFramePointerSaveOffset() const {
  return FramePointerSaveOffset;
}
2681
/// Return the previously computed offset of the base pointer save slot.
unsigned PPCFrameLowering::getBasePointerSaveOffset() const {
  return BasePointerSaveOffset;
}
2685
enableShrinkWrapping(const MachineFunction & MF) const2686 bool PPCFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
2687 if (MF.getInfo<PPCFunctionInfo>()->shrinkWrapDisabled())
2688 return false;
2689 return !MF.getSubtarget<PPCSubtarget>().is32BitELFABI();
2690 }
2691