//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>

#define DEBUG_TYPE "x86-fl"

STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");
STATISTIC(NumFrameExtraProbe,
          "Number of extra stack probes generated in prologue");

using namespace llvm;

X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   MaybeAlign StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride.valueOrOne(),
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // Standard x86-64 and NaCl use 64-bit frame/stack pointers; x32 uses
  // 32-bit ones.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects() &&
         !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences() &&
         !MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool
X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) ||
         MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
         (hasFP(MF) && !TRI->needsStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}

// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function. Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the title - it resolves callframesetup/destroy
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool
X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects() ||
         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          TRI->needsStackRealignment(MF) || MFI.hasVarSizedObjects() ||
          MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
          MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
          MFI.hasStackMap() || MFI.hasPatchPoint() ||
          MFI.hasCopyImplyingStackAdjustment());
}

static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getSUBrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::SUB64rr : X86::SUB32rr;
}

static unsigned getADDrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::ADD64rr : X86::ADD32rr;
}

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::AND64ri8;
    return X86::AND64ri32;
  }
  if (isInt<8>(Imm))
    return X86::AND32ri8;
  return X86::AND32ri;
}

static unsigned getLEArOpcode(bool IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}

/// findDeadCallerSavedReg - Return a caller-saved register that isn't live
/// when it reaches the "return" instruction. We can then pop a stack object
/// to this register without worrying about clobbering it.
static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MBBI,
                                       const X86RegisterInfo *TRI,
                                       bool Is64Bit) {
  const MachineFunction *MF = MBB.getParent();
  if (MF->callsEHReturn())
    return 0;

  const TargetRegisterClass &AvailableRegs = *TRI->getGPRsForTailCall(*MF);

  if (MBBI == MBB.end())
    return 0;

  switch (MBBI->getOpcode()) {
  default: return 0;
  case TargetOpcode::PATCHABLE_RET:
  case X86::RET:
  case X86::RETL:
  case X86::RETQ:
  case X86::RETIL:
  case X86::RETIQ:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNmi:
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64: {
    SmallSet<uint16_t, 8> Uses;
    for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MBBI->getOperand(i);
      if (!MO.isReg() || MO.isDef())
        continue;
      Register Reg = MO.getReg();
      if (!Reg)
        continue;
      for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
        Uses.insert(*AI);
    }

    for (auto CS : AvailableRegs)
      if (!Uses.count(CS) && CS != X86::RIP && CS != X86::RSP &&
          CS != X86::ESP)
        return CS;
  }
  }

  return 0;
}

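/// Return true if EAX, or any register aliasing it (RAX, AX, AH, AL), is in
/// the live-in set of MBB.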
static bool isEAXLiveIn(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
    unsigned Reg = RegMask.PhysReg;

    if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

/// Check if the flags need to be preserved before the terminators.
/// This is the case if EFLAGS is live-in to the region composed of the
/// terminators, or live-out of that region without being defined by a
/// terminator.
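/// For example, a conditional branch terminator that tests flags produced by
/// a CMP emitted before the terminator sequence reads a live-in EFLAGS value,
/// so the flags must be preserved.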
static bool
flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators()) {
    bool BreakNext = false;
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (Reg != X86::EFLAGS)
        continue;

      // This terminator needs an EFLAGS value that is not defined by a
      // previous terminator:
      // EFLAGS is live-in of the region composed by the terminators.
      if (!MO.isDef())
        return true;
      // This terminator defines the eflags, i.e., we don't need to preserve
      // it. However, we still need to check this specific terminator does not
      // read a live-in value.
      BreakNext = true;
    }
    // We found a definition of the eflags, no need to preserve them.
    if (BreakNext)
      return false;
  }

  // None of the terminators use or define the eflags.
  // Check if they are live-out, that would imply we need to preserve them.
  for (const MachineBasicBlock *Succ : MBB.successors())
    if (Succ->isLiveIn(X86::EFLAGS))
      return true;

  return false;
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &DL,
                                    int64_t NumBytes, bool InEpilogue) const {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  MachineInstr::MIFlag Flag =
      isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;

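  // Adjust the stack in chunks of at most 2^31 - 1 bytes at a time, so that
  // each SUB/ADD immediate fits in a signed 32-bit field.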
  uint64_t Chunk = (1LL << 31) - 1;

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // It's ok to not take into account large chunks when probing, as the
  // allocation is split in smaller chunks anyway.
  if (EmitInlineStackProbe && !InEpilogue) {
    // This pseudo-instruction is going to be expanded, potentially using a
    // loop, by inlineStackProbe().
    BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)).addImm(Offset);
    return;
  } else if (Offset > Chunk) {
    // Rather than emit a long series of instructions for large offsets,
    // load the offset into a register and do one sub/add.
    unsigned Reg = 0;
    unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);

    if (isSub && !isEAXLiveIn(MBB))
      Reg = Rax;
    else
      Reg = findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);

    unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
    unsigned AddSubRROpc =
        isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
    if (Reg) {
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
                             .addReg(StackPtr)
                             .addReg(Reg);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      return;
    } else if (Offset > 8 * Chunk) {
      // If we would need more than 8 add or sub instructions (a >16GB stack
      // frame), it's worth spilling RAX to materialize this immediate.
      //   pushq %rax
      //   movabsq +-$Offset+-SlotSize, %rax
      //   addq %rsp, %rax
      //   xchg %rax, (%rsp)
      //   movq (%rsp), %rsp
      assert(Is64Bit && "can't have 32-bit 16GB stack frame");
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
          .addReg(Rax, RegState::Kill)
          .setMIFlag(Flag);
      // Subtract is not commutative, so negate the offset and always use add.
      // Subtract 8 less and add 8 more to account for the PUSH we just did.
      if (isSub)
        Offset = -(Offset - SlotSize);
      else
        Offset = Offset + SlotSize;
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
                             .addReg(Rax)
                             .addReg(StackPtr);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      // Exchange the new SP in RAX with the top of the stack.
      addRegOffset(
          BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
          StackPtr, false, 0);
      // Load new SP from the top of the stack into RSP.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
                   StackPtr, false, 0);
      return;
    }
  }

  while (Offset) {
    uint64_t ThisVal = std::min(Offset, Chunk);
    if (ThisVal == SlotSize) {
      // Use push / pop for slot sized adjustments as a size optimization. We
      // need to find a dead register when using pop.
      unsigned Reg = isSub
                         ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
                         : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
      if (Reg) {
        unsigned Opc = isSub
                           ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
                           : (Is64Bit ? X86::POP64r : X86::POP32r);
        BuildMI(MBB, MBBI, DL, TII.get(Opc))
            .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
            .setMIFlag(Flag);
        Offset -= ThisVal;
        continue;
      }
    }

    BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
        .setMIFlag(Flag);

    Offset -= ThisVal;
  }
}

MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
  assert(Offset != 0 && "zero offset stack adjustment requested");

  // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
  // is tricky.
  bool UseLEA;
  if (!InEpilogue) {
    // Check if inserting the prologue at the beginning of MBB would require
    // the use of LEA operations. We need to use LEA operations if EFLAGS is
    // live-in, because that means an instruction will read the flags before
    // they get defined.
    UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
  } else {
    // If we can use LEA for SP but we shouldn't, check that none
    // of the terminators uses the eflags. Otherwise we will insert
    // an ADD that will redefine the eflags and break the condition.
    // Alternatively, we could move the ADD, but this may not be possible
    // and is an optimization anyway.
    UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
    if (UseLEA && !STI.useLeaForSP())
      UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
    // If that assert breaks, that means we do not do the right thing
    // in canUseAsEpilogue.
    assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
           "We shouldn't have allowed this insertion point");
  }

  MachineInstrBuilder MI;
  if (UseLEA) {
    MI = addRegOffset(BuildMI(MBB, MBBI, DL,
                              TII.get(getLEArOpcode(Uses64BitFramePtr)),
                              StackPtr),
                      StackPtr, false, Offset);
  } else {
    bool IsSub = Offset < 0;
    uint64_t AbsOffset = IsSub ? -Offset : Offset;
    const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
                               : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
    MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
             .addReg(StackPtr)
             .addImm(AbsOffset);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  return MI;
}

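/// Try to fold a neighboring stack adjustment into an SP update: inspect the
/// instruction before (doMergeWithPrevious) or at MBBI; if it is an ADD/SUB of
/// an immediate or an LEA on the stack pointer, erase it (along with a
/// trailing CFI instruction, if present) and return the adjustment it made
/// (negated for SUB), or 0 if nothing could be merged.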
int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     bool doMergeWithPrevious) const {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;

  PI = skipDebugInstructionsBackward(PI, MBB.begin());
  // It is assumed that the ADD/SUB/LEA instruction is succeeded by one CFI
  // instruction, and that there are no DBG_VALUE or other instructions between
  // the ADD/SUB/LEA and its corresponding CFI instruction.
  /* TODO: Add support for the case where there are multiple CFI instructions
    below the ADD/SUB/LEA, e.g.:
    ...
    add
    cfi_def_cfa_offset
    cfi_offset
    ...
  */
  if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
    PI = std::prev(PI);

  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = PI->getOperand(2).getImm();
  } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
             PI->getOperand(0).getReg() == StackPtr &&
             PI->getOperand(1).getReg() == StackPtr &&
             PI->getOperand(2).getImm() == 1 &&
             PI->getOperand(3).getReg() == X86::NoRegister &&
             PI->getOperand(5).getReg() == X86::NoRegister) {
    // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
    Offset = PI->getOperand(4).getImm();
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = -PI->getOperand(2).getImm();
  } else
    return 0;

  PI = MBB.erase(PI);
  if (PI != MBB.end() && PI->isCFIInstruction()) PI = MBB.erase(PI);
  if (!doMergeWithPrevious)
    MBBI = skipDebugInstructionsForward(PI, MBB.end());

  return Offset;
}

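/// Wrap up a CFI directive: register CFIInst with the MachineFunction and
/// emit a CFI_INSTRUCTION pseudo referencing it at the insertion point.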
void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL,
                                const MCCFIInstruction &CFIInst) const {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.addFrameInst(CFIInst);
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

/// Emits Dwarf Info specifying offsets of callee saved registers and
/// frame pointer. This is called only when basic block sections are enabled.
void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  if (!hasFP(MF)) {
    emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
    return;
  }
  const MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const unsigned FramePtr = TRI->getFrameRegister(MF);
  const unsigned MachineFramePtr =
      STI.isTarget64BitILP32() ? unsigned(getX86SubSuperRegister(FramePtr, 64))
                               : FramePtr;
  unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);
  // Offset = space for return address + size of the frame pointer itself.
  unsigned Offset = (Is64Bit ? 8 : 4) + (Uses64BitFramePtr ? 8 : 4);
  BuildCFI(MBB, MBBI, DebugLoc{},
           MCCFIInstruction::createOffset(nullptr, DwarfReg, -Offset));
  emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
}

void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, bool IsPrologue) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty()) return;

  // Calculate offsets.
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);

    if (IsPrologue) {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    } else {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createRestore(nullptr, DwarfReg));
    }
  }
}

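/// Emit a stack probe: on Windows CoreCLR an inline expansion (deferred via a
/// STACKALLOC_W_PROBING pseudo when in the prologue), otherwise a call to the
/// target's stack probe function (e.g. __chkstk).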
void X86FrameLowering::emitStackProbe(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR()) {
    if (InProlog) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING))
          .addImm(0 /* no explicit stack size */);
    } else {
      emitStackProbeInline(MF, MBB, MBBI, DL, false);
    }
  } else {
    emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
  }
}

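/// Expand a STACKALLOC_W_PROBING pseudo left in the prologue by emitSPUpdate
/// or emitStackProbe into an inline probing sequence.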
void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
                                        MachineBasicBlock &PrologMBB) const {
  auto Where = llvm::find_if(PrologMBB, [](MachineInstr &MI) {
    return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
  });
  if (Where != PrologMBB.end()) {
    DebugLoc DL = PrologMBB.findDebugLoc(Where);
    emitStackProbeInline(MF, PrologMBB, Where, DL, true);
    Where->eraseFromParent();
  }
}

void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
                                            MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            const DebugLoc &DL,
                                            bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR() && STI.is64Bit())
    emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);
  else
    emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);
}

void X86FrameLowering::emitStackProbeInlineGeneric(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  MachineInstr &AllocWithProbe = *MBBI;
  uint64_t Offset = AllocWithProbe.getOperand(0).getImm();

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  assert(!(STI.is64Bit() && STI.isTargetWindowsCoreCLR()) &&
         "different expansion expected for CoreCLR 64 bit");

  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  uint64_t ProbeChunk = StackProbeSize * 8;

  // Synthesize a loop or unroll it, depending on the number of iterations.
  if (Offset > ProbeChunk) {
    emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset);
  } else {
    emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset);
  }
}

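// A sketch of the unrolled expansion, assuming a 4096-byte probe interval and
// an allocation of two full pages plus a tail smaller than one page:
//   subq $4096, %rsp
//   movq $0, (%rsp)          # touch the new page
//   subq $4096, %rsp
//   movq $0, (%rsp)          # touch the new page
//   subq $tail, %rsp         # remaining sub-page chunk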
void X86FrameLowering::emitStackProbeInlineGenericBlock(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
    uint64_t Offset) const {

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset);
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  uint64_t CurrentOffset = 0;
  // 0 thanks to the return address having been saved on the stack.
  uint64_t CurrentProbeOffset = 0;

  // For the first N - 1 pages, just probe. I tried to take advantage of
  // natural probes but it implies much more logic and there were very few
  // interesting natural probes to interleave.
  while (CurrentOffset + StackProbeSize < Offset) {
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                           .addReg(StackPtr)
                           .addImm(StackProbeSize)
                           .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset += StackProbeSize;
    CurrentProbeOffset += StackProbeSize;
  }

  uint64_t ChunkSize = Offset - CurrentOffset;
  MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                         .addReg(StackPtr)
                         .addImm(ChunkSize)
                         .setMIFlag(MachineInstr::FrameSetup);
  MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
}

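// A sketch of the loop expansion for the 64-bit case, assuming a probe
// interval of StackProbeSize bytes:
//   movq %rsp, %r11
//   subq $align_down(Offset, StackProbeSize), %r11   # loop bound
// loop:
//   subq $StackProbeSize, %rsp
//   movq $0, (%rsp)                                  # touch the new page
//   cmpq %r11, %rsp
//   jne  loop
//   subq $(Offset % StackProbeSize), %rsp            # tail, if any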
void X86FrameLowering::emitStackProbeInlineGenericLoop(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL,
    uint64_t Offset) const {
  assert(Offset && "null offset");

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  // Synthesize a loop
  NumFrameLoopProbe++;
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  MachineBasicBlock *testMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = ++MBB.getIterator();
  MF.insert(MBBIter, testMBB);
  MF.insert(MBBIter, tailMBB);

  Register FinalStackProbed = Uses64BitFramePtr ? X86::R11 : X86::R11D;
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);

  // save loop bound
  {
    const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset);
    BuildMI(MBB, MBBI, DL, TII.get(Opc), FinalStackProbed)
        .addReg(FinalStackProbed)
        .addImm(Offset / StackProbeSize * StackProbeSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // allocate a page
  {
    const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
    BuildMI(testMBB, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(StackProbeSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // touch the page
  addRegOffset(BuildMI(testMBB, DL, TII.get(MovMIOpc))
                   .setMIFlag(MachineInstr::FrameSetup),
               StackPtr, false, 0)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  // cmp with stack pointer bound
  BuildMI(testMBB, DL, TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
      .addReg(StackPtr)
      .addReg(FinalStackProbed)
      .setMIFlag(MachineInstr::FrameSetup);

  // jump
  BuildMI(testMBB, DL, TII.get(X86::JCC_1))
      .addMBB(testMBB)
      .addImm(X86::COND_NE)
      .setMIFlag(MachineInstr::FrameSetup);
  testMBB->addSuccessor(testMBB);
  testMBB->addSuccessor(tailMBB);

  // BB management
  tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end());
  tailMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  MBB.addSuccessor(testMBB);

  // handle tail
  unsigned TailOffset = Offset % StackProbeSize;
  if (TailOffset) {
    const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, TailOffset);
    BuildMI(*tailMBB, tailMBB->begin(), DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(TailOffset)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Update Live In information
  recomputeLiveIns(*testMBB);
  recomputeLiveIns(*tailMBB);
}

void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  assert(STI.is64Bit() && "different expansion needed for 32 bit");
  assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  // RAX contains the number of bytes of desired stack adjustment.
  // The handling here assumes this value has already been updated so as to
  // maintain stack alignment.
  //
  // We need to exit with RSP modified by this amount and execute suitable
  // page touches to notify the OS that we're growing the stack responsibly.
  // All stack probing must be done without modifying RSP.
  //
  // MBB:
  //    SizeReg = RAX;
  //    ZeroReg = 0
  //    CopyReg = RSP
  //    Flags, TestReg = CopyReg - SizeReg
  //    FinalReg = !Flags.Ovf ? TestReg : ZeroReg
  //    LimitReg = gs magic thread env access
  //    if FinalReg >= LimitReg goto ContinueMBB
  // RoundBB:
  //    RoundReg = page address of FinalReg
  // LoopMBB:
  //    LoopReg = PHI(LimitReg,ProbeReg)
  //    ProbeReg = LoopReg - PageSize
  //    [ProbeReg] = 0
  //    if (ProbeReg > RoundReg) goto LoopMBB
  // ContinueMBB:
  //    RSP = RSP - RAX
  //    [rest of original MBB]

  // Set up the new basic blocks
  MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
  MF.insert(MBBIter, RoundMBB);
  MF.insert(MBBIter, LoopMBB);
  MF.insert(MBBIter, ContinueMBB);

  // Split MBB and move the tail portion down to ContinueMBB.
  MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
  ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
  ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);

  // Some useful constants
  const int64_t ThreadEnvironmentStackLimit = 0x10;
  const int64_t PageSize = 0x1000;
  const int64_t PageMask = ~(PageSize - 1);

  // Registers we need. For the normal case we use virtual
  // registers. For the prolog expansion we use RAX, RCX and RDX.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterClass *RegClass = &X86::GR64RegClass;
  const Register SizeReg = InProlog ? X86::RAX
                                    : MRI.createVirtualRegister(RegClass),
                 ZeroReg = InProlog ? X86::RCX
                                    : MRI.createVirtualRegister(RegClass),
                 CopyReg = InProlog ? X86::RDX
                                    : MRI.createVirtualRegister(RegClass),
                 TestReg = InProlog ? X86::RDX
                                    : MRI.createVirtualRegister(RegClass),
                 FinalReg = InProlog ? X86::RDX
                                     : MRI.createVirtualRegister(RegClass),
                 RoundedReg = InProlog ? X86::RDX
                                       : MRI.createVirtualRegister(RegClass),
                 LimitReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass),
                 JoinReg = InProlog ? X86::RCX
                                    : MRI.createVirtualRegister(RegClass),
                 ProbeReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass);

  // SP-relative offsets where we can save RCX and RDX.
  int64_t RCXShadowSlot = 0;
  int64_t RDXShadowSlot = 0;

  // If inlining in the prolog, save RCX and RDX.
  if (InProlog) {
    // Compute the offsets. We need to account for things already
    // pushed onto the stack at this point: return address, frame
    // pointer (if used), and callee saves.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
    const bool HasFP = hasFP(MF);

    // Check if we need to spill RCX and/or RDX.
    // Here we assume that no earlier prologue instruction changes RCX and/or
    // RDX, so checking the block live-ins is enough.
    const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);
    const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);
    int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
    // Assign the initial slot to both registers, then change RDX's slot if
    // both need to be spilled.
    if (IsRCXLiveIn)
      RCXShadowSlot = InitSlot;
    if (IsRDXLiveIn)
      RDXShadowSlot = InitSlot;
    if (IsRDXLiveIn && IsRCXLiveIn)
      RDXShadowSlot += 8;
    // Emit the saves if needed.
    if (IsRCXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RCXShadowSlot)
          .addReg(X86::RCX);
    if (IsRDXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RDXShadowSlot)
          .addReg(X86::RDX);
  } else {
    // Not in the prolog. Copy RAX to a virtual reg.
    BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
  }

  // Add code to MBB to check for overflow and set the new target stack pointer
  // to zero if so.
  BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
      .addReg(ZeroReg, RegState::Undef)
      .addReg(ZeroReg, RegState::Undef);
  BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
  BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
      .addReg(CopyReg)
      .addReg(SizeReg);
  BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg)
      .addReg(TestReg)
      .addReg(ZeroReg)
      .addImm(X86::COND_B);

  // FinalReg now holds final stack pointer value, or zero if
  // allocation would overflow. Compare against the current stack
  // limit from the thread environment block. Note this limit is the
  // lowest touched page on the stack, not the point at which the OS
  // will cause an overflow exception, so this is just an optimization
  // to avoid unnecessarily touching pages that are below the current
  // SP but already committed to the stack by the OS.
  BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
      .addReg(0)
      .addImm(1)
      .addReg(0)
      .addImm(ThreadEnvironmentStackLimit)
      .addReg(X86::GS);
  BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
  // Jump if the desired stack pointer is at or above the stack limit.
  BuildMI(&MBB, DL, TII.get(X86::JCC_1))
      .addMBB(ContinueMBB)
      .addImm(X86::COND_AE);

  // Add code to roundMBB to round the final stack pointer to a page boundary.
  RoundMBB->addLiveIn(FinalReg);
  BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
      .addReg(FinalReg)
      .addImm(PageMask);
  BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);

  // LimitReg now holds the current stack limit, RoundedReg page-rounded
  // final RSP value. Add code to loopMBB to decrement LimitReg page-by-page
  // and probe until we reach RoundedReg.
  if (!InProlog) {
    BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
        .addReg(LimitReg)
        .addMBB(RoundMBB)
        .addReg(ProbeReg)
        .addMBB(LoopMBB);
  }

  LoopMBB->addLiveIn(JoinReg);
  addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
               false, -PageSize);

  // Probe by storing a byte onto the stack.
  BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
      .addReg(ProbeReg)
      .addImm(1)
      .addReg(0)
      .addImm(0)
      .addReg(0)
      .addImm(0);

  LoopMBB->addLiveIn(RoundedReg);
  BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
      .addReg(RoundedReg)
      .addReg(ProbeReg);
  BuildMI(LoopMBB, DL, TII.get(X86::JCC_1))
      .addMBB(LoopMBB)
      .addImm(X86::COND_NE);

  MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();

  // If in prolog, restore RDX and RCX.
  if (InProlog) {
    if (RCXShadowSlot) // It means we spilled RCX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RCX),
                   X86::RSP, false, RCXShadowSlot);
    if (RDXShadowSlot) // It means we spilled RDX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RDX),
                   X86::RSP, false, RDXShadowSlot);
  }

  // Now that the probing is done, add code to continueMBB to update
  // the stack pointer for real.
  ContinueMBB->addLiveIn(SizeReg);
  BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
      .addReg(X86::RSP)
      .addReg(SizeReg);

  // Add the control flow edges we need.
  MBB.addSuccessor(ContinueMBB);
  MBB.addSuccessor(RoundMBB);
  RoundMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(ContinueMBB);
  LoopMBB->addSuccessor(LoopMBB);

  // Mark all the instructions added to the prolog as frame setup.
  if (InProlog) {
    for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
      BeforeMBBI->setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *RoundMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *LoopMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
         CMBBI != ContinueMBBI; ++CMBBI) {
      CMBBI->setFlag(MachineInstr::FrameSetup);
    }
  }
}

void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
                                          MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL,
                                          bool InProlog) const {
  bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;

  // FIXME: Add indirect thunk support and remove this.
  if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls())
    report_fatal_error("Emitting stack probe calls on 64-bit with the large "
                       "code model and indirect thunks not yet implemented.");

  unsigned CallOp;
  if (Is64Bit)
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
  else
    CallOp = X86::CALLpcrel32;

  StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);

  MachineInstrBuilder CI;
  MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);

  // All current stack probes take AX and SP as input, clobber flags, and
  // preserve all registers. x86_64 probes leave RSP unmodified.
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // For the large code model, we have to call through a register. Use R11,
    // as it is scratch in all supported calling conventions.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(MF.createExternalSymbolName(Symbol));
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
  } else {
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
             .addExternalSymbol(MF.createExternalSymbolName(Symbol));
  }

  unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX;
  unsigned SP = Uses64BitFramePtr ? X86::RSP : X86::ESP;
  CI.addReg(AX, RegState::Implicit)
      .addReg(SP, RegState::Implicit)
      .addReg(AX, RegState::Define | RegState::Implicit)
      .addReg(SP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  if (STI.isTargetWin64() || !STI.isOSWindows()) {
    // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax, so we can reuse it when
    // adjusting %rsp.
    // All other platforms do not specify a particular ABI for the stack probe
    // function, so we arbitrarily define it to not adjust %esp/%rsp itself.
    BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Uses64BitFramePtr)), SP)
        .addReg(SP)
        .addReg(AX);
  }

  if (InProlog) {
    // Apply the frame setup flag to all inserted instrs.
    for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
      ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
  }
}

static unsigned calculateSetFPREG(uint64_t SPAdjust) {
  // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
  // and might require smaller successive adjustments.
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
  return SEHFrameOffset & -16;
}

// If we're forcing a stack realignment we can't rely on just the frame
// info, we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t
X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  Align MaxAlign = MFI.getMaxAlign(); // Desired stack alignment.
  Align StackAlign = getStackAlign();
  if (MF.getFunction().hasFnAttribute("stackrealign")) {
    if (MFI.hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = Align(SlotSize);
  }
  return MaxAlign.value();
}

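// Emit `and $-MaxAlign, %Reg` to round Reg (typically the stack pointer) down
// to the requested power-of-two alignment.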
void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL, unsigned Reg,
                                          uint64_t MaxAlign) const {
  uint64_t Val = -MaxAlign;
  unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);
  MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
                         .addReg(Reg)
                         .addImm(Val)
                         .setMIFlag(MachineInstr::FrameSetup);

  // The EFLAGS implicit def is dead.
  MI->getOperand(3).setIsDead();
}

bool X86FrameLowering::has128ByteRedZone(const MachineFunction &MF) const {
  // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
  // clobbered by any interrupt handler.
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  const Function &Fn = MF.getFunction();
  const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
  return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone);
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the
/// exception handler to generate the exception handling frames.

/*
  Here's a gist of what gets emitted:

  ; Establish frame pointer, if needed
  [if needs FP]
      push %rbp
      .cfi_def_cfa_offset 16
      .cfi_offset %rbp, -16
      .seh_pushreg %rbp
      mov  %rsp, %rbp
      .cfi_def_cfa_register %rbp

  ; Spill general-purpose registers
  [for all callee-saved GPRs]
      pushq %<reg>
      [if not needs FP]
        .cfi_def_cfa_offset (offset from RETADDR)
      .seh_pushreg %<reg>

  ; If the required stack alignment > default stack alignment
  ; rsp needs to be re-aligned. This creates a "re-alignment gap"
  ; of unknown size in the stack frame.
  [if stack needs re-alignment]
      and  $MASK, %rsp

  ; Allocate space for locals
  [if target is Windows and allocated space > 4096 bytes]
      ; Windows needs special care for allocations larger
      ; than one page.
      mov  $NNN, %rax
      call ___chkstk_ms/___chkstk
      sub  %rax, %rsp
  [else]
      sub  $NNN, %rsp

  [if needs FP]
      .seh_stackalloc (size of XMM spill slots)
      .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
  [else]
      .seh_stackalloc NNN

  ; Spill XMMs
  ; Note that, while only the Windows 64 ABI specifies XMMs as
  ; callee-preserved, they may get spilled on any platform if the current
  ; function calls @llvm.eh.unwind.init
  [if needs FP]
      [for all callee-saved XMM registers]
          movaps  %<xmm reg>, -MMM(%rbp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
              ; i.e. the offset relative to (%rbp - SEHFrameOffset)
  [else]
      [for all callee-saved XMM registers]
          movaps  %<xmm reg>, KKK(%rsp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, KKK

  .seh_endprologue

  [if needs base pointer]
      mov  %rsp, %rbx
      [if needs to restore base pointer]
          mov %rsp, -MMM(%rbp)

  ; Emit CFI info
  [if needs FP]
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rbp)
  [else]
      .cfi_def_cfa_offset (offset from RETADDR)
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rsp)

  Notes:
  - .seh directives are emitted only for Windows 64 ABI
  - .cv_fpo directives are emitted on win32 when emitting CodeView
  - .cfi directives are emitted for all other ABIs
  - for 32-bit code, substitute %e?? registers for %r??
*/

void X86FrameLowering::emitPrologue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  MachineBasicBlock::iterator MBBI = MBB.begin();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  const Function &Fn = MF.getFunction();
  MachineModuleInfo &MMI = MF.getMMI();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
  uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
  bool IsFunclet = MBB.isEHFuncletEntry();
  EHPersonality Personality = EHPersonality::Unknown;
  if (Fn.hasPersonalityFn())
    Personality = classifyEHPersonality(Fn.getPersonalityFn());
  bool FnHasClrFunclet =
      MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
  bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
  bool HasFP = hasFP(MF);
  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
  // FIXME: Emit FPO data for EH funclets.
  bool NeedsWinFPO =
      !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
  bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
  bool NeedsDwarfCFI = !IsWin64Prologue && MF.needsFrameMoves();
  Register FramePtr = TRI->getFrameRegister(MF);
  const Register MachineFramePtr =
      STI.isTarget64BitILP32()
          ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;
  Register BasePtr = TRI->getBaseRegister();
  bool HasWinCFI = false;

  // Debug location must be unknown since the first debug location is used
  // to determine the end of the prologue.
  DebugLoc DL;

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta && IsWin64Prologue)
    report_fatal_error("Can't handle guaranteed tail call under win64 yet");

  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
        X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);

  const bool EmitStackProbeCall =
      STI.getTargetLowering()->hasStackProbeSymbol(MF);
  unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF);

  // Re-align the stack on 64-bit if the x86-interrupt calling convention is
  // used and an error code was pushed, since the x86-64 ABI requires a 16-byte
  // stack alignment.
  if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
      Fn.arg_size() == 2) {
    StackSize += 8;
    MFI.setStackSize(StackSize);
    emitSPUpdate(MBB, MBBI, DL, -8, /*InEpilogue=*/false);
  }

  // If this is x86-64, the Red Zone is not disabled, we are a leaf function
  // using up to 128 bytes of stack space, and we don't have a frame pointer,
  // calls, or dynamic allocas, then we do not need to adjust the stack
  // pointer (we fit in the Red Zone). We also check that we don't push and
  // pop from the stack.
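  // For example, a 64-bit SysV leaf function with 112 bytes of locals and no
  // saved registers ends up with a StackSize of 0 here; its locals simply
  // live in the red zone below %rsp.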
  if (has128ByteRedZone(MF) && !TRI->needsStackRealignment(MF) &&
      !MFI.hasVarSizedObjects() &&             // No dynamic alloca.
      !MFI.adjustsStack() &&                   // No calls.
      !EmitStackProbeCall &&                   // No stack probes.
      !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop.
      !MF.shouldSplitStack()) {                // Regular stack
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
    StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
    MFI.setStackSize(StackSize);
  }

  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the callers.
  if (TailCallReturnAddrDelta < 0) {
    BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
                         /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  // Find the funclet establisher parameter
  Register Establisher = X86::NoRegister;
  if (IsClrFunclet)
    Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
  else if (IsFunclet)
    Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;

  if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
    // Immediately spill establisher into the home slot.
    // The runtime cares about this.
    // MOV64mr %rdx, 16(%rsp)
    unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
        .addReg(Establisher)
        .setMIFlag(MachineInstr::FrameSetup);
    MBB.addLiveIn(Establisher);
  }

  if (HasFP) {
    assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved");

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base
    // pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Callee-saved registers are pushed on stack before the stack is
    // realigned.
    if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = alignTo(NumBytes, MaxAlign);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
        .addReg(MachineFramePtr, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);

    if (NeedsDwarfCFI) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(nullptr, -2 * stackGrowth));

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
      BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createOffset(
                                  nullptr, DwarfFramePtr, 2 * stackGrowth));
    }

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (!IsWin64Prologue && !IsFunclet) {
      // Update EBP with the new base value.
      BuildMI(MBB, MBBI, DL,
              TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
              FramePtr)
          .addReg(StackPtr)
          .setMIFlag(MachineInstr::FrameSetup);

      if (NeedsDwarfCFI) {
        // Mark effective beginning of when frame pointer becomes valid.
        // Define the current CFA to use the EBP/RBP register.
        unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
        BuildCFI(MBB, MBBI, DL, MCCFIInstruction::createDefCfaRegister(
                                    nullptr, DwarfFramePtr));
      }

      if (NeedsWinFPO) {
        // .cv_fpo_setframe $FramePtr
        HasWinCFI = true;
        BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
            .addImm(FramePtr)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    }
  } else {
    assert(!IsFunclet && "funclets without FPs not yet implemented");
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Update the offset adjustment, which is mainly used by codeview to
  // translate from ESP to VFRAME relative local variable offsets.
  if (!IsFunclet) {
    if (HasFP && TRI->needsStackRealignment(MF))
      MFI.setOffsetAdjustment(-NumBytes);
    else
      MFI.setOffsetAdjustment(-StackSize);
  }

  // For EH funclets, only allocate enough space for outgoing calls. Save the
  // NumBytes value that we would've used for the parent frame.
  unsigned ParentFrameNumBytes = NumBytes;
  if (IsFunclet)
    NumBytes = getWinEHFuncletFrameSize(MF);

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() &&
         MBBI->getFlag(MachineInstr::FrameSetup) &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    Register Reg = MBBI->getOperand(0).getReg();
    ++MBBI;

    if (!HasFP && NeedsDwarfCFI) {
      // Mark callee-saved push instruction.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackOffset));
      StackOffset += stackGrowth;
    }

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(Reg)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  // Don't do this for Win64; it needs to realign the stack after the prologue.
  if (!IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlign))
          .addImm(MaxAlign)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
1414 NumBytes -= mergeSPUpdates(MBB, MBBI, true);
1415
1416 // Adjust stack pointer: ESP -= numbytes.
1417
1418 // Windows and cygwin/mingw require a prologue helper routine when allocating
1419 // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
1420 // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
1421 // stack and adjust the stack pointer in one go. The 64-bit version of
1422 // __chkstk is only responsible for probing the stack. The 64-bit prologue is
1423 // responsible for adjusting the stack pointer. Touching the stack at 4K
1424 // increments is necessary to ensure that the guard pages used by the OS
1425 // virtual memory manager are allocated in correct sequence.
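// A rough sketch of the resulting Win64 sequence (illustrative byte count;
// the exact callee and instructions are chosen by emitStackProbe below):
//   movl $0x5000, %eax        # bytes to allocate
//   callq __chkstk            # probes each 4K page, leaves RSP unchanged
//   subq %rax, %rsp           # the prologue itself adjusts the stack pointer
// On 32-bit targets the probe routine adjusts ESP as part of the call, so no
// trailing sub is emitted.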
1426 uint64_t AlignedNumBytes = NumBytes;
1427 if (IsWin64Prologue && !IsFunclet && TRI->needsStackRealignment(MF))
1428 AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
1429 if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
1430 assert(!X86FI->getUsesRedZone() &&
1431 "The Red Zone is not accounted for in stack probes");
1432
1433 // Check whether EAX is livein for this block.
1434 bool isEAXAlive = isEAXLiveIn(MBB);
1435
1436 if (isEAXAlive) {
1437 if (Is64Bit) {
1438 // Save RAX
1439 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
1440 .addReg(X86::RAX, RegState::Kill)
1441 .setMIFlag(MachineInstr::FrameSetup);
1442 } else {
1443 // Save EAX
1444 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
1445 .addReg(X86::EAX, RegState::Kill)
1446 .setMIFlag(MachineInstr::FrameSetup);
1447 }
1448 }
1449
1450 if (Is64Bit) {
1451 // Handle the 64-bit Windows ABI case where we need to call __chkstk.
1452 // Function prologue is responsible for adjusting the stack pointer.
1453 int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
1454 if (isUInt<32>(Alloc)) {
1455 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1456 .addImm(Alloc)
1457 .setMIFlag(MachineInstr::FrameSetup);
1458 } else if (isInt<32>(Alloc)) {
1459 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
1460 .addImm(Alloc)
1461 .setMIFlag(MachineInstr::FrameSetup);
1462 } else {
1463 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
1464 .addImm(Alloc)
1465 .setMIFlag(MachineInstr::FrameSetup);
1466 }
1467 } else {
1468 // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
1469 // We'll also use 4 already allocated bytes for EAX.
1470 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1471 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
1472 .setMIFlag(MachineInstr::FrameSetup);
1473 }
1474
1475 // Call __chkstk, __chkstk_ms, or __alloca.
1476 emitStackProbe(MF, MBB, MBBI, DL, true);
1477
1478 if (isEAXAlive) {
1479 // Restore RAX/EAX
1480 MachineInstr *MI;
1481 if (Is64Bit)
1482 MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX),
1483 StackPtr, false, NumBytes - 8);
1484 else
1485 MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
1486 StackPtr, false, NumBytes - 4);
1487 MI->setFlag(MachineInstr::FrameSetup);
1488 MBB.insert(MBBI, MI);
1489 }
1490 } else if (NumBytes) {
1491 emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false);
1492 }
1493
1494 if (NeedsWinCFI && NumBytes) {
1495 HasWinCFI = true;
1496 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
1497 .addImm(NumBytes)
1498 .setMIFlag(MachineInstr::FrameSetup);
1499 }
1500
1501 int SEHFrameOffset = 0;
1502 unsigned SPOrEstablisher;
1503 if (IsFunclet) {
1504 if (IsClrFunclet) {
1505 // The establisher parameter passed to a CLR funclet is actually a pointer
1506 // to the (mostly empty) frame of its nearest enclosing funclet; we have
1507 // to find the root function establisher frame by loading the PSPSym from
1508 // the intermediate frame.
1509 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1510 MachinePointerInfo NoInfo;
1511 MBB.addLiveIn(Establisher);
1512 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
1513 Establisher, false, PSPSlotOffset)
1514 .addMemOperand(MF.getMachineMemOperand(
1515 NoInfo, MachineMemOperand::MOLoad, SlotSize, Align(SlotSize)));
1517 // Save the root establisher back into the current funclet's (mostly
1518 // empty) frame, in case a sub-funclet or the GC needs it.
1519 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
1520 false, PSPSlotOffset)
1521 .addReg(Establisher)
1522 .addMemOperand(MF.getMachineMemOperand(
1523 NoInfo,
1524 MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
1525 SlotSize, Align(SlotSize)));
1526 }
1527 SPOrEstablisher = Establisher;
1528 } else {
1529 SPOrEstablisher = StackPtr;
1530 }
1531
1532 if (IsWin64Prologue && HasFP) {
1533 // Set RBP to a small fixed offset from RSP. In the funclet case, we base
1534 // this calculation on the incoming establisher, which holds the value of
1535 // RSP from the parent frame at the end of the prologue.
1536 SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
1537 if (SEHFrameOffset)
1538 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
1539 SPOrEstablisher, false, SEHFrameOffset);
1540 else
1541 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
1542 .addReg(SPOrEstablisher);
1543
1544 // If this is not a funclet, emit the CFI describing our frame pointer.
1545 if (NeedsWinCFI && !IsFunclet) {
1546 assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
1547 HasWinCFI = true;
1548 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
1549 .addImm(FramePtr)
1550 .addImm(SEHFrameOffset)
1551 .setMIFlag(MachineInstr::FrameSetup);
1552 if (isAsynchronousEHPersonality(Personality))
1553 MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset;
1554 }
1555 } else if (IsFunclet && STI.is32Bit()) {
1556 // Reset EBP / ESI to something good for funclets.
1557 MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
1558 // If we're a catch funclet, we can be returned to via catchret. Save ESP
1559 // into the registration node so that the runtime will restore it for us.
1560 if (!MBB.isCleanupFuncletEntry()) {
1561 assert(Personality == EHPersonality::MSVC_CXX);
1562 Register FrameReg;
1563 int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex;
1564 int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg);
1565 // ESP is the first field, so no extra displacement is needed.
1566 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg,
1567 false, EHRegOffset)
1568 .addReg(X86::ESP);
1569 }
1570 }
1571
1572 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
1573 const MachineInstr &FrameInstr = *MBBI;
1574 ++MBBI;
1575
1576 if (NeedsWinCFI) {
1577 int FI;
1578 if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
1579 if (X86::FR64RegClass.contains(Reg)) {
1580 int Offset;
1581 Register IgnoredFrameReg;
1582 if (IsWin64Prologue && IsFunclet)
1583 Offset = getWin64EHFrameIndexRef(MF, FI, IgnoredFrameReg);
1584 else
1585 Offset = getFrameIndexReference(MF, FI, IgnoredFrameReg) +
1586 SEHFrameOffset;
1587
1588 HasWinCFI = true;
1589 assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
1590 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
1591 .addImm(Reg)
1592 .addImm(Offset)
1593 .setMIFlag(MachineInstr::FrameSetup);
1594 }
1595 }
1596 }
1597 }
1598
1599 if (NeedsWinCFI && HasWinCFI)
1600 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
1601 .setMIFlag(MachineInstr::FrameSetup);
1602
1603 if (FnHasClrFunclet && !IsFunclet) {
1604 // Save the so-called Initial-SP (i.e. the value of the stack pointer
1605 // immediately after the prolog) into the PSPSlot so that funclets
1606 // and the GC can recover it.
1607 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
1608 auto PSPInfo = MachinePointerInfo::getFixedStack(
1609 MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx);
1610 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false,
1611 PSPSlotOffset)
1612 .addReg(StackPtr)
1613 .addMemOperand(MF.getMachineMemOperand(
1614 PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
1615 SlotSize, Align(SlotSize)));
1616 }
1617
1618 // Realign stack after we spilled callee-saved registers (so that we'll be
1619 // able to calculate their offsets from the frame pointer).
1620 // Win64 requires aligning the stack after the prologue.
1621 if (IsWin64Prologue && TRI->needsStackRealignment(MF)) {
1622 assert(HasFP && "There should be a frame pointer if stack is realigned.");
1623 BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
1624 }
1625
1626 // We already dealt with stack realignment and funclets above.
1627 if (IsFunclet && STI.is32Bit())
1628 return;
1629
1630 // If we need a base pointer, set it up here. It's whatever the value
1631 // of the stack pointer is at this point. Any variable size objects
1632 // will be allocated after this, so we can still use the base pointer
1633 // to reference locals.
1634 if (TRI->hasBasePointer(MF)) {
1635 // Update the base pointer with the current stack pointer.
1636 unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
1637 BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
1638 .addReg(SPOrEstablisher)
1639 .setMIFlag(MachineInstr::FrameSetup);
1640 if (X86FI->getRestoreBasePointer()) {
1641 // Stash value of base pointer. Saving RSP instead of EBP shortens
1642 // dependence chain. Used by SjLj EH.
1643 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1644 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)),
1645 FramePtr, true, X86FI->getRestoreBasePointerOffset())
1646 .addReg(SPOrEstablisher)
1647 .setMIFlag(MachineInstr::FrameSetup);
1648 }
1649
1650 if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
1651 // Stash the value of the frame pointer relative to the base pointer for
1652 // Win32 EH. Win32 EH does the inverse of the above: it recovers the
1653 // frame pointer from the base pointer rather than the other way
1654 // around.
1655 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1656 Register UsedReg;
1657 int Offset =
1658 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
1659 assert(UsedReg == BasePtr);
1660 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
1661 .addReg(FramePtr)
1662 .setMIFlag(MachineInstr::FrameSetup);
1663 }
1664 }
1665
1666 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
1667 // Mark end of stack pointer adjustment.
1668 if (!HasFP && NumBytes) {
1669 // Define the current CFA rule to use the provided offset.
1670 assert(StackSize);
1671 BuildCFI(
1672 MBB, MBBI, DL,
1673 MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize - stackGrowth));
1674 }
1675
1676 // Emit DWARF info specifying the offsets of the callee-saved registers.
1677 emitCalleeSavedFrameMoves(MBB, MBBI, DL, true);
1678 }
1679
1680 // An x86 interrupt handler cannot assume anything about the direction
1681 // flag (DF in the EFLAGS register). Clear this flag by emitting a "cld"
1682 // instruction in the prologue of each interrupt handler.
1683 //
1684 // FIXME: Emit the "cld" instruction only in these cases:
1685 // 1. The interrupt handler uses any of the "rep" instructions.
1686 // 2. The interrupt handler calls another function.
1687 //
1688 if (Fn.getCallingConv() == CallingConv::X86_INTR)
1689 BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
1690 .setMIFlag(MachineInstr::FrameSetup);
1691
1692 // At this point we know if the function has WinCFI or not.
1693 MF.setHasWinCFI(HasWinCFI);
1694 }
1695
canUseLEAForSPInEpilogue(const MachineFunction & MF) const1696 bool X86FrameLowering::canUseLEAForSPInEpilogue(
1697 const MachineFunction &MF) const {
1698 // We can't use LEA instructions for adjusting the stack pointer if we don't
1699 // have a frame pointer in the Win64 ABI. Only ADD instructions may be used
1700 // to deallocate the stack.
1701 // This means that we can use LEA for SP in two situations:
1702 // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
1703 // 2. We *have* a frame pointer which means we are permitted to use LEA.
1704 return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
1705 }
1706
isFuncletReturnInstr(MachineInstr & MI)1707 static bool isFuncletReturnInstr(MachineInstr &MI) {
1708 switch (MI.getOpcode()) {
1709 case X86::CATCHRET:
1710 case X86::CLEANUPRET:
1711 return true;
1712 default:
1713 return false;
1714 }
1715 llvm_unreachable("impossible");
1716 }
1717
1718 // CLR funclets use a special "Previous Stack Pointer Symbol" slot on the
1719 // stack. It holds a pointer to the bottom of the root function frame. The
1720 // establisher frame pointer passed to a nested funclet may point to the
1721 // (mostly empty) frame of its parent funclet, but it will need to find
1722 // the frame of the root function to access locals. To facilitate this,
1723 // every funclet copies the pointer to the bottom of the root function
1724 // frame into a PSPSym slot in its own (mostly empty) stack frame. Using the
1725 // same offset for the PSPSym in the root function frame that's used in the
1726 // funclets' frames allows each funclet to dynamically accept any ancestor
1727 // frame as its establisher argument (the runtime doesn't guarantee the
1728 // immediate parent for some reason lost to history), and also allows the GC,
1729 // which uses the PSPSym for some bookkeeping, to find it in any funclet's
1730 // frame with only a single offset reported for the entire method.
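// Rough layout sketch (K = getPSPSlotOffsetFromSP, identical in all frames;
// slot positions are illustrative only):
//
//   root frame:    [ locals ...  | PSPSym at SP+K | outgoing args ]
//   funclet frame: [ CSRs  ...   | PSPSym at SP+K | outgoing args ]
//
// Given any ancestor's establisher pointer, a funclet can therefore recover
// the root frame with a single load from [establisher + K].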
1731 unsigned
getPSPSlotOffsetFromSP(const MachineFunction & MF) const1732 X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
1733 const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
1734 Register SPReg;
1735 int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
1736 /*IgnoreSPUpdates*/ true);
1737 assert(Offset >= 0 && SPReg == TRI->getStackRegister());
1738 return static_cast<unsigned>(Offset);
1739 }
1740
1741 unsigned
getWinEHFuncletFrameSize(const MachineFunction & MF) const1742 X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
1743 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1744 // This is the size of the pushed CSRs.
1745 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1746 // This is the size of callee saved XMMs.
1747 const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
1748 unsigned XMMSize = WinEHXMMSlotInfo.size() *
1749 TRI->getSpillSize(X86::VR128RegClass);
1750 // This is the amount of stack a funclet needs to allocate.
1751 unsigned UsedSize;
1752 EHPersonality Personality =
1753 classifyEHPersonality(MF.getFunction().getPersonalityFn());
1754 if (Personality == EHPersonality::CoreCLR) {
1755 // CLR funclets need to hold enough space to include the PSPSym, at the
1756 // same offset from the stack pointer (immediately after the prolog) as it
1757 // resides at in the main function.
1758 UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
1759 } else {
1760 // Other funclets just need enough stack for outgoing call arguments.
1761 UsedSize = MF.getFrameInfo().getMaxCallFrameSize();
1762 }
1763 // RBP is not included in the callee saved register block. After pushing RBP,
1764 // everything is 16 byte aligned. Everything we allocate before an outgoing
1765 // call must also be 16 byte aligned.
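// Worked example with hypothetical numbers: with CSSize = 24, UsedSize = 40
// and a 16-byte stack alignment, FrameSizeMinusRBP = alignTo(64, 16) = 64,
// so a funclet with 32 bytes of XMM slots allocates 64 + 32 - 24 = 72 bytes.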
1766 unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlign());
1767 // Subtract out the size of the callee saved registers. This is how much stack
1768 // each funclet will allocate.
1769 return FrameSizeMinusRBP + XMMSize - CSSize;
1770 }
1771
isTailCallOpcode(unsigned Opc)1772 static bool isTailCallOpcode(unsigned Opc) {
1773 return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
1774 Opc == X86::TCRETURNmi ||
1775 Opc == X86::TCRETURNri64 || Opc == X86::TCRETURNdi64 ||
1776 Opc == X86::TCRETURNmi64;
1777 }
1778
emitEpilogue(MachineFunction & MF,MachineBasicBlock & MBB) const1779 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
1780 MachineBasicBlock &MBB) const {
1781 const MachineFrameInfo &MFI = MF.getFrameInfo();
1782 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1783 MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator();
1784 MachineBasicBlock::iterator MBBI = Terminator;
1785 DebugLoc DL;
1786 if (MBBI != MBB.end())
1787 DL = MBBI->getDebugLoc();
1788 // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
1789 const bool Is64BitILP32 = STI.isTarget64BitILP32();
1790 Register FramePtr = TRI->getFrameRegister(MF);
1791 unsigned MachineFramePtr =
1792 Is64BitILP32 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;
1793
1794 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1795 bool NeedsWin64CFI =
1796 IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
1797 bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);
1798
1799 // Get the number of bytes to allocate from the FrameInfo.
1800 uint64_t StackSize = MFI.getStackSize();
1801 uint64_t MaxAlign = calculateMaxStackAlign(MF);
1802 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1803 bool HasFP = hasFP(MF);
1804 uint64_t NumBytes = 0;
1805
1806 bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() &&
1807 !MF.getTarget().getTargetTriple().isOSWindows()) &&
1808 MF.needsFrameMoves();
1809
1810 if (IsFunclet) {
1811 assert(HasFP && "EH funclets without FP not yet implemented");
1812 NumBytes = getWinEHFuncletFrameSize(MF);
1813 } else if (HasFP) {
1814 // Calculate required stack adjustment.
1815 uint64_t FrameSize = StackSize - SlotSize;
1816 NumBytes = FrameSize - CSSize;
1817
1818 // Callee-saved registers were pushed on stack before the stack was
1819 // realigned.
1820 if (TRI->needsStackRealignment(MF) && !IsWin64Prologue)
1821 NumBytes = alignTo(FrameSize, MaxAlign);
1822 } else {
1823 NumBytes = StackSize - CSSize;
1824 }
1825 uint64_t SEHStackAllocAmt = NumBytes;
1826
1827 // AfterPop is the position to insert .cfi_restore.
1828 MachineBasicBlock::iterator AfterPop = MBBI;
1829 if (HasFP) {
1830 // Pop EBP.
1831 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
1832 MachineFramePtr)
1833 .setMIFlag(MachineInstr::FrameDestroy);
1834 if (NeedsDwarfCFI) {
1835 unsigned DwarfStackPtr =
1836 TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
1837 BuildCFI(MBB, MBBI, DL,
1838 MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, SlotSize));
1839 if (!MBB.succ_empty() && !MBB.isReturnBlock()) {
1840 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1841 BuildCFI(MBB, AfterPop, DL,
1842 MCCFIInstruction::createRestore(nullptr, DwarfFramePtr));
1843 --MBBI;
1844 --AfterPop;
1845 }
1846 --MBBI;
1847 }
1848 }
1849
1850 MachineBasicBlock::iterator FirstCSPop = MBBI;
1851 // Skip the callee-saved pop instructions.
1852 while (MBBI != MBB.begin()) {
1853 MachineBasicBlock::iterator PI = std::prev(MBBI);
1854 unsigned Opc = PI->getOpcode();
1855
1856 if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
1857 if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
1858 (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)))
1859 break;
1860 FirstCSPop = PI;
1861 }
1862
1863 --MBBI;
1864 }
1865 MBBI = FirstCSPop;
1866
1867 if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
1868 emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
1869
1870 if (MBBI != MBB.end())
1871 DL = MBBI->getDebugLoc();
1872
1873 // If there is an ADD32ri or SUB32ri of ESP immediately before this
1874 // instruction, merge the two instructions.
1875 if (NumBytes || MFI.hasVarSizedObjects())
1876 NumBytes += mergeSPUpdates(MBB, MBBI, true);
1877
1878 // If dynamic allocas are used, reset ESP to point to the last callee-saved
1879 // slot before popping them off. The same applies when the stack was
1880 // realigned. Don't do this if this was a funclet epilogue, since funclets
1881 // will not do realignment or dynamic stack allocation.
1882 if ((TRI->needsStackRealignment(MF) || MFI.hasVarSizedObjects()) &&
1883 !IsFunclet) {
1884 if (TRI->needsStackRealignment(MF))
1885 MBBI = FirstCSPop;
1886 unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
1887 uint64_t LEAAmount =
1888 IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
1889
1890 // There are only two legal forms of epilogue:
1891 // - add SEHAllocationSize, %rsp
1892 // - lea SEHAllocationSize(%FramePtr), %rsp
1893 //
1894 // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
1895 // However, we may use this sequence if we have a frame pointer because the
1896 // effects of the prologue can safely be undone.
1897 if (LEAAmount != 0) {
1898 unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
1899 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
1900 FramePtr, false, LEAAmount);
1901 --MBBI;
1902 } else {
1903 unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
1904 BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
1905 .addReg(FramePtr);
1906 --MBBI;
1907 }
1908 } else if (NumBytes) {
1909 // Adjust stack pointer back: ESP += numbytes.
1910 emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true);
1911 if (!hasFP(MF) && NeedsDwarfCFI) {
1912 // Define the current CFA rule to use the provided offset.
1913 BuildCFI(MBB, MBBI, DL,
1914 MCCFIInstruction::cfiDefCfaOffset(nullptr, CSSize + SlotSize));
1915 }
1916 --MBBI;
1917 }
1918
1919 // The Windows unwinder will not invoke a function's exception handler if
1920 // IP is in the prologue or the epilogue. This behavior causes a problem when a
1921 // call immediately precedes an epilogue, because the return address points
1922 // into the epilogue. To cope with that, we insert an epilogue marker here,
1923 // then replace it with a 'nop' if it ends up immediately after a CALL in the
1924 // final emitted code.
1925 if (NeedsWin64CFI && MF.hasWinCFI())
1926 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
1927
1928 if (!hasFP(MF) && NeedsDwarfCFI) {
1929 MBBI = FirstCSPop;
1930 int64_t Offset = -CSSize - SlotSize;
1931 // Mark callee-saved pop instruction.
1932 // Define the current CFA rule to use the provided offset.
1933 while (MBBI != MBB.end()) {
1934 MachineBasicBlock::iterator PI = MBBI;
1935 unsigned Opc = PI->getOpcode();
1936 ++MBBI;
1937 if (Opc == X86::POP32r || Opc == X86::POP64r) {
1938 Offset += SlotSize;
1939 BuildCFI(MBB, MBBI, DL,
1940 MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset));
1941 }
1942 }
1943 }
1944
1945 // Emit DWARF info specifying the restores of the callee-saved registers.
1946 // If the epilogue has a return inside it, or the block otherwise has no
1947 // successors, there is no need to generate .cfi_restore for callee-saved registers.
1948 if (NeedsDwarfCFI && !MBB.succ_empty() && !MBB.isReturnBlock()) {
1949 emitCalleeSavedFrameMoves(MBB, AfterPop, DL, false);
1950 }
1951
1952 if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
1953 // Add the return addr area delta back since we are not tail calling.
1954 int Offset = -1 * X86FI->getTCReturnAddrDelta();
1955 assert(Offset >= 0 && "TCDelta should never be positive");
1956 if (Offset) {
1957 // Check for possible merge with preceding ADD instruction.
1958 Offset += mergeSPUpdates(MBB, Terminator, true);
1959 emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true);
1960 }
1961 }
1962 }
1963
getFrameIndexReference(const MachineFunction & MF,int FI,Register & FrameReg) const1964 int X86FrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
1965 Register &FrameReg) const {
1966 const MachineFrameInfo &MFI = MF.getFrameInfo();
1967
1968 bool IsFixed = MFI.isFixedObjectIndex(FI);
1969 // We can't calculate offset from frame pointer if the stack is realigned,
1970 // so enforce usage of stack/base pointer. The base pointer is used when we
1971 // have dynamic allocas in addition to dynamic realignment.
1972 if (TRI->hasBasePointer(MF))
1973 FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
1974 else if (TRI->needsStackRealignment(MF))
1975 FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
1976 else
1977 FrameReg = TRI->getFrameRegister(MF);
1978
1979 // Offset will hold the offset from the stack pointer at function entry to the
1980 // object.
1981 // We need to factor in additional offsets applied during the prologue to the
1982 // frame, base, and stack pointer depending on which is used.
1983 int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
1984 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1985 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
1986 uint64_t StackSize = MFI.getStackSize();
1987 bool HasFP = hasFP(MF);
1988 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1989 int64_t FPDelta = 0;
1990
1991 // In an x86 interrupt, remove the offset we added to account for the return
1992 // address from any stack object allocated in the caller's frame. Interrupts
1993 // do not have a standard return address. Fixed objects in the current frame,
1994 // such as SSE register spills, should not get this treatment.
1995 if (MF.getFunction().getCallingConv() == CallingConv::X86_INTR &&
1996 Offset >= 0) {
1997 Offset += getOffsetOfLocalArea();
1998 }
1999
2000 if (IsWin64Prologue) {
2001 assert(!MFI.hasCalls() || (StackSize % 16) == 8);
2002
2003 // Calculate required stack adjustment.
2004 uint64_t FrameSize = StackSize - SlotSize;
2005 // If required, include space for the extra hidden slot used to stash the base pointer.
2006 if (X86FI->getRestoreBasePointer())
2007 FrameSize += SlotSize;
2008 uint64_t NumBytes = FrameSize - CSSize;
2009
2010 uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
2011 if (FI && FI == X86FI->getFAIndex())
2012 return -SEHFrameOffset;
2013
2014 // FPDelta is the offset between the "traditional" FP location (the old base
2015 // pointer followed by the return address) and the location required by the
2016 // restricted Win64 prologue.
2017 // Add FPDelta to all offsets below that go through the frame pointer.
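// Hypothetical example, assuming calculateSetFPREG clamps the offset to a
// small 16-byte-aligned value: with StackSize = 344 and no pushed CSRs,
// FrameSize = 336; an SEHFrameOffset of 128 then gives
// FPDelta = 336 - 128 = 208, which satisfies the alignment assert below.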
2018 FPDelta = FrameSize - SEHFrameOffset;
2019 assert((!MFI.hasCalls() || (FPDelta % 16) == 0) &&
2020 "FPDelta isn't aligned per the Win64 ABI!");
2021 }
2022
2023
2024 if (TRI->hasBasePointer(MF)) {
2025 assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
2026 if (FI < 0) {
2027 // Skip the saved EBP.
2028 return Offset + SlotSize + FPDelta;
2029 } else {
2030 assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize)));
2031 return Offset + StackSize;
2032 }
2033 } else if (TRI->needsStackRealignment(MF)) {
2034 if (FI < 0) {
2035 // Skip the saved EBP.
2036 return Offset + SlotSize + FPDelta;
2037 } else {
2038 assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize)));
2039 return Offset + StackSize;
2040 }
2041 // FIXME: Support tail calls
2042 } else {
2043 if (!HasFP)
2044 return Offset + StackSize;
2045
2046 // Skip the saved EBP.
2047 Offset += SlotSize;
2048
2049 // Skip the RETADDR move area
2050 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
2051 if (TailCallReturnAddrDelta < 0)
2052 Offset -= TailCallReturnAddrDelta;
2053 }
2054
2055 return Offset + FPDelta;
2056 }
2057
getWin64EHFrameIndexRef(const MachineFunction & MF,int FI,Register & FrameReg) const2058 int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF, int FI,
2059 Register &FrameReg) const {
2060 const MachineFrameInfo &MFI = MF.getFrameInfo();
2061 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2062 const auto& WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
2063 const auto it = WinEHXMMSlotInfo.find(FI);
2064
2065 if (it == WinEHXMMSlotInfo.end())
2066 return getFrameIndexReference(MF, FI, FrameReg);
2067
2068 FrameReg = TRI->getStackRegister();
2069 return alignDown(MFI.getMaxCallFrameSize(), getStackAlign().value()) +
2070 it->second;
2071 }
2072
getFrameIndexReferenceSP(const MachineFunction & MF,int FI,Register & FrameReg,int Adjustment) const2073 int X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF,
2074 int FI, Register &FrameReg,
2075 int Adjustment) const {
2076 const MachineFrameInfo &MFI = MF.getFrameInfo();
2077 FrameReg = TRI->getStackRegister();
2078 return MFI.getObjectOffset(FI) - getOffsetOfLocalArea() + Adjustment;
2079 }
2080
getFrameIndexReferencePreferSP(const MachineFunction & MF,int FI,Register & FrameReg,bool IgnoreSPUpdates) const2081 int X86FrameLowering::getFrameIndexReferencePreferSP(
2082 const MachineFunction &MF, int FI, Register &FrameReg,
2083 bool IgnoreSPUpdates) const {
2084
2085 const MachineFrameInfo &MFI = MF.getFrameInfo();
2086 // Does not include any dynamic realign.
2087 const uint64_t StackSize = MFI.getStackSize();
2088 // LLVM arranges the stack as follows:
2089 // ...
2090 // ARG2
2091 // ARG1
2092 // RETADDR
2093 // PUSH RBP <-- RBP points here
2094 // PUSH CSRs
2095 // ~~~~~~~ <-- possible stack realignment (non-win64)
2096 // ...
2097 // STACK OBJECTS
2098 // ... <-- RSP after prologue points here
2099 // ~~~~~~~ <-- possible stack realignment (win64)
2100 //
2101 // if (hasVarSizedObjects()):
2102 // ... <-- "base pointer" (ESI/RBX) points here
2103 // DYNAMIC ALLOCAS
2104 // ... <-- RSP points here
2105 //
2106 // Case 1: In the simple case of no stack realignment and no dynamic
2107 // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
2108 // with fixed offsets from RSP.
2109 //
2110 // Case 2: In the case of stack realignment with no dynamic allocas, fixed
2111 // stack objects are addressed with RBP and regular stack objects with RSP.
2112 //
2113 // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
2114 // to address stack arguments for outgoing calls and nothing else. The "base
2115 // pointer" points to local variables, and RBP points to fixed objects.
2116 //
2117 // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
2118 // answer we give is relative to the SP after the prologue, and not the
2119 // SP in the middle of the function.
2120
2121 if (MFI.isFixedObjectIndex(FI) && TRI->needsStackRealignment(MF) &&
2122 !STI.isTargetWin64())
2123 return getFrameIndexReference(MF, FI, FrameReg);
2124
2125 // If !hasReservedCallFrame the function might have SP adjustment in the
2126 // body. So, even though the offset is statically known, it depends on where
2127 // we are in the function.
2128 if (!IgnoreSPUpdates && !hasReservedCallFrame(MF))
2129 return getFrameIndexReference(MF, FI, FrameReg);
2130
2131 // We don't handle tail calls, and shouldn't be seeing them either.
2132 assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
2133 "we don't handle this case!");
2134
2135 // This is how the math works out:
2136 //
2137 // %rsp grows (i.e. gets lower) left to right. Each box below is
2138 // one word (eight bytes). Obj0 is the stack slot we're trying to
2139 // get to.
2140 //
2141 // ----------------------------------
2142 // | BP | Obj0 | Obj1 | ... | ObjN |
2143 // ----------------------------------
2144 // ^ ^ ^ ^
2145 // A B C E
2146 //
2147 // A is the incoming stack pointer.
2148 // (B - A) is the local area offset (-8 for x86-64) [1]
2149 // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
2150 //
2151 // |(E - B)| is the StackSize (absolute value, positive). For a
2152 // stack that grows down, this works out to be (B - E). [3]
2153 //
2154 // E is also the value of %rsp after stack has been set up, and we
2155 // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
2156 // (C - E) == (C - A) - (B - A) + (B - E)
2157 // { Using [1], [2] and [3] above }
2158 // == getObjectOffset - LocalAreaOffset + StackSize
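// Plugging in illustrative numbers for x86-64: with getObjectOffset = -24,
// LocalAreaOffset = -8 and StackSize = 40, the returned offset is
// -24 - (-8) + 40 = 24, i.e. Obj0 lives at 24(%rsp) once the prologue has
// run.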
2159
2160 return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
2161 }
2162
assignCalleeSavedSpillSlots(MachineFunction & MF,const TargetRegisterInfo * TRI,std::vector<CalleeSavedInfo> & CSI) const2163 bool X86FrameLowering::assignCalleeSavedSpillSlots(
2164 MachineFunction &MF, const TargetRegisterInfo *TRI,
2165 std::vector<CalleeSavedInfo> &CSI) const {
2166 MachineFrameInfo &MFI = MF.getFrameInfo();
2167 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2168
2169 unsigned CalleeSavedFrameSize = 0;
2170 unsigned XMMCalleeSavedFrameSize = 0;
2171 auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
2172 int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
2173
2174 int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
2175
2176 if (TailCallReturnAddrDelta < 0) {
2177 // create RETURNADDR area
2178 // arg
2179 // arg
2180 // RETADDR
2181 // { ...
2182 // RETADDR area
2183 // ...
2184 // }
2185 // [EBP]
2186 MFI.CreateFixedObject(-TailCallReturnAddrDelta,
2187 TailCallReturnAddrDelta - SlotSize, true);
2188 }
2189
2190 // Spill the BasePtr if it's used.
2191 if (this->TRI->hasBasePointer(MF)) {
2192 // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
2193 if (MF.hasEHFunclets()) {
2194 int FI = MFI.CreateSpillStackObject(SlotSize, Align(SlotSize));
2195 X86FI->setHasSEHFramePtrSave(true);
2196 X86FI->setSEHFramePtrSaveIndex(FI);
2197 }
2198 }
2199
2200 if (hasFP(MF)) {
2201 // emitPrologue always spills the frame register first.
2202 SpillSlotOffset -= SlotSize;
2203 MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2204
2205 // Since emitPrologue and emitEpilogue will handle spilling and restoring of
2206 // the frame register, we can delete it from the CSI list and not have to worry
2207 // about avoiding it later.
2208 Register FPReg = TRI->getFrameRegister(MF);
2209 for (unsigned i = 0; i < CSI.size(); ++i) {
2210 if (TRI->regsOverlap(CSI[i].getReg(),FPReg)) {
2211 CSI.erase(CSI.begin() + i);
2212 break;
2213 }
2214 }
2215 }
2216
2217 // Assign slots for GPRs. It increases frame size.
2218 for (unsigned i = CSI.size(); i != 0; --i) {
2219 unsigned Reg = CSI[i - 1].getReg();
2220
2221 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2222 continue;
2223
2224 SpillSlotOffset -= SlotSize;
2225 CalleeSavedFrameSize += SlotSize;
2226
2227 int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2228 CSI[i - 1].setFrameIdx(SlotIndex);
2229 }
2230
2231 X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
2232 MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize);
2233
2234 // Assign slots for XMMs.
2235 for (unsigned i = CSI.size(); i != 0; --i) {
2236 unsigned Reg = CSI[i - 1].getReg();
2237 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2238 continue;
2239
2240 // If this is a k-register, make sure we look it up via the largest legal type.
2241 MVT VT = MVT::Other;
2242 if (X86::VK16RegClass.contains(Reg))
2243 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2244
2245 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2246 unsigned Size = TRI->getSpillSize(*RC);
2247 Align Alignment = TRI->getSpillAlign(*RC);
2248 // ensure alignment
2249 assert(SpillSlotOffset < 0 && "SpillSlotOffset should always be < 0 on X86");
2250 SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);
2251
2252 // spill into slot
2253 SpillSlotOffset -= Size;
2254 int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
2255 CSI[i - 1].setFrameIdx(SlotIndex);
2256 MFI.ensureMaxAlignment(Alignment);
2257
2258 // Save the start offset and size of XMM in stack frame for funclets.
2259 if (X86::VR128RegClass.contains(Reg)) {
2260 WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
2261 XMMCalleeSavedFrameSize += Size;
2262 }
2263 }
2264
2265 return true;
2266 }
2267
spillCalleeSavedRegisters(MachineBasicBlock & MBB,MachineBasicBlock::iterator MI,ArrayRef<CalleeSavedInfo> CSI,const TargetRegisterInfo * TRI) const2268 bool X86FrameLowering::spillCalleeSavedRegisters(
2269 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2270 ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2271 DebugLoc DL = MBB.findDebugLoc(MI);
2272
2273 // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
2274 // for us, and there are no XMM CSRs on Win32.
2275 if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
2276 return true;
2277
2278 // Push GPRs. It increases frame size.
2279 const MachineFunction &MF = *MBB.getParent();
2280 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
2281 for (unsigned i = CSI.size(); i != 0; --i) {
2282 unsigned Reg = CSI[i - 1].getReg();
2283
2284 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2285 continue;
2286
2287 const MachineRegisterInfo &MRI = MF.getRegInfo();
2288 bool isLiveIn = MRI.isLiveIn(Reg);
2289 if (!isLiveIn)
2290 MBB.addLiveIn(Reg);
2291
2292 // Decide whether we can add a kill flag to the use.
2293 bool CanKill = !isLiveIn;
2294 // Check if any register aliased with Reg is live-in.
2295 if (CanKill) {
2296 for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
2297 if (MRI.isLiveIn(*AReg)) {
2298 CanKill = false;
2299 break;
2300 }
2301 }
2302 }
2303
2304 // Do not set a kill flag on values that are also marked as live-in. This
2305 // happens with the @llvm.returnaddress intrinsic and with arguments
2306 // passed in callee saved registers.
2307 // Omitting the kill flags is conservatively correct even if the live-in
2308 // is not used after all.
2309 BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, getKillRegState(CanKill))
2310 .setMIFlag(MachineInstr::FrameSetup);
2311 }
2312
2313 // Spill the XMM regs. X86 has no push/pop instructions for XMM registers,
2314 // so spill them to the stack frame instead.
2315 for (unsigned i = CSI.size(); i != 0; --i) {
2316 unsigned Reg = CSI[i-1].getReg();
2317 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2318 continue;
2319
2320 // If this is a k-register, make sure we look it up via the largest legal type.
2321 MVT VT = MVT::Other;
2322 if (X86::VK16RegClass.contains(Reg))
2323 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2324
2325 // Add the callee-saved register as live-in. It's killed at the spill.
2326 MBB.addLiveIn(Reg);
2327 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2328
2329 TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
2330 TRI);
2331 --MI;
2332 MI->setFlag(MachineInstr::FrameSetup);
2333 ++MI;
2334 }
2335
2336 return true;
2337 }
2338
emitCatchRetReturnValue(MachineBasicBlock & MBB,MachineBasicBlock::iterator MBBI,MachineInstr * CatchRet) const2339 void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
2340 MachineBasicBlock::iterator MBBI,
2341 MachineInstr *CatchRet) const {
2342 // SEH shouldn't use catchret.
2343 assert(!isAsynchronousEHPersonality(classifyEHPersonality(
2344 MBB.getParent()->getFunction().getPersonalityFn())) &&
2345 "SEH should not use CATCHRET");
2346 DebugLoc DL = CatchRet->getDebugLoc();
2347 MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
2348
2349 // Fill EAX/RAX with the address of the target block.
2350 if (STI.is64Bit()) {
2351 // LEA64r CatchRetTarget(%rip), %rax
2352 BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX)
2353 .addReg(X86::RIP)
2354 .addImm(0)
2355 .addReg(0)
2356 .addMBB(CatchRetTarget)
2357 .addReg(0);
2358 } else {
2359 // MOV32ri $CatchRetTarget, %eax
2360 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
2361 .addMBB(CatchRetTarget);
2362 }
2363
2364 // Record that we've taken the address of CatchRetTarget and no longer just
2365 // reference it in a terminator.
2366 CatchRetTarget->setHasAddressTaken();
2367 }
2368
restoreCalleeSavedRegisters(MachineBasicBlock & MBB,MachineBasicBlock::iterator MI,MutableArrayRef<CalleeSavedInfo> CSI,const TargetRegisterInfo * TRI) const2369 bool X86FrameLowering::restoreCalleeSavedRegisters(
2370 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2371 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2372 if (CSI.empty())
2373 return false;
2374
2375 if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
2376 // Don't restore CSRs in 32-bit EH funclets. Matches
2377 // spillCalleeSavedRegisters.
2378 if (STI.is32Bit())
2379 return true;
2380 // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
2381 // funclets. emitEpilogue transforms these to normal jumps.
2382 if (MI->getOpcode() == X86::CATCHRET) {
2383 const Function &F = MBB.getParent()->getFunction();
2384 bool IsSEH = isAsynchronousEHPersonality(
2385 classifyEHPersonality(F.getPersonalityFn()));
2386 if (IsSEH)
2387 return true;
2388 }
2389 }
2390
2391 DebugLoc DL = MBB.findDebugLoc(MI);
2392
2393 // Reload XMMs from stack frame.
2394 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
2395 unsigned Reg = CSI[i].getReg();
2396 if (X86::GR64RegClass.contains(Reg) ||
2397 X86::GR32RegClass.contains(Reg))
2398 continue;
2399
2400 // If this is a k-register, make sure we look it up via the largest legal type.
2401 MVT VT = MVT::Other;
2402 if (X86::VK16RegClass.contains(Reg))
2403 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2404
2405 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2406 TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
2407 }
2408
2409 // POP GPRs.
2410 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
2411 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
2412 unsigned Reg = CSI[i].getReg();
2413 if (!X86::GR64RegClass.contains(Reg) &&
2414 !X86::GR32RegClass.contains(Reg))
2415 continue;
2416
2417 BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
2418 .setMIFlag(MachineInstr::FrameDestroy);
2419 }
2420 return true;
2421 }
2422
determineCalleeSaves(MachineFunction & MF,BitVector & SavedRegs,RegScavenger * RS) const2423 void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
2424 BitVector &SavedRegs,
2425 RegScavenger *RS) const {
2426 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
2427
2428 // Spill the BasePtr if it's used.
2429 if (TRI->hasBasePointer(MF)){
2430 Register BasePtr = TRI->getBaseRegister();
2431 if (STI.isTarget64BitILP32())
2432 BasePtr = getX86SubSuperRegister(BasePtr, 64);
2433 SavedRegs.set(BasePtr);
2434 }
2435 }
2436
2437 static bool
HasNestArgument(const MachineFunction * MF)2438 HasNestArgument(const MachineFunction *MF) {
2439 const Function &F = MF->getFunction();
2440 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end();
2441 I != E; I++) {
2442 if (I->hasNestAttr() && !I->use_empty())
2443 return true;
2444 }
2445 return false;
2446 }
2447
2448 /// GetScratchRegister - Get a temp register for performing work in the
2449 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
2450 /// and the properties of the function either one or two registers will be
2451 /// needed. Set primary to true for the first register, false for the second.
2452 static unsigned
GetScratchRegister(bool Is64Bit,bool IsLP64,const MachineFunction & MF,bool Primary)2453 GetScratchRegister(bool Is64Bit, bool IsLP64, const MachineFunction &MF, bool Primary) {
2454 CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
2455
2456 // Erlang stuff.
2457 if (CallingConvention == CallingConv::HiPE) {
2458 if (Is64Bit)
2459 return Primary ? X86::R14 : X86::R13;
2460 else
2461 return Primary ? X86::EBX : X86::EDI;
2462 }
2463
2464 if (Is64Bit) {
2465 if (IsLP64)
2466 return Primary ? X86::R11 : X86::R12;
2467 else
2468 return Primary ? X86::R11D : X86::R12D;
2469 }
2470
2471 bool IsNested = HasNestArgument(&MF);
2472
2473 if (CallingConvention == CallingConv::X86_FastCall ||
2474 CallingConvention == CallingConv::Fast ||
2475 CallingConvention == CallingConv::Tail) {
2476 if (IsNested)
2477 report_fatal_error("Segmented stacks does not support fastcall with "
2478 "nested function.");
2479 return Primary ? X86::EAX : X86::ECX;
2480 }
2481 if (IsNested)
2482 return Primary ? X86::EDX : X86::EAX;
2483 return Primary ? X86::ECX : X86::EAX;
2484 }
2485
2486 // The stack limit in the TCB is set to this many bytes above the actual stack
2487 // limit.
2488 static const uint64_t kSplitStackAvailable = 256;
2489
adjustForSegmentedStacks(MachineFunction & MF,MachineBasicBlock & PrologueMBB) const2490 void X86FrameLowering::adjustForSegmentedStacks(
2491 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
2492 MachineFrameInfo &MFI = MF.getFrameInfo();
2493 uint64_t StackSize;
2494 unsigned TlsReg, TlsOffset;
2495 DebugLoc DL;
2496
2497 // To support shrink-wrapping we would need to insert the new blocks
2498 // at the right place and update the branches to PrologueMBB.
2499 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
2500
2501 unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2502 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
2503 "Scratch register is live-in");
2504
2505 if (MF.getFunction().isVarArg())
2506 report_fatal_error("Segmented stacks do not support vararg functions.");
2507 if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
2508 !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
2509 !STI.isTargetDragonFly())
2510 report_fatal_error("Segmented stacks not supported on this platform.");
2511
2512 // Eventually StackSize will be calculated by a link-time pass, which will
2513 // also decide whether checking code needs to be injected into this particular
2514 // prologue.
2515 StackSize = MFI.getStackSize();
2516
2517 // Do not generate a prologue for leaf functions with a stack of size zero.
2518 // For non-leaf functions we have to allow for the possibility that the
2519 // call is to a non-split function, as in PR37807. This function could also
2520 // take the address of a non-split function. When the linker tries to adjust
2521 // its non-existent prologue, it would fail with an error. Mark the object
2522 // file so that such failures are not errors. See this Go language bug-report
2523 // https://go-review.googlesource.com/c/go/+/148819/
2524 if (StackSize == 0 && !MFI.hasTailCall()) {
2525 MF.getMMI().setHasNosplitStack(true);
2526 return;
2527 }
2528
2529 MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
2530 MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
2531 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2532 bool IsNested = false;
2533
2534 // We only need to know whether the function has a nest argument in 64-bit mode.
2535 if (Is64Bit)
2536 IsNested = HasNestArgument(&MF);
2537
2538 // The MOV R10, RAX needs to be in a different block, since the RET we emit in
2539 // allocMBB needs to be the last (terminating) instruction.
2540
2541 for (const auto &LI : PrologueMBB.liveins()) {
2542 allocMBB->addLiveIn(LI);
2543 checkMBB->addLiveIn(LI);
2544 }
2545
2546 if (IsNested)
2547 allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
2548
2549 MF.push_front(allocMBB);
2550 MF.push_front(checkMBB);
2551
2552 // When the frame size is less than 256 we just compare the stack
2553 // boundary directly to the value of the stack pointer, per gcc.
2554 bool CompareStackPointer = StackSize < kSplitStackAvailable;
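// Illustrative x86-64 Linux encoding of the two cases (ScratchReg = r11,
// TlsOffset = 0x70 as selected below):
//   StackSize < 256:  cmp %fs:0x70, %rsp
//   otherwise:        lea -StackSize(%rsp), %r11
//                     cmp %fs:0x70, %r11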
2555
2556 // Read the limit of the current stacklet from the stack_guard location.
2557 if (Is64Bit) {
2558 if (STI.isTargetLinux()) {
2559 TlsReg = X86::FS;
2560 TlsOffset = IsLP64 ? 0x70 : 0x40;
2561 } else if (STI.isTargetDarwin()) {
2562 TlsReg = X86::GS;
2563 TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
2564 } else if (STI.isTargetWin64()) {
2565 TlsReg = X86::GS;
2566 TlsOffset = 0x28; // pvArbitrary, reserved for application use
2567 } else if (STI.isTargetFreeBSD()) {
2568 TlsReg = X86::FS;
2569 TlsOffset = 0x18;
2570 } else if (STI.isTargetDragonFly()) {
2571 TlsReg = X86::FS;
2572 TlsOffset = 0x20; // use tls_tcb.tcb_segstack
2573 } else {
2574 report_fatal_error("Segmented stacks not supported on this platform.");
2575 }
2576
2577 if (CompareStackPointer)
2578 ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
2579 else
2580 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
2581 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2582
2583 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
2584 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
2585 } else {
2586 if (STI.isTargetLinux()) {
2587 TlsReg = X86::GS;
2588 TlsOffset = 0x30;
2589 } else if (STI.isTargetDarwin()) {
2590 TlsReg = X86::GS;
2591 TlsOffset = 0x48 + 90*4;
2592 } else if (STI.isTargetWin32()) {
2593 TlsReg = X86::FS;
2594 TlsOffset = 0x14; // pvArbitrary, reserved for application use
2595 } else if (STI.isTargetDragonFly()) {
2596 TlsReg = X86::FS;
2597 TlsOffset = 0x10; // use tls_tcb.tcb_segstack
2598 } else if (STI.isTargetFreeBSD()) {
2599 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
2600 } else {
2601 report_fatal_error("Segmented stacks not supported on this platform.");
2602 }
2603
2604 if (CompareStackPointer)
2605 ScratchReg = X86::ESP;
2606 else
2607 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
2608 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2609
2610 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
2611 STI.isTargetDragonFly()) {
2612 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
2613 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
2614 } else if (STI.isTargetDarwin()) {
2615
2616 // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
2617 unsigned ScratchReg2;
2618 bool SaveScratch2;
2619 if (CompareStackPointer) {
2620 // The primary scratch register is available for holding the TLS offset.
2621 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2622 SaveScratch2 = false;
2623 } else {
2624 // Need to use a second register to hold the TLS offset
2625 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
2626
2627 // Unfortunately, with fastcc the second scratch register may hold an
2628 // argument.
2629 SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
2630 }
2631
2632 // If Scratch2 is live-in then it needs to be saved.
2633 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
2634 "Scratch register is live-in and not saved");
2635
2636 if (SaveScratch2)
2637 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
2638 .addReg(ScratchReg2, RegState::Kill);
2639
2640 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
2641 .addImm(TlsOffset);
2642 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
2643 .addReg(ScratchReg)
2644 .addReg(ScratchReg2).addImm(1).addReg(0)
2645 .addImm(0)
2646 .addReg(TlsReg);
2647
2648 if (SaveScratch2)
2649 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
2650 }
2651 }
2652
2653 // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
2654 // It jumps to normal execution of the function body.
2655 BuildMI(checkMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_A);
2656
2657 // On 32-bit we first push the argument size and then the frame size. On 64
2658 // bit, we pass the stack frame size in r10 and the argument size in r11.
2659 if (Is64Bit) {
2660 // Functions with nested arguments use R10, so it needs to be saved across
2661 // the call to __morestack.
2662
2663 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
2664 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
2665 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
2666 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
2667 const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
2668
2669 if (IsNested)
2670 BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
2671
2672 BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
2673 .addImm(StackSize);
2674 BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
2675 .addImm(X86FI->getArgumentStackSize());
2676 } else {
2677 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2678 .addImm(X86FI->getArgumentStackSize());
2679 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2680 .addImm(StackSize);
2681 }
2682
2683 // __morestack is in libgcc
2684 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
2685 // Under the large code model, we cannot assume that __morestack lives
2686 // within 2^31 bytes of the call site, so we cannot use pc-relative
2687 // addressing. We cannot perform the call via a temporary register,
2688 // as the rax register may be used to store the static chain, and all
2689 // other suitable registers may be either callee-save or used for
2690 // parameter passing. We cannot use the stack at this point either
2691 // because __morestack manipulates the stack directly.
2692 //
2693 // To avoid these issues, perform an indirect call via a read-only memory
2694 // location containing the address.
2695 //
2696 // This solution is not perfect, as it assumes that the .rodata section
2697 // is laid out within 2^31 bytes of each function body, but this seems
2698 // to be sufficient for JIT.
2699 // FIXME: Add retpoline support and remove the error here.
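// The emitted call is therefore of the form (illustrative AT&T syntax):
//   callq *__morestack_addr(%rip)
// with __morestack_addr being a read-only slot, emitted elsewhere by the
// backend, that holds the address of __morestack.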
2700 if (STI.useIndirectThunkCalls())
2701 report_fatal_error("Emitting morestack calls on 64-bit with the large "
2702 "code model and thunks not yet implemented.");
2703 BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
2704 .addReg(X86::RIP)
2705 .addImm(0)
2706 .addReg(0)
2707 .addExternalSymbol("__morestack_addr")
2708 .addReg(0);
2709 MF.getMMI().setUsesMorestackAddr(true);
2710 } else {
2711 if (Is64Bit)
2712 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
2713 .addExternalSymbol("__morestack");
2714 else
2715 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
2716 .addExternalSymbol("__morestack");
2717 }
2718
2719 if (IsNested)
2720 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
2721 else
2722 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
2723
2724 allocMBB->addSuccessor(&PrologueMBB);
2725
2726 checkMBB->addSuccessor(allocMBB, BranchProbability::getZero());
2727 checkMBB->addSuccessor(&PrologueMBB, BranchProbability::getOne());
2728
2729 #ifdef EXPENSIVE_CHECKS
2730 MF.verify();
2731 #endif
2732 }
2733
2734 /// Lookup an ERTS parameter in the !hipe.literals named metadata node.
2735 /// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
2736 /// to fields it needs, through a named metadata node "hipe.literals" containing
2737 /// name-value pairs.
getHiPELiteral(NamedMDNode * HiPELiteralsMD,const StringRef LiteralName)2738 static unsigned getHiPELiteral(
2739 NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) {
2740 for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
2741 MDNode *Node = HiPELiteralsMD->getOperand(i);
2742 if (Node->getNumOperands() != 2) continue;
2743 MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
2744 ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
2745 if (!NodeName || !NodeVal) continue;
2746 ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
2747 if (ValConst && NodeName->getString() == LiteralName) {
2748 return ValConst->getZExtValue();
2749 }
2750 }
2751
2752 report_fatal_error("HiPE literal " + LiteralName
2753 + " required but not provided");
2754 }
2755
2756 // Return true if there are no non-ehpad successors to MBB and there are no
2757 // non-meta instructions between MBBI and MBB.end().
blockEndIsUnreachable(const MachineBasicBlock & MBB,MachineBasicBlock::const_iterator MBBI)2758 static bool blockEndIsUnreachable(const MachineBasicBlock &MBB,
2759 MachineBasicBlock::const_iterator MBBI) {
2760 return std::all_of(
2761 MBB.succ_begin(), MBB.succ_end(),
2762 [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) &&
2763 std::all_of(MBBI, MBB.end(), [](const MachineInstr &MI) {
2764 return MI.isMetaInstruction();
2765 });
2766 }
2767
2768 /// Erlang programs may need a special prologue to handle the stack size they
2769 /// might need at runtime. That is because Erlang/OTP does not implement a C
2770 /// stack but uses a custom hybrid stack/heap architecture.
2771 /// (for more information see Eric Stenman's Ph.D. thesis:
2772 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
2773 ///
2774 /// CheckStack:
2775 /// temp0 = sp - MaxStack
2776 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
2777 /// OldStart:
2778 /// ...
2779 /// IncStack:
2780 /// call inc_stack # doubles the stack space
2781 /// temp0 = sp - MaxStack
2782 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
adjustForHiPEPrologue(MachineFunction & MF,MachineBasicBlock & PrologueMBB) const2783 void X86FrameLowering::adjustForHiPEPrologue(
2784 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
2785 MachineFrameInfo &MFI = MF.getFrameInfo();
2786 DebugLoc DL;
2787
2788 // To support shrink-wrapping we would need to insert the new blocks
2789 // at the right place and update the branches to PrologueMBB.
2790 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
2791
2792 // HiPE-specific values
2793 NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule()
2794 ->getNamedMetadata("hipe.literals");
2795 if (!HiPELiteralsMD)
2796 report_fatal_error(
2797 "Can't generate HiPE prologue without runtime parameters");
2798   const unsigned HipeLeafWords =
2799       getHiPELiteral(HiPELiteralsMD,
2800                      Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
2801 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
2802 const unsigned Guaranteed = HipeLeafWords * SlotSize;
2803 unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ?
2804 MF.getFunction().arg_size() - CCRegisteredArgs : 0;
2805   unsigned MaxStack = MFI.getStackSize() + CallerStkArity * SlotSize + SlotSize;
2806
2807 assert(STI.isTargetLinux() &&
2808 "HiPE prologue is only supported on Linux operating systems.");
2809
2810 // Compute the largest caller's frame that is needed to fit the callees'
2811 // frames. This 'MaxStack' is computed from:
2812 //
2813 // a) the fixed frame size, which is the space needed for all spilled temps,
2814 // b) outgoing on-stack parameter areas, and
2815 // c) the minimum stack space this function needs to make available for the
2816 // functions it calls (a tunable ABI property).
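  // As a worked example (illustrative): on x86-64, CCRegisteredArgs is 6, so a
  // caller with 8 formal arguments has CallerStkArity == 2 and contributes
  // 2 * SlotSize of on-stack parameter area (plus one extra slot) to MaxStack
  // on top of MFI.getStackSize().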
2817 if (MFI.hasCalls()) {
2818 unsigned MoreStackForCalls = 0;
2819
2820 for (auto &MBB : MF) {
2821 for (auto &MI : MBB) {
2822 if (!MI.isCall())
2823 continue;
2824
2825 // Get callee operand.
2826 const MachineOperand &MO = MI.getOperand(0);
2827
2828         // Only take into account global function calls (no closures etc.).
2829 if (!MO.isGlobal())
2830 continue;
2831
2832 const Function *F = dyn_cast<Function>(MO.getGlobal());
2833 if (!F)
2834 continue;
2835
2836         // Do not update 'MaxStack' for primitive and built-in functions
2837         // (encoded with names either containing "erlang."/"bif_", or
2838         // containing neither a "." (as a regular <Module>.<Function>.<Arity>
2839         // would) nor an "_" (as the BIF "suspend_0" would)), as they are
2840         // executed on another stack.
2841 if (F->getName().find("erlang.") != StringRef::npos ||
2842 F->getName().find("bif_") != StringRef::npos ||
2843 F->getName().find_first_of("._") == StringRef::npos)
2844 continue;
2845
2846 unsigned CalleeStkArity =
2847 F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
2848 if (HipeLeafWords - 1 > CalleeStkArity)
2849 MoreStackForCalls = std::max(MoreStackForCalls,
2850 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
2851 }
2852 }
2853 MaxStack += MoreStackForCalls;
2854 }
2855
2856   // If the stack frame needed is larger than the guaranteed amount, runtime
2857   // checks and calls to the "inc_stack_0" BIF are inserted in the prologue.
2858 if (MaxStack > Guaranteed) {
2859 MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
2860 MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
2861
2862 for (const auto &LI : PrologueMBB.liveins()) {
2863 stackCheckMBB->addLiveIn(LI);
2864 incStackMBB->addLiveIn(LI);
2865 }
2866
2867 MF.push_front(incStackMBB);
2868 MF.push_front(stackCheckMBB);
2869
2870 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
2871 unsigned LEAop, CMPop, CALLop;
2872 SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
2873 if (Is64Bit) {
2874 SPReg = X86::RSP;
2875 PReg = X86::RBP;
2876 LEAop = X86::LEA64r;
2877 CMPop = X86::CMP64rm;
2878 CALLop = X86::CALL64pcrel32;
2879 } else {
2880 SPReg = X86::ESP;
2881 PReg = X86::EBP;
2882 LEAop = X86::LEA32r;
2883 CMPop = X86::CMP32rm;
2884 CALLop = X86::CALLpcrel32;
2885 }
2886
2887 ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2888 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
2889 "HiPE prologue scratch register is live-in");
2890
2891 // Create new MBB for StackCheck:
2892 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
2893 SPReg, false, -MaxStack);
2894     // SPLimitOffset is in a fixed heap location (pointed to by BP).
2895 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
2896 .addReg(ScratchReg), PReg, false, SPLimitOffset);
2897 BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_AE);
2898
2899 // Create new MBB for IncStack:
2900     BuildMI(incStackMBB, DL, TII.get(CALLop))
2901         .addExternalSymbol("inc_stack_0");
2902 addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
2903 SPReg, false, -MaxStack);
2904 addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
2905 .addReg(ScratchReg), PReg, false, SPLimitOffset);
2906 BuildMI(incStackMBB, DL, TII.get(X86::JCC_1)).addMBB(incStackMBB).addImm(X86::COND_LE);
2907
2908 stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
2909 stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
2910 incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
2911 incStackMBB->addSuccessor(incStackMBB, {1, 100});
2912 }
2913 #ifdef EXPENSIVE_CHECKS
2914 MF.verify();
2915 #endif
2916 }
2917
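// Replace a small SP adjustment that directly follows a call with one or two
// pops into dead, call-clobbered registers. For example, under minsize on a
// 32-bit target (register choice is illustrative, taken from
// GR32_NOREX_NOSP):
//
//   calll _foo                  calll _foo
//   addl  $8, %esp      ==>     popl  %ecx
//                               popl  %edx
//
// The pops are one byte each, versus three bytes for the "addl", and leave
// the same net SP value.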
2918 bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
2919 MachineBasicBlock::iterator MBBI,
2920 const DebugLoc &DL,
2921 int Offset) const {
2922
2923 if (Offset <= 0)
2924 return false;
2925
2926 if (Offset % SlotSize)
2927 return false;
2928
2929 int NumPops = Offset / SlotSize;
2930 // This is only worth it if we have at most 2 pops.
2931 if (NumPops != 1 && NumPops != 2)
2932 return false;
2933
2934 // Handle only the trivial case where the adjustment directly follows
2935 // a call. This is the most common one, anyway.
2936 if (MBBI == MBB.begin())
2937 return false;
2938 MachineBasicBlock::iterator Prev = std::prev(MBBI);
2939 if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
2940 return false;
2941
2942 unsigned Regs[2];
2943 unsigned FoundRegs = 0;
2944
2945 auto &MRI = MBB.getParent()->getRegInfo();
2946 auto RegMask = Prev->getOperand(1);
2947
2948 auto &RegClass =
2949 Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
2950 // Try to find up to NumPops free registers.
2951 for (auto Candidate : RegClass) {
2952
2953 // Poor man's liveness:
2954 // Since we're immediately after a call, any register that is clobbered
2955 // by the call and not defined by it can be considered dead.
2956 if (!RegMask.clobbersPhysReg(Candidate))
2957 continue;
2958
2959 // Don't clobber reserved registers
2960 if (MRI.isReserved(Candidate))
2961 continue;
2962
2963 bool IsDef = false;
2964 for (const MachineOperand &MO : Prev->implicit_operands()) {
2965 if (MO.isReg() && MO.isDef() &&
2966 TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
2967 IsDef = true;
2968 break;
2969 }
2970 }
2971
2972 if (IsDef)
2973 continue;
2974
2975 Regs[FoundRegs++] = Candidate;
2976 if (FoundRegs == (unsigned)NumPops)
2977 break;
2978 }
2979
2980 if (FoundRegs == 0)
2981 return false;
2982
2983 // If we found only one free register, but need two, reuse the same one twice.
2984 while (FoundRegs < (unsigned)NumPops)
2985 Regs[FoundRegs++] = Regs[0];
2986
2987 for (int i = 0; i < NumPops; ++i)
2988 BuildMI(MBB, MBBI, DL,
2989 TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);
2990
2991 return true;
2992 }
2993
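// A rough sketch (illustrative amounts) of the lowering performed below for a
// non-reserved call frame on 32-bit: the frame-setup/destroy pseudos around a
// call become explicit SP arithmetic, e.g.
//
//   ADJCALLSTACKDOWN32 16, 0, 0            subl $16, %esp
//   ...argument stores...           ==>    ...argument stores...
//   CALLpcrel32 @f                         calll f
//   ADJCALLSTACKUP32 16, 0                 addl $16, %esp
//
// subject to the merging, pop, and CFI refinements implemented below.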
2994 MachineBasicBlock::iterator X86FrameLowering::
2995 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
2996 MachineBasicBlock::iterator I) const {
2997 bool reserveCallFrame = hasReservedCallFrame(MF);
2998 unsigned Opcode = I->getOpcode();
2999 bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
3000 DebugLoc DL = I->getDebugLoc();
3001 uint64_t Amount = TII.getFrameSize(*I);
3002 uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;
3003 I = MBB.erase(I);
3004 auto InsertPos = skipDebugInstructionsForward(I, MBB.end());
3005
3006 // Try to avoid emitting dead SP adjustments if the block end is unreachable,
3007 // typically because the function is marked noreturn (abort, throw,
3008 // assert_fail, etc).
3009 if (isDestroy && blockEndIsUnreachable(MBB, I))
3010 return I;
3011
3012 if (!reserveCallFrame) {
3013     // If the stack pointer can be changed after prologue, turn the
3014     // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
3015     // adjcallstackup instruction into 'add ESP, <amt>'.
3016
3017 // We need to keep the stack aligned properly. To do this, we round the
3018 // amount of space needed for the outgoing arguments up to the next
3019 // alignment boundary.
3020 Amount = alignTo(Amount, getStackAlign());
3021
3022 const Function &F = MF.getFunction();
3023 bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
3024 bool DwarfCFI = !WindowsCFI && MF.needsFrameMoves();
3025
3026 // If we have any exception handlers in this function, and we adjust
3027 // the SP before calls, we may need to indicate this to the unwinder
3028     // using GNU_ARGS_SIZE. Note that this may be necessary even when
3029     // Amount == 0, because a preceding call sequence may have set a non-zero
3030     // GNU_ARGS_SIZE.
3031 // TODO: We don't need to reset this between subsequent functions,
3032 // if it didn't change.
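    // In the emitted assembly this appears as a ".cfi_gnu_args_size <N>"
    // directive, e.g. ".cfi_gnu_args_size 16" for a 16-byte outgoing argument
    // area (value illustrative).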
3033 bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();
3034
3035 if (HasDwarfEHHandlers && !isDestroy &&
3036 MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
3037 BuildCFI(MBB, InsertPos, DL,
3038 MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
3039
3040 if (Amount == 0)
3041 return I;
3042
3043     // Factor out the amount that gets handled inside the sequence
3044     // (pushes of arguments for frame setup, callee pops for frame destroy).
3045 Amount -= InternalAmt;
3046
3047 // TODO: This is needed only if we require precise CFA.
3048 // If this is a callee-pop calling convention, emit a CFA adjust for
3049 // the amount the callee popped.
3050 if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
3051 BuildCFI(MBB, InsertPos, DL,
3052 MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
3053
3054 // Add Amount to SP to destroy a frame, or subtract to setup.
3055 int64_t StackAdjustment = isDestroy ? Amount : -Amount;
3056
3057 if (StackAdjustment) {
3058 // Merge with any previous or following adjustment instruction. Note: the
3059 // instructions merged with here do not have CFI, so their stack
3060 // adjustments do not feed into CfaAdjustment.
3061 StackAdjustment += mergeSPUpdates(MBB, InsertPos, true);
3062 StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
3063
3064 if (StackAdjustment) {
3065 if (!(F.hasMinSize() &&
3066 adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
3067 BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
3068 /*InEpilogue=*/false);
3069 }
3070 }
3071
3072 if (DwarfCFI && !hasFP(MF)) {
3073 // If we don't have FP, but need to generate unwind information,
3074 // we need to set the correct CFA offset after the stack adjustment.
3075 // How much we adjust the CFA offset depends on whether we're emitting
3076 // CFI only for EH purposes or for debugging. EH only requires the CFA
3077 // offset to be correct at each call site, while for debugging we want
3078 // it to be more precise.
3079
3080 int64_t CfaAdjustment = -StackAdjustment;
3081 // TODO: When not using precise CFA, we also need to adjust for the
3082 // InternalAmt here.
3083 if (CfaAdjustment) {
3084 BuildCFI(MBB, InsertPos, DL,
3085 MCCFIInstruction::createAdjustCfaOffset(nullptr,
3086 CfaAdjustment));
3087 }
3088 }
3089
3090 return I;
3091 }
3092
3093 if (InternalAmt) {
3094 MachineBasicBlock::iterator CI = I;
3095 MachineBasicBlock::iterator B = MBB.begin();
3096 while (CI != B && !std::prev(CI)->isCall())
3097 --CI;
3098 BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
3099 }
3100
3101 return I;
3102 }
3103
3104 bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
3105 assert(MBB.getParent() && "Block is not attached to a function!");
3106 const MachineFunction &MF = *MBB.getParent();
3107 return !TRI->needsStackRealignment(MF) || !MBB.isLiveIn(X86::EFLAGS);
3108 }
3109
3110 bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
3111 assert(MBB.getParent() && "Block is not attached to a function!");
3112
3113   // Win64 has strict requirements in terms of epilogues, and we are not
3114   // taking a chance at messing with them.
3115   // That is, unless this block is already an exit block, we can't use
3116   // it as an epilogue.
3117 if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
3118 return false;
3119
3120 if (canUseLEAForSPInEpilogue(*MBB.getParent()))
3121 return true;
3122
3123   // If we cannot use LEA to adjust SP, we may need to use ADD, which
3124   // clobbers EFLAGS. Check that we do not need to preserve the flags;
3125   // otherwise, conservatively assume it is not safe to insert the
3126   // epilogue here.
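  // (For instance, "leal -4(%ebp), %esp" restores SP without touching EFLAGS,
  // while "addl $N, %esp" clobbers them.)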
3127 return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
3128 }
3129
3130 bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
3131 // If we may need to emit frameless compact unwind information, give
3132 // up as this is currently broken: PR25614.
3133 return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF)) &&
3134 // The lowering of segmented stack and HiPE only support entry blocks
3135 // as prologue blocks: PR26107.
3136 // This limitation may be lifted if we fix:
3137 // - adjustForSegmentedStacks
3138 // - adjustForHiPEPrologue
3139 MF.getFunction().getCallingConv() != CallingConv::HiPE &&
3140 !MF.shouldSplitStack();
3141 }
3142
3143 MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
3144 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
3145 const DebugLoc &DL, bool RestoreSP) const {
3146 assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
3147 assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
3148 assert(STI.is32Bit() && !Uses64BitFramePtr &&
3149 "restoring EBP/ESI on non-32-bit target");
3150
3151 MachineFunction &MF = *MBB.getParent();
3152 Register FramePtr = TRI->getFrameRegister(MF);
3153 Register BasePtr = TRI->getBaseRegister();
3154 WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
3155 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
3156 MachineFrameInfo &MFI = MF.getFrameInfo();
3157
3158 // FIXME: Don't set FrameSetup flag in catchret case.
3159
3160 int FI = FuncInfo.EHRegNodeFrameIndex;
3161 int EHRegSize = MFI.getObjectSize(FI);
3162
3163 if (RestoreSP) {
3164 // MOV32rm -EHRegSize(%ebp), %esp
3165 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
3166 X86::EBP, true, -EHRegSize)
3167 .setMIFlag(MachineInstr::FrameSetup);
3168 }
3169
3170 Register UsedReg;
3171 int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg);
3172 int EndOffset = -EHRegOffset - EHRegSize;
3173 FuncInfo.EHRegNodeEndOffset = EndOffset;
3174
3175 if (UsedReg == FramePtr) {
3176 // ADD $offset, %ebp
3177 unsigned ADDri = getADDriOpcode(false, EndOffset);
3178 BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
3179 .addReg(FramePtr)
3180 .addImm(EndOffset)
3181 .setMIFlag(MachineInstr::FrameSetup)
3182 ->getOperand(3)
3183 .setIsDead();
3184 assert(EndOffset >= 0 &&
3185 "end of registration object above normal EBP position!");
3186 } else if (UsedReg == BasePtr) {
3187 // LEA offset(%ebp), %esi
3188 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
3189 FramePtr, false, EndOffset)
3190 .setMIFlag(MachineInstr::FrameSetup);
3191 // MOV32rm SavedEBPOffset(%esi), %ebp
3192 assert(X86FI->getHasSEHFramePtrSave());
3193 int Offset =
3194 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg);
3195 assert(UsedReg == BasePtr);
3196 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
3197 UsedReg, true, Offset)
3198 .setMIFlag(MachineInstr::FrameSetup);
3199 } else {
3200 llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
3201 }
3202 return MBBI;
3203 }
3204
3205 int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
3206 return TRI->getSlotSize();
3207 }
3208
3209 Register
3210 X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
3211 return TRI->getDwarfRegNum(StackPtr, true);
3212 }
3213
3214 namespace {
3215 // Struct used by orderFrameObjects to help sort the stack objects.
3216 struct X86FrameSortingObject {
3217 bool IsValid = false; // true if we care about this Object.
3218 unsigned ObjectIndex = 0; // Index of Object into MFI list.
3219 unsigned ObjectSize = 0; // Size of Object in bytes.
3220 Align ObjectAlignment = Align(1); // Alignment of Object in bytes.
3221 unsigned ObjectNumUses = 0; // Object static number of uses.
3222 };
3223
3224 // The comparison function we use for std::sort to order our local
3225 // stack symbols. The current algorithm is to use an estimated
3226 // "density". This takes into consideration the size and number of
3227 // uses each object has in order to roughly minimize code size.
3228 // So, for example, a 16-byte object that is referenced 5 times will get
3229 // higher priority than four 4-byte objects referenced once each.
3230 // It's not perfect and we may be able to squeeze a few more bytes out of
3231 // it (for example : 0(esp) requires fewer bytes, symbols allocated at the
3232 // fringe end can have special consideration, given their size is less
3233 // important, etc.), but the algorithmic complexity grows too much to be
3234 // worth the extra gains we get. This gets us pretty close.
3235 // The final order leaves us with objects with highest priority going
3236 // at the end of our list.
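// Worked example (numbers illustrative): for A = {5 uses, 16 bytes} and
// B = {1 use, 4 bytes}, DensityAScaled = 5 * 4 = 20 and
// DensityBScaled = 1 * 16 = 16, so the comparator orders B before A and A
// ends up later in the list, i.e. with higher priority.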
3237 struct X86FrameSortingComparator {
3238   inline bool operator()(const X86FrameSortingObject &A,
3239 const X86FrameSortingObject &B) {
3240 uint64_t DensityAScaled, DensityBScaled;
3241
3242 // For consistency in our comparison, all invalid objects are placed
3243 // at the end. This also allows us to stop walking when we hit the
3244 // first invalid item after it's all sorted.
3245 if (!A.IsValid)
3246 return false;
3247 if (!B.IsValid)
3248 return true;
3249
3250 // The density is calculated by doing :
3251 // (double)DensityA = A.ObjectNumUses / A.ObjectSize
3252 // (double)DensityB = B.ObjectNumUses / B.ObjectSize
3253 // Since this approach may cause inconsistencies in
3254 // the floating point <, >, == comparisons, depending on the floating
3255 // point model with which the compiler was built, we're going
3256 // to scale both sides by multiplying with
3257 // A.ObjectSize * B.ObjectSize. This ends up factoring away
3258 // the division and, with it, the need for any floating point
3259 // arithmetic.
3260 DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
3261 static_cast<uint64_t>(B.ObjectSize);
3262 DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
3263 static_cast<uint64_t>(A.ObjectSize);
3264
3265 // If the two densities are equal, prioritize highest alignment
3266 // objects. This allows for similar alignment objects
3267 // to be packed together (given the same density).
3268 // There's room for improvement here, also, since we can pack
3269 // similar alignment (different density) objects next to each
3270 // other to save padding. This will also require further
3271 // complexity/iterations, and the overall gain isn't worth it,
3272 // in general. Something to keep in mind, though.
3273 if (DensityAScaled == DensityBScaled)
3274 return A.ObjectAlignment < B.ObjectAlignment;
3275
3276 return DensityAScaled < DensityBScaled;
3277 }
3278 };
3279 } // namespace
3280
3281 // Order the symbols in the local stack.
3282 // We want to place the local stack objects in some sort of sensible order.
3283 // The heuristic we use is to try and pack them according to static number
3284 // of uses and size of object in order to minimize code size.
3285 void X86FrameLowering::orderFrameObjects(
3286 const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
3287 const MachineFrameInfo &MFI = MF.getFrameInfo();
3288
3289 // Don't waste time if there's nothing to do.
3290 if (ObjectsToAllocate.empty())
3291 return;
3292
3293 // Create an array of all MFI objects. We won't need all of these
3294 // objects, but we're going to create a full array of them to make
3295 // it easier to index into when we're counting "uses" down below.
3296 // We want to be able to easily/cheaply access an object by simply
3297 // indexing into it, instead of having to search for it every time.
3298 std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());
3299
3300 // Walk the objects we care about and mark them as such in our working
3301 // struct.
3302 for (auto &Obj : ObjectsToAllocate) {
3303 SortingObjects[Obj].IsValid = true;
3304 SortingObjects[Obj].ObjectIndex = Obj;
3305 SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj);
3306 // Set the size.
3307 int ObjectSize = MFI.getObjectSize(Obj);
3308 if (ObjectSize == 0)
3309 // Variable size. Just use 4.
3310 SortingObjects[Obj].ObjectSize = 4;
3311 else
3312 SortingObjects[Obj].ObjectSize = ObjectSize;
3313 }
3314
3315 // Count the number of uses for each object.
3316 for (auto &MBB : MF) {
3317 for (auto &MI : MBB) {
3318 if (MI.isDebugInstr())
3319 continue;
3320 for (const MachineOperand &MO : MI.operands()) {
3321 // Check to see if it's a local stack symbol.
3322 if (!MO.isFI())
3323 continue;
3324 int Index = MO.getIndex();
3325 // Check to see if it falls within our range, and is tagged
3326 // to require ordering.
3327 if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
3328 SortingObjects[Index].IsValid)
3329 SortingObjects[Index].ObjectNumUses++;
3330 }
3331 }
3332 }
3333
3334   // Sort the objects using X86FrameSortingComparator (see its comment for
3335   // more info).
3336 llvm::stable_sort(SortingObjects, X86FrameSortingComparator());
3337
3338   // Now modify the original list to represent the final order that we
3339   // want. The order will depend on whether we're going to access them
3340   // from the stack pointer or the frame pointer. For SP, the list should
3341   // end with the objects we want at the smallest offsets.
3342   // For FP, it should be flipped.
3343 int i = 0;
3344 for (auto &Obj : SortingObjects) {
3345 // All invalid items are sorted at the end, so it's safe to stop.
3346 if (!Obj.IsValid)
3347 break;
3348 ObjectsToAllocate[i++] = Obj.ObjectIndex;
3349 }
3350
3351 // Flip it if we're accessing off of the FP.
3352 if (!TRI->needsStackRealignment(MF) && hasFP(MF))
3353 std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
3354 }
3355
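// For example (numbers illustrative): on x86-64 with two callee-saved GPRs
// pushed in addition to RBP (16 bytes) and a 32-byte funclet frame, the
// parent frame offset is 16 + 8 + 16 + 32 = 72 bytes.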
3357 unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
3358 // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
3359 unsigned Offset = 16;
3360 // RBP is immediately pushed.
3361 Offset += SlotSize;
3362 // All callee-saved registers are then pushed.
3363 Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
3364 // Every funclet allocates enough stack space for the largest outgoing call.
3365 Offset += getWinEHFuncletFrameSize(MF);
3366 return Offset;
3367 }
3368
3369 void X86FrameLowering::processFunctionBeforeFrameFinalized(
3370 MachineFunction &MF, RegScavenger *RS) const {
3371 // Mark the function as not having WinCFI. We will set it back to true in
3372 // emitPrologue if it gets called and emits CFI.
3373 MF.setHasWinCFI(false);
3374
3375 // If this function isn't doing Win64-style C++ EH, we don't need to do
3376 // anything.
3377 const Function &F = MF.getFunction();
3378 if (!STI.is64Bit() || !MF.hasEHFunclets() ||
3379 classifyEHPersonality(F.getPersonalityFn()) != EHPersonality::MSVC_CXX)
3380 return;
3381
3382 // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
3383 // relative to RSP after the prologue. Find the offset of the last fixed
3384 // object, so that we can allocate a slot immediately following it. If there
3385 // were no fixed objects, use offset -SlotSize, which is immediately after the
3386 // return address. Fixed objects have negative frame indices.
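  // For example, if there are no other fixed objects, MinFixedObjOffset stays
  // at -SlotSize (-8), and UnwindHelp is then allocated at offset -16,
  // immediately below the return address slot.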
3387 MachineFrameInfo &MFI = MF.getFrameInfo();
3388 WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
3389 int64_t MinFixedObjOffset = -SlotSize;
3390 for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
3391 MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));
3392
3393 for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
3394 for (WinEHHandlerType &H : TBME.HandlerArray) {
3395 int FrameIndex = H.CatchObj.FrameIndex;
3396 if (FrameIndex != INT_MAX) {
3397 // Ensure alignment.
3398 unsigned Align = MFI.getObjectAlign(FrameIndex).value();
3399 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
3400 MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
3401 MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
3402 }
3403 }
3404 }
3405
3406 // Ensure alignment.
3407 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
3408 int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
3409 int UnwindHelpFI =
3410 MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false);
3411 EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
3412
3413 // Store -2 into UnwindHelp on function entry. We have to scan forwards past
3414 // other frame setup instructions.
3415 MachineBasicBlock &MBB = MF.front();
3416 auto MBBI = MBB.begin();
3417 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
3418 ++MBBI;
3419
3420 DebugLoc DL = MBB.findDebugLoc(MBBI);
3421 addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
3422 UnwindHelpFI)
3423 .addImm(-2);
3424 }
3425
3426 void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced(
3427 MachineFunction &MF, RegScavenger *RS) const {
3428 if (STI.is32Bit() && MF.hasEHFunclets())
3429 restoreWinEHStackPointersInParent(MF);
3430 }
3431
3432 void X86FrameLowering::restoreWinEHStackPointersInParent(
3433 MachineFunction &MF) const {
3434   // 32-bit functions have to restore stack pointers when control is
3435   // transferred back to the parent function. The blocks needing a restore
3436   // are identified as EH pads that are not funclet entries.
3437 bool IsSEH = isAsynchronousEHPersonality(
3438 classifyEHPersonality(MF.getFunction().getPersonalityFn()));
3439 for (MachineBasicBlock &MBB : MF) {
3440 bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry();
3441 if (NeedsRestore)
3442 restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(),
3443 /*RestoreSP=*/IsSEH);
3444 }
3445 }
3446