//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>

#define DEBUG_TYPE "x86-fl"

STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");
STATISTIC(NumFrameExtraProbe,
          "Number of extra stack probes generated in prologue");

using namespace llvm;

X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   MaybeAlign StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride.valueOrOne(),
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects() &&
         !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences() &&
         !MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool
X86FrameLowering::canSimplifyCallFramePseudos(const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) ||
         MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
         (hasFP(MF) && !TRI->hasStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}

// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function. Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the title - it resolves callframesetup/destroy
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool
X86FrameLowering::needsFrameIndexResolution(const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects() ||
         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
          MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
          MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
          MFI.hasStackMap() || MFI.hasPatchPoint() ||
          MFI.hasCopyImplyingStackAdjustment());
}

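// Pick the SUB-with-immediate opcode for a stack adjustment, preferring the
// shorter imm8 encoding when the value fits. For example, Imm = 16 selects
// SUB64ri8 in LP64 mode, while Imm = 4096 needs the imm32 form SUB64ri32.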
static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

static unsigned getSUBrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::SUB64rr : X86::SUB32rr;
}

static unsigned getADDrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::ADD64rr : X86::ADD32rr;
}

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::AND64ri8;
    return X86::AND64ri32;
  }
  if (isInt<8>(Imm))
    return X86::AND32ri8;
  return X86::AND32ri;
}

static unsigned getLEArOpcode(bool IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}

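// Return true if any alias of (R)AX (RAX, EAX, AX, AH or AL) is live into
// MBB, i.e. the block expects a value in the accumulator on entry.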
static bool isEAXLiveIn(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
    unsigned Reg = RegMask.PhysReg;

    if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

/// Check if the flags need to be preserved before the terminators.
/// This would be the case if the eflags is live-in of the region
/// composed by the terminators, or live-out of that region without
/// being defined by a terminator.
static bool
flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators()) {
    bool BreakNext = false;
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (Reg != X86::EFLAGS)
        continue;

      // This terminator needs an eflags that is not defined
      // by a previous terminator:
      // EFLAGS is live-in of the region composed by the terminators.
      if (!MO.isDef())
        return true;
      // This terminator defines the eflags, i.e., we don't need to preserve
      // it. However, we still need to check this specific terminator does not
      // read a live-in value.
      BreakNext = true;
    }
    // We found a definition of the eflags, no need to preserve them.
    if (BreakNext)
      return false;
  }

  // None of the terminators use or define the eflags.
  // Check if they are live-out, that would imply we need to preserve them.
  for (const MachineBasicBlock *Succ : MBB.successors())
    if (Succ->isLiveIn(X86::EFLAGS))
      return true;

  return false;
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &DL,
                                    int64_t NumBytes, bool InEpilogue) const {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  MachineInstr::MIFlag Flag =
      isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;

  uint64_t Chunk = (1LL << 31) - 1;

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // It's ok to not take into account large chunks when probing, as the
  // allocation is split in smaller chunks anyway.
  if (EmitInlineStackProbe && !InEpilogue) {

    // This pseudo-instruction is going to be expanded, potentially using a
    // loop, by inlineStackProbe().
    BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)).addImm(Offset);
    return;
  } else if (Offset > Chunk) {
    // Rather than emit a long series of instructions for large offsets,
    // load the offset into a register and do one sub/add
    unsigned Reg = 0;
    unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);

    if (isSub && !isEAXLiveIn(MBB))
      Reg = Rax;
    else
      Reg = TRI->findDeadCallerSavedReg(MBB, MBBI);

    unsigned MovRIOpc = Is64Bit ? X86::MOV64ri : X86::MOV32ri;
    unsigned AddSubRROpc =
        isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
    if (Reg) {
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Reg)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
                             .addReg(StackPtr)
                             .addReg(Reg);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      return;
    } else if (Offset > 8 * Chunk) {
      // If we would need more than 8 add or sub instructions (a >16GB stack
      // frame), it's worth spilling RAX to materialize this immediate.
      //   pushq %rax
      //   movabsq +-$Offset+-SlotSize, %rax
      //   addq %rsp, %rax
      //   xchg %rax, (%rsp)
      //   movq (%rsp), %rsp
      assert(Is64Bit && "can't have 32-bit 16GB stack frame");
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
          .addReg(Rax, RegState::Kill)
          .setMIFlag(Flag);
      // Subtract is not commutative, so negate the offset and always use add.
      // Subtract 8 less and add 8 more to account for the PUSH we just did.
      if (isSub)
        Offset = -(Offset - SlotSize);
      else
        Offset = Offset + SlotSize;
      BuildMI(MBB, MBBI, DL, TII.get(MovRIOpc), Rax)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
                             .addReg(Rax)
                             .addReg(StackPtr);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      // Exchange the new SP in RAX with the top of the stack.
      addRegOffset(
          BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
          StackPtr, false, 0);
      // Load new SP from the top of the stack into RSP.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
                   StackPtr, false, 0);
      return;
    }
  }

  while (Offset) {
    uint64_t ThisVal = std::min(Offset, Chunk);
    if (ThisVal == SlotSize) {
      // Use push / pop for slot sized adjustments as a size optimization. We
      // need to find a dead register when using pop.
      unsigned Reg = isSub
                         ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
                         : TRI->findDeadCallerSavedReg(MBB, MBBI);
      if (Reg) {
        unsigned Opc = isSub
                           ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
                           : (Is64Bit ? X86::POP64r : X86::POP32r);
        BuildMI(MBB, MBBI, DL, TII.get(Opc))
            .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
            .setMIFlag(Flag);
        Offset -= ThisVal;
        continue;
      }
    }

    BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
        .setMIFlag(Flag);

    Offset -= ThisVal;
  }
}

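// Emit a single stack adjustment of Offset bytes, either as an LEA off the
// stack pointer (which leaves EFLAGS intact) or as an ADD/SUB immediate,
// depending on what the insertion point allows.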
MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
  assert(Offset != 0 && "zero offset stack adjustment requested");

  // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
  // is tricky.
  bool UseLEA;
  if (!InEpilogue) {
    // Check if inserting the prologue at the beginning
    // of MBB would require to use LEA operations.
    // We need to use LEA operations if EFLAGS is live in, because
    // it means an instruction will read it before it gets defined.
    UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
  } else {
    // If we can use LEA for SP but we shouldn't, check that none
    // of the terminators uses the eflags. Otherwise we will insert
    // an ADD that will redefine the eflags and break the condition.
    // Alternatively, we could move the ADD, but this may not be possible
    // and is an optimization anyway.
    UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
    if (UseLEA && !STI.useLeaForSP())
      UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
    // If that assert breaks, that means we do not do the right thing
    // in canUseAsEpilogue.
    assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
           "We shouldn't have allowed this insertion point");
  }

  MachineInstrBuilder MI;
  if (UseLEA) {
    MI = addRegOffset(BuildMI(MBB, MBBI, DL,
                              TII.get(getLEArOpcode(Uses64BitFramePtr)),
                              StackPtr),
                      StackPtr, false, Offset);
  } else {
    bool IsSub = Offset < 0;
    uint64_t AbsOffset = IsSub ? -Offset : Offset;
    const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr, AbsOffset)
                               : getADDriOpcode(Uses64BitFramePtr, AbsOffset);
    MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
             .addReg(StackPtr)
             .addImm(AbsOffset);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  return MI;
}

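// Look at the instruction before (or after) MBBI; if it is an ADD, SUB or LEA
// on the stack pointer, erase it (together with a redundant CFA-offset CFI
// directive that follows it, if any) and return the adjustment it performed:
// positive for ADD/LEA, negative for SUB, 0 if nothing could be merged.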
int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     bool doMergeWithPrevious) const {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;

  PI = skipDebugInstructionsBackward(PI, MBB.begin());
  // It is assumed that ADD/SUB/LEA instruction is succeeded by one CFI
  // instruction, and that there are no DBG_VALUE or other instructions between
  // ADD/SUB/LEA and its corresponding CFI instruction.
  /* TODO: Add support for the case where there are multiple CFI instructions
    below the ADD/SUB/LEA, e.g.:
    ...
    add
    cfi_def_cfa_offset
    cfi_offset
    ...
  */
  if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
    PI = std::prev(PI);

  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = PI->getOperand(2).getImm();
  } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
             PI->getOperand(0).getReg() == StackPtr &&
             PI->getOperand(1).getReg() == StackPtr &&
             PI->getOperand(2).getImm() == 1 &&
             PI->getOperand(3).getReg() == X86::NoRegister &&
             PI->getOperand(5).getReg() == X86::NoRegister) {
    // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
    Offset = PI->getOperand(4).getImm();
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = -PI->getOperand(2).getImm();
  } else
    return 0;

  PI = MBB.erase(PI);
  if (PI != MBB.end() && PI->isCFIInstruction()) {
    auto CIs = MBB.getParent()->getFrameInstructions();
    MCCFIInstruction CI = CIs[PI->getOperand(0).getCFIIndex()];
    if (CI.getOperation() == MCCFIInstruction::OpDefCfaOffset ||
        CI.getOperation() == MCCFIInstruction::OpAdjustCfaOffset)
      PI = MBB.erase(PI);
  }
  if (!doMergeWithPrevious)
    MBBI = skipDebugInstructionsForward(PI, MBB.end());

  return Offset;
}

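// Register CFIInst with the MachineFunction and emit a CFI_INSTRUCTION pseudo
// that refers to it at the given insertion point.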
void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL,
                                const MCCFIInstruction &CFIInst) const {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.addFrameInst(CFIInst);
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex);
}

/// Emits Dwarf Info specifying offsets of callee saved registers and
/// frame pointer. This is called only when basic block sections are enabled.
void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  if (!hasFP(MF)) {
    emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
    return;
  }
  const MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const Register FramePtr = TRI->getFrameRegister(MF);
  const Register MachineFramePtr =
      STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64))
                               : FramePtr;
  unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);
  // Offset = space for return address + size of the frame pointer itself.
  unsigned Offset = (Is64Bit ? 8 : 4) + (Uses64BitFramePtr ? 8 : 4);
  BuildCFI(MBB, MBBI, DebugLoc{},
           MCCFIInstruction::createOffset(nullptr, DwarfReg, -Offset));
  emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
}

void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, bool IsPrologue) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
  if (CSI.empty()) return;

  // Calculate offsets.
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI.getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);

    if (IsPrologue) {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
    } else {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createRestore(nullptr, DwarfReg));
    }
  }
}

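// Emit the target-appropriate stack probe for an allocation: Windows CoreCLR
// gets an inline expansion (deferred via STACKALLOC_W_PROBING when in the
// prolog); everything else calls the platform's stack probe function.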
void X86FrameLowering::emitStackProbe(MachineFunction &MF,
                                      MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR()) {
    if (InProlog) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING))
          .addImm(0 /* no explicit stack size */);
    } else {
      emitStackProbeInline(MF, MBB, MBBI, DL, false);
    }
  } else {
    emitStackProbeCall(MF, MBB, MBBI, DL, InProlog);
  }
}

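// Expand the STACKALLOC_W_PROBING pseudo left in the prologue block into an
// actual probing sequence, then delete the pseudo.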
void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
                                        MachineBasicBlock &PrologMBB) const {
  auto Where = llvm::find_if(PrologMBB, [](MachineInstr &MI) {
    return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
  });
  if (Where != PrologMBB.end()) {
    DebugLoc DL = PrologMBB.findDebugLoc(Where);
    emitStackProbeInline(MF, PrologMBB, Where, DL, true);
    Where->eraseFromParent();
  }
}

void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
                                            MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            const DebugLoc &DL,
                                            bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR() && STI.is64Bit())
    emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);
  else
    emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);
}

void X86FrameLowering::emitStackProbeInlineGeneric(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  MachineInstr &AllocWithProbe = *MBBI;
  uint64_t Offset = AllocWithProbe.getOperand(0).getImm();

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  assert(!(STI.is64Bit() && STI.isTargetWindowsCoreCLR()) &&
         "different expansion expected for CoreCLR 64 bit");

  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  uint64_t ProbeChunk = StackProbeSize * 8;

  uint64_t MaxAlign =
      TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;

  // Synthesize a loop or unroll it, depending on the number of iterations.
  // BuildStackAlignAND ensures that at most MaxAlign % StackProbeSize bytes
  // are left unprobed between the unaligned rsp and the current rsp.
  if (Offset > ProbeChunk) {
    emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset,
                                    MaxAlign % StackProbeSize);
  } else {
    emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset,
                                     MaxAlign % StackProbeSize);
  }
}

void X86FrameLowering::emitStackProbeInlineGenericBlock(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
    uint64_t AlignOffset) const {

  const bool NeedsDwarfCFI = needsDwarfCFI(MF);
  const bool HasFP = hasFP(MF);
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, Offset);
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  uint64_t CurrentOffset = 0;

  assert(AlignOffset < StackProbeSize);

  // If the offset is so small it fits within a page, there's nothing to do.
  if (StackProbeSize < Offset + AlignOffset) {

    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                           .addReg(StackPtr)
                           .addImm(StackProbeSize - AlignOffset)
                           .setMIFlag(MachineInstr::FrameSetup);
    if (!HasFP && NeedsDwarfCFI) {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createAdjustCfaOffset(
                   nullptr, StackProbeSize - AlignOffset));
    }
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset = StackProbeSize - AlignOffset;
  }

  // For the next N - 1 pages, just probe. I tried to take advantage of
  // natural probes but it implies much more logic and there were very few
  // interesting natural probes to interleave.
  while (CurrentOffset + StackProbeSize < Offset) {
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                           .addReg(StackPtr)
                           .addImm(StackProbeSize)
                           .setMIFlag(MachineInstr::FrameSetup);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

    if (!HasFP && NeedsDwarfCFI) {
      BuildCFI(
          MBB, MBBI, DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, StackProbeSize));
    }
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset += StackProbeSize;
  }

  // No need to probe the tail, it is smaller than a page.
  uint64_t ChunkSize = Offset - CurrentOffset;
  MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
                         .addReg(StackPtr)
                         .addImm(ChunkSize)
                         .setMIFlag(MachineInstr::FrameSetup);
  // No need to adjust the Dwarf CFA offset here; the last position of the
  // stack has already been defined.
  MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
}

void X86FrameLowering::emitStackProbeInlineGenericLoop(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
    uint64_t AlignOffset) const {
  assert(Offset && "null offset");

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  if (AlignOffset) {
    if (AlignOffset < StackProbeSize) {
      // Perform a first smaller allocation followed by a probe.
      const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, AlignOffset);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), StackPtr)
                             .addReg(StackPtr)
                             .addImm(AlignOffset)
                             .setMIFlag(MachineInstr::FrameSetup);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.

      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                       .setMIFlag(MachineInstr::FrameSetup),
                   StackPtr, false, 0)
          .addImm(0)
          .setMIFlag(MachineInstr::FrameSetup);
      NumFrameExtraProbe++;
      Offset -= AlignOffset;
    }
  }

  // Synthesize a loop
  NumFrameLoopProbe++;
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  MachineBasicBlock *testMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = ++MBB.getIterator();
  MF.insert(MBBIter, testMBB);
  MF.insert(MBBIter, tailMBB);

  Register FinalStackProbed = Uses64BitFramePtr ? X86::R11
                              : Is64Bit         ? X86::R11D
                                                : X86::EAX;
  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);

  // save loop bound
  {
    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, Offset);
    BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed)
        .addReg(FinalStackProbed)
        .addImm(Offset / StackProbeSize * StackProbeSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // allocate a page
  {
    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
    BuildMI(testMBB, DL, TII.get(SUBOpc), StackPtr)
        .addReg(StackPtr)
        .addImm(StackProbeSize)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // touch the page
  addRegOffset(BuildMI(testMBB, DL, TII.get(MovMIOpc))
                   .setMIFlag(MachineInstr::FrameSetup),
               StackPtr, false, 0)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  // cmp with stack pointer bound
  BuildMI(testMBB, DL, TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
      .addReg(StackPtr)
      .addReg(FinalStackProbed)
      .setMIFlag(MachineInstr::FrameSetup);

  // jump
  BuildMI(testMBB, DL, TII.get(X86::JCC_1))
      .addMBB(testMBB)
      .addImm(X86::COND_NE)
      .setMIFlag(MachineInstr::FrameSetup);
  testMBB->addSuccessor(testMBB);
  testMBB->addSuccessor(tailMBB);

  // BB management
  tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end());
  tailMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  MBB.addSuccessor(testMBB);

  // handle tail
  unsigned TailOffset = Offset % StackProbeSize;
  if (TailOffset) {
    const unsigned Opc = getSUBriOpcode(Uses64BitFramePtr, TailOffset);
    BuildMI(*tailMBB, tailMBB->begin(), DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr)
        .addImm(TailOffset)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Update Live In information
  recomputeLiveIns(*testMBB);
  recomputeLiveIns(*tailMBB);
}

void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  assert(STI.is64Bit() && "different expansion needed for 32 bit");
  assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  // RAX contains the number of bytes of desired stack adjustment.
  // The handling here assumes this value has already been updated so as to
  // maintain stack alignment.
  //
  // We need to exit with RSP modified by this amount and execute suitable
  // page touches to notify the OS that we're growing the stack responsibly.
  // All stack probing must be done without modifying RSP.
  //
  // MBB:
  //    SizeReg = RAX;
  //    ZeroReg = 0
  //    CopyReg = RSP
  //    Flags, TestReg = CopyReg - SizeReg
  //    FinalReg = !Flags.Ovf ? TestReg : ZeroReg
  //    LimitReg = gs magic thread env access
  //    if FinalReg >= LimitReg goto ContinueMBB
  // RoundBB:
  //    RoundReg = page address of FinalReg
  // LoopMBB:
  //    LoopReg = PHI(LimitReg,ProbeReg)
  //    ProbeReg = LoopReg - PageSize
  //    [ProbeReg] = 0
  //    if (ProbeReg > RoundReg) goto LoopMBB
  // ContinueMBB:
  //    RSP = RSP - RAX
  //    [rest of original MBB]

  // Set up the new basic blocks
  MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
  MF.insert(MBBIter, RoundMBB);
  MF.insert(MBBIter, LoopMBB);
  MF.insert(MBBIter, ContinueMBB);

  // Split MBB and move the tail portion down to ContinueMBB.
  MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
  ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
  ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);

  // Some useful constants
  const int64_t ThreadEnvironmentStackLimit = 0x10;
  const int64_t PageSize = 0x1000;
  const int64_t PageMask = ~(PageSize - 1);

  // Registers we need. For the normal case we use virtual
  // registers. For the prolog expansion we use RAX, RCX and RDX.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterClass *RegClass = &X86::GR64RegClass;
  const Register SizeReg = InProlog ? X86::RAX
                                    : MRI.createVirtualRegister(RegClass),
                 ZeroReg = InProlog ? X86::RCX
                                    : MRI.createVirtualRegister(RegClass),
                 CopyReg = InProlog ? X86::RDX
                                    : MRI.createVirtualRegister(RegClass),
                 TestReg = InProlog ? X86::RDX
                                    : MRI.createVirtualRegister(RegClass),
                 FinalReg = InProlog ? X86::RDX
                                     : MRI.createVirtualRegister(RegClass),
                 RoundedReg = InProlog ? X86::RDX
                                       : MRI.createVirtualRegister(RegClass),
                 LimitReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass),
                 JoinReg = InProlog ? X86::RCX
                                    : MRI.createVirtualRegister(RegClass),
                 ProbeReg = InProlog ? X86::RCX
                                     : MRI.createVirtualRegister(RegClass);

  // SP-relative offsets where we can save RCX and RDX.
  int64_t RCXShadowSlot = 0;
  int64_t RDXShadowSlot = 0;

  // If inlining in the prolog, save RCX and RDX.
  if (InProlog) {
    // Compute the offsets. We need to account for things already
    // pushed onto the stack at this point: return address, frame
    // pointer (if used), and callee saves.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
    const bool HasFP = hasFP(MF);

    // Check if we need to spill RCX and/or RDX.
    // Here we assume that no earlier prologue instruction changes RCX and/or
    // RDX, so checking the block live-ins is enough.
    const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);
    const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);
    int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
    // Assign the initial slot to both registers, then change RDX's slot if
    // both need to be spilled.
    if (IsRCXLiveIn)
      RCXShadowSlot = InitSlot;
    if (IsRDXLiveIn)
      RDXShadowSlot = InitSlot;
    if (IsRDXLiveIn && IsRCXLiveIn)
      RDXShadowSlot += 8;
    // Emit the saves if needed.
    if (IsRCXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RCXShadowSlot)
          .addReg(X86::RCX);
    if (IsRDXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RDXShadowSlot)
          .addReg(X86::RDX);
  } else {
    // Not in the prolog. Copy RAX to a virtual reg.
    BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
  }

  // Add code to MBB to check for overflow and set the new target stack pointer
  // to zero if so.
  BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
      .addReg(ZeroReg, RegState::Undef)
      .addReg(ZeroReg, RegState::Undef);
  BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
  BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
      .addReg(CopyReg)
      .addReg(SizeReg);
  BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg)
      .addReg(TestReg)
      .addReg(ZeroReg)
      .addImm(X86::COND_B);

  // FinalReg now holds final stack pointer value, or zero if
  // allocation would overflow. Compare against the current stack
  // limit from the thread environment block. Note this limit is the
  // lowest touched page on the stack, not the point at which the OS
  // will cause an overflow exception, so this is just an optimization
  // to avoid unnecessarily touching pages that are below the current
  // SP but already committed to the stack by the OS.
  BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
      .addReg(0)
      .addImm(1)
      .addReg(0)
      .addImm(ThreadEnvironmentStackLimit)
      .addReg(X86::GS);
  BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
  // Jump if the desired stack pointer is at or above the stack limit.
  BuildMI(&MBB, DL, TII.get(X86::JCC_1))
      .addMBB(ContinueMBB)
      .addImm(X86::COND_AE);

  // Add code to roundMBB to round the final stack pointer to a page boundary.
  RoundMBB->addLiveIn(FinalReg);
  BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
      .addReg(FinalReg)
      .addImm(PageMask);
  BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);

  // LimitReg now holds the current stack limit, RoundedReg page-rounded
  // final RSP value. Add code to loopMBB to decrement LimitReg page-by-page
  // and probe until we reach RoundedReg.
  if (!InProlog) {
    BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
        .addReg(LimitReg)
        .addMBB(RoundMBB)
        .addReg(ProbeReg)
        .addMBB(LoopMBB);
  }

  LoopMBB->addLiveIn(JoinReg);
  addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
               false, -PageSize);

  // Probe by storing a byte onto the stack.
  BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
      .addReg(ProbeReg)
      .addImm(1)
      .addReg(0)
      .addImm(0)
      .addReg(0)
      .addImm(0);

  LoopMBB->addLiveIn(RoundedReg);
  BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
      .addReg(RoundedReg)
      .addReg(ProbeReg);
  BuildMI(LoopMBB, DL, TII.get(X86::JCC_1))
      .addMBB(LoopMBB)
      .addImm(X86::COND_NE);

  MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();

  // If in prolog, restore RDX and RCX.
  if (InProlog) {
    if (RCXShadowSlot) // It means we spilled RCX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RCX),
                   X86::RSP, false, RCXShadowSlot);
    if (RDXShadowSlot) // It means we spilled RDX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RDX),
                   X86::RSP, false, RDXShadowSlot);
  }

  // Now that the probing is done, add code to continueMBB to update
  // the stack pointer for real.
  ContinueMBB->addLiveIn(SizeReg);
  BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
      .addReg(X86::RSP)
      .addReg(SizeReg);

  // Add the control flow edges we need.
  MBB.addSuccessor(ContinueMBB);
  MBB.addSuccessor(RoundMBB);
  RoundMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(ContinueMBB);
  LoopMBB->addSuccessor(LoopMBB);

  // Mark all the instructions added to the prolog as frame setup.
  if (InProlog) {
    for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
      BeforeMBBI->setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *RoundMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *LoopMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineBasicBlock::iterator CMBBI = ContinueMBB->begin();
         CMBBI != ContinueMBBI; ++CMBBI) {
      CMBBI->setFlag(MachineInstr::FrameSetup);
    }
  }
}

void X86FrameLowering::emitStackProbeCall(MachineFunction &MF,
                                          MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL,
                                          bool InProlog) const {
  bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;

  // FIXME: Add indirect thunk support and remove this.
  if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls())
    report_fatal_error("Emitting stack probe calls on 64-bit with the large "
                       "code model and indirect thunks not yet implemented.");

  unsigned CallOp;
  if (Is64Bit)
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
  else
    CallOp = X86::CALLpcrel32;

  StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);

  MachineInstrBuilder CI;
  MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);

  // All current stack probes take AX and SP as input, clobber flags, and
  // preserve all registers. x86_64 probes leave RSP unmodified.
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // For the large code model, we have to call through a register. Use R11,
    // as it is scratch in all supported calling conventions.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(MF.createExternalSymbolName(Symbol));
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
  } else {
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
             .addExternalSymbol(MF.createExternalSymbolName(Symbol));
  }

  unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX;
  unsigned SP = Uses64BitFramePtr ? X86::RSP : X86::ESP;
  CI.addReg(AX, RegState::Implicit)
      .addReg(SP, RegState::Implicit)
      .addReg(AX, RegState::Define | RegState::Implicit)
      .addReg(SP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  if (STI.isTargetWin64() || !STI.isOSWindows()) {
    // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax, so we can reuse it when
    // adjusting %rsp.
    // All other platforms do not specify a particular ABI for the stack probe
    // function, so we arbitrarily define it to not adjust %esp/%rsp itself.
    BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Uses64BitFramePtr)), SP)
        .addReg(SP)
        .addReg(AX);
  }

  if (InProlog) {
    // Apply the frame setup flag to all inserted instrs.
    for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
      ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
  }
}

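// Rounds the SEH frame offset down to a 16-byte boundary, capped at 128.
// Worked example: SPAdjust = 40 is clamped to 40 and masked down to 32;
// SPAdjust = 300 is clamped to 128, which is already 16-byte aligned.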
static unsigned calculateSetFPREG(uint64_t SPAdjust) {
  // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
  // and might require smaller successive adjustments.
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
  return SEHFrameOffset & -16;
}

// If we're forcing a stack realignment we can't rely on just the frame
// info, we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t
X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  Align MaxAlign = MFI.getMaxAlign(); // Desired stack alignment.
  Align StackAlign = getStackAlign();
  if (MF.getFunction().hasFnAttribute("stackrealign")) {
    if (MFI.hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = Align(SlotSize);
  }
  return MaxAlign.value();
}

void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL, unsigned Reg,
                                          uint64_t MaxAlign) const {
  uint64_t Val = -MaxAlign;
  unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // We want to make sure that (in the worst case) less than StackProbeSize
  // bytes are left unprobed after the AND. This assumption is used in
  // emitStackProbeInlineGeneric.
  if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
    {
      NumFrameLoopProbe++;
      MachineBasicBlock *entryMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *headMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *bodyMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *footMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());

      MachineFunction::iterator MBBIter = MBB.getIterator();
      MF.insert(MBBIter, entryMBB);
      MF.insert(MBBIter, headMBB);
      MF.insert(MBBIter, bodyMBB);
      MF.insert(MBBIter, footMBB);
      const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
      Register FinalStackProbed = Uses64BitFramePtr ? X86::R11
                                  : Is64Bit         ? X86::R11D
                                                    : X86::EAX;

      // Setup entry block
      {

        entryMBB->splice(entryMBB->end(), &MBB, MBB.begin(), MBBI);
        BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        MachineInstr *MI =
            BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)
                .addReg(FinalStackProbed)
                .addImm(Val)
                .setMIFlag(MachineInstr::FrameSetup);

        // The EFLAGS implicit def is dead.
        MI->getOperand(3).setIsDead();

        BuildMI(entryMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(entryMBB, DL, TII.get(X86::JCC_1))
            .addMBB(&MBB)
            .addImm(X86::COND_E)
            .setMIFlag(MachineInstr::FrameSetup);
        entryMBB->addSuccessor(headMBB);
        entryMBB->addSuccessor(&MBB);
      }

      // Loop entry block

      {
        const unsigned SUBOpc =
            getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
        BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr)
            .addReg(StackPtr)
            .addImm(StackProbeSize)
            .setMIFlag(MachineInstr::FrameSetup);

        BuildMI(headMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);

        // jump
        BuildMI(headMBB, DL, TII.get(X86::JCC_1))
            .addMBB(footMBB)
            .addImm(X86::COND_B)
            .setMIFlag(MachineInstr::FrameSetup);

        headMBB->addSuccessor(bodyMBB);
        headMBB->addSuccessor(footMBB);
      }

      // setup loop body
      {
        addRegOffset(BuildMI(bodyMBB, DL, TII.get(MovMIOpc))
                         .setMIFlag(MachineInstr::FrameSetup),
                     StackPtr, false, 0)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);

        const unsigned SUBOpc =
            getSUBriOpcode(Uses64BitFramePtr, StackProbeSize);
        BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr)
            .addReg(StackPtr)
            .addImm(StackProbeSize)
            .setMIFlag(MachineInstr::FrameSetup);

        // cmp with stack pointer bound
        BuildMI(bodyMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);

        // jump
        BuildMI(bodyMBB, DL, TII.get(X86::JCC_1))
            .addMBB(bodyMBB)
            .addImm(X86::COND_B)
            .setMIFlag(MachineInstr::FrameSetup);
        bodyMBB->addSuccessor(bodyMBB);
        bodyMBB->addSuccessor(footMBB);
      }

      // setup loop footer
      {
        BuildMI(footMBB, DL, TII.get(TargetOpcode::COPY), StackPtr)
            .addReg(FinalStackProbed)
            .setMIFlag(MachineInstr::FrameSetup);
        addRegOffset(BuildMI(footMBB, DL, TII.get(MovMIOpc))
                         .setMIFlag(MachineInstr::FrameSetup),
                     StackPtr, false, 0)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);
        footMBB->addSuccessor(&MBB);
      }

      recomputeLiveIns(*headMBB);
      recomputeLiveIns(*bodyMBB);
      recomputeLiveIns(*footMBB);
      recomputeLiveIns(MBB);
    }
  } else {
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
                           .addReg(Reg)
                           .addImm(Val)
                           .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }
}

bool X86FrameLowering::has128ByteRedZone(const MachineFunction &MF) const {
  // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
  // clobbered by any interrupt handler.
  assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
         "MF used frame lowering for wrong subtarget");
  const Function &Fn = MF.getFunction();
  const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
  return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone);
}

bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {
  return MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
}

bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {
  return !isWin64Prologue(MF) && MF.needsFrameMoves();
}

/// emitPrologue - Push callee-saved registers onto the stack, which
/// automatically adjusts the stack pointer. Adjust the stack pointer to
/// allocate space for local variables. Also emit labels used by the exception
/// handler to generate the exception handling frames.

/*
  Here's a gist of what gets emitted:

  ; Establish frame pointer, if needed
  [if needs FP]
      push %rbp
      .cfi_def_cfa_offset 16
      .cfi_offset %rbp, -16
      .seh_pushreg %rbp
      mov %rsp, %rbp
      .cfi_def_cfa_register %rbp

  ; Spill general-purpose registers
  [for all callee-saved GPRs]
      pushq %<reg>
      [if not needs FP]
          .cfi_def_cfa_offset (offset from RETADDR)
      .seh_pushreg %<reg>

  ; If the required stack alignment > default stack alignment
  ; rsp needs to be re-aligned. This creates a "re-alignment gap"
  ; of unknown size in the stack frame.
  [if stack needs re-alignment]
      and $MASK, %rsp

  ; Allocate space for locals
  [if target is Windows and allocated space > 4096 bytes]
      ; Windows needs special care for allocations larger
      ; than one page.
      mov $NNN, %rax
      call ___chkstk_ms/___chkstk
      sub %rax, %rsp
  [else]
      sub $NNN, %rsp

  [if needs FP]
      .seh_stackalloc (size of XMM spill slots)
      .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
  [else]
      .seh_stackalloc NNN

  ; Spill XMMs
  ; Note, that while only Windows 64 ABI specifies XMMs as callee-preserved,
  ; they may get spilled on any platform, if the current function
  ; calls @llvm.eh.unwind.init
  [if needs FP]
      [for all callee-saved XMM registers]
          movaps %<xmm reg>, -MMM(%rbp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
          ; i.e. the offset relative to (%rbp - SEHFrameOffset)
  [else]
      [for all callee-saved XMM registers]
          movaps %<xmm reg>, KKK(%rsp)
      [for all callee-saved XMM registers]
          .seh_savexmm %<xmm reg>, KKK

  .seh_endprologue

  [if needs base pointer]
      mov %rsp, %rbx
      [if needs to restore base pointer]
          mov %rsp, -MMM(%rbp)

  ; Emit CFI info
  [if needs FP]
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rbp)
  [else]
      .cfi_def_cfa_offset (offset from RETADDR)
      [for all callee-saved registers]
          .cfi_offset %<reg>, (offset from %rsp)

  Notes:
  - .seh directives are emitted only for Windows 64 ABI
  - .cv_fpo directives are emitted on win32 when emitting CodeView
  - .cfi directives are emitted for all other ABIs
  - for 32-bit code, substitute %e?? registers for %r??
*/
1318
emitPrologue(MachineFunction & MF,MachineBasicBlock & MBB) const1319 void X86FrameLowering::emitPrologue(MachineFunction &MF,
1320 MachineBasicBlock &MBB) const {
1321 assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
1322 "MF used frame lowering for wrong subtarget");
1323 MachineBasicBlock::iterator MBBI = MBB.begin();
1324 MachineFrameInfo &MFI = MF.getFrameInfo();
1325 const Function &Fn = MF.getFunction();
1326 MachineModuleInfo &MMI = MF.getMMI();
1327 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1328 uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
1329 uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
1330 bool IsFunclet = MBB.isEHFuncletEntry();
1331 EHPersonality Personality = EHPersonality::Unknown;
1332 if (Fn.hasPersonalityFn())
1333 Personality = classifyEHPersonality(Fn.getPersonalityFn());
1334 bool FnHasClrFunclet =
1335 MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
1336 bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
1337 bool HasFP = hasFP(MF);
1338 bool IsWin64Prologue = isWin64Prologue(MF);
1339 bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
1340 // FIXME: Emit FPO data for EH funclets.
1341 bool NeedsWinFPO =
1342 !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
1343 bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
1344 bool NeedsDwarfCFI = needsDwarfCFI(MF);
1345 Register FramePtr = TRI->getFrameRegister(MF);
1346 const Register MachineFramePtr =
1347 STI.isTarget64BitILP32()
1348 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;
1349 Register BasePtr = TRI->getBaseRegister();
1350 bool HasWinCFI = false;
1351
1352 // Debug location must be unknown since the first debug location is used
1353 // to determine the end of the prologue.
1354 DebugLoc DL;
1355
1356 // Add RETADDR move area to callee saved frame size.
1357 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1358 if (TailCallReturnAddrDelta && IsWin64Prologue)
1359 report_fatal_error("Can't handle guaranteed tail call under win64 yet");
1360
1361 if (TailCallReturnAddrDelta < 0)
1362 X86FI->setCalleeSavedFrameSize(
1363 X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
1364
1365 const bool EmitStackProbeCall =
1366 STI.getTargetLowering()->hasStackProbeSymbol(MF);
1367 unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF);
1368
1369 if (HasFP && X86FI->hasSwiftAsyncContext()) {
1370 BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8),
1371 MachineFramePtr)
1372 .addUse(MachineFramePtr)
1373 .addImm(60)
1374 .setMIFlag(MachineInstr::FrameSetup);
1375 }
1376
1377 // Re-align the stack on 64-bit if the x86-interrupt calling convention is
1378 // used and an error code was pushed, since the x86-64 ABI requires a 16-byte
1379 // stack alignment.
1380 if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
1381 Fn.arg_size() == 2) {
1382 StackSize += 8;
1383 MFI.setStackSize(StackSize);
1384 emitSPUpdate(MBB, MBBI, DL, -8, /*InEpilogue=*/false);
1385 }
1386
1387 // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
1388 // function, and use up to 128 bytes of stack space, don't have a frame
1389 // pointer, calls, or dynamic alloca then we do not need to adjust the
1390 // stack pointer (we fit in the Red Zone). We also check that we don't
1391 // push and pop from the stack.
1392 if (has128ByteRedZone(MF) && !TRI->hasStackRealignment(MF) &&
1393 !MFI.hasVarSizedObjects() && // No dynamic alloca.
1394 !MFI.adjustsStack() && // No calls.
1395 !EmitStackProbeCall && // No stack probes.
1396 !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop.
1397 !MF.shouldSplitStack()) { // Regular stack
1398 uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
1399 if (HasFP) MinSize += SlotSize;
1400 X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
1401 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
1402 MFI.setStackSize(StackSize);
1403 }

  // Insert stack pointer adjustment for later moving of return addr.  Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    BuildStackAdjustment(MBB, MBBI, DL, TailCallReturnAddrDelta,
                         /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Mapping for machine moves:
  //
  //   DST: VirtualFP AND
  //        SRC: VirtualFP              => DW_CFA_def_cfa_offset
  //        ELSE                        => DW_CFA_def_cfa
  //
  //   SRC: VirtualFP AND
  //        DST: Register               => DW_CFA_def_cfa_register
  //
  //   ELSE
  //        OFFSET < 0                  => DW_CFA_offset_extended_sf
  //        REG < 64                    => DW_CFA_offset + Reg
  //        ELSE                        => DW_CFA_offset_extended
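  //
  // For the common frame-pointer prologue built below, this mapping produces
  // roughly (a sketch, assuming x86-64 with DWARF CFI):
  //   pushq %rbp          -> .cfi_def_cfa_offset 16
  //                          .cfi_offset %rbp, -16
  //   movq  %rsp, %rbp    -> .cfi_def_cfa_register %rbp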

  uint64_t NumBytes = 0;
  int stackGrowth = -SlotSize;

  // Find the funclet establisher parameter
  Register Establisher = X86::NoRegister;
  if (IsClrFunclet)
    Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
  else if (IsFunclet)
    Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;

  if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
    // Immediately spill establisher into the home slot.
    // The runtime cares about this.
    // MOV64mr %rdx, 16(%rsp)
    unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
        .addReg(Establisher)
        .setMIFlag(MachineInstr::FrameSetup);
    MBB.addLiveIn(Establisher);
  }

  if (HasFP) {
    assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved");

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base
    // pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Callee-saved registers are pushed on stack before the stack is
    // realigned.
    if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = alignTo(NumBytes, MaxAlign);

    // Save EBP/RBP into the appropriate stack slot.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
        .addReg(MachineFramePtr, RegState::Kill)
        .setMIFlag(MachineInstr::FrameSetup);

    if (NeedsDwarfCFI) {
      // Mark the place where EBP/RBP was saved.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(nullptr, -2 * stackGrowth));

      // Change the rule for the FramePtr to be an "offset" rule.
      unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createOffset(nullptr, DwarfFramePtr,
                                              2 * stackGrowth));
    }

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (!IsFunclet) {
      if (X86FI->hasSwiftAsyncContext()) {
        const auto &Attrs = MF.getFunction().getAttributes();

        // Before we update the live frame pointer we have to ensure there's
        // a valid (or null) asynchronous context in its slot just before FP
        // in the frame record, so store it now.
        if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {
          // We have an initial context in r14, store it just before the
          // frame pointer.
          MBB.addLiveIn(X86::R14);
          BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
              .addReg(X86::R14)
              .setMIFlag(MachineInstr::FrameSetup);
        } else {
          // No initial context, store null so that there's no pointer that
          // could be misused.
          BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64i8))
              .addImm(0)
              .setMIFlag(MachineInstr::FrameSetup);
        }

        if (NeedsWinCFI) {
          HasWinCFI = true;
          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
              .addImm(X86::R14)
              .setMIFlag(MachineInstr::FrameSetup);
        }

        BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr)
            .addUse(X86::RSP)
            .addImm(1)
            .addUse(X86::NoRegister)
            .addImm(8)
            .addUse(X86::NoRegister)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri8), X86::RSP)
            .addUse(X86::RSP)
            .addImm(8)
            .setMIFlag(MachineInstr::FrameSetup);
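
        // Resulting extended frame record, as a rough sketch (higher
        // addresses first):
        //   [ return address ]
        //   [ saved RBP      ]  <- RBP (tagged via bit 60 by the BTS above)
        //   [ async context  ]
        //   [ 8-byte slot    ]  <- keeps the 16-byte stack alignment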
      }

      if (!IsWin64Prologue && !IsFunclet) {
        // Update EBP with the new base value.
        if (!X86FI->hasSwiftAsyncContext())
          BuildMI(MBB, MBBI, DL,
                  TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
                  FramePtr)
              .addReg(StackPtr)
              .setMIFlag(MachineInstr::FrameSetup);

        if (NeedsDwarfCFI) {
          // Mark effective beginning of when frame pointer becomes valid.
          // Define the current CFA to use the EBP/RBP register.
          unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
          BuildCFI(
              MBB, MBBI, DL,
              MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr));
        }

        if (NeedsWinFPO) {
          // .cv_fpo_setframe $FramePtr
          HasWinCFI = true;
          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
              .addImm(FramePtr)
              .addImm(0)
              .setMIFlag(MachineInstr::FrameSetup);
        }
      }
    }
  } else {
    assert(!IsFunclet && "funclets without FPs not yet implemented");
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

  // Update the offset adjustment, which is mainly used by codeview to
  // translate from ESP to VFRAME relative local variable offsets.
  if (!IsFunclet) {
    if (HasFP && TRI->hasStackRealignment(MF))
      MFI.setOffsetAdjustment(-NumBytes);
    else
      MFI.setOffsetAdjustment(-StackSize);
  }

  // For EH funclets, only allocate enough space for outgoing calls. Save the
  // NumBytes value that we would've used for the parent frame.
  unsigned ParentFrameNumBytes = NumBytes;
  if (IsFunclet)
    NumBytes = getWinEHFuncletFrameSize(MF);

  // Skip the callee-saved push instructions.
  bool PushedRegs = false;
  int StackOffset = 2 * stackGrowth;

  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup) &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    PushedRegs = true;
    Register Reg = MBBI->getOperand(0).getReg();
    ++MBBI;

    if (!HasFP && NeedsDwarfCFI) {
      // Mark callee-saved push instruction.
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackOffset));
      StackOffset += stackGrowth;
    }

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
          .addImm(Reg)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  // Realign stack after we pushed callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  // Don't do this for Win64, it needs to realign the stack after the
  // prologue.
  if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);

    if (NeedsWinCFI) {
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlign))
          .addImm(MaxAlign)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  // If there is a SUB32ri of ESP immediately before this instruction, merge
  // the two. This can be the case when tail call elimination is enabled and
  // the callee has more arguments than the caller.
  NumBytes -= mergeSPUpdates(MBB, MBBI, true);

  // Adjust stack pointer: ESP -= numbytes.

  // Windows and cygwin/mingw require a prologue helper routine when
  // allocating more than 4K bytes on the stack. Windows uses __chkstk and
  // cygwin/mingw uses __alloca. __alloca and the 32-bit version of __chkstk
  // will probe the stack and adjust the stack pointer in one go. The 64-bit
  // version of __chkstk is only responsible for probing the stack. The
  // 64-bit prologue is responsible for adjusting the stack pointer. Touching
  // the stack at 4K increments is necessary to ensure that the guard pages
  // used by the OS virtual memory manager are allocated in correct sequence.
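  //
  // A sketch of the common 64-bit Windows sequence emitted below (assuming
  // the allocation fits in 32 bits and EAX is dead):
  //   movl  $NumBytes, %eax
  //   callq __chkstk         # probes only; does not move RSP
  //   subq  %rax, %rsp       # actual adjustment, done by emitStackProbe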
  uint64_t AlignedNumBytes = NumBytes;
  if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF))
    AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
  if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
    assert(!X86FI->getUsesRedZone() &&
           "The Red Zone is not accounted for in stack probes");

    // Check whether EAX is livein for this block.
    bool isEAXAlive = isEAXLiveIn(MBB);

    if (isEAXAlive) {
      if (Is64Bit) {
        // Save RAX
        BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
            .addReg(X86::RAX, RegState::Kill)
            .setMIFlag(MachineInstr::FrameSetup);
      } else {
        // Save EAX
        BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
            .addReg(X86::EAX, RegState::Kill)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    }

    if (Is64Bit) {
      // Handle the 64-bit Windows ABI case where we need to call __chkstk.
      // Function prologue is responsible for adjusting the stack pointer.
      int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
      if (isUInt<32>(Alloc)) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
            .addImm(Alloc)
            .setMIFlag(MachineInstr::FrameSetup);
      } else if (isInt<32>(Alloc)) {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri32), X86::RAX)
            .addImm(Alloc)
            .setMIFlag(MachineInstr::FrameSetup);
      } else {
        BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
            .addImm(Alloc)
            .setMIFlag(MachineInstr::FrameSetup);
      }
    } else {
      // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
      // We'll also use 4 already allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
          .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    // Call __chkstk, __chkstk_ms, or __alloca.
    emitStackProbe(MF, MBB, MBBI, DL, true);

    if (isEAXAlive) {
      // Restore RAX/EAX
      MachineInstr *MI;
      if (Is64Bit)
        MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX),
                          StackPtr, false, NumBytes - 8);
      else
        MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
                          StackPtr, false, NumBytes - 4);
      MI->setFlag(MachineInstr::FrameSetup);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false);
  }

  if (NeedsWinCFI && NumBytes) {
    HasWinCFI = true;
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
        .addImm(NumBytes)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  int SEHFrameOffset = 0;
  unsigned SPOrEstablisher;
  if (IsFunclet) {
    if (IsClrFunclet) {
      // The establisher parameter passed to a CLR funclet is actually a
      // pointer to the (mostly empty) frame of its nearest enclosing
      // funclet; we have to find the root function establisher frame by
      // loading the PSPSym from the intermediate frame.
      unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
      MachinePointerInfo NoInfo;
      MBB.addLiveIn(Establisher);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
                   Establisher, false, PSPSlotOffset)
          .addMemOperand(MF.getMachineMemOperand(
              NoInfo, MachineMemOperand::MOLoad, SlotSize, Align(SlotSize)));
      // Save the root establisher back into the current funclet's (mostly
      // empty) frame, in case a sub-funclet or the GC needs it.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
                   false, PSPSlotOffset)
          .addReg(Establisher)
          .addMemOperand(MF.getMachineMemOperand(
              NoInfo,
              MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
              SlotSize, Align(SlotSize)));
    }
    SPOrEstablisher = Establisher;
  } else {
    SPOrEstablisher = StackPtr;
  }

  if (IsWin64Prologue && HasFP) {
    // Set RBP to a small fixed offset from RSP. In the funclet case, we base
    // this calculation on the incoming establisher, which holds the value of
    // RSP from the parent frame at the end of the prologue.
    SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
    if (SEHFrameOffset)
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
                   SPOrEstablisher, false, SEHFrameOffset);
    else
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
          .addReg(SPOrEstablisher);

    // If this is not a funclet, emit the CFI describing our frame pointer.
    if (NeedsWinCFI && !IsFunclet) {
      assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
      HasWinCFI = true;
      BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
          .addImm(FramePtr)
          .addImm(SEHFrameOffset)
          .setMIFlag(MachineInstr::FrameSetup);
      if (isAsynchronousEHPersonality(Personality))
        MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset;
    }
  } else if (IsFunclet && STI.is32Bit()) {
    // Reset EBP / ESI to something good for funclets.
    MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
    // If we're a catch funclet, we can be returned to via catchret. Save ESP
    // into the registration node so that the runtime will restore it for us.
    if (!MBB.isCleanupFuncletEntry()) {
      assert(Personality == EHPersonality::MSVC_CXX);
      Register FrameReg;
      int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex;
      int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg).getFixed();
      // ESP is the first field, so no extra displacement is needed.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg,
                   false, EHRegOffset)
          .addReg(X86::ESP);
    }
  }

  while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
    const MachineInstr &FrameInstr = *MBBI;
    ++MBBI;

    if (NeedsWinCFI) {
      int FI;
      if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
        if (X86::FR64RegClass.contains(Reg)) {
          int Offset;
          Register IgnoredFrameReg;
          if (IsWin64Prologue && IsFunclet)
            Offset = getWin64EHFrameIndexRef(MF, FI, IgnoredFrameReg);
          else
            Offset =
                getFrameIndexReference(MF, FI, IgnoredFrameReg).getFixed() +
                SEHFrameOffset;

          HasWinCFI = true;
          assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
          BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
              .addImm(Reg)
              .addImm(Offset)
              .setMIFlag(MachineInstr::FrameSetup);
        }
      }
    }
  }

  if (NeedsWinCFI && HasWinCFI)
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
        .setMIFlag(MachineInstr::FrameSetup);

  if (FnHasClrFunclet && !IsFunclet) {
    // Save the so-called Initial-SP (i.e. the value of the stack pointer
    // immediately after the prolog) into the PSPSlot so that funclets
    // and the GC can recover it.
    unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
    auto PSPInfo = MachinePointerInfo::getFixedStack(
        MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx);
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
                 false, PSPSlotOffset)
        .addReg(StackPtr)
        .addMemOperand(MF.getMachineMemOperand(
            PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
            SlotSize, Align(SlotSize)));
  }

  // Realign stack after we spilled callee-saved registers (so that we'll be
  // able to calculate their offsets from the frame pointer).
  // Win64 requires aligning the stack after the prologue.
  if (IsWin64Prologue && TRI->hasStackRealignment(MF)) {
    assert(HasFP && "There should be a frame pointer if stack is realigned.");
    BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
  }

  // We already dealt with stack realignment and funclets above.
  if (IsFunclet && STI.is32Bit())
    return;

  // If we need a base pointer, set it up here. It's whatever the value
  // of the stack pointer is at this point. Any variable size objects
  // will be allocated after this, so we can still use the base pointer
  // to reference locals.
  if (TRI->hasBasePointer(MF)) {
    // Update the base pointer with the current stack pointer.
    unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
    BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
        .addReg(SPOrEstablisher)
        .setMIFlag(MachineInstr::FrameSetup);
    if (X86FI->getRestoreBasePointer()) {
      // Stash value of base pointer. Saving RSP instead of EBP shortens
      // dependence chain. Used by SjLj EH.
      unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), FramePtr, true,
                   X86FI->getRestoreBasePointerOffset())
          .addReg(SPOrEstablisher)
          .setMIFlag(MachineInstr::FrameSetup);
    }

    if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
      // Stash the value of the frame pointer relative to the base pointer for
      // Win32 EH. This supports Win32 EH, which does the inverse of the above:
      // it recovers the frame pointer from the base pointer rather than the
      // other way around.
      unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
      Register UsedReg;
      int Offset =
          getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
              .getFixed();
      assert(UsedReg == BasePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
          .addReg(FramePtr)
          .setMIFlag(MachineInstr::FrameSetup);
    }
  }

  if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
    // Mark end of stack pointer adjustment.
    if (!HasFP && NumBytes) {
      // Define the current CFA rule to use the provided offset.
      assert(StackSize);
      BuildCFI(
          MBB, MBBI, DL,
          MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize - stackGrowth));
    }

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    emitCalleeSavedFrameMoves(MBB, MBBI, DL, true);
  }

  // An x86 interrupt handler cannot assume anything about the direction flag
  // (DF in the EFLAGS register), so clear it by emitting a "cld" instruction
  // in the prologue of every interrupt handler.
  //
  // FIXME: Create the "cld" instruction only in these cases:
  //   1. The interrupt handling function uses any of the "rep" instructions.
  //   2. The interrupt handling function calls another function.
  //
  if (Fn.getCallingConv() == CallingConv::X86_INTR)
    BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
        .setMIFlag(MachineInstr::FrameSetup);

  // At this point we know if the function has WinCFI or not.
  MF.setHasWinCFI(HasWinCFI);
}

bool X86FrameLowering::canUseLEAForSPInEpilogue(
    const MachineFunction &MF) const {
  // We can't use LEA instructions for adjusting the stack pointer if we don't
  // have a frame pointer in the Win64 ABI.  Only ADD instructions may be used
  // to deallocate the stack.
  // This means that we can use LEA for SP in two situations:
  // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
  // 2. We *have* a frame pointer which means we are permitted to use LEA.
  return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
}

static bool isFuncletReturnInstr(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case X86::CATCHRET:
  case X86::CLEANUPRET:
    return true;
  default:
    return false;
  }
  llvm_unreachable("impossible");
}

// CLR funclets use a special "Previous Stack Pointer Symbol" slot on the
// stack. It holds a pointer to the bottom of the root function frame. The
// establisher frame pointer passed to a nested funclet may point to the
// (mostly empty) frame of its parent funclet, but it will need to find
// the frame of the root function to access locals. To facilitate this,
// every funclet copies the pointer to the bottom of the root function
// frame into a PSPSym slot in its own (mostly empty) stack frame. Using the
// same offset for the PSPSym in the root function frame that's used in the
// funclets' frames allows each funclet to dynamically accept any ancestor
// frame as its establisher argument (the runtime doesn't guarantee the
// immediate parent for some reason lost to history), and also allows the GC,
// which uses the PSPSym for some bookkeeping, to find it in any funclet's
// frame with only a single offset reported for the entire method.
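//
// A rough sketch of the resulting arrangement (offsets illustrative only):
// every frame keeps the PSPSym at the same SP-relative offset, so
//   root frame:    [ locals ... | PSPSym = Initial-SP | outgoing args ]
//   each funclet:  [ PSPSym (copied from establisher) | outgoing args ]
// and a single reported offset locates the slot in all of them.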
unsigned
X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
  const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
  Register SPReg;
  int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
                                              /*IgnoreSPUpdates*/ true)
                   .getFixed();
  assert(Offset >= 0 && SPReg == TRI->getStackRegister());
  return static_cast<unsigned>(Offset);
}

unsigned
X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  // This is the size of the pushed CSRs.
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  // This is the size of callee saved XMMs.
  const auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
  unsigned XMMSize =
      WinEHXMMSlotInfo.size() * TRI->getSpillSize(X86::VR128RegClass);
  // This is the amount of stack a funclet needs to allocate.
  unsigned UsedSize;
  EHPersonality Personality =
      classifyEHPersonality(MF.getFunction().getPersonalityFn());
  if (Personality == EHPersonality::CoreCLR) {
    // CLR funclets need to hold enough space to include the PSPSym, at the
    // same offset from the stack pointer (immediately after the prolog) as it
    // resides at in the main function.
    UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
  } else {
    // Other funclets just need enough stack for outgoing call arguments.
    UsedSize = MF.getFrameInfo().getMaxCallFrameSize();
  }
  // RBP is not included in the callee saved register block. After pushing
  // RBP, everything is 16 byte aligned. Everything we allocate before an
  // outgoing call must also be 16 byte aligned.
  unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlign());
  // Subtract out the size of the callee saved registers. This is how much
  // stack each funclet will allocate.
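  // For a worked example with hypothetical numbers: CSSize = 8, UsedSize =
  // 32, no XMM CSRs, 16-byte stack alignment => alignTo(8 + 32, 16) = 48,
  // and the funclet allocates 48 + 0 - 8 = 40 bytes.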
  return FrameSizeMinusRBP + XMMSize - CSSize;
}

static bool isTailCallOpcode(unsigned Opc) {
  return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
         Opc == X86::TCRETURNmi || Opc == X86::TCRETURNri64 ||
         Opc == X86::TCRETURNdi64 || Opc == X86::TCRETURNmi64;
}

void X86FrameLowering::emitEpilogue(MachineFunction &MF,
                                    MachineBasicBlock &MBB) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator();
  MachineBasicBlock::iterator MBBI = Terminator;
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();
  // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
  const bool Is64BitILP32 = STI.isTarget64BitILP32();
  Register FramePtr = TRI->getFrameRegister(MF);
  Register MachineFramePtr =
      Is64BitILP32 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;

  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  bool NeedsWin64CFI =
      IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
  bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI.getStackSize();
  uint64_t MaxAlign = calculateMaxStackAlign(MF);
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  bool HasFP = hasFP(MF);
  uint64_t NumBytes = 0;

  bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() &&
                        !MF.getTarget().getTargetTriple().isOSWindows()) &&
                       MF.needsFrameMoves();

  if (IsFunclet) {
    assert(HasFP && "EH funclets without FP not yet implemented");
    NumBytes = getWinEHFuncletFrameSize(MF);
  } else if (HasFP) {
    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    NumBytes = FrameSize - CSSize;

    // Callee-saved registers were pushed on stack before the stack was
    // realigned.
    if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
      NumBytes = alignTo(FrameSize, MaxAlign);
  } else {
    NumBytes = StackSize - CSSize;
  }
  uint64_t SEHStackAllocAmt = NumBytes;

  // AfterPop is the position to insert .cfi_restore.
  MachineBasicBlock::iterator AfterPop = MBBI;
  if (HasFP) {
    if (X86FI->hasSwiftAsyncContext()) {
      // Discard the context.
      int Offset = 16 + mergeSPUpdates(MBB, MBBI, true);
      emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue*/ true);
    }
    // Pop EBP.
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::POP64r : X86::POP32r),
            MachineFramePtr)
        .setMIFlag(MachineInstr::FrameDestroy);

    // We need to reset FP to its untagged state on return. Bit 60 is
    // currently used to show the presence of an extended frame.
    if (X86FI->hasSwiftAsyncContext()) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::BTR64ri8), MachineFramePtr)
          .addUse(MachineFramePtr)
          .addImm(60)
          .setMIFlag(MachineInstr::FrameDestroy);
    }

    if (NeedsDwarfCFI) {
      unsigned DwarfStackPtr =
          TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, SlotSize));
      if (!MBB.succ_empty() && !MBB.isReturnBlock()) {
        unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
        BuildCFI(MBB, AfterPop, DL,
                 MCCFIInstruction::createRestore(nullptr, DwarfFramePtr));
        --MBBI;
        --AfterPop;
      }
      --MBBI;
    }
  }

  MachineBasicBlock::iterator FirstCSPop = MBBI;
  // Skip the callee-saved pop instructions.
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = std::prev(MBBI);
    unsigned Opc = PI->getOpcode();

    if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
      if ((Opc != X86::POP32r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
          (Opc != X86::POP64r || !PI->getFlag(MachineInstr::FrameDestroy)) &&
          (Opc != X86::BTR64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)) &&
          (Opc != X86::ADD64ri8 || !PI->getFlag(MachineInstr::FrameDestroy)))
        break;
      FirstCSPop = PI;
    }

    --MBBI;
  }
  MBBI = FirstCSPop;

  if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
    emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI.hasVarSizedObjects())
    NumBytes += mergeSPUpdates(MBB, MBBI, true);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off. The same applies when the
  // stack was realigned. Don't do this if this was a funclet epilogue, since
  // the funclets will not do realignment or dynamic stack allocation.
  if ((TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects()) &&
      !IsFunclet) {
    if (TRI->hasStackRealignment(MF))
      MBBI = FirstCSPop;
    unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
    uint64_t LEAAmount =
        IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;

    if (X86FI->hasSwiftAsyncContext())
      LEAAmount -= 16;

    // There are only two legal forms of epilogue:
    // - add SEHAllocationSize, %rsp
    // - lea SEHAllocationSize(%FramePtr), %rsp
    //
    // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
    // However, we may use this sequence if we have a frame pointer because the
    // effects of the prologue can safely be undone.
    if (LEAAmount != 0) {
      unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr), FramePtr,
                   false, LEAAmount);
      --MBBI;
    } else {
      unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr).addReg(FramePtr);
      --MBBI;
    }
  } else if (NumBytes) {
    // Adjust stack pointer back: ESP += numbytes.
    emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true);
    if (!hasFP(MF) && NeedsDwarfCFI) {
      // Define the current CFA rule to use the provided offset.
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::cfiDefCfaOffset(nullptr, CSSize + SlotSize));
    }
    --MBBI;
  }

  // The Windows unwinder will not invoke a function's exception handler if
  // the IP is either in the prologue or in the epilogue. This behavior causes
  // a problem when a call immediately precedes an epilogue, because the
  // return address points into the epilogue. To cope with that, we insert an
  // epilogue marker here, then replace it with a 'nop' if it ends up
  // immediately after a CALL in the final emitted code.
  if (NeedsWin64CFI && MF.hasWinCFI())
    BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));

  if (!hasFP(MF) && NeedsDwarfCFI) {
    MBBI = FirstCSPop;
    int64_t Offset = -CSSize - SlotSize;
    // Mark callee-saved pop instruction.
    // Define the current CFA rule to use the provided offset.
    while (MBBI != MBB.end()) {
      MachineBasicBlock::iterator PI = MBBI;
      unsigned Opc = PI->getOpcode();
      ++MBBI;
      if (Opc == X86::POP32r || Opc == X86::POP64r) {
        Offset += SlotSize;
        BuildCFI(MBB, MBBI, DL,
                 MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset));
      }
    }
  }

  // Emit DWARF info specifying the restores of the callee-saved registers.
  // For an epilogue with the return inside, or for any other block without a
  // successor, there is no need to generate .cfi_restore for the callee-saved
  // registers.
  if (NeedsDwarfCFI && !MBB.succ_empty() && !MBB.isReturnBlock()) {
    emitCalleeSavedFrameMoves(MBB, AfterPop, DL, false);
  }

  if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
    // Add the return addr area delta back since we are not tail calling.
    int Offset = -1 * X86FI->getTCReturnAddrDelta();
    assert(Offset >= 0 && "TCDelta should never be positive");
    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, Terminator, true);
      emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true);
    }
  }

  // Emit tilerelease for an AMX kernel.
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterClass *RC = TRI->getRegClass(X86::TILERegClassID);
  for (unsigned I = 0; I < RC->getNumRegs(); I++)
    if (!MRI.reg_nodbg_empty(X86::TMM0 + I)) {
      BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE));
      break;
    }
}

StackOffset X86FrameLowering::getFrameIndexReference(const MachineFunction &MF,
                                                     int FI,
                                                     Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  bool IsFixed = MFI.isFixedObjectIndex(FI);
  // We can't calculate offset from frame pointer if the stack is realigned,
  // so enforce usage of stack/base pointer. The base pointer is used when we
  // have dynamic allocas in addition to dynamic realignment.
  if (TRI->hasBasePointer(MF))
    FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
  else if (TRI->hasStackRealignment(MF))
    FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
  else
    FrameReg = TRI->getFrameRegister(MF);

  // Offset will hold the offset from the stack pointer at function entry to
  // the object.
  // We need to factor in additional offsets applied during the prologue to
  // the frame, base, and stack pointer depending on which is used.
  int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t StackSize = MFI.getStackSize();
  bool HasFP = hasFP(MF);
  bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
  int64_t FPDelta = 0;

  // In an x86 interrupt, remove the offset we added to account for the return
  // address from any stack object allocated in the caller's frame. Interrupts
  // do not have a standard return address. Fixed objects in the current frame,
  // such as SSE register spills, should not get this treatment.
  if (MF.getFunction().getCallingConv() == CallingConv::X86_INTR &&
      Offset >= 0) {
    Offset += getOffsetOfLocalArea();
  }

  if (IsWin64Prologue) {
    assert(!MFI.hasCalls() || (StackSize % 16) == 8);

    // Calculate required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    // If required, include space for extra hidden slot for stashing base
    // pointer.
    if (X86FI->getRestoreBasePointer())
      FrameSize += SlotSize;
    uint64_t NumBytes = FrameSize - CSSize;

    uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
    if (FI && FI == X86FI->getFAIndex())
      return StackOffset::getFixed(-SEHFrameOffset);

    // FPDelta is the offset from the "traditional" FP location of the old base
    // pointer followed by return address and the location required by the
    // restricted Win64 prologue.
    // Add FPDelta to all offsets below that go through the frame pointer.
    FPDelta = FrameSize - SEHFrameOffset;
    assert((!MFI.hasCalls() || (FPDelta % 16) == 0) &&
           "FPDelta isn't aligned per the Win64 ABI!");
  }

  if (TRI->hasBasePointer(MF)) {
    assert(HasFP && "VLAs and dynamic stack realign, but no FP?!");
    if (FI < 0) {
      // Skip the saved EBP.
      return StackOffset::getFixed(Offset + SlotSize + FPDelta);
    } else {
      assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize)));
      return StackOffset::getFixed(Offset + StackSize);
    }
  } else if (TRI->hasStackRealignment(MF)) {
    if (FI < 0) {
      // Skip the saved EBP.
      return StackOffset::getFixed(Offset + SlotSize + FPDelta);
    } else {
      assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize)));
      return StackOffset::getFixed(Offset + StackSize);
    }
    // FIXME: Support tail calls
  } else {
    if (!HasFP)
      return StackOffset::getFixed(Offset + StackSize);

    // Skip the saved EBP.
    Offset += SlotSize;

    // Skip the RETADDR move area
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0)
      Offset -= TailCallReturnAddrDelta;
  }

  return StackOffset::getFixed(Offset + FPDelta);
}

int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF,
                                              int FI,
                                              Register &FrameReg) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  const auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
  const auto it = WinEHXMMSlotInfo.find(FI);

  if (it == WinEHXMMSlotInfo.end())
    return getFrameIndexReference(MF, FI, FrameReg).getFixed();

  FrameReg = TRI->getStackRegister();
  return alignDown(MFI.getMaxCallFrameSize(), getStackAlign().value()) +
         it->second;
}

StackOffset
X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF, int FI,
                                           Register &FrameReg,
                                           int Adjustment) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  FrameReg = TRI->getStackRegister();
  return StackOffset::getFixed(MFI.getObjectOffset(FI) -
                               getOffsetOfLocalArea() + Adjustment);
}

StackOffset
X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
                                                 int FI, Register &FrameReg,
                                                 bool IgnoreSPUpdates) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  // Does not include any dynamic realign.
  const uint64_t StackSize = MFI.getStackSize();
  // LLVM arranges the stack as follows:
  //   ...
  //   ARG2
  //   ARG1
  //   RETADDR
  //   PUSH RBP   <-- RBP points here
  //   PUSH CSRs
  //   ~~~~~~~    <-- possible stack realignment (non-win64)
  //   ...
  //   STACK OBJECTS
  //   ...        <-- RSP after prologue points here
  //   ~~~~~~~    <-- possible stack realignment (win64)
  //
  // if (hasVarSizedObjects()):
  //   ...        <-- "base pointer" (ESI/RBX) points here
  //   DYNAMIC ALLOCAS
  //   ...        <-- RSP points here
  //
  // Case 1: In the simple case of no stack realignment and no dynamic
  // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
  // with fixed offsets from RSP.
  //
  // Case 2: In the case of stack realignment with no dynamic allocas, fixed
  // stack objects are addressed with RBP and regular stack objects with RSP.
  //
  // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
  // to address stack arguments for outgoing calls and nothing else. The "base
  // pointer" points to local variables, and RBP points to fixed objects.
  //
  // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
  // answer we give is relative to the SP after the prologue, and not the
  // SP in the middle of the function.

  if (MFI.isFixedObjectIndex(FI) && TRI->hasStackRealignment(MF) &&
      !STI.isTargetWin64())
    return getFrameIndexReference(MF, FI, FrameReg);

  // If !hasReservedCallFrame the function might have SP adjustment in the
  // body.  So, even though the offset is statically known, it depends on
  // where we are in the function.
  if (!IgnoreSPUpdates && !hasReservedCallFrame(MF))
    return getFrameIndexReference(MF, FI, FrameReg);

  // We don't handle tail calls, and shouldn't be seeing them either.
  assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
         "we don't handle this case!");

  // This is how the math works out:
  //
  //  %rsp grows (i.e. gets lower) left to right. Each box below is
  //  one word (eight bytes). Obj0 is the stack slot we're trying to
  //  get to.
  //
  //    ----------------------------------
  //    | BP | Obj0 | Obj1 | ... | ObjN |
  //    ----------------------------------
  //    ^    ^      ^               ^
  //    A    B      C               E
  //
  // A is the incoming stack pointer.
  // (B - A) is the local area offset (-8 for x86-64) [1]
  // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
  //
  // |(E - B)| is the StackSize (absolute value, positive). For a stack
  // that grows down, this works out to be (B - E). [3]
  //
  // E is also the value of %rsp after stack has been set up, and we
  // want (C - E) -- the value we can add to %rsp to get to Obj0.  Now
  // (C - E) == (C - A) - (B - A) + (B - E)
  //        { Using [1], [2] and [3] above }
  //     == getObjectOffset - LocalAreaOffset + StackSize

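  // Plugging in hypothetical numbers: getObjectOffset = -24, LocalAreaOffset
  // = -8, StackSize = 64 gives -24 - (-8) + 64 = 48, i.e. Obj0 sits 48 bytes
  // above the post-prologue %rsp.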
  return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
}

bool X86FrameLowering::assignCalleeSavedSpillSlots(
    MachineFunction &MF, const TargetRegisterInfo *TRI,
    std::vector<CalleeSavedInfo> &CSI) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  unsigned CalleeSavedFrameSize = 0;
  unsigned XMMCalleeSavedFrameSize = 0;
  auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
  int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();

  int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();

  if (TailCallReturnAddrDelta < 0) {
    // create RETURNADDR area
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MFI.CreateFixedObject(-TailCallReturnAddrDelta,
                          TailCallReturnAddrDelta - SlotSize, true);
  }

  // Spill the BasePtr if it's used.
  if (this->TRI->hasBasePointer(MF)) {
    // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
    if (MF.hasEHFunclets()) {
      int FI = MFI.CreateSpillStackObject(SlotSize, Align(SlotSize));
      X86FI->setHasSEHFramePtrSave(true);
      X86FI->setSEHFramePtrSaveIndex(FI);
    }
  }

  if (hasFP(MF)) {
    // emitPrologue always spills the frame register first thing.
    SpillSlotOffset -= SlotSize;
    MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);

    // The async context lives directly before the frame pointer, and we
    // allocate a second slot to preserve stack alignment.
    if (X86FI->hasSwiftAsyncContext()) {
      SpillSlotOffset -= SlotSize;
      MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
      SpillSlotOffset -= SlotSize;
    }

    // Since emitPrologue and emitEpilogue will handle spilling and restoring
    // of the frame register, we can delete it from the CSI list and not have
    // to worry about avoiding it later.
    Register FPReg = TRI->getFrameRegister(MF);
    for (unsigned i = 0; i < CSI.size(); ++i) {
      if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
        CSI.erase(CSI.begin() + i);
        break;
      }
    }
  }

  // Assign slots for GPRs. It increases frame size.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    SpillSlotOffset -= SlotSize;
    CalleeSavedFrameSize += SlotSize;

    int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
  }

  X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
  MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize);

  // Assign slots for XMMs.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    // If this is a k-register, make sure we look it up via the largest legal
    // type.
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
    unsigned Size = TRI->getSpillSize(*RC);
    Align Alignment = TRI->getSpillAlign(*RC);
    // Ensure alignment.
    assert(SpillSlotOffset < 0 &&
           "SpillSlotOffset should always be < 0 on X86");
    SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);

    // Spill into the slot.
    SpillSlotOffset -= Size;
    int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
    CSI[i - 1].setFrameIdx(SlotIndex);
    MFI.ensureMaxAlignment(Alignment);

    // Save the start offset and size of XMM in stack frame for funclets.
    if (X86::VR128RegClass.contains(Reg)) {
      WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
      XMMCalleeSavedFrameSize += Size;
    }
  }

  return true;
}

bool X86FrameLowering::spillCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(MI);

  // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI,
  // EDI for us, and there are no XMM CSRs on Win32.
  if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
    return true;

  // Push GPRs. It increases frame size.
  const MachineFunction &MF = *MBB.getParent();
  unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();

    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    const MachineRegisterInfo &MRI = MF.getRegInfo();
    bool isLiveIn = MRI.isLiveIn(Reg);
    if (!isLiveIn)
      MBB.addLiveIn(Reg);

    // Decide whether we can add a kill flag to the use.
    bool CanKill = !isLiveIn;
    // Check if any subregister is live-in.
    if (CanKill) {
      for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg) {
        if (MRI.isLiveIn(*AReg)) {
          CanKill = false;
          break;
        }
      }
    }

    // Do not set a kill flag on values that are also marked as live-in. This
    // happens with the @llvm.returnaddress intrinsic and with arguments
    // passed in callee saved registers.
    // Omitting the kill flags is conservatively correct even if the live-in
    // is not used after all.
    BuildMI(MBB, MI, DL, TII.get(Opc))
        .addReg(Reg, getKillRegState(CanKill))
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // Spill XMM regs. X86 has no push/pop for XMM registers, so they are
  // spilled to the stack frame instead.
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i - 1].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    // If this is a k-register, make sure we look it up via the largest legal
    // type.
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i - 1].getFrameIdx(), RC,
                            TRI);
    --MI;
    MI->setFlag(MachineInstr::FrameSetup);
    ++MI;
  }

  return true;
}

void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MBBI,
                                               MachineInstr *CatchRet) const {
  // SEH shouldn't use catchret.
  assert(!isAsynchronousEHPersonality(classifyEHPersonality(
             MBB.getParent()->getFunction().getPersonalityFn())) &&
         "SEH should not use CATCHRET");
  const DebugLoc &DL = CatchRet->getDebugLoc();
  MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();

  // Fill EAX/RAX with the address of the target block.
  if (STI.is64Bit()) {
    // LEA64r CatchRetTarget(%rip), %rax
    BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX)
        .addReg(X86::RIP)
        .addImm(0)
        .addReg(0)
        .addMBB(CatchRetTarget)
        .addReg(0);
  } else {
    // MOV32ri $CatchRetTarget, %eax
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addMBB(CatchRetTarget);
  }

  // Record that we've taken the address of CatchRetTarget and no longer just
  // reference it in a terminator.
  CatchRetTarget->setHasAddressTaken();
}

bool X86FrameLowering::restoreCalleeSavedRegisters(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
    MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
  if (CSI.empty())
    return false;

  if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
    // Don't restore CSRs in 32-bit EH funclets. Matches
    // spillCalleeSavedRegisters.
    if (STI.is32Bit())
      return true;
    // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
    // funclets. emitEpilogue transforms these to normal jumps.
    if (MI->getOpcode() == X86::CATCHRET) {
      const Function &F = MBB.getParent()->getFunction();
      bool IsSEH = isAsynchronousEHPersonality(
          classifyEHPersonality(F.getPersonalityFn()));
      if (IsSEH)
        return true;
    }
  }

  DebugLoc DL = MBB.findDebugLoc(MI);

  // Reload XMMs from stack frame.
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
      continue;

    // If this is a k-register, make sure we look it up via the largest legal
    // type.
    MVT VT = MVT::Other;
    if (X86::VK16RegClass.contains(Reg))
      VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
    TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), RC, TRI);
  }

  // POP GPRs.
  unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
      continue;

    BuildMI(MBB, MI, DL, TII.get(Opc), Reg)
        .setMIFlag(MachineInstr::FrameDestroy);
  }
  return true;
}

void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
                                            BitVector &SavedRegs,
                                            RegScavenger *RS) const {
  TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);

  // Spill the BasePtr if it's used.
  if (TRI->hasBasePointer(MF)) {
    Register BasePtr = TRI->getBaseRegister();
    if (STI.isTarget64BitILP32())
      BasePtr = getX86SubSuperRegister(BasePtr, 64);
    SavedRegs.set(BasePtr);
  }
}

static bool HasNestArgument(const MachineFunction *MF) {
  const Function &F = MF->getFunction();
  for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
       I++) {
    if (I->hasNestAttr() && !I->use_empty())
      return true;
  }
  return false;
}

/// GetScratchRegister - Get a temp register for performing work in the
/// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
/// and the properties of the function either one or two registers will be
/// needed. Set primary to true for the first register, false for the second.
static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64,
                                   const MachineFunction &MF, bool Primary) {
  CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();

  // Erlang stuff.
  if (CallingConvention == CallingConv::HiPE) {
    if (Is64Bit)
      return Primary ? X86::R14 : X86::R13;
    else
      return Primary ? X86::EBX : X86::EDI;
  }

  if (Is64Bit) {
    if (IsLP64)
      return Primary ? X86::R11 : X86::R12;
    else
      return Primary ? X86::R11D : X86::R12D;
  }

  bool IsNested = HasNestArgument(&MF);

  if (CallingConvention == CallingConv::X86_FastCall ||
      CallingConvention == CallingConv::Fast ||
      CallingConvention == CallingConv::Tail) {
    if (IsNested)
      report_fatal_error("Segmented stacks do not support fastcall with "
                         "nested functions.");
    return Primary ? X86::EAX : X86::ECX;
  }
  if (IsNested)
    return Primary ? X86::EDX : X86::EAX;
  return Primary ? X86::ECX : X86::EAX;
}

// The stack limit in the TCB is set to this many bytes above the actual stack
// limit.
static const uint64_t kSplitStackAvailable = 256;
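//
// For example (a sketch, assuming x86-64 Linux where the limit lives at
// %fs:0x70): a function needing at most kSplitStackAvailable bytes can skip
// the LEA below and compare the stack pointer itself against the limit,
//   cmpq %fs:0x70, %rsp
//   ja   .Lhas_enough_stack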

void X86FrameLowering::adjustForSegmentedStacks(
    MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
  MachineFrameInfo &MFI = MF.getFrameInfo();
  uint64_t StackSize;
  unsigned TlsReg, TlsOffset;
  DebugLoc DL;

  // To support shrink-wrapping we would need to insert the new blocks
  // at the right place and update the branches to PrologueMBB.
  assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");

  unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
  assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
         "Scratch register is live-in");

  if (MF.getFunction().isVarArg())
    report_fatal_error("Segmented stacks do not support vararg functions.");
  if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
      !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
      !STI.isTargetDragonFly())
    report_fatal_error("Segmented stacks not supported on this platform.");

  // Eventually StackSize will be calculated by a link-time pass, which will
  // also decide whether checking code needs to be injected into this
  // particular prologue.
  StackSize = MFI.getStackSize();

  // Do not generate a prologue for leaf functions with a stack of size zero.
  // For non-leaf functions we have to allow for the possibility that the
  // call is to a non-split function, as in PR37807. This function could also
  // take the address of a non-split function. When the linker tries to
  // adjust its non-existent prologue, it would fail with an error. Mark the
  // object file so that such failures are not errors. See this Go language
  // bug-report: https://go-review.googlesource.com/c/go/+/148819/
  if (StackSize == 0 && !MFI.hasTailCall()) {
    MF.getMMI().setHasNosplitStack(true);
    return;
  }

  MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  bool IsNested = false;

  // We need to know if the function has a nest argument only in 64 bit mode.
  if (Is64Bit)
    IsNested = HasNestArgument(&MF);

  // The MOV R10, RAX needs to be in a different block, since the RET we emit
  // in allocMBB needs to be the last (terminating) instruction.

  for (const auto &LI : PrologueMBB.liveins()) {
    allocMBB->addLiveIn(LI);
    checkMBB->addLiveIn(LI);
  }

  if (IsNested)
    allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);

  MF.push_front(allocMBB);
  MF.push_front(checkMBB);

  // When the frame size is less than 256 we just compare the stack
  // boundary directly to the value of the stack pointer, per gcc.
  bool CompareStackPointer = StackSize < kSplitStackAvailable;
2812
2813 // Read the limit off the current stacklet off the stack_guard location.
2814 if (Is64Bit) {
2815 if (STI.isTargetLinux()) {
2816 TlsReg = X86::FS;
2817 TlsOffset = IsLP64 ? 0x70 : 0x40;
2818 } else if (STI.isTargetDarwin()) {
2819 TlsReg = X86::GS;
2820 TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
2821 } else if (STI.isTargetWin64()) {
2822 TlsReg = X86::GS;
2823 TlsOffset = 0x28; // pvArbitrary, reserved for application use
2824 } else if (STI.isTargetFreeBSD()) {
2825 TlsReg = X86::FS;
2826 TlsOffset = 0x18;
2827 } else if (STI.isTargetDragonFly()) {
2828 TlsReg = X86::FS;
2829 TlsOffset = 0x20; // use tls_tcb.tcb_segstack
2830 } else {
2831 report_fatal_error("Segmented stacks not supported on this platform.");
2832 }
2833
2834 if (CompareStackPointer)
2835 ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
2836 else
2837 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r), ScratchReg).addReg(X86::RSP)
2838 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2839
2840 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm)).addReg(ScratchReg)
2841 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
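    // For the Linux LP64 case the sequence built above is roughly (AT&T
    // syntax; %r11 stands in for whatever scratch register was chosen):
    //   leaq  -StackSize(%rsp), %r11
    //   cmpq  %fs:0x70, %r11
    // followed by the 'ja' emitted further below.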
2842 } else {
2843 if (STI.isTargetLinux()) {
2844 TlsReg = X86::GS;
2845 TlsOffset = 0x30;
2846 } else if (STI.isTargetDarwin()) {
2847 TlsReg = X86::GS;
2848 TlsOffset = 0x48 + 90*4;
2849 } else if (STI.isTargetWin32()) {
2850 TlsReg = X86::FS;
2851 TlsOffset = 0x14; // pvArbitrary, reserved for application use
2852 } else if (STI.isTargetDragonFly()) {
2853 TlsReg = X86::FS;
2854 TlsOffset = 0x10; // use tls_tcb.tcb_segstack
2855 } else if (STI.isTargetFreeBSD()) {
2856 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
2857 } else {
2858 report_fatal_error("Segmented stacks not supported on this platform.");
2859 }
2860
2861 if (CompareStackPointer)
2862 ScratchReg = X86::ESP;
2863 else
2864 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
2865 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
2866
2867 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
2868 STI.isTargetDragonFly()) {
2869 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
2870 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
2871 } else if (STI.isTargetDarwin()) {
2872
2873 // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
2874 unsigned ScratchReg2;
2875 bool SaveScratch2;
2876 if (CompareStackPointer) {
2877 // The primary scratch register is available for holding the TLS offset.
2878 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
2879 SaveScratch2 = false;
2880 } else {
2881 // Need to use a second register to hold the TLS offset
2882 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
2883
2884 // Unfortunately, with fastcc the second scratch register may hold an
2885 // argument.
2886 SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
2887 }
2888
2889 // If Scratch2 is live-in then it needs to be saved.
2890 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
2891 "Scratch register is live-in and not saved");
2892
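      // The sequence built below is roughly (AT&T syntax; the registers shown
      // are illustrative, the actual ones come from GetScratchRegister):
      //   pushl %ecx              # only if ScratchReg2 was live-in
      //   movl  $TlsOffset, %ecx
      //   cmpl  %gs:(%ecx), %eax
      //   popl  %ecx              # only if it was pushed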
2893 if (SaveScratch2)
2894 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
2895 .addReg(ScratchReg2, RegState::Kill);
2896
2897 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
2898 .addImm(TlsOffset);
2899 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
2900 .addReg(ScratchReg)
2901 .addReg(ScratchReg2).addImm(1).addReg(0)
2902 .addImm(0)
2903 .addReg(TlsReg);
2904
2905 if (SaveScratch2)
2906 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
2907 }
2908 }
2909
2910   // This jump is taken if SP - StackSize is strictly above the stacklet limit
2911   // (COND_A); it resumes normal execution of the function body.
2912 BuildMI(checkMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_A);
2913
2914 // On 32 bit we first push the arguments size and then the frame size. On 64
2915 // bit, we pass the stack frame size in r10 and the argument size in r11.
2916 if (Is64Bit) {
2917     // Functions with nested arguments use R10, so it needs to be saved across
2918     // the call to __morestack.
2919
2920 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
2921 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
2922 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
2923 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
2924 const unsigned MOVri = IsLP64 ? X86::MOV64ri : X86::MOV32ri;
2925
2926 if (IsNested)
2927 BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
2928
2929 BuildMI(allocMBB, DL, TII.get(MOVri), Reg10)
2930 .addImm(StackSize);
2931 BuildMI(allocMBB, DL, TII.get(MOVri), Reg11)
2932 .addImm(X86FI->getArgumentStackSize());
2933 } else {
2934 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2935 .addImm(X86FI->getArgumentStackSize());
2936 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
2937 .addImm(StackSize);
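    // i.e., roughly:
    //   pushl $ArgumentStackSize
    //   pushl $StackSize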
2938 }
2939
2940 // __morestack is in libgcc
2941 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
2942 // Under the large code model, we cannot assume that __morestack lives
2943 // within 2^31 bytes of the call site, so we cannot use pc-relative
2944 // addressing. We cannot perform the call via a temporary register,
2945 // as the rax register may be used to store the static chain, and all
2946 // other suitable registers may be either callee-save or used for
2947 // parameter passing. We cannot use the stack at this point either
2948 // because __morestack manipulates the stack directly.
2949 //
2950 // To avoid these issues, perform an indirect call via a read-only memory
2951 // location containing the address.
2952 //
2953 // This solution is not perfect, as it assumes that the .rodata section
2954 // is laid out within 2^31 bytes of each function body, but this seems
2955 // to be sufficient for JIT.
2956     // FIXME: Add retpoline support and remove the error here.
2957 if (STI.useIndirectThunkCalls())
2958 report_fatal_error("Emitting morestack calls on 64-bit with the large "
2959 "code model and thunks not yet implemented.");
2960 BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
2961 .addReg(X86::RIP)
2962 .addImm(0)
2963 .addReg(0)
2964 .addExternalSymbol("__morestack_addr")
2965 .addReg(0);
2966 MF.getMMI().setUsesMorestackAddr(true);
2967 } else {
2968 if (Is64Bit)
2969 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
2970 .addExternalSymbol("__morestack");
2971 else
2972 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
2973 .addExternalSymbol("__morestack");
2974 }
2975
2976 if (IsNested)
2977 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
2978 else
2979 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
2980
2981 allocMBB->addSuccessor(&PrologueMBB);
2982
2983 checkMBB->addSuccessor(allocMBB, BranchProbability::getZero());
2984 checkMBB->addSuccessor(&PrologueMBB, BranchProbability::getOne());
2985
2986 #ifdef EXPENSIVE_CHECKS
2987 MF.verify();
2988 #endif
2989 }
2990
2991 /// Lookup an ERTS parameter in the !hipe.literals named metadata node.
2992 /// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
2993 /// to fields it needs, through a named metadata node "hipe.literals" containing
2994 /// name-value pairs.
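/// For example, the module-level metadata is expected to look like this
/// (the literal values here are purely illustrative):
///   !hipe.literals = !{!0, !1}
///   !0 = !{!"P_NSP_LIMIT", i32 152}
///   !1 = !{!"AMD64_LEAF_WORDS", i32 24}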
2995 static unsigned getHiPELiteral(
2996 NamedMDNode *HiPELiteralsMD, const StringRef LiteralName) {
2997 for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
2998 MDNode *Node = HiPELiteralsMD->getOperand(i);
2999 if (Node->getNumOperands() != 2) continue;
3000 MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
3001 ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
3002 if (!NodeName || !NodeVal) continue;
3003 ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
3004 if (ValConst && NodeName->getString() == LiteralName) {
3005 return ValConst->getZExtValue();
3006 }
3007 }
3008
3009 report_fatal_error("HiPE literal " + LiteralName
3010 + " required but not provided");
3011 }
3012
3013 // Return true if there are no non-ehpad successors to MBB and there are no
3014 // non-meta instructions between MBBI and MBB.end().
3015 static bool blockEndIsUnreachable(const MachineBasicBlock &MBB,
3016 MachineBasicBlock::const_iterator MBBI) {
3017 return llvm::all_of(
3018 MBB.successors(),
3019 [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) &&
3020 std::all_of(MBBI, MBB.end(), [](const MachineInstr &MI) {
3021 return MI.isMetaInstruction();
3022 });
3023 }
3024
3025 /// Erlang programs may need a special prologue to handle the stack size they
3026 /// might need at runtime. That is because Erlang/OTP does not implement a C
3027 /// stack but uses a custom implementation of a hybrid stack/heap architecture.
3028 /// (for more information see Eric Stenman's Ph.D. thesis:
3029 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
3030 ///
3031 /// CheckStack:
3032 /// temp0 = sp - MaxStack
3033 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
3034 /// OldStart:
3035 /// ...
3036 /// IncStack:
3037 /// call inc_stack # doubles the stack space
3038 /// temp0 = sp - MaxStack
3039 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
3040 void X86FrameLowering::adjustForHiPEPrologue(
3041 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
3042 MachineFrameInfo &MFI = MF.getFrameInfo();
3043 DebugLoc DL;
3044
3045 // To support shrink-wrapping we would need to insert the new blocks
3046 // at the right place and update the branches to PrologueMBB.
3047 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
3048
3049 // HiPE-specific values
3050 NamedMDNode *HiPELiteralsMD = MF.getMMI().getModule()
3051 ->getNamedMetadata("hipe.literals");
3052 if (!HiPELiteralsMD)
3053 report_fatal_error(
3054 "Can't generate HiPE prologue without runtime parameters");
3055 const unsigned HipeLeafWords
3056 = getHiPELiteral(HiPELiteralsMD,
3057 Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
3058 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
3059 const unsigned Guaranteed = HipeLeafWords * SlotSize;
3060 unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs ?
3061 MF.getFunction().arg_size() - CCRegisteredArgs : 0;
3062 unsigned MaxStack = MFI.getStackSize() + CallerStkArity*SlotSize + SlotSize;
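  // For example, on x86-64 (SlotSize == 8, CCRegisteredArgs == 6) a function
  // taking 9 arguments with a 40-byte frame has CallerStkArity == 3, giving
  // MaxStack == 40 + 3*8 + 8 == 72 before the call scan below.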
3063
3064 assert(STI.isTargetLinux() &&
3065 "HiPE prologue is only supported on Linux operating systems.");
3066
3067 // Compute the largest caller's frame that is needed to fit the callees'
3068 // frames. This 'MaxStack' is computed from:
3069 //
3070 // a) the fixed frame size, which is the space needed for all spilled temps,
3071 // b) outgoing on-stack parameter areas, and
3072 // c) the minimum stack space this function needs to make available for the
3073 // functions it calls (a tunable ABI property).
3074 if (MFI.hasCalls()) {
3075 unsigned MoreStackForCalls = 0;
3076
3077 for (auto &MBB : MF) {
3078 for (auto &MI : MBB) {
3079 if (!MI.isCall())
3080 continue;
3081
3082 // Get callee operand.
3083 const MachineOperand &MO = MI.getOperand(0);
3084
3085 // Only take account of global function calls (no closures etc.).
3086 if (!MO.isGlobal())
3087 continue;
3088
3089 const Function *F = dyn_cast<Function>(MO.getGlobal());
3090 if (!F)
3091 continue;
3092
3093 // Do not update 'MaxStack' for primitive and built-in functions
3094 // (encoded with names either starting with "erlang."/"bif_" or not
3095 // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
3096 // "_", such as the BIF "suspend_0") as they are executed on another
3097 // stack.
3098 if (F->getName().find("erlang.") != StringRef::npos ||
3099 F->getName().find("bif_") != StringRef::npos ||
3100 F->getName().find_first_of("._") == StringRef::npos)
3101 continue;
3102
3103 unsigned CalleeStkArity =
3104 F->arg_size() > CCRegisteredArgs ? F->arg_size()-CCRegisteredArgs : 0;
3105 if (HipeLeafWords - 1 > CalleeStkArity)
3106 MoreStackForCalls = std::max(MoreStackForCalls,
3107 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
3108 }
3109 }
3110 MaxStack += MoreStackForCalls;
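    // For example, on x86-64 with HipeLeafWords == 24 (an illustrative
    // value), a callee taking 8 arguments has CalleeStkArity == 2 and
    // reserves (24 - 1 - 2) * 8 == 168 extra bytes via MoreStackForCalls.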
3111 }
3112
3113   // If the stack frame needed is larger than the guaranteed amount, then
3114   // runtime checks and calls to the "inc_stack_0" BIF should be inserted in
  // the assembly prologue.
3115 if (MaxStack > Guaranteed) {
3116 MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
3117 MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
3118
3119 for (const auto &LI : PrologueMBB.liveins()) {
3120 stackCheckMBB->addLiveIn(LI);
3121 incStackMBB->addLiveIn(LI);
3122 }
3123
3124 MF.push_front(incStackMBB);
3125 MF.push_front(stackCheckMBB);
3126
3127 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
3128 unsigned LEAop, CMPop, CALLop;
3129 SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
3130 if (Is64Bit) {
3131 SPReg = X86::RSP;
3132 PReg = X86::RBP;
3133 LEAop = X86::LEA64r;
3134 CMPop = X86::CMP64rm;
3135 CALLop = X86::CALL64pcrel32;
3136 } else {
3137 SPReg = X86::ESP;
3138 PReg = X86::EBP;
3139 LEAop = X86::LEA32r;
3140 CMPop = X86::CMP32rm;
3141 CALLop = X86::CALLpcrel32;
3142 }
3143
3144 ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
3145 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
3146 "HiPE prologue scratch register is live-in");
3147
3148 // Create new MBB for StackCheck:
3149 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg),
3150 SPReg, false, -MaxStack);
3151     // SPLimitOffset is in a fixed heap location (pointed to by BP).
3152 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop))
3153 .addReg(ScratchReg), PReg, false, SPLimitOffset);
3154 BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1)).addMBB(&PrologueMBB).addImm(X86::COND_AE);
3155
3156 // Create new MBB for IncStack:
3157     BuildMI(incStackMBB, DL, TII.get(CALLop))
3158         .addExternalSymbol("inc_stack_0");
3159 addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg),
3160 SPReg, false, -MaxStack);
3161 addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop))
3162 .addReg(ScratchReg), PReg, false, SPLimitOffset);
3163 BuildMI(incStackMBB, DL, TII.get(X86::JCC_1)).addMBB(incStackMBB).addImm(X86::COND_LE);
3164
3165 stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
3166 stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
3167 incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
3168 incStackMBB->addSuccessor(incStackMBB, {1, 100});
3169 }
3170 #ifdef EXPENSIVE_CHECKS
3171 MF.verify();
3172 #endif
3173 }
3174
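// Try to replace a small post-call SP adjustment such as 'add esp, 8' with an
// equivalent, shorter-to-encode sequence of pops into registers the call left
// dead, e.g. 'pop ecx; pop edx' (the exact registers depend on the call's
// register mask).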
3175 bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
3176 MachineBasicBlock::iterator MBBI,
3177 const DebugLoc &DL,
3178 int Offset) const {
3179 if (Offset <= 0)
3180 return false;
3181
3182 if (Offset % SlotSize)
3183 return false;
3184
3185 int NumPops = Offset / SlotSize;
3186 // This is only worth it if we have at most 2 pops.
3187 if (NumPops != 1 && NumPops != 2)
3188 return false;
3189
3190 // Handle only the trivial case where the adjustment directly follows
3191 // a call. This is the most common one, anyway.
3192 if (MBBI == MBB.begin())
3193 return false;
3194 MachineBasicBlock::iterator Prev = std::prev(MBBI);
3195 if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
3196 return false;
3197
3198 unsigned Regs[2];
3199 unsigned FoundRegs = 0;
3200
3201 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3202 const MachineOperand &RegMask = Prev->getOperand(1);
3203
3204 auto &RegClass =
3205 Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
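  // Using the NOREX classes keeps each POP to a one-byte encoding, and the
  // NOSP classes exclude the stack pointer itself from the candidates.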
3206 // Try to find up to NumPops free registers.
3207 for (auto Candidate : RegClass) {
3208 // Poor man's liveness:
3209 // Since we're immediately after a call, any register that is clobbered
3210 // by the call and not defined by it can be considered dead.
3211 if (!RegMask.clobbersPhysReg(Candidate))
3212 continue;
3213
3214 // Don't clobber reserved registers
3215 if (MRI.isReserved(Candidate))
3216 continue;
3217
3218 bool IsDef = false;
3219 for (const MachineOperand &MO : Prev->implicit_operands()) {
3220 if (MO.isReg() && MO.isDef() &&
3221 TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
3222 IsDef = true;
3223 break;
3224 }
3225 }
3226
3227 if (IsDef)
3228 continue;
3229
3230 Regs[FoundRegs++] = Candidate;
3231 if (FoundRegs == (unsigned)NumPops)
3232 break;
3233 }
3234
3235 if (FoundRegs == 0)
3236 return false;
3237
3238 // If we found only one free register, but need two, reuse the same one twice.
3239 while (FoundRegs < (unsigned)NumPops)
3240 Regs[FoundRegs++] = Regs[0];
3241
3242 for (int i = 0; i < NumPops; ++i)
3243 BuildMI(MBB, MBBI, DL,
3244 TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r), Regs[i]);
3245
3246 return true;
3247 }
3248
3249 MachineBasicBlock::iterator X86FrameLowering::
3250 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
3251 MachineBasicBlock::iterator I) const {
3252 bool reserveCallFrame = hasReservedCallFrame(MF);
3253 unsigned Opcode = I->getOpcode();
3254 bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
3255 DebugLoc DL = I->getDebugLoc(); // copy DebugLoc as I will be erased.
3256 uint64_t Amount = TII.getFrameSize(*I);
3257 uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;
3258 I = MBB.erase(I);
3259 auto InsertPos = skipDebugInstructionsForward(I, MBB.end());
3260
3261 // Try to avoid emitting dead SP adjustments if the block end is unreachable,
3262 // typically because the function is marked noreturn (abort, throw,
3263 // assert_fail, etc).
3264 if (isDestroy && blockEndIsUnreachable(MBB, I))
3265 return I;
3266
3267 if (!reserveCallFrame) {
3268     // If the stack pointer can be changed after prologue, turn the
3269     // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
3270     // adjcallstackup instruction into 'add ESP, <amt>'
3271
3272 // We need to keep the stack aligned properly. To do this, we round the
3273 // amount of space needed for the outgoing arguments up to the next
3274 // alignment boundary.
3275 Amount = alignTo(Amount, getStackAlign());
3276
3277 const Function &F = MF.getFunction();
3278 bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
3279 bool DwarfCFI = !WindowsCFI && MF.needsFrameMoves();
3280
3281 // If we have any exception handlers in this function, and we adjust
3282 // the SP before calls, we may need to indicate this to the unwinder
3283 // using GNU_ARGS_SIZE. Note that this may be necessary even when
3284 // Amount == 0, because the preceding function may have set a non-0
3285 // GNU_ARGS_SIZE.
3286 // TODO: We don't need to reset this between subsequent functions,
3287 // if it didn't change.
3288 bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();
3289
3290 if (HasDwarfEHHandlers && !isDestroy &&
3291 MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
3292 BuildCFI(MBB, InsertPos, DL,
3293 MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
3294
3295 if (Amount == 0)
3296 return I;
3297
3298 // Factor out the amount that gets handled inside the sequence
3299 // (Pushes of argument for frame setup, callee pops for frame destroy)
3300 Amount -= InternalAmt;
3301
3302 // TODO: This is needed only if we require precise CFA.
3303 // If this is a callee-pop calling convention, emit a CFA adjust for
3304 // the amount the callee popped.
3305 if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
3306 BuildCFI(MBB, InsertPos, DL,
3307 MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
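    // For example, a 32-bit stdcall callee that pops its own 8 bytes of
    // arguments yields InternalAmt == 8, so the CFA offset is adjusted by -8
    // here to stay in sync with the implicitly raised ESP.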
3308
3309 // Add Amount to SP to destroy a frame, or subtract to setup.
3310 int64_t StackAdjustment = isDestroy ? Amount : -Amount;
3311
3312 if (StackAdjustment) {
3313 // Merge with any previous or following adjustment instruction. Note: the
3314 // instructions merged with here do not have CFI, so their stack
3315 // adjustments do not feed into CfaAdjustment.
3316 StackAdjustment += mergeSPUpdates(MBB, InsertPos, true);
3317 StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
3318
3319 if (StackAdjustment) {
3320 if (!(F.hasMinSize() &&
3321 adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
3322 BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
3323 /*InEpilogue=*/false);
3324 }
3325 }
3326
3327 if (DwarfCFI && !hasFP(MF)) {
3328 // If we don't have FP, but need to generate unwind information,
3329 // we need to set the correct CFA offset after the stack adjustment.
3330 // How much we adjust the CFA offset depends on whether we're emitting
3331 // CFI only for EH purposes or for debugging. EH only requires the CFA
3332 // offset to be correct at each call site, while for debugging we want
3333 // it to be more precise.
3334
3335 int64_t CfaAdjustment = -StackAdjustment;
3336 // TODO: When not using precise CFA, we also need to adjust for the
3337 // InternalAmt here.
3338 if (CfaAdjustment) {
3339 BuildCFI(MBB, InsertPos, DL,
3340 MCCFIInstruction::createAdjustCfaOffset(nullptr,
3341 CfaAdjustment));
3342 }
3343 }
3344
3345 return I;
3346 }
3347
3348 if (InternalAmt) {
3349 MachineBasicBlock::iterator CI = I;
3350 MachineBasicBlock::iterator B = MBB.begin();
3351 while (CI != B && !std::prev(CI)->isCall())
3352 --CI;
3353 BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
3354 }
3355
3356 return I;
3357 }
3358
3359 bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
3360 assert(MBB.getParent() && "Block is not attached to a function!");
3361 const MachineFunction &MF = *MBB.getParent();
3362 if (!MBB.isLiveIn(X86::EFLAGS))
3363 return true;
3364
3365 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
3366 return !TRI->hasStackRealignment(MF) && !X86FI->hasSwiftAsyncContext();
3367 }
3368
3369 bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
3370 assert(MBB.getParent() && "Block is not attached to a function!");
3371
3372   // Win64 has strict requirements in terms of epilogues and we are
3373 // not taking a chance at messing with them.
3374 // I.e., unless this block is already an exit block, we can't use
3375 // it as an epilogue.
3376 if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
3377 return false;
3378
3379 // Swift async context epilogue has a BTR instruction that clobbers parts of
3380 // EFLAGS.
3381 const MachineFunction &MF = *MBB.getParent();
3382 if (MF.getInfo<X86MachineFunctionInfo>()->hasSwiftAsyncContext())
3383 return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
3384
3385 if (canUseLEAForSPInEpilogue(*MBB.getParent()))
3386 return true;
3387
3388   // If we cannot use LEA to adjust SP, we may need to use ADD, which
3389   // clobbers EFLAGS. Check that we do not need to preserve it;
3390   // otherwise, conservatively assume it is not
3391   // safe to insert the epilogue here.
3392 return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
3393 }
3394
3395 bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
3396 // If we may need to emit frameless compact unwind information, give
3397 // up as this is currently broken: PR25614.
3398 bool CompactUnwind =
3399 MF.getMMI().getContext().getObjectFileInfo()->getCompactUnwindSection() !=
3400 nullptr;
3401 return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF) ||
3402 !CompactUnwind) &&
3403 // The lowering of segmented stack and HiPE only support entry
3404 // blocks as prologue blocks: PR26107. This limitation may be
3405 // lifted if we fix:
3406 // - adjustForSegmentedStacks
3407 // - adjustForHiPEPrologue
3408 MF.getFunction().getCallingConv() != CallingConv::HiPE &&
3409 !MF.shouldSplitStack();
3410 }
3411
3412 MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
3413 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
3414 const DebugLoc &DL, bool RestoreSP) const {
3415 assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
3416 assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
3417 assert(STI.is32Bit() && !Uses64BitFramePtr &&
3418 "restoring EBP/ESI on non-32-bit target");
3419
3420 MachineFunction &MF = *MBB.getParent();
3421 Register FramePtr = TRI->getFrameRegister(MF);
3422 Register BasePtr = TRI->getBaseRegister();
3423 WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
3424 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
3425 MachineFrameInfo &MFI = MF.getFrameInfo();
3426
3427 // FIXME: Don't set FrameSetup flag in catchret case.
3428
3429 int FI = FuncInfo.EHRegNodeFrameIndex;
3430 int EHRegSize = MFI.getObjectSize(FI);
3431
3432 if (RestoreSP) {
3433 // MOV32rm -EHRegSize(%ebp), %esp
3434 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
3435 X86::EBP, true, -EHRegSize)
3436 .setMIFlag(MachineInstr::FrameSetup);
3437 }
3438
3439 Register UsedReg;
3440 int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg).getFixed();
3441 int EndOffset = -EHRegOffset - EHRegSize;
3442 FuncInfo.EHRegNodeEndOffset = EndOffset;
3443
3444 if (UsedReg == FramePtr) {
3445 // ADD $offset, %ebp
3446 unsigned ADDri = getADDriOpcode(false, EndOffset);
3447 BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
3448 .addReg(FramePtr)
3449 .addImm(EndOffset)
3450 .setMIFlag(MachineInstr::FrameSetup)
3451 ->getOperand(3)
3452 .setIsDead();
3453 assert(EndOffset >= 0 &&
3454 "end of registration object above normal EBP position!");
3455 } else if (UsedReg == BasePtr) {
3456 // LEA offset(%ebp), %esi
3457 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
3458 FramePtr, false, EndOffset)
3459 .setMIFlag(MachineInstr::FrameSetup);
3460 // MOV32rm SavedEBPOffset(%esi), %ebp
3461 assert(X86FI->getHasSEHFramePtrSave());
3462 int Offset =
3463 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
3464 .getFixed();
3465 assert(UsedReg == BasePtr);
3466 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
3467 UsedReg, true, Offset)
3468 .setMIFlag(MachineInstr::FrameSetup);
3469 } else {
3470 llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
3471 }
3472 return MBBI;
3473 }
3474
3475 int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
3476 return TRI->getSlotSize();
3477 }
3478
3479 Register
3480 X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
3481 return TRI->getDwarfRegNum(StackPtr, true);
3482 }
3483
3484 namespace {
3485 // Struct used by orderFrameObjects to help sort the stack objects.
3486 struct X86FrameSortingObject {
3487 bool IsValid = false; // true if we care about this Object.
3488 unsigned ObjectIndex = 0; // Index of Object into MFI list.
3489 unsigned ObjectSize = 0; // Size of Object in bytes.
3490 Align ObjectAlignment = Align(1); // Alignment of Object in bytes.
3491 unsigned ObjectNumUses = 0; // Object static number of uses.
3492 };
3493
3494 // The comparison function we use for llvm::stable_sort to order our local
3495 // stack symbols. The current algorithm is to use an estimated
3496 // "density". This takes into consideration the size and number of
3497 // uses each object has in order to roughly minimize code size.
3498 // So, for example, an object of size 16B that is referenced 5 times
3499 // will get higher priority than 4 4B objects referenced 1 time each.
3500 // It's not perfect and we may be able to squeeze a few more bytes out of
3501 // it (for example : 0(esp) requires fewer bytes, symbols allocated at the
3502 // fringe end can have special consideration, given their size is less
3503 // important, etc.), but the algorithmic complexity grows too much to be
3504 // worth the extra gains we get. This gets us pretty close.
3505 // The final order leaves us with objects with highest priority going
3506 // at the end of our list.
3507 struct X86FrameSortingComparator {
3508   inline bool operator()(const X86FrameSortingObject &A,
3509 const X86FrameSortingObject &B) const {
3510 uint64_t DensityAScaled, DensityBScaled;
3511
3512 // For consistency in our comparison, all invalid objects are placed
3513 // at the end. This also allows us to stop walking when we hit the
3514 // first invalid item after it's all sorted.
3515 if (!A.IsValid)
3516 return false;
3517 if (!B.IsValid)
3518 return true;
3519
3520     // The density is calculated as:
3521 // (double)DensityA = A.ObjectNumUses / A.ObjectSize
3522 // (double)DensityB = B.ObjectNumUses / B.ObjectSize
3523 // Since this approach may cause inconsistencies in
3524 // the floating point <, >, == comparisons, depending on the floating
3525 // point model with which the compiler was built, we're going
3526 // to scale both sides by multiplying with
3527 // A.ObjectSize * B.ObjectSize. This ends up factoring away
3528 // the division and, with it, the need for any floating point
3529 // arithmetic.
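    // Worked example: A = {5 uses, 16 bytes} and B = {1 use, 4 bytes} give
    // DensityAScaled == 5 * 4 == 20 and DensityBScaled == 1 * 16 == 16, so A
    // is denser and sorts after B, toward the high-priority end of the list.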
3530 DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
3531 static_cast<uint64_t>(B.ObjectSize);
3532 DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
3533 static_cast<uint64_t>(A.ObjectSize);
3534
3535 // If the two densities are equal, prioritize highest alignment
3536 // objects. This allows for similar alignment objects
3537 // to be packed together (given the same density).
3538 // There's room for improvement here, also, since we can pack
3539 // similar alignment (different density) objects next to each
3540 // other to save padding. This will also require further
3541 // complexity/iterations, and the overall gain isn't worth it,
3542 // in general. Something to keep in mind, though.
3543 if (DensityAScaled == DensityBScaled)
3544 return A.ObjectAlignment < B.ObjectAlignment;
3545
3546 return DensityAScaled < DensityBScaled;
3547 }
3548 };
3549 } // namespace
3550
3551 // Order the symbols in the local stack.
3552 // We want to place the local stack objects in some sort of sensible order.
3553 // The heuristic we use is to try and pack them according to static number
3554 // of uses and size of object in order to minimize code size.
3555 void X86FrameLowering::orderFrameObjects(
3556 const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
3557 const MachineFrameInfo &MFI = MF.getFrameInfo();
3558
3559 // Don't waste time if there's nothing to do.
3560 if (ObjectsToAllocate.empty())
3561 return;
3562
3563 // Create an array of all MFI objects. We won't need all of these
3564 // objects, but we're going to create a full array of them to make
3565 // it easier to index into when we're counting "uses" down below.
3566 // We want to be able to easily/cheaply access an object by simply
3567 // indexing into it, instead of having to search for it every time.
3568 std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());
3569
3570 // Walk the objects we care about and mark them as such in our working
3571 // struct.
3572 for (auto &Obj : ObjectsToAllocate) {
3573 SortingObjects[Obj].IsValid = true;
3574 SortingObjects[Obj].ObjectIndex = Obj;
3575 SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj);
3576 // Set the size.
3577 int ObjectSize = MFI.getObjectSize(Obj);
3578 if (ObjectSize == 0)
3579 // Variable size. Just use 4.
3580 SortingObjects[Obj].ObjectSize = 4;
3581 else
3582 SortingObjects[Obj].ObjectSize = ObjectSize;
3583 }
3584
3585 // Count the number of uses for each object.
3586 for (auto &MBB : MF) {
3587 for (auto &MI : MBB) {
3588 if (MI.isDebugInstr())
3589 continue;
3590 for (const MachineOperand &MO : MI.operands()) {
3591 // Check to see if it's a local stack symbol.
3592 if (!MO.isFI())
3593 continue;
3594 int Index = MO.getIndex();
3595 // Check to see if it falls within our range, and is tagged
3596 // to require ordering.
3597 if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
3598 SortingObjects[Index].IsValid)
3599 SortingObjects[Index].ObjectNumUses++;
3600 }
3601 }
3602 }
3603
3604   // Sort the objects using the X86FrameSortingComparator (see its comment
3605   // for info).
3606 llvm::stable_sort(SortingObjects, X86FrameSortingComparator());
3607
3608   // Now modify the original list to represent the final order that
3609   // we want. The order will depend on whether we're going to access them
3610   // from the stack pointer or the frame pointer. For SP, the objects we
3611   // want at smaller offsets should end up at the END of the list.
3612   // For FP, the order should be flipped.
3613 int i = 0;
3614 for (auto &Obj : SortingObjects) {
3615 // All invalid items are sorted at the end, so it's safe to stop.
3616 if (!Obj.IsValid)
3617 break;
3618 ObjectsToAllocate[i++] = Obj.ObjectIndex;
3619 }
3620
3621 // Flip it if we're accessing off of the FP.
3622 if (!TRI->hasStackRealignment(MF) && hasFP(MF))
3623 std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
3624 }
3625
3626
3627 unsigned X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
3628 // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
3629 unsigned Offset = 16;
3630 // RBP is immediately pushed.
3631 Offset += SlotSize;
3632 // All callee-saved registers are then pushed.
3633 Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
3634 // Every funclet allocates enough stack space for the largest outgoing call.
3635 Offset += getWinEHFuncletFrameSize(MF);
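  // For example, with two callee-saved GPRs besides RBP (16 bytes) and a
  // 32-byte funclet frame, the homed parent frame pointer is found
  // 16 + 8 + 16 + 32 == 72 bytes above the funclet's RSP.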
3636 return Offset;
3637 }
3638
3639 void X86FrameLowering::processFunctionBeforeFrameFinalized(
3640 MachineFunction &MF, RegScavenger *RS) const {
3641 // Mark the function as not having WinCFI. We will set it back to true in
3642 // emitPrologue if it gets called and emits CFI.
3643 MF.setHasWinCFI(false);
3644
3645 // If we are using Windows x64 CFI, ensure that the stack is always 8 byte
3646 // aligned. The format doesn't support misaligned stack adjustments.
3647 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI())
3648 MF.getFrameInfo().ensureMaxAlignment(Align(SlotSize));
3649
3650 // If this function isn't doing Win64-style C++ EH, we don't need to do
3651 // anything.
3652 if (STI.is64Bit() && MF.hasEHFunclets() &&
3653 classifyEHPersonality(MF.getFunction().getPersonalityFn()) ==
3654 EHPersonality::MSVC_CXX) {
3655 adjustFrameForMsvcCxxEh(MF);
3656 }
3657 }
3658
3659 void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const {
3660 // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
3661 // relative to RSP after the prologue. Find the offset of the last fixed
3662 // object, so that we can allocate a slot immediately following it. If there
3663 // were no fixed objects, use offset -SlotSize, which is immediately after the
3664 // return address. Fixed objects have negative frame indices.
3665 MachineFrameInfo &MFI = MF.getFrameInfo();
3666 WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
3667 int64_t MinFixedObjOffset = -SlotSize;
3668 for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
3669 MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));
3670
3671 for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
3672 for (WinEHHandlerType &H : TBME.HandlerArray) {
3673 int FrameIndex = H.CatchObj.FrameIndex;
3674 if (FrameIndex != INT_MAX) {
3675 // Ensure alignment.
3676 unsigned Align = MFI.getObjectAlign(FrameIndex).value();
3677 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
3678 MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
3679 MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
3680 }
3681 }
3682 }
3683
3684 // Ensure alignment.
3685 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
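  // For example, MinFixedObjOffset == -44 is rounded down to -48 here
  // (44 % 8 == 4), so UnwindHelp would land at offset -56 below.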
3686 int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
3687 int UnwindHelpFI =
3688 MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false);
3689 EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
3690
3691 // Store -2 into UnwindHelp on function entry. We have to scan forwards past
3692 // other frame setup instructions.
3693 MachineBasicBlock &MBB = MF.front();
3694 auto MBBI = MBB.begin();
3695 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
3696 ++MBBI;
3697
3698 DebugLoc DL = MBB.findDebugLoc(MBBI);
3699 addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
3700 UnwindHelpFI)
3701 .addImm(-2);
3702 }
3703
3704 void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced(
3705 MachineFunction &MF, RegScavenger *RS) const {
3706 if (STI.is32Bit() && MF.hasEHFunclets())
3707 restoreWinEHStackPointersInParent(MF);
3708 }
3709
3710 void X86FrameLowering::restoreWinEHStackPointersInParent(
3711 MachineFunction &MF) const {
3712 // 32-bit functions have to restore stack pointers when control is transferred
3713   // back to the parent function. These blocks are identified as EH pads that
3714 // are not funclet entries.
3715 bool IsSEH = isAsynchronousEHPersonality(
3716 classifyEHPersonality(MF.getFunction().getPersonalityFn()));
3717 for (MachineBasicBlock &MBB : MF) {
3718 bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry();
3719 if (NeedsRestore)
3720 restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(),
3721 /*RestoreSP=*/IsSEH);
3722 }
3723 }
3724