//===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//

#include "X86FrameLowering.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86InstrBuilder.h"
#include "X86InstrInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Target/TargetOptions.h"
#include <cstdlib>

#define DEBUG_TYPE "x86-fl"

STATISTIC(NumFrameLoopProbe, "Number of loop stack probes used in prologue");
STATISTIC(NumFrameExtraProbe,
          "Number of extra stack probes generated in prologue");
STATISTIC(NumFunctionUsingPush2Pop2, "Number of functions using push2/pop2");

using namespace llvm;

X86FrameLowering::X86FrameLowering(const X86Subtarget &STI,
                                   MaybeAlign StackAlignOverride)
    : TargetFrameLowering(StackGrowsDown, StackAlignOverride.valueOrOne(),
                          STI.is64Bit() ? -8 : -4),
      STI(STI), TII(*STI.getInstrInfo()), TRI(STI.getRegisterInfo()) {
  // Cache a bunch of frame-related predicates for this subtarget.
  SlotSize = TRI->getSlotSize();
  Is64Bit = STI.is64Bit();
  IsLP64 = STI.isTarget64BitLP64();
  // Standard x86_64 and NaCl use 64-bit frame/stack pointers; x32 uses 32-bit.
  Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
  StackPtr = TRI->getStackRegister();
}

bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
  return !MF.getFrameInfo().hasVarSizedObjects() &&
         !MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences() &&
         !MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall();
}

/// canSimplifyCallFramePseudos - If there is a reserved call frame, the
/// call frame pseudos can be simplified. Having a FP, as in the default
/// implementation, is not sufficient here since we can't always use it.
/// Use a more nuanced condition.
bool X86FrameLowering::canSimplifyCallFramePseudos(
    const MachineFunction &MF) const {
  return hasReservedCallFrame(MF) ||
         MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
         (hasFP(MF) && !TRI->hasStackRealignment(MF)) ||
         TRI->hasBasePointer(MF);
}

// needsFrameIndexResolution - Do we need to perform FI resolution for
// this function? Normally, this is required only when the function
// has any stack objects. However, FI resolution actually has another job,
// not apparent from the name - it resolves callframesetup/destroy
// that were not simplified earlier.
// So, this is required for x86 functions that have push sequences even
// when there are no stack objects.
bool X86FrameLowering::needsFrameIndexResolution(
    const MachineFunction &MF) const {
  return MF.getFrameInfo().hasStackObjects() ||
         MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences();
}

/// hasFP - Return true if the specified function should have a dedicated frame
/// pointer register. This is true if the function has variable sized allocas
/// or if frame pointer elimination is disabled.
bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
          TRI->hasStackRealignment(MF) || MFI.hasVarSizedObjects() ||
          MFI.isFrameAddressTaken() || MFI.hasOpaqueSPAdjustment() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          MF.getInfo<X86MachineFunctionInfo>()->hasPreallocatedCall() ||
          MF.callsUnwindInit() || MF.hasEHFunclets() || MF.callsEHReturn() ||
          MFI.hasStackMap() || MFI.hasPatchPoint() ||
          (isWin64Prologue(MF) && MFI.hasCopyImplyingStackAdjustment()));
}

static unsigned getSUBriOpcode(bool IsLP64) {
  return IsLP64 ? X86::SUB64ri32 : X86::SUB32ri;
}

static unsigned getADDriOpcode(bool IsLP64) {
  return IsLP64 ? X86::ADD64ri32 : X86::ADD32ri;
}

static unsigned getSUBrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::SUB64rr : X86::SUB32rr;
}

static unsigned getADDrrOpcode(bool IsLP64) {
  return IsLP64 ? X86::ADD64rr : X86::ADD32rr;
}

static unsigned getANDriOpcode(bool IsLP64, int64_t Imm) {
  return IsLP64 ? X86::AND64ri32 : X86::AND32ri;
}

static unsigned getLEArOpcode(bool IsLP64) {
  return IsLP64 ? X86::LEA64r : X86::LEA32r;
}

static unsigned getMOVriOpcode(bool Use64BitReg, int64_t Imm) {
  if (Use64BitReg) {
    if (isUInt<32>(Imm))
      return X86::MOV32ri64;
    if (isInt<32>(Imm))
      return X86::MOV64ri32;
    return X86::MOV64ri;
  }
  return X86::MOV32ri;
}
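// For example (illustrative, not from the surrounding code): with
// Use64BitReg == true, an immediate of 0x7fffffff selects MOV32ri64 (the
// zero-extending form), -8 selects the sign-extending MOV64ri32, and a value
// such as 0x123456789 needs the full 64-bit MOV64ri (movabsq).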

// Push-Pop Acceleration (PPX) hint is used to indicate that the POP reads the
// value written by the PUSH from the stack. The processor tracks these marked
// instructions internally and fast-forwards register data between matching
// PUSH and POP instructions, without going through memory or through the
// training loop of the Fast Store Forwarding Predictor (FSFP). Instead, a more
// efficient memory-renaming optimization can be used.
//
// The PPX hint is purely a performance hint. Instructions with this hint have
// the same functional semantics as those without. PPX hints set by the
// compiler that violate the balancing rule may turn off the PPX optimization,
// but they will not affect program semantics.
//
// Hence, PPX is used for balanced spill/reloads (Exceptions and setjmp/longjmp
// are not considered).
//
// PUSH2 and POP2 are instructions for (respectively) pushing/popping 2
// GPRs at a time to/from the stack.
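//
// Illustrative shape of a balanced pair (hand-written sketch, assumed
// operand order, not compiler output): a prologue spill of two callee-saved
// GPRs via one push2p, matched by an epilogue pop2p of the same pair in
// reverse order:
//   push2p %r15, %r14
//   ...
//   pop2p  %r14, %r15
// An unbalanced pair only loses the acceleration; the architectural result
// is unchanged.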
static unsigned getPUSHOpcode(const X86Subtarget &ST) {
  return ST.is64Bit() ? (ST.hasPPX() ? X86::PUSHP64r : X86::PUSH64r)
                      : X86::PUSH32r;
}
static unsigned getPOPOpcode(const X86Subtarget &ST) {
  return ST.is64Bit() ? (ST.hasPPX() ? X86::POPP64r : X86::POP64r)
                      : X86::POP32r;
}
static unsigned getPUSH2Opcode(const X86Subtarget &ST) {
  return ST.hasPPX() ? X86::PUSH2P : X86::PUSH2;
}
static unsigned getPOP2Opcode(const X86Subtarget &ST) {
  return ST.hasPPX() ? X86::POP2P : X86::POP2;
}

static bool isEAXLiveIn(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::RegisterMaskPair RegMask : MBB.liveins()) {
    unsigned Reg = RegMask.PhysReg;

    if (Reg == X86::RAX || Reg == X86::EAX || Reg == X86::AX ||
        Reg == X86::AH || Reg == X86::AL)
      return true;
  }

  return false;
}

/// Check if the flags need to be preserved before the terminators.
/// This would be the case if EFLAGS is live-in of the region composed
/// by the terminators, or live-out of that region without being defined
/// by a terminator.
static bool
flagsNeedToBePreservedBeforeTheTerminators(const MachineBasicBlock &MBB) {
  for (const MachineInstr &MI : MBB.terminators()) {
    bool BreakNext = false;
    for (const MachineOperand &MO : MI.operands()) {
      if (!MO.isReg())
        continue;
      Register Reg = MO.getReg();
      if (Reg != X86::EFLAGS)
        continue;

      // This terminator needs an eflags that is not defined
      // by a previous terminator:
      // EFLAGS is live-in of the region composed by the terminators.
      if (!MO.isDef())
        return true;
      // This terminator defines the eflags, i.e., we don't need to preserve
      // it. However, we still need to check this specific terminator does not
      // read a live-in value.
      BreakNext = true;
    }
    // We found a definition of the eflags, no need to preserve them.
    if (BreakNext)
      return false;
  }

  // None of the terminators use or define the eflags.
  // Check if they are live-out; that would imply we need to preserve them.
  for (const MachineBasicBlock *Succ : MBB.successors())
    if (Succ->isLiveIn(X86::EFLAGS))
      return true;

  return false;
}
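// For example (illustrative MIR, not from this file): a block whose
// terminators are
//   JCC_1 %bb.2, 4, implicit $eflags
//   JMP_1 %bb.3
// reads EFLAGS before any terminator defines it, so the function above
// returns true and any stack adjustment inserted before the terminators must
// avoid clobbering the flags (e.g. by using LEA instead of ADD/SUB).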

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
void X86FrameLowering::emitSPUpdate(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &DL, int64_t NumBytes,
                                    bool InEpilogue) const {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  MachineInstr::MIFlag Flag =
      isSub ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy;

  uint64_t Chunk = (1LL << 31) - 1;

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // It's ok to not take into account large chunks when probing, as the
  // allocation is split in smaller chunks anyway.
  if (EmitInlineStackProbe && !InEpilogue) {

    // This pseudo-instruction is going to be expanded, potentially using a
    // loop, by inlineStackProbe().
    BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING)).addImm(Offset);
    return;
  } else if (Offset > Chunk) {
    // Rather than emit a long series of instructions for large offsets,
    // load the offset into a register and do one sub/add.
    unsigned Reg = 0;
    unsigned Rax = (unsigned)(Is64Bit ? X86::RAX : X86::EAX);

    if (isSub && !isEAXLiveIn(MBB))
      Reg = Rax;
    else
      Reg = TRI->findDeadCallerSavedReg(MBB, MBBI);

    unsigned AddSubRROpc =
        isSub ? getSUBrrOpcode(Is64Bit) : getADDrrOpcode(Is64Bit);
    if (Reg) {
      BuildMI(MBB, MBBI, DL, TII.get(getMOVriOpcode(Is64Bit, Offset)), Reg)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AddSubRROpc), StackPtr)
                             .addReg(StackPtr)
                             .addReg(Reg);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      return;
    } else if (Offset > 8 * Chunk) {
      // If we would need more than 8 add or sub instructions (a >16GB stack
      // frame), it's worth spilling RAX to materialize this immediate.
      //   pushq %rax
      //   movabsq +-$Offset+-SlotSize, %rax
      //   addq %rsp, %rax
      //   xchg %rax, (%rsp)
      //   movq (%rsp), %rsp
      assert(Is64Bit && "can't have 32-bit 16GB stack frame");
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
          .addReg(Rax, RegState::Kill)
          .setMIFlag(Flag);
      // Subtract is not commutative, so negate the offset and always use add.
      // Subtract 8 less and add 8 more to account for the PUSH we just did.
      if (isSub)
        Offset = -(Offset - SlotSize);
      else
        Offset = Offset + SlotSize;
      BuildMI(MBB, MBBI, DL, TII.get(getMOVriOpcode(Is64Bit, Offset)), Rax)
          .addImm(Offset)
          .setMIFlag(Flag);
      MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(X86::ADD64rr), Rax)
                             .addReg(Rax)
                             .addReg(StackPtr);
      MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
      // Exchange the new SP in RAX with the top of the stack.
      addRegOffset(
          BuildMI(MBB, MBBI, DL, TII.get(X86::XCHG64rm), Rax).addReg(Rax),
          StackPtr, false, 0);
      // Load new SP from the top of the stack into RSP.
      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), StackPtr),
                   StackPtr, false, 0);
      return;
    }
  }

  while (Offset) {
    uint64_t ThisVal = std::min(Offset, Chunk);
    if (ThisVal == SlotSize) {
      // Use push / pop for slot sized adjustments as a size optimization. We
      // need to find a dead register when using pop.
      unsigned Reg = isSub ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
                           : TRI->findDeadCallerSavedReg(MBB, MBBI);
      if (Reg) {
        unsigned Opc = isSub ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
                             : (Is64Bit ? X86::POP64r : X86::POP32r);
        BuildMI(MBB, MBBI, DL, TII.get(Opc))
            .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub))
            .setMIFlag(Flag);
        Offset -= ThisVal;
        continue;
      }
    }

    BuildStackAdjustment(MBB, MBBI, DL, isSub ? -ThisVal : ThisVal, InEpilogue)
        .setMIFlag(Flag);

    Offset -= ThisVal;
  }
}
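// Illustrative results (assuming x86-64 and no inline probing): a NumBytes of
// -40 emits a single FrameSetup "subq $40, %rsp"; a NumBytes of -8 (exactly
// one SlotSize) emits "pushq %rax" instead, trading a dead store for smaller
// code; epilogue adjustments use FrameDestroy-flagged ADD/POP.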

MachineInstrBuilder X86FrameLowering::BuildStackAdjustment(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, int64_t Offset, bool InEpilogue) const {
  assert(Offset != 0 && "zero offset stack adjustment requested");

  // On Atom, using LEA to adjust SP is preferred, but using it in the epilogue
  // is tricky.
  bool UseLEA;
  if (!InEpilogue) {
    // Check if inserting the prologue at the beginning
    // of MBB would require using LEA operations.
    // We need to use LEA operations if EFLAGS is live in, because
    // it means an instruction will read it before it gets defined.
    UseLEA = STI.useLeaForSP() || MBB.isLiveIn(X86::EFLAGS);
  } else {
    // If we can use LEA for SP but we shouldn't, check that none
    // of the terminators uses the eflags. Otherwise we will insert
    // an ADD that will redefine the eflags and break the condition.
    // Alternatively, we could move the ADD, but this may not be possible
    // and is an optimization anyway.
    UseLEA = canUseLEAForSPInEpilogue(*MBB.getParent());
    if (UseLEA && !STI.useLeaForSP())
      UseLEA = flagsNeedToBePreservedBeforeTheTerminators(MBB);
    // If that assert breaks, that means we do not do the right thing
    // in canUseAsEpilogue.
    assert((UseLEA || !flagsNeedToBePreservedBeforeTheTerminators(MBB)) &&
           "We shouldn't have allowed this insertion point");
  }

  MachineInstrBuilder MI;
  if (UseLEA) {
    MI = addRegOffset(BuildMI(MBB, MBBI, DL,
                              TII.get(getLEArOpcode(Uses64BitFramePtr)),
                              StackPtr),
                      StackPtr, false, Offset);
  } else {
    bool IsSub = Offset < 0;
    uint64_t AbsOffset = IsSub ? -Offset : Offset;
    const unsigned Opc = IsSub ? getSUBriOpcode(Uses64BitFramePtr)
                               : getADDriOpcode(Uses64BitFramePtr);
    MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
             .addReg(StackPtr)
             .addImm(AbsOffset);
    MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
  }
  return MI;
}
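// Illustrative contrast (x86-64): with EFLAGS live into the block, a -16
// prologue adjustment is emitted as "leaq -16(%rsp), %rsp", which leaves the
// flags intact; otherwise the cheaper "subq $16, %rsp" is used with its
// EFLAGS def marked dead.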

int X86FrameLowering::mergeSPUpdates(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     bool doMergeWithPrevious) const {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? std::prev(MBBI) : MBBI;

  PI = skipDebugInstructionsBackward(PI, MBB.begin());
  // It is assumed that the ADD/SUB/LEA instruction is succeeded by one CFI
  // instruction, and that there are no DBG_VALUE or other instructions between
  // ADD/SUB/LEA and its corresponding CFI instruction.
  /* TODO: Add support for the case where there are multiple CFI instructions
    below the ADD/SUB/LEA, e.g.:
    ...
    add
    cfi_def_cfa_offset
    cfi_offset
    ...
  */
  if (doMergeWithPrevious && PI != MBB.begin() && PI->isCFIInstruction())
    PI = std::prev(PI);

  unsigned Opc = PI->getOpcode();
  int Offset = 0;

  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD32ri) &&
      PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = PI->getOperand(2).getImm();
  } else if ((Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
             PI->getOperand(0).getReg() == StackPtr &&
             PI->getOperand(1).getReg() == StackPtr &&
             PI->getOperand(2).getImm() == 1 &&
             PI->getOperand(3).getReg() == X86::NoRegister &&
             PI->getOperand(5).getReg() == X86::NoRegister) {
    // For LEAs we have: def = lea SP, FI, noreg, Offset, noreg.
    Offset = PI->getOperand(4).getImm();
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB32ri) &&
             PI->getOperand(0).getReg() == StackPtr) {
    assert(PI->getOperand(1).getReg() == StackPtr);
    Offset = -PI->getOperand(2).getImm();
  } else
    return 0;

  PI = MBB.erase(PI);
  if (PI != MBB.end() && PI->isCFIInstruction()) {
    auto CIs = MBB.getParent()->getFrameInstructions();
    MCCFIInstruction CI = CIs[PI->getOperand(0).getCFIIndex()];
    if (CI.getOperation() == MCCFIInstruction::OpDefCfaOffset ||
        CI.getOperation() == MCCFIInstruction::OpAdjustCfaOffset)
      PI = MBB.erase(PI);
  }
  if (!doMergeWithPrevious)
    MBBI = skipDebugInstructionsForward(PI, MBB.end());

  return Offset;
}
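// For example (illustrative): when the instruction just above the insertion
// point is "subq $8, %rsp", mergeSPUpdates erases that SUB (and its
// def_cfa_offset CFI, if present) and returns -8 so the caller can fold it
// into one larger stack allocation.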

void X86FrameLowering::BuildCFI(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL,
                                const MCCFIInstruction &CFIInst,
                                MachineInstr::MIFlag Flag) const {
  MachineFunction &MF = *MBB.getParent();
  unsigned CFIIndex = MF.addFrameInst(CFIInst);

  if (CFIInst.getOperation() == MCCFIInstruction::OpAdjustCfaOffset)
    MF.getInfo<X86MachineFunctionInfo>()->setHasCFIAdjustCfa(true);

  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::CFI_INSTRUCTION))
      .addCFIIndex(CFIIndex)
      .setMIFlag(Flag);
}

/// Emits Dwarf Info specifying offsets of callee saved registers and
/// frame pointer. This is called only when basic block sections are enabled.
void X86FrameLowering::emitCalleeSavedFrameMovesFullCFA(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  MachineFunction &MF = *MBB.getParent();
  if (!hasFP(MF)) {
    emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
    return;
  }
  const MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  const Register FramePtr = TRI->getFrameRegister(MF);
  const Register MachineFramePtr =
      STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64))
                               : FramePtr;
  unsigned DwarfReg = MRI->getDwarfRegNum(MachineFramePtr, true);
  // Offset = space for return address + size of the frame pointer itself.
  unsigned Offset = (Is64Bit ? 8 : 4) + (Uses64BitFramePtr ? 8 : 4);
  BuildCFI(MBB, MBBI, DebugLoc{},
           MCCFIInstruction::createOffset(nullptr, DwarfReg, -Offset));
  emitCalleeSavedFrameMoves(MBB, MBBI, DebugLoc{}, true);
}

void X86FrameLowering::emitCalleeSavedFrameMoves(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    const DebugLoc &DL, bool IsPrologue) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineModuleInfo &MMI = MF.getMMI();
  const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();

  // Calculate offsets.
  for (const CalleeSavedInfo &I : CSI) {
    int64_t Offset = MFI.getObjectOffset(I.getFrameIdx());
    Register Reg = I.getReg();
    unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);

    if (IsPrologue) {
      if (X86FI->getStackPtrSaveMI()) {
        // +2*SlotSize because there are the return address and ebp at the
        // bottom of the stack.
        // | retaddr |
        // | ebp     |
        // |         |<--ebp
        Offset += 2 * SlotSize;
        SmallString<64> CfaExpr;
        CfaExpr.push_back(dwarf::DW_CFA_expression);
        uint8_t buffer[16];
        CfaExpr.append(buffer, buffer + encodeULEB128(DwarfReg, buffer));
        CfaExpr.push_back(2);
        Register FramePtr = TRI->getFrameRegister(MF);
        const Register MachineFramePtr =
            STI.isTarget64BitILP32()
                ? Register(getX86SubSuperRegister(FramePtr, 64))
                : FramePtr;
        unsigned DwarfFramePtr = MRI->getDwarfRegNum(MachineFramePtr, true);
        CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfFramePtr));
        CfaExpr.append(buffer, buffer + encodeSLEB128(Offset, buffer));
        BuildCFI(MBB, MBBI, DL,
                 MCCFIInstruction::createEscape(nullptr, CfaExpr.str()),
                 MachineInstr::FrameSetup);
      } else {
        BuildCFI(MBB, MBBI, DL,
                 MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
      }
    } else {
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createRestore(nullptr, DwarfReg));
    }
  }
  if (auto *MI = X86FI->getStackPtrSaveMI()) {
    int FI = MI->getOperand(1).getIndex();
    int64_t Offset = MFI.getObjectOffset(FI) + 2 * SlotSize;
    SmallString<64> CfaExpr;
    Register FramePtr = TRI->getFrameRegister(MF);
    const Register MachineFramePtr =
        STI.isTarget64BitILP32()
            ? Register(getX86SubSuperRegister(FramePtr, 64))
            : FramePtr;
    unsigned DwarfFramePtr = MRI->getDwarfRegNum(MachineFramePtr, true);
    CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfFramePtr));
    uint8_t buffer[16];
    CfaExpr.append(buffer, buffer + encodeSLEB128(Offset, buffer));
    CfaExpr.push_back(dwarf::DW_OP_deref);

    SmallString<64> DefCfaExpr;
    DefCfaExpr.push_back(dwarf::DW_CFA_def_cfa_expression);
    DefCfaExpr.append(buffer, buffer + encodeSLEB128(CfaExpr.size(), buffer));
    DefCfaExpr.append(CfaExpr.str());
    // DW_CFA_def_cfa_expression: DW_OP_breg5 offset, DW_OP_deref
    BuildCFI(MBB, MBBI, DL,
             MCCFIInstruction::createEscape(nullptr, DefCfaExpr.str()),
             MachineInstr::FrameSetup);
  }
}
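// Byte layout of the escape built above, as an illustrative sketch for a
// DW_OP_breg6 (%rbp) frame with a hypothetical Offset of -24:
//   DW_CFA_def_cfa_expression, ULEB(len = 3), DW_OP_breg6, SLEB(-24),
//   DW_OP_deref
// i.e. "CFA = *(%rbp - 24)", which stays valid even while the prologue is
// still moving the stack pointer.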

void X86FrameLowering::emitZeroCallUsedRegs(BitVector RegsToZero,
                                            MachineBasicBlock &MBB) const {
  const MachineFunction &MF = *MBB.getParent();

  // Insertion point.
  MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();

  // Fake a debug loc.
  DebugLoc DL;
  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // Zero out FP stack if referenced. Do this outside of the loop below so that
  // it's done only once.
  const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
  for (MCRegister Reg : RegsToZero.set_bits()) {
    if (!X86::RFP80RegClass.contains(Reg))
      continue;

    unsigned NumFPRegs = ST.is64Bit() ? 8 : 7;
    for (unsigned i = 0; i != NumFPRegs; ++i)
      BuildMI(MBB, MBBI, DL, TII.get(X86::LD_F0));

    for (unsigned i = 0; i != NumFPRegs; ++i)
      BuildMI(MBB, MBBI, DL, TII.get(X86::ST_FPrr)).addReg(X86::ST0);
    break;
  }

  // For GPRs, we only care to clear out the 32-bit register.
  BitVector GPRsToZero(TRI->getNumRegs());
  for (MCRegister Reg : RegsToZero.set_bits())
    if (TRI->isGeneralPurposeRegister(MF, Reg)) {
      GPRsToZero.set(getX86SubSuperRegister(Reg, 32));
      RegsToZero.reset(Reg);
    }

  // Zero out the GPRs first.
  for (MCRegister Reg : GPRsToZero.set_bits())
    TII.buildClearRegister(Reg, MBB, MBBI, DL);

  // Zero out the remaining registers.
  for (MCRegister Reg : RegsToZero.set_bits())
    TII.buildClearRegister(Reg, MBB, MBBI, DL);
}

void X86FrameLowering::emitStackProbe(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog,
    std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR()) {
    if (InProlog) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::STACKALLOC_W_PROBING))
          .addImm(0 /* no explicit stack size */);
    } else {
      emitStackProbeInline(MF, MBB, MBBI, DL, false);
    }
  } else {
    emitStackProbeCall(MF, MBB, MBBI, DL, InProlog, InstrNum);
  }
}

bool X86FrameLowering::stackProbeFunctionModifiesSP() const {
  return STI.isOSWindows() && !STI.isTargetWin64();
}

void X86FrameLowering::inlineStackProbe(MachineFunction &MF,
                                        MachineBasicBlock &PrologMBB) const {
  auto Where = llvm::find_if(PrologMBB, [](MachineInstr &MI) {
    return MI.getOpcode() == X86::STACKALLOC_W_PROBING;
  });
  if (Where != PrologMBB.end()) {
    DebugLoc DL = PrologMBB.findDebugLoc(Where);
    emitStackProbeInline(MF, PrologMBB, Where, DL, true);
    Where->eraseFromParent();
  }
}

void X86FrameLowering::emitStackProbeInline(MachineFunction &MF,
                                            MachineBasicBlock &MBB,
                                            MachineBasicBlock::iterator MBBI,
                                            const DebugLoc &DL,
                                            bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  if (STI.isTargetWindowsCoreCLR() && STI.is64Bit())
    emitStackProbeInlineWindowsCoreCLR64(MF, MBB, MBBI, DL, InProlog);
  else
    emitStackProbeInlineGeneric(MF, MBB, MBBI, DL, InProlog);
}

void X86FrameLowering::emitStackProbeInlineGeneric(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  MachineInstr &AllocWithProbe = *MBBI;
  uint64_t Offset = AllocWithProbe.getOperand(0).getImm();

  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  assert(!(STI.is64Bit() && STI.isTargetWindowsCoreCLR()) &&
         "different expansion expected for CoreCLR 64 bit");

  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  uint64_t ProbeChunk = StackProbeSize * 8;

  uint64_t MaxAlign =
      TRI->hasStackRealignment(MF) ? calculateMaxStackAlign(MF) : 0;

  // Synthesize a loop or unroll it, depending on the number of iterations.
  // BuildStackAlignAND ensures that at most MaxAlign % StackProbeSize bytes
  // are left between the unaligned rsp and the current rsp.
  if (Offset > ProbeChunk) {
    emitStackProbeInlineGenericLoop(MF, MBB, MBBI, DL, Offset,
                                    MaxAlign % StackProbeSize);
  } else {
    emitStackProbeInlineGenericBlock(MF, MBB, MBBI, DL, Offset,
                                     MaxAlign % StackProbeSize);
  }
}

void X86FrameLowering::emitStackProbeInlineGenericBlock(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
    uint64_t AlignOffset) const {

  const bool NeedsDwarfCFI = needsDwarfCFI(MF);
  const bool HasFP = hasFP(MF);
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  uint64_t CurrentOffset = 0;

  assert(AlignOffset < StackProbeSize);

  // If the offset is so small it fits within a page, there's nothing to do.
  if (StackProbeSize < Offset + AlignOffset) {

    uint64_t StackAdjustment = StackProbeSize - AlignOffset;
    BuildStackAdjustment(MBB, MBBI, DL, -StackAdjustment, /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);
    if (!HasFP && NeedsDwarfCFI) {
      BuildCFI(
          MBB, MBBI, DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, StackAdjustment));
    }

    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset = StackProbeSize - AlignOffset;
  }

  // For the next N - 1 pages, just probe. I tried to take advantage of
  // natural probes but it implies much more logic and there were very few
  // interesting natural probes to interleave.
  while (CurrentOffset + StackProbeSize < Offset) {
    BuildStackAdjustment(MBB, MBBI, DL, -StackProbeSize, /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);

    if (!HasFP && NeedsDwarfCFI) {
      BuildCFI(
          MBB, MBBI, DL,
          MCCFIInstruction::createAdjustCfaOffset(nullptr, StackProbeSize));
    }
    addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                     .setMIFlag(MachineInstr::FrameSetup),
                 StackPtr, false, 0)
        .addImm(0)
        .setMIFlag(MachineInstr::FrameSetup);
    NumFrameExtraProbe++;
    CurrentOffset += StackProbeSize;
  }

  // No need to probe the tail, it is smaller than a page.
  uint64_t ChunkSize = Offset - CurrentOffset;
  if (ChunkSize == SlotSize) {
    // Use push for slot sized adjustments as a size optimization,
    // like emitSPUpdate does when not probing.
    unsigned Reg = Is64Bit ? X86::RAX : X86::EAX;
    unsigned Opc = Is64Bit ? X86::PUSH64r : X86::PUSH32r;
    BuildMI(MBB, MBBI, DL, TII.get(Opc))
        .addReg(Reg, RegState::Undef)
        .setMIFlag(MachineInstr::FrameSetup);
  } else {
    BuildStackAdjustment(MBB, MBBI, DL, -ChunkSize, /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);
  }
  // No need to adjust the Dwarf CFA offset here; the last position of the
  // stack has been defined.
}
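// Worked example (illustrative): with Offset = 9000, StackProbeSize = 4096
// and AlignOffset = 0, the block form emits
//   subq $4096, %rsp ; movq $0, (%rsp)   # first page
//   subq $4096, %rsp ; movq $0, (%rsp)   # second page
//   subq $808, %rsp                      # tail, no probe needed
// keeping every touch within one page of the previous one.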

void X86FrameLowering::emitStackProbeInlineGenericLoop(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, uint64_t Offset,
    uint64_t AlignOffset) const {
  assert(Offset && "null offset");

  assert(MBB.computeRegisterLiveness(TRI, X86::EFLAGS, MBBI) !=
             MachineBasicBlock::LQR_Live &&
         "Inline stack probe loop will clobber live EFLAGS.");

  const bool NeedsDwarfCFI = needsDwarfCFI(MF);
  const bool HasFP = hasFP(MF);
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);

  if (AlignOffset) {
    if (AlignOffset < StackProbeSize) {
      // Perform a first smaller allocation followed by a probe.
      BuildStackAdjustment(MBB, MBBI, DL, -AlignOffset, /*InEpilogue=*/false)
          .setMIFlag(MachineInstr::FrameSetup);

      addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MovMIOpc))
                       .setMIFlag(MachineInstr::FrameSetup),
                   StackPtr, false, 0)
          .addImm(0)
          .setMIFlag(MachineInstr::FrameSetup);
      NumFrameExtraProbe++;
      Offset -= AlignOffset;
    }
  }

  // Synthesize a loop.
  NumFrameLoopProbe++;
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  MachineBasicBlock *testMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *tailMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = ++MBB.getIterator();
  MF.insert(MBBIter, testMBB);
  MF.insert(MBBIter, tailMBB);

  Register FinalStackProbed = Uses64BitFramePtr ? X86::R11
                              : Is64Bit         ? X86::R11D
                                                : X86::EAX;

  BuildMI(MBB, MBBI, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
      .addReg(StackPtr)
      .setMIFlag(MachineInstr::FrameSetup);

  // Save the loop bound.
  {
    const unsigned BoundOffset = alignDown(Offset, StackProbeSize);
    const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr);
    BuildMI(MBB, MBBI, DL, TII.get(SUBOpc), FinalStackProbed)
        .addReg(FinalStackProbed)
        .addImm(BoundOffset)
        .setMIFlag(MachineInstr::FrameSetup);

    // While in the loop, use a loop-invariant register for CFI instead of the
    // stack pointer, which changes during the loop.
    if (!HasFP && NeedsDwarfCFI) {
      // x32 uses the same DWARF register numbers as x86-64, so there isn't a
      // register number for r11d; we must use r11 instead.
      const Register DwarfFinalStackProbed =
          STI.isTarget64BitILP32()
              ? Register(getX86SubSuperRegister(FinalStackProbed, 64))
              : FinalStackProbed;

      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createDefCfaRegister(
                   nullptr, TRI->getDwarfRegNum(DwarfFinalStackProbed, true)));
      BuildCFI(MBB, MBBI, DL,
               MCCFIInstruction::createAdjustCfaOffset(nullptr, BoundOffset));
    }
  }

  // Allocate a page.
  BuildStackAdjustment(*testMBB, testMBB->end(), DL, -StackProbeSize,
                       /*InEpilogue=*/false)
      .setMIFlag(MachineInstr::FrameSetup);

  // Touch the page.
  addRegOffset(BuildMI(testMBB, DL, TII.get(MovMIOpc))
                   .setMIFlag(MachineInstr::FrameSetup),
               StackPtr, false, 0)
      .addImm(0)
      .setMIFlag(MachineInstr::FrameSetup);

  // Compare with the stack pointer bound.
  BuildMI(testMBB, DL, TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
      .addReg(StackPtr)
      .addReg(FinalStackProbed)
      .setMIFlag(MachineInstr::FrameSetup);

  // Jump.
  BuildMI(testMBB, DL, TII.get(X86::JCC_1))
      .addMBB(testMBB)
      .addImm(X86::COND_NE)
      .setMIFlag(MachineInstr::FrameSetup);
  testMBB->addSuccessor(testMBB);
  testMBB->addSuccessor(tailMBB);

  // BB management.
  tailMBB->splice(tailMBB->end(), &MBB, MBBI, MBB.end());
  tailMBB->transferSuccessorsAndUpdatePHIs(&MBB);
  MBB.addSuccessor(testMBB);

  // Handle the tail.
  const uint64_t TailOffset = Offset % StackProbeSize;
  MachineBasicBlock::iterator TailMBBIter = tailMBB->begin();
  if (TailOffset) {
    BuildStackAdjustment(*tailMBB, TailMBBIter, DL, -TailOffset,
                         /*InEpilogue=*/false)
        .setMIFlag(MachineInstr::FrameSetup);
  }

  // After the loop, switch back to the stack pointer for CFI.
  if (!HasFP && NeedsDwarfCFI) {
    // x32 uses the same DWARF register numbers as x86-64, so there isn't a
    // register number for esp; we must use rsp instead.
    const Register DwarfStackPtr =
        STI.isTarget64BitILP32()
            ? Register(getX86SubSuperRegister(StackPtr, 64))
            : Register(StackPtr);

    BuildCFI(*tailMBB, TailMBBIter, DL,
             MCCFIInstruction::createDefCfaRegister(
                 nullptr, TRI->getDwarfRegNum(DwarfStackPtr, true)));
  }

  // Update live-in information.
  bool anyChange = false;
  do {
    anyChange = recomputeLiveIns(*tailMBB) || recomputeLiveIns(*testMBB);
  } while (anyChange);
}
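// Illustrative expansion (x86-64, Offset = 0x11000, StackProbeSize = 0x1000,
// AlignOffset = 0):
//   movq %rsp, %r11
//   subq $0x11000, %r11        # loop bound in a loop-invariant register
// .LtestMBB:
//   subq $0x1000, %rsp         # allocate a page
//   movq $0, (%rsp)            # touch it
//   cmpq %r11, %rsp
//   jne  .LtestMBB             # loop until the bound is reached
// The tail (Offset % StackProbeSize, here 0) would be allocated unprobed in
// tailMBB.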

void X86FrameLowering::emitStackProbeInlineWindowsCoreCLR64(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog) const {
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  assert(STI.is64Bit() && "different expansion needed for 32 bit");
  assert(STI.isTargetWindowsCoreCLR() && "custom expansion expects CoreCLR");
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  const BasicBlock *LLVM_BB = MBB.getBasicBlock();

  assert(MBB.computeRegisterLiveness(TRI, X86::EFLAGS, MBBI) !=
             MachineBasicBlock::LQR_Live &&
         "Inline stack probe loop will clobber live EFLAGS.");

  // RAX contains the number of bytes of desired stack adjustment.
  // The handling here assumes this value has already been updated so as to
  // maintain stack alignment.
  //
  // We need to exit with RSP modified by this amount and execute suitable
  // page touches to notify the OS that we're growing the stack responsibly.
  // All stack probing must be done without modifying RSP.
  //
  // MBB:
  //    SizeReg = RAX;
  //    ZeroReg = 0
  //    CopyReg = RSP
  //    Flags, TestReg = CopyReg - SizeReg
  //    FinalReg = !Flags.Ovf ? TestReg : ZeroReg
  //    LimitReg = gs magic thread env access
  //    if FinalReg >= LimitReg goto ContinueMBB
  // RoundBB:
  //    RoundReg = page address of FinalReg
  // LoopMBB:
  //    LoopReg = PHI(LimitReg,ProbeReg)
  //    ProbeReg = LoopReg - PageSize
  //    [ProbeReg] = 0
  //    if (ProbeReg > RoundReg) goto LoopMBB
  // ContinueMBB:
  //    RSP = RSP - RAX
  //    [rest of original MBB]

  // Set up the new basic blocks.
  MachineBasicBlock *RoundMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *ContinueMBB = MF.CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = std::next(MBB.getIterator());
  MF.insert(MBBIter, RoundMBB);
  MF.insert(MBBIter, LoopMBB);
  MF.insert(MBBIter, ContinueMBB);

  // Split MBB and move the tail portion down to ContinueMBB.
  MachineBasicBlock::iterator BeforeMBBI = std::prev(MBBI);
  ContinueMBB->splice(ContinueMBB->begin(), &MBB, MBBI, MBB.end());
  ContinueMBB->transferSuccessorsAndUpdatePHIs(&MBB);

  // Some useful constants.
  const int64_t ThreadEnvironmentStackLimit = 0x10;
  const int64_t PageSize = 0x1000;
  const int64_t PageMask = ~(PageSize - 1);

  // Registers we need. For the normal case we use virtual
  // registers. For the prolog expansion we use RAX, RCX and RDX.
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetRegisterClass *RegClass = &X86::GR64RegClass;
  const Register
      SizeReg = InProlog ? X86::RAX : MRI.createVirtualRegister(RegClass),
      ZeroReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
      CopyReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      TestReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      FinalReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      RoundedReg = InProlog ? X86::RDX : MRI.createVirtualRegister(RegClass),
      LimitReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
      JoinReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass),
      ProbeReg = InProlog ? X86::RCX : MRI.createVirtualRegister(RegClass);

  // SP-relative offsets where we can save RCX and RDX.
  int64_t RCXShadowSlot = 0;
  int64_t RDXShadowSlot = 0;

  // If inlining in the prolog, save RCX and RDX.
  if (InProlog) {
    // Compute the offsets. We need to account for things already
    // pushed onto the stack at this point: return address, frame
    // pointer (if used), and callee saves.
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    const int64_t CalleeSaveSize = X86FI->getCalleeSavedFrameSize();
    const bool HasFP = hasFP(MF);

    // Check if we need to spill RCX and/or RDX.
    // Here we assume that no earlier prologue instruction changes RCX and/or
    // RDX, so checking the block live-ins is enough.
    const bool IsRCXLiveIn = MBB.isLiveIn(X86::RCX);
    const bool IsRDXLiveIn = MBB.isLiveIn(X86::RDX);
    int64_t InitSlot = 8 + CalleeSaveSize + (HasFP ? 8 : 0);
    // Assign the initial slot to both registers, then change RDX's slot if
    // both need to be spilled.
    if (IsRCXLiveIn)
      RCXShadowSlot = InitSlot;
    if (IsRDXLiveIn)
      RDXShadowSlot = InitSlot;
    if (IsRDXLiveIn && IsRCXLiveIn)
      RDXShadowSlot += 8;
    // Emit the saves if needed.
    if (IsRCXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RCXShadowSlot)
          .addReg(X86::RCX);
    if (IsRDXLiveIn)
      addRegOffset(BuildMI(&MBB, DL, TII.get(X86::MOV64mr)), X86::RSP, false,
                   RDXShadowSlot)
          .addReg(X86::RDX);
  } else {
    // Not in the prolog. Copy RAX to a virtual reg.
    BuildMI(&MBB, DL, TII.get(X86::MOV64rr), SizeReg).addReg(X86::RAX);
  }

  // Add code to MBB to check for overflow and set the new target stack pointer
  // to zero if so.
  BuildMI(&MBB, DL, TII.get(X86::XOR64rr), ZeroReg)
      .addReg(ZeroReg, RegState::Undef)
      .addReg(ZeroReg, RegState::Undef);
  BuildMI(&MBB, DL, TII.get(X86::MOV64rr), CopyReg).addReg(X86::RSP);
  BuildMI(&MBB, DL, TII.get(X86::SUB64rr), TestReg)
      .addReg(CopyReg)
      .addReg(SizeReg);
  BuildMI(&MBB, DL, TII.get(X86::CMOV64rr), FinalReg)
      .addReg(TestReg)
      .addReg(ZeroReg)
      .addImm(X86::COND_B);

  // FinalReg now holds final stack pointer value, or zero if
  // allocation would overflow. Compare against the current stack
  // limit from the thread environment block. Note this limit is the
  // lowest touched page on the stack, not the point at which the OS
  // will cause an overflow exception, so this is just an optimization
  // to avoid unnecessarily touching pages that are below the current
  // SP but already committed to the stack by the OS.
  BuildMI(&MBB, DL, TII.get(X86::MOV64rm), LimitReg)
      .addReg(0)
      .addImm(1)
      .addReg(0)
      .addImm(ThreadEnvironmentStackLimit)
      .addReg(X86::GS);
  BuildMI(&MBB, DL, TII.get(X86::CMP64rr)).addReg(FinalReg).addReg(LimitReg);
  // Jump if the desired stack pointer is at or above the stack limit.
  BuildMI(&MBB, DL, TII.get(X86::JCC_1))
      .addMBB(ContinueMBB)
      .addImm(X86::COND_AE);

  // Add code to roundMBB to round the final stack pointer to a page boundary.
  RoundMBB->addLiveIn(FinalReg);
  BuildMI(RoundMBB, DL, TII.get(X86::AND64ri32), RoundedReg)
      .addReg(FinalReg)
      .addImm(PageMask);
  BuildMI(RoundMBB, DL, TII.get(X86::JMP_1)).addMBB(LoopMBB);

  // LimitReg now holds the current stack limit and RoundedReg the
  // page-rounded final RSP value. Add code to loopMBB to decrement LimitReg
  // page-by-page and probe until we reach RoundedReg.
  if (!InProlog) {
    BuildMI(LoopMBB, DL, TII.get(X86::PHI), JoinReg)
        .addReg(LimitReg)
        .addMBB(RoundMBB)
        .addReg(ProbeReg)
        .addMBB(LoopMBB);
  }

  LoopMBB->addLiveIn(JoinReg);
  addRegOffset(BuildMI(LoopMBB, DL, TII.get(X86::LEA64r), ProbeReg), JoinReg,
               false, -PageSize);

  // Probe by storing a byte onto the stack.
  BuildMI(LoopMBB, DL, TII.get(X86::MOV8mi))
      .addReg(ProbeReg)
      .addImm(1)
      .addReg(0)
      .addImm(0)
      .addReg(0)
      .addImm(0);

  LoopMBB->addLiveIn(RoundedReg);
  BuildMI(LoopMBB, DL, TII.get(X86::CMP64rr))
      .addReg(RoundedReg)
      .addReg(ProbeReg);
  BuildMI(LoopMBB, DL, TII.get(X86::JCC_1))
      .addMBB(LoopMBB)
      .addImm(X86::COND_NE);

  MachineBasicBlock::iterator ContinueMBBI = ContinueMBB->getFirstNonPHI();

  // If in prolog, restore RDX and RCX.
  if (InProlog) {
    if (RCXShadowSlot) // It means we spilled RCX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RCX),
                   X86::RSP, false, RCXShadowSlot);
    if (RDXShadowSlot) // It means we spilled RDX in the prologue.
      addRegOffset(BuildMI(*ContinueMBB, ContinueMBBI, DL,
                           TII.get(X86::MOV64rm), X86::RDX),
                   X86::RSP, false, RDXShadowSlot);
  }

  // Now that the probing is done, add code to continueMBB to update
  // the stack pointer for real.
  ContinueMBB->addLiveIn(SizeReg);
  BuildMI(*ContinueMBB, ContinueMBBI, DL, TII.get(X86::SUB64rr), X86::RSP)
      .addReg(X86::RSP)
      .addReg(SizeReg);

  // Add the control flow edges we need.
  MBB.addSuccessor(ContinueMBB);
  MBB.addSuccessor(RoundMBB);
  RoundMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(ContinueMBB);
  LoopMBB->addSuccessor(LoopMBB);

  // Mark all the instructions added to the prolog as frame setup.
  if (InProlog) {
    for (++BeforeMBBI; BeforeMBBI != MBB.end(); ++BeforeMBBI) {
      BeforeMBBI->setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *RoundMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI : *LoopMBB) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
    for (MachineInstr &MI :
         llvm::make_range(ContinueMBB->begin(), ContinueMBBI)) {
      MI.setFlag(MachineInstr::FrameSetup);
    }
  }
}

void X86FrameLowering::emitStackProbeCall(
    MachineFunction &MF, MachineBasicBlock &MBB,
    MachineBasicBlock::iterator MBBI, const DebugLoc &DL, bool InProlog,
    std::optional<MachineFunction::DebugInstrOperandPair> InstrNum) const {
  bool IsLargeCodeModel = MF.getTarget().getCodeModel() == CodeModel::Large;

  // FIXME: Add indirect thunk support and remove this.
  if (Is64Bit && IsLargeCodeModel && STI.useIndirectThunkCalls())
    report_fatal_error("Emitting stack probe calls on 64-bit with the large "
                       "code model and indirect thunks not yet implemented.");

  assert(MBB.computeRegisterLiveness(TRI, X86::EFLAGS, MBBI) !=
             MachineBasicBlock::LQR_Live &&
         "Stack probe calls will clobber live EFLAGS.");

  unsigned CallOp;
  if (Is64Bit)
    CallOp = IsLargeCodeModel ? X86::CALL64r : X86::CALL64pcrel32;
  else
    CallOp = X86::CALLpcrel32;

  StringRef Symbol = STI.getTargetLowering()->getStackProbeSymbolName(MF);

  MachineInstrBuilder CI;
  MachineBasicBlock::iterator ExpansionMBBI = std::prev(MBBI);

  // All current stack probes take AX and SP as input, clobber flags, and
  // preserve all registers. x86_64 probes leave RSP unmodified.
  if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
    // For the large code model, we have to call through a register. Use R11,
    // as it is scratch in all supported calling conventions.
    BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::R11)
        .addExternalSymbol(MF.createExternalSymbolName(Symbol));
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp)).addReg(X86::R11);
  } else {
    CI = BuildMI(MBB, MBBI, DL, TII.get(CallOp))
             .addExternalSymbol(MF.createExternalSymbolName(Symbol));
  }

  unsigned AX = Uses64BitFramePtr ? X86::RAX : X86::EAX;
  unsigned SP = Uses64BitFramePtr ? X86::RSP : X86::ESP;
  CI.addReg(AX, RegState::Implicit)
      .addReg(SP, RegState::Implicit)
      .addReg(AX, RegState::Define | RegState::Implicit)
      .addReg(SP, RegState::Define | RegState::Implicit)
      .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit);

  MachineInstr *ModInst = CI;
  if (STI.isTargetWin64() || !STI.isOSWindows()) {
    // MSVC x32's _chkstk and cygwin/mingw's _alloca adjust %esp themselves.
    // MSVC x64's __chkstk and cygwin/mingw's ___chkstk_ms do not adjust %rsp
    // themselves. They also do not clobber %rax, so we can reuse it when
    // adjusting %rsp.
    // All other platforms do not specify a particular ABI for the stack probe
    // function, so we arbitrarily define it to not adjust %esp/%rsp itself.
    ModInst =
        BuildMI(MBB, MBBI, DL, TII.get(getSUBrrOpcode(Uses64BitFramePtr)), SP)
            .addReg(SP)
            .addReg(AX);
  }

  // DebugInfo variable locations -- if there's an instruction number for the
  // allocation (i.e., DYN_ALLOC_*), substitute it for the instruction that
  // modifies SP.
  if (InstrNum) {
    if (STI.isTargetWin64() || !STI.isOSWindows()) {
      // Label destination operand of the subtract.
      MF.makeDebugValueSubstitution(*InstrNum,
                                    {ModInst->getDebugInstrNum(), 0});
    } else {
      // Label the call. The operand number is the penultimate operand, zero
      // based.
      unsigned SPDefOperand = ModInst->getNumOperands() - 2;
      MF.makeDebugValueSubstitution(
          *InstrNum, {ModInst->getDebugInstrNum(), SPDefOperand});
    }
  }

  if (InProlog) {
    // Apply the frame setup flag to all inserted instrs.
    for (++ExpansionMBBI; ExpansionMBBI != MBBI; ++ExpansionMBBI)
      ExpansionMBBI->setFlag(MachineInstr::FrameSetup);
  }
}

static unsigned calculateSetFPREG(uint64_t SPAdjust) {
  // Win64 ABI has a less restrictive limitation of 240; 128 works equally well
  // and might require smaller successive adjustments.
  const uint64_t Win64MaxSEHOffset = 128;
  uint64_t SEHFrameOffset = std::min(SPAdjust, Win64MaxSEHOffset);
  // Win64 ABI requires 16-byte alignment for the UWOP_SET_FPREG opcode.
  return SEHFrameOffset & -16;
}
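// For example (illustrative): calculateSetFPREG(40) returns 32 (40 clamped to
// nothing, then rounded down to 16-byte alignment), while
// calculateSetFPREG(4096) is clamped to the conservative 128-byte bound
// chosen above.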

// If we're forcing a stack realignment we can't rely on just the frame
// info; we need to know the ABI stack alignment as well in case we
// have a call out. Otherwise just make sure we have some alignment - we'll
// go with the minimum SlotSize.
uint64_t
X86FrameLowering::calculateMaxStackAlign(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  Align MaxAlign = MFI.getMaxAlign(); // Desired stack alignment.
  Align StackAlign = getStackAlign();
  bool HasRealign = MF.getFunction().hasFnAttribute("stackrealign");
  if (HasRealign) {
    if (MFI.hasCalls())
      MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
    else if (MaxAlign < SlotSize)
      MaxAlign = Align(SlotSize);
  }

  if (!Is64Bit && MF.getFunction().getCallingConv() == CallingConv::X86_INTR) {
    if (HasRealign)
      MaxAlign = (MaxAlign > 16) ? MaxAlign : Align(16);
    else
      MaxAlign = Align(16);
  }
  return MaxAlign.value();
}
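// For example (illustrative): a "stackrealign" x86-64 function that makes
// calls but whose largest local only needs 8-byte alignment still reports
// the 16-byte ABI stack alignment, whereas a 32-byte-aligned local raises
// the result to 32.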

void X86FrameLowering::BuildStackAlignAND(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          const DebugLoc &DL, unsigned Reg,
                                          uint64_t MaxAlign) const {
  uint64_t Val = -MaxAlign;
  unsigned AndOp = getANDriOpcode(Uses64BitFramePtr, Val);

  MachineFunction &MF = *MBB.getParent();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const X86TargetLowering &TLI = *STI.getTargetLowering();
  const uint64_t StackProbeSize = TLI.getStackProbeSize(MF);
  const bool EmitInlineStackProbe = TLI.hasInlineStackProbe(MF);

  // We want to make sure that (in the worst case) less than StackProbeSize
  // bytes are left unprobed after the AND. This assumption is used in
  // emitStackProbeInlineGeneric.
  if (Reg == StackPtr && EmitInlineStackProbe && MaxAlign >= StackProbeSize) {
    {
      NumFrameLoopProbe++;
      MachineBasicBlock *entryMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *headMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *bodyMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());
      MachineBasicBlock *footMBB =
          MF.CreateMachineBasicBlock(MBB.getBasicBlock());

      MachineFunction::iterator MBBIter = MBB.getIterator();
      MF.insert(MBBIter, entryMBB);
      MF.insert(MBBIter, headMBB);
      MF.insert(MBBIter, bodyMBB);
      MF.insert(MBBIter, footMBB);
      const unsigned MovMIOpc = Is64Bit ? X86::MOV64mi32 : X86::MOV32mi;
      Register FinalStackProbed = Uses64BitFramePtr ? X86::R11
                                  : Is64Bit         ? X86::R11D
                                                    : X86::EAX;

      // Setup entry block.
      {
        entryMBB->splice(entryMBB->end(), &MBB, MBB.begin(), MBBI);
        BuildMI(entryMBB, DL, TII.get(TargetOpcode::COPY), FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        MachineInstr *MI =
            BuildMI(entryMBB, DL, TII.get(AndOp), FinalStackProbed)
                .addReg(FinalStackProbed)
                .addImm(Val)
                .setMIFlag(MachineInstr::FrameSetup);

        // The EFLAGS implicit def is dead.
        MI->getOperand(3).setIsDead();

        BuildMI(entryMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);
        BuildMI(entryMBB, DL, TII.get(X86::JCC_1))
            .addMBB(&MBB)
            .addImm(X86::COND_E)
            .setMIFlag(MachineInstr::FrameSetup);
        entryMBB->addSuccessor(headMBB);
        entryMBB->addSuccessor(&MBB);
      }

      // Loop entry block.
      {
        const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr);
        BuildMI(headMBB, DL, TII.get(SUBOpc), StackPtr)
            .addReg(StackPtr)
            .addImm(StackProbeSize)
            .setMIFlag(MachineInstr::FrameSetup);

        BuildMI(headMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(StackPtr)
            .addReg(FinalStackProbed)
            .setMIFlag(MachineInstr::FrameSetup);

        // Jump to the footer if StackPtr < FinalStackProbed.
        BuildMI(headMBB, DL, TII.get(X86::JCC_1))
            .addMBB(footMBB)
            .addImm(X86::COND_B)
            .setMIFlag(MachineInstr::FrameSetup);

        headMBB->addSuccessor(bodyMBB);
        headMBB->addSuccessor(footMBB);
      }

      // Setup loop body.
      {
        addRegOffset(BuildMI(bodyMBB, DL, TII.get(MovMIOpc))
                         .setMIFlag(MachineInstr::FrameSetup),
                     StackPtr, false, 0)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);

        const unsigned SUBOpc = getSUBriOpcode(Uses64BitFramePtr);
        BuildMI(bodyMBB, DL, TII.get(SUBOpc), StackPtr)
            .addReg(StackPtr)
            .addImm(StackProbeSize)
            .setMIFlag(MachineInstr::FrameSetup);

        // Compare with the stack pointer bound.
        BuildMI(bodyMBB, DL,
                TII.get(Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
            .addReg(FinalStackProbed)
            .addReg(StackPtr)
            .setMIFlag(MachineInstr::FrameSetup);

        // Jump back while FinalStackProbed < StackPtr.
        BuildMI(bodyMBB, DL, TII.get(X86::JCC_1))
            .addMBB(bodyMBB)
            .addImm(X86::COND_B)
            .setMIFlag(MachineInstr::FrameSetup);
        bodyMBB->addSuccessor(bodyMBB);
        bodyMBB->addSuccessor(footMBB);
      }

      // Setup loop footer.
      {
        BuildMI(footMBB, DL, TII.get(TargetOpcode::COPY), StackPtr)
            .addReg(FinalStackProbed)
            .setMIFlag(MachineInstr::FrameSetup);
        addRegOffset(BuildMI(footMBB, DL, TII.get(MovMIOpc))
                         .setMIFlag(MachineInstr::FrameSetup),
                     StackPtr, false, 0)
            .addImm(0)
            .setMIFlag(MachineInstr::FrameSetup);
        footMBB->addSuccessor(&MBB);
      }

      bool anyChange = false;
      do {
        anyChange = recomputeLiveIns(*footMBB) || recomputeLiveIns(*bodyMBB) ||
                    recomputeLiveIns(*headMBB) || recomputeLiveIns(MBB);
      } while (anyChange);
    }
  } else {
    MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(AndOp), Reg)
                           .addReg(Reg)
                           .addImm(Val)
                           .setMIFlag(MachineInstr::FrameSetup);

    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }
}
1399
1400 bool X86FrameLowering::has128ByteRedZone(const MachineFunction &MF) const {
1401 // x86-64 (non Win64) has a 128 byte red zone which is guaranteed not to be
1402 // clobbered by any interrupt handler.
1403 assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
1404 "MF used frame lowering for wrong subtarget");
1405 const Function &Fn = MF.getFunction();
1406 const bool IsWin64CC = STI.isCallingConvWin64(Fn.getCallingConv());
1407 return Is64Bit && !IsWin64CC && !Fn.hasFnAttribute(Attribute::NoRedZone);
1408 }
1409
1410 /// Return true if we need to use the restricted Windows x64 prologue and
1411 /// epilogue code patterns that can be described with WinCFI (.seh_*
1412 /// directives).
1413 bool X86FrameLowering::isWin64Prologue(const MachineFunction &MF) const {
1414 return MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
1415 }
1416
1417 bool X86FrameLowering::needsDwarfCFI(const MachineFunction &MF) const {
1418 return !isWin64Prologue(MF) && MF.needsFrameMoves();
1419 }
1420
1421 /// emitPrologue - Push callee-saved registers onto the stack, which
1422 /// automatically adjusts the stack pointer. Adjust the stack pointer to allocate
1423 /// space for local variables. Also emit labels used by the exception handler to
1424 /// generate the exception handling frames.
1425
1426 /*
1427 Here's a gist of what gets emitted:
1428
1429 ; Establish frame pointer, if needed
1430 [if needs FP]
1431 push %rbp
1432 .cfi_def_cfa_offset 16
1433 .cfi_offset %rbp, -16
1434     .seh_pushreg %rbp
1435 mov %rsp, %rbp
1436 .cfi_def_cfa_register %rbp
1437
1438 ; Spill general-purpose registers
1439 [for all callee-saved GPRs]
1440 pushq %<reg>
1441 [if not needs FP]
1442 .cfi_def_cfa_offset (offset from RETADDR)
1443 .seh_pushreg %<reg>
1444
1445 ; If the required stack alignment > default stack alignment
1446 ; rsp needs to be re-aligned. This creates a "re-alignment gap"
1447 ; of unknown size in the stack frame.
1448 [if stack needs re-alignment]
1449 and $MASK, %rsp
1450
1451 ; Allocate space for locals
1452 [if target is Windows and allocated space > 4096 bytes]
1453 ; Windows needs special care for allocations larger
1454 ; than one page.
1455 mov $NNN, %rax
1456 call ___chkstk_ms/___chkstk
1457 sub %rax, %rsp
1458 [else]
1459 sub $NNN, %rsp
1460
1461 [if needs FP]
1462 .seh_stackalloc (size of XMM spill slots)
1463 .seh_setframe %rbp, SEHFrameOffset ; = size of all spill slots
1464 [else]
1465 .seh_stackalloc NNN
1466
1467 ; Spill XMMs
1468 ; Note that while only the Windows 64 ABI specifies XMMs as callee-preserved,
1469 ; they may get spilled on any platform if the current function
1470 ; calls @llvm.eh.unwind.init
1471 [if needs FP]
1472 [for all callee-saved XMM registers]
1473 movaps %<xmm reg>, -MMM(%rbp)
1474 [for all callee-saved XMM registers]
1475 .seh_savexmm %<xmm reg>, (-MMM + SEHFrameOffset)
1476 ; i.e. the offset relative to (%rbp - SEHFrameOffset)
1477 [else]
1478 [for all callee-saved XMM registers]
1479 movaps %<xmm reg>, KKK(%rsp)
1480 [for all callee-saved XMM registers]
1481 .seh_savexmm %<xmm reg>, KKK
1482
1483 .seh_endprologue
1484
1485 [if needs base pointer]
1486 mov %rsp, %rbx
1487 [if needs to restore base pointer]
1488 mov %rsp, -MMM(%rbp)
1489
1490 ; Emit CFI info
1491 [if needs FP]
1492 [for all callee-saved registers]
1493 .cfi_offset %<reg>, (offset from %rbp)
1494 [else]
1495 .cfi_def_cfa_offset (offset from RETADDR)
1496 [for all callee-saved registers]
1497 .cfi_offset %<reg>, (offset from %rsp)
1498
1499 Notes:
1500 - .seh directives are emitted only for Windows 64 ABI
1501 - .cv_fpo directives are emitted on win32 when emitting CodeView
1502 - .cfi directives are emitted for all other ABIs
1503 - for 32-bit code, substitute %e?? registers for %r??
1504 */
1505
1506 void X86FrameLowering::emitPrologue(MachineFunction &MF,
1507 MachineBasicBlock &MBB) const {
1508 assert(&STI == &MF.getSubtarget<X86Subtarget>() &&
1509 "MF used frame lowering for wrong subtarget");
1510 MachineBasicBlock::iterator MBBI = MBB.begin();
1511 MachineFrameInfo &MFI = MF.getFrameInfo();
1512 const Function &Fn = MF.getFunction();
1513 MachineModuleInfo &MMI = MF.getMMI();
1514 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1515 uint64_t MaxAlign = calculateMaxStackAlign(MF); // Desired stack alignment.
1516 uint64_t StackSize = MFI.getStackSize(); // Number of bytes to allocate.
1517 bool IsFunclet = MBB.isEHFuncletEntry();
1518 EHPersonality Personality = EHPersonality::Unknown;
1519 if (Fn.hasPersonalityFn())
1520 Personality = classifyEHPersonality(Fn.getPersonalityFn());
1521 bool FnHasClrFunclet =
1522 MF.hasEHFunclets() && Personality == EHPersonality::CoreCLR;
1523 bool IsClrFunclet = IsFunclet && FnHasClrFunclet;
1524 bool HasFP = hasFP(MF);
1525 bool IsWin64Prologue = isWin64Prologue(MF);
1526 bool NeedsWin64CFI = IsWin64Prologue && Fn.needsUnwindTableEntry();
1527 // FIXME: Emit FPO data for EH funclets.
1528 bool NeedsWinFPO =
1529 !IsFunclet && STI.isTargetWin32() && MMI.getModule()->getCodeViewFlag();
1530 bool NeedsWinCFI = NeedsWin64CFI || NeedsWinFPO;
1531 bool NeedsDwarfCFI = needsDwarfCFI(MF);
1532 Register FramePtr = TRI->getFrameRegister(MF);
1533 const Register MachineFramePtr =
1534 STI.isTarget64BitILP32() ? Register(getX86SubSuperRegister(FramePtr, 64))
1535 : FramePtr;
1536 Register BasePtr = TRI->getBaseRegister();
1537 bool HasWinCFI = false;
1538
1539 // Debug location must be unknown since the first debug location is used
1540 // to determine the end of the prologue.
1541 DebugLoc DL;
1542 Register ArgBaseReg;
1543
1544 // Emit extra prolog for argument stack slot reference.
1545 if (auto *MI = X86FI->getStackPtrSaveMI()) {
1546     // MI is the LEA instruction created in X86ArgumentStackSlotPass.
1547     // Create an extra prolog for stack realignment.
1548 ArgBaseReg = MI->getOperand(0).getReg();
1549 // leal 4(%esp), %basereg
1550 // .cfi_def_cfa %basereg, 0
1551 // andl $-128, %esp
1552 // pushl -4(%basereg)
1553 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::LEA64r : X86::LEA32r),
1554 ArgBaseReg)
1555 .addUse(StackPtr)
1556 .addImm(1)
1557 .addUse(X86::NoRegister)
1558 .addImm(SlotSize)
1559 .addUse(X86::NoRegister)
1560 .setMIFlag(MachineInstr::FrameSetup);
1561 if (NeedsDwarfCFI) {
1562 // .cfi_def_cfa %basereg, 0
1563 unsigned DwarfStackPtr = TRI->getDwarfRegNum(ArgBaseReg, true);
1564 BuildCFI(MBB, MBBI, DL,
1565 MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, 0),
1566 MachineInstr::FrameSetup);
1567 }
1568 BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
1569 int64_t Offset = -(int64_t)SlotSize;
1570 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64rmm : X86::PUSH32rmm))
1571 .addReg(ArgBaseReg)
1572 .addImm(1)
1573 .addReg(X86::NoRegister)
1574 .addImm(Offset)
1575 .addReg(X86::NoRegister)
1576 .setMIFlag(MachineInstr::FrameSetup);
1577 }
1578
1579   // Space reserved for stack-based arguments when making an (ABI-guaranteed)
1580 // tail call.
1581 unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta();
1582 if (TailCallArgReserveSize && IsWin64Prologue)
1583 report_fatal_error("Can't handle guaranteed tail call under win64 yet");
1584
1585 const bool EmitStackProbeCall =
1586 STI.getTargetLowering()->hasStackProbeSymbol(MF);
1587 unsigned StackProbeSize = STI.getTargetLowering()->getStackProbeSize(MF);
1588
1589 if (HasFP && X86FI->hasSwiftAsyncContext()) {
1590 switch (MF.getTarget().Options.SwiftAsyncFramePointer) {
1591 case SwiftAsyncFramePointerMode::DeploymentBased:
1592 if (STI.swiftAsyncContextIsDynamicallySet()) {
1593 // The special symbol below is absolute and has a *value* suitable to be
1594 // combined with the frame pointer directly.
1595 BuildMI(MBB, MBBI, DL, TII.get(X86::OR64rm), MachineFramePtr)
1596 .addUse(MachineFramePtr)
1597 .addUse(X86::RIP)
1598 .addImm(1)
1599 .addUse(X86::NoRegister)
1600 .addExternalSymbol("swift_async_extendedFramePointerFlags",
1601 X86II::MO_GOTPCREL)
1602 .addUse(X86::NoRegister);
1603 break;
1604 }
1605 [[fallthrough]];
1606
1607 case SwiftAsyncFramePointerMode::Always:
1608 BuildMI(MBB, MBBI, DL, TII.get(X86::BTS64ri8), MachineFramePtr)
1609 .addUse(MachineFramePtr)
1610 .addImm(60)
1611 .setMIFlag(MachineInstr::FrameSetup);
1612 break;
1613
1614 case SwiftAsyncFramePointerMode::Never:
1615 break;
1616 }
1617 }
1618
1619 // Re-align the stack on 64-bit if the x86-interrupt calling convention is
1620 // used and an error code was pushed, since the x86-64 ABI requires a 16-byte
1621 // stack alignment.
1622 if (Fn.getCallingConv() == CallingConv::X86_INTR && Is64Bit &&
1623 Fn.arg_size() == 2) {
1624 StackSize += 8;
1625 MFI.setStackSize(StackSize);
1626
1627 // Update the stack pointer by pushing a register. This is the instruction
1628     // that would end up being emitted by a call to `emitSPUpdate`.
1629 // Hard-coding the update to a push avoids emitting a second
1630 // `STACKALLOC_W_PROBING` instruction in the save block: We know that stack
1631 // probing isn't needed anyways for an 8-byte update.
1632 // Pushing a register leaves us in a similar situation to a regular
1633 // function call where we know that the address at (rsp-8) is writeable.
1634 // That way we avoid any off-by-ones with stack probing for additional
1635 // stack pointer updates later on.
1636 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
1637 .addReg(X86::RAX, RegState::Undef)
1638 .setMIFlag(MachineInstr::FrameSetup);
1639 }
1640
1641   // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
1642   // function, use up to 128 bytes of stack space, and don't have a frame
1643   // pointer, calls, or dynamic allocas, then we do not need to adjust the
1644   // stack pointer (we fit in the Red Zone). We also check that we don't
1645   // push and pop from the stack.
1646 if (has128ByteRedZone(MF) && !TRI->hasStackRealignment(MF) &&
1647 !MFI.hasVarSizedObjects() && // No dynamic alloca.
1648 !MFI.adjustsStack() && // No calls.
1649 !EmitStackProbeCall && // No stack probes.
1650 !MFI.hasCopyImplyingStackAdjustment() && // Don't push and pop.
1651 !MF.shouldSplitStack()) { // Regular stack
1652 uint64_t MinSize =
1653 X86FI->getCalleeSavedFrameSize() - X86FI->getTCReturnAddrDelta();
1654 if (HasFP)
1655 MinSize += SlotSize;
1656 X86FI->setUsesRedZone(MinSize > 0 || StackSize > 0);
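    // Example: a leaf function with 96 bytes of locals and no CSR pushes has
    // MinSize == 0 and StackSize <= 128, so StackSize collapses to 0 below and
    // no stack pointer adjustment is emitted.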
1657 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
1658 MFI.setStackSize(StackSize);
1659 }
1660
1661 // Insert stack pointer adjustment for later moving of return addr. Only
1662 // applies to tail call optimized functions where the callee argument stack
1663   // size is bigger than the caller's.
1664 if (TailCallArgReserveSize != 0) {
1665 BuildStackAdjustment(MBB, MBBI, DL, -(int)TailCallArgReserveSize,
1666 /*InEpilogue=*/false)
1667 .setMIFlag(MachineInstr::FrameSetup);
1668 }
1669
1670 // Mapping for machine moves:
1671 //
1672 // DST: VirtualFP AND
1673 // SRC: VirtualFP => DW_CFA_def_cfa_offset
1674 // ELSE => DW_CFA_def_cfa
1675 //
1676 // SRC: VirtualFP AND
1677 // DST: Register => DW_CFA_def_cfa_register
1678 //
1679 // ELSE
1680 // OFFSET < 0 => DW_CFA_offset_extended_sf
1681 // REG < 64 => DW_CFA_offset + Reg
1682 // ELSE => DW_CFA_offset_extended
1683
1684 uint64_t NumBytes = 0;
1685 int stackGrowth = -SlotSize;
1686
1687 // Find the funclet establisher parameter
1688 Register Establisher = X86::NoRegister;
1689 if (IsClrFunclet)
1690 Establisher = Uses64BitFramePtr ? X86::RCX : X86::ECX;
1691 else if (IsFunclet)
1692 Establisher = Uses64BitFramePtr ? X86::RDX : X86::EDX;
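  // WinEH passes the establisher frame in the second integer argument
  // register (RDX) for C++/SEH funclets, while the CoreCLR personality uses
  // the first (RCX); hence the two cases above.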
1693
1694 if (IsWin64Prologue && IsFunclet && !IsClrFunclet) {
1695 // Immediately spill establisher into the home slot.
1696 // The runtime cares about this.
1697 // MOV64mr %rdx, 16(%rsp)
1698 unsigned MOVmr = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
1699 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), StackPtr, true, 16)
1700 .addReg(Establisher)
1701 .setMIFlag(MachineInstr::FrameSetup);
1702 MBB.addLiveIn(Establisher);
1703 }
1704
1705 if (HasFP) {
1706 assert(MF.getRegInfo().isReserved(MachineFramePtr) && "FP reserved");
1707
1708 // Calculate required stack adjustment.
1709 uint64_t FrameSize = StackSize - SlotSize;
1710 NumBytes =
1711 FrameSize - (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);
1712
1713     // Callee-saved registers are pushed on the stack before it is realigned.
1714 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
1715 NumBytes = alignTo(NumBytes, MaxAlign);
1716
1717 // Save EBP/RBP into the appropriate stack slot.
1718 BuildMI(MBB, MBBI, DL,
1719 TII.get(getPUSHOpcode(MF.getSubtarget<X86Subtarget>())))
1720 .addReg(MachineFramePtr, RegState::Kill)
1721 .setMIFlag(MachineInstr::FrameSetup);
1722
1723 if (NeedsDwarfCFI && !ArgBaseReg.isValid()) {
1724 // Mark the place where EBP/RBP was saved.
1725 // Define the current CFA rule to use the provided offset.
1726 assert(StackSize);
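      // After the frame-pointer push the CFA is two slots above the stack
      // pointer: the return address plus the saved FP, i.e. -2 * stackGrowth
      // (16 bytes on x86-64), plus any tail-call argument reserve.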
1727 BuildCFI(MBB, MBBI, DL,
1728 MCCFIInstruction::cfiDefCfaOffset(
1729 nullptr, -2 * stackGrowth + (int)TailCallArgReserveSize),
1730 MachineInstr::FrameSetup);
1731
1732 // Change the rule for the FramePtr to be an "offset" rule.
1733 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1734 BuildCFI(MBB, MBBI, DL,
1735 MCCFIInstruction::createOffset(nullptr, DwarfFramePtr,
1736 2 * stackGrowth -
1737 (int)TailCallArgReserveSize),
1738 MachineInstr::FrameSetup);
1739 }
1740
1741 if (NeedsWinCFI) {
1742 HasWinCFI = true;
1743 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1744 .addImm(FramePtr)
1745 .setMIFlag(MachineInstr::FrameSetup);
1746 }
1747
1748 if (!IsFunclet) {
1749 if (X86FI->hasSwiftAsyncContext()) {
1750 const auto &Attrs = MF.getFunction().getAttributes();
1751
1752 // Before we update the live frame pointer we have to ensure there's a
1753 // valid (or null) asynchronous context in its slot just before FP in
1754 // the frame record, so store it now.
1755 if (Attrs.hasAttrSomewhere(Attribute::SwiftAsync)) {
1756 // We have an initial context in r14, store it just before the frame
1757 // pointer.
1758 MBB.addLiveIn(X86::R14);
1759 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
1760 .addReg(X86::R14)
1761 .setMIFlag(MachineInstr::FrameSetup);
1762 } else {
1763 // No initial context, store null so that there's no pointer that
1764 // could be misused.
1765 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64i32))
1766 .addImm(0)
1767 .setMIFlag(MachineInstr::FrameSetup);
1768 }
1769
1770 if (NeedsWinCFI) {
1771 HasWinCFI = true;
1772 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1773 .addImm(X86::R14)
1774 .setMIFlag(MachineInstr::FrameSetup);
1775 }
1776
1777 BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr)
1778 .addUse(X86::RSP)
1779 .addImm(1)
1780 .addUse(X86::NoRegister)
1781 .addImm(8)
1782 .addUse(X86::NoRegister)
1783 .setMIFlag(MachineInstr::FrameSetup);
1784 BuildMI(MBB, MBBI, DL, TII.get(X86::SUB64ri32), X86::RSP)
1785 .addUse(X86::RSP)
1786 .addImm(8)
1787 .setMIFlag(MachineInstr::FrameSetup);
1788 }
1789
1790 if (!IsWin64Prologue && !IsFunclet) {
1791 // Update EBP with the new base value.
1792 if (!X86FI->hasSwiftAsyncContext())
1793 BuildMI(MBB, MBBI, DL,
1794 TII.get(Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr),
1795 FramePtr)
1796 .addReg(StackPtr)
1797 .setMIFlag(MachineInstr::FrameSetup);
1798
1799 if (NeedsDwarfCFI) {
1800 if (ArgBaseReg.isValid()) {
1801 SmallString<64> CfaExpr;
1802 CfaExpr.push_back(dwarf::DW_CFA_expression);
1803 uint8_t buffer[16];
1804 unsigned DwarfReg = TRI->getDwarfRegNum(MachineFramePtr, true);
1805 CfaExpr.append(buffer, buffer + encodeULEB128(DwarfReg, buffer));
1806 CfaExpr.push_back(2);
1807 CfaExpr.push_back((uint8_t)(dwarf::DW_OP_breg0 + DwarfReg));
1808 CfaExpr.push_back(0);
1809 // DW_CFA_expression: reg5 DW_OP_breg5 +0
1810 BuildCFI(MBB, MBBI, DL,
1811 MCCFIInstruction::createEscape(nullptr, CfaExpr.str()),
1812 MachineInstr::FrameSetup);
1813 } else {
1814 // Mark effective beginning of when frame pointer becomes valid.
1815 // Define the current CFA to use the EBP/RBP register.
1816 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
1817 BuildCFI(
1818 MBB, MBBI, DL,
1819 MCCFIInstruction::createDefCfaRegister(nullptr, DwarfFramePtr),
1820 MachineInstr::FrameSetup);
1821 }
1822 }
1823
1824 if (NeedsWinFPO) {
1825 // .cv_fpo_setframe $FramePtr
1826 HasWinCFI = true;
1827 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
1828 .addImm(FramePtr)
1829 .addImm(0)
1830 .setMIFlag(MachineInstr::FrameSetup);
1831 }
1832 }
1833 }
1834 } else {
1835 assert(!IsFunclet && "funclets without FPs not yet implemented");
1836 NumBytes =
1837 StackSize - (X86FI->getCalleeSavedFrameSize() + TailCallArgReserveSize);
1838 }
1839
1840   // Update the offset adjustment, which is mainly used by CodeView to translate
1841 // from ESP to VFRAME relative local variable offsets.
1842 if (!IsFunclet) {
1843 if (HasFP && TRI->hasStackRealignment(MF))
1844 MFI.setOffsetAdjustment(-NumBytes);
1845 else
1846 MFI.setOffsetAdjustment(-StackSize);
1847 }
1848
1849 // For EH funclets, only allocate enough space for outgoing calls. Save the
1850 // NumBytes value that we would've used for the parent frame.
1851 unsigned ParentFrameNumBytes = NumBytes;
1852 if (IsFunclet)
1853 NumBytes = getWinEHFuncletFrameSize(MF);
1854
1855 // Skip the callee-saved push instructions.
1856 bool PushedRegs = false;
1857 int StackOffset = 2 * stackGrowth;
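  // StackOffset tracks the running CFA offset across the pushes below: it
  // starts at two slots (return address + first push) and grows by one slot
  // per push, or two for a push2.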
1858 MachineBasicBlock::const_iterator LastCSPush = MBBI;
1859 auto IsCSPush = [&](const MachineBasicBlock::iterator &MBBI) {
1860 if (MBBI == MBB.end() || !MBBI->getFlag(MachineInstr::FrameSetup))
1861 return false;
1862 unsigned Opc = MBBI->getOpcode();
1863 return Opc == X86::PUSH32r || Opc == X86::PUSH64r || Opc == X86::PUSHP64r ||
1864 Opc == X86::PUSH2 || Opc == X86::PUSH2P;
1865 };
1866
1867 while (IsCSPush(MBBI)) {
1868 PushedRegs = true;
1869 Register Reg = MBBI->getOperand(0).getReg();
1870 LastCSPush = MBBI;
1871 ++MBBI;
1872 unsigned Opc = LastCSPush->getOpcode();
1873
1874 if (!HasFP && NeedsDwarfCFI) {
1875 // Mark callee-saved push instruction.
1876 // Define the current CFA rule to use the provided offset.
1877 assert(StackSize);
1878       // Compared to a plain push, push2 moves the stack pointer by one
1879       // extra register slot, so account for the additional offset.
1880 if (Opc == X86::PUSH2 || Opc == X86::PUSH2P)
1881 StackOffset += stackGrowth;
1882 BuildCFI(MBB, MBBI, DL,
1883 MCCFIInstruction::cfiDefCfaOffset(nullptr, -StackOffset),
1884 MachineInstr::FrameSetup);
1885 StackOffset += stackGrowth;
1886 }
1887
1888 if (NeedsWinCFI) {
1889 HasWinCFI = true;
1890 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1891 .addImm(Reg)
1892 .setMIFlag(MachineInstr::FrameSetup);
1893 if (Opc == X86::PUSH2 || Opc == X86::PUSH2P)
1894 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_PushReg))
1895 .addImm(LastCSPush->getOperand(1).getReg())
1896 .setMIFlag(MachineInstr::FrameSetup);
1897 }
1898 }
1899
1900 // Realign stack after we pushed callee-saved registers (so that we'll be
1901 // able to calculate their offsets from the frame pointer).
1902   // Don't do this for Win64; it needs to realign the stack after the prologue.
1903 if (!IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF) &&
1904 !ArgBaseReg.isValid()) {
1905 assert(HasFP && "There should be a frame pointer if stack is realigned.");
1906 BuildStackAlignAND(MBB, MBBI, DL, StackPtr, MaxAlign);
1907
1908 if (NeedsWinCFI) {
1909 HasWinCFI = true;
1910 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlign))
1911 .addImm(MaxAlign)
1912 .setMIFlag(MachineInstr::FrameSetup);
1913 }
1914 }
1915
1916   // If there is a SUB32ri of ESP immediately before this instruction, merge
1917   // the two. This can be the case when tail call elimination is enabled and
1918   // the callee has more arguments than the caller.
1919 NumBytes -= mergeSPUpdates(MBB, MBBI, true);
1920
1921 // Adjust stack pointer: ESP -= numbytes.
1922
1923 // Windows and cygwin/mingw require a prologue helper routine when allocating
1924 // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
1925 // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
1926 // stack and adjust the stack pointer in one go. The 64-bit version of
1927 // __chkstk is only responsible for probing the stack. The 64-bit prologue is
1928 // responsible for adjusting the stack pointer. Touching the stack at 4K
1929 // increments is necessary to ensure that the guard pages used by the OS
1930 // virtual memory manager are allocated in correct sequence.
1931 uint64_t AlignedNumBytes = NumBytes;
1932 if (IsWin64Prologue && !IsFunclet && TRI->hasStackRealignment(MF))
1933 AlignedNumBytes = alignTo(AlignedNumBytes, MaxAlign);
1934 if (AlignedNumBytes >= StackProbeSize && EmitStackProbeCall) {
1935 assert(!X86FI->getUsesRedZone() &&
1936 "The Red Zone is not accounted for in stack probes");
1937
1938 // Check whether EAX is livein for this block.
1939 bool isEAXAlive = isEAXLiveIn(MBB);
1940
1941 if (isEAXAlive) {
1942 if (Is64Bit) {
1943 // Save RAX
1944 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH64r))
1945 .addReg(X86::RAX, RegState::Kill)
1946 .setMIFlag(MachineInstr::FrameSetup);
1947 } else {
1948 // Save EAX
1949 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
1950 .addReg(X86::EAX, RegState::Kill)
1951 .setMIFlag(MachineInstr::FrameSetup);
1952 }
1953 }
1954
1955 if (Is64Bit) {
1956 // Handle the 64-bit Windows ABI case where we need to call __chkstk.
1957 // Function prologue is responsible for adjusting the stack pointer.
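    // If RAX was live and pushed above, that push already moved the stack
    // pointer by 8 bytes, so request 8 bytes less from the probe helper.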
1958 int64_t Alloc = isEAXAlive ? NumBytes - 8 : NumBytes;
1959 BuildMI(MBB, MBBI, DL, TII.get(getMOVriOpcode(Is64Bit, Alloc)), X86::RAX)
1960 .addImm(Alloc)
1961 .setMIFlag(MachineInstr::FrameSetup);
1962 } else {
1963 // Allocate NumBytes-4 bytes on stack in case of isEAXAlive.
1964 // We'll also use 4 already allocated bytes for EAX.
1965 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
1966 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
1967 .setMIFlag(MachineInstr::FrameSetup);
1968 }
1969
1970 // Call __chkstk, __chkstk_ms, or __alloca.
1971 emitStackProbe(MF, MBB, MBBI, DL, true);
1972
1973 if (isEAXAlive) {
1974 // Restore RAX/EAX
1975 MachineInstr *MI;
1976 if (Is64Bit)
1977 MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV64rm), X86::RAX),
1978 StackPtr, false, NumBytes - 8);
1979 else
1980 MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), X86::EAX),
1981 StackPtr, false, NumBytes - 4);
1982 MI->setFlag(MachineInstr::FrameSetup);
1983 MBB.insert(MBBI, MI);
1984 }
1985 } else if (NumBytes) {
1986 emitSPUpdate(MBB, MBBI, DL, -(int64_t)NumBytes, /*InEpilogue=*/false);
1987 }
1988
1989 if (NeedsWinCFI && NumBytes) {
1990 HasWinCFI = true;
1991 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_StackAlloc))
1992 .addImm(NumBytes)
1993 .setMIFlag(MachineInstr::FrameSetup);
1994 }
1995
1996 int SEHFrameOffset = 0;
1997 unsigned SPOrEstablisher;
1998 if (IsFunclet) {
1999 if (IsClrFunclet) {
2000 // The establisher parameter passed to a CLR funclet is actually a pointer
2001 // to the (mostly empty) frame of its nearest enclosing funclet; we have
2002 // to find the root function establisher frame by loading the PSPSym from
2003 // the intermediate frame.
2004 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
2005 MachinePointerInfo NoInfo;
2006 MBB.addLiveIn(Establisher);
2007 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rm), Establisher),
2008 Establisher, false, PSPSlotOffset)
2009 .addMemOperand(MF.getMachineMemOperand(
2010 NoInfo, MachineMemOperand::MOLoad, SlotSize, Align(SlotSize)));
2012 // Save the root establisher back into the current funclet's (mostly
2013 // empty) frame, in case a sub-funclet or the GC needs it.
2014 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr,
2015 false, PSPSlotOffset)
2016 .addReg(Establisher)
2017 .addMemOperand(MF.getMachineMemOperand(
2018 NoInfo,
2019 MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
2020 SlotSize, Align(SlotSize)));
2021 }
2022 SPOrEstablisher = Establisher;
2023 } else {
2024 SPOrEstablisher = StackPtr;
2025 }
2026
2027 if (IsWin64Prologue && HasFP) {
2028 // Set RBP to a small fixed offset from RSP. In the funclet case, we base
2029 // this calculation on the incoming establisher, which holds the value of
2030 // RSP from the parent frame at the end of the prologue.
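    // Note that calculateSetFPREG clamps the result: the .seh_setframe
    // offset is encoded in 4 bits scaled by 16, so it can be at most 240 and
    // must be 16-byte aligned.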
2031 SEHFrameOffset = calculateSetFPREG(ParentFrameNumBytes);
2032 if (SEHFrameOffset)
2033 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), FramePtr),
2034 SPOrEstablisher, false, SEHFrameOffset);
2035 else
2036 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64rr), FramePtr)
2037 .addReg(SPOrEstablisher);
2038
2039 // If this is not a funclet, emit the CFI describing our frame pointer.
2040 if (NeedsWinCFI && !IsFunclet) {
2041 assert(!NeedsWinFPO && "this setframe incompatible with FPO data");
2042 HasWinCFI = true;
2043 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SetFrame))
2044 .addImm(FramePtr)
2045 .addImm(SEHFrameOffset)
2046 .setMIFlag(MachineInstr::FrameSetup);
2047 if (isAsynchronousEHPersonality(Personality))
2048 MF.getWinEHFuncInfo()->SEHSetFrameOffset = SEHFrameOffset;
2049 }
2050 } else if (IsFunclet && STI.is32Bit()) {
2051 // Reset EBP / ESI to something good for funclets.
2052 MBBI = restoreWin32EHStackPointers(MBB, MBBI, DL);
2053 // If we're a catch funclet, we can be returned to via catchret. Save ESP
2054 // into the registration node so that the runtime will restore it for us.
2055 if (!MBB.isCleanupFuncletEntry()) {
2056 assert(Personality == EHPersonality::MSVC_CXX);
2057 Register FrameReg;
2058 int FI = MF.getWinEHFuncInfo()->EHRegNodeFrameIndex;
2059 int64_t EHRegOffset = getFrameIndexReference(MF, FI, FrameReg).getFixed();
2060 // ESP is the first field, so no extra displacement is needed.
2061 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32mr)), FrameReg,
2062 false, EHRegOffset)
2063 .addReg(X86::ESP);
2064 }
2065 }
2066
2067 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup)) {
2068 const MachineInstr &FrameInstr = *MBBI;
2069 ++MBBI;
2070
2071 if (NeedsWinCFI) {
2072 int FI;
2073 if (unsigned Reg = TII.isStoreToStackSlot(FrameInstr, FI)) {
2074 if (X86::FR64RegClass.contains(Reg)) {
2075 int Offset;
2076 Register IgnoredFrameReg;
2077 if (IsWin64Prologue && IsFunclet)
2078 Offset = getWin64EHFrameIndexRef(MF, FI, IgnoredFrameReg);
2079 else
2080 Offset =
2081 getFrameIndexReference(MF, FI, IgnoredFrameReg).getFixed() +
2082 SEHFrameOffset;
2083
2084 HasWinCFI = true;
2085 assert(!NeedsWinFPO && "SEH_SaveXMM incompatible with FPO data");
2086 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_SaveXMM))
2087 .addImm(Reg)
2088 .addImm(Offset)
2089 .setMIFlag(MachineInstr::FrameSetup);
2090 }
2091 }
2092 }
2093 }
2094
2095 if (NeedsWinCFI && HasWinCFI)
2096 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_EndPrologue))
2097 .setMIFlag(MachineInstr::FrameSetup);
2098
2099 if (FnHasClrFunclet && !IsFunclet) {
2100 // Save the so-called Initial-SP (i.e. the value of the stack pointer
2101 // immediately after the prolog) into the PSPSlot so that funclets
2102 // and the GC can recover it.
2103 unsigned PSPSlotOffset = getPSPSlotOffsetFromSP(MF);
2104 auto PSPInfo = MachinePointerInfo::getFixedStack(
2105 MF, MF.getWinEHFuncInfo()->PSPSymFrameIdx);
2106 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mr)), StackPtr, false,
2107 PSPSlotOffset)
2108 .addReg(StackPtr)
2109 .addMemOperand(MF.getMachineMemOperand(
2110 PSPInfo, MachineMemOperand::MOStore | MachineMemOperand::MOVolatile,
2111 SlotSize, Align(SlotSize)));
2112 }
2113
2114 // Realign stack after we spilled callee-saved registers (so that we'll be
2115 // able to calculate their offsets from the frame pointer).
2116 // Win64 requires aligning the stack after the prologue.
2117 if (IsWin64Prologue && TRI->hasStackRealignment(MF)) {
2118 assert(HasFP && "There should be a frame pointer if stack is realigned.");
2119 BuildStackAlignAND(MBB, MBBI, DL, SPOrEstablisher, MaxAlign);
2120 }
2121
2122 // We already dealt with stack realignment and funclets above.
2123 if (IsFunclet && STI.is32Bit())
2124 return;
2125
2126 // If we need a base pointer, set it up here. It's whatever the value
2127 // of the stack pointer is at this point. Any variable size objects
2128 // will be allocated after this, so we can still use the base pointer
2129 // to reference locals.
2130 if (TRI->hasBasePointer(MF)) {
2131 // Update the base pointer with the current stack pointer.
2132 unsigned Opc = Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr;
2133 BuildMI(MBB, MBBI, DL, TII.get(Opc), BasePtr)
2134 .addReg(SPOrEstablisher)
2135 .setMIFlag(MachineInstr::FrameSetup);
2136 if (X86FI->getRestoreBasePointer()) {
2137 // Stash value of base pointer. Saving RSP instead of EBP shortens
2138 // dependence chain. Used by SjLj EH.
2139 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
2140 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), FramePtr, true,
2141 X86FI->getRestoreBasePointerOffset())
2142 .addReg(SPOrEstablisher)
2143 .setMIFlag(MachineInstr::FrameSetup);
2144 }
2145
2146 if (X86FI->getHasSEHFramePtrSave() && !IsFunclet) {
2147 // Stash the value of the frame pointer relative to the base pointer for
2148       // Win32 EH, which does the inverse of the above:
2149 // it recovers the frame pointer from the base pointer rather than the
2150 // other way around.
2151 unsigned Opm = Uses64BitFramePtr ? X86::MOV64mr : X86::MOV32mr;
2152 Register UsedReg;
2153 int Offset =
2154 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
2155 .getFixed();
2156 assert(UsedReg == BasePtr);
2157 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opm)), UsedReg, true, Offset)
2158 .addReg(FramePtr)
2159 .setMIFlag(MachineInstr::FrameSetup);
2160 }
2161 }
2162 if (ArgBaseReg.isValid()) {
2163 // Save argument base pointer.
2164 auto *MI = X86FI->getStackPtrSaveMI();
2165 int FI = MI->getOperand(1).getIndex();
2166 unsigned MOVmr = Is64Bit ? X86::MOV64mr : X86::MOV32mr;
2167 // movl %basereg, offset(%ebp)
2168 addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(MOVmr)), FI)
2169 .addReg(ArgBaseReg)
2170 .setMIFlag(MachineInstr::FrameSetup);
2171 }
2172
2173 if (((!HasFP && NumBytes) || PushedRegs) && NeedsDwarfCFI) {
2174 // Mark end of stack pointer adjustment.
2175 if (!HasFP && NumBytes) {
2176 // Define the current CFA rule to use the provided offset.
2177 assert(StackSize);
2178 BuildCFI(
2179 MBB, MBBI, DL,
2180 MCCFIInstruction::cfiDefCfaOffset(nullptr, StackSize - stackGrowth),
2181 MachineInstr::FrameSetup);
2182 }
2183
2184 // Emit DWARF info specifying the offsets of the callee-saved registers.
2185 emitCalleeSavedFrameMoves(MBB, MBBI, DL, true);
2186 }
2187
2188   // An x86 interrupt handling function cannot assume anything about the
2189   // direction flag (DF in the EFLAGS register), so clear the flag by emitting
2190   // a "cld" instruction in the prologue of each interrupt handler function.
2191 //
2192 // FIXME: Create "cld" instruction only in these cases:
2193 // 1. The interrupt handling function uses any of the "rep" instructions.
2194 // 2. Interrupt handling function calls another function.
2195 //
2196 if (Fn.getCallingConv() == CallingConv::X86_INTR)
2197 BuildMI(MBB, MBBI, DL, TII.get(X86::CLD))
2198 .setMIFlag(MachineInstr::FrameSetup);
2199
2200 // At this point we know if the function has WinCFI or not.
2201 MF.setHasWinCFI(HasWinCFI);
2202 }
2203
2204 bool X86FrameLowering::canUseLEAForSPInEpilogue(
2205 const MachineFunction &MF) const {
2206 // We can't use LEA instructions for adjusting the stack pointer if we don't
2207 // have a frame pointer in the Win64 ABI. Only ADD instructions may be used
2208 // to deallocate the stack.
2209 // This means that we can use LEA for SP in two situations:
2210 // 1. We *aren't* using the Win64 ABI which means we are free to use LEA.
2211 // 2. We *have* a frame pointer which means we are permitted to use LEA.
2212 return !MF.getTarget().getMCAsmInfo()->usesWindowsCFI() || hasFP(MF);
2213 }
2214
2215 static bool isFuncletReturnInstr(MachineInstr &MI) {
2216 switch (MI.getOpcode()) {
2217 case X86::CATCHRET:
2218 case X86::CLEANUPRET:
2219 return true;
2220 default:
2221 return false;
2222 }
2223 llvm_unreachable("impossible");
2224 }
2225
2226 // CLR funclets use a special "Previous Stack Pointer Symbol" slot on the
2227 // stack. It holds a pointer to the bottom of the root function frame. The
2228 // establisher frame pointer passed to a nested funclet may point to the
2229 // (mostly empty) frame of its parent funclet, but it will need to find
2230 // the frame of the root function to access locals. To facilitate this,
2231 // every funclet copies the pointer to the bottom of the root function
2232 // frame into a PSPSym slot in its own (mostly empty) stack frame. Using the
2233 // same offset for the PSPSym in the root function frame that's used in the
2234 // funclets' frames allows each funclet to dynamically accept any ancestor
2235 // frame as its establisher argument (the runtime doesn't guarantee the
2236 // immediate parent for some reason lost to history), and also allows the GC,
2237 // which uses the PSPSym for some bookkeeping, to find it in any funclet's
2238 // frame with only a single offset reported for the entire method.
2239 unsigned
2240 X86FrameLowering::getPSPSlotOffsetFromSP(const MachineFunction &MF) const {
2241 const WinEHFuncInfo &Info = *MF.getWinEHFuncInfo();
2242 Register SPReg;
2243 int Offset = getFrameIndexReferencePreferSP(MF, Info.PSPSymFrameIdx, SPReg,
2244 /*IgnoreSPUpdates*/ true)
2245 .getFixed();
2246 assert(Offset >= 0 && SPReg == TRI->getStackRegister());
2247 return static_cast<unsigned>(Offset);
2248 }
2249
2250 unsigned
2251 X86FrameLowering::getWinEHFuncletFrameSize(const MachineFunction &MF) const {
2252 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2253 // This is the size of the pushed CSRs.
2254 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
2255 // This is the size of callee saved XMMs.
2256 const auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
2257 unsigned XMMSize =
2258 WinEHXMMSlotInfo.size() * TRI->getSpillSize(X86::VR128RegClass);
2259 // This is the amount of stack a funclet needs to allocate.
2260 unsigned UsedSize;
2261 EHPersonality Personality =
2262 classifyEHPersonality(MF.getFunction().getPersonalityFn());
2263 if (Personality == EHPersonality::CoreCLR) {
2264 // CLR funclets need to hold enough space to include the PSPSym, at the
2265 // same offset from the stack pointer (immediately after the prolog) as it
2266 // resides at in the main function.
2267 UsedSize = getPSPSlotOffsetFromSP(MF) + SlotSize;
2268 } else {
2269 // Other funclets just need enough stack for outgoing call arguments.
2270 UsedSize = MF.getFrameInfo().getMaxCallFrameSize();
2271 }
2272 // RBP is not included in the callee saved register block. After pushing RBP,
2273 // everything is 16 byte aligned. Everything we allocate before an outgoing
2274 // call must also be 16 byte aligned.
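  // Worked example: 40 bytes of pushed CSRs plus 32 bytes of outgoing
  // argument space gives alignTo(72, 16) == 80, so the funclet allocates
  // 80 + XMMSize - 40 bytes.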
2275 unsigned FrameSizeMinusRBP = alignTo(CSSize + UsedSize, getStackAlign());
2276 // Subtract out the size of the callee saved registers. This is how much stack
2277 // each funclet will allocate.
2278 return FrameSizeMinusRBP + XMMSize - CSSize;
2279 }
2280
2281 static bool isTailCallOpcode(unsigned Opc) {
2282 return Opc == X86::TCRETURNri || Opc == X86::TCRETURNdi ||
2283 Opc == X86::TCRETURNmi || Opc == X86::TCRETURNri64 ||
2284 Opc == X86::TCRETURNdi64 || Opc == X86::TCRETURNmi64;
2285 }
2286
2287 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
2288 MachineBasicBlock &MBB) const {
2289 const MachineFrameInfo &MFI = MF.getFrameInfo();
2290 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2291 MachineBasicBlock::iterator Terminator = MBB.getFirstTerminator();
2292 MachineBasicBlock::iterator MBBI = Terminator;
2293 DebugLoc DL;
2294 if (MBBI != MBB.end())
2295 DL = MBBI->getDebugLoc();
2296 // standard x86_64 and NaCl use 64-bit frame/stack pointers, x32 - 32-bit.
2297 const bool Is64BitILP32 = STI.isTarget64BitILP32();
2298 Register FramePtr = TRI->getFrameRegister(MF);
2299 Register MachineFramePtr =
2300 Is64BitILP32 ? Register(getX86SubSuperRegister(FramePtr, 64)) : FramePtr;
2301
2302 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
2303 bool NeedsWin64CFI =
2304 IsWin64Prologue && MF.getFunction().needsUnwindTableEntry();
2305 bool IsFunclet = MBBI == MBB.end() ? false : isFuncletReturnInstr(*MBBI);
2306
2307 // Get the number of bytes to allocate from the FrameInfo.
2308 uint64_t StackSize = MFI.getStackSize();
2309 uint64_t MaxAlign = calculateMaxStackAlign(MF);
2310 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
2311 unsigned TailCallArgReserveSize = -X86FI->getTCReturnAddrDelta();
2312 bool HasFP = hasFP(MF);
2313 uint64_t NumBytes = 0;
2314
2315 bool NeedsDwarfCFI = (!MF.getTarget().getTargetTriple().isOSDarwin() &&
2316 !MF.getTarget().getTargetTriple().isOSWindows()) &&
2317 MF.needsFrameMoves();
2318
2319 Register ArgBaseReg;
2320 if (auto *MI = X86FI->getStackPtrSaveMI()) {
2321 unsigned Opc = X86::LEA32r;
2322 Register StackReg = X86::ESP;
2323 ArgBaseReg = MI->getOperand(0).getReg();
2324 if (STI.is64Bit()) {
2325 Opc = X86::LEA64r;
2326 StackReg = X86::RSP;
2327 }
2328 // leal -4(%basereg), %esp
2329 // .cfi_def_cfa %esp, 4
2330 BuildMI(MBB, MBBI, DL, TII.get(Opc), StackReg)
2331 .addUse(ArgBaseReg)
2332 .addImm(1)
2333 .addUse(X86::NoRegister)
2334 .addImm(-(int64_t)SlotSize)
2335 .addUse(X86::NoRegister)
2336 .setMIFlag(MachineInstr::FrameDestroy);
2337 if (NeedsDwarfCFI) {
2338 unsigned DwarfStackPtr = TRI->getDwarfRegNum(StackReg, true);
2339 BuildCFI(MBB, MBBI, DL,
2340 MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, SlotSize),
2341 MachineInstr::FrameDestroy);
2342 --MBBI;
2343 }
2344 --MBBI;
2345 }
2346
2347 if (IsFunclet) {
2348 assert(HasFP && "EH funclets without FP not yet implemented");
2349 NumBytes = getWinEHFuncletFrameSize(MF);
2350 } else if (HasFP) {
2351 // Calculate required stack adjustment.
2352 uint64_t FrameSize = StackSize - SlotSize;
2353 NumBytes = FrameSize - CSSize - TailCallArgReserveSize;
2354
2355 // Callee-saved registers were pushed on stack before the stack was
2356 // realigned.
2357 if (TRI->hasStackRealignment(MF) && !IsWin64Prologue)
2358 NumBytes = alignTo(FrameSize, MaxAlign);
2359 } else {
2360 NumBytes = StackSize - CSSize - TailCallArgReserveSize;
2361 }
2362 uint64_t SEHStackAllocAmt = NumBytes;
2363
2364 // AfterPop is the position to insert .cfi_restore.
2365 MachineBasicBlock::iterator AfterPop = MBBI;
2366 if (HasFP) {
2367 if (X86FI->hasSwiftAsyncContext()) {
2368 // Discard the context.
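      // The prologue stored the context plus an 8-byte alignment slot just
      // below the saved frame pointer, so 16 bytes are discarded here.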
2369 int Offset = 16 + mergeSPUpdates(MBB, MBBI, true);
2370 emitSPUpdate(MBB, MBBI, DL, Offset, /*InEpilogue*/ true);
2371 }
2372 // Pop EBP.
2373 BuildMI(MBB, MBBI, DL,
2374 TII.get(getPOPOpcode(MF.getSubtarget<X86Subtarget>())),
2375 MachineFramePtr)
2376 .setMIFlag(MachineInstr::FrameDestroy);
2377
2378 // We need to reset FP to its untagged state on return. Bit 60 is currently
2379 // used to show the presence of an extended frame.
2380 if (X86FI->hasSwiftAsyncContext()) {
2381 BuildMI(MBB, MBBI, DL, TII.get(X86::BTR64ri8), MachineFramePtr)
2382 .addUse(MachineFramePtr)
2383 .addImm(60)
2384 .setMIFlag(MachineInstr::FrameDestroy);
2385 }
2386
2387 if (NeedsDwarfCFI) {
2388 if (!ArgBaseReg.isValid()) {
2389 unsigned DwarfStackPtr =
2390 TRI->getDwarfRegNum(Is64Bit ? X86::RSP : X86::ESP, true);
2391 BuildCFI(MBB, MBBI, DL,
2392 MCCFIInstruction::cfiDefCfa(nullptr, DwarfStackPtr, SlotSize),
2393 MachineInstr::FrameDestroy);
2394 }
2395 if (!MBB.succ_empty() && !MBB.isReturnBlock()) {
2396 unsigned DwarfFramePtr = TRI->getDwarfRegNum(MachineFramePtr, true);
2397 BuildCFI(MBB, AfterPop, DL,
2398 MCCFIInstruction::createRestore(nullptr, DwarfFramePtr),
2399 MachineInstr::FrameDestroy);
2400 --MBBI;
2401 --AfterPop;
2402 }
2403 --MBBI;
2404 }
2405 }
2406
2407 MachineBasicBlock::iterator FirstCSPop = MBBI;
2408 // Skip the callee-saved pop instructions.
2409 while (MBBI != MBB.begin()) {
2410 MachineBasicBlock::iterator PI = std::prev(MBBI);
2411 unsigned Opc = PI->getOpcode();
2412
2413 if (Opc != X86::DBG_VALUE && !PI->isTerminator()) {
2414 if (!PI->getFlag(MachineInstr::FrameDestroy) ||
2415 (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::BTR64ri8 &&
2416 Opc != X86::ADD64ri32 && Opc != X86::POPP64r && Opc != X86::POP2 &&
2417 Opc != X86::POP2P && Opc != X86::LEA64r))
2418 break;
2419 FirstCSPop = PI;
2420 }
2421
2422 --MBBI;
2423 }
2424 if (ArgBaseReg.isValid()) {
2425 // Restore argument base pointer.
2426 auto *MI = X86FI->getStackPtrSaveMI();
2427 int FI = MI->getOperand(1).getIndex();
2428 unsigned MOVrm = Is64Bit ? X86::MOV64rm : X86::MOV32rm;
2429 // movl offset(%ebp), %basereg
2430 addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(MOVrm), ArgBaseReg), FI)
2431 .setMIFlag(MachineInstr::FrameDestroy);
2432 }
2433 MBBI = FirstCSPop;
2434
2435 if (IsFunclet && Terminator->getOpcode() == X86::CATCHRET)
2436 emitCatchRetReturnValue(MBB, FirstCSPop, &*Terminator);
2437
2438 if (MBBI != MBB.end())
2439 DL = MBBI->getDebugLoc();
2440 // If there is an ADD32ri or SUB32ri of ESP immediately before this
2441 // instruction, merge the two instructions.
2442 if (NumBytes || MFI.hasVarSizedObjects())
2443 NumBytes += mergeSPUpdates(MBB, MBBI, true);
2444
2445 // If dynamic alloca is used, then reset esp to point to the last callee-saved
2446   // slot before popping them off! The same applies when the stack was
2447   // realigned. Don't do this if this was a funclet epilogue, since funclets
2448 // will not do realignment or dynamic stack allocation.
2449 if (((TRI->hasStackRealignment(MF)) || MFI.hasVarSizedObjects()) &&
2450 !IsFunclet) {
2451 if (TRI->hasStackRealignment(MF))
2452 MBBI = FirstCSPop;
2453 unsigned SEHFrameOffset = calculateSetFPREG(SEHStackAllocAmt);
2454 uint64_t LEAAmount =
2455 IsWin64Prologue ? SEHStackAllocAmt - SEHFrameOffset : -CSSize;
2456
2457 if (X86FI->hasSwiftAsyncContext())
2458 LEAAmount -= 16;
2459
2460 // There are only two legal forms of epilogue:
2461 // - add SEHAllocationSize, %rsp
2462 // - lea SEHAllocationSize(%FramePtr), %rsp
2463 //
2464 // 'mov %FramePtr, %rsp' will not be recognized as an epilogue sequence.
2465 // However, we may use this sequence if we have a frame pointer because the
2466 // effects of the prologue can safely be undone.
2467 if (LEAAmount != 0) {
2468 unsigned Opc = getLEArOpcode(Uses64BitFramePtr);
2469 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr), FramePtr,
2470 false, LEAAmount);
2471 --MBBI;
2472 } else {
2473 unsigned Opc = (Uses64BitFramePtr ? X86::MOV64rr : X86::MOV32rr);
2474 BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr).addReg(FramePtr);
2475 --MBBI;
2476 }
2477 } else if (NumBytes) {
2478 // Adjust stack pointer back: ESP += numbytes.
2479 emitSPUpdate(MBB, MBBI, DL, NumBytes, /*InEpilogue=*/true);
2480 if (!HasFP && NeedsDwarfCFI) {
2481 // Define the current CFA rule to use the provided offset.
2482 BuildCFI(MBB, MBBI, DL,
2483 MCCFIInstruction::cfiDefCfaOffset(
2484 nullptr, CSSize + TailCallArgReserveSize + SlotSize),
2485 MachineInstr::FrameDestroy);
2486 }
2487 --MBBI;
2488 }
2489
2490 // Windows unwinder will not invoke function's exception handler if IP is
2491 // either in prologue or in epilogue. This behavior causes a problem when a
2492 // call immediately precedes an epilogue, because the return address points
2493 // into the epilogue. To cope with that, we insert an epilogue marker here,
2494 // then replace it with a 'nop' if it ends up immediately after a CALL in the
2495 // final emitted code.
2496 if (NeedsWin64CFI && MF.hasWinCFI())
2497 BuildMI(MBB, MBBI, DL, TII.get(X86::SEH_Epilogue));
2498
2499 if (!HasFP && NeedsDwarfCFI) {
2500 MBBI = FirstCSPop;
2501 int64_t Offset = -CSSize - SlotSize;
2502 // Mark callee-saved pop instruction.
2503 // Define the current CFA rule to use the provided offset.
2504 while (MBBI != MBB.end()) {
2505 MachineBasicBlock::iterator PI = MBBI;
2506 unsigned Opc = PI->getOpcode();
2507 ++MBBI;
2508 if (Opc == X86::POP32r || Opc == X86::POP64r || Opc == X86::POPP64r ||
2509 Opc == X86::POP2 || Opc == X86::POP2P) {
2510 Offset += SlotSize;
2511         // Compared to pop, pop2 restores one extra register, so it moves the
2512         // CFA offset by an additional slot.
2513 if (Opc == X86::POP2 || Opc == X86::POP2P)
2514 Offset += SlotSize;
2515 BuildCFI(MBB, MBBI, DL,
2516 MCCFIInstruction::cfiDefCfaOffset(nullptr, -Offset),
2517 MachineInstr::FrameDestroy);
2518 }
2519 }
2520 }
2521
2522 // Emit DWARF info specifying the restores of the callee-saved registers.
2523   // For an epilogue with the return inside, or any other block without
2524   // successors, no .cfi_restore is needed for the callee-saved registers.
2525 if (NeedsDwarfCFI && !MBB.succ_empty())
2526 emitCalleeSavedFrameMoves(MBB, AfterPop, DL, false);
2527
2528 if (Terminator == MBB.end() || !isTailCallOpcode(Terminator->getOpcode())) {
2529 // Add the return addr area delta back since we are not tail calling.
2530 int Offset = -1 * X86FI->getTCReturnAddrDelta();
2531 assert(Offset >= 0 && "TCDelta should never be positive");
2532 if (Offset) {
2533 // Check for possible merge with preceding ADD instruction.
2534 Offset += mergeSPUpdates(MBB, Terminator, true);
2535 emitSPUpdate(MBB, Terminator, DL, Offset, /*InEpilogue=*/true);
2536 }
2537 }
2538
2539 // Emit tilerelease for AMX kernel.
2540 if (X86FI->hasVirtualTileReg())
2541 BuildMI(MBB, Terminator, DL, TII.get(X86::TILERELEASE));
2542 }
2543
2544 StackOffset X86FrameLowering::getFrameIndexReference(const MachineFunction &MF,
2545 int FI,
2546 Register &FrameReg) const {
2547 const MachineFrameInfo &MFI = MF.getFrameInfo();
2548
2549 bool IsFixed = MFI.isFixedObjectIndex(FI);
2550 // We can't calculate offset from frame pointer if the stack is realigned,
2551 // so enforce usage of stack/base pointer. The base pointer is used when we
2552 // have dynamic allocas in addition to dynamic realignment.
2553 if (TRI->hasBasePointer(MF))
2554 FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getBaseRegister();
2555 else if (TRI->hasStackRealignment(MF))
2556 FrameReg = IsFixed ? TRI->getFramePtr() : TRI->getStackRegister();
2557 else
2558 FrameReg = TRI->getFrameRegister(MF);
2559
2560 // Offset will hold the offset from the stack pointer at function entry to the
2561 // object.
2562 // We need to factor in additional offsets applied during the prologue to the
2563 // frame, base, and stack pointer depending on which is used.
2564 int Offset = MFI.getObjectOffset(FI) - getOffsetOfLocalArea();
2565 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2566 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
2567 uint64_t StackSize = MFI.getStackSize();
2568 bool IsWin64Prologue = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
2569 int64_t FPDelta = 0;
2570
2571 // In an x86 interrupt, remove the offset we added to account for the return
2572 // address from any stack object allocated in the caller's frame. Interrupts
2573 // do not have a standard return address. Fixed objects in the current frame,
2574 // such as SSE register spills, should not get this treatment.
2575 if (MF.getFunction().getCallingConv() == CallingConv::X86_INTR &&
2576 Offset >= 0) {
2577 Offset += getOffsetOfLocalArea();
2578 }
2579
2580 if (IsWin64Prologue) {
2581 assert(!MFI.hasCalls() || (StackSize % 16) == 8);
2582
2583 // Calculate required stack adjustment.
2584 uint64_t FrameSize = StackSize - SlotSize;
2585 // If required, include space for extra hidden slot for stashing base
2586 // pointer.
2587 if (X86FI->getRestoreBasePointer())
2588 FrameSize += SlotSize;
2589 uint64_t NumBytes = FrameSize - CSSize;
2590
2591 uint64_t SEHFrameOffset = calculateSetFPREG(NumBytes);
2592 if (FI && FI == X86FI->getFAIndex())
2593 return StackOffset::getFixed(-SEHFrameOffset);
2594
2595     // FPDelta is the offset between the "traditional" FP location (the old
2596     // base pointer followed by the return address) and the location required
2597     // by the restricted Win64 prologue.
2598 // Add FPDelta to all offsets below that go through the frame pointer.
2599 FPDelta = FrameSize - SEHFrameOffset;
2600 assert((!MFI.hasCalls() || (FPDelta % 16) == 0) &&
2601 "FPDelta isn't aligned per the Win64 ABI!");
2602 }
2603
2604 if (FrameReg == TRI->getFramePtr()) {
2605 // Skip saved EBP/RBP
2606 Offset += SlotSize;
2607
2608 // Account for restricted Windows prologue.
2609 Offset += FPDelta;
2610
2611 // Skip the RETADDR move area
2612 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
2613 if (TailCallReturnAddrDelta < 0)
2614 Offset -= TailCallReturnAddrDelta;
2615
2616 return StackOffset::getFixed(Offset);
2617 }
2618
2619 // FrameReg is either the stack pointer or a base pointer. But the base is
2620 // located at the end of the statically known StackSize so the distinction
2621 // doesn't really matter.
2622 if (TRI->hasStackRealignment(MF) || TRI->hasBasePointer(MF))
2623 assert(isAligned(MFI.getObjectAlign(FI), -(Offset + StackSize)));
2624 return StackOffset::getFixed(Offset + StackSize);
2625 }
2626
2627 int X86FrameLowering::getWin64EHFrameIndexRef(const MachineFunction &MF, int FI,
2628 Register &FrameReg) const {
2629 const MachineFrameInfo &MFI = MF.getFrameInfo();
2630 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2631 const auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
2632 const auto it = WinEHXMMSlotInfo.find(FI);
2633
2634 if (it == WinEHXMMSlotInfo.end())
2635 return getFrameIndexReference(MF, FI, FrameReg).getFixed();
2636
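  // XMM CSR slots sit immediately above the (aligned) outgoing-argument
  // area, so address them relative to SP: skip past the aligned maximum call
  // frame, then add the slot's recorded offset.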
2637 FrameReg = TRI->getStackRegister();
2638 return alignDown(MFI.getMaxCallFrameSize(), getStackAlign().value()) +
2639 it->second;
2640 }
2641
2642 StackOffset
2643 X86FrameLowering::getFrameIndexReferenceSP(const MachineFunction &MF, int FI,
2644 Register &FrameReg,
2645 int Adjustment) const {
2646 const MachineFrameInfo &MFI = MF.getFrameInfo();
2647 FrameReg = TRI->getStackRegister();
2648 return StackOffset::getFixed(MFI.getObjectOffset(FI) -
2649 getOffsetOfLocalArea() + Adjustment);
2650 }
2651
2652 StackOffset
2653 X86FrameLowering::getFrameIndexReferencePreferSP(const MachineFunction &MF,
2654 int FI, Register &FrameReg,
2655 bool IgnoreSPUpdates) const {
2656
2657 const MachineFrameInfo &MFI = MF.getFrameInfo();
2658 // Does not include any dynamic realign.
2659 const uint64_t StackSize = MFI.getStackSize();
2660 // LLVM arranges the stack as follows:
2661 // ...
2662 // ARG2
2663 // ARG1
2664 // RETADDR
2665 // PUSH RBP <-- RBP points here
2666 // PUSH CSRs
2667 // ~~~~~~~ <-- possible stack realignment (non-win64)
2668 // ...
2669 // STACK OBJECTS
2670 // ... <-- RSP after prologue points here
2671 // ~~~~~~~ <-- possible stack realignment (win64)
2672 //
2673 // if (hasVarSizedObjects()):
2674 // ... <-- "base pointer" (ESI/RBX) points here
2675 // DYNAMIC ALLOCAS
2676 // ... <-- RSP points here
2677 //
2678 // Case 1: In the simple case of no stack realignment and no dynamic
2679 // allocas, both "fixed" stack objects (arguments and CSRs) are addressable
2680 // with fixed offsets from RSP.
2681 //
2682 // Case 2: In the case of stack realignment with no dynamic allocas, fixed
2683 // stack objects are addressed with RBP and regular stack objects with RSP.
2684 //
2685 // Case 3: In the case of dynamic allocas and stack realignment, RSP is used
2686 // to address stack arguments for outgoing calls and nothing else. The "base
2687 // pointer" points to local variables, and RBP points to fixed objects.
2688 //
2689 // In cases 2 and 3, we can only answer for non-fixed stack objects, and the
2690 // answer we give is relative to the SP after the prologue, and not the
2691 // SP in the middle of the function.
2692
2693 if (MFI.isFixedObjectIndex(FI) && TRI->hasStackRealignment(MF) &&
2694 !STI.isTargetWin64())
2695 return getFrameIndexReference(MF, FI, FrameReg);
2696
2697   // If !hasReservedCallFrame the function might have SP adjustment in the
2698 // body. So, even though the offset is statically known, it depends on where
2699 // we are in the function.
2700 if (!IgnoreSPUpdates && !hasReservedCallFrame(MF))
2701 return getFrameIndexReference(MF, FI, FrameReg);
2702
2703 // We don't handle tail calls, and shouldn't be seeing them either.
2704 assert(MF.getInfo<X86MachineFunctionInfo>()->getTCReturnAddrDelta() >= 0 &&
2705 "we don't handle this case!");
2706
2707 // This is how the math works out:
2708 //
2709 // %rsp grows (i.e. gets lower) left to right. Each box below is
2710 // one word (eight bytes). Obj0 is the stack slot we're trying to
2711 // get to.
2712 //
2713 // ----------------------------------
2714 // | BP | Obj0 | Obj1 | ... | ObjN |
2715 // ----------------------------------
2716 // ^ ^ ^ ^
2717 // A B C E
2718 //
2719 // A is the incoming stack pointer.
2720 // (B - A) is the local area offset (-8 for x86-64) [1]
2721 // (C - A) is the Offset returned by MFI.getObjectOffset for Obj0 [2]
2722 //
2723 // |(E - B)| is the StackSize (absolute value, positive). For a
2724   // stack that grows down, this works out to be (B - E). [3]
2725 //
2726 // E is also the value of %rsp after stack has been set up, and we
2727 // want (C - E) -- the value we can add to %rsp to get to Obj0. Now
2728 // (C - E) == (C - A) - (B - A) + (B - E)
2729 // { Using [1], [2] and [3] above }
2730 // == getObjectOffset - LocalAreaOffset + StackSize
2731
2732 return getFrameIndexReferenceSP(MF, FI, FrameReg, StackSize);
2733 }
2734
2735 bool X86FrameLowering::assignCalleeSavedSpillSlots(
2736 MachineFunction &MF, const TargetRegisterInfo *TRI,
2737 std::vector<CalleeSavedInfo> &CSI) const {
2738 MachineFrameInfo &MFI = MF.getFrameInfo();
2739 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2740
2741 unsigned CalleeSavedFrameSize = 0;
2742 unsigned XMMCalleeSavedFrameSize = 0;
2743 auto &WinEHXMMSlotInfo = X86FI->getWinEHXMMSlotInfo();
2744 int SpillSlotOffset = getOffsetOfLocalArea() + X86FI->getTCReturnAddrDelta();
2745
2746 int64_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
2747
2748 if (TailCallReturnAddrDelta < 0) {
2749 // create RETURNADDR area
2750 // arg
2751 // arg
2752 // RETADDR
2753 // { ...
2754 // RETADDR area
2755 // ...
2756 // }
2757 // [EBP]
2758 MFI.CreateFixedObject(-TailCallReturnAddrDelta,
2759 TailCallReturnAddrDelta - SlotSize, true);
2760 }
2761
2762 // Spill the BasePtr if it's used.
2763 if (this->TRI->hasBasePointer(MF)) {
2764 // Allocate a spill slot for EBP if we have a base pointer and EH funclets.
2765 if (MF.hasEHFunclets()) {
2766 int FI = MFI.CreateSpillStackObject(SlotSize, Align(SlotSize));
2767 X86FI->setHasSEHFramePtrSave(true);
2768 X86FI->setSEHFramePtrSaveIndex(FI);
2769 }
2770 }
2771
2772 if (hasFP(MF)) {
2773     // emitPrologue always spills the frame register first.
2774 SpillSlotOffset -= SlotSize;
2775 MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2776
2777 // The async context lives directly before the frame pointer, and we
2778 // allocate a second slot to preserve stack alignment.
2779 if (X86FI->hasSwiftAsyncContext()) {
2780 SpillSlotOffset -= SlotSize;
2781 MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2782 SpillSlotOffset -= SlotSize;
2783 }
2784
2785 // Since emitPrologue and emitEpilogue will handle spilling and restoring of
2786     // the frame register, we can delete it from the CSI list and not have to worry
2787 // about avoiding it later.
2788 Register FPReg = TRI->getFrameRegister(MF);
2789 for (unsigned i = 0; i < CSI.size(); ++i) {
2790 if (TRI->regsOverlap(CSI[i].getReg(), FPReg)) {
2791 CSI.erase(CSI.begin() + i);
2792 break;
2793 }
2794 }
2795 }
2796
2797 // Strategy:
2798 // 1. Use push2 when
2799 //       a) the number of CSRs is > 1, if no padding is needed
2800 //       b) the number of CSRs is > 2, if padding is needed
2801 // 2. When the number of CSR pushes is odd
2802 //    a. Start using push2 from the 1st push if the stack is 16B aligned.
2803 //    b. Start using push2 from the 2nd push if the stack is not 16B aligned.
2804 // 3. When the number of CSR pushes is even, start using push2 from the 1st
2805 //    push and make the stack 16B aligned before the push.
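// For illustration of the rules above: with 3 GPR CSRs and a 16B-aligned
// SpillSlotOffset, no padding is needed and NumRegsForPush2 =
// alignDown(3, 2) = 2, so the first two pushes pair into one push2 and
// the third remains a plain push.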
2806 unsigned NumRegsForPush2 = 0;
2807 if (STI.hasPush2Pop2()) {
2808 unsigned NumCSGPR = llvm::count_if(CSI, [](const CalleeSavedInfo &I) {
2809 return X86::GR64RegClass.contains(I.getReg());
2810 });
2811 bool NeedPadding = (SpillSlotOffset % 16 != 0) && (NumCSGPR % 2 == 0);
2812 bool UsePush2Pop2 = NeedPadding ? NumCSGPR > 2 : NumCSGPR > 1;
2813 X86FI->setPadForPush2Pop2(NeedPadding && UsePush2Pop2);
2814 NumRegsForPush2 = UsePush2Pop2 ? alignDown(NumCSGPR, 2) : 0;
2815 if (X86FI->padForPush2Pop2()) {
2816 SpillSlotOffset -= SlotSize;
2817 MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2818 }
2819 }
2820
2821 // Assign slots for GPRs. It increases frame size.
2822 for (CalleeSavedInfo &I : llvm::reverse(CSI)) {
2823 Register Reg = I.getReg();
2824
2825 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2826 continue;
2827
2828 // A CSR is a candidate for push2/pop2 when its slot offset is 16B aligned
2829 // or when an odd number of candidates has been collected so far.
2830 if (X86FI->getNumCandidatesForPush2Pop2() < NumRegsForPush2 &&
2831 (SpillSlotOffset % 16 == 0 ||
2832 X86FI->getNumCandidatesForPush2Pop2() % 2))
2833 X86FI->addCandidateForPush2Pop2(Reg);
2834
2835 SpillSlotOffset -= SlotSize;
2836 CalleeSavedFrameSize += SlotSize;
2837
2838 int SlotIndex = MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2839 I.setFrameIdx(SlotIndex);
2840 }
2841
2842 // Adjust the offset of the spill slot now that we know the exact callee
2843 // saved frame size.
2844 if (X86FI->getRestoreBasePointer()) {
2845 SpillSlotOffset -= SlotSize;
2846 CalleeSavedFrameSize += SlotSize;
2847
2848 MFI.CreateFixedSpillStackObject(SlotSize, SpillSlotOffset);
2849 // TODO: would saving the slot index be better?
2850 X86FI->setRestoreBasePointer(CalleeSavedFrameSize);
2851 }
2852 assert(X86FI->getNumCandidatesForPush2Pop2() % 2 == 0 &&
2853 "Expect even candidates for push2/pop2");
2854 if (X86FI->getNumCandidatesForPush2Pop2())
2855 ++NumFunctionUsingPush2Pop2;
2856 X86FI->setCalleeSavedFrameSize(CalleeSavedFrameSize);
2857 MFI.setCVBytesOfCalleeSavedRegisters(CalleeSavedFrameSize);
2858
2859 // Assign slots for XMMs.
2860 for (CalleeSavedInfo &I : llvm::reverse(CSI)) {
2861 Register Reg = I.getReg();
2862 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2863 continue;
2864
2865 // If this is a k-register, make sure we look up via the largest legal type.
2866 MVT VT = MVT::Other;
2867 if (X86::VK16RegClass.contains(Reg))
2868 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2869
2870 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2871 unsigned Size = TRI->getSpillSize(*RC);
2872 Align Alignment = TRI->getSpillAlign(*RC);
2873 // ensure alignment
2874 assert(SpillSlotOffset < 0 && "SpillSlotOffset should always be < 0 on X86");
2875 SpillSlotOffset = -alignTo(-SpillSlotOffset, Alignment);
2876
2877 // spill into slot
2878 SpillSlotOffset -= Size;
2879 int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
2880 I.setFrameIdx(SlotIndex);
2881 MFI.ensureMaxAlignment(Alignment);
2882
2883 // Save the start offset and size of XMM in stack frame for funclets.
2884 if (X86::VR128RegClass.contains(Reg)) {
2885 WinEHXMMSlotInfo[SlotIndex] = XMMCalleeSavedFrameSize;
2886 XMMCalleeSavedFrameSize += Size;
2887 }
2888 }
2889
2890 return true;
2891 }
2892
2893 bool X86FrameLowering::spillCalleeSavedRegisters(
2894 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2895 ArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
2896 DebugLoc DL = MBB.findDebugLoc(MI);
2897
2898 // Don't save CSRs in 32-bit EH funclets. The caller saves EBX, EBP, ESI, EDI
2899 // for us, and there are no XMM CSRs on Win32.
2900 if (MBB.isEHFuncletEntry() && STI.is32Bit() && STI.isOSWindows())
2901 return true;
2902
2903 // Push GPRs. It increases frame size.
2904 const MachineFunction &MF = *MBB.getParent();
2905 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
2906 if (X86FI->padForPush2Pop2())
2907 emitSPUpdate(MBB, MI, DL, -(int64_t)SlotSize, /*InEpilogue=*/false);
2908
2909 // Update LiveIn of the basic block and decide whether we can add a kill flag
2910 // to the use.
2911 auto UpdateLiveInCheckCanKill = [&](Register Reg) {
2912 const MachineRegisterInfo &MRI = MF.getRegInfo();
2913 // Do not set a kill flag on values that are also marked as live-in. This
2914 // happens with the @llvm.returnaddress intrinsic and with arguments
2915 // passed in callee saved registers.
2916 // Omitting the kill flags is conservatively correct even if the live-in
2917 // is not used after all.
2918 if (MRI.isLiveIn(Reg))
2919 return false;
2920 MBB.addLiveIn(Reg);
2921 // Check if any register aliasing Reg (including sub-registers) is live-in
2922 for (MCRegAliasIterator AReg(Reg, TRI, false); AReg.isValid(); ++AReg)
2923 if (MRI.isLiveIn(*AReg))
2924 return false;
2925 return true;
2926 };
2927 auto UpdateLiveInGetKillRegState = [&](Register Reg) {
2928 return getKillRegState(UpdateLiveInCheckCanKill(Reg));
2929 };
2930
2931 for (auto RI = CSI.rbegin(), RE = CSI.rend(); RI != RE; ++RI) {
2932 Register Reg = RI->getReg();
2933 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
2934 continue;
2935
2936 if (X86FI->isCandidateForPush2Pop2(Reg)) {
2937 Register Reg2 = (++RI)->getReg();
2938 BuildMI(MBB, MI, DL, TII.get(getPUSH2Opcode(STI)))
2939 .addReg(Reg, UpdateLiveInGetKillRegState(Reg))
2940 .addReg(Reg2, UpdateLiveInGetKillRegState(Reg2))
2941 .setMIFlag(MachineInstr::FrameSetup);
2942 } else {
2943 BuildMI(MBB, MI, DL, TII.get(getPUSHOpcode(STI)))
2944 .addReg(Reg, UpdateLiveInGetKillRegState(Reg))
2945 .setMIFlag(MachineInstr::FrameSetup);
2946 }
2947 }
2948
2949 if (X86FI->getRestoreBasePointer()) {
2950 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
2951 Register BaseReg = this->TRI->getBaseRegister();
2952 BuildMI(MBB, MI, DL, TII.get(Opc))
2953 .addReg(BaseReg, getKillRegState(true))
2954 .setMIFlag(MachineInstr::FrameSetup);
2955 }
2956
2957 // Spill the XMM regs. X86 has no push/pop instructions for XMM registers,
2958 // so they are spilled to the stack frame instead.
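// (For illustration: on Win64 this typically becomes MOVAPS-style stores
// of xmm6-xmm15 into their fixed spill slots; the exact opcode is chosen
// by storeRegToStackSlot below.)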
2959 for (const CalleeSavedInfo &I : llvm::reverse(CSI)) {
2960 Register Reg = I.getReg();
2961 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
2962 continue;
2963
2964 // If this is a k-register, make sure we look up via the largest legal type.
2965 MVT VT = MVT::Other;
2966 if (X86::VK16RegClass.contains(Reg))
2967 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
2968
2969 // Add the callee-saved register as live-in. It's killed at the spill.
2970 MBB.addLiveIn(Reg);
2971 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
2972
2973 TII.storeRegToStackSlot(MBB, MI, Reg, true, I.getFrameIdx(), RC, TRI,
2974 Register());
2975 --MI;
2976 MI->setFlag(MachineInstr::FrameSetup);
2977 ++MI;
2978 }
2979
2980 return true;
2981 }
2982
2983 void X86FrameLowering::emitCatchRetReturnValue(MachineBasicBlock &MBB,
2984 MachineBasicBlock::iterator MBBI,
2985 MachineInstr *CatchRet) const {
2986 // SEH shouldn't use catchret.
2987 assert(!isAsynchronousEHPersonality(classifyEHPersonality(
2988 MBB.getParent()->getFunction().getPersonalityFn())) &&
2989 "SEH should not use CATCHRET");
2990 const DebugLoc &DL = CatchRet->getDebugLoc();
2991 MachineBasicBlock *CatchRetTarget = CatchRet->getOperand(0).getMBB();
2992
2993 // Fill EAX/RAX with the address of the target block.
2994 if (STI.is64Bit()) {
2995 // LEA64r CatchRetTarget(%rip), %rax
2996 BuildMI(MBB, MBBI, DL, TII.get(X86::LEA64r), X86::RAX)
2997 .addReg(X86::RIP)
2998 .addImm(0)
2999 .addReg(0)
3000 .addMBB(CatchRetTarget)
3001 .addReg(0);
3002 } else {
3003 // MOV32ri $CatchRetTarget, %eax
3004 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
3005 .addMBB(CatchRetTarget);
3006 }
3007
3008 // Record that we've taken the address of CatchRetTarget and no longer just
3009 // reference it in a terminator.
3010 CatchRetTarget->setMachineBlockAddressTaken();
3011 }
3012
3013 bool X86FrameLowering::restoreCalleeSavedRegisters(
3014 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
3015 MutableArrayRef<CalleeSavedInfo> CSI, const TargetRegisterInfo *TRI) const {
3016 if (CSI.empty())
3017 return false;
3018
3019 if (MI != MBB.end() && isFuncletReturnInstr(*MI) && STI.isOSWindows()) {
3020 // Don't restore CSRs in 32-bit EH funclets. Matches
3021 // spillCalleeSavedRegisters.
3022 if (STI.is32Bit())
3023 return true;
3024 // Don't restore CSRs before an SEH catchret. SEH except blocks do not form
3025 // funclets. emitEpilogue transforms these to normal jumps.
3026 if (MI->getOpcode() == X86::CATCHRET) {
3027 const Function &F = MBB.getParent()->getFunction();
3028 bool IsSEH = isAsynchronousEHPersonality(
3029 classifyEHPersonality(F.getPersonalityFn()));
3030 if (IsSEH)
3031 return true;
3032 }
3033 }
3034
3035 DebugLoc DL = MBB.findDebugLoc(MI);
3036
3037 // Reload XMMs from stack frame.
3038 for (const CalleeSavedInfo &I : CSI) {
3039 Register Reg = I.getReg();
3040 if (X86::GR64RegClass.contains(Reg) || X86::GR32RegClass.contains(Reg))
3041 continue;
3042
3043 // If this is a k-register, make sure we look up via the largest legal type.
3044 MVT VT = MVT::Other;
3045 if (X86::VK16RegClass.contains(Reg))
3046 VT = STI.hasBWI() ? MVT::v64i1 : MVT::v16i1;
3047
3048 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);
3049 TII.loadRegFromStackSlot(MBB, MI, Reg, I.getFrameIdx(), RC, TRI,
3050 Register());
3051 }
3052
3053 // Pop the stack slot used to spill the base pointer register.
3054 MachineFunction &MF = *MBB.getParent();
3055 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
3056 if (X86FI->getRestoreBasePointer()) {
3057 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
3058 Register BaseReg = this->TRI->getBaseRegister();
3059 BuildMI(MBB, MI, DL, TII.get(Opc), BaseReg)
3060 .setMIFlag(MachineInstr::FrameDestroy);
3061 }
3062
3063 // POP GPRs.
3064 for (auto I = CSI.begin(), E = CSI.end(); I != E; ++I) {
3065 Register Reg = I->getReg();
3066 if (!X86::GR64RegClass.contains(Reg) && !X86::GR32RegClass.contains(Reg))
3067 continue;
3068
3069 if (X86FI->isCandidateForPush2Pop2(Reg))
3070 BuildMI(MBB, MI, DL, TII.get(getPOP2Opcode(STI)), Reg)
3071 .addReg((++I)->getReg(), RegState::Define)
3072 .setMIFlag(MachineInstr::FrameDestroy);
3073 else
3074 BuildMI(MBB, MI, DL, TII.get(getPOPOpcode(STI)), Reg)
3075 .setMIFlag(MachineInstr::FrameDestroy);
3076 }
3077 if (X86FI->padForPush2Pop2())
3078 emitSPUpdate(MBB, MI, DL, SlotSize, /*InEpilogue=*/true);
3079
3080 return true;
3081 }
3082
3083 void X86FrameLowering::determineCalleeSaves(MachineFunction &MF,
3084 BitVector &SavedRegs,
3085 RegScavenger *RS) const {
3086 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
3087
3088 // Spill the BasePtr if it's used.
3089 if (TRI->hasBasePointer(MF)) {
3090 Register BasePtr = TRI->getBaseRegister();
3091 if (STI.isTarget64BitILP32())
3092 BasePtr = getX86SubSuperRegister(BasePtr, 64);
3093 SavedRegs.set(BasePtr);
3094 }
3095 }
3096
3097 static bool HasNestArgument(const MachineFunction *MF) {
3098 const Function &F = MF->getFunction();
3099 for (Function::const_arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E;
3100 I++) {
3101 if (I->hasNestAttr() && !I->use_empty())
3102 return true;
3103 }
3104 return false;
3105 }
3106
3107 /// GetScratchRegister - Get a temp register for performing work in the
3108 /// segmented stack and the Erlang/HiPE stack prologue. Depending on platform
3109 /// and the properties of the function either one or two registers will be
3110 /// needed. Set primary to true for the first register, false for the second.
3111 static unsigned GetScratchRegister(bool Is64Bit, bool IsLP64,
3112 const MachineFunction &MF, bool Primary) {
3113 CallingConv::ID CallingConvention = MF.getFunction().getCallingConv();
3114
3115 // Erlang stuff.
3116 if (CallingConvention == CallingConv::HiPE) {
3117 if (Is64Bit)
3118 return Primary ? X86::R14 : X86::R13;
3119 else
3120 return Primary ? X86::EBX : X86::EDI;
3121 }
3122
3123 if (Is64Bit) {
3124 if (IsLP64)
3125 return Primary ? X86::R11 : X86::R12;
3126 else
3127 return Primary ? X86::R11D : X86::R12D;
3128 }
3129
3130 bool IsNested = HasNestArgument(&MF);
3131
3132 if (CallingConvention == CallingConv::X86_FastCall ||
3133 CallingConvention == CallingConv::Fast ||
3134 CallingConvention == CallingConv::Tail) {
3135 if (IsNested)
3136 report_fatal_error("Segmented stacks do not support fastcall with "
3137 "nested functions.");
3138 return Primary ? X86::EAX : X86::ECX;
3139 }
3140 if (IsNested)
3141 return Primary ? X86::EDX : X86::EAX;
3142 return Primary ? X86::ECX : X86::EAX;
3143 }
3144
3145 // The stack limit in the TCB is set to this many bytes above the actual stack
3146 // limit.
3147 static const uint64_t kSplitStackAvailable = 256;
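// For example, with a 100-byte frame, 100 < 256 lets the prologue compare
// %rsp itself against the TLS limit instead of first materializing
// %rsp - 100 in a scratch register.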
3148
3149 void X86FrameLowering::adjustForSegmentedStacks(
3150 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
3151 MachineFrameInfo &MFI = MF.getFrameInfo();
3152 uint64_t StackSize;
3153 unsigned TlsReg, TlsOffset;
3154 DebugLoc DL;
3155
3156 // To support shrink-wrapping we would need to insert the new blocks
3157 // at the right place and update the branches to PrologueMBB.
3158 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
3159
3160 unsigned ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
3161 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
3162 "Scratch register is live-in");
3163
3164 if (MF.getFunction().isVarArg())
3165 report_fatal_error("Segmented stacks do not support vararg functions.");
3166 if (!STI.isTargetLinux() && !STI.isTargetDarwin() && !STI.isTargetWin32() &&
3167 !STI.isTargetWin64() && !STI.isTargetFreeBSD() &&
3168 !STI.isTargetDragonFly())
3169 report_fatal_error("Segmented stacks not supported on this platform.");
3170
3171 // Eventually StackSize will be calculated by a link-time pass, which will
3172 // also decide whether checking code needs to be injected into this
3173 // particular prologue.
3174 StackSize = MFI.getStackSize();
3175
3176 if (!MFI.needsSplitStackProlog())
3177 return;
3178
3179 MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
3180 MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
3181 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
3182 bool IsNested = false;
3183
3184 // We only need to know whether the function has a nest argument in 64-bit mode.
3185 if (Is64Bit)
3186 IsNested = HasNestArgument(&MF);
3187
3188 // The MOV R10, RAX needs to be in a different block, since the RET we emit
3189 // in allocMBB needs to be the last (terminating) instruction.
3190
3191 for (const auto &LI : PrologueMBB.liveins()) {
3192 allocMBB->addLiveIn(LI);
3193 checkMBB->addLiveIn(LI);
3194 }
3195
3196 if (IsNested)
3197 allocMBB->addLiveIn(IsLP64 ? X86::R10 : X86::R10D);
3198
3199 MF.push_front(allocMBB);
3200 MF.push_front(checkMBB);
3201
3202 // When the frame size is less than 256 we just compare the stack
3203 // boundary directly to the value of the stack pointer, per gcc.
3204 bool CompareStackPointer = StackSize < kSplitStackAvailable;
3205
3206 // Read the limit of the current stacklet from the stack_guard location.
3207 if (Is64Bit) {
3208 if (STI.isTargetLinux()) {
3209 TlsReg = X86::FS;
3210 TlsOffset = IsLP64 ? 0x70 : 0x40;
3211 } else if (STI.isTargetDarwin()) {
3212 TlsReg = X86::GS;
3213 TlsOffset = 0x60 + 90 * 8; // See pthread_machdep.h. Steal TLS slot 90.
3214 } else if (STI.isTargetWin64()) {
3215 TlsReg = X86::GS;
3216 TlsOffset = 0x28; // pvArbitrary, reserved for application use
3217 } else if (STI.isTargetFreeBSD()) {
3218 TlsReg = X86::FS;
3219 TlsOffset = 0x18;
3220 } else if (STI.isTargetDragonFly()) {
3221 TlsReg = X86::FS;
3222 TlsOffset = 0x20; // use tls_tcb.tcb_segstack
3223 } else {
3224 report_fatal_error("Segmented stacks not supported on this platform.");
3225 }
3226
3227 if (CompareStackPointer)
3228 ScratchReg = IsLP64 ? X86::RSP : X86::ESP;
3229 else
3230 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::LEA64r : X86::LEA64_32r),
3231 ScratchReg)
3232 .addReg(X86::RSP)
3233 .addImm(1)
3234 .addReg(0)
3235 .addImm(-StackSize)
3236 .addReg(0);
3237
3238 BuildMI(checkMBB, DL, TII.get(IsLP64 ? X86::CMP64rm : X86::CMP32rm))
3239 .addReg(ScratchReg)
3240 .addReg(0)
3241 .addImm(1)
3242 .addReg(0)
3243 .addImm(TlsOffset)
3244 .addReg(TlsReg);
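    // As an illustrative sketch, on x86-64 Linux (LP64) with a large frame
    // the check built here amounts to:
    //   leaq -StackSize(%rsp), %r11
    //   cmpq %fs:0x70, %r11
    //   ja   <function body>
    // (the scratch register comes from GetScratchRegister and the ja is
    // emitted further below; small frames compare %rsp directly and skip
    // the leaq).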
3245 } else {
3246 if (STI.isTargetLinux()) {
3247 TlsReg = X86::GS;
3248 TlsOffset = 0x30;
3249 } else if (STI.isTargetDarwin()) {
3250 TlsReg = X86::GS;
3251 TlsOffset = 0x48 + 90 * 4;
3252 } else if (STI.isTargetWin32()) {
3253 TlsReg = X86::FS;
3254 TlsOffset = 0x14; // pvArbitrary, reserved for application use
3255 } else if (STI.isTargetDragonFly()) {
3256 TlsReg = X86::FS;
3257 TlsOffset = 0x10; // use tls_tcb.tcb_segstack
3258 } else if (STI.isTargetFreeBSD()) {
3259 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
3260 } else {
3261 report_fatal_error("Segmented stacks not supported on this platform.");
3262 }
3263
3264 if (CompareStackPointer)
3265 ScratchReg = X86::ESP;
3266 else
3267 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg)
3268 .addReg(X86::ESP)
3269 .addImm(1)
3270 .addReg(0)
3271 .addImm(-StackSize)
3272 .addReg(0);
3273
3274 if (STI.isTargetLinux() || STI.isTargetWin32() || STI.isTargetWin64() ||
3275 STI.isTargetDragonFly()) {
3276 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
3277 .addReg(ScratchReg)
3278 .addReg(0)
3279 .addImm(0)
3280 .addReg(0)
3281 .addImm(TlsOffset)
3282 .addReg(TlsReg);
3283 } else if (STI.isTargetDarwin()) {
3284
3285 // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
3286 unsigned ScratchReg2;
3287 bool SaveScratch2;
3288 if (CompareStackPointer) {
3289 // The primary scratch register is available for holding the TLS offset.
3290 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, true);
3291 SaveScratch2 = false;
3292 } else {
3293 // Need to use a second register to hold the TLS offset
3294 ScratchReg2 = GetScratchRegister(Is64Bit, IsLP64, MF, false);
3295
3296 // Unfortunately, with fastcc the second scratch register may hold an
3297 // argument.
3298 SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
3299 }
3300
3301 // If Scratch2 is live-in then it needs to be saved.
3302 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
3303 "Scratch register is live-in and not saved");
3304
3305 if (SaveScratch2)
3306 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
3307 .addReg(ScratchReg2, RegState::Kill);
3308
3309 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
3310 .addImm(TlsOffset);
3311 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
3312 .addReg(ScratchReg)
3313 .addReg(ScratchReg2)
3314 .addImm(1)
3315 .addReg(0)
3316 .addImm(0)
3317 .addReg(TlsReg);
3318
3319 if (SaveScratch2)
3320 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
3321 }
3322 }
3323
3324 // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
3325 // It jumps to normal execution of the function body.
3326 BuildMI(checkMBB, DL, TII.get(X86::JCC_1))
3327 .addMBB(&PrologueMBB)
3328 .addImm(X86::COND_A);
3329
3330 // On 32 bit we first push the arguments size and then the frame size. On 64
3331 // bit, we pass the stack frame size in r10 and the argument size in r11.
3332 if (Is64Bit) {
3333 // Functions with nested arguments use R10, so it needs to be saved across
3334 // the call to _morestack
3335
3336 const unsigned RegAX = IsLP64 ? X86::RAX : X86::EAX;
3337 const unsigned Reg10 = IsLP64 ? X86::R10 : X86::R10D;
3338 const unsigned Reg11 = IsLP64 ? X86::R11 : X86::R11D;
3339 const unsigned MOVrr = IsLP64 ? X86::MOV64rr : X86::MOV32rr;
3340
3341 if (IsNested)
3342 BuildMI(allocMBB, DL, TII.get(MOVrr), RegAX).addReg(Reg10);
3343
3344 BuildMI(allocMBB, DL, TII.get(getMOVriOpcode(IsLP64, StackSize)), Reg10)
3345 .addImm(StackSize);
3346 BuildMI(allocMBB, DL,
3347 TII.get(getMOVriOpcode(IsLP64, X86FI->getArgumentStackSize())),
3348 Reg11)
3349 .addImm(X86FI->getArgumentStackSize());
3350 } else {
3351 BuildMI(allocMBB, DL, TII.get(X86::PUSH32i))
3352 .addImm(X86FI->getArgumentStackSize());
3353 BuildMI(allocMBB, DL, TII.get(X86::PUSH32i)).addImm(StackSize);
3354 }
3355
3356 // __morestack is in libgcc
3357 if (Is64Bit && MF.getTarget().getCodeModel() == CodeModel::Large) {
3358 // Under the large code model, we cannot assume that __morestack lives
3359 // within 2^31 bytes of the call site, so we cannot use pc-relative
3360 // addressing. We cannot perform the call via a temporary register,
3361 // as the rax register may be used to store the static chain, and all
3362 // other suitable registers may be either callee-save or used for
3363 // parameter passing. We cannot use the stack at this point either
3364 // because __morestack manipulates the stack directly.
3365 //
3366 // To avoid these issues, perform an indirect call via a read-only memory
3367 // location containing the address.
3368 //
3369 // This solution is not perfect, as it assumes that the .rodata section
3370 // is laid out within 2^31 bytes of each function body, but this seems
3371 // to be sufficient for JIT.
3372 // FIXME: Add retpoline support and remove the error here.
3373 if (STI.useIndirectThunkCalls())
3374 report_fatal_error("Emitting morestack calls on 64-bit with the large "
3375 "code model and thunks not yet implemented.");
3376 BuildMI(allocMBB, DL, TII.get(X86::CALL64m))
3377 .addReg(X86::RIP)
3378 .addImm(0)
3379 .addReg(0)
3380 .addExternalSymbol("__morestack_addr")
3381 .addReg(0);
3382 } else {
3383 if (Is64Bit)
3384 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
3385 .addExternalSymbol("__morestack");
3386 else
3387 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
3388 .addExternalSymbol("__morestack");
3389 }
3390
3391 if (IsNested)
3392 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
3393 else
3394 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
3395
3396 allocMBB->addSuccessor(&PrologueMBB);
3397
3398 checkMBB->addSuccessor(allocMBB, BranchProbability::getZero());
3399 checkMBB->addSuccessor(&PrologueMBB, BranchProbability::getOne());
3400
3401 #ifdef EXPENSIVE_CHECKS
3402 MF.verify();
3403 #endif
3404 }
3405
3406 /// Lookup an ERTS parameter in the !hipe.literals named metadata node.
3407 /// HiPE provides Erlang Runtime System-internal parameters, such as PCB offsets
3408 /// to fields it needs, through a named metadata node "hipe.literals" containing
3409 /// name-value pairs.
3410 static unsigned getHiPELiteral(NamedMDNode *HiPELiteralsMD,
3411 const StringRef LiteralName) {
3412 for (int i = 0, e = HiPELiteralsMD->getNumOperands(); i != e; ++i) {
3413 MDNode *Node = HiPELiteralsMD->getOperand(i);
3414 if (Node->getNumOperands() != 2)
3415 continue;
3416 MDString *NodeName = dyn_cast<MDString>(Node->getOperand(0));
3417 ValueAsMetadata *NodeVal = dyn_cast<ValueAsMetadata>(Node->getOperand(1));
3418 if (!NodeName || !NodeVal)
3419 continue;
3420 ConstantInt *ValConst = dyn_cast_or_null<ConstantInt>(NodeVal->getValue());
3421 if (ValConst && NodeName->getString() == LiteralName) {
3422 return ValConst->getZExtValue();
3423 }
3424 }
3425
3426 report_fatal_error("HiPE literal " + LiteralName +
3427 " required but not provided");
3428 }
3429
3430 // Return true if there are no non-ehpad successors to MBB and there are no
3431 // non-meta instructions between MBBI and MBB.end().
3432 static bool blockEndIsUnreachable(const MachineBasicBlock &MBB,
3433 MachineBasicBlock::const_iterator MBBI) {
3434 return llvm::all_of(
3435 MBB.successors(),
3436 [](const MachineBasicBlock *Succ) { return Succ->isEHPad(); }) &&
3437 std::all_of(MBBI, MBB.end(), [](const MachineInstr &MI) {
3438 return MI.isMetaInstruction();
3439 });
3440 }
3441
3442 /// Erlang programs may need a special prologue to handle the stack size they
3443 /// might need at runtime. That is because Erlang/OTP does not implement a C
3444 /// stack but uses a custom hybrid stack/heap architecture.
3445 /// (for more information see Eric Stenman's Ph.D. thesis:
3446 /// http://publications.uu.se/uu/fulltext/nbn_se_uu_diva-2688.pdf)
3447 ///
3448 /// CheckStack:
3449 /// temp0 = sp - MaxStack
3450 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
3451 /// OldStart:
3452 /// ...
3453 /// IncStack:
3454 /// call inc_stack # doubles the stack space
3455 /// temp0 = sp - MaxStack
3456 /// if( temp0 < SP_LIMIT(P) ) goto IncStack else goto OldStart
3457 void X86FrameLowering::adjustForHiPEPrologue(
3458 MachineFunction &MF, MachineBasicBlock &PrologueMBB) const {
3459 MachineFrameInfo &MFI = MF.getFrameInfo();
3460 DebugLoc DL;
3461
3462 // To support shrink-wrapping we would need to insert the new blocks
3463 // at the right place and update the branches to PrologueMBB.
3464 assert(&(*MF.begin()) == &PrologueMBB && "Shrink-wrapping not supported yet");
3465
3466 // HiPE-specific values
3467 NamedMDNode *HiPELiteralsMD =
3468 MF.getMMI().getModule()->getNamedMetadata("hipe.literals");
3469 if (!HiPELiteralsMD)
3470 report_fatal_error(
3471 "Can't generate HiPE prologue without runtime parameters");
3472 const unsigned HipeLeafWords = getHiPELiteral(
3473 HiPELiteralsMD, Is64Bit ? "AMD64_LEAF_WORDS" : "X86_LEAF_WORDS");
3474 const unsigned CCRegisteredArgs = Is64Bit ? 6 : 5;
3475 const unsigned Guaranteed = HipeLeafWords * SlotSize;
3476 unsigned CallerStkArity = MF.getFunction().arg_size() > CCRegisteredArgs
3477 ? MF.getFunction().arg_size() - CCRegisteredArgs
3478 : 0;
3479 unsigned MaxStack = MFI.getStackSize() + CallerStkArity * SlotSize + SlotSize;
3480
3481 assert(STI.isTargetLinux() &&
3482 "HiPE prologue is only supported on Linux operating systems.");
3483
3484 // Compute the largest caller's frame that is needed to fit the callees'
3485 // frames. This 'MaxStack' is computed from:
3486 //
3487 // a) the fixed frame size, which is the space needed for all spilled temps,
3488 // b) outgoing on-stack parameter areas, and
3489 // c) the minimum stack space this function needs to make available for the
3490 // functions it calls (a tunable ABI property).
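// As a worked example with made-up numbers: on x86-64 (SlotSize = 8), a
// 40-byte frame and 8 Erlang arguments (two more than the 6 passed in
// registers) give MaxStack = 40 + 2 * 8 + 8 = 64 before the call
// adjustment below.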
3491 if (MFI.hasCalls()) {
3492 unsigned MoreStackForCalls = 0;
3493
3494 for (auto &MBB : MF) {
3495 for (auto &MI : MBB) {
3496 if (!MI.isCall())
3497 continue;
3498
3499 // Get callee operand.
3500 const MachineOperand &MO = MI.getOperand(0);
3501
3502 // Only take account of global function calls (no closures etc.).
3503 if (!MO.isGlobal())
3504 continue;
3505
3506 const Function *F = dyn_cast<Function>(MO.getGlobal());
3507 if (!F)
3508 continue;
3509
3510 // Do not update 'MaxStack' for primitive and built-in functions
3511 // (encoded with names either starting with "erlang."/"bif_" or not
3512 // having a ".", such as a simple <Module>.<Function>.<Arity>, or an
3513 // "_", such as the BIF "suspend_0") as they are executed on another
3514 // stack.
3515 if (F->getName().contains("erlang.") || F->getName().contains("bif_") ||
3516 F->getName().find_first_of("._") == StringRef::npos)
3517 continue;
3518
3519 unsigned CalleeStkArity = F->arg_size() > CCRegisteredArgs
3520 ? F->arg_size() - CCRegisteredArgs
3521 : 0;
3522 if (HipeLeafWords - 1 > CalleeStkArity)
3523 MoreStackForCalls =
3524 std::max(MoreStackForCalls,
3525 (HipeLeafWords - 1 - CalleeStkArity) * SlotSize);
3526 }
3527 }
3528 MaxStack += MoreStackForCalls;
3529 }
3530
3531 // If the needed stack frame is larger than the guaranteed amount, runtime
3532 // checks and calls to the "inc_stack_0" BIF are inserted in the prologue.
3533 if (MaxStack > Guaranteed) {
3534 MachineBasicBlock *stackCheckMBB = MF.CreateMachineBasicBlock();
3535 MachineBasicBlock *incStackMBB = MF.CreateMachineBasicBlock();
3536
3537 for (const auto &LI : PrologueMBB.liveins()) {
3538 stackCheckMBB->addLiveIn(LI);
3539 incStackMBB->addLiveIn(LI);
3540 }
3541
3542 MF.push_front(incStackMBB);
3543 MF.push_front(stackCheckMBB);
3544
3545 unsigned ScratchReg, SPReg, PReg, SPLimitOffset;
3546 unsigned LEAop, CMPop, CALLop;
3547 SPLimitOffset = getHiPELiteral(HiPELiteralsMD, "P_NSP_LIMIT");
3548 if (Is64Bit) {
3549 SPReg = X86::RSP;
3550 PReg = X86::RBP;
3551 LEAop = X86::LEA64r;
3552 CMPop = X86::CMP64rm;
3553 CALLop = X86::CALL64pcrel32;
3554 } else {
3555 SPReg = X86::ESP;
3556 PReg = X86::EBP;
3557 LEAop = X86::LEA32r;
3558 CMPop = X86::CMP32rm;
3559 CALLop = X86::CALLpcrel32;
3560 }
3561
3562 ScratchReg = GetScratchRegister(Is64Bit, IsLP64, MF, true);
3563 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
3564 "HiPE prologue scratch register is live-in");
3565
3566 // Create new MBB for StackCheck:
3567 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(LEAop), ScratchReg), SPReg,
3568 false, -MaxStack);
3569 // SPLimitOffset is in a fixed heap location (pointed by BP).
3570 addRegOffset(BuildMI(stackCheckMBB, DL, TII.get(CMPop)).addReg(ScratchReg),
3571 PReg, false, SPLimitOffset);
3572 BuildMI(stackCheckMBB, DL, TII.get(X86::JCC_1))
3573 .addMBB(&PrologueMBB)
3574 .addImm(X86::COND_AE);
3575
3576 // Create new MBB for IncStack:
3577 BuildMI(incStackMBB, DL, TII.get(CALLop)).addExternalSymbol("inc_stack_0");
3578 addRegOffset(BuildMI(incStackMBB, DL, TII.get(LEAop), ScratchReg), SPReg,
3579 false, -MaxStack);
3580 addRegOffset(BuildMI(incStackMBB, DL, TII.get(CMPop)).addReg(ScratchReg),
3581 PReg, false, SPLimitOffset);
3582 BuildMI(incStackMBB, DL, TII.get(X86::JCC_1))
3583 .addMBB(incStackMBB)
3584 .addImm(X86::COND_LE);
3585
3586 stackCheckMBB->addSuccessor(&PrologueMBB, {99, 100});
3587 stackCheckMBB->addSuccessor(incStackMBB, {1, 100});
3588 incStackMBB->addSuccessor(&PrologueMBB, {99, 100});
3589 incStackMBB->addSuccessor(incStackMBB, {1, 100});
3590 }
3591 #ifdef EXPENSIVE_CHECKS
3592 MF.verify();
3593 #endif
3594 }
3595
3596 bool X86FrameLowering::adjustStackWithPops(MachineBasicBlock &MBB,
3597 MachineBasicBlock::iterator MBBI,
3598 const DebugLoc &DL,
3599 int Offset) const {
3600 if (Offset <= 0)
3601 return false;
3602
3603 if (Offset % SlotSize)
3604 return false;
3605
3606 int NumPops = Offset / SlotSize;
3607 // This is only worth it if we have at most 2 pops.
3608 if (NumPops != 1 && NumPops != 2)
3609 return false;
3610
3611 // Handle only the trivial case where the adjustment directly follows
3612 // a call. This is the most common one, anyway.
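  // For instance, on 32-bit targets an 8-byte cleanup
  //   call foo ; addl $8, %esp
  // can instead be emitted as
  //   call foo ; popl %ecx ; popl %edx
  // (illustrative registers; they are picked from call-clobbered,
  // non-reserved registers below).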
3613 if (MBBI == MBB.begin())
3614 return false;
3615 MachineBasicBlock::iterator Prev = std::prev(MBBI);
3616 if (!Prev->isCall() || !Prev->getOperand(1).isRegMask())
3617 return false;
3618
3619 unsigned Regs[2];
3620 unsigned FoundRegs = 0;
3621
3622 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3623 const MachineOperand &RegMask = Prev->getOperand(1);
3624
3625 auto &RegClass =
3626 Is64Bit ? X86::GR64_NOREX_NOSPRegClass : X86::GR32_NOREX_NOSPRegClass;
3627 // Try to find up to NumPops free registers.
3628 for (auto Candidate : RegClass) {
3629 // Poor man's liveness:
3630 // Since we're immediately after a call, any register that is clobbered
3631 // by the call and not defined by it can be considered dead.
3632 if (!RegMask.clobbersPhysReg(Candidate))
3633 continue;
3634
3635 // Don't clobber reserved registers
3636 if (MRI.isReserved(Candidate))
3637 continue;
3638
3639 bool IsDef = false;
3640 for (const MachineOperand &MO : Prev->implicit_operands()) {
3641 if (MO.isReg() && MO.isDef() &&
3642 TRI->isSuperOrSubRegisterEq(MO.getReg(), Candidate)) {
3643 IsDef = true;
3644 break;
3645 }
3646 }
3647
3648 if (IsDef)
3649 continue;
3650
3651 Regs[FoundRegs++] = Candidate;
3652 if (FoundRegs == (unsigned)NumPops)
3653 break;
3654 }
3655
3656 if (FoundRegs == 0)
3657 return false;
3658
3659 // If we found only one free register, but need two, reuse the same one twice.
3660 while (FoundRegs < (unsigned)NumPops)
3661 Regs[FoundRegs++] = Regs[0];
3662
3663 for (int i = 0; i < NumPops; ++i)
3664 BuildMI(MBB, MBBI, DL, TII.get(STI.is64Bit() ? X86::POP64r : X86::POP32r),
3665 Regs[i]);
3666
3667 return true;
3668 }
3669
3670 MachineBasicBlock::iterator X86FrameLowering::eliminateCallFramePseudoInstr(
3671 MachineFunction &MF, MachineBasicBlock &MBB,
3672 MachineBasicBlock::iterator I) const {
3673 bool reserveCallFrame = hasReservedCallFrame(MF);
3674 unsigned Opcode = I->getOpcode();
3675 bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
3676 DebugLoc DL = I->getDebugLoc(); // copy DebugLoc as I will be erased.
3677 uint64_t Amount = TII.getFrameSize(*I);
3678 uint64_t InternalAmt = (isDestroy || Amount) ? TII.getFrameAdjustment(*I) : 0;
3679 I = MBB.erase(I);
3680 auto InsertPos = skipDebugInstructionsForward(I, MBB.end());
3681
3682 // Try to avoid emitting dead SP adjustments if the block end is unreachable,
3683 // typically because the function is marked noreturn (abort, throw,
3684 // assert_fail, etc).
3685 if (isDestroy && blockEndIsUnreachable(MBB, I))
3686 return I;
3687
3688 if (!reserveCallFrame) {
3689 // If the stack pointer can be changed after prologue, turn the
3690 // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
3691 // adjcallstackup instruction into 'add ESP, <amt>'
3692
3693 // We need to keep the stack aligned properly. To do this, we round the
3694 // amount of space needed for the outgoing arguments up to the next
3695 // alignment boundary.
3696 Amount = alignTo(Amount, getStackAlign());
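    // E.g. Amount = 20 rounds up to 32 under a 16-byte stack alignment.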
3697
3698 const Function &F = MF.getFunction();
3699 bool WindowsCFI = MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
3700 bool DwarfCFI = !WindowsCFI && MF.needsFrameMoves();
3701
3702 // If we have any exception handlers in this function, and we adjust
3703 // the SP before calls, we may need to indicate this to the unwinder
3704 // using GNU_ARGS_SIZE. Note that this may be necessary even when
3705 // Amount == 0, because the preceding function may have set a non-0
3706 // GNU_ARGS_SIZE.
3707 // TODO: We don't need to reset this between subsequent functions,
3708 // if it didn't change.
3709 bool HasDwarfEHHandlers = !WindowsCFI && !MF.getLandingPads().empty();
3710
3711 if (HasDwarfEHHandlers && !isDestroy &&
3712 MF.getInfo<X86MachineFunctionInfo>()->getHasPushSequences())
3713 BuildCFI(MBB, InsertPos, DL,
3714 MCCFIInstruction::createGnuArgsSize(nullptr, Amount));
3715
3716 if (Amount == 0)
3717 return I;
3718
3719 // Factor out the amount that gets handled inside the sequence
3720 // (pushes of arguments for frame setup, callee pops for frame destroy).
3721 Amount -= InternalAmt;
3722
3723 // TODO: This is needed only if we require precise CFA.
3724 // If this is a callee-pop calling convention, emit a CFA adjust for
3725 // the amount the callee popped.
3726 if (isDestroy && InternalAmt && DwarfCFI && !hasFP(MF))
3727 BuildCFI(MBB, InsertPos, DL,
3728 MCCFIInstruction::createAdjustCfaOffset(nullptr, -InternalAmt));
3729
3730 // Add Amount to SP to destroy a frame, or subtract to setup.
3731 int64_t StackAdjustment = isDestroy ? Amount : -Amount;
3732
3733 if (StackAdjustment) {
3734 // Merge with any previous or following adjustment instruction. Note: the
3735 // instructions merged with here do not have CFI, so their stack
3736 // adjustments do not feed into CfaAdjustment.
3737 StackAdjustment += mergeSPUpdates(MBB, InsertPos, true);
3738 StackAdjustment += mergeSPUpdates(MBB, InsertPos, false);
3739
3740 if (StackAdjustment) {
3741 if (!(F.hasMinSize() &&
3742 adjustStackWithPops(MBB, InsertPos, DL, StackAdjustment)))
3743 BuildStackAdjustment(MBB, InsertPos, DL, StackAdjustment,
3744 /*InEpilogue=*/false);
3745 }
3746 }
3747
3748 if (DwarfCFI && !hasFP(MF)) {
3749 // If we don't have FP, but need to generate unwind information,
3750 // we need to set the correct CFA offset after the stack adjustment.
3751 // How much we adjust the CFA offset depends on whether we're emitting
3752 // CFI only for EH purposes or for debugging. EH only requires the CFA
3753 // offset to be correct at each call site, while for debugging we want
3754 // it to be more precise.
3755
3756 int64_t CfaAdjustment = -StackAdjustment;
3757 // TODO: When not using precise CFA, we also need to adjust for the
3758 // InternalAmt here.
3759 if (CfaAdjustment) {
3760 BuildCFI(
3761 MBB, InsertPos, DL,
3762 MCCFIInstruction::createAdjustCfaOffset(nullptr, CfaAdjustment));
3763 }
3764 }
3765
3766 return I;
3767 }
3768
3769 if (InternalAmt) {
3770 MachineBasicBlock::iterator CI = I;
3771 MachineBasicBlock::iterator B = MBB.begin();
3772 while (CI != B && !std::prev(CI)->isCall())
3773 --CI;
3774 BuildStackAdjustment(MBB, CI, DL, -InternalAmt, /*InEpilogue=*/false);
3775 }
3776
3777 return I;
3778 }
3779
3780 bool X86FrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
3781 assert(MBB.getParent() && "Block is not attached to a function!");
3782 const MachineFunction &MF = *MBB.getParent();
3783 if (!MBB.isLiveIn(X86::EFLAGS))
3784 return true;
3785
3786 // If stack probes have to loop inline or call, that will clobber EFLAGS.
3787 // FIXME: we could allow cases that will use emitStackProbeInlineGenericBlock.
3788 const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
3789 const X86TargetLowering &TLI = *STI.getTargetLowering();
3790 if (TLI.hasInlineStackProbe(MF) || TLI.hasStackProbeSymbol(MF))
3791 return false;
3792
3793 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
3794 return !TRI->hasStackRealignment(MF) && !X86FI->hasSwiftAsyncContext();
3795 }
3796
3797 bool X86FrameLowering::canUseAsEpilogue(const MachineBasicBlock &MBB) const {
3798 assert(MBB.getParent() && "Block is not attached to a function!");
3799
3800 // Win64 has strict requirements in terms of epilogues, and we are
3801 // not taking any chances messing with them.
3802 // I.e., unless this block is already an exit block, we can't use
3803 // it as an epilogue.
3804 if (STI.isTargetWin64() && !MBB.succ_empty() && !MBB.isReturnBlock())
3805 return false;
3806
3807 // Swift async context epilogue has a BTR instruction that clobbers parts of
3808 // EFLAGS.
3809 const MachineFunction &MF = *MBB.getParent();
3810 if (MF.getInfo<X86MachineFunctionInfo>()->hasSwiftAsyncContext())
3811 return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
3812
3813 if (canUseLEAForSPInEpilogue(*MBB.getParent()))
3814 return true;
3815
3816 // If we cannot use LEA to adjust SP, we may need to use ADD, which
3817 // clobbers the EFLAGS. Check that we do not need to preserve it;
3818 // otherwise, conservatively assume it is not safe to insert the
3819 // epilogue here.
3820 return !flagsNeedToBePreservedBeforeTheTerminators(MBB);
3821 }
3822
3823 bool X86FrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
3824 // If we may need to emit frameless compact unwind information, give
3825 // up as this is currently broken: PR25614.
3826 bool CompactUnwind =
3827 MF.getMMI().getContext().getObjectFileInfo()->getCompactUnwindSection() !=
3828 nullptr;
3829 return (MF.getFunction().hasFnAttribute(Attribute::NoUnwind) || hasFP(MF) ||
3830 !CompactUnwind) &&
3831 // The lowering of segmented stack and HiPE only support entry
3832 // blocks as prologue blocks: PR26107. This limitation may be
3833 // lifted if we fix:
3834 // - adjustForSegmentedStacks
3835 // - adjustForHiPEPrologue
3836 MF.getFunction().getCallingConv() != CallingConv::HiPE &&
3837 !MF.shouldSplitStack();
3838 }
3839
3840 MachineBasicBlock::iterator X86FrameLowering::restoreWin32EHStackPointers(
3841 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
3842 const DebugLoc &DL, bool RestoreSP) const {
3843 assert(STI.isTargetWindowsMSVC() && "funclets only supported in MSVC env");
3844 assert(STI.isTargetWin32() && "EBP/ESI restoration only required on win32");
3845 assert(STI.is32Bit() && !Uses64BitFramePtr &&
3846 "restoring EBP/ESI on non-32-bit target");
3847
3848 MachineFunction &MF = *MBB.getParent();
3849 Register FramePtr = TRI->getFrameRegister(MF);
3850 Register BasePtr = TRI->getBaseRegister();
3851 WinEHFuncInfo &FuncInfo = *MF.getWinEHFuncInfo();
3852 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
3853 MachineFrameInfo &MFI = MF.getFrameInfo();
3854
3855 // FIXME: Don't set FrameSetup flag in catchret case.
3856
3857 int FI = FuncInfo.EHRegNodeFrameIndex;
3858 int EHRegSize = MFI.getObjectSize(FI);
3859
3860 if (RestoreSP) {
3861 // MOV32rm -EHRegSize(%ebp), %esp
3862 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), X86::ESP),
3863 X86::EBP, true, -EHRegSize)
3864 .setMIFlag(MachineInstr::FrameSetup);
3865 }
3866
3867 Register UsedReg;
3868 int EHRegOffset = getFrameIndexReference(MF, FI, UsedReg).getFixed();
3869 int EndOffset = -EHRegOffset - EHRegSize;
3870 FuncInfo.EHRegNodeEndOffset = EndOffset;
3871
3872 if (UsedReg == FramePtr) {
3873 // ADD $offset, %ebp
3874 unsigned ADDri = getADDriOpcode(false);
3875 BuildMI(MBB, MBBI, DL, TII.get(ADDri), FramePtr)
3876 .addReg(FramePtr)
3877 .addImm(EndOffset)
3878 .setMIFlag(MachineInstr::FrameSetup)
3879 ->getOperand(3)
3880 .setIsDead();
3881 assert(EndOffset >= 0 &&
3882 "end of registration object above normal EBP position!");
3883 } else if (UsedReg == BasePtr) {
3884 // LEA offset(%ebp), %esi
3885 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::LEA32r), BasePtr),
3886 FramePtr, false, EndOffset)
3887 .setMIFlag(MachineInstr::FrameSetup);
3888 // MOV32rm SavedEBPOffset(%esi), %ebp
3889 assert(X86FI->getHasSEHFramePtrSave());
3890 int Offset =
3891 getFrameIndexReference(MF, X86FI->getSEHFramePtrSaveIndex(), UsedReg)
3892 .getFixed();
3893 assert(UsedReg == BasePtr);
3894 addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32rm), FramePtr),
3895 UsedReg, true, Offset)
3896 .setMIFlag(MachineInstr::FrameSetup);
3897 } else {
3898 llvm_unreachable("32-bit frames with WinEH must use FramePtr or BasePtr");
3899 }
3900 return MBBI;
3901 }
3902
3903 int X86FrameLowering::getInitialCFAOffset(const MachineFunction &MF) const {
3904 return TRI->getSlotSize();
3905 }
3906
3907 Register
3908 X86FrameLowering::getInitialCFARegister(const MachineFunction &MF) const {
3909 return StackPtr;
3910 }
3911
3912 TargetFrameLowering::DwarfFrameBase
3913 X86FrameLowering::getDwarfFrameBase(const MachineFunction &MF) const {
3914 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
3915 Register FrameRegister = RI->getFrameRegister(MF);
3916 if (getInitialCFARegister(MF) == FrameRegister &&
3917 MF.getInfo<X86MachineFunctionInfo>()->hasCFIAdjustCfa()) {
3918 DwarfFrameBase FrameBase;
3919 FrameBase.Kind = DwarfFrameBase::CFA;
3920 FrameBase.Location.Offset =
3921 -MF.getFrameInfo().getStackSize() - getInitialCFAOffset(MF);
3922 return FrameBase;
3923 }
3924
3925 return DwarfFrameBase{DwarfFrameBase::Register, {FrameRegister}};
3926 }
3927
3928 namespace {
3929 // Struct used by orderFrameObjects to help sort the stack objects.
3930 struct X86FrameSortingObject {
3931 bool IsValid = false; // true if we care about this Object.
3932 unsigned ObjectIndex = 0; // Index of Object into MFI list.
3933 unsigned ObjectSize = 0; // Size of Object in bytes.
3934 Align ObjectAlignment = Align(1); // Alignment of Object in bytes.
3935 unsigned ObjectNumUses = 0; // Object static number of uses.
3936 };
3937
3938 // The comparison function we use for std::sort to order our local
3939 // stack symbols. The current algorithm is to use an estimated
3940 // "density". This takes into consideration the size and number of
3941 // uses each object has in order to roughly minimize code size.
3942 // So, for example, an object of size 16B that is referenced 5 times
3943 // will get higher priority than 4 4B objects referenced 1 time each.
3944 // It's not perfect and we may be able to squeeze a few more bytes out of
3945 // it (for example: 0(esp) requires fewer bytes, symbols allocated at the
3946 // fringe end can have special consideration, given their size is less
3947 // important, etc.), but the algorithmic complexity grows too much to be
3948 // worth the extra gains we get. This gets us pretty close.
3949 // The final order leaves us with objects with highest priority going
3950 // at the end of our list.
3951 struct X86FrameSortingComparator {
3952 inline bool operator()(const X86FrameSortingObject &A,
3953 const X86FrameSortingObject &B) const {
3954 uint64_t DensityAScaled, DensityBScaled;
3955
3956 // For consistency in our comparison, all invalid objects are placed
3957 // at the end. This also allows us to stop walking when we hit the
3958 // first invalid item after it's all sorted.
3959 if (!A.IsValid)
3960 return false;
3961 if (!B.IsValid)
3962 return true;
3963
3964 // The density is calculated by doing :
3965 // (double)DensityA = A.ObjectNumUses / A.ObjectSize
3966 // (double)DensityB = B.ObjectNumUses / B.ObjectSize
3967 // Since this approach may cause inconsistencies in
3968 // the floating point <, >, == comparisons, depending on the floating
3969 // point model with which the compiler was built, we're going
3970 // to scale both sides by multiplying with
3971 // A.ObjectSize * B.ObjectSize. This ends up factoring away
3972 // the division and, with it, the need for any floating point
3973 // arithmetic.
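    // A numeric example: A = {5 uses, 16 bytes}, B = {1 use, 4 bytes} gives
    // DensityAScaled = 5 * 4 = 20 and DensityBScaled = 1 * 16 = 16, so A
    // compares greater and sorts toward the high-priority end of the list.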
3974 DensityAScaled = static_cast<uint64_t>(A.ObjectNumUses) *
3975 static_cast<uint64_t>(B.ObjectSize);
3976 DensityBScaled = static_cast<uint64_t>(B.ObjectNumUses) *
3977 static_cast<uint64_t>(A.ObjectSize);
3978
3979 // If the two densities are equal, prioritize highest alignment
3980 // objects. This allows for similar alignment objects
3981 // to be packed together (given the same density).
3982 // There's room for improvement here, also, since we can pack
3983 // similar alignment (different density) objects next to each
3984 // other to save padding. This will also require further
3985 // complexity/iterations, and the overall gain isn't worth it,
3986 // in general. Something to keep in mind, though.
3987 if (DensityAScaled == DensityBScaled)
3988 return A.ObjectAlignment < B.ObjectAlignment;
3989
3990 return DensityAScaled < DensityBScaled;
3991 }
3992 };
3993 } // namespace
3994
3995 // Order the symbols in the local stack.
3996 // We want to place the local stack objects in some sort of sensible order.
3997 // The heuristic we use is to try and pack them according to static number
3998 // of uses and size of object in order to minimize code size.
3999 void X86FrameLowering::orderFrameObjects(
4000 const MachineFunction &MF, SmallVectorImpl<int> &ObjectsToAllocate) const {
4001 const MachineFrameInfo &MFI = MF.getFrameInfo();
4002
4003 // Don't waste time if there's nothing to do.
4004 if (ObjectsToAllocate.empty())
4005 return;
4006
4007 // Create an array of all MFI objects. We won't need all of these
4008 // objects, but we're going to create a full array of them to make
4009 // it easier to index into when we're counting "uses" down below.
4010 // We want to be able to easily/cheaply access an object by simply
4011 // indexing into it, instead of having to search for it every time.
4012 std::vector<X86FrameSortingObject> SortingObjects(MFI.getObjectIndexEnd());
4013
4014 // Walk the objects we care about and mark them as such in our working
4015 // struct.
4016 for (auto &Obj : ObjectsToAllocate) {
4017 SortingObjects[Obj].IsValid = true;
4018 SortingObjects[Obj].ObjectIndex = Obj;
4019 SortingObjects[Obj].ObjectAlignment = MFI.getObjectAlign(Obj);
4020 // Set the size.
4021 int ObjectSize = MFI.getObjectSize(Obj);
4022 if (ObjectSize == 0)
4023 // Variable size. Just use 4.
4024 SortingObjects[Obj].ObjectSize = 4;
4025 else
4026 SortingObjects[Obj].ObjectSize = ObjectSize;
4027 }
4028
4029 // Count the number of uses for each object.
4030 for (auto &MBB : MF) {
4031 for (auto &MI : MBB) {
4032 if (MI.isDebugInstr())
4033 continue;
4034 for (const MachineOperand &MO : MI.operands()) {
4035 // Check to see if it's a local stack symbol.
4036 if (!MO.isFI())
4037 continue;
4038 int Index = MO.getIndex();
4039 // Check to see if it falls within our range, and is tagged
4040 // to require ordering.
4041 if (Index >= 0 && Index < MFI.getObjectIndexEnd() &&
4042 SortingObjects[Index].IsValid)
4043 SortingObjects[Index].ObjectNumUses++;
4044 }
4045 }
4046 }
4047
4048 // Sort the objects using X86FrameSortingComparator (see its comment for
4049 // info).
4050 llvm::stable_sort(SortingObjects, X86FrameSortingComparator());
4051
4052 // Now modify the original list to represent the final order that
4053 // we want. The order will depend on whether we're going to access them
4054 // from the stack pointer or the frame pointer. For SP, the objects we
4055 // want at smaller offsets should end up at the END of the list.
4056 // For FP, the order should be flipped.
4057 int i = 0;
4058 for (auto &Obj : SortingObjects) {
4059 // All invalid items are sorted at the end, so it's safe to stop.
4060 if (!Obj.IsValid)
4061 break;
4062 ObjectsToAllocate[i++] = Obj.ObjectIndex;
4063 }
4064
4065 // Flip it if we're accessing off of the FP.
4066 if (!TRI->hasStackRealignment(MF) && hasFP(MF))
4067 std::reverse(ObjectsToAllocate.begin(), ObjectsToAllocate.end());
4068 }
4069
4070 unsigned
4071 X86FrameLowering::getWinEHParentFrameOffset(const MachineFunction &MF) const {
4072 // RDX, the parent frame pointer, is homed into 16(%rsp) in the prologue.
4073 unsigned Offset = 16;
4074 // RBP is immediately pushed.
4075 Offset += SlotSize;
4076 // All callee-saved registers are then pushed.
4077 Offset += MF.getInfo<X86MachineFunctionInfo>()->getCalleeSavedFrameSize();
4078 // Every funclet allocates enough stack space for the largest outgoing call.
4079 Offset += getWinEHFuncletFrameSize(MF);
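  // As a worked example with made-up sizes: 16 bytes of CSR saves and a
  // 32-byte funclet frame give 16 + 8 + 16 + 32 = 72 on x86-64.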
4080 return Offset;
4081 }
4082
4083 void X86FrameLowering::processFunctionBeforeFrameFinalized(
4084 MachineFunction &MF, RegScavenger *RS) const {
4085 // Mark the function as not having WinCFI. We will set it back to true in
4086 // emitPrologue if it gets called and emits CFI.
4087 MF.setHasWinCFI(false);
4088
4089 // If we are using Windows x64 CFI, ensure that the stack is always 8 byte
4090 // aligned. The format doesn't support misaligned stack adjustments.
4091 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI())
4092 MF.getFrameInfo().ensureMaxAlignment(Align(SlotSize));
4093
4094 // Beyond this point, we only need to do anything if this function is
4095 // doing Win64-style C++ EH.
4096 if (STI.is64Bit() && MF.hasEHFunclets() &&
4097 classifyEHPersonality(MF.getFunction().getPersonalityFn()) ==
4098 EHPersonality::MSVC_CXX) {
4099 adjustFrameForMsvcCxxEh(MF);
4100 }
4101 }
4102
4103 void X86FrameLowering::adjustFrameForMsvcCxxEh(MachineFunction &MF) const {
4104 // Win64 C++ EH needs to allocate the UnwindHelp object at some fixed offset
4105 // relative to RSP after the prologue. Find the offset of the last fixed
4106 // object, so that we can allocate a slot immediately following it. If there
4107 // were no fixed objects, use offset -SlotSize, which is immediately after the
4108 // return address. Fixed objects have negative frame indices.
4109 MachineFrameInfo &MFI = MF.getFrameInfo();
4110 WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
4111 int64_t MinFixedObjOffset = -SlotSize;
4112 for (int I = MFI.getObjectIndexBegin(); I < 0; ++I)
4113 MinFixedObjOffset = std::min(MinFixedObjOffset, MFI.getObjectOffset(I));
4114
4115 for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
4116 for (WinEHHandlerType &H : TBME.HandlerArray) {
4117 int FrameIndex = H.CatchObj.FrameIndex;
4118 if (FrameIndex != INT_MAX) {
4119 // Ensure alignment.
4120 unsigned Align = MFI.getObjectAlign(FrameIndex).value();
4121 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % Align;
4122 MinFixedObjOffset -= MFI.getObjectSize(FrameIndex);
4123 MFI.setObjectOffset(FrameIndex, MinFixedObjOffset);
4124 }
4125 }
4126 }
4127
4128 // Ensure alignment.
4129 MinFixedObjOffset -= std::abs(MinFixedObjOffset) % 8;
4130 int64_t UnwindHelpOffset = MinFixedObjOffset - SlotSize;
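  // E.g. MinFixedObjOffset = -20 aligns down to -24, placing UnwindHelp at
  // offset -32 (a worked example, not a fixed layout).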
4131 int UnwindHelpFI =
4132 MFI.CreateFixedObject(SlotSize, UnwindHelpOffset, /*IsImmutable=*/false);
4133 EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
4134
4135 // Store -2 into UnwindHelp on function entry. We have to scan forwards past
4136 // other frame setup instructions.
4137 MachineBasicBlock &MBB = MF.front();
4138 auto MBBI = MBB.begin();
4139 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
4140 ++MBBI;
4141
4142 DebugLoc DL = MBB.findDebugLoc(MBBI);
4143 addFrameReference(BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64mi32)),
4144 UnwindHelpFI)
4145 .addImm(-2);
4146 }
4147
4148 void X86FrameLowering::processFunctionBeforeFrameIndicesReplaced(
4149 MachineFunction &MF, RegScavenger *RS) const {
4150 auto *X86FI = MF.getInfo<X86MachineFunctionInfo>();
4151
4152 if (STI.is32Bit() && MF.hasEHFunclets())
4153 restoreWinEHStackPointersInParent(MF);
4154 // We have emitted the prologue and epilogue, so the stack-pointer-saving
4155 // instruction is no longer needed.
4156 if (MachineInstr *MI = X86FI->getStackPtrSaveMI()) {
4157 MI->eraseFromParent();
4158 X86FI->setStackPtrSaveMI(nullptr);
4159 }
4160 }
4161
4162 void X86FrameLowering::restoreWinEHStackPointersInParent(
4163 MachineFunction &MF) const {
4164 // 32-bit functions have to restore stack pointers when control is transferred
4165 // back to the parent function. These blocks are identified as eh pads that
4166 // are not funclet entries.
4167 bool IsSEH = isAsynchronousEHPersonality(
4168 classifyEHPersonality(MF.getFunction().getPersonalityFn()));
4169 for (MachineBasicBlock &MBB : MF) {
4170 bool NeedsRestore = MBB.isEHPad() && !MBB.isEHFuncletEntry();
4171 if (NeedsRestore)
4172 restoreWin32EHStackPointers(MBB, MBB.begin(), DebugLoc(),
4173 /*RestoreSP=*/IsSEH);
4174 }
4175 }
4176