//===-- FunctionLoweringInfo.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating functions from LLVM IR into
// Machine IR.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "function-lowering-info"

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it, or used by a
/// switch or atomic instruction, which may expand to multiple basic blocks.
static bool isUsedOutsideOfDefiningBlock(const Instruction *I) {
  if (I->use_empty()) return false;
  if (isa<PHINode>(I)) return true;
  const BasicBlock *BB = I->getParent();
  for (const User *U : I->users())
    if (cast<Instruction>(U)->getParent() != BB || isa<PHINode>(U))
      return true;

  return false;
}

static ISD::NodeType getPreferredExtendForValue(const Value *V) {
  // If the source value is used by compare instructions, and more of those
  // compares use signed predicates than unsigned ones, prefer SIGN_EXTEND.
  //
  // This removes some redundant sign and zero extension instructions and, in
  // turn, exposes more machine CSE opportunities.
  ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
  unsigned NumOfSigned = 0, NumOfUnsigned = 0;
  for (const User *U : V->users()) {
    if (const auto *CI = dyn_cast<CmpInst>(U)) {
      NumOfSigned += CI->isSigned();
      NumOfUnsigned += CI->isUnsigned();
    }
  }
  if (NumOfSigned > NumOfUnsigned)
    ExtendKind = ISD::SIGN_EXTEND;

  return ExtendKind;
}

void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf,
                               SelectionDAG *DAG) {
  Fn = &fn;
  MF = &mf;
  TLI = MF->getSubtarget().getTargetLowering();
  RegInfo = &MF->getRegInfo();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  DA = DAG->getDivergenceAnalysis();

  // Check whether the function can return without sret-demotion.
  SmallVector<ISD::OutputArg, 4> Outs;
  CallingConv::ID CC = Fn->getCallingConv();

  GetReturnInfo(CC, Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
                mf.getDataLayout());
  CanLowerReturn =
      TLI->CanLowerReturn(CC, *MF, Fn->isVarArg(), Outs, Fn->getContext());

  // If this personality uses funclets, we need to do a bit more work.
  DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;
  EHPersonality Personality = classifyEHPersonality(
      Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
  if (isFuncletEHPersonality(Personality)) {
    // Calculate state numbers if we haven't already.
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
    if (Personality == EHPersonality::MSVC_CXX)
      calculateWinCXXEHStateNumbers(&fn, EHInfo);
    else if (isAsynchronousEHPersonality(Personality))
      calculateSEHStateNumbers(&fn, EHInfo);
    else if (Personality == EHPersonality::CoreCLR)
      calculateClrEHStateNumbers(&fn, EHInfo);

    // Collect the catch objects referenced by the handlers, recording a
    // pointer to each handler's frame-index field so it can be patched once
    // the corresponding alloca is assigned a frame index below.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (const AllocaInst *AI = H.CatchObj.Alloca)
          CatchObjects.insert({AI, {}}).first->second.push_back(
              &H.CatchObj.FrameIndex);
        else
          H.CatchObj.FrameIndex = INT_MAX;
      }
    }
  }
  if (Personality == EHPersonality::Wasm_CXX) {
    WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
    calculateWasmEHInfo(&fn, EHInfo);
  }

  // Initialize the mapping of values to registers. This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  const Align StackAlign = TFI->getStackAlign();
  for (const BasicBlock &BB : *Fn) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        Type *Ty = AI->getAllocatedType();
        Align TyPrefAlign = MF->getDataLayout().getPrefTypeAlign(Ty);
        // The "specified" alignment is the alignment written on the alloca,
        // or the preferred alignment of the type if none is specified.
        //
        // (Unspecified alignment on allocas will be going away soon.)
        Align SpecifiedAlign = AI->getAlign();

        // If the preferred alignment of the type is higher than the specified
        // alignment of the alloca, promote the alignment, as long as it
        // doesn't require realigning the stack.
        //
        // FIXME: Do we really want to second-guess the IR in isel?
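        // In effect: clamp the type's preferred alignment to the stack
        // alignment, then raise it to at least the alloca's specified
        // alignment.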
        Align Alignment =
            std::max(std::min(TyPrefAlign, StackAlign), SpecifiedAlign);

        // Static allocas can be folded into the initial stack frame
        // adjustment. For targets that don't realign the stack, don't
        // do this if there is an extra alignment requirement.
        if (AI->isStaticAlloca() &&
            (TFI->isStackRealignable() || (Alignment <= StackAlign))) {
          const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
          uint64_t TySize =
              MF->getDataLayout().getTypeAllocSize(Ty).getKnownMinSize();

          TySize *= CUI->getZExtValue(); // Get total allocated size.
          if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
          int FrameIndex = INT_MAX;
          auto Iter = CatchObjects.find(AI);
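          // Catch objects on targets that require fixed catch objects get a
          // fixed, aliased stack object; everything else gets an ordinary
          // static stack object.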
          if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
            FrameIndex = MF->getFrameInfo().CreateFixedObject(
                TySize, 0, /*IsImmutable=*/false, /*isAliased=*/true);
            MF->getFrameInfo().setObjectAlignment(FrameIndex, Alignment);
          } else {
            FrameIndex = MF->getFrameInfo().CreateStackObject(TySize, Alignment,
                                                              false, AI);
          }

          // Scalable vectors may need a special StackID to distinguish
          // them from other (fixed size) stack objects.
          if (isa<ScalableVectorType>(Ty))
            MF->getFrameInfo().setStackID(FrameIndex,
                                          TFI->getStackIDForScalableVectors());

          StaticAllocaMap[AI] = FrameIndex;
          // Update the catch handler information.
          if (Iter != CatchObjects.end()) {
            for (int *CatchObjPtr : Iter->second)
              *CatchObjPtr = FrameIndex;
          }
        } else {
          // FIXME: Overaligned static allocas should be grouped into
          // a single dynamic allocation instead of using a separate
          // stack allocation for each one.
          // Inform the Frame Information that we have variable-sized objects.
          MF->getFrameInfo().CreateVariableSizedObject(
              Alignment <= StackAlign ? Align(1) : Alignment, AI);
        }
      }

      // Look for inline asm that clobbers the SP register.
      if (auto *Call = dyn_cast<CallBase>(&I)) {
        if (Call->isInlineAsm()) {
          Register SP = TLI->getStackPointerRegisterToSaveRestore();
          const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
          std::vector<TargetLowering::AsmOperandInfo> Ops =
              TLI->ParseConstraints(Fn->getParent()->getDataLayout(), TRI,
                                    *Call);
          for (TargetLowering::AsmOperandInfo &Op : Ops) {
            if (Op.Type == InlineAsm::isClobber) {
              // Clobbers don't have SDValue operands, hence SDValue().
              TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
              std::pair<unsigned, const TargetRegisterClass *> PhysReg =
                  TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
                                                    Op.ConstraintVT);
              if (PhysReg.first == SP)
                MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
            }
          }
        }
      }

      // Look for calls to the @llvm.va_start intrinsic. We can omit some
      // prologue boilerplate for variadic functions that don't examine their
      // arguments.
      if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
        if (II->getIntrinsicID() == Intrinsic::vastart)
          MF->getFrameInfo().setHasVAStart(true);
      }

      // If we have a musttail call in a variadic function, we need to ensure
      // we forward implicit register parameters.
      if (const auto *CI = dyn_cast<CallInst>(&I)) {
        if (CI->isMustTailCall() && Fn->isVarArg())
          MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
      }

      // Mark values used outside their block as exported, by allocating
      // a virtual register for them.
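      // (Allocas that already have a static frame index are skipped: they are
      // referenced through that index rather than a virtual register.)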
      if (isUsedOutsideOfDefiningBlock(&I))
        if (!isa<AllocaInst>(I) || !StaticAllocaMap.count(cast<AllocaInst>(&I)))
          InitializeRegForValue(&I);

      // Decide the preferred extend type for a value.
      PreferredExtendType[&I] = getPreferredExtendForValue(&I);
    }
  }

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F. This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (const BasicBlock &BB : *Fn) {
    // Don't create MachineBasicBlocks for imaginary EH pad blocks. These
    // blocks are really data, and no instructions can live here.
    if (BB.isEHPad()) {
      const Instruction *PadInst = BB.getFirstNonPHI();
      // If this is a non-landingpad EH pad, mark this function as using
      // funclets.
      // FIXME: SEH catchpads do not create EH scope/funclets, so we could
      // avoid setting this in such cases in order to improve frame layout.
      if (!isa<LandingPadInst>(PadInst)) {
        MF->setHasEHScopes(true);
        MF->setHasEHFunclets(true);
        MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
      }
      if (isa<CatchSwitchInst>(PadInst)) {
        assert(&*BB.begin() == PadInst &&
               "WinEHPrepare failed to remove PHIs from imaginary BBs");
        continue;
      }
      if (isa<FuncletPadInst>(PadInst))
        assert(&*BB.begin() == PadInst && "WinEHPrepare failed to demote PHIs");
    }

    MachineBasicBlock *MBB = mf.CreateMachineBasicBlock(&BB);
    MBBMap[&BB] = MBB;
    MF->push_back(MBB);

    // Transfer the address-taken flag. This is necessary because there could
    // be multiple MachineBasicBlocks corresponding to one BasicBlock, and only
    // the first one should be marked.
    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();

    // Mark landing pad blocks.
    if (BB.isEHPad())
      MBB->setIsEHPad();

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    for (const PHINode &PN : BB.phis()) {
      if (PN.use_empty())
        continue;

      // Skip empty types.
      if (PN.getType()->isEmptyTy())
        continue;

      DebugLoc DL = PN.getDebugLoc();
      unsigned PHIReg = ValueMap[&PN];
      assert(PHIReg && "PHI node does not have an assigned virtual register!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(*TLI, MF->getDataLayout(), PN.getType(), ValueVTs);
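      // A single IR value may be promoted or split across several machine
      // registers; emit one machine PHI per register it occupies.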
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
        for (unsigned i = 0; i != NumRegisters; ++i)
          BuildMI(MBB, DL, TII->get(TargetOpcode::PHI), PHIReg + i);
        PHIReg += NumRegisters;
      }
    }
  }

  if (isFuncletEHPersonality(Personality)) {
    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();

    // Map all BB references in the WinEH data to MBBs.
    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
      for (WinEHHandlerType &H : TBME.HandlerArray) {
        if (H.Handler)
          H.Handler = MBBMap[H.Handler.get<const BasicBlock *>()];
      }
    }
    for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
      if (UME.Cleanup)
        UME.Cleanup = MBBMap[UME.Cleanup.get<const BasicBlock *>()];
    for (SEHUnwindMapEntry &UME : EHInfo.SEHUnwindMap) {
      const auto *BB = UME.Handler.get<const BasicBlock *>();
      UME.Handler = MBBMap[BB];
    }
    for (ClrEHUnwindMapEntry &CME : EHInfo.ClrEHUnwindMap) {
      const auto *BB = CME.Handler.get<const BasicBlock *>();
      CME.Handler = MBBMap[BB];
    }
  } else if (Personality == EHPersonality::Wasm_CXX) {
    WasmEHFuncInfo &EHInfo = *MF->getWasmEHFuncInfo();
    // Map all BB references in the Wasm EH data to MBBs.
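    // The keys change from IR basic blocks to machine basic blocks, so build
    // a new map rather than mutating EHPadUnwindMap while iterating it.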
    DenseMap<BBOrMBB, BBOrMBB> NewMap;
    for (auto &KV : EHInfo.EHPadUnwindMap) {
      const auto *Src = KV.first.get<const BasicBlock *>();
      const auto *Dst = KV.second.get<const BasicBlock *>();
      NewMap[MBBMap[Src]] = MBBMap[Dst];
    }
    EHInfo.EHPadUnwindMap = std::move(NewMap);
  }
}

/// clear - Clear out all the function-specific state. This returns this
/// FunctionLoweringInfo to an empty state, ready to be used for a
/// different function.
void FunctionLoweringInfo::clear() {
  MBBMap.clear();
  ValueMap.clear();
  VirtReg2Value.clear();
  StaticAllocaMap.clear();
  LiveOutRegInfo.clear();
  VisitedBBs.clear();
  ArgDbgValues.clear();
  DescribedArgs.clear();
  ByValArgFrameIndexMap.clear();
  RegFixups.clear();
  RegsWithFixups.clear();
  StatepointStackSlots.clear();
  StatepointRelocationMaps.clear();
  PreferredExtendType.clear();
}

/// CreateReg - Allocate a single virtual register for the given type.
Register FunctionLoweringInfo::CreateReg(MVT VT, bool isDivergent) {
  return RegInfo->createVirtualRegister(
      MF->getSubtarget().getTargetLowering()->getRegClassFor(VT, isDivergent));
}

/// CreateRegs - Allocate the appropriate number of virtual registers of
/// the correctly promoted or expanded types. Assign these registers
/// consecutive vreg numbers and return the first assigned number.
///
/// In the case that the given value has struct or array type, this function
/// will assign registers for each member or element.
///
Register FunctionLoweringInfo::CreateRegs(Type *Ty, bool isDivergent) {
  const TargetLowering *TLI = MF->getSubtarget().getTargetLowering();

  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);

  Register FirstReg;
  for (unsigned Value = 0, e = ValueVTs.size(); Value != e; ++Value) {
    EVT ValueVT = ValueVTs[Value];
    MVT RegisterVT = TLI->getRegisterType(Ty->getContext(), ValueVT);

    unsigned NumRegs = TLI->getNumRegisters(Ty->getContext(), ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      Register R = CreateReg(RegisterVT, isDivergent);
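      // CreateReg allocates virtual registers back-to-back, so remembering
      // the first one is enough; the rest follow consecutively.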
      if (!FirstReg) FirstReg = R;
    }
  }
  return FirstReg;
}

Register FunctionLoweringInfo::CreateRegs(const Value *V) {
  return CreateRegs(V->getType(), DA && DA->isDivergent(V) &&
                                      !TLI->requiresUniformRegister(*MF, V));
}

/// GetLiveOutRegInfo - Gets LiveOutInfo for a register, returning NULL if the
/// register is a PHI destination and the PHI's LiveOutInfo is not valid. If
/// the register's LiveOutInfo is for a smaller bit width, it is extended to
/// the larger bit width by any-extension (the new high bits are unknown). The
/// bit width must be no smaller than the LiveOutInfo's existing bit width.
const FunctionLoweringInfo::LiveOutInfo *
FunctionLoweringInfo::GetLiveOutRegInfo(Register Reg, unsigned BitWidth) {
  if (!LiveOutRegInfo.inBounds(Reg))
    return nullptr;

  LiveOutInfo *LOI = &LiveOutRegInfo[Reg];
  if (!LOI->IsValid)
    return nullptr;

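  // The caller asked about a wider width: widen conservatively. The new high
  // bits are unknown, so only a single sign bit can be guaranteed.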
  if (BitWidth > LOI->Known.getBitWidth()) {
    LOI->NumSignBits = 1;
    LOI->Known = LOI->Known.anyext(BitWidth);
  }

  return LOI;
}

/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
/// register based on the LiveOutInfo of its operands.
void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
  Type *Ty = PN->getType();
  if (!Ty->isIntegerTy() || Ty->isVectorTy())
    return;

  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(*TLI, MF->getDataLayout(), Ty, ValueVTs);
  assert(ValueVTs.size() == 1 &&
         "PHIs with non-vector integer types should have a single VT.");
  EVT IntVT = ValueVTs[0];

  if (TLI->getNumRegisters(PN->getContext(), IntVT) != 1)
    return;
  IntVT = TLI->getTypeToTransformTo(PN->getContext(), IntVT);
  unsigned BitWidth = IntVT.getSizeInBits();

  Register DestReg = ValueMap[PN];
  if (!Register::isVirtualRegister(DestReg))
    return;
  LiveOutRegInfo.grow(DestReg);
  LiveOutInfo &DestLOI = LiveOutRegInfo[DestReg];

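  // Seed the destination's live-out info from the first incoming value, then
  // intersect it with the info from each remaining incoming value.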
  Value *V = PN->getIncomingValue(0);
  if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
    DestLOI.NumSignBits = 1;
    DestLOI.Known = KnownBits(BitWidth);
    return;
  }

  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    APInt Val = CI->getValue().zextOrTrunc(BitWidth);
    DestLOI.NumSignBits = Val.getNumSignBits();
    DestLOI.Known.Zero = ~Val;
    DestLOI.Known.One = Val;
  } else {
    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    Register SrcReg = ValueMap[V];
    if (!Register::isVirtualRegister(SrcReg)) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI = *SrcLOI;
  }

  assert(DestLOI.Known.Zero.getBitWidth() == BitWidth &&
         DestLOI.Known.One.getBitWidth() == BitWidth &&
         "Masks should have the same bit width as the type.");

  for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);
    if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
      DestLOI.NumSignBits = 1;
      DestLOI.Known = KnownBits(BitWidth);
      return;
    }

    if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      APInt Val = CI->getValue().zextOrTrunc(BitWidth);
      DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
      DestLOI.Known.Zero &= ~Val;
      DestLOI.Known.One &= Val;
      continue;
    }

    assert(ValueMap.count(V) && "V should have been placed in ValueMap when "
                                "its CopyToReg node was created.");
    Register SrcReg = ValueMap[V];
    if (!SrcReg.isVirtual()) {
      DestLOI.IsValid = false;
      return;
    }
    const LiveOutInfo *SrcLOI = GetLiveOutRegInfo(SrcReg, BitWidth);
    if (!SrcLOI) {
      DestLOI.IsValid = false;
      return;
    }
    DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
    DestLOI.Known.Zero &= SrcLOI->Known.Zero;
    DestLOI.Known.One &= SrcLOI->Known.One;
  }
}

/// setArgumentFrameIndex - Record the frame index for the byval
/// argument. This overrides any previous frame index entry for this
/// argument.
void FunctionLoweringInfo::setArgumentFrameIndex(const Argument *A,
                                                 int FI) {
  ByValArgFrameIndexMap[A] = FI;
}

/// getArgumentFrameIndex - Get the frame index for the byval argument.
/// If the argument does not have an assigned frame index, INT_MAX is
/// returned.
int FunctionLoweringInfo::getArgumentFrameIndex(const Argument *A) {
  auto I = ByValArgFrameIndexMap.find(A);
  if (I != ByValArgFrameIndexMap.end())
    return I->second;
  LLVM_DEBUG(dbgs() << "Argument does not have assigned frame index!\n");
  return INT_MAX;
}

Register FunctionLoweringInfo::getCatchPadExceptionPointerVReg(
    const Value *CPI, const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = MF->getRegInfo();
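  // Do a single map operation: insert a placeholder entry, and create the
  // virtual register only if this catch pad hasn't been seen before.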
  auto I = CatchPadExceptionPointers.insert({CPI, 0});
  Register &VReg = I.first->second;
  if (I.second)
    VReg = MRI.createVirtualRegister(RC);
  assert(VReg && "null vreg in exception pointer table!");
  return VReg;
}

const Value *
FunctionLoweringInfo::getValueFromVirtualReg(Register Vreg) {
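  // Lazily build the reverse map from virtual register to IR value on the
  // first query by walking the whole ValueMap.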
  if (VirtReg2Value.empty()) {
    SmallVector<EVT, 4> ValueVTs;
    for (auto &P : ValueMap) {
      ValueVTs.clear();
      ComputeValueVTs(*TLI, Fn->getParent()->getDataLayout(),
                      P.first->getType(), ValueVTs);
      unsigned Reg = P.second;
      for (EVT VT : ValueVTs) {
        unsigned NumRegisters = TLI->getNumRegisters(Fn->getContext(), VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          VirtReg2Value[Reg++] = P.first;
      }
    }
  }
  return VirtReg2Value.lookup(Vreg);
}