//===- llvm/lib/Target/ARM/ARMCallLowering.cpp - Call lowering ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "ARMCallLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMISelLowering.h"
#include "ARMSubtarget.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

ARMCallLowering::ARMCallLowering(const ARMTargetLowering &TLI)
    : CallLowering(&TLI) {}

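/// Check whether a type can be handled by this call lowering. Supported
/// scalars are 1/8/16/32-bit integers, 32/64-bit floats, and pointers (which
/// lower to 32-bit integers); arrays and homogeneous structs of these are
/// accepted by recursing into their element type.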
static bool isSupportedType(const DataLayout &DL, const ARMTargetLowering &TLI,
                            Type *T) {
  if (T->isArrayTy())
    return isSupportedType(DL, TLI, T->getArrayElementType());

  if (T->isStructTy()) {
    // For now we only allow homogeneous structs that we can manipulate with
    // G_MERGE_VALUES and G_UNMERGE_VALUES
    auto StructT = cast<StructType>(T);
    for (unsigned i = 1, e = StructT->getNumElements(); i != e; ++i)
      if (StructT->getElementType(i) != StructT->getElementType(0))
        return false;
    return isSupportedType(DL, TLI, StructT->getElementType(0));
  }

  EVT VT = TLI.getValueType(DL, T, true);
  if (!VT.isSimple() || VT.isVector() ||
      !(VT.isInteger() || VT.isFloatingPoint()))
    return false;

  unsigned VTSize = VT.getSimpleVT().getSizeInBits();

  if (VTSize == 64)
    // FIXME: Support i64 too
    return VT.isFloatingPoint();

  return VTSize == 1 || VTSize == 8 || VTSize == 16 || VTSize == 32;
}

namespace {

/// Helper class for values going out through an ABI boundary (used for
/// handling function return values and call parameters).
struct ARMOutgoingValueHandler : public CallLowering::OutgoingValueHandler {
  ARMOutgoingValueHandler(MachineIRBuilder &MIRBuilder,
                          MachineRegisterInfo &MRI, MachineInstrBuilder &MIB,
                          CCAssignFn *AssignFn)
      : OutgoingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

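  // Compute the address of an outgoing stack argument slot as SP plus the
  // slot's offset. A rough sketch of the resulting MIR (the virtual register
  // names are illustrative only):
  //   %sp:_(p0) = COPY $sp
  //   %off:_(s32) = G_CONSTANT i32 <Offset>
  //   %addr:_(p0) = G_PTR_ADD %sp, %off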
  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    LLT p0 = LLT::pointer(0, 32);
    LLT s32 = LLT::scalar(32);
    auto SPReg = MIRBuilder.buildCopy(p0, Register(ARM::SP));

    auto OffsetReg = MIRBuilder.buildConstant(s32, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
    return AddrReg.getReg(0);
  }

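  // Copy an outgoing value into its assigned physical register, extending it
  // to the location type first when the calling convention requires that. The
  // physical register is also added as an implicit use of the call or return
  // instruction so that it stays live up to the ABI boundary.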
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value should be assigned to a register");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    assert(VA.getValVT().getSizeInBits() <= 64 && "Unsupported value size");
    assert(VA.getLocVT().getSizeInBits() <= 64 && "Unsupported location size");

    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    Register ExtReg = extendRegister(ValVReg, VA);
    auto MMO = MIRBuilder.getMF().getMachineMemOperand(
        MPO, MachineMemOperand::MOStore, VA.getLocVT().getStoreSize(),
        Align(1));
    MIRBuilder.buildStore(ExtReg, Addr, *MMO);
  }

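  // Handle values whose calling-convention assignment is marked as custom.
  // The only case supported here is f64, which is split into two 32-bit
  // halves assigned to two consecutive locations, e.g.:
  //   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %val(s64)
  // with the halves swapped on big-endian subtargets before they are copied
  // into their physical registers.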
  unsigned assignCustomValue(const CallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");

    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");

    // Custom lowering for other types, such as f16, is currently not supported
    if (VA.getValVT() != MVT::f64)
      return 0;

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};
    MIRBuilder.buildUnmerge(NewRegs, Arg.Regs[0]);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    return 1;
  }

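  // Run the calling-convention assignment function and record the maximum
  // stack offset seen so far; this becomes the amount of stack reserved via
  // ADJCALLSTACKDOWN/ADJCALLSTACKUP in lowerCall.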
  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    if (AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State))
      return true;

    StackSize =
        std::max(StackSize, static_cast<uint64_t>(State.getNextStackOffset()));
    return false;
  }

  MachineInstrBuilder &MIB;
  uint64_t StackSize = 0;
};

} // end anonymous namespace

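/// Split an aggregate argument into one ArgInfo per contained value. For
/// example, a homogeneous struct such as {float, float} becomes two f32
/// ArgInfos, each carrying the corresponding virtual register of the original
/// argument.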
void ARMCallLowering::splitToValueTypes(const ArgInfo &OrigArg,
                                        SmallVectorImpl<ArgInfo> &SplitArgs,
                                        MachineFunction &MF) const {
  const ARMTargetLowering &TLI = *getTLI<ARMTargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();
  const DataLayout &DL = MF.getDataLayout();
  const Function &F = MF.getFunction();

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, nullptr, nullptr, 0);
  assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");

  if (SplitVTs.size() == 1) {
    // Even if there is no splitting to do, we still want to replace the
    // original type (e.g. pointer type -> integer).
    auto Flags = OrigArg.Flags[0];
    Flags.setOrigAlign(DL.getABITypeAlign(OrigArg.Ty));
    SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
                           Flags, OrigArg.IsFixed);
    return;
  }

  // Create one ArgInfo for each virtual register.
  for (unsigned i = 0, e = SplitVTs.size(); i != e; ++i) {
    EVT SplitVT = SplitVTs[i];
    Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
    auto Flags = OrigArg.Flags[0];

    Flags.setOrigAlign(DL.getABITypeAlign(SplitTy));

    bool NeedsConsecutiveRegisters =
        TLI.functionArgumentNeedsConsecutiveRegisters(
            SplitTy, F.getCallingConv(), F.isVarArg());
    if (NeedsConsecutiveRegisters) {
      Flags.setInConsecutiveRegs();
      if (i == e - 1)
        Flags.setInConsecutiveRegsLast();
    }

    // FIXME: We also want to split SplitTy further.
    Register PartReg = OrigArg.Regs[i];
    SplitArgs.emplace_back(PartReg, SplitTy, Flags, OrigArg.IsFixed);
  }
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p MIRBuilder's insertion point is correct.
bool ARMCallLowering::lowerReturnVal(MachineIRBuilder &MIRBuilder,
                                     const Value *Val, ArrayRef<Register> VRegs,
                                     MachineInstrBuilder &Ret) const {
  if (!Val)
    // Nothing to do here.
    return true;

  auto &MF = MIRBuilder.getMF();
  const auto &F = MF.getFunction();

  auto DL = MF.getDataLayout();
  auto &TLI = *getTLI<ARMTargetLowering>();
  if (!isSupportedType(DL, TLI, Val->getType()))
    return false;

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);

  SmallVector<ArgInfo, 4> SplitRetInfos;
  splitToValueTypes(OrigRetInfo, SplitRetInfos, MF);

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForReturn(F.getCallingConv(), F.isVarArg());

  ARMOutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret,
                                     AssignFn);
  return handleAssignments(MIRBuilder, SplitRetInfos, RetHandler);
}

bool ARMCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                                  const Value *Val,
                                  ArrayRef<Register> VRegs) const {
  assert(!Val == VRegs.empty() && "Return value without a vreg");

  auto const &ST = MIRBuilder.getMF().getSubtarget<ARMSubtarget>();
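  // Build the subtarget's return instruction (e.g. tBX_RET on Thumb, BX_RET
  // on ARM; the exact opcode depends on the target features) without
  // inserting it, so that the return value copies emitted by lowerReturnVal
  // end up before it.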
  unsigned Opcode = ST.getReturnOpcode();
  auto Ret = MIRBuilder.buildInstrNoInsert(Opcode).add(predOps(ARMCC::AL));

  if (!lowerReturnVal(MIRBuilder, Val, VRegs, Ret))
    return false;

  MIRBuilder.insertInstr(Ret);
  return true;
}

namespace {

/// Helper class for values coming in through an ABI boundary (used for
/// handling formal arguments and call return values).
struct ARMIncomingValueHandler : public CallLowering::IncomingValueHandler {
  ARMIncomingValueHandler(MachineIRBuilder &MIRBuilder,
                          MachineRegisterInfo &MRI, CCAssignFn AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    auto &MFI = MIRBuilder.getMF().getFrameInfo();

    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);

    return MIRBuilder.buildFrameIndex(LLT::pointer(MPO.getAddrSpace(), 32), FI)
        .getReg(0);
  }

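  // Load an incoming value from its stack slot. If the calling convention
  // promoted a narrow value by sign- or zero-extending it to 32 bits, the
  // slot holds the extended value, so we load a full s32 and truncate it,
  // roughly:
  //   %tmp:_(s32) = G_LOAD %addr(p0)
  //   %val:_(s8) = G_TRUNC %tmp(s32)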
  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    assert((Size == 1 || Size == 2 || Size == 4 || Size == 8) &&
           "Unsupported size");

    if (VA.getLocInfo() == CCValAssign::SExt ||
        VA.getLocInfo() == CCValAssign::ZExt) {
      // If the value is zero- or sign-extended, its size becomes 4 bytes, so
      // that's what we should load.
      Size = 4;
      assert(MRI.getType(ValVReg).isScalar() && "Only scalars supported atm");

      auto LoadVReg = buildLoad(LLT::scalar(32), Addr, Size, MPO);
      MIRBuilder.buildTrunc(ValVReg, LoadVReg);
    } else {
      // If the value is not extended, a simple load will suffice.
      buildLoad(ValVReg, Addr, Size, MPO);
    }
  }

  MachineInstrBuilder buildLoad(const DstOp &Res, Register Addr, uint64_t Size,
                                MachinePointerInfo &MPO) {
    MachineFunction &MF = MIRBuilder.getMF();

    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOLoad, Size,
                                       inferAlignFromPtrInfo(MF, MPO));
    return MIRBuilder.buildLoad(Res, Addr, *MMO);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    assert(VA.isRegLoc() && "Value should be assigned to a register");
    assert(VA.getLocReg() == PhysReg && "Assigning to the wrong reg?");

    uint64_t ValSize = VA.getValVT().getFixedSizeInBits();
    uint64_t LocSize = VA.getLocVT().getFixedSizeInBits();

    assert(ValSize <= 64 && "Unsupported value size");
    assert(LocSize <= 64 && "Unsupported location size");

    markPhysRegUsed(PhysReg);
    if (ValSize == LocSize) {
      MIRBuilder.buildCopy(ValVReg, PhysReg);
    } else {
      assert(ValSize < LocSize && "Extensions not supported");

      // We cannot create a truncating copy, nor a trunc of a physical
      // register. Therefore, we need to copy the content of the physical
      // register into a virtual one and then truncate that.
      auto PhysRegToVReg = MIRBuilder.buildCopy(LLT::scalar(LocSize), PhysReg);
      MIRBuilder.buildTrunc(ValVReg, PhysRegToVReg);
    }
  }

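  // The incoming counterpart of the outgoing f64 handling above: copy the two
  // 32-bit halves out of their assigned physical registers, swap them on
  // big-endian subtargets, and reassemble the original value, roughly:
  //   %val:_(s64) = G_MERGE_VALUES %lo(s32), %hi(s32)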
  unsigned assignCustomValue(const ARMCallLowering::ArgInfo &Arg,
                             ArrayRef<CCValAssign> VAs) override {
    assert(Arg.Regs.size() == 1 && "Can't handle multiple regs yet");

    CCValAssign VA = VAs[0];
    assert(VA.needsCustom() && "Value doesn't need custom handling");

    // Custom lowering for other types, such as f16, is currently not supported
    if (VA.getValVT() != MVT::f64)
      return 0;

    CCValAssign NextVA = VAs[1];
    assert(NextVA.needsCustom() && "Value doesn't need custom handling");
    assert(NextVA.getValVT() == MVT::f64 && "Unsupported type");

    assert(VA.getValNo() == NextVA.getValNo() &&
           "Values belong to different arguments");

    assert(VA.isRegLoc() && "Value should be in reg");
    assert(NextVA.isRegLoc() && "Value should be in reg");

    Register NewRegs[] = {MRI.createGenericVirtualRegister(LLT::scalar(32)),
                          MRI.createGenericVirtualRegister(LLT::scalar(32))};

    assignValueToReg(NewRegs[0], VA.getLocReg(), VA);
    assignValueToReg(NewRegs[1], NextVA.getLocReg(), NextVA);

    bool IsLittle = MIRBuilder.getMF().getSubtarget<ARMSubtarget>().isLittle();
    if (!IsLittle)
      std::swap(NewRegs[0], NewRegs[1]);

    MIRBuilder.buildMerge(Arg.Regs[0], NewRegs);

    return 1;
  }

  /// Marking a physical register as used is different between formal
  /// parameters, where it's a basic block live-in, and call returns, where
  /// it's an implicit-def of the call instruction.
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;
};

struct FormalArgHandler : public ARMIncomingValueHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn AssignFn)
      : ARMIncomingValueHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

bool ARMCallLowering::lowerFormalArguments(
    MachineIRBuilder &MIRBuilder, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  auto &TLI = *getTLI<ARMTargetLowering>();
  auto Subtarget = TLI.getSubtarget();

  if (Subtarget->isThumb1Only())
    return false;

  // Quick exit if there aren't any args
  if (F.arg_empty())
    return true;

  if (F.isVarArg())
    return false;

  auto &MF = MIRBuilder.getMF();
  auto &MBB = MIRBuilder.getMBB();
  auto DL = MF.getDataLayout();

  for (auto &Arg : F.args()) {
    if (!isSupportedType(DL, TLI, Arg.getType()))
      return false;
    if (Arg.hasPassPointeeByValueCopyAttr())
      return false;
  }

  CCAssignFn *AssignFn =
      TLI.CCAssignFnForCall(F.getCallingConv(), F.isVarArg());

  FormalArgHandler ArgHandler(MIRBuilder, MIRBuilder.getMF().getRegInfo(),
                              AssignFn);

  SmallVector<ArgInfo, 8> SplitArgInfos;
  unsigned Idx = 0;
  for (auto &Arg : F.args()) {
    ArgInfo OrigArgInfo(VRegs[Idx], Arg.getType());

    setArgFlags(OrigArgInfo, Idx + AttributeList::FirstArgIndex, DL, F);
    splitToValueTypes(OrigArgInfo, SplitArgInfos, MF);

    Idx++;
  }

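  // Emit the argument copies at the very beginning of the entry block, before
  // any instructions that may already have been built, so that every use of
  // an argument is dominated by its definition.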
  if (!MBB.empty())
    MIRBuilder.setInstr(*MBB.begin());

  if (!handleAssignments(MIRBuilder, SplitArgInfos, ArgHandler))
    return false;

  // Move back to the end of the basic block.
  MIRBuilder.setMBB(MBB);
  return true;
}

namespace {

struct CallReturnHandler : public ARMIncomingValueHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ARMIncomingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

// FIXME: This should move to the ARMSubtarget when it supports all the
// opcodes.
unsigned getCallOpcode(const ARMSubtarget &STI, bool isDirect) {
  if (isDirect)
    return STI.isThumb() ? ARM::tBL : ARM::BL;

  if (STI.isThumb())
    return ARM::tBLXr;

  if (STI.hasV5TOps())
    return ARM::BLX;

  if (STI.hasV4TOps())
    return ARM::BX_CALL;

  return ARM::BMOVPCRX_CALL;
}
} // end anonymous namespace

bool ARMCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const auto &TLI = *getTLI<ARMTargetLowering>();
  const auto &DL = MF.getDataLayout();
  const auto &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  if (STI.genLongCalls())
    return false;

  if (STI.isThumb1Only())
    return false;

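  // Calls are bracketed by ADJCALLSTACKDOWN/ADJCALLSTACKUP. The amount of
  // stack to reserve is only known once the arguments have been assigned, so
  // the immediate operands are filled in at the end of this function.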
  auto CallSeqStart = MIRBuilder.buildInstr(ARM::ADJCALLSTACKDOWN);

  // Create the call instruction so we can add the implicit uses of arg
  // registers, but don't insert it yet.
  bool IsDirect = !Info.Callee.isReg();
  auto CallOpcode = getCallOpcode(STI, IsDirect);
  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpcode);

  bool IsThumb = STI.isThumb();
  if (IsThumb)
    MIB.add(predOps(ARMCC::AL));

  MIB.add(Info.Callee);
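  // For an indirect call through a virtual register, constrain that register
  // to a class the call instruction can accept.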
  if (!IsDirect) {
    auto CalleeReg = Info.Callee.getReg();
    if (CalleeReg && !Register::isPhysicalRegister(CalleeReg)) {
      unsigned CalleeIdx = IsThumb ? 2 : 0;
      MIB->getOperand(CalleeIdx).setReg(constrainOperandRegClass(
          MF, *TRI, MRI, *STI.getInstrInfo(), *STI.getRegBankInfo(),
          *MIB.getInstr(), MIB->getDesc(), Info.Callee, CalleeIdx));
    }
  }

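  // Mark the registers preserved across this call according to its calling
  // convention.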
  MIB.addRegMask(TRI->getCallPreservedMask(MF, Info.CallConv));

  bool IsVarArg = false;
  SmallVector<ArgInfo, 8> ArgInfos;
  for (auto Arg : Info.OrigArgs) {
    if (!isSupportedType(DL, TLI, Arg.Ty))
      return false;

    if (!Arg.IsFixed)
      IsVarArg = true;

    if (Arg.Flags[0].isByVal())
      return false;

    splitToValueTypes(Arg, ArgInfos, MF);
  }

  auto ArgAssignFn = TLI.CCAssignFnForCall(Info.CallConv, IsVarArg);
  ARMOutgoingValueHandler ArgHandler(MIRBuilder, MRI, MIB, ArgAssignFn);
  if (!handleAssignments(MIRBuilder, ArgInfos, ArgHandler))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  if (!Info.OrigRet.Ty->isVoidTy()) {
    if (!isSupportedType(DL, TLI, Info.OrigRet.Ty))
      return false;

    ArgInfos.clear();
    splitToValueTypes(Info.OrigRet, ArgInfos, MF);
    auto RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv, IsVarArg);
    CallReturnHandler RetHandler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, ArgInfos, RetHandler))
      return false;
  }

  // We now know the size of the stack, so we can update the ADJCALLSTACKDOWN
  // accordingly.
  CallSeqStart.addImm(ArgHandler.StackSize).addImm(0).add(predOps(ARMCC::AL));

  MIRBuilder.buildInstr(ARM::ADJCALLSTACKUP)
      .addImm(ArgHandler.StackSize)
      .addImm(0)
      .add(predOps(ARMCC::AL));

  return true;
}