1 //===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 ///
9 /// \file
10 /// This file implements the lowering of LLVM calls to machine code calls for
11 /// GlobalISel.
12 ///
13 //===----------------------------------------------------------------------===//
14 
15 #include "AArch64CallLowering.h"
16 #include "AArch64ISelLowering.h"
17 #include "AArch64MachineFunctionInfo.h"
18 #include "AArch64RegisterInfo.h"
19 #include "AArch64Subtarget.h"
20 #include "llvm/ADT/ArrayRef.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/Analysis/ObjCARCUtil.h"
23 #include "llvm/CodeGen/Analysis.h"
24 #include "llvm/CodeGen/CallingConvLower.h"
25 #include "llvm/CodeGen/FunctionLoweringInfo.h"
26 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
27 #include "llvm/CodeGen/GlobalISel/Utils.h"
28 #include "llvm/CodeGen/LowLevelTypeUtils.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineMemOperand.h"
34 #include "llvm/CodeGen/MachineOperand.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/MachineValueType.h"
37 #include "llvm/CodeGen/TargetRegisterInfo.h"
38 #include "llvm/CodeGen/TargetSubtargetInfo.h"
39 #include "llvm/CodeGen/ValueTypes.h"
40 #include "llvm/IR/Argument.h"
41 #include "llvm/IR/Attributes.h"
42 #include "llvm/IR/Function.h"
43 #include "llvm/IR/Type.h"
44 #include "llvm/IR/Value.h"
45 #include <algorithm>
46 #include <cassert>
47 #include <cstdint>
48 #include <iterator>
49 
50 #define DEBUG_TYPE "aarch64-call-lowering"
51 
52 using namespace llvm;
53 
54 AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
55   : CallLowering(&TLI) {}
56 
57 static void applyStackPassedSmallTypeDAGHack(EVT OrigVT, MVT &ValVT,
58                                              MVT &LocVT) {
59   // If ValVT is i1/i8/i16, we should set LocVT to i8/i8/i16. This is a legacy
60   // hack because the DAG calls the assignment function with pre-legalized
61   // register typed values, not the raw type.
62   //
63   // This hack is not applied to return values which are not passed on the
64   // stack.
65   if (OrigVT == MVT::i1 || OrigVT == MVT::i8)
66     ValVT = LocVT = MVT::i8;
67   else if (OrigVT == MVT::i16)
68     ValVT = LocVT = MVT::i16;
69 }
70 
71 // Account for i1/i8/i16 stack passed value hack
72 static LLT getStackValueStoreTypeHack(const CCValAssign &VA) {
73   const MVT ValVT = VA.getValVT();
74   return (ValVT == MVT::i8 || ValVT == MVT::i16) ? LLT(ValVT)
75                                                  : LLT(VA.getLocVT());
76 }
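// For example: with the helpers above, an i8 or i16 value passed on the stack
// keeps its narrow type, so the handlers below use 1- or 2-byte stack accesses
// (LLT s8/s16) for it rather than the width the CCValAssign's LocVT reports.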
77 
78 namespace {
79 
80 struct AArch64IncomingValueAssigner
81     : public CallLowering::IncomingValueAssigner {
82   AArch64IncomingValueAssigner(CCAssignFn *AssignFn_,
83                                CCAssignFn *AssignFnVarArg_)
84       : IncomingValueAssigner(AssignFn_, AssignFnVarArg_) {}
85 
86   bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
87                  CCValAssign::LocInfo LocInfo,
88                  const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
89                  CCState &State) override {
90     applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
91     return IncomingValueAssigner::assignArg(ValNo, OrigVT, ValVT, LocVT,
92                                             LocInfo, Info, Flags, State);
93   }
94 };
95 
96 struct AArch64OutgoingValueAssigner
97     : public CallLowering::OutgoingValueAssigner {
98   const AArch64Subtarget &Subtarget;
99 
100   /// Track if this is used for a return instead of function argument
101   /// passing. We apply a hack to i1/i8/i16 stack passed values, but do not use
102   /// stack passed returns for them and cannot apply the type adjustment.
103   bool IsReturn;
104 
105   AArch64OutgoingValueAssigner(CCAssignFn *AssignFn_,
106                                CCAssignFn *AssignFnVarArg_,
107                                const AArch64Subtarget &Subtarget_,
108                                bool IsReturn)
109       : OutgoingValueAssigner(AssignFn_, AssignFnVarArg_),
110         Subtarget(Subtarget_), IsReturn(IsReturn) {}
111 
112   bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
113                  CCValAssign::LocInfo LocInfo,
114                  const CallLowering::ArgInfo &Info, ISD::ArgFlagsTy Flags,
115                  CCState &State) override {
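    // Note: on Win64, variadic calls pass even the "fixed" arguments using
    // the varargs convention, so those are routed through AssignFnVarArg
    // below as well.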
116     bool IsCalleeWin = Subtarget.isCallingConvWin64(State.getCallingConv());
117     bool UseVarArgsCCForFixed = IsCalleeWin && State.isVarArg();
118 
119     bool Res;
120     if (Info.IsFixed && !UseVarArgsCCForFixed) {
121       if (!IsReturn)
122         applyStackPassedSmallTypeDAGHack(OrigVT, ValVT, LocVT);
123       Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
124     } else
125       Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);
126 
127     StackSize = State.getStackSize();
128     return Res;
129   }
130 };
131 
132 struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
133   IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
134       : IncomingValueHandler(MIRBuilder, MRI) {}
135 
136   Register getStackAddress(uint64_t Size, int64_t Offset,
137                            MachinePointerInfo &MPO,
138                            ISD::ArgFlagsTy Flags) override {
139     auto &MFI = MIRBuilder.getMF().getFrameInfo();
140 
141     // Byval is assumed to be writable memory, but other stack passed arguments
142     // are not.
143     const bool IsImmutable = !Flags.isByVal();
144 
145     int FI = MFI.CreateFixedObject(Size, Offset, IsImmutable);
146     MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
147     auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
148     return AddrReg.getReg(0);
149   }
150 
151   LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
152                              ISD::ArgFlagsTy Flags) const override {
153     // For pointers, we just need to fixup the integer types reported in the
154     // CCValAssign.
155     if (Flags.isPointer())
156       return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
157     return getStackValueStoreTypeHack(VA);
158   }
159 
160   void assignValueToReg(Register ValVReg, Register PhysReg,
161                         const CCValAssign &VA) override {
162     markPhysRegUsed(PhysReg);
163     IncomingValueHandler::assignValueToReg(ValVReg, PhysReg, VA);
164   }
165 
166   void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
167                             const MachinePointerInfo &MPO,
168                             const CCValAssign &VA) override {
169     MachineFunction &MF = MIRBuilder.getMF();
170 
171     LLT ValTy(VA.getValVT());
172     LLT LocTy(VA.getLocVT());
173 
174     // Fixup the types for the DAG compatibility hack.
175     if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16)
176       std::swap(ValTy, LocTy);
177     else {
178       // The calling code knows if this is a pointer or not; we're only touching
179       // the LocTy for the i8/i16 hack.
180       assert(LocTy.getSizeInBits() == MemTy.getSizeInBits());
181       LocTy = MemTy;
182     }
183 
184     auto MMO = MF.getMachineMemOperand(
185         MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, LocTy,
186         inferAlignFromPtrInfo(MF, MPO));
187 
188     switch (VA.getLocInfo()) {
189     case CCValAssign::LocInfo::ZExt:
190       MIRBuilder.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, ValVReg, Addr, *MMO);
191       return;
192     case CCValAssign::LocInfo::SExt:
193       MIRBuilder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, ValVReg, Addr, *MMO);
194       return;
195     default:
196       MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
197       return;
198     }
199   }
200 
201   /// How the physical register gets marked varies between formal
202   /// parameters (it's a basic-block live-in), and a call instruction
203   /// (it's an implicit-def of the BL).
204   virtual void markPhysRegUsed(MCRegister PhysReg) = 0;
205 };
206 
207 struct FormalArgHandler : public IncomingArgHandler {
208   FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
209       : IncomingArgHandler(MIRBuilder, MRI) {}
210 
211   void markPhysRegUsed(MCRegister PhysReg) override {
212     MIRBuilder.getMRI()->addLiveIn(PhysReg);
213     MIRBuilder.getMBB().addLiveIn(PhysReg);
214   }
215 };
216 
217 struct CallReturnHandler : public IncomingArgHandler {
218   CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
219                     MachineInstrBuilder MIB)
220       : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}
221 
222   void markPhysRegUsed(MCRegister PhysReg) override {
223     MIB.addDef(PhysReg, RegState::Implicit);
224   }
225 
226   MachineInstrBuilder MIB;
227 };
228 
229 /// A special return arg handler for "returned" attribute arg calls.
230 struct ReturnedArgCallReturnHandler : public CallReturnHandler {
231   ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
232                                MachineRegisterInfo &MRI,
233                                MachineInstrBuilder MIB)
234       : CallReturnHandler(MIRBuilder, MRI, MIB) {}
235 
236   void markPhysRegUsed(MCRegister PhysReg) override {}
237 };
238 
239 struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
240   OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
241                      MachineInstrBuilder MIB, bool IsTailCall = false,
242                      int FPDiff = 0)
243       : OutgoingValueHandler(MIRBuilder, MRI), MIB(MIB), IsTailCall(IsTailCall),
244         FPDiff(FPDiff),
245         Subtarget(MIRBuilder.getMF().getSubtarget<AArch64Subtarget>()) {}
246 
247   Register getStackAddress(uint64_t Size, int64_t Offset,
248                            MachinePointerInfo &MPO,
249                            ISD::ArgFlagsTy Flags) override {
250     MachineFunction &MF = MIRBuilder.getMF();
251     LLT p0 = LLT::pointer(0, 64);
252     LLT s64 = LLT::scalar(64);
253 
254     if (IsTailCall) {
255       assert(!Flags.isByVal() && "byval unhandled with tail calls");
256 
257       Offset += FPDiff;
258       int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
259       auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
260       MPO = MachinePointerInfo::getFixedStack(MF, FI);
261       return FIReg.getReg(0);
262     }
263 
264     if (!SPReg)
265       SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);
266 
267     auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);
268 
269     auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);
270 
271     MPO = MachinePointerInfo::getStack(MF, Offset);
272     return AddrReg.getReg(0);
273   }
274 
275   /// We need to fixup the reported store size for certain value types because
276   /// we invert the interpretation of ValVT and LocVT in certain cases. This is
277   /// for compatibility with the DAG call lowering implementation, which we're
278   /// currently building on top of.
279   LLT getStackValueStoreType(const DataLayout &DL, const CCValAssign &VA,
280                              ISD::ArgFlagsTy Flags) const override {
281     if (Flags.isPointer())
282       return CallLowering::ValueHandler::getStackValueStoreType(DL, VA, Flags);
283     return getStackValueStoreTypeHack(VA);
284   }
285 
286   void assignValueToReg(Register ValVReg, Register PhysReg,
287                         const CCValAssign &VA) override {
288     MIB.addUse(PhysReg, RegState::Implicit);
289     Register ExtReg = extendRegister(ValVReg, VA);
290     MIRBuilder.buildCopy(PhysReg, ExtReg);
291   }
292 
293   void assignValueToAddress(Register ValVReg, Register Addr, LLT MemTy,
294                             const MachinePointerInfo &MPO,
295                             const CCValAssign &VA) override {
296     MachineFunction &MF = MIRBuilder.getMF();
297     auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, MemTy,
298                                        inferAlignFromPtrInfo(MF, MPO));
299     MIRBuilder.buildStore(ValVReg, Addr, *MMO);
300   }
301 
302   void assignValueToAddress(const CallLowering::ArgInfo &Arg, unsigned RegIndex,
303                             Register Addr, LLT MemTy,
304                             const MachinePointerInfo &MPO,
305                             const CCValAssign &VA) override {
306     unsigned MaxSize = MemTy.getSizeInBytes() * 8;
307     // For varargs, we always want to extend them to 8 bytes, in which case
308     // we disable setting a max.
309     if (!Arg.IsFixed)
310       MaxSize = 0;
311 
312     Register ValVReg = Arg.Regs[RegIndex];
313     if (VA.getLocInfo() != CCValAssign::LocInfo::FPExt) {
314       MVT LocVT = VA.getLocVT();
315       MVT ValVT = VA.getValVT();
316 
317       if (VA.getValVT() == MVT::i8 || VA.getValVT() == MVT::i16) {
318         std::swap(ValVT, LocVT);
319         MemTy = LLT(VA.getValVT());
320       }
321 
322       ValVReg = extendRegister(ValVReg, VA, MaxSize);
323     } else {
324       // The store does not cover the full allocated stack slot.
325       MemTy = LLT(VA.getValVT());
326     }
327 
328     assignValueToAddress(ValVReg, Addr, MemTy, MPO, VA);
329   }
330 
331   MachineInstrBuilder MIB;
332 
333   bool IsTailCall;
334 
335   /// For tail calls, the byte offset of the call's argument area from the
336   /// callee's. Unused elsewhere.
337   int FPDiff;
338 
339   // Cache the SP register vreg if we need it more than once in this call site.
340   Register SPReg;
341 
342   const AArch64Subtarget &Subtarget;
343 };
344 } // namespace
345 
346 static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
347   return (CallConv == CallingConv::Fast && TailCallOpt) ||
348          CallConv == CallingConv::Tail || CallConv == CallingConv::SwiftTail;
349 }
350 
351 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
352                                       const Value *Val,
353                                       ArrayRef<Register> VRegs,
354                                       FunctionLoweringInfo &FLI,
355                                       Register SwiftErrorVReg) const {
356   auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
357   assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
358          "Return value without a vreg");
359 
360   bool Success = true;
361   if (!FLI.CanLowerReturn) {
362     insertSRetStores(MIRBuilder, Val->getType(), VRegs, FLI.DemoteRegister);
363   } else if (!VRegs.empty()) {
364     MachineFunction &MF = MIRBuilder.getMF();
365     const Function &F = MF.getFunction();
366     const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
367 
368     MachineRegisterInfo &MRI = MF.getRegInfo();
369     const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
370     CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
371     auto &DL = F.getParent()->getDataLayout();
372     LLVMContext &Ctx = Val->getType()->getContext();
373 
374     SmallVector<EVT, 4> SplitEVTs;
375     ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
376     assert(VRegs.size() == SplitEVTs.size() &&
377            "For each split Type there should be exactly one VReg.");
378 
379     SmallVector<ArgInfo, 8> SplitArgs;
380     CallingConv::ID CC = F.getCallingConv();
381 
382     for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
383       Register CurVReg = VRegs[i];
384       ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx), 0};
385       setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
386 
387       // i1 is a special case because SDAG i1 true is naturally zero extended
388       // when widened using ANYEXT. We need to do it explicitly here.
389       auto &Flags = CurArgInfo.Flags[0];
390       if (MRI.getType(CurVReg).getSizeInBits() == 1 && !Flags.isSExt() &&
391           !Flags.isZExt()) {
392         CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
393       } else if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) ==
394                  1) {
395         // Some types will need extending as specified by the CC.
396         MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
397         if (EVT(NewVT) != SplitEVTs[i]) {
398           unsigned ExtendOp = TargetOpcode::G_ANYEXT;
399           if (F.getAttributes().hasRetAttr(Attribute::SExt))
400             ExtendOp = TargetOpcode::G_SEXT;
401           else if (F.getAttributes().hasRetAttr(Attribute::ZExt))
402             ExtendOp = TargetOpcode::G_ZEXT;
403 
404           LLT NewLLT(NewVT);
405           LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
406           CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
407           // Instead of an extend, we might have a vector type which needs
408           // padding with more elements, e.g. <2 x half> -> <4 x half>.
409           if (NewVT.isVector()) {
410             if (OldLLT.isVector()) {
411               if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
412 
413                 CurVReg =
414                     MIRBuilder.buildPadVectorWithUndefElements(NewLLT, CurVReg)
415                         .getReg(0);
416               } else {
417                 // Just do a vector extend.
418                 CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
419                               .getReg(0);
420               }
421             } else if (NewLLT.getNumElements() >= 2 &&
422                        NewLLT.getNumElements() <= 8) {
423               // We need to pad a <1 x S> type to <2/4/8 x S>. Since we don't
424               // have <1 x S> vector types in GISel we use a build_vector
425               // instead of a vector merge/concat.
426               CurVReg =
427                   MIRBuilder.buildPadVectorWithUndefElements(NewLLT, CurVReg)
428                       .getReg(0);
429             } else {
430               LLVM_DEBUG(dbgs() << "Could not handle ret ty\n");
431               return false;
432             }
433           } else {
434             // If the split EVT was a <1 x T> vector, and NewVT is T, then we
435             // don't have to do anything since we don't distinguish between the
436             // two.
437             if (NewLLT != MRI.getType(CurVReg)) {
438               // A scalar extend.
439               CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
440                             .getReg(0);
441             }
442           }
443         }
444       }
445       if (CurVReg != CurArgInfo.Regs[0]) {
446         CurArgInfo.Regs[0] = CurVReg;
447         // Reset the arg flags after modifying CurVReg.
448         setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
449       }
450       splitToValueTypes(CurArgInfo, SplitArgs, DL, CC);
451     }
452 
453     AArch64OutgoingValueAssigner Assigner(AssignFn, AssignFn, Subtarget,
454                                           /*IsReturn*/ true);
455     OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
456     Success = determineAndHandleAssignments(Handler, Assigner, SplitArgs,
457                                             MIRBuilder, CC, F.isVarArg());
458   }
459 
460   if (SwiftErrorVReg) {
461     MIB.addUse(AArch64::X21, RegState::Implicit);
462     MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
463   }
464 
465   MIRBuilder.insertInstr(MIB);
466   return Success;
467 }
468 
469 bool AArch64CallLowering::canLowerReturn(MachineFunction &MF,
470                                          CallingConv::ID CallConv,
471                                          SmallVectorImpl<BaseArgInfo> &Outs,
472                                          bool IsVarArg) const {
473   SmallVector<CCValAssign, 16> ArgLocs;
474   const auto &TLI = *getTLI<AArch64TargetLowering>();
475   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs,
476                  MF.getFunction().getContext());
477 
478   return checkReturn(CCInfo, Outs, TLI.CCAssignFnForReturn(CallConv));
479 }
480 
481 /// Helper function to compute forwarded registers for musttail calls. Computes
482 /// the forwarded registers, sets MBB liveness, and emits COPY instructions that
483 /// can be used to save + restore registers later.
484 static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
485                                              CCAssignFn *AssignFn) {
486   MachineBasicBlock &MBB = MIRBuilder.getMBB();
487   MachineFunction &MF = MIRBuilder.getMF();
488   MachineFrameInfo &MFI = MF.getFrameInfo();
489 
490   if (!MFI.hasMustTailInVarArgFunc())
491     return;
492 
493   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
494   const Function &F = MF.getFunction();
495   assert(F.isVarArg() && "Expected F to be vararg?");
496 
497   // Compute the set of forwarded registers. The rest are scratch.
498   SmallVector<CCValAssign, 16> ArgLocs;
499   CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
500                  F.getContext());
501   SmallVector<MVT, 2> RegParmTypes;
502   RegParmTypes.push_back(MVT::i64);
503   RegParmTypes.push_back(MVT::f128);
504 
505   // Later on, we can use this vector to restore the registers if necessary.
506   SmallVectorImpl<ForwardedRegister> &Forwards =
507       FuncInfo->getForwardedMustTailRegParms();
508   CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);
509 
510   // Conservatively forward X8, since it might be used for an aggregate
511   // return.
512   if (!CCInfo.isAllocated(AArch64::X8)) {
513     Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
514     Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
515   }
516 
517   // Add the forwards to the MachineBasicBlock and MachineFunction.
518   for (const auto &F : Forwards) {
519     MBB.addLiveIn(F.PReg);
520     MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
521   }
522 }
523 
524 bool AArch64CallLowering::fallBackToDAGISel(const MachineFunction &MF) const {
525   auto &F = MF.getFunction();
526   if (F.getReturnType()->isScalableTy() ||
527       llvm::any_of(F.args(), [](const Argument &A) {
528         return A.getType()->isScalableTy();
529       }))
530     return true;
531   const auto &ST = MF.getSubtarget<AArch64Subtarget>();
532   if (!ST.hasNEON() || !ST.hasFPARMv8()) {
533     LLVM_DEBUG(dbgs() << "Falling back to SDAG because no-NEON targets are not supported\n");
534     return true;
535   }
536 
537   SMEAttrs Attrs(F);
538   if (Attrs.hasZAState() || Attrs.hasStreamingInterfaceOrBody() ||
539       Attrs.hasStreamingCompatibleInterface())
540     return true;
541 
542   return false;
543 }
544 
545 void AArch64CallLowering::saveVarArgRegisters(
546     MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
547     CCState &CCInfo) const {
548   auto GPRArgRegs = AArch64::getGPRArgRegs();
549   auto FPRArgRegs = AArch64::getFPRArgRegs();
550 
551   MachineFunction &MF = MIRBuilder.getMF();
552   MachineRegisterInfo &MRI = MF.getRegInfo();
553   MachineFrameInfo &MFI = MF.getFrameInfo();
554   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
555   auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
556   bool IsWin64CC =
557       Subtarget.isCallingConvWin64(CCInfo.getCallingConv());
558   const LLT p0 = LLT::pointer(0, 64);
559   const LLT s64 = LLT::scalar(64);
560 
561   unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);
562   unsigned NumVariadicGPRArgRegs = GPRArgRegs.size() - FirstVariadicGPR + 1;
563 
564   unsigned GPRSaveSize = 8 * (GPRArgRegs.size() - FirstVariadicGPR);
565   int GPRIdx = 0;
566   if (GPRSaveSize != 0) {
567     if (IsWin64CC) {
568       GPRIdx = MFI.CreateFixedObject(GPRSaveSize,
569                                      -static_cast<int>(GPRSaveSize), false);
570       if (GPRSaveSize & 15)
571         // The extra size here, if triggered, will always be 8.
572         MFI.CreateFixedObject(16 - (GPRSaveSize & 15),
573                               -static_cast<int>(alignTo(GPRSaveSize, 16)),
574                               false);
575     } else
576       GPRIdx = MFI.CreateStackObject(GPRSaveSize, Align(8), false);
577 
578     auto FIN = MIRBuilder.buildFrameIndex(p0, GPRIdx);
579     auto Offset =
580         MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 8);
581 
582     for (unsigned i = FirstVariadicGPR; i < GPRArgRegs.size(); ++i) {
583       Register Val = MRI.createGenericVirtualRegister(s64);
584       Handler.assignValueToReg(
585           Val, GPRArgRegs[i],
586           CCValAssign::getReg(i + MF.getFunction().getNumOperands(), MVT::i64,
587                               GPRArgRegs[i], MVT::i64, CCValAssign::Full));
588       auto MPO = IsWin64CC ? MachinePointerInfo::getFixedStack(
589                                MF, GPRIdx, (i - FirstVariadicGPR) * 8)
590                          : MachinePointerInfo::getStack(MF, i * 8);
591       MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
592 
593       FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
594                                    FIN.getReg(0), Offset);
595     }
596   }
597   FuncInfo->setVarArgsGPRIndex(GPRIdx);
598   FuncInfo->setVarArgsGPRSize(GPRSaveSize);
599 
600   if (Subtarget.hasFPARMv8() && !IsWin64CC) {
601     unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);
602 
603     unsigned FPRSaveSize = 16 * (FPRArgRegs.size() - FirstVariadicFPR);
604     int FPRIdx = 0;
605     if (FPRSaveSize != 0) {
606       FPRIdx = MFI.CreateStackObject(FPRSaveSize, Align(16), false);
607 
608       auto FIN = MIRBuilder.buildFrameIndex(p0, FPRIdx);
609       auto Offset =
610           MIRBuilder.buildConstant(MRI.createGenericVirtualRegister(s64), 16);
611 
612       for (unsigned i = FirstVariadicFPR; i < FPRArgRegs.size(); ++i) {
613         Register Val = MRI.createGenericVirtualRegister(LLT::scalar(128));
614         Handler.assignValueToReg(
615             Val, FPRArgRegs[i],
616             CCValAssign::getReg(
617                 i + MF.getFunction().getNumOperands() + NumVariadicGPRArgRegs,
618                 MVT::f128, FPRArgRegs[i], MVT::f128, CCValAssign::Full));
619 
620         auto MPO = MachinePointerInfo::getStack(MF, i * 16);
621         MIRBuilder.buildStore(Val, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
622 
623         FIN = MIRBuilder.buildPtrAdd(MRI.createGenericVirtualRegister(p0),
624                                      FIN.getReg(0), Offset);
625       }
626     }
627     FuncInfo->setVarArgsFPRIndex(FPRIdx);
628     FuncInfo->setVarArgsFPRSize(FPRSaveSize);
629   }
630 }
631 
632 bool AArch64CallLowering::lowerFormalArguments(
633     MachineIRBuilder &MIRBuilder, const Function &F,
634     ArrayRef<ArrayRef<Register>> VRegs, FunctionLoweringInfo &FLI) const {
635   MachineFunction &MF = MIRBuilder.getMF();
636   MachineBasicBlock &MBB = MIRBuilder.getMBB();
637   MachineRegisterInfo &MRI = MF.getRegInfo();
638   auto &DL = F.getParent()->getDataLayout();
639   auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
640 
641   // Arm64EC has extra requirements for varargs calls which are only implemented
642   // in SelectionDAG; bail out for now.
643   if (F.isVarArg() && Subtarget.isWindowsArm64EC())
644     return false;
645 
646   // Arm64EC thunks have a special calling convention which is only implemented
647   // in SelectionDAG; bail out for now.
648   if (F.getCallingConv() == CallingConv::ARM64EC_Thunk_Native ||
649       F.getCallingConv() == CallingConv::ARM64EC_Thunk_X64)
650     return false;
651 
652   bool IsWin64 = Subtarget.isCallingConvWin64(F.getCallingConv()) && !Subtarget.isWindowsArm64EC();
653 
654   SmallVector<ArgInfo, 8> SplitArgs;
655   SmallVector<std::pair<Register, Register>> BoolArgs;
656 
657   // Insert the hidden sret parameter if the return value won't fit in the
658   // return registers.
659   if (!FLI.CanLowerReturn)
660     insertSRetIncomingArgument(F, SplitArgs, FLI.DemoteRegister, MRI, DL);
661 
662   unsigned i = 0;
663   for (auto &Arg : F.args()) {
664     if (DL.getTypeStoreSize(Arg.getType()).isZero())
665       continue;
666 
667     ArgInfo OrigArg{VRegs[i], Arg, i};
668     setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
669 
670     // i1 arguments are zero-extended to i8 by the caller. Emit a
671     // hint to reflect this.
672     if (OrigArg.Ty->isIntegerTy(1)) {
673       assert(OrigArg.Regs.size() == 1 &&
674              MRI.getType(OrigArg.Regs[0]).getSizeInBits() == 1 &&
675              "Unexpected registers used for i1 arg");
676 
677       auto &Flags = OrigArg.Flags[0];
678       if (!Flags.isZExt() && !Flags.isSExt()) {
679         // Lower i1 argument as i8, and insert AssertZExt + Trunc later.
680         Register OrigReg = OrigArg.Regs[0];
681         Register WideReg = MRI.createGenericVirtualRegister(LLT::scalar(8));
682         OrigArg.Regs[0] = WideReg;
683         BoolArgs.push_back({OrigReg, WideReg});
684       }
685     }
686 
687     if (Arg.hasAttribute(Attribute::SwiftAsync))
688       MF.getInfo<AArch64FunctionInfo>()->setHasSwiftAsyncContext(true);
689 
690     splitToValueTypes(OrigArg, SplitArgs, DL, F.getCallingConv());
691     ++i;
692   }
693 
694   if (!MBB.empty())
695     MIRBuilder.setInstr(*MBB.begin());
696 
697   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
698   CCAssignFn *AssignFn = TLI.CCAssignFnForCall(F.getCallingConv(), IsWin64 && F.isVarArg());
699 
700   AArch64IncomingValueAssigner Assigner(AssignFn, AssignFn);
701   FormalArgHandler Handler(MIRBuilder, MRI);
702   SmallVector<CCValAssign, 16> ArgLocs;
703   CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
704   if (!determineAssignments(Assigner, SplitArgs, CCInfo) ||
705       !handleAssignments(Handler, SplitArgs, CCInfo, ArgLocs, MIRBuilder))
706     return false;
707 
708   if (!BoolArgs.empty()) {
709     for (auto &KV : BoolArgs) {
710       Register OrigReg = KV.first;
711       Register WideReg = KV.second;
712       LLT WideTy = MRI.getType(WideReg);
713       assert(MRI.getType(OrigReg).getScalarSizeInBits() == 1 &&
714              "Unexpected bit size of a bool arg");
715       MIRBuilder.buildTrunc(
716           OrigReg, MIRBuilder.buildAssertZExt(WideTy, WideReg, 1).getReg(0));
717     }
718   }
719 
720   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
721   uint64_t StackSize = Assigner.StackSize;
722   if (F.isVarArg()) {
723     if ((!Subtarget.isTargetDarwin() && !Subtarget.isWindowsArm64EC()) || IsWin64) {
724       // The AAPCS variadic function ABI is identical to the non-variadic
725       // one. As a result there may be more arguments in registers and we should
726       // save them for future reference.
727       // Win64 variadic functions also pass arguments in registers, but all
728       // float arguments are passed in integer registers.
729       saveVarArgRegisters(MIRBuilder, Handler, CCInfo);
730     } else if (Subtarget.isWindowsArm64EC()) {
731       return false;
732     }
733 
734     // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
735     StackSize = alignTo(Assigner.StackSize, Subtarget.isTargetILP32() ? 4 : 8);
736 
737     auto &MFI = MIRBuilder.getMF().getFrameInfo();
738     FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackSize, true));
739   }
740 
741   if (doesCalleeRestoreStack(F.getCallingConv(),
742                              MF.getTarget().Options.GuaranteedTailCallOpt)) {
743     // We have a non-standard ABI, so why not make full use of the stack that
744     // we're going to pop? It must be aligned to 16 B in any case.
745     StackSize = alignTo(StackSize, 16);
746 
747     // If we're expected to restore the stack (e.g. fastcc), then we'll be
748     // adding a multiple of 16.
749     FuncInfo->setArgumentStackToRestore(StackSize);
750 
751     // Our own callers will guarantee that the space is free by giving an
752     // aligned value to CALLSEQ_START.
753   }
754 
755   // When we tail call, we need to check if the callee's arguments
756   // will fit on the caller's stack. So, whenever we lower formal arguments,
757   // we should keep track of this information, since we might lower a tail call
758   // in this function later.
759   FuncInfo->setBytesInStackArgArea(StackSize);
760 
761   if (Subtarget.hasCustomCallingConv())
762     Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
763 
764   handleMustTailForwardedRegisters(MIRBuilder, AssignFn);
765 
766   // Move back to the end of the basic block.
767   MIRBuilder.setMBB(MBB);
768 
769   return true;
770 }
771 
772 /// Return true if the calling convention is one that we can guarantee TCO for.
773 static bool canGuaranteeTCO(CallingConv::ID CC, bool GuaranteeTailCalls) {
774   return (CC == CallingConv::Fast && GuaranteeTailCalls) ||
775          CC == CallingConv::Tail || CC == CallingConv::SwiftTail;
776 }
777 
778 /// Return true if we might ever do TCO for calls with this calling convention.
779 static bool mayTailCallThisCC(CallingConv::ID CC) {
780   switch (CC) {
781   case CallingConv::C:
782   case CallingConv::PreserveMost:
783   case CallingConv::PreserveAll:
784   case CallingConv::Swift:
785   case CallingConv::SwiftTail:
786   case CallingConv::Tail:
787   case CallingConv::Fast:
788     return true;
789   default:
790     return false;
791   }
792 }
793 
794 /// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
795 /// CC.
796 static std::pair<CCAssignFn *, CCAssignFn *>
797 getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
798   return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
799 }
800 
801 bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
802     CallLoweringInfo &Info, MachineFunction &MF,
803     SmallVectorImpl<ArgInfo> &InArgs) const {
804   const Function &CallerF = MF.getFunction();
805   CallingConv::ID CalleeCC = Info.CallConv;
806   CallingConv::ID CallerCC = CallerF.getCallingConv();
807 
808   // If the calling conventions match, then everything must be the same.
809   if (CalleeCC == CallerCC)
810     return true;
811 
812   // Check if the caller and callee will handle arguments in the same way.
813   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
814   CCAssignFn *CalleeAssignFnFixed;
815   CCAssignFn *CalleeAssignFnVarArg;
816   std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
817       getAssignFnsForCC(CalleeCC, TLI);
818 
819   CCAssignFn *CallerAssignFnFixed;
820   CCAssignFn *CallerAssignFnVarArg;
821   std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
822       getAssignFnsForCC(CallerCC, TLI);
823 
824   AArch64IncomingValueAssigner CalleeAssigner(CalleeAssignFnFixed,
825                                               CalleeAssignFnVarArg);
826   AArch64IncomingValueAssigner CallerAssigner(CallerAssignFnFixed,
827                                               CallerAssignFnVarArg);
828 
829   if (!resultsCompatible(Info, MF, InArgs, CalleeAssigner, CallerAssigner))
830     return false;
831 
832   // Make sure that the caller and callee preserve all of the same registers.
833   auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
834   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
835   const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
836   if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
837     TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
838     TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
839   }
840 
841   return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
842 }
843 
844 bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
845     CallLoweringInfo &Info, MachineFunction &MF,
846     SmallVectorImpl<ArgInfo> &OrigOutArgs) const {
847   // If there are no outgoing arguments, then we are done.
848   if (OrigOutArgs.empty())
849     return true;
850 
851   const Function &CallerF = MF.getFunction();
852   LLVMContext &Ctx = CallerF.getContext();
853   CallingConv::ID CalleeCC = Info.CallConv;
854   CallingConv::ID CallerCC = CallerF.getCallingConv();
855   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
856   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
857 
858   CCAssignFn *AssignFnFixed;
859   CCAssignFn *AssignFnVarArg;
860   std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
861 
862   // We have outgoing arguments. Make sure that we can tail call with them.
863   SmallVector<CCValAssign, 16> OutLocs;
864   CCState OutInfo(CalleeCC, false, MF, OutLocs, Ctx);
865 
866   AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
867                                               Subtarget, /*IsReturn*/ false);
868   // determineAssignments() may modify argument flags, so make a copy.
869   SmallVector<ArgInfo, 8> OutArgs;
870   append_range(OutArgs, OrigOutArgs);
871   if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo)) {
872     LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
873     return false;
874   }
875 
876   // Make sure that they can fit on the caller's stack.
877   const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
878   if (OutInfo.getStackSize() > FuncInfo->getBytesInStackArgArea()) {
879     LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
880     return false;
881   }
882 
883   // Verify that the parameters in callee-saved registers match.
884   // TODO: Port this over to CallLowering as general code once swiftself is
885   // supported.
886   auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
887   const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
888   MachineRegisterInfo &MRI = MF.getRegInfo();
889 
890   if (Info.IsVarArg) {
891     // Be conservative and disallow variadic memory operands to match SDAG's
892     // behaviour.
893     // FIXME: If the caller's calling convention is C, then we can
894     // potentially use its argument area. However, for cases like fastcc,
895     // we can't do anything.
896     for (unsigned i = 0; i < OutLocs.size(); ++i) {
897       auto &ArgLoc = OutLocs[i];
898       if (ArgLoc.isRegLoc())
899         continue;
900 
901       LLVM_DEBUG(
902           dbgs()
903           << "... Cannot tail call vararg function with stack arguments\n");
904       return false;
905     }
906   }
907 
908   return parametersInCSRMatch(MRI, CallerPreservedMask, OutLocs, OutArgs);
909 }
910 
911 bool AArch64CallLowering::isEligibleForTailCallOptimization(
912     MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
913     SmallVectorImpl<ArgInfo> &InArgs,
914     SmallVectorImpl<ArgInfo> &OutArgs) const {
915 
916   // Must pass all target-independent checks in order to tail call optimize.
917   if (!Info.IsTailCall)
918     return false;
919 
920   CallingConv::ID CalleeCC = Info.CallConv;
921   MachineFunction &MF = MIRBuilder.getMF();
922   const Function &CallerF = MF.getFunction();
923 
924   LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");
925 
926   if (Info.SwiftErrorVReg) {
927     // TODO: We should handle this.
928     // Note that this is also handled by the check for no outgoing arguments.
929     // Proactively disabling this though, because the swifterror handling in
930     // lowerCall inserts a COPY *after* the location of the call.
931     LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
932     return false;
933   }
934 
935   if (!mayTailCallThisCC(CalleeCC)) {
936     LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
937     return false;
938   }
939 
940   // Byval parameters hand the function a pointer directly into the stack area
941   // we want to reuse during a tail call. Working around this *is* possible (see
942   // X86).
943   //
944   // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we try
945   // it?
946   //
947   // On Windows, "inreg" attributes signify non-aggregate indirect returns.
948   // In this case, it is necessary to save/restore X0 in the callee. Tail
949   // call opt interferes with this. So we disable tail call opt when the
950   // caller has an argument with "inreg" attribute.
951   //
952   // FIXME: Check whether the callee also has an "inreg" argument.
953   //
954   // When the caller has a swifterror argument, we don't want to tail call
955   // because we would have to move into the swifterror register before the
956   // tail call.
957   if (any_of(CallerF.args(), [](const Argument &A) {
958         return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
959       })) {
960     LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
961                          "inreg, or swifterror arguments\n");
962     return false;
963   }
964 
965   // Externally-defined functions with weak linkage should not be
966   // tail-called on AArch64 when the OS does not support dynamic
967   // pre-emption of symbols, as the AAELF spec requires normal calls
968   // to undefined weak functions to be replaced with a NOP or jump to the
969   // next instruction. The behaviour of branch instructions in this
970   // situation (as used for tail calls) is implementation-defined, so we
971   // cannot rely on the linker replacing the tail call with a return.
972   if (Info.Callee.isGlobal()) {
973     const GlobalValue *GV = Info.Callee.getGlobal();
974     const Triple &TT = MF.getTarget().getTargetTriple();
975     if (GV->hasExternalWeakLinkage() &&
976         (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
977          TT.isOSBinFormatMachO())) {
978       LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
979                            "with weak linkage for this OS.\n");
980       return false;
981     }
982   }
983 
984   // If we have -tailcallopt, then we're done.
985   if (canGuaranteeTCO(CalleeCC, MF.getTarget().Options.GuaranteedTailCallOpt))
986     return CalleeCC == CallerF.getCallingConv();
987 
988   // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
989   // Try to find cases where we can do that.
990 
991   // I want anyone implementing a new calling convention to think long and hard
992   // about this assert.
993   assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
994          "Unexpected variadic calling convention");
995 
996   // Verify that the incoming and outgoing arguments from the callee are
997   // safe to tail call.
998   if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
999     LLVM_DEBUG(
1000         dbgs()
1001         << "... Caller and callee have incompatible calling conventions.\n");
1002     return false;
1003   }
1004 
1005   if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
1006     return false;
1007 
1008   LLVM_DEBUG(
1009       dbgs() << "... Call is eligible for tail call optimization.\n");
1010   return true;
1011 }
1012 
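// Illustrative summary of the helper below: a direct non-tail call uses BL and
// an indirect one a BLR variant; a direct tail call uses TCRETURNdi, and an
// indirect tail call uses TCRETURNri, or TCRETURNriBTI when branch target
// enforcement requires the target to be in x16/x17.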
1013 static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
1014                               bool IsTailCall) {
1015   if (!IsTailCall)
1016     return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;
1017 
1018   if (!IsIndirect)
1019     return AArch64::TCRETURNdi;
1020 
1021   // When BTI is enabled, we need to use TCRETURNriBTI to make sure that we use
1022   // x16 or x17.
1023   if (CallerF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
1024     return AArch64::TCRETURNriBTI;
1025 
1026   return AArch64::TCRETURNri;
1027 }
1028 
1029 static const uint32_t *
1030 getMaskForArgs(SmallVectorImpl<AArch64CallLowering::ArgInfo> &OutArgs,
1031                AArch64CallLowering::CallLoweringInfo &Info,
1032                const AArch64RegisterInfo &TRI, MachineFunction &MF) {
1033   const uint32_t *Mask;
1034   if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {
1035     // For 'this' returns, use the X0-preserving mask if applicable
1036     Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
1037     if (!Mask) {
1038       OutArgs[0].Flags[0].setReturned(false);
1039       Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
1040     }
1041   } else {
1042     Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
1043   }
1044   return Mask;
1045 }
1046 
1047 bool AArch64CallLowering::lowerTailCall(
1048     MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
1049     SmallVectorImpl<ArgInfo> &OutArgs) const {
1050   MachineFunction &MF = MIRBuilder.getMF();
1051   const Function &F = MF.getFunction();
1052   MachineRegisterInfo &MRI = MF.getRegInfo();
1053   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
1054   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
1055 
1056   // True when we're tail calling, but without -tailcallopt.
1057   bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt &&
1058                    Info.CallConv != CallingConv::Tail &&
1059                    Info.CallConv != CallingConv::SwiftTail;
1060 
1061   // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
1062   // register class. Until we can do that, we should fall back here.
1063   if (MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement()) {
1064     LLVM_DEBUG(
1065         dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
1066     return false;
1067   }
1068 
1069   // Find out which ABI gets to decide where things go.
1070   CallingConv::ID CalleeCC = Info.CallConv;
1071   CCAssignFn *AssignFnFixed;
1072   CCAssignFn *AssignFnVarArg;
1073   std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
1074 
1075   MachineInstrBuilder CallSeqStart;
1076   if (!IsSibCall)
1077     CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
1078 
1079   unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
1080   auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
1081   MIB.add(Info.Callee);
1082 
1083   // Byte offset for the tail call. When we are sibcalling, this will always
1084   // be 0.
1085   MIB.addImm(0);
1086 
1087   // Tell the call which registers are clobbered.
1088   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1089   auto TRI = Subtarget.getRegisterInfo();
1090   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
1091   if (Subtarget.hasCustomCallingConv())
1092     TRI->UpdateCustomCallPreservedMask(MF, &Mask);
1093   MIB.addRegMask(Mask);
1094 
1095   if (Info.CFIType)
1096     MIB->setCFIType(MF, Info.CFIType->getZExtValue());
1097 
1098   if (TRI->isAnyArgRegReserved(MF))
1099     TRI->emitReservedArgRegCallError(MF);
1100 
1101   // FPDiff is the byte offset of the call's argument area from the callee's.
1102   // Stores to callee stack arguments will be placed in FixedStackSlots offset
1103   // by this amount for a tail call. In a sibling call it must be 0 because the
1104   // caller will deallocate the entire stack and the callee still expects its
1105   // arguments to begin at SP+0.
1106   int FPDiff = 0;
1107 
1108   // This will be 0 for sibcalls, potentially nonzero for tail calls produced
1109   // by -tailcallopt. For sibcalls, the memory operands for the call are
1110   // already available in the caller's incoming argument space.
1111   unsigned NumBytes = 0;
1112   if (!IsSibCall) {
1113     // We aren't sibcalling, so we need to compute FPDiff. We need to do this
1114     // before handling assignments, because FPDiff must be known for memory
1115     // arguments.
1116     unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1117     SmallVector<CCValAssign, 16> OutLocs;
1118     CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
1119 
1120     AArch64OutgoingValueAssigner CalleeAssigner(AssignFnFixed, AssignFnVarArg,
1121                                                 Subtarget, /*IsReturn*/ false);
1122     if (!determineAssignments(CalleeAssigner, OutArgs, OutInfo))
1123       return false;
1124 
1125     // The callee will pop the argument stack as a tail call. Thus, we must
1126     // keep it 16-byte aligned.
1127     NumBytes = alignTo(OutInfo.getStackSize(), 16);
1128 
1129     // FPDiff will be negative if this tail call requires more space than we
1130     // would automatically have in our incoming argument space. Positive if we
1131     // actually shrink the stack.
1132     FPDiff = NumReusableBytes - NumBytes;
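    // Illustrative example: if the caller reserved 16 bytes of incoming
    // argument space (NumReusableBytes) and this tail call needs 32 bytes of
    // stack-passed arguments, then NumBytes = 32 and FPDiff = -16, so frame
    // lowering must reserve an extra 16 bytes via the update below.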
1133 
1134     // Update the required reserved area if this is the tail call requiring the
1135     // most argument stack space.
1136     if (FPDiff < 0 && FuncInfo->getTailCallReservedStack() < (unsigned)-FPDiff)
1137       FuncInfo->setTailCallReservedStack(-FPDiff);
1138 
1139     // The stack pointer must be 16-byte aligned at all times it's used for a
1140     // memory operation, which in practice means at *all* times and in
1141     // particular across call boundaries. Therefore our own arguments started at
1142     // a 16-byte aligned SP and the delta applied for the tail call should
1143     // satisfy the same constraint.
1144     assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
1145   }
1146 
1147   const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();
1148 
1149   AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
1150                                         Subtarget, /*IsReturn*/ false);
1151 
1152   // Do the actual argument marshalling.
1153   OutgoingArgHandler Handler(MIRBuilder, MRI, MIB,
1154                              /*IsTailCall*/ true, FPDiff);
1155   if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
1156                                      CalleeCC, Info.IsVarArg))
1157     return false;
1158 
1159   Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
1160 
1161   if (Info.IsVarArg && Info.IsMustTailCall) {
1162     // Now we know what's being passed to the function. Add uses to the call for
1163     // the forwarded registers that we *aren't* passing as parameters. This will
1164     // preserve the copies we built earlier.
1165     for (const auto &F : Forwards) {
1166       Register ForwardedReg = F.PReg;
1167       // If the register is already passed, or aliases a register which is
1168       // already being passed, then skip it.
1169       if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
1170             if (!Use.isReg())
1171               return false;
1172             return TRI->regsOverlap(Use.getReg(), ForwardedReg);
1173           }))
1174         continue;
1175 
1176       // We aren't passing it already, so we should add it to the call.
1177       MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
1178       MIB.addReg(ForwardedReg, RegState::Implicit);
1179     }
1180   }
1181 
1182   // If we have -tailcallopt, we need to adjust the stack. We'll do the call
1183   // sequence start and end here.
1184   if (!IsSibCall) {
1185     MIB->getOperand(1).setImm(FPDiff);
1186     CallSeqStart.addImm(0).addImm(0);
1187     // End the call sequence *before* emitting the call. Normally, we would
1188     // tidy the frame up after the call. However, here, we've laid out the
1189     // parameters so that when SP is reset, they will be in the correct
1190     // location.
1191     MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(0).addImm(0);
1192   }
1193 
1194   // Now we can add the actual call instruction to the correct basic block.
1195   MIRBuilder.insertInstr(MIB);
1196 
1197   // If Callee is a reg, since it is used by a target specific instruction,
1198   // it must have a register class matching the constraint of that instruction.
1199   if (MIB->getOperand(0).isReg())
1200     constrainOperandRegClass(MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
1201                              *MF.getSubtarget().getRegBankInfo(), *MIB,
1202                              MIB->getDesc(), MIB->getOperand(0), 0);
1203 
1204   MF.getFrameInfo().setHasTailCall();
1205   Info.LoweredTailCall = true;
1206   return true;
1207 }
1208 
1209 bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
1210                                     CallLoweringInfo &Info) const {
1211   MachineFunction &MF = MIRBuilder.getMF();
1212   const Function &F = MF.getFunction();
1213   MachineRegisterInfo &MRI = MF.getRegInfo();
1214   auto &DL = F.getParent()->getDataLayout();
1215   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
1216   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1217 
1218   // Arm64EC has extra requirements for varargs calls; bail out for now.
1219   //
1220   // Arm64EC has special mangling rules for calls; bail out on all calls for
1221   // now.
1222   if (Subtarget.isWindowsArm64EC())
1223     return false;
1224 
1225   // Arm64EC thunks have a special calling convention which is only implemented
1226   // in SelectionDAG; bail out for now.
1227   if (Info.CallConv == CallingConv::ARM64EC_Thunk_Native ||
1228       Info.CallConv == CallingConv::ARM64EC_Thunk_X64)
1229     return false;
1230 
  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs) {
    splitToValueTypes(OrigArg, OutArgs, DL, Info.CallConv);
    // AAPCS requires the caller to zero-extend i1 arguments to 8 bits.
    auto &Flags = OrigArg.Flags[0];
    if (OrigArg.Ty->isIntegerTy(1) && !Flags.isSExt() && !Flags.isZExt()) {
      ArgInfo &OutArg = OutArgs.back();
      assert(OutArg.Regs.size() == 1 &&
             MRI.getType(OutArg.Regs[0]).getSizeInBits() == 1 &&
             "Unexpected registers used for i1 arg");

      // We cannot use a ZExt ArgInfo flag here, because it will
      // zero-extend the argument to i32 instead of just i8.
      OutArg.Regs[0] =
          MIRBuilder.buildZExt(LLT::scalar(8), OutArg.Regs[0]).getReg(0);
      LLVMContext &Ctx = MF.getFunction().getContext();
      OutArg.Ty = Type::getInt8Ty(Ctx);
    }
  }
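  // Illustrative GlobalISel MIR for the widening above (register names are
  // invented for the example):
  //   %ext:_(s8) = G_ZEXT %flag:_(s1)
  // The outgoing ArgInfo is then retyped as i8 rather than i1.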

  SmallVector<ArgInfo, 8> InArgs;
  if (!Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, Info.CallConv);

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    // There are types of incoming/outgoing arguments we can't handle yet, so
    // it doesn't make sense to actually die here like in ISelLowering. Instead,
    // fall back to SelectionDAG and let it try to handle this.
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  Info.IsTailCall = CanTailCallOpt;
  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MachineInstrBuilder CallSeqStart;
  CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.

  unsigned Opc = 0;
  // Calls with the operand bundle "clang.arc.attachedcall" are special. They
  // should be expanded to the call, directly followed by a special marker
  // sequence and a call to an ObjC library function.
  if (Info.CB && objcarc::hasAttachedCallOpBundle(Info.CB))
    Opc = AArch64::BLR_RVMARKER;
  // A call to a returns-twice function like setjmp must be followed by a BTI
  // instruction.
  else if (Info.CB && Info.CB->hasFnAttr(Attribute::ReturnsTwice) &&
           !Subtarget.noBTIAtReturnTwice() &&
           MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
    Opc = AArch64::BLR_BTI;
  else
    Opc = getCallOpcode(MF, Info.Callee.isReg(), false);
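  // Illustrative IR that would take the BLR_RVMARKER path above (the callee
  // name is invented for the example):
  //   %r = call ptr @foo() [ "clang.arc.attachedcall"(
  //                          ptr @objc_retainAutoreleasedReturnValue) ]
  // The attached ObjC runtime function is added as an extra operand below so
  // that the marker sequence can be emitted immediately after the call.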

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  unsigned CalleeOpNo = 0;

  if (Opc == AArch64::BLR_RVMARKER) {
    // Add a target global address for the retainRV/claimRV runtime function
    // just before the call target.
    Function *ARCFn = *objcarc::getAttachedARCFunction(Info.CB);
    MIB.addGlobalAddress(ARCFn);
    ++CalleeOpNo;
  } else if (Info.CFIType) {
    MIB->setCFIType(MF, Info.CFIType->getZExtValue());
  }

  MIB.add(Info.Callee);

  // Tell the call which registers are clobbered.
  const uint32_t *Mask;
  const auto *TRI = Subtarget.getRegisterInfo();

  AArch64OutgoingValueAssigner Assigner(AssignFnFixed, AssignFnVarArg,
                                        Subtarget, /*IsReturn*/ false);
  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, /*IsTailCall*/ false);
  if (!determineAndHandleAssignments(Handler, Assigner, OutArgs, MIRBuilder,
                                     Info.CallConv, Info.IsVarArg))
    return false;

  Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);

  if (Subtarget.hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  uint64_t CalleePopBytes =
      doesCalleeRestoreStack(Info.CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt)
          ? alignTo(Assigner.StackSize, 16)
          : 0;

  CallSeqStart.addImm(Assigner.StackSize).addImm(0);
  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
      .addImm(Assigner.StackSize)
      .addImm(CalleePopBytes);
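  // A note on the ADJCALLSTACKUP immediates above: the first is the number of
  // bytes of stack argument space set up for this call (Assigner.StackSize),
  // and the second is the number of bytes the callee itself pops on return.
  // CalleePopBytes is nonzero only under -tailcallopt (GuaranteedTailCallOpt)
  // with a calling convention where the callee restores the stack, and is the
  // argument area rounded up to a 16-byte multiple.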

  // If Callee is a register, it is used by a target-specific instruction and
  // must therefore have a register class matching that instruction's
  // constraint.
  if (MIB->getOperand(CalleeOpNo).isReg())
    constrainOperandRegClass(MF, *TRI, MRI, *Subtarget.getInstrInfo(),
                             *Subtarget.getRegBankInfo(), *MIB, MIB->getDesc(),
                             MIB->getOperand(CalleeOpNo), CalleeOpNo);

  // Finally we can copy the returned value back into its virtual register. In
  // symmetry with the arguments, the physical register must be an implicit
  // define of the call instruction.
  if (Info.CanLowerReturn && !Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB);
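    // If the first outgoing argument carries the 'returned' attribute, the
    // callee promises to return that same value, so the special handler below
    // can wire the call's result to the argument registers instead of using
    // the ordinary return-value copy.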
    bool UsingReturnedArg =
        !OutArgs.empty() && OutArgs[0].Flags[0].isReturned();

    AArch64OutgoingValueAssigner Assigner(RetAssignFn, RetAssignFn, Subtarget,
                                          /*IsReturn*/ false);
    ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB);
    if (!determineAndHandleAssignments(
            UsingReturnedArg ? ReturnedArgHandler : Handler, Assigner, InArgs,
            MIRBuilder, Info.CallConv, Info.IsVarArg,
            UsingReturnedArg ? ArrayRef(OutArgs[0].Regs) : std::nullopt))
      return false;
  }

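  // On AArch64, swifterror values travel in X21, so the call implicitly
  // defines X21 and the updated value is copied back out afterwards.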
  if (Info.SwiftErrorVReg) {
    MIB.addDef(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
  }

  if (!Info.CanLowerReturn) {
    insertSRetLoads(MIRBuilder, Info.OrigRet.Ty, Info.OrigRet.Regs,
                    Info.DemoteRegister, Info.DemoteStackIndex);
  }
  return true;
}

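// This hook presumably lets the generic CallLowering code decide whether the
// 'returned' parameter attribute can be honored for a given type; on AArch64
// only 64-bit values qualify.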
bool AArch64CallLowering::isTypeIsValidForThisReturn(EVT Ty) const {
  return Ty.getSizeInBits() == 64;
}