1 //===-- VEInstrInfo.cpp - VE Instruction Information ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the VE implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "VEInstrInfo.h"
14 #include "VE.h"
15 #include "VEMachineFunctionInfo.h"
16 #include "VESubtarget.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/CodeGen/MachineFrameInfo.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineMemOperand.h"
22 #include "llvm/CodeGen/MachineRegisterInfo.h"
23 #include "llvm/Support/CommandLine.h"
24 #include "llvm/Support/Debug.h"
25 #include "llvm/Support/ErrorHandling.h"
26 #include "llvm/Support/TargetRegistry.h"
27
28 #define DEBUG_TYPE "ve-instr-info"
29
30 using namespace llvm;
31
32 #define GET_INSTRINFO_CTOR_DTOR
33 #include "VEGenInstrInfo.inc"
34
// Pin the vtable to this file.  Defining one out-of-line virtual method here
// gives the compiler a single home TU for VEInstrInfo's vtable instead of
// emitting a copy in every translation unit that uses the class.
void VEInstrInfo::anchor() {}
37
// Construct the VE TargetInstrInfo.  The two opcodes passed to the generated
// base class identify the call-frame setup/destroy pseudos so that generic
// code can recognize call-sequence boundaries.
VEInstrInfo::VEInstrInfo(VESubtarget &ST)
    : VEGenInstrInfo(VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI() {}
40
IsIntegerCC(unsigned CC)41 static bool IsIntegerCC(unsigned CC) { return (CC < VECC::CC_AF); }
42
GetOppositeBranchCondition(VECC::CondCode CC)43 static VECC::CondCode GetOppositeBranchCondition(VECC::CondCode CC) {
44 switch (CC) {
45 case VECC::CC_IG:
46 return VECC::CC_ILE;
47 case VECC::CC_IL:
48 return VECC::CC_IGE;
49 case VECC::CC_INE:
50 return VECC::CC_IEQ;
51 case VECC::CC_IEQ:
52 return VECC::CC_INE;
53 case VECC::CC_IGE:
54 return VECC::CC_IL;
55 case VECC::CC_ILE:
56 return VECC::CC_IG;
57 case VECC::CC_AF:
58 return VECC::CC_AT;
59 case VECC::CC_G:
60 return VECC::CC_LENAN;
61 case VECC::CC_L:
62 return VECC::CC_GENAN;
63 case VECC::CC_NE:
64 return VECC::CC_EQNAN;
65 case VECC::CC_EQ:
66 return VECC::CC_NENAN;
67 case VECC::CC_GE:
68 return VECC::CC_LNAN;
69 case VECC::CC_LE:
70 return VECC::CC_GNAN;
71 case VECC::CC_NUM:
72 return VECC::CC_NAN;
73 case VECC::CC_NAN:
74 return VECC::CC_NUM;
75 case VECC::CC_GNAN:
76 return VECC::CC_LE;
77 case VECC::CC_LNAN:
78 return VECC::CC_GE;
79 case VECC::CC_NENAN:
80 return VECC::CC_EQ;
81 case VECC::CC_EQNAN:
82 return VECC::CC_NE;
83 case VECC::CC_GENAN:
84 return VECC::CC_L;
85 case VECC::CC_LENAN:
86 return VECC::CC_G;
87 case VECC::CC_AT:
88 return VECC::CC_AF;
89 case VECC::UNKNOWN:
90 return VECC::UNKNOWN;
91 }
92 llvm_unreachable("Invalid cond code");
93 }
94
95 // Treat a branch relative long always instruction as unconditional branch.
96 // For example, br.l.t and br.l.
isUncondBranchOpcode(int Opc)97 static bool isUncondBranchOpcode(int Opc) {
98 using namespace llvm::VE;
99
100 #define BRKIND(NAME) (Opc == NAME##a || Opc == NAME##a_nt || Opc == NAME##a_t)
101 // VE has other branch relative always instructions for word/double/float,
102 // but we use only long branches in our lower. So, sanity check it here.
103 assert(!BRKIND(BRCFW) && !BRKIND(BRCFD) && !BRKIND(BRCFS) &&
104 "Branch relative word/double/float always instructions should not be "
105 "used!");
106 return BRKIND(BRCFL);
107 #undef BRKIND
108 }
109
110 // Treat branch relative conditional as conditional branch instructions.
111 // For example, brgt.l.t and brle.s.nt.
isCondBranchOpcode(int Opc)112 static bool isCondBranchOpcode(int Opc) {
113 using namespace llvm::VE;
114
115 #define BRKIND(NAME) \
116 (Opc == NAME##rr || Opc == NAME##rr_nt || Opc == NAME##rr_t || \
117 Opc == NAME##ir || Opc == NAME##ir_nt || Opc == NAME##ir_t)
118 return BRKIND(BRCFL) || BRKIND(BRCFW) || BRKIND(BRCFD) || BRKIND(BRCFS);
119 #undef BRKIND
120 }
121
122 // Treat branch long always instructions as indirect branch.
123 // For example, b.l.t and b.l.
isIndirectBranchOpcode(int Opc)124 static bool isIndirectBranchOpcode(int Opc) {
125 using namespace llvm::VE;
126
127 #define BRKIND(NAME) \
128 (Opc == NAME##ari || Opc == NAME##ari_nt || Opc == NAME##ari_t)
129 // VE has other branch always instructions for word/double/float, but
130 // we use only long branches in our lower. So, sanity check it here.
131 assert(!BRKIND(BCFW) && !BRKIND(BCFD) && !BRKIND(BCFS) &&
132 "Branch word/double/float always instructions should not be used!");
133 return BRKIND(BCFL);
134 #undef BRKIND
135 }
136
parseCondBranch(MachineInstr * LastInst,MachineBasicBlock * & Target,SmallVectorImpl<MachineOperand> & Cond)137 static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
138 SmallVectorImpl<MachineOperand> &Cond) {
139 Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(0).getImm()));
140 Cond.push_back(LastInst->getOperand(1));
141 Cond.push_back(LastInst->getOperand(2));
142 Target = LastInst->getOperand(3).getMBB();
143 }
144
// Analyze the terminators of MBB per the TargetInstrInfo contract: fill in
// TBB (taken destination), FBB (fall-through destination), and Cond (the
// 3-operand VE condition).  Returns false when the terminators were
// understood, true when they cannot be analyzed.  With AllowModify set,
// dead trailing branches may be deleted as a side effect.
bool VEInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false; // Empty block: trivially falls through.

  if (!isUnpredicatedTerminator(*I))
    return false; // No terminators: block falls through.

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  unsigned LastOpc = LastInst->getOpcode();

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
    if (isUncondBranchOpcode(LastOpc)) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    if (isCondBranchOpcode(LastOpc)) {
      // Block ends with fall-through condbranch.
      parseCondBranch(LastInst, TBB, Cond);
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr *SecondLastInst = &*I;
  unsigned SecondLastOpc = SecondLastInst->getOpcode();

  // If AllowModify is true and the block ends with two or more unconditional
  // branches, delete all but the first unconditional branch.
  if (AllowModify && isUncondBranchOpcode(LastOpc)) {
    while (isUncondBranchOpcode(SecondLastOpc)) {
      LastInst->eraseFromParent();
      LastInst = SecondLastInst;
      LastOpc = LastInst->getOpcode();
      if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
        // Return now; the only remaining terminator is an unconditional
        // branch.
        TBB = LastInst->getOperand(0).getMBB();
        return false;
      }
      SecondLastInst = &*I;
      SecondLastOpc = SecondLastInst->getOpcode();
    }
  }

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
    return true;

  // If the block ends with a B and a Bcc, handle it.
  if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    parseCondBranch(SecondLastInst, TBB, Cond);
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two unconditional branches, handle it. The second
  // one is not executed.
  if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    return false;
  }

  // ...likewise if it ends with an indirect branch followed by an unconditional
  // branch.
  if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return true;
  }

  // Otherwise, can't handle this.
  return true;
}
225
// Insert branch code at the end of MBB.  With an empty Cond this emits a
// single unconditional branch to TBB.  Otherwise it emits one conditional
// branch to TBB — the opcode is selected from the condition kind
// (integer vs. floating) and the width of the compared register — plus an
// unconditional branch to FBB when a false destination is supplied.
// Returns the number of instructions inserted (1 or 2).
unsigned VEInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL, int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "VE branch conditions should have three component!");
  assert(!BytesAdded && "code size not handled");
  if (Cond.empty()) {
    // Unconditional branch.
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(VE::BRCFLa_t))
        .addMBB(TBB);
    return 1;
  }

  // Conditional branch
  //   (BRCFir CC sy sz addr)
  assert(Cond[0].isImm() && Cond[2].isReg() && "not implemented");

  // opc[0] is the immediate-LHS ("ir") form, opc[1] the register-register
  // ("rr") form of the selected compare-and-branch.
  unsigned opc[2];
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  MachineFunction *MF = MBB.getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Reg = Cond[2].getReg();
  if (IsIntegerCC(Cond[0].getImm())) {
    // Integer condition: BRCFW for 32-bit registers, BRCFL otherwise.
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFWir;
      opc[1] = VE::BRCFWrr;
    } else {
      opc[0] = VE::BRCFLir;
      opc[1] = VE::BRCFLrr;
    }
  } else {
    // Floating condition: BRCFS for 32-bit registers, BRCFD otherwise.
    if (TRI->getRegSizeInBits(Reg, MRI) == 32) {
      opc[0] = VE::BRCFSir;
      opc[1] = VE::BRCFSrr;
    } else {
      opc[0] = VE::BRCFDir;
      opc[1] = VE::BRCFDrr;
    }
  }
  if (Cond[1].isImm()) {
    BuildMI(&MBB, DL, get(opc[0]))
        .add(Cond[0]) // condition code
        .add(Cond[1]) // lhs
        .add(Cond[2]) // rhs
        .addMBB(TBB);
  } else {
    BuildMI(&MBB, DL, get(opc[1]))
        .add(Cond[0])
        .add(Cond[1])
        .add(Cond[2])
        .addMBB(TBB);
  }

  if (!FBB)
    return 1;

  // FBB is not the natural fall-through: branch to it explicitly.
  BuildMI(&MBB, DL, get(VE::BRCFLa_t))
      .addMBB(FBB);
  return 2;
}
290
// Remove the branch instructions at the end of MBB and return how many were
// deleted.  Debug values are skipped; after each erase the scan restarts
// from the block end since the erased iterator is no longer valid.
// BytesRemoved is unsupported (asserted null).
unsigned VEInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;
  while (I != MBB.begin()) {
    --I;

    if (I->isDebugValue())
      continue;

    if (!isUncondBranchOpcode(I->getOpcode()) &&
        !isCondBranchOpcode(I->getOpcode()))
      break; // Not a branch

    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }
  return Count;
}
313
reverseBranchCondition(SmallVectorImpl<MachineOperand> & Cond) const314 bool VEInstrInfo::reverseBranchCondition(
315 SmallVectorImpl<MachineOperand> &Cond) const {
316 VECC::CondCode CC = static_cast<VECC::CondCode>(Cond[0].getImm());
317 Cond[0].setImm(GetOppositeBranchCondition(CC));
318 return false;
319 }
320
IsAliasOfSX(Register Reg)321 static bool IsAliasOfSX(Register Reg) {
322 return VE::I32RegClass.contains(Reg) || VE::I64RegClass.contains(Reg) ||
323 VE::F32RegClass.contains(Reg);
324 }
325
// Copy SrcReg to DestReg one sub-register at a time, using MCID as the
// per-half copy instruction (ORri for scalar halves, ANDMmm for vector-mask
// halves).  The last emitted copy is tagged with an implicit def of the full
// DestReg — and an implicit kill of the full SrcReg when requested — so that
// super-register liveness remains correct.
static void copyPhysSubRegs(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I, const DebugLoc &DL,
                            MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                            const MCInstrDesc &MCID, unsigned int NumSubRegs,
                            const unsigned *SubRegIdx,
                            const TargetRegisterInfo *TRI) {
  MachineInstr *MovMI = nullptr;

  for (unsigned Idx = 0; Idx != NumSubRegs; ++Idx) {
    Register SubDest = TRI->getSubReg(DestReg, SubRegIdx[Idx]);
    Register SubSrc = TRI->getSubReg(SrcReg, SubRegIdx[Idx]);
    assert(SubDest && SubSrc && "Bad sub-register");

    if (MCID.getOpcode() == VE::ORri) {
      // generate "ORri, dest, src, 0" instruction (OR with zero as a move).
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(SubSrc).addImm(0);
      MovMI = MIB.getInstr();
    } else if (MCID.getOpcode() == VE::ANDMmm) {
      // generate "ANDM, dest, vm0, src" instruction (AND with VM0 as a move).
      MachineInstrBuilder MIB =
          BuildMI(MBB, I, DL, MCID, SubDest).addReg(VE::VM0).addReg(SubSrc);
      MovMI = MIB.getInstr();
    } else {
      llvm_unreachable("Unexpected reg-to-reg copy instruction");
    }
  }
  // Add implicit super-register defs and kills to the last MovMI.
  MovMI->addRegisterDefined(DestReg, TRI);
  if (KillSrc)
    MovMI->addRegisterKilled(SrcReg, TRI, true);
}
358
// Emit a physical register copy DestReg <- SrcReg, choosing the instruction
// sequence by register class: a single ORri move for scalars, a VOR with a
// temporary vector length for V64 vector registers, ANDM for single masks,
// and a per-half expansion for VM512 mask pairs and F128 scalar pairs.
void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I, const DebugLoc &DL,
                              MCRegister DestReg, MCRegister SrcReg,
                              bool KillSrc) const {

  if (IsAliasOfSX(SrcReg) && IsAliasOfSX(DestReg)) {
    // Scalar copy: "OR dest, src, 0".
    BuildMI(MBB, I, DL, get(VE::ORri), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
  } else if (VE::V64RegClass.contains(DestReg, SrcReg)) {
    // Generate following instructions
    //   %sw16 = LEAzii 0, 0, 256   // materialize max vector length
    //   VORmvl %dest, (0)1, %src, %sw16
    // TODO: reuse a register if vl is already assigned to a register
    // FIXME: it would be better to scavenge a register here instead of
    // reserving SX16 all of the time.
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    Register TmpReg = VE::SX16;
    Register SubTmp = TRI->getSubReg(TmpReg, VE::sub_i32);
    BuildMI(MBB, I, DL, get(VE::LEAzii), TmpReg)
        .addImm(0)
        .addImm(0)
        .addImm(256);
    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(VE::VORmvl), DestReg)
                                  .addImm(M1(0)) // Represent (0)1.
                                  .addReg(SrcReg, getKillRegState(KillSrc))
                                  .addReg(SubTmp, getKillRegState(true));
    MIB.getInstr()->addRegisterKilled(TmpReg, TRI, true);
  } else if (VE::VMRegClass.contains(DestReg, SrcReg)) {
    // Mask copy: "ANDM dest, vm0, src".
    BuildMI(MBB, I, DL, get(VE::ANDMmm), DestReg)
        .addReg(VE::VM0)
        .addReg(SrcReg, getKillRegState(KillSrc));
  } else if (VE::VM512RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions, one ANDM per VM half of the 512-bit pair.
    const unsigned SubRegIdx[] = {VE::sub_vm_even, VE::sub_vm_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ANDMmm),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else if (VE::F128RegClass.contains(DestReg, SrcReg)) {
    // Use two instructions, one ORri move per 64-bit half of the f128 pair.
    const unsigned SubRegIdx[] = {VE::sub_even, VE::sub_odd};
    unsigned int NumSubRegs = 2;
    copyPhysSubRegs(MBB, I, DL, DestReg, SrcReg, KillSrc, get(VE::ORri),
                    NumSubRegs, SubRegIdx, &getRegisterInfo());
  } else {
    const TargetRegisterInfo *TRI = &getRegisterInfo();
    dbgs() << "Impossible reg-to-reg copy from " << printReg(SrcReg, TRI)
           << " to " << printReg(DestReg, TRI) << "\n";
    llvm_unreachable("Impossible reg-to-reg copy");
  }
}
410
411 /// isLoadFromStackSlot - If the specified machine instruction is a direct
412 /// load from a stack slot, return the virtual or physical register number of
413 /// the destination along with the FrameIndex of the loaded stack slot. If
414 /// not, return 0. This predicate must return 0 if the instruction has
415 /// any side effects other than loading from the stack slot.
isLoadFromStackSlot(const MachineInstr & MI,int & FrameIndex) const416 unsigned VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
417 int &FrameIndex) const {
418 if (MI.getOpcode() == VE::LDrii || // I64
419 MI.getOpcode() == VE::LDLSXrii || // I32
420 MI.getOpcode() == VE::LDUrii || // F32
421 MI.getOpcode() == VE::LDQrii // F128 (pseudo)
422 ) {
423 if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
424 MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
425 MI.getOperand(3).getImm() == 0) {
426 FrameIndex = MI.getOperand(1).getIndex();
427 return MI.getOperand(0).getReg();
428 }
429 }
430 return 0;
431 }
432
433 /// isStoreToStackSlot - If the specified machine instruction is a direct
434 /// store to a stack slot, return the virtual or physical register number of
435 /// the source reg along with the FrameIndex of the loaded stack slot. If
436 /// not, return 0. This predicate must return 0 if the instruction has
437 /// any side effects other than storing to the stack slot.
isStoreToStackSlot(const MachineInstr & MI,int & FrameIndex) const438 unsigned VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
439 int &FrameIndex) const {
440 if (MI.getOpcode() == VE::STrii || // I64
441 MI.getOpcode() == VE::STLrii || // I32
442 MI.getOpcode() == VE::STUrii || // F32
443 MI.getOpcode() == VE::STQrii // F128 (pseudo)
444 ) {
445 if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
446 MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
447 MI.getOperand(2).getImm() == 0) {
448 FrameIndex = MI.getOperand(0).getIndex();
449 return MI.getOperand(3).getReg();
450 }
451 }
452 return 0;
453 }
454
storeRegToStackSlot(MachineBasicBlock & MBB,MachineBasicBlock::iterator I,Register SrcReg,bool isKill,int FI,const TargetRegisterClass * RC,const TargetRegisterInfo * TRI) const455 void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
456 MachineBasicBlock::iterator I,
457 Register SrcReg, bool isKill, int FI,
458 const TargetRegisterClass *RC,
459 const TargetRegisterInfo *TRI) const {
460 DebugLoc DL;
461 if (I != MBB.end())
462 DL = I->getDebugLoc();
463
464 MachineFunction *MF = MBB.getParent();
465 const MachineFrameInfo &MFI = MF->getFrameInfo();
466 MachineMemOperand *MMO = MF->getMachineMemOperand(
467 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
468 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
469
470 // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
471 if (RC == &VE::I64RegClass) {
472 BuildMI(MBB, I, DL, get(VE::STrii))
473 .addFrameIndex(FI)
474 .addImm(0)
475 .addImm(0)
476 .addReg(SrcReg, getKillRegState(isKill))
477 .addMemOperand(MMO);
478 } else if (RC == &VE::I32RegClass) {
479 BuildMI(MBB, I, DL, get(VE::STLrii))
480 .addFrameIndex(FI)
481 .addImm(0)
482 .addImm(0)
483 .addReg(SrcReg, getKillRegState(isKill))
484 .addMemOperand(MMO);
485 } else if (RC == &VE::F32RegClass) {
486 BuildMI(MBB, I, DL, get(VE::STUrii))
487 .addFrameIndex(FI)
488 .addImm(0)
489 .addImm(0)
490 .addReg(SrcReg, getKillRegState(isKill))
491 .addMemOperand(MMO);
492 } else if (VE::F128RegClass.hasSubClassEq(RC)) {
493 BuildMI(MBB, I, DL, get(VE::STQrii))
494 .addFrameIndex(FI)
495 .addImm(0)
496 .addImm(0)
497 .addReg(SrcReg, getKillRegState(isKill))
498 .addMemOperand(MMO);
499 } else
500 report_fatal_error("Can't store this register to stack slot");
501 }
502
loadRegFromStackSlot(MachineBasicBlock & MBB,MachineBasicBlock::iterator I,Register DestReg,int FI,const TargetRegisterClass * RC,const TargetRegisterInfo * TRI) const503 void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
504 MachineBasicBlock::iterator I,
505 Register DestReg, int FI,
506 const TargetRegisterClass *RC,
507 const TargetRegisterInfo *TRI) const {
508 DebugLoc DL;
509 if (I != MBB.end())
510 DL = I->getDebugLoc();
511
512 MachineFunction *MF = MBB.getParent();
513 const MachineFrameInfo &MFI = MF->getFrameInfo();
514 MachineMemOperand *MMO = MF->getMachineMemOperand(
515 MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
516 MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
517
518 if (RC == &VE::I64RegClass) {
519 BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
520 .addFrameIndex(FI)
521 .addImm(0)
522 .addImm(0)
523 .addMemOperand(MMO);
524 } else if (RC == &VE::I32RegClass) {
525 BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
526 .addFrameIndex(FI)
527 .addImm(0)
528 .addImm(0)
529 .addMemOperand(MMO);
530 } else if (RC == &VE::F32RegClass) {
531 BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
532 .addFrameIndex(FI)
533 .addImm(0)
534 .addImm(0)
535 .addMemOperand(MMO);
536 } else if (VE::F128RegClass.hasSubClassEq(RC)) {
537 BuildMI(MBB, I, DL, get(VE::LDQrii), DestReg)
538 .addFrameIndex(FI)
539 .addImm(0)
540 .addImm(0)
541 .addMemOperand(MMO);
542 } else
543 report_fatal_error("Can't load this register from stack slot");
544 }
545
FoldImmediate(MachineInstr & UseMI,MachineInstr & DefMI,Register Reg,MachineRegisterInfo * MRI) const546 bool VEInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
547 Register Reg, MachineRegisterInfo *MRI) const {
548 LLVM_DEBUG(dbgs() << "FoldImmediate\n");
549
550 LLVM_DEBUG(dbgs() << "checking DefMI\n");
551 int64_t ImmVal;
552 switch (DefMI.getOpcode()) {
553 default:
554 return false;
555 case VE::ORim:
556 // General move small immediate instruction on VE.
557 LLVM_DEBUG(dbgs() << "checking ORim\n");
558 LLVM_DEBUG(DefMI.dump());
559 // FIXME: We may need to support FPImm too.
560 assert(DefMI.getOperand(1).isImm());
561 assert(DefMI.getOperand(2).isImm());
562 ImmVal =
563 DefMI.getOperand(1).getImm() + mimm2Val(DefMI.getOperand(2).getImm());
564 LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
565 break;
566 case VE::LEAzii:
567 // General move immediate instruction on VE.
568 LLVM_DEBUG(dbgs() << "checking LEAzii\n");
569 LLVM_DEBUG(DefMI.dump());
570 // FIXME: We may need to support FPImm too.
571 assert(DefMI.getOperand(2).isImm());
572 if (!DefMI.getOperand(3).isImm())
573 // LEAzii may refer label
574 return false;
575 ImmVal = DefMI.getOperand(2).getImm() + DefMI.getOperand(3).getImm();
576 LLVM_DEBUG(dbgs() << "ImmVal is " << ImmVal << "\n");
577 break;
578 }
579
580 // Try to fold like below:
581 // %1:i64 = ORim 0, 0(1)
582 // %2:i64 = CMPSLrr %0, %1
583 // To
584 // %2:i64 = CMPSLrm %0, 0(1)
585 //
586 // Another example:
587 // %1:i64 = ORim 6, 0(1)
588 // %2:i64 = CMPSLrr %1, %0
589 // To
590 // %2:i64 = CMPSLir 6, %0
591 //
592 // Support commutable instructions like below:
593 // %1:i64 = ORim 6, 0(1)
594 // %2:i64 = ADDSLrr %1, %0
595 // To
596 // %2:i64 = ADDSLri %0, 6
597 //
598 // FIXME: Need to support i32. Current implementtation requires
599 // EXTRACT_SUBREG, so input has following COPY and it avoids folding:
600 // %1:i64 = ORim 6, 0(1)
601 // %2:i32 = COPY %1.sub_i32
602 // %3:i32 = ADDSWSXrr %0, %2
603 // FIXME: Need to support shift, cmov, and more instructions.
604 // FIXME: Need to support lvl too, but LVLGen runs after peephole-opt.
605
606 LLVM_DEBUG(dbgs() << "checking UseMI\n");
607 LLVM_DEBUG(UseMI.dump());
608 unsigned NewUseOpcSImm7;
609 unsigned NewUseOpcMImm;
610 enum InstType {
611 rr2ri_rm, // rr -> ri or rm, commutable
612 rr2ir_rm, // rr -> ir or rm
613 } InstType;
614
615 using namespace llvm::VE;
616 #define INSTRKIND(NAME) \
617 case NAME##rr: \
618 NewUseOpcSImm7 = NAME##ri; \
619 NewUseOpcMImm = NAME##rm; \
620 InstType = rr2ri_rm; \
621 break
622 #define NCINSTRKIND(NAME) \
623 case NAME##rr: \
624 NewUseOpcSImm7 = NAME##ir; \
625 NewUseOpcMImm = NAME##rm; \
626 InstType = rr2ir_rm; \
627 break
628
629 switch (UseMI.getOpcode()) {
630 default:
631 return false;
632
633 INSTRKIND(ADDUL);
634 INSTRKIND(ADDSWSX);
635 INSTRKIND(ADDSWZX);
636 INSTRKIND(ADDSL);
637 NCINSTRKIND(SUBUL);
638 NCINSTRKIND(SUBSWSX);
639 NCINSTRKIND(SUBSWZX);
640 NCINSTRKIND(SUBSL);
641 INSTRKIND(MULUL);
642 INSTRKIND(MULSWSX);
643 INSTRKIND(MULSWZX);
644 INSTRKIND(MULSL);
645 NCINSTRKIND(DIVUL);
646 NCINSTRKIND(DIVSWSX);
647 NCINSTRKIND(DIVSWZX);
648 NCINSTRKIND(DIVSL);
649 NCINSTRKIND(CMPUL);
650 NCINSTRKIND(CMPSWSX);
651 NCINSTRKIND(CMPSWZX);
652 NCINSTRKIND(CMPSL);
653 INSTRKIND(MAXSWSX);
654 INSTRKIND(MAXSWZX);
655 INSTRKIND(MAXSL);
656 INSTRKIND(MINSWSX);
657 INSTRKIND(MINSWZX);
658 INSTRKIND(MINSL);
659 INSTRKIND(AND);
660 INSTRKIND(OR);
661 INSTRKIND(XOR);
662 INSTRKIND(EQV);
663 NCINSTRKIND(NND);
664 NCINSTRKIND(MRG);
665 }
666
667 #undef INSTRKIND
668
669 unsigned NewUseOpc;
670 unsigned UseIdx;
671 bool Commute = false;
672 LLVM_DEBUG(dbgs() << "checking UseMI operands\n");
673 switch (InstType) {
674 case rr2ri_rm:
675 UseIdx = 2;
676 if (UseMI.getOperand(1).getReg() == Reg) {
677 Commute = true;
678 } else {
679 assert(UseMI.getOperand(2).getReg() == Reg);
680 }
681 if (isInt<7>(ImmVal)) {
682 // This ImmVal matches to SImm7 slot, so change UseOpc to an instruction
683 // holds a simm7 slot.
684 NewUseOpc = NewUseOpcSImm7;
685 } else if (isMImmVal(ImmVal)) {
686 // Similarly, change UseOpc to an instruction holds a mimm slot.
687 NewUseOpc = NewUseOpcMImm;
688 ImmVal = val2MImm(ImmVal);
689 } else
690 return false;
691 break;
692 case rr2ir_rm:
693 if (UseMI.getOperand(1).getReg() == Reg) {
694 // Check immediate value whether it matchs to the UseMI instruction.
695 if (!isInt<7>(ImmVal))
696 return false;
697 NewUseOpc = NewUseOpcSImm7;
698 UseIdx = 1;
699 } else {
700 assert(UseMI.getOperand(2).getReg() == Reg);
701 // Check immediate value whether it matchs to the UseMI instruction.
702 if (!isMImmVal(ImmVal))
703 return false;
704 NewUseOpc = NewUseOpcMImm;
705 ImmVal = val2MImm(ImmVal);
706 UseIdx = 2;
707 }
708 break;
709 }
710
711 LLVM_DEBUG(dbgs() << "modifying UseMI\n");
712 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
713 UseMI.setDesc(get(NewUseOpc));
714 if (Commute) {
715 UseMI.getOperand(1).setReg(UseMI.getOperand(UseIdx).getReg());
716 }
717 UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
718 if (DeleteDef)
719 DefMI.eraseFromParent();
720
721 return true;
722 }
723
getGlobalBaseReg(MachineFunction * MF) const724 Register VEInstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
725 VEMachineFunctionInfo *VEFI = MF->getInfo<VEMachineFunctionInfo>();
726 Register GlobalBaseReg = VEFI->getGlobalBaseReg();
727 if (GlobalBaseReg != 0)
728 return GlobalBaseReg;
729
730 // We use %s15 (%got) as a global base register
731 GlobalBaseReg = VE::SX15;
732
733 // Insert a pseudo instruction to set the GlobalBaseReg into the first
734 // MBB of the function
735 MachineBasicBlock &FirstMBB = MF->front();
736 MachineBasicBlock::iterator MBBI = FirstMBB.begin();
737 DebugLoc dl;
738 BuildMI(FirstMBB, MBBI, dl, get(VE::GETGOT), GlobalBaseReg);
739 VEFI->setGlobalBaseReg(GlobalBaseReg);
740 return GlobalBaseReg;
741 }
742
getVM512Upper(Register reg)743 static Register getVM512Upper(Register reg) {
744 return (reg - VE::VMP0) * 2 + VE::VM0;
745 }
746
getVM512Lower(Register reg)747 static Register getVM512Lower(Register reg) { return getVM512Upper(reg) + 1; }
748
// Expand pseudo logical vector instructions for VM512 registers.
// A VM512 pseudo (ANDMyy, ORMyy, ...) is split into two copies of the real
// VM instruction described by MCID: one on the upper VM halves and one on
// the lower halves of every VM512 operand.  NEGMy is unary, so it is the
// only opcode without a third operand to split.
static void expandPseudoLogM(MachineInstr &MI, const MCInstrDesc &MCID) {
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();

  Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
  Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
  Register VMYu = getVM512Upper(MI.getOperand(1).getReg());
  Register VMYl = getVM512Lower(MI.getOperand(1).getReg());

  switch (MI.getOpcode()) {
  default: {
    // Binary operation: split the second source operand too.
    Register VMZu = getVM512Upper(MI.getOperand(2).getReg());
    Register VMZl = getVM512Lower(MI.getOperand(2).getReg());
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu).addUse(VMZu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl).addUse(VMZl);
    break;
  }
  case VE::NEGMy:
    // Unary operation: destination and a single source per half.
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXu).addUse(VMYu);
    BuildMI(*MBB, MI, DL, MCID).addDef(VMXl).addUse(VMYl);
    break;
  }
  MI.eraseFromParent();
}
774
// Append the operands of the pseudo VFMK instruction MI to MIB, rewriting
// every VM512 operand to its upper or lower VM half as selected by Upper.
// The operand layout of the pseudo is identified by its explicit-operand
// count (the three supported layouts are listed on the cases below).
static void addOperandsForVFMK(MachineInstrBuilder &MIB, MachineInstr &MI,
                               bool Upper) {
  // VM512 (destination mask, split into the requested half)
  MIB.addReg(Upper ? getVM512Upper(MI.getOperand(0).getReg())
                   : getVM512Lower(MI.getOperand(0).getReg()));

  switch (MI.getNumExplicitOperands()) {
  default:
    report_fatal_error("unexpected number of operands for pvfmk");
  case 2: // _Ml: VM512, VL
    // VL
    MIB.addReg(MI.getOperand(1).getReg());
    break;
  case 4: // _Mvl: VM512, CC, VR, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VL
    MIB.addReg(MI.getOperand(3).getReg());
    break;
  case 5: // _MvMl: VM512, CC, VR, VM512, VL
    // CC
    MIB.addImm(MI.getOperand(1).getImm());
    // VR
    MIB.addReg(MI.getOperand(2).getReg());
    // VM512 (mask input, split into the same half as the destination)
    MIB.addReg(Upper ? getVM512Upper(MI.getOperand(3).getReg())
                     : getVM512Lower(MI.getOperand(3).getReg()));
    // VL
    MIB.addReg(MI.getOperand(4).getReg());
    break;
  }
}
809
expandPseudoVFMK(const TargetInstrInfo & TI,MachineInstr & MI)810 static void expandPseudoVFMK(const TargetInstrInfo &TI, MachineInstr &MI) {
811 // replace to pvfmk.w.up and pvfmk.w.lo
812 // replace to pvfmk.s.up and pvfmk.s.lo
813
814 static std::map<unsigned, std::pair<unsigned, unsigned>> VFMKMap = {
815 {VE::VFMKyal, {VE::VFMKLal, VE::VFMKLal}},
816 {VE::VFMKynal, {VE::VFMKLnal, VE::VFMKLnal}},
817 {VE::VFMKWyvl, {VE::PVFMKWUPvl, VE::PVFMKWLOvl}},
818 {VE::VFMKWyvyl, {VE::PVFMKWUPvml, VE::PVFMKWLOvml}},
819 {VE::VFMKSyvl, {VE::PVFMKSUPvl, VE::PVFMKSLOvl}},
820 {VE::VFMKSyvyl, {VE::PVFMKSUPvml, VE::PVFMKSLOvml}},
821 };
822
823 unsigned Opcode = MI.getOpcode();
824
825 auto Found = VFMKMap.find(Opcode);
826 if (Found == VFMKMap.end())
827 report_fatal_error("unexpected opcode for pseudo vfmk");
828
829 unsigned OpcodeUpper = (*Found).second.first;
830 unsigned OpcodeLower = (*Found).second.second;
831
832 MachineBasicBlock *MBB = MI.getParent();
833 DebugLoc DL = MI.getDebugLoc();
834
835 MachineInstrBuilder Bu = BuildMI(*MBB, MI, DL, TI.get(OpcodeUpper));
836 addOperandsForVFMK(Bu, MI, /* Upper */ true);
837 MachineInstrBuilder Bl = BuildMI(*MBB, MI, DL, TI.get(OpcodeLower));
838 addOperandsForVFMK(Bl, MI, /* Upper */ false);
839
840 MI.eraseFromParent();
841 }
842
expandPostRAPseudo(MachineInstr & MI) const843 bool VEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
844 switch (MI.getOpcode()) {
845 case VE::EXTEND_STACK: {
846 return expandExtendStackPseudo(MI);
847 }
848 case VE::EXTEND_STACK_GUARD: {
849 MI.eraseFromParent(); // The pseudo instruction is gone now.
850 return true;
851 }
852 case VE::GETSTACKTOP: {
853 return expandGetStackTopPseudo(MI);
854 }
855
856 case VE::ANDMyy:
857 expandPseudoLogM(MI, get(VE::ANDMmm));
858 return true;
859 case VE::ORMyy:
860 expandPseudoLogM(MI, get(VE::ORMmm));
861 return true;
862 case VE::XORMyy:
863 expandPseudoLogM(MI, get(VE::XORMmm));
864 return true;
865 case VE::EQVMyy:
866 expandPseudoLogM(MI, get(VE::EQVMmm));
867 return true;
868 case VE::NNDMyy:
869 expandPseudoLogM(MI, get(VE::NNDMmm));
870 return true;
871 case VE::NEGMy:
872 expandPseudoLogM(MI, get(VE::NEGMm));
873 return true;
874
875 case VE::LVMyir:
876 case VE::LVMyim:
877 case VE::LVMyir_y:
878 case VE::LVMyim_y: {
879 Register VMXu = getVM512Upper(MI.getOperand(0).getReg());
880 Register VMXl = getVM512Lower(MI.getOperand(0).getReg());
881 int64_t Imm = MI.getOperand(1).getImm();
882 bool IsSrcReg =
883 MI.getOpcode() == VE::LVMyir || MI.getOpcode() == VE::LVMyir_y;
884 Register Src = IsSrcReg ? MI.getOperand(2).getReg() : VE::NoRegister;
885 int64_t MImm = IsSrcReg ? 0 : MI.getOperand(2).getImm();
886 bool KillSrc = IsSrcReg ? MI.getOperand(2).isKill() : false;
887 Register VMX = VMXl;
888 if (Imm >= 4) {
889 VMX = VMXu;
890 Imm -= 4;
891 }
892 MachineBasicBlock *MBB = MI.getParent();
893 DebugLoc DL = MI.getDebugLoc();
894 switch (MI.getOpcode()) {
895 case VE::LVMyir:
896 BuildMI(*MBB, MI, DL, get(VE::LVMir))
897 .addDef(VMX)
898 .addImm(Imm)
899 .addReg(Src, getKillRegState(KillSrc));
900 break;
901 case VE::LVMyim:
902 BuildMI(*MBB, MI, DL, get(VE::LVMim))
903 .addDef(VMX)
904 .addImm(Imm)
905 .addImm(MImm);
906 break;
907 case VE::LVMyir_y:
908 assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
909 "LVMyir_y has different register in 3rd operand");
910 BuildMI(*MBB, MI, DL, get(VE::LVMir_m))
911 .addDef(VMX)
912 .addImm(Imm)
913 .addReg(Src, getKillRegState(KillSrc))
914 .addReg(VMX);
915 break;
916 case VE::LVMyim_y:
917 assert(MI.getOperand(0).getReg() == MI.getOperand(3).getReg() &&
918 "LVMyim_y has different register in 3rd operand");
919 BuildMI(*MBB, MI, DL, get(VE::LVMim_m))
920 .addDef(VMX)
921 .addImm(Imm)
922 .addImm(MImm)
923 .addReg(VMX);
924 break;
925 }
926 MI.eraseFromParent();
927 return true;
928 }
929 case VE::SVMyi: {
930 Register Dest = MI.getOperand(0).getReg();
931 Register VMZu = getVM512Upper(MI.getOperand(1).getReg());
932 Register VMZl = getVM512Lower(MI.getOperand(1).getReg());
933 bool KillSrc = MI.getOperand(1).isKill();
934 int64_t Imm = MI.getOperand(2).getImm();
935 Register VMZ = VMZl;
936 if (Imm >= 4) {
937 VMZ = VMZu;
938 Imm -= 4;
939 }
940 MachineBasicBlock *MBB = MI.getParent();
941 DebugLoc DL = MI.getDebugLoc();
942 MachineInstrBuilder MIB =
943 BuildMI(*MBB, MI, DL, get(VE::SVMmi), Dest).addReg(VMZ).addImm(Imm);
944 MachineInstr *Inst = MIB.getInstr();
945 MI.eraseFromParent();
946 if (KillSrc) {
947 const TargetRegisterInfo *TRI = &getRegisterInfo();
948 Inst->addRegisterKilled(MI.getOperand(1).getReg(), TRI, true);
949 }
950 return true;
951 }
952 case VE::VFMKyal:
953 case VE::VFMKynal:
954 case VE::VFMKWyvl:
955 case VE::VFMKWyvyl:
956 case VE::VFMKSyvl:
957 case VE::VFMKSyvyl:
958 expandPseudoVFMK(*this, MI);
959 }
960 return false;
961 }
962
expandExtendStackPseudo(MachineInstr & MI) const963 bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const {
964 MachineBasicBlock &MBB = *MI.getParent();
965 MachineFunction &MF = *MBB.getParent();
966 const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
967 const VEInstrInfo &TII = *STI.getInstrInfo();
968 DebugLoc dl = MBB.findDebugLoc(MI);
969
970 // Create following instructions and multiple basic blocks.
971 //
972 // thisBB:
973 // brge.l.t %sp, %sl, sinkBB
974 // syscallBB:
975 // ld %s61, 0x18(, %tp) // load param area
976 // or %s62, 0, %s0 // spill the value of %s0
977 // lea %s63, 0x13b // syscall # of grow
978 // shm.l %s63, 0x0(%s61) // store syscall # at addr:0
979 // shm.l %sl, 0x8(%s61) // store old limit at addr:8
980 // shm.l %sp, 0x10(%s61) // store new limit at addr:16
981 // monc // call monitor
982 // or %s0, 0, %s62 // restore the value of %s0
983 // sinkBB:
984
985 // Create new MBB
986 MachineBasicBlock *BB = &MBB;
987 const BasicBlock *LLVM_BB = BB->getBasicBlock();
988 MachineBasicBlock *syscallMBB = MF.CreateMachineBasicBlock(LLVM_BB);
989 MachineBasicBlock *sinkMBB = MF.CreateMachineBasicBlock(LLVM_BB);
990 MachineFunction::iterator It = ++(BB->getIterator());
991 MF.insert(It, syscallMBB);
992 MF.insert(It, sinkMBB);
993
994 // Transfer the remainder of BB and its successor edges to sinkMBB.
995 sinkMBB->splice(sinkMBB->begin(), BB,
996 std::next(std::next(MachineBasicBlock::iterator(MI))),
997 BB->end());
998 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
999
1000 // Next, add the true and fallthrough blocks as its successors.
1001 BB->addSuccessor(syscallMBB);
1002 BB->addSuccessor(sinkMBB);
1003 BuildMI(BB, dl, TII.get(VE::BRCFLrr_t))
1004 .addImm(VECC::CC_IGE)
1005 .addReg(VE::SX11) // %sp
1006 .addReg(VE::SX8) // %sl
1007 .addMBB(sinkMBB);
1008
1009 BB = syscallMBB;
1010
1011 // Update machine-CFG edges
1012 BB->addSuccessor(sinkMBB);
1013
1014 BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
1015 .addReg(VE::SX14)
1016 .addImm(0)
1017 .addImm(0x18);
1018 BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
1019 .addReg(VE::SX0)
1020 .addImm(0);
1021 BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
1022 .addImm(0)
1023 .addImm(0)
1024 .addImm(0x13b);
1025 BuildMI(BB, dl, TII.get(VE::SHMLri))
1026 .addReg(VE::SX61)
1027 .addImm(0)
1028 .addReg(VE::SX63);
1029 BuildMI(BB, dl, TII.get(VE::SHMLri))
1030 .addReg(VE::SX61)
1031 .addImm(8)
1032 .addReg(VE::SX8);
1033 BuildMI(BB, dl, TII.get(VE::SHMLri))
1034 .addReg(VE::SX61)
1035 .addImm(16)
1036 .addReg(VE::SX11);
1037 BuildMI(BB, dl, TII.get(VE::MONC));
1038
1039 BuildMI(BB, dl, TII.get(VE::ORri), VE::SX0)
1040 .addReg(VE::SX62)
1041 .addImm(0);
1042
1043 MI.eraseFromParent(); // The pseudo instruction is gone now.
1044 return true;
1045 }
1046
expandGetStackTopPseudo(MachineInstr & MI) const1047 bool VEInstrInfo::expandGetStackTopPseudo(MachineInstr &MI) const {
1048 MachineBasicBlock *MBB = MI.getParent();
1049 MachineFunction &MF = *MBB->getParent();
1050 const VESubtarget &STI = MF.getSubtarget<VESubtarget>();
1051 const VEInstrInfo &TII = *STI.getInstrInfo();
1052 DebugLoc DL = MBB->findDebugLoc(MI);
1053
1054 // Create following instruction
1055 //
1056 // dst = %sp + target specific frame + the size of parameter area
1057
1058 const MachineFrameInfo &MFI = MF.getFrameInfo();
1059 const VEFrameLowering &TFL = *STI.getFrameLowering();
1060
1061 // The VE ABI requires a reserved area at the top of stack as described
1062 // in VEFrameLowering.cpp. So, we adjust it here.
1063 unsigned NumBytes = STI.getAdjustedFrameSize(0);
1064
1065 // Also adds the size of parameter area.
1066 if (MFI.adjustsStack() && TFL.hasReservedCallFrame(MF))
1067 NumBytes += MFI.getMaxCallFrameSize();
1068
1069 BuildMI(*MBB, MI, DL, TII.get(VE::LEArii))
1070 .addDef(MI.getOperand(0).getReg())
1071 .addReg(VE::SX11)
1072 .addImm(0)
1073 .addImm(NumBytes);
1074
1075 MI.eraseFromParent(); // The pseudo instruction is gone now.
1076 return true;
1077 }
1078