//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
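
// Illustrative use of materializePtrAdd (a sketch; `B`, `BasePtr`, and
// `Offset` are assumed to be a configured MachineIRBuilder, a pointer vreg,
// and a byte offset):
//   Register NewPtr; // out-parameter, must start out invalid
//   if (auto PtrAdd =
//           B.materializePtrAdd(NewPtr, BasePtr, LLT::scalar(64), Offset)) {
//     // A G_PTR_ADD was emitted; NewPtr is its result.
//   } else {
//     // Offset was 0: no instruction was built and NewPtr aliases BasePtr.
//   }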

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
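
// For example (illustrative, assuming a 64-bit pointer type),
// buildMaskLowPtrBits(P2, P1, 4) emits
//   %mask:_(s64) = G_CONSTANT i64 -16
//   %p2:_(p0) = G_PTRMASK %p1, %mask
// i.e. it clears the low 4 bits of the pointer (align-down to 16 bytes).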

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAssertSExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_SEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildAssertZExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_ZEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}
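
// Illustrative uses of the constant builders (a sketch; `B` is assumed to be
// a configured MachineIRBuilder):
//   auto C1 = B.buildConstant(LLT::scalar(32), 42);   // s32 G_CONSTANT of 42
//   auto C2 = B.buildFConstant(LLT::scalar(64), 1.0); // s64 G_FCONSTANT of 1.0
// For vector result types, the scalar constant is built once and then
// splatted through buildSplatVector (G_BUILD_VECTOR).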

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO,
    int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
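
// Sketch of the sequence buildLoadFromOffset emits for a nonzero offset
// (assuming a 64-bit pointer; register names are illustrative):
//   %off:_(s64) = G_CONSTANT i64 <Offset>
//   %ptr:_(p0) = G_PTR_ADD %base, %off
//   %dst:_(<LoadTy>) = G_LOAD %ptr :: (load ... + <Offset>)
// With Offset == 0, only the G_LOAD is built, reusing %base directly.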

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
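
// The mapping above follows the target's boolean representation: targets that
// represent true as all-ones (e.g. vector compare masks) need G_SEXT, targets
// that represent true as 1 need G_ZEXT, and with undefined boolean contents
// any extension is acceptable, so the cheapest (G_ANYEXT) is chosen.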

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
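
// Illustrative behavior of buildExtOrTrunc (types are assumptions):
//   s64 <- s32 : emits the requested extend (e.g. G_SEXT)
//   s16 <- s32 : emits G_TRUNC
//   s32 <- s32 : emits COPY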

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}
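
// E.g. (illustrative) buildZExtInReg(Dst, Src, 8) on an s32 value emits
//   %mask:_(s32) = G_CONSTANT i32 255
//   %dst:_(s32) = G_AND %src, %mask
// i.e. it zero-extends the low 8 bits in place rather than going through a
// G_TRUNC/G_ZEXT pair.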

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}
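
// buildSequence thus has two strategies: if the operands are homogeneous and
// tile the result exactly, a single G_MERGE_VALUES suffices; otherwise it
// starts from G_IMPLICIT_DEF and chains one G_INSERT per operand. Sketch of
// the fallback for two s16 pieces at bit offsets 0 and 32 of an s64
// (illustrative):
//   %undef:_(s64) = G_IMPLICIT_DEF
//   %t0:_(s64) = G_INSERT %undef, %op0(s16), 0
//   %res:_(s64) = G_INSERT %t0, %op1(s16), 32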

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
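
// buildShuffleSplat uses the canonical splat pattern: insert the scalar into
// lane 0 of an undef vector, then shuffle with an all-zero mask so every
// output lane reads lane 0. Sketch for a 4-element result (illustrative):
//   %undef:_(<4 x s32>) = G_IMPLICIT_DEF
//   %zero:_(s64) = G_CONSTANT i64 0
//   %ins:_(<4 x s32>) = G_INSERT_VECTOR_ELT %undef, %src(s32), %zero
//   %res:_(<4 x s32>) = G_SHUFFLE_VECTOR %ins, %undef,
//       shufflemask(0, 0, 0, 0)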

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}
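
// Illustrative call (a sketch; `B` and the registers are assumptions). Note
// that only the defs and the intrinsic ID are added here; the caller appends
// the intrinsic's operands afterwards:
//   auto MIB = B.buildIntrinsic(Intrinsic::fshl, {DstReg},
//                               /*HasSideEffects=*/false);
//   MIB.addUse(A).addUse(C).addUse(Amt);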

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr,
    const SrcOp &Val, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
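
// Illustrative use through one of the wrappers below (a sketch; `B`,
// `PtrInfo`, and the registers are assumptions). The MMO must be atomic:
//   MachineMemOperand *MMO = B.getMF().getMachineMemOperand(
//       PtrInfo, MachineMemOperand::MOLoad | MachineMemOperand::MOStore, 8,
//       Align(8), AAMDNodes(), nullptr, SyncScope::System,
//       AtomicOrdering::SequentiallyConsistent);
//   B.buildAtomicRMWAdd(OldVal, Addr, Val, *MMO);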

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

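// The generic buildInstr below dispatches on opcode to the validate* helpers
// above before constructing the instruction, and canonicalizes a few cases
// (e.g. a one-source G_MERGE_VALUES becomes a cast; a vector G_MERGE_VALUES
// becomes G_CONCAT_VECTORS or G_BUILD_VECTOR). Illustrative call, assuming
// `B` is a configured MachineIRBuilder and X, Y are s32 vregs:
//   auto Add = B.buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)}, {X, Y});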
MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}