//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
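
// Example (sketch, hypothetical pass code): a builder is typically bound to a
// function and then to a concrete insertion point before any build* call:
//   MachineIRBuilder MIRBuilder;
//   MIRBuilder.setMF(MF);       // reset function-level state
//   MIRBuilder.setMBB(MBB);     // insert at the end of a block, or...
//   MIRBuilder.setInstr(MI);    // ...right before a given instruction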

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}
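
// Example (sketch): the no-insert variant supports a build-then-commit
// pattern, used by buildConstDbgValue below, where operands are attached
// before the instruction is placed at the insertion point:
//   auto MIB = MIRBuilder.buildInstrNoInsert(TargetOpcode::DBG_VALUE);
//   // ... add operands to MIB ...
//   MIRBuilder.insertInstr(MIB);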

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}
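
// Example (sketch): the alignment travels as a plain immediate, so a 16-byte
// aligned dynamic allocation of SizeReg bytes (PtrTy/SizeReg hypothetical) is
//   MIRBuilder.buildDynStackAlloc(PtrTy, SizeReg, Align(16));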

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
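
// Example (sketch): a zero offset is folded away and no instruction is
// emitted, so callers must use the out-parameter rather than assume a new
// G_PTR_ADD exists (Base/Off hypothetical):
//   Register NewPtr;
//   auto MaybePtrAdd =
//       MIRBuilder.materializePtrAdd(NewPtr, Base, LLT::scalar(64), Off);
//   // If Off == 0, MaybePtrAdd is None and NewPtr aliases Base.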

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
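
// Example (sketch): maskTrailingZeros<uint64_t>(NumBits) keeps every bit
// except the low NumBits, so buildMaskLowPtrBits(Res, Ptr, 4) clears the low
// four bits of Ptr via G_PTRMASK, i.e. it aligns the pointer down to 16 bytes.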

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAssertSExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_SEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildAssertZExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_ZEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
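
// Example (sketch): a vector destination splats the scalar, so building the
// constant 42 into a 4 x s32 register (V4S32Reg hypothetical) emits one
// scalar G_CONSTANT plus a G_BUILD_VECTOR of four copies of it:
//   MIRBuilder.buildConstant(V4S32Reg, 42);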

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(
      TypeSize::Fixed(Dst.getLLTTy(*getMRI()).getSizeInBytes()));
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}
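
// Example (sketch): this overload derives the access size from the
// destination type and builds the MachineMemOperand itself, so a fixed-stack
// reload (S32Reg/FIAddr/FI hypothetical) can be written as
//   MIRBuilder.buildLoad(S32Reg, FIAddr,
//                        MachinePointerInfo::getFixedStack(MF, FI), Align(4));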

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO,
    int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes());

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  uint64_t Size = MemoryLocation::getSizeOrUnknown(
      TypeSize::Fixed(Val.getLLTTy(*getMRI()).getSizeInBytes()));
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Size, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
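
// Example (sketch): the opcode is picked from the relative sizes, so with an
// s32 destination buildSExtOrTrunc emits G_SEXT from an s16 source, G_TRUNC
// from an s64 source, and a plain COPY from another s32.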

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}
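
// Example (sketch): zero-extending the low 8 bits of an s32 in place is an
// AND with 0xff, so buildZExtInReg(S32Dst, S32Src, 8) expands to
//   %mask:_(s32) = G_CONSTANT i32 255
//   %dst:_(s32) = G_AND %src, %mask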

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}
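
// Example (sketch): the single-type overload creates the result registers
// itself, so splitting an s64 value (S64Reg hypothetical) into two s32 parts
// is just
//   auto Unmerge = MIRBuilder.buildUnmerge(LLT::scalar(32), S64Reg);
//   Register Lo = Unmerge.getReg(0), Hi = Unmerge.getReg(1);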

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
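
// Example (sketch): the splat is modeled as "insert the scalar into element 0
// of an undef vector, then shuffle with an all-zeros mask", mirroring the
// insertelement + shufflevector splat idiom used in LLVM IR.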

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert(Src1Ty.getNumElements() + Src2Ty.getNumElements() >= Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {DstTy}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}
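
// Example (sketch): results are added first and the intrinsic ID follows, so
// the caller appends the call's arguments to the returned builder
// (DstReg/SrcReg hypothetical):
//   MIRBuilder.buildIntrinsic(Intrinsic::bswap, {DstReg},
//                             /*HasSideEffects=*/false)
//       .addUse(SrcReg);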

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(unsigned Opcode,
                                                     const DstOp &OldValRes,
                                                     const SrcOp &Addr,
                                                     const SrcOp &Val,
                                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}
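
// Example (sketch): both operands are raw immediates, so a seq_cst
// system-wide fence can be built as
//   MIRBuilder.buildFence(
//       static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent),
//       SyncScope::System);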

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}