//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
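
// Illustrative usage (a sketch; the MF/MBB names are hypothetical): a pass
// binds the builder to a function, then picks an insertion point before
// emitting any instructions.
//
//   MachineIRBuilder Builder;
//   Builder.setMF(MF);                     // function-level state, as above
//   Builder.setInsertPt(MBB, MBB.begin()); // block + iterator to insert at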

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections()}, getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
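
// Illustrative sketch (names hypothetical): materializePtrAdd avoids emitting
// a no-op G_PTR_ADD when the offset is zero.
//
//   Register Res;
//   if (Builder.materializePtrAdd(Res, Base, LLT::scalar(64), 8))
//     ; // Res is a fresh vreg defined by a G_PTR_ADD of Base + 8.
//   else
//     ; // Offset was 0: Res aliases Base and no instruction was emitted.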

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
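
// For example: with NumBits == 4, maskTrailingZeros<uint64_t>(4) yields
// 0x...FFF0, so the emitted G_PTRMASK clears the low four bits of Op0,
// rounding the pointer down to a 16-byte boundary.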

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
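
// Illustrative example: padding a <2 x s32> source to a <4 x s32> result
// unmerges the source into two s32 values and appends two undef elements,
// so the final merge-like instruction sees {e0, e1, undef, undef}.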

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
         "Op0 has fewer elements");

  SmallVector<Register, 8> Regs;
  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
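
// Illustrative sketch: requesting a fixed-vector constant goes through the
// splat path above. E.g., via the int64_t overload below (hypothetical use):
//
//   auto C = Builder.buildConstant(LLT::fixed_vector(4, 32), 42);
//   // One s32 G_CONSTANT of 42, splatted by a G_BUILD_VECTOR.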

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO,
    int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
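
// Illustrative use (names hypothetical): loading a second s32 field through a
// base pointer; the derived MMO inherits the base MMO's attributes at +4.
//
//   auto Field1 = Builder.buildLoadFromOffset(LLT::scalar(32), Base, MMO, 4);
//   // Expands to: C = G_CONSTANT 4; P = G_PTR_ADD Base, C; G_LOAD P.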

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
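
// Summary of the mapping above: targets where "true" is all-ones get G_SEXT,
// targets where "true" is 1 get G_ZEXT, and UndefinedBooleanContent falls
// through to G_ANYEXT since the high bits carry no defined meaning.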

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
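
// Illustrative behavior: the opcode is chosen purely from the relative sizes,
// so one call site handles widening, narrowing, and same-size cases alike.
// E.g. buildSExtOrTrunc with an s64 dst and s32 src emits G_SEXT, the reverse
// emits G_TRUNC, and equal types collapse to a COPY.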

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
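
// The selection above, summarized with examples:
//   vector dst, vector srcs -> G_CONCAT_VECTORS (<4 x s32> from two <2 x s32>)
//   vector dst, scalar srcs -> G_BUILD_VECTOR   (<4 x s32> from four s32)
//   scalar dst              -> G_MERGE_VALUES   (s64 from two s32)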

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}
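
// Illustrative example: unmerging an s64 source with Res == s32 computes
// NumReg = 64 / 32 = 2 and emits `s32, s32 = G_UNMERGE_VALUES src(s64)`.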

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
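
// This is the classic shuffle-based splat: insert the scalar into lane 0 of
// an undef vector, then shuffle with an all-zero mask so every result lane
// reads lane 0.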

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}
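
// Illustrative call (intrinsic and registers hypothetical): the
// attribute-derived overloads below look up memory effects and convergence
// from the intrinsic's definition, so most callers can simply write:
//
//   Builder.buildIntrinsic(Intrinsic::ctlz, {DstReg}).addUse(SrcReg);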

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes,
                                 const SrcOp &Addr, const SrcOp &Val,
                                 MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

1053 MachineInstrBuilder
buildFence(unsigned Ordering,unsigned Scope)1054 MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
1055   return buildInstr(TargetOpcode::G_FENCE)
1056     .addImm(Ordering)
1057     .addImm(Scope);
1058 }
1059 
buildPrefetch(const SrcOp & Addr,unsigned RW,unsigned Locality,unsigned CacheType,MachineMemOperand & MMO)1060 MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
1061                                                     unsigned RW,
1062                                                     unsigned Locality,
1063                                                     unsigned CacheType,
1064                                                     MachineMemOperand &MMO) {
1065   auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
1066   Addr.addSrcToMIB(MIB);
1067   MIB.addImm(RW).addImm(Locality).addImm(CacheType);
1068   MIB.addMemOperand(&MMO);
1069   return MIB;
1070 }
1071 
1072 MachineInstrBuilder
buildBlockAddress(Register Res,const BlockAddress * BA)1073 MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
1074 #ifndef NDEBUG
1075   assert(getMRI()->getType(Res).isPointer() && "invalid res type");
1076 #endif
1077 
1078   return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
1079 }
1080 
validateTruncExt(const LLT DstTy,const LLT SrcTy,bool IsExtend)1081 void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
1082                                         bool IsExtend) {
1083 #ifndef NDEBUG
1084   if (DstTy.isVector()) {
1085     assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
1086     assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
1087            "different number of elements in a trunc/ext");
1088   } else
1089     assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");
1090 
1091   if (IsExtend)
1092     assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
1093            "invalid narrowing extend");
1094   else
1095     assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
1096            "invalid widening trunc");
1097 #endif
1098 }
1099 
validateSelectOp(const LLT ResTy,const LLT TstTy,const LLT Op0Ty,const LLT Op1Ty)1100 void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
1101                                         const LLT Op0Ty, const LLT Op1Ty) {
1102 #ifndef NDEBUG
1103   assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
1104          "invalid operand type");
1105   assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
1106   if (ResTy.isScalar() || ResTy.isPointer())
1107     assert(TstTy.isScalar() && "type mismatch");
1108   else
1109     assert((TstTy.isScalar() ||
1110             (TstTy.isVector() &&
1111              TstTy.getNumElements() == Op0Ty.getNumElements())) &&
1112            "type mismatch");
1113 #endif
1114 }
1115 
MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
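  // All these are shift operations: the result takes the type of the shifted
  // value, while the shift-amount operand may have a different type.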
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
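  // All these are extensions: the destination must be strictly wider than
  // the source (checked by validateTruncExt).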
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
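  // Truncations (integer and FP): the destination must be strictly narrower
  // than the source.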
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type mismatch");
    break;
  }
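  // G_UNMERGE_VALUES splits one source into several identically typed pieces
  // that together exactly cover it.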
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
    break;
  }
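  // G_MERGE_VALUES concatenates same-typed scalar sources into one wider
  // scalar; vector results use G_BUILD_VECTOR/G_CONCAT_VECTORS instead.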
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTORS or G_BUILD_VECTOR");
    break;
  }
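  // Element access: an extract yields the source vector's element type at a
  // scalar index; an insert writes a matching element without changing the
  // vector length.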
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
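  // Vector construction from scalars. G_BUILD_VECTOR keeps the element type;
  // the _TRUNC variant truncates each scalar into the destination's narrower
  // elements.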
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
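  // G_UADDE is unsigned add-with-carry: (sum, carry-out) from
  // (lhs, rhs, carry-in), with carry-out and carry-in sharing a scalar type.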
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid number of dst operands");
    assert(SrcOps.size() == 3 && "Invalid number of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

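  // Opcode-specific validation done: create the instruction, then append the
  // defs, uses and any requested flags.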
  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}