//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections()}, getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

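  // Look through an inttoptr constant expression so the underlying integer
  // value can be encoded directly in the DBG_VALUE.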
  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

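  // A zero offset needs no instruction: reuse the base pointer and return
  // std::nullopt to tell the caller that nothing was built.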
  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
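  // maskTrailingZeros produces an all-ones value with the low NumBits
  // cleared, so the G_PTRMASK below zeroes out the low bits of the pointer.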
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
         "Op0 has more elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  SmallVector<Register, 8> Regs;
  for (auto Op : Unmerge.getInstr()->defs())
    Regs.push_back(Op.getReg());
  Register Undef = buildUndef(Op0Ty.getElementType()).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
         "Op0 has fewer elements");

  SmallVector<Register, 8> Regs;
  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

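  // For vector destinations, materialize the scalar constant once and splat
  // it into every lane.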
  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO,
    int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

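// Pick the extension opcode matching how the target represents booleans:
// sign-extend for ZeroOrNegativeOne, zero-extend for ZeroOrOne, and
// any-extend when the high bits are unspecified.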
unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

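  // Widen with the requested extension, narrow with G_TRUNC, and fall back
  // to a plain COPY when the two types are already the same size.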
  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
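  // Zero-extension in register is just an AND with a mask of the low ImmOp
  // bits.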
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

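  // Derive the cast opcode from the pointer-ness of the two types; casts
  // between identical types were handled above as plain copies.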
  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

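// Merge-like opcode selection: scalar results are assembled with
// G_MERGE_VALUES, vector results built from scalar sources use
// G_BUILD_VECTOR, and vector results built from vector sources use
// G_CONCAT_VECTORS.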
unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
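  // Splat idiom: insert the scalar into lane 0 of an undef vector, then
  // broadcast lane 0 to every element with an all-zero shuffle mask.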
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes, const SrcOp &Addr,
    const SrcOp &Val, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
    .addImm(Ordering)
    .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
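  // Per-opcode sanity checks (asserts only); each case validates the operand
  // counts and types required by that generic opcode before the instruction
  // is built.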
1060   switch (Opc) {
1061   default:
1062     break;
1063   case TargetOpcode::G_SELECT: {
1064     assert(DstOps.size() == 1 && "Invalid select");
1065     assert(SrcOps.size() == 3 && "Invalid select");
1066     validateSelectOp(
1067         DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
1068         SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
1069     break;
1070   }
1071   case TargetOpcode::G_FNEG:
1072   case TargetOpcode::G_ABS:
1073     // All these are unary ops.
1074     assert(DstOps.size() == 1 && "Invalid Dst");
1075     assert(SrcOps.size() == 1 && "Invalid Srcs");
1076     validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
1077                     SrcOps[0].getLLTTy(*getMRI()));
1078     break;
1079   case TargetOpcode::G_ADD:
1080   case TargetOpcode::G_AND:
1081   case TargetOpcode::G_MUL:
1082   case TargetOpcode::G_OR:
1083   case TargetOpcode::G_SUB:
1084   case TargetOpcode::G_XOR:
1085   case TargetOpcode::G_UDIV:
1086   case TargetOpcode::G_SDIV:
1087   case TargetOpcode::G_UREM:
1088   case TargetOpcode::G_SREM:
1089   case TargetOpcode::G_SMIN:
1090   case TargetOpcode::G_SMAX:
1091   case TargetOpcode::G_UMIN:
1092   case TargetOpcode::G_UMAX:
1093   case TargetOpcode::G_UADDSAT:
1094   case TargetOpcode::G_SADDSAT:
1095   case TargetOpcode::G_USUBSAT:
1096   case TargetOpcode::G_SSUBSAT: {
1097     // All these are binary ops.
1098     assert(DstOps.size() == 1 && "Invalid Dst");
1099     assert(SrcOps.size() == 2 && "Invalid Srcs");
1100     validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
1101                      SrcOps[0].getLLTTy(*getMRI()),
1102                      SrcOps[1].getLLTTy(*getMRI()));
1103     break;
1104   }
1105   case TargetOpcode::G_SHL:
1106   case TargetOpcode::G_ASHR:
1107   case TargetOpcode::G_LSHR:
1108   case TargetOpcode::G_USHLSAT:
1109   case TargetOpcode::G_SSHLSAT: {
1110     assert(DstOps.size() == 1 && "Invalid Dst");
1111     assert(SrcOps.size() == 2 && "Invalid Srcs");
1112     validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
1113                     SrcOps[0].getLLTTy(*getMRI()),
1114                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
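    // Illustrative call (hypothetical s32 registers Lhs/Rhs, not part of the
    // original file):
    //   buildInstr(TargetOpcode::G_ICMP, {LLT::scalar(1)},
    //              {CmpInst::ICMP_EQ, Lhs, Rhs});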
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
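    // e.g. unmerging an s64 source requires exactly two s32 destinations
    // (2 * 32 == 64).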
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
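    // e.g. merging two s32 sources yields a single s64 destination
    // (2 * 32 == 64).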
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTORS or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
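    // e.g. extracting an s32 element from a <4 x s32> vector with a scalar
    // index; the destination type must equal the vector's element type.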
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
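    // e.g. four s32 sources build exactly one <4 x s32> destination
    // (4 * 32 == 128).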
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
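    // Note: no size-coverage check here, since each source scalar is wider
    // than the destination's element type and gets truncated to it.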
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
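    // e.g. concatenating two <2 x s32> sources yields one <4 x s32>
    // destination (2 * 64 == 128).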
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
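    // Operand layout: (res, carry_out) = G_UADDE lhs, rhs, carry_in.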
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}
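
// Illustrative usage (a sketch, not part of the original file): the typed
// build helpers ultimately funnel into the overload above. Assuming a
// configured builder `MIB` and hypothetical s32 virtual registers LHS/RHS:
//   auto Sum = MIB.buildInstr(TargetOpcode::G_ADD, {LLT::scalar(32)},
//                             {LHS, RHS}, MachineInstr::NoSWrap);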