//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
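
// Example (illustrative sketch; `Builder`, `MF` and `MBB` are assumed names):
// setMF() deliberately clears the insertion point, so a basic block and
// iterator must be chosen before the first build* call:
//
//   MachineIRBuilder Builder;
//   Builder.setMF(MF);                    // bind per-function state
//   Builder.setInsertPt(MBB, MBB.end());  // then pick where to insert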

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections()}, getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}
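
// Example (sketch): a constant such as `inttoptr (i64 1234 to ptr)` is
// unwrapped by the lambda above to its numeric payload, so the resulting
// DBG_VALUE carries the immediate 1234 instead of dropping the location to
// $noreg.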

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
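
// Example (sketch; `Builder` and a p0-typed `Base` register are assumed): a
// non-zero offset materializes a G_CONSTANT plus a G_PTR_ADD, while a zero
// offset builds nothing and simply forwards the base register:
//
//   Register Sum;
//   auto PtrAdd = Builder.materializePtrAdd(Sum, Base, LLT::scalar(64), 8);
//   // PtrAdd holds the G_PTR_ADD; Sum is a fresh vreg.
//
//   Register Same;
//   if (!Builder.materializePtrAdd(Same, Base, LLT::scalar(64), 0))
//     assert(Same == Base && "zero offset reuses the base register");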

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
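
// Example (sketch): clearing the low four bits aligns a 64-bit pointer down
// to a 16-byte boundary, i.e. a G_PTRMASK with the constant ~15:
//
//   // %aligned:_(p0) = G_PTRMASK %ptr, 0xfffffffffffffff0
//   Builder.buildMaskLowPtrBits(AlignedPtr, Ptr, /*NumBits=*/4);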

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Res must have more elements than Op0");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Res must be larger than Op0");
    Regs.push_back(Op0.getReg());
  }
  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
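
// Example (sketch): widening <2 x s32> to <4 x s32> unmerges the two source
// elements and pads the build with undef values:
//
//   // %a:_(s32), %b:_(s32) = G_UNMERGE_VALUES %src(<2 x s32>)
//   // %u:_(s32) = G_IMPLICIT_DEF
//   // %dst:_(<4 x s32>) = G_BUILD_VECTOR %a, %b, %u, %u
//   Builder.buildPadVectorWithUndefElements(Dst, Src);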

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
         "Res must have fewer elements than Op0");

  SmallVector<Register, 8> Regs;
  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
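
// Example (sketch): for a vector destination the constant is built once as a
// scalar G_CONSTANT and then splatted via G_BUILD_VECTOR:
//
//   // %c:_(s32) = G_CONSTANT i32 7
//   // %v:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c
//   Builder.buildConstant(LLT::fixed_vector(4, 32), 7);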

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr, MachineMemOperand &BaseMMO,
    int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
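
// Example (sketch): loading from Base + 16 derives a narrowed memory operand
// at offset 16 and emits the pointer arithmetic explicitly:
//
//   // %off:_(s64) = G_CONSTANT i64 16
//   // %addr:_(p0) = G_PTR_ADD %base, %off
//   // %val:_(s32) = G_LOAD %addr
//   Builder.buildLoadFromOffset(Dst, Base, BaseMMO, /*Offset=*/16);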

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
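
// Example (sketch): how the target's boolean contents choose the extension
// for an s1 value:
//
//   // ZeroOrNegativeOne -> G_SEXT   (false = 0, true = -1)
//   // ZeroOrOne         -> G_ZEXT   (false = 0, true = 1)
//   // Undefined         -> G_ANYEXT (upper bits unspecified)
//   unsigned Opc = Builder.getBoolExtOp(/*IsVec=*/false, /*IsFP=*/false);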

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
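
// Example (sketch; `S16`/`S32`/`S64` stand for operands of those scalar
// types): the emitted opcode depends only on the relative sizes:
//
//   Builder.buildSExtOrTrunc(S64, S32);  // 32 -> 64 bits: G_SEXT
//   Builder.buildSExtOrTrunc(S16, S32);  // 32 -> 16 bits: G_TRUNC
//   Builder.buildSExtOrTrunc(S32, S32);  // same size:     COPY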

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
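
// Example (sketch): the merge-like opcode follows from the destination and
// first-source types:
//
//   // scalar dst, scalar srcs -> G_MERGE_VALUES
//   // vector dst, scalar srcs -> G_BUILD_VECTOR
//   // vector dst, vector srcs -> G_CONCAT_VECTORS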

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}
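
// Example (sketch): splitting an s64 value into its two s32 halves; the
// number of results is inferred from the size ratio:
//
//   // %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %val(s64)
//   auto Unmerge = Builder.buildUnmerge(LLT::scalar(32), Val64);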

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
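
// Example (sketch): splatting a scalar across <4 x s32> inserts it into lane
// zero of an undef vector, then shuffles with an all-zero mask:
//
//   // %ins = G_INSERT_VECTOR_ELT %undef, %x(s32), 0
//   // %splat = G_SHUFFLE_VECTOR %ins, %undef, shufflemask(0, 0, 0, 0)
//   Builder.buildShuffleSplat(Dst, X);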

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes,
                                 const SrcOp &Addr, const SrcOp &Val,
                                 MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
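
// Example (sketch, assuming an atomic MachineMemOperand `MMO` is available):
// the typed wrappers below all funnel into buildAtomicRMW, e.g. a fetch-add:
//
//   // %old:_(s32) = G_ATOMICRMW_ADD %addr(p0), %val
//   Builder.buildAtomicRMWAdd(OldVal, Addr, Val, *MMO);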

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
    .addImm(Ordering)
    .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "output operands do not cover input register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}