//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

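// Rebinding the builder to a new function is a two-step affair: setMF()
// resets all cached state (block, insertion point, debug location,
// observer), after which an insertion point must be chosen before any
// build* call. A hypothetical setup sketch (names are illustrative):
//
//   MachineIRBuilder B;
//   B.setMF(MF);                     // bind to the function, clear state
//   B.setInsertPt(MBB, MBB.begin()); // then pick a block and a point in it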
void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

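// The next few helpers construct DBG_VALUE (and DBG_LABEL) instructions
// describing where a source variable currently lives: directly in a
// register, indirectly through an address held in a register, at a frame
// index, or as a constant.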
MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}

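// Unlike buildPtrAdd, this helper may not emit anything: a zero offset is
// folded away by aliasing Res to Op0 and returning None, so a hypothetical
// caller has to handle both outcomes:
//
//   Register Dst;
//   if (auto MIB = B.materializePtrAdd(Dst, Base, LLT::scalar(64), Off))
//     use(Dst); // a G_PTR_ADD was emitted; Dst holds the offset pointer
//   else
//     use(Dst); // Off == 0; Dst == Base and nothing was emitted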
Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}

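// Clears the low NumBits of a pointer via G_PTRMASK against a constant
// whose low NumBits are zero. For a 64-bit pointer, NumBits == 4 produces
// the mask 0xFFFFFFFFFFFFFFF0, i.e. the pointer rounded down to 16-byte
// alignment.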
MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAssertSExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_SEXT, Res, Op).addImm(Size);
}

MachineInstrBuilder MachineIRBuilder::buildAssertZExt(const DstOp &Res,
                                                      const SrcOp &Op,
                                                      unsigned Size) {
  return buildInstr(TargetOpcode::G_ASSERT_ZEXT, Res, Op).addImm(Size);
}

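// For vector destinations the constant is materialized once as a scalar
// G_CONSTANT and splatted with G_BUILD_VECTOR; scalar destinations get a
// plain G_CONSTANT. A hypothetical use (assuming the element-count /
// bit-width LLT::vector overload):
//
//   auto C = B.buildConstant(LLT::scalar(32), 42);    // single G_CONSTANT
//   auto V = B.buildConstant(LLT::vector(4, 32), 42); // splatted constant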
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

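// Convenience wrapper: derives a MachineMemOperand from BaseMMO at Offset
// and, for a non-zero offset, materializes the address with a constant plus
// G_PTR_ADD before loading; a zero offset degenerates to a plain (possibly
// size- or type-changing) load through the original pointer.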
MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

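// The extension opcode for a boolean follows the target's boolean contents:
// ZeroOrNegativeOne -> G_SEXT, ZeroOrOne -> G_ZEXT, and anything else
// (undefined high bits) -> G_ANYEXT.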
unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

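// Chooses the opcode from the relative sizes of Res and Op: wider -> ExtOpc,
// narrower -> G_TRUNC, same size -> COPY. E.g., with a hypothetical s32
// register Reg32:
//
//   auto A = B.buildSExtOrTrunc(LLT::scalar(64), Reg32); // emits G_SEXT
//   auto C = B.buildSExtOrTrunc(LLT::scalar(16), Reg32); // emits G_TRUNC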
MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

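// If the operands are homogeneous and tile the result exactly, the sequence
// lowers to a single G_MERGE_VALUES; otherwise it becomes a chain of
// G_INSERTs into a G_IMPLICIT_DEF, threading the partial result through a
// fresh virtual register at each step.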
void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

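// The unmerge overloads differ only in how the results are specified:
// explicit LLTs, one result type repeated to cover the source, or
// pre-created registers. A hypothetical split of an s64 into halves:
//
//   auto Parts = B.buildUnmerge(LLT::scalar(32), Reg64); // two s32 defs
//   Register Lo = Parts.getReg(0), Hi = Parts.getReg(1);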
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

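// Splat idiom: insert Src into lane 0 of an undef vector, then shuffle with
// an all-zeros mask so every result lane reads lane 0 of the first operand.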
MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert(Src1Ty.getNumElements() + Src2Ty.getNumElements() >= Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID, ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

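// G_ATOMIC_CMPXCHG_WITH_SUCCESS additionally defines a boolean success
// flag; targets without a native flag-producing compare-exchange typically
// legalize it into G_ATOMIC_CMPXCHG followed by an explicit comparison.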
MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

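// Shared worker behind the buildAtomicRMW* wrappers below; Opcode is
// expected to be one of the G_ATOMICRMW_* opcodes and MMO must be atomic.
// A hypothetical use:
//
//   auto Old = B.buildAtomicRMWAdd(OldVal, Addr, Val, *MMO);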
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes,
                                 const SrcOp &Addr, const SrcOp &Val,
                                 MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

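// Generic entry point used by most of the typed build* helpers above. The
// switch below only validates (and occasionally canonicalizes) the more
// error-prone opcodes; any opcode not listed is emitted unchecked with the
// given defs, uses and flags.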
MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
           DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
           SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
           SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}