//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert %noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(0U);
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}
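
// Example use of buildDynStackAlloc (an illustrative sketch; the builder `B`,
// the register `SizeReg`, and the LLTs are assumptions, not part of this
// file's API):
//   LLT P0 = LLT::pointer(0, 64);
//   auto Alloc = B.buildDynStackAlloc(P0, SizeReg, Align(16));
// which emits roughly: %alloc:_(p0) = G_DYN_STACKALLOC %size:_(s64), 16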

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}
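
// Example use of buildPtrAdd (sketch; `B`, `BaseReg`, and the LLTs are
// assumptions):
//   LLT P0 = LLT::pointer(0, 64);
//   auto Off = B.buildConstant(LLT::scalar(64), 8);
//   auto Ptr = B.buildPtrAdd(P0, BaseReg, Off);
// i.e. %off:_(s64) = G_CONSTANT i64 8
//      %ptr:_(p0)  = G_PTR_ADD %base:_(p0), %off:_(s64)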

Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
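
// Note on materializePtrAdd: a zero offset produces no instruction at all
// (Res aliases Op0 and None is returned), so callers must not assume an
// instruction was created. Sketch, with hypothetical names:
//   Register NewPtr;
//   if (auto MIB = B.materializePtrAdd(NewPtr, Base, LLT::scalar(64), Imm)) {
//     // A G_PTR_ADD was created; NewPtr holds its result.
//   } // Otherwise NewPtr == Base and nothing was emitted.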

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
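
// The mask comes from maskTrailingZeros<uint64_t>(NumBits), which sets every
// bit except the low NumBits (e.g. NumBits == 4 gives ~0xF), so the emitted
// G_PTRMASK clears the low bits and aligns the pointer down to a 2^NumBits
// boundary.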

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
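
// For a vector destination the scalar G_CONSTANT is built first and then
// splatted via buildSplatVector. Sketch (the builder `B` and this LLT
// spelling are assumptions for this LLVM version):
//   auto C = B.buildConstant(LLT::vector(4, 32), 1);
// yields %e:_(s32) = G_CONSTANT i32 1 followed by
//        %v:_(<4 x s32>) = G_BUILD_VECTOR %e, %e, %e, %e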

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(
      Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(Register Tst,
                                                  MachineBasicBlock &Dest) {
  assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");

  return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildLoad(const DstOp &Res,
                                                const SrcOp &Addr,
                                                MachineMemOperand &MMO) {
  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy.getSizeInBytes());

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
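
// Sketch of buildLoadFromOffset with a nonzero offset (names assumed):
//   auto Val = B.buildLoadFromOffset(S32, Base, BaseMMO, 8);
// derives a narrowed MMO at offset 8 from BaseMMO, then emits a G_CONSTANT,
// a G_PTR_ADD, and the G_LOAD; with Offset == 0 it loads through Base
// directly (possibly at a different size or type than BaseMMO describes).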

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
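
// For example, on a target whose getBooleanContents() reports
// ZeroOrNegativeOneBooleanContent for vectors,
// getBoolExtOp(/*IsVec=*/true, /*IsFP=*/false) returns G_SEXT so that a
// "true" lane widens to all-ones rather than to 1.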

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp =
      getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
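
// Illustration of the three outcomes (sketch; `B` and an s32 `Src` are
// assumptions):
//   B.buildSExtOrTrunc(LLT::scalar(64), Src); // wider     -> G_SEXT
//   B.buildSExtOrTrunc(LLT::scalar(16), Src); // narrower  -> G_TRUNC
//   B.buildSExtOrTrunc(LLT::scalar(32), Src); // same size -> COPY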

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}
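
// buildCast dispatch, by example (sketch; registers and LLT spellings are
// assumptions for this LLVM version):
//   B.buildCast(LLT::scalar(64), PtrReg);     // -> G_PTRTOINT
//   B.buildCast(LLT::pointer(0, 64), IntReg); // -> G_INTTOPTR
//   B.buildCast(LLT::vector(2, 32), I64Reg);  // -> G_BITCAST (same width)
// Identical source and destination types collapse to a plain COPY.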

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}
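
// For instance (sketch), two s32 pieces at offsets 0 and 32 assembling an
// s64 take the fast path above and become a single
//   %res:_(s64) = G_MERGE_VALUES %lo:_(s32), %hi:_(s32)
// whereas mixed types or non-contiguous offsets fall back to a
// G_IMPLICIT_DEF followed by a chain of G_INSERTs.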

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg =
      Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}
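
// E.g. unmerging an s64 source with Res == s32 creates two fresh s32
// registers (sketch; `B` and `S64Reg` are assumptions):
//   auto Unmerge = B.buildUnmerge(LLT::scalar(32), S64Reg);
//   Register Lo = Unmerge.getReg(0), Hi = Unmerge.getReg(1);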

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}
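
// Callers append the intrinsic's argument operands after the ID, e.g.
// (sketch; the intrinsic choice and registers are assumptions):
//   B.buildIntrinsic(Intrinsic::sqrt, {DstReg}, /*HasSideEffects=*/false)
//       .addUse(SrcReg);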

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
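
// All of the buildAtomicRMW* wrappers below funnel into this helper; e.g.
// an atomic add is (sketch):
//   B.buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldVal, Addr, Val, MMO);
// where MMO must carry an atomic ordering (see the MMO.isAtomic() assert).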

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(std::all_of(DstOps.begin(), DstOps.end(),
                       [&, this](const DstOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                DstOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
           DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
           SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
           SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return (Op.getLLTTy(*getMRI()).isVector() &&
                                 Op.getLLTTy(*getMRI()) ==
                                     SrcOps[0].getLLTTy(*getMRI()));
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}