//===-- lib/CodeGen/GlobalISel/CombinerHelper.cpp -------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
9 #include "llvm/CodeGen/GlobalISel/Combiner.h"
10 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
11 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
12 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
13 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
14 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
15 #include "llvm/CodeGen/GlobalISel/Utils.h"
16 #include "llvm/CodeGen/MachineDominators.h"
17 #include "llvm/CodeGen/MachineFrameInfo.h"
18 #include "llvm/CodeGen/MachineInstr.h"
19 #include "llvm/CodeGen/MachineMemOperand.h"
20 #include "llvm/CodeGen/MachineRegisterInfo.h"
21 #include "llvm/CodeGen/TargetInstrInfo.h"
22 #include "llvm/CodeGen/TargetLowering.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Target/TargetMachine.h"
25
26 #define DEBUG_TYPE "gi-combiner"
27
28 using namespace llvm;
29 using namespace MIPatternMatch;
30
31 // Option to allow testing of the combiner while no targets know about indexed
32 // addressing.
33 static cl::opt<bool>
34 ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false),
35 cl::desc("Force all indexed operations to be "
36 "legal for the GlobalISel combiner"));
37
CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
                               MachineIRBuilder &B, GISelKnownBits *KB,
                               MachineDominatorTree *MDT,
                               const LegalizerInfo *LI)
    : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer),
      KB(KB), MDT(MDT), LI(LI) {
  (void)this->KB;
}
46
const TargetLowering &CombinerHelper::getTargetLowering() const {
  return *Builder.getMF().getSubtarget().getTargetLowering();
}
50
51 /// \returns The little endian in-memory byte position of byte \p I in a
52 /// \p ByteWidth bytes wide type.
53 ///
54 /// E.g. Given a 4-byte type x, x[0] -> byte 0
static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return I;
}
59
60 /// \returns The big endian in-memory byte position of byte \p I in a
61 /// \p ByteWidth bytes wide type.
62 ///
63 /// E.g. Given a 4-byte type x, x[0] -> byte 3
static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) {
  assert(I < ByteWidth && "I must be in [0, ByteWidth)");
  return ByteWidth - I - 1;
}
68
69 /// Given a map from byte offsets in memory to indices in a load/store,
70 /// determine if that map corresponds to a little or big endian byte pattern.
71 ///
72 /// \param MemOffset2Idx maps memory offsets to address offsets.
73 /// \param LowestIdx is the lowest index in \p MemOffset2Idx.
74 ///
75 /// \returns true if the map corresponds to a big endian byte pattern, false
76 /// if it corresponds to a little endian byte pattern, and None otherwise.
77 ///
78 /// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns
79 /// are as follows:
80 ///
81 /// AddrOffset Little endian Big endian
82 /// 0 0 3
83 /// 1 1 2
84 /// 2 2 1
85 /// 3 3 0
static Optional<bool>
isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
            int64_t LowestIdx) {
89 // Need at least two byte positions to decide on endianness.
90 unsigned Width = MemOffset2Idx.size();
91 if (Width < 2)
92 return None;
93 bool BigEndian = true, LittleEndian = true;
94 for (unsigned MemOffset = 0; MemOffset < Width; ++ MemOffset) {
95 auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset);
96 if (MemOffsetAndIdx == MemOffset2Idx.end())
97 return None;
98 const int64_t Idx = MemOffsetAndIdx->second - LowestIdx;
99 assert(Idx >= 0 && "Expected non-negative byte offset?");
100 LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset);
101 BigEndian &= Idx == bigEndianByteAt(Width, MemOffset);
102 if (!BigEndian && !LittleEndian)
103 return None;
104 }
105
106 assert((BigEndian != LittleEndian) &&
107 "Pattern cannot be both big and little endian!");
108 return BigEndian;
109 }
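
// Illustrative (hypothetical) example: for a 4-byte pattern whose map is
// {0 -> 0, 1 -> 1, 2 -> 2, 3 -> 3} with LowestIdx = 0, every entry matches
// littleEndianByteAt, so isBigEndian returns false; the reversed map
// {0 -> 3, 1 -> 2, 2 -> 1, 3 -> 0} returns true; a map with a missing offset
// or a mixed ordering returns None.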
110
bool CombinerHelper::isLegalOrBeforeLegalizer(
    const LegalityQuery &Query) const {
  return !LI || LI->getAction(Query).Action == LegalizeActions::Legal;
}
115
void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                                    Register ToReg) const {
118 Observer.changingAllUsesOfReg(MRI, FromReg);
119
120 if (MRI.constrainRegAttrs(ToReg, FromReg))
121 MRI.replaceRegWith(FromReg, ToReg);
122 else
123 Builder.buildCopy(ToReg, FromReg);
124
125 Observer.finishedChangingAllUsesOfReg();
126 }
127
void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI,
                                      MachineOperand &FromRegOp,
                                      Register ToReg) const {
131 assert(FromRegOp.getParent() && "Expected an operand in an MI");
132 Observer.changingInstr(*FromRegOp.getParent());
133
134 FromRegOp.setReg(ToReg);
135
136 Observer.changedInstr(*FromRegOp.getParent());
137 }
138
bool CombinerHelper::tryCombineCopy(MachineInstr &MI) {
140 if (matchCombineCopy(MI)) {
141 applyCombineCopy(MI);
142 return true;
143 }
144 return false;
145 }
bool CombinerHelper::matchCombineCopy(MachineInstr &MI) {
147 if (MI.getOpcode() != TargetOpcode::COPY)
148 return false;
149 Register DstReg = MI.getOperand(0).getReg();
150 Register SrcReg = MI.getOperand(1).getReg();
151 return canReplaceReg(DstReg, SrcReg, MRI);
152 }
void CombinerHelper::applyCombineCopy(MachineInstr &MI) {
154 Register DstReg = MI.getOperand(0).getReg();
155 Register SrcReg = MI.getOperand(1).getReg();
156 MI.eraseFromParent();
157 replaceRegWith(MRI, DstReg, SrcReg);
158 }
159
bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) {
161 bool IsUndef = false;
162 SmallVector<Register, 4> Ops;
163 if (matchCombineConcatVectors(MI, IsUndef, Ops)) {
164 applyCombineConcatVectors(MI, IsUndef, Ops);
165 return true;
166 }
167 return false;
168 }
169
bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
                                               SmallVectorImpl<Register> &Ops) {
172 assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS &&
173 "Invalid instruction");
174 IsUndef = true;
175 MachineInstr *Undef = nullptr;
176
177 // Walk over all the operands of concat vectors and check if they are
178 // build_vector themselves or undef.
179 // Then collect their operands in Ops.
180 for (const MachineOperand &MO : MI.uses()) {
181 Register Reg = MO.getReg();
182 MachineInstr *Def = MRI.getVRegDef(Reg);
183 assert(Def && "Operand not defined");
184 switch (Def->getOpcode()) {
185 case TargetOpcode::G_BUILD_VECTOR:
186 IsUndef = false;
187 // Remember the operands of the build_vector to fold
188 // them into the yet-to-build flattened concat vectors.
189 for (const MachineOperand &BuildVecMO : Def->uses())
190 Ops.push_back(BuildVecMO.getReg());
191 break;
192 case TargetOpcode::G_IMPLICIT_DEF: {
193 LLT OpType = MRI.getType(Reg);
194 // Keep one undef value for all the undef operands.
195 if (!Undef) {
196 Builder.setInsertPt(*MI.getParent(), MI);
197 Undef = Builder.buildUndef(OpType.getScalarType());
198 }
199 assert(MRI.getType(Undef->getOperand(0).getReg()) ==
200 OpType.getScalarType() &&
201 "All undefs should have the same type");
202 // Break the undef vector in as many scalar elements as needed
203 // for the flattening.
204 for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements();
205 EltIdx != EltEnd; ++EltIdx)
206 Ops.push_back(Undef->getOperand(0).getReg());
207 break;
208 }
209 default:
210 return false;
211 }
212 }
213 return true;
214 }
void CombinerHelper::applyCombineConcatVectors(
    MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) {
  // We determined that the concat_vectors can be flattened.
  // Generate the flattened build_vector.
219 Register DstReg = MI.getOperand(0).getReg();
220 Builder.setInsertPt(*MI.getParent(), MI);
221 Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
222
  // Note: IsUndef is sort of redundant. We could have determined it by
  // checking that all Ops are undef. Alternatively, we could have
  // generated a build_vector of undefs and relied on another combine to
  // clean that up. For now, given we already gather this information
  // in tryCombineConcatVectors, just save compile time and issue the
  // right thing.
229 if (IsUndef)
230 Builder.buildUndef(NewDstReg);
231 else
232 Builder.buildBuildVector(NewDstReg, Ops);
233 MI.eraseFromParent();
234 replaceRegWith(MRI, DstReg, NewDstReg);
235 }
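
// For example (illustrative MIR, not taken from a test):
//   %v0:_(<2 x s32>) = G_BUILD_VECTOR %a(s32), %b(s32)
//   %v1:_(<2 x s32>) = G_BUILD_VECTOR %c(s32), %d(s32)
//   %cat:_(<4 x s32>) = G_CONCAT_VECTORS %v0, %v1
// is rewritten by the combine above into a single flattened build_vector:
//   %cat:_(<4 x s32>) = G_BUILD_VECTOR %a, %b, %c, %d
// Undef operands contribute scalar G_IMPLICIT_DEF elements instead.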
236
bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) {
238 SmallVector<Register, 4> Ops;
239 if (matchCombineShuffleVector(MI, Ops)) {
240 applyCombineShuffleVector(MI, Ops);
241 return true;
242 }
243 return false;
244 }
245
bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI,
                                               SmallVectorImpl<Register> &Ops) {
248 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
249 "Invalid instruction kind");
250 LLT DstType = MRI.getType(MI.getOperand(0).getReg());
251 Register Src1 = MI.getOperand(1).getReg();
252 LLT SrcType = MRI.getType(Src1);
253 // As bizarre as it may look, shuffle vector can actually produce
254 // scalar! This is because at the IR level a <1 x ty> shuffle
255 // vector is perfectly valid.
256 unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1;
257 unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1;
258
259 // If the resulting vector is smaller than the size of the source
260 // vectors being concatenated, we won't be able to replace the
261 // shuffle vector into a concat_vectors.
262 //
263 // Note: We may still be able to produce a concat_vectors fed by
264 // extract_vector_elt and so on. It is less clear that would
265 // be better though, so don't bother for now.
266 //
  // If the destination is a scalar, the size of the sources doesn't
  // matter. We will lower the shuffle to a plain copy. This will
  // work only if the source and destination have the same size. But
  // that's covered by the next condition.
  //
  // TODO: If the sizes of the source and destination don't match,
  // we could still emit an extract vector element in that case.
274 if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1)
275 return false;
276
277 // Check that the shuffle mask can be broken evenly between the
278 // different sources.
279 if (DstNumElts % SrcNumElts != 0)
280 return false;
281
282 // Mask length is a multiple of the source vector length.
283 // Check if the shuffle is some kind of concatenation of the input
284 // vectors.
285 unsigned NumConcat = DstNumElts / SrcNumElts;
286 SmallVector<int, 8> ConcatSrcs(NumConcat, -1);
287 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
288 for (unsigned i = 0; i != DstNumElts; ++i) {
289 int Idx = Mask[i];
290 // Undef value.
291 if (Idx < 0)
292 continue;
293 // Ensure the indices in each SrcType sized piece are sequential and that
294 // the same source is used for the whole piece.
295 if ((Idx % SrcNumElts != (i % SrcNumElts)) ||
296 (ConcatSrcs[i / SrcNumElts] >= 0 &&
297 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts)))
298 return false;
299 // Remember which source this index came from.
300 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts;
301 }
302
303 // The shuffle is concatenating multiple vectors together.
304 // Collect the different operands for that.
305 Register UndefReg;
306 Register Src2 = MI.getOperand(2).getReg();
307 for (auto Src : ConcatSrcs) {
308 if (Src < 0) {
309 if (!UndefReg) {
310 Builder.setInsertPt(*MI.getParent(), MI);
311 UndefReg = Builder.buildUndef(SrcType).getReg(0);
312 }
313 Ops.push_back(UndefReg);
314 } else if (Src == 0)
315 Ops.push_back(Src1);
316 else
317 Ops.push_back(Src2);
318 }
319 return true;
320 }
321
void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI,
                                               const ArrayRef<Register> Ops) {
324 Register DstReg = MI.getOperand(0).getReg();
325 Builder.setInsertPt(*MI.getParent(), MI);
326 Register NewDstReg = MRI.cloneVirtualRegister(DstReg);
327
328 if (Ops.size() == 1)
329 Builder.buildCopy(NewDstReg, Ops[0]);
330 else
331 Builder.buildMerge(NewDstReg, Ops);
332
333 MI.eraseFromParent();
334 replaceRegWith(MRI, DstReg, NewDstReg);
335 }
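
// For example (illustrative MIR): with two <2 x s32> sources, the mask
// (0, 1, 2, 3) simply concatenates them, so
//   %s:_(<4 x s32>) = G_SHUFFLE_VECTOR %v0(<2 x s32>), %v1, shufflemask(0, 1, 2, 3)
// becomes
//   %s:_(<4 x s32>) = G_CONCAT_VECTORS %v0(<2 x s32>), %v1(<2 x s32>)
// (emitted via buildMerge above); pieces whose mask entries are all undef are
// fed by a single G_IMPLICIT_DEF instead.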
336
337 namespace {
338
339 /// Select a preference between two uses. CurrentUse is the current preference
340 /// while *ForCandidate is attributes of the candidate under consideration.
PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse,
                                  const LLT TyForCandidate,
                                  unsigned OpcodeForCandidate,
                                  MachineInstr *MIForCandidate) {
345 if (!CurrentUse.Ty.isValid()) {
346 if (CurrentUse.ExtendOpcode == OpcodeForCandidate ||
347 CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT)
348 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
349 return CurrentUse;
350 }
351
352 // We permit the extend to hoist through basic blocks but this is only
353 // sensible if the target has extending loads. If you end up lowering back
354 // into a load and extend during the legalizer then the end result is
355 // hoisting the extend up to the load.
356
357 // Prefer defined extensions to undefined extensions as these are more
358 // likely to reduce the number of instructions.
359 if (OpcodeForCandidate == TargetOpcode::G_ANYEXT &&
360 CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT)
361 return CurrentUse;
362 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT &&
363 OpcodeForCandidate != TargetOpcode::G_ANYEXT)
364 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
365
366 // Prefer sign extensions to zero extensions as sign-extensions tend to be
367 // more expensive.
368 if (CurrentUse.Ty == TyForCandidate) {
369 if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT &&
370 OpcodeForCandidate == TargetOpcode::G_ZEXT)
371 return CurrentUse;
372 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT &&
373 OpcodeForCandidate == TargetOpcode::G_SEXT)
374 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
375 }
376
377 // This is potentially target specific. We've chosen the largest type
378 // because G_TRUNC is usually free. One potential catch with this is that
379 // some targets have a reduced number of larger registers than smaller
380 // registers and this choice potentially increases the live-range for the
381 // larger value.
382 if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) {
383 return {TyForCandidate, OpcodeForCandidate, MIForCandidate};
384 }
385 return CurrentUse;
386 }
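
// To illustrate the ordering above with a made-up case: among the uses of a
// load, a G_SEXT to s32 is preferred over a G_ZEXT to s32 (same type, sign
// extension wins), a G_SEXT to s64 is preferred over a G_SEXT to s32 (wider
// type wins), and any defined extension is preferred over a G_ANYEXT.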
387
388 /// Find a suitable place to insert some instructions and insert them. This
389 /// function accounts for special cases like inserting before a PHI node.
390 /// The current strategy for inserting before PHI's is to duplicate the
391 /// instructions for each predecessor. However, while that's ok for G_TRUNC
392 /// on most targets since it generally requires no code, other targets/cases may
393 /// want to try harder to find a dominating block.
static void InsertInsnsWithoutSideEffectsBeforeUse(
    MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO,
    std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator,
                       MachineOperand &UseMO)>
        Inserter) {
399 MachineInstr &UseMI = *UseMO.getParent();
400
401 MachineBasicBlock *InsertBB = UseMI.getParent();
402
403 // If the use is a PHI then we want the predecessor block instead.
404 if (UseMI.isPHI()) {
405 MachineOperand *PredBB = std::next(&UseMO);
406 InsertBB = PredBB->getMBB();
407 }
408
409 // If the block is the same block as the def then we want to insert just after
410 // the def instead of at the start of the block.
411 if (InsertBB == DefMI.getParent()) {
412 MachineBasicBlock::iterator InsertPt = &DefMI;
413 Inserter(InsertBB, std::next(InsertPt), UseMO);
414 return;
415 }
416
417 // Otherwise we want the start of the BB
418 Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO);
419 }
420 } // end anonymous namespace
421
bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) {
423 PreferredTuple Preferred;
424 if (matchCombineExtendingLoads(MI, Preferred)) {
425 applyCombineExtendingLoads(MI, Preferred);
426 return true;
427 }
428 return false;
429 }
430
bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI,
                                                PreferredTuple &Preferred) {
433 // We match the loads and follow the uses to the extend instead of matching
434 // the extends and following the def to the load. This is because the load
435 // must remain in the same position for correctness (unless we also add code
436 // to find a safe place to sink it) whereas the extend is freely movable.
437 // It also prevents us from duplicating the load for the volatile case or just
438 // for performance.
439
440 if (MI.getOpcode() != TargetOpcode::G_LOAD &&
441 MI.getOpcode() != TargetOpcode::G_SEXTLOAD &&
442 MI.getOpcode() != TargetOpcode::G_ZEXTLOAD)
443 return false;
444
445 auto &LoadValue = MI.getOperand(0);
446 assert(LoadValue.isReg() && "Result wasn't a register?");
447
448 LLT LoadValueTy = MRI.getType(LoadValue.getReg());
449 if (!LoadValueTy.isScalar())
450 return false;
451
452 // Most architectures are going to legalize <s8 loads into at least a 1 byte
453 // load, and the MMOs can only describe memory accesses in multiples of bytes.
454 // If we try to perform extload combining on those, we can end up with
455 // %a(s8) = extload %ptr (load 1 byte from %ptr)
456 // ... which is an illegal extload instruction.
457 if (LoadValueTy.getSizeInBits() < 8)
458 return false;
459
460 // For non power-of-2 types, they will very likely be legalized into multiple
461 // loads. Don't bother trying to match them into extending loads.
462 if (!isPowerOf2_32(LoadValueTy.getSizeInBits()))
463 return false;
464
  // Find the preferred type aside from the any-extends (unless it's the only
  // one) and non-extending ops. We'll emit an extending load to that type and
  // emit a variant of (extend (trunc X)) for the others according to the
  // relative type sizes. At the same time, pick an extend to use based on the
  // extend involved in the chosen type.
470 unsigned PreferredOpcode = MI.getOpcode() == TargetOpcode::G_LOAD
471 ? TargetOpcode::G_ANYEXT
472 : MI.getOpcode() == TargetOpcode::G_SEXTLOAD
473 ? TargetOpcode::G_SEXT
474 : TargetOpcode::G_ZEXT;
475 Preferred = {LLT(), PreferredOpcode, nullptr};
476 for (auto &UseMI : MRI.use_nodbg_instructions(LoadValue.getReg())) {
477 if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||
478 UseMI.getOpcode() == TargetOpcode::G_ZEXT ||
479 (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) {
480 // Check for legality.
481 if (LI) {
482 LegalityQuery::MemDesc MMDesc;
483 const auto &MMO = **MI.memoperands_begin();
484 MMDesc.SizeInBits = MMO.getSizeInBits();
485 MMDesc.AlignInBits = MMO.getAlign().value() * 8;
486 MMDesc.Ordering = MMO.getOrdering();
487 LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg());
488 LLT SrcTy = MRI.getType(MI.getOperand(1).getReg());
489 if (LI->getAction({MI.getOpcode(), {UseTy, SrcTy}, {MMDesc}}).Action !=
490 LegalizeActions::Legal)
491 continue;
492 }
493 Preferred = ChoosePreferredUse(Preferred,
494 MRI.getType(UseMI.getOperand(0).getReg()),
495 UseMI.getOpcode(), &UseMI);
496 }
497 }
498
499 // There were no extends
500 if (!Preferred.MI)
501 return false;
  // It should be impossible to choose an extend without selecting a different
  // type since by definition the result of an extend is larger.
504 assert(Preferred.Ty != LoadValueTy && "Extending to same type?");
505
506 LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI);
507 return true;
508 }
509
void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI,
                                                PreferredTuple &Preferred) {
512 // Rewrite the load to the chosen extending load.
513 Register ChosenDstReg = Preferred.MI->getOperand(0).getReg();
514
515 // Inserter to insert a truncate back to the original type at a given point
516 // with some basic CSE to limit truncate duplication to one per BB.
517 DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns;
518 auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB,
519 MachineBasicBlock::iterator InsertBefore,
520 MachineOperand &UseMO) {
521 MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB);
522 if (PreviouslyEmitted) {
523 Observer.changingInstr(*UseMO.getParent());
524 UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg());
525 Observer.changedInstr(*UseMO.getParent());
526 return;
527 }
528
529 Builder.setInsertPt(*InsertIntoBB, InsertBefore);
530 Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg());
531 MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg);
532 EmittedInsns[InsertIntoBB] = NewMI;
533 replaceRegOpWith(MRI, UseMO, NewDstReg);
534 };
535
536 Observer.changingInstr(MI);
537 MI.setDesc(
538 Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT
539 ? TargetOpcode::G_SEXTLOAD
540 : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT
541 ? TargetOpcode::G_ZEXTLOAD
542 : TargetOpcode::G_LOAD));
543
544 // Rewrite all the uses to fix up the types.
545 auto &LoadValue = MI.getOperand(0);
546 SmallVector<MachineOperand *, 4> Uses;
547 for (auto &UseMO : MRI.use_operands(LoadValue.getReg()))
548 Uses.push_back(&UseMO);
549
550 for (auto *UseMO : Uses) {
551 MachineInstr *UseMI = UseMO->getParent();
552
553 // If the extend is compatible with the preferred extend then we should fix
554 // up the type and extend so that it uses the preferred use.
555 if (UseMI->getOpcode() == Preferred.ExtendOpcode ||
556 UseMI->getOpcode() == TargetOpcode::G_ANYEXT) {
557 Register UseDstReg = UseMI->getOperand(0).getReg();
558 MachineOperand &UseSrcMO = UseMI->getOperand(1);
559 const LLT UseDstTy = MRI.getType(UseDstReg);
560 if (UseDstReg != ChosenDstReg) {
561 if (Preferred.Ty == UseDstTy) {
562 // If the use has the same type as the preferred use, then merge
563 // the vregs and erase the extend. For example:
564 // %1:_(s8) = G_LOAD ...
565 // %2:_(s32) = G_SEXT %1(s8)
566 // %3:_(s32) = G_ANYEXT %1(s8)
567 // ... = ... %3(s32)
568 // rewrites to:
569 // %2:_(s32) = G_SEXTLOAD ...
570 // ... = ... %2(s32)
571 replaceRegWith(MRI, UseDstReg, ChosenDstReg);
572 Observer.erasingInstr(*UseMO->getParent());
573 UseMO->getParent()->eraseFromParent();
574 } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) {
575 // If the preferred size is smaller, then keep the extend but extend
576 // from the result of the extending load. For example:
577 // %1:_(s8) = G_LOAD ...
578 // %2:_(s32) = G_SEXT %1(s8)
579 // %3:_(s64) = G_ANYEXT %1(s8)
580 // ... = ... %3(s64)
          // rewrites to:
582 // %2:_(s32) = G_SEXTLOAD ...
583 // %3:_(s64) = G_ANYEXT %2:_(s32)
584 // ... = ... %3(s64)
585 replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg);
        } else {
          // If the preferred size is larger, then insert a truncate. For
          // example:
          // %1:_(s8) = G_LOAD ...
          // %2:_(s64) = G_SEXT %1(s8)
          // %3:_(s32) = G_ZEXT %1(s8)
          // ... = ... %3(s32)
          // rewrites to:
          // %2:_(s64) = G_SEXTLOAD ...
          // %4:_(s8) = G_TRUNC %2:_(s64)
          // %3:_(s32) = G_ZEXT %4:_(s8)
          // ... = ... %3(s32)
598 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO,
599 InsertTruncAt);
600 }
601 continue;
602 }
603 // The use is (one of) the uses of the preferred use we chose earlier.
604 // We're going to update the load to def this value later so just erase
605 // the old extend.
606 Observer.erasingInstr(*UseMO->getParent());
607 UseMO->getParent()->eraseFromParent();
608 continue;
609 }
610
611 // The use isn't an extend. Truncate back to the type we originally loaded.
612 // This is free on many targets.
613 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt);
614 }
615
616 MI.getOperand(0).setReg(ChosenDstReg);
617 Observer.changedInstr(MI);
618 }
619
bool CombinerHelper::isPredecessor(const MachineInstr &DefMI,
                                   const MachineInstr &UseMI) {
622 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
623 "shouldn't consider debug uses");
624 assert(DefMI.getParent() == UseMI.getParent());
625 if (&DefMI == &UseMI)
626 return false;
627 const MachineBasicBlock &MBB = *DefMI.getParent();
628 auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) {
629 return &MI == &DefMI || &MI == &UseMI;
630 });
631 if (DefOrUse == MBB.end())
632 llvm_unreachable("Block must contain both DefMI and UseMI!");
633 return &*DefOrUse == &DefMI;
634 }
635
bool CombinerHelper::dominates(const MachineInstr &DefMI,
                               const MachineInstr &UseMI) {
638 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() &&
639 "shouldn't consider debug uses");
640 if (MDT)
641 return MDT->dominates(&DefMI, &UseMI);
642 else if (DefMI.getParent() != UseMI.getParent())
643 return false;
644
645 return isPredecessor(DefMI, UseMI);
646 }
647
bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) {
649 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
650 Register SrcReg = MI.getOperand(1).getReg();
651 Register LoadUser = SrcReg;
652
653 if (MRI.getType(SrcReg).isVector())
654 return false;
655
656 Register TruncSrc;
657 if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))
658 LoadUser = TruncSrc;
659
660 uint64_t SizeInBits = MI.getOperand(2).getImm();
661 // If the source is a G_SEXTLOAD from the same bit width, then we don't
662 // need any extend at all, just a truncate.
663 if (auto *LoadMI = getOpcodeDef(TargetOpcode::G_SEXTLOAD, LoadUser, MRI)) {
664 const auto &MMO = **LoadMI->memoperands_begin();
665 // If truncating more than the original extended value, abort.
666 if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < MMO.getSizeInBits())
667 return false;
668 if (MMO.getSizeInBits() == SizeInBits)
669 return true;
670 }
671 return false;
672 }
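
// Illustrative MIR for the match above:
//   %ld:_(s32) = G_SEXTLOAD %ptr(p0) :: (load 1)
//   %t:_(s16) = G_TRUNC %ld(s32)
//   %x:_(s16) = G_SEXT_INREG %t, 8
// The value is already sign-extended from 8 bits by the load, so
// applySextTruncSextLoad below replaces %x with a plain copy of %t.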
673
bool CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) {
675 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
676 Builder.setInstrAndDebugLoc(MI);
677 Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
678 MI.eraseFromParent();
679 return true;
680 }
681
bool CombinerHelper::matchSextInRegOfLoad(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
684 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
685
686 // Only supports scalars for now.
687 if (MRI.getType(MI.getOperand(0).getReg()).isVector())
688 return false;
689
690 Register SrcReg = MI.getOperand(1).getReg();
691 MachineInstr *LoadDef = getOpcodeDef(TargetOpcode::G_LOAD, SrcReg, MRI);
692 if (!LoadDef || !MRI.hasOneNonDBGUse(LoadDef->getOperand(0).getReg()))
693 return false;
694
695 // If the sign extend extends from a narrower width than the load's width,
696 // then we can narrow the load width when we combine to a G_SEXTLOAD.
697 auto &MMO = **LoadDef->memoperands_begin();
698 // Don't do this for non-simple loads.
699 if (MMO.isAtomic() || MMO.isVolatile())
700 return false;
701
702 // Avoid widening the load at all.
703 unsigned NewSizeBits =
704 std::min((uint64_t)MI.getOperand(2).getImm(), MMO.getSizeInBits());
705
706 // Don't generate G_SEXTLOADs with a < 1 byte width.
707 if (NewSizeBits < 8)
708 return false;
709 // Don't bother creating a non-power-2 sextload, it will likely be broken up
710 // anyway for most targets.
711 if (!isPowerOf2_32(NewSizeBits))
712 return false;
713 MatchInfo = std::make_tuple(LoadDef->getOperand(0).getReg(), NewSizeBits);
714 return true;
715 }
716
bool CombinerHelper::applySextInRegOfLoad(
    MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
719 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG);
720 Register LoadReg;
721 unsigned ScalarSizeBits;
722 std::tie(LoadReg, ScalarSizeBits) = MatchInfo;
723 auto *LoadDef = MRI.getVRegDef(LoadReg);
724 assert(LoadDef && "Expected a load reg");
725
726 // If we have the following:
727 // %ld = G_LOAD %ptr, (load 2)
728 // %ext = G_SEXT_INREG %ld, 8
729 // ==>
730 // %ld = G_SEXTLOAD %ptr (load 1)
731
732 auto &MMO = **LoadDef->memoperands_begin();
733 Builder.setInstrAndDebugLoc(MI);
734 auto &MF = Builder.getMF();
735 auto PtrInfo = MMO.getPointerInfo();
736 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8);
737 Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(),
738 LoadDef->getOperand(1).getReg(), *NewMMO);
739 MI.eraseFromParent();
740 return true;
741 }
742
bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr,
                                            Register &Base, Register &Offset) {
745 auto &MF = *MI.getParent()->getParent();
746 const auto &TLI = *MF.getSubtarget().getTargetLowering();
747
748 #ifndef NDEBUG
749 unsigned Opcode = MI.getOpcode();
750 assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
751 Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
752 #endif
753
754 Base = MI.getOperand(1).getReg();
755 MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base);
756 if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
757 return false;
758
759 LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI);
  // FIXME: The following use traversal needs a bail out for pathological cases.
761 for (auto &Use : MRI.use_nodbg_instructions(Base)) {
762 if (Use.getOpcode() != TargetOpcode::G_PTR_ADD)
763 continue;
764
765 Offset = Use.getOperand(2).getReg();
766 if (!ForceLegalIndexing &&
767 !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) {
768 LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: "
769 << Use);
770 continue;
771 }
772
773 // Make sure the offset calculation is before the potentially indexed op.
774 // FIXME: we really care about dependency here. The offset calculation might
775 // be movable.
776 MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset);
777 if (!OffsetDef || !dominates(*OffsetDef, MI)) {
778 LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: "
779 << Use);
780 continue;
781 }
782
783 // FIXME: check whether all uses of Base are load/store with foldable
784 // addressing modes. If so, using the normal addr-modes is better than
785 // forming an indexed one.
786
787 bool MemOpDominatesAddrUses = true;
788 for (auto &PtrAddUse :
789 MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) {
790 if (!dominates(MI, PtrAddUse)) {
791 MemOpDominatesAddrUses = false;
792 break;
793 }
794 }
795
796 if (!MemOpDominatesAddrUses) {
797 LLVM_DEBUG(
798 dbgs() << " Ignoring candidate as memop does not dominate uses: "
799 << Use);
800 continue;
801 }
802
803 LLVM_DEBUG(dbgs() << " Found match: " << Use);
804 Addr = Use.getOperand(0).getReg();
805 return true;
806 }
807
808 return false;
809 }
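
// Shape of a post-index candidate (illustrative):
//   %v:_(s32) = G_LOAD %base(p0) :: (load 4)
//   %addr:_(p0) = G_PTR_ADD %base, %offset
// If %offset is defined before the load and the load dominates all users of
// %addr, the pair can later be folded into a single G_INDEXED_LOAD that yields
// both the loaded value and the updated address.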
810
bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr,
                                           Register &Base, Register &Offset) {
813 auto &MF = *MI.getParent()->getParent();
814 const auto &TLI = *MF.getSubtarget().getTargetLowering();
815
816 #ifndef NDEBUG
817 unsigned Opcode = MI.getOpcode();
818 assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD ||
819 Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE);
820 #endif
821
822 Addr = MI.getOperand(1).getReg();
823 MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI);
824 if (!AddrDef || MRI.hasOneNonDBGUse(Addr))
825 return false;
826
827 Base = AddrDef->getOperand(1).getReg();
828 Offset = AddrDef->getOperand(2).getReg();
829
830 LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI);
831
832 if (!ForceLegalIndexing &&
833 !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) {
834 LLVM_DEBUG(dbgs() << " Skipping, not legal for target");
835 return false;
836 }
837
838 MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI);
839 if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
840 LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway.");
841 return false;
842 }
843
844 if (MI.getOpcode() == TargetOpcode::G_STORE) {
845 // Would require a copy.
846 if (Base == MI.getOperand(0).getReg()) {
847 LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway.");
848 return false;
849 }
850
851 // We're expecting one use of Addr in MI, but it could also be the
852 // value stored, which isn't actually dominated by the instruction.
853 if (MI.getOperand(0).getReg() == Addr) {
854 LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses");
855 return false;
856 }
857 }
858
859 // FIXME: check whether all uses of the base pointer are constant PtrAdds.
860 // That might allow us to end base's liveness here by adjusting the constant.
861
862 for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) {
863 if (!dominates(MI, UseMI)) {
864 LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses.");
865 return false;
866 }
867 }
868
869 return true;
870 }
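
// Shape of a pre-index candidate (illustrative):
//   %addr:_(p0) = G_PTR_ADD %base, %offset
//   G_STORE %val(s32), %addr(p0) :: (store 4)
// Provided the addressing mode is legal, the base is not a frame index, and
// the mem-op dominates the other users of %addr, this can later become a
// G_INDEXED_STORE that writes %val and also produces the updated address.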
871
bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) {
873 IndexedLoadStoreMatchInfo MatchInfo;
874 if (matchCombineIndexedLoadStore(MI, MatchInfo)) {
875 applyCombineIndexedLoadStore(MI, MatchInfo);
876 return true;
877 }
878 return false;
879 }
880
bool CombinerHelper::matchCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
882 unsigned Opcode = MI.getOpcode();
883 if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD &&
884 Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE)
885 return false;
886
887 // For now, no targets actually support these opcodes so don't waste time
888 // running these unless we're forced to for testing.
889 if (!ForceLegalIndexing)
890 return false;
891
892 MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
893 MatchInfo.Offset);
894 if (!MatchInfo.IsPre &&
895 !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base,
896 MatchInfo.Offset))
897 return false;
898
899 return true;
900 }
901
void CombinerHelper::applyCombineIndexedLoadStore(
    MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) {
904 MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr);
905 MachineIRBuilder MIRBuilder(MI);
906 unsigned Opcode = MI.getOpcode();
907 bool IsStore = Opcode == TargetOpcode::G_STORE;
908 unsigned NewOpcode;
909 switch (Opcode) {
910 case TargetOpcode::G_LOAD:
911 NewOpcode = TargetOpcode::G_INDEXED_LOAD;
912 break;
913 case TargetOpcode::G_SEXTLOAD:
914 NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD;
915 break;
916 case TargetOpcode::G_ZEXTLOAD:
917 NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD;
918 break;
919 case TargetOpcode::G_STORE:
920 NewOpcode = TargetOpcode::G_INDEXED_STORE;
921 break;
922 default:
923 llvm_unreachable("Unknown load/store opcode");
924 }
925
926 auto MIB = MIRBuilder.buildInstr(NewOpcode);
927 if (IsStore) {
928 MIB.addDef(MatchInfo.Addr);
929 MIB.addUse(MI.getOperand(0).getReg());
930 } else {
931 MIB.addDef(MI.getOperand(0).getReg());
932 MIB.addDef(MatchInfo.Addr);
933 }
934
935 MIB.addUse(MatchInfo.Base);
936 MIB.addUse(MatchInfo.Offset);
937 MIB.addImm(MatchInfo.IsPre);
938 MI.eraseFromParent();
939 AddrDef.eraseFromParent();
940
  LLVM_DEBUG(dbgs() << " Combined to indexed operation");
942 }
943
bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI) {
945 if (MI.getOpcode() != TargetOpcode::G_BR)
946 return false;
947
948 // Try to match the following:
949 // bb1:
950 // G_BRCOND %c1, %bb2
951 // G_BR %bb3
952 // bb2:
953 // ...
954 // bb3:
955
  // The above pattern does not have a fallthrough to the successor bb2, so a
  // branch is always taken no matter which path is followed. Here we try to
  // find and replace that pattern with a conditional branch to bb3, otherwise
  // falling through to bb2. This is generally better for branch predictors.
960
961 MachineBasicBlock *MBB = MI.getParent();
962 MachineBasicBlock::iterator BrIt(MI);
963 if (BrIt == MBB->begin())
964 return false;
965 assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator");
966
967 MachineInstr *BrCond = &*std::prev(BrIt);
968 if (BrCond->getOpcode() != TargetOpcode::G_BRCOND)
969 return false;
970
971 // Check that the next block is the conditional branch target. Also make sure
972 // that it isn't the same as the G_BR's target (otherwise, this will loop.)
973 MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB();
974 return BrCondTarget != MI.getOperand(0).getMBB() &&
975 MBB->isLayoutSuccessor(BrCondTarget);
976 }
977
void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI) {
979 MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB();
980 MachineBasicBlock::iterator BrIt(MI);
981 MachineInstr *BrCond = &*std::prev(BrIt);
982
983 Builder.setInstrAndDebugLoc(*BrCond);
984 LLT Ty = MRI.getType(BrCond->getOperand(0).getReg());
985 // FIXME: Does int/fp matter for this? If so, we might need to restrict
986 // this to i1 only since we might not know for sure what kind of
987 // compare generated the condition value.
988 auto True = Builder.buildConstant(
989 Ty, getICmpTrueVal(getTargetLowering(), false, false));
990 auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True);
991
992 auto *FallthroughBB = BrCond->getOperand(1).getMBB();
993 Observer.changingInstr(MI);
994 MI.getOperand(0).setMBB(FallthroughBB);
995 Observer.changedInstr(MI);
996
997 // Change the conditional branch to use the inverted condition and
998 // new target block.
999 Observer.changingInstr(*BrCond);
1000 BrCond->getOperand(0).setReg(Xor.getReg(0));
1001 BrCond->getOperand(1).setMBB(BrTarget);
1002 Observer.changedInstr(*BrCond);
1003 }
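
// For the pattern sketched in the matcher, the rewrite looks like this
// (illustrative):
//   bb1:
//     %t = G_CONSTANT <icmp-true value for the target>
//     %inv = G_XOR %c1, %t
//     G_BRCOND %inv, %bb3
//     G_BR %bb2
//   bb2:   ; layout successor
// The conditional branch now targets bb3 and the unconditional branch is
// retargeted at the layout successor bb2, where it can be removed later.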
1004
static bool shouldLowerMemFuncForSize(const MachineFunction &MF) {
1006 // On Darwin, -Os means optimize for size without hurting performance, so
1007 // only really optimize for size when -Oz (MinSize) is used.
1008 if (MF.getTarget().getTargetTriple().isOSDarwin())
1009 return MF.getFunction().hasMinSize();
1010 return MF.getFunction().hasOptSize();
1011 }
1012
1013 // Returns a list of types to use for memory op lowering in MemOps. A partial
1014 // port of findOptimalMemOpLowering in TargetLowering.
static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps,
                                          unsigned Limit, const MemOp &Op,
                                          unsigned DstAS, unsigned SrcAS,
                                          const AttributeList &FuncAttributes,
                                          const TargetLowering &TLI) {
1020 if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
1021 return false;
1022
1023 LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes);
1024
1025 if (Ty == LLT()) {
1026 // Use the largest scalar type whose alignment constraints are satisfied.
1027 // We only need to check DstAlign here as SrcAlign is always greater or
1028 // equal to DstAlign (or zero).
1029 Ty = LLT::scalar(64);
1030 if (Op.isFixedDstAlign())
1031 while (Op.getDstAlign() < Ty.getSizeInBytes() &&
1032 !TLI.allowsMisalignedMemoryAccesses(Ty, DstAS, Op.getDstAlign()))
1033 Ty = LLT::scalar(Ty.getSizeInBytes());
1034 assert(Ty.getSizeInBits() > 0 && "Could not find valid type");
1035 // FIXME: check for the largest legal type we can load/store to.
1036 }
1037
1038 unsigned NumMemOps = 0;
1039 uint64_t Size = Op.size();
1040 while (Size) {
1041 unsigned TySize = Ty.getSizeInBytes();
1042 while (TySize > Size) {
      // For now, only use non-vector loads / stores for the left-over pieces.
1044 LLT NewTy = Ty;
1045 // FIXME: check for mem op safety and legality of the types. Not all of
1046 // SDAGisms map cleanly to GISel concepts.
1047 if (NewTy.isVector())
1048 NewTy = NewTy.getSizeInBits() > 64 ? LLT::scalar(64) : LLT::scalar(32);
1049 NewTy = LLT::scalar(PowerOf2Floor(NewTy.getSizeInBits() - 1));
1050 unsigned NewTySize = NewTy.getSizeInBytes();
1051 assert(NewTySize > 0 && "Could not find appropriate type");
1052
1053 // If the new LLT cannot cover all of the remaining bits, then consider
1054 // issuing a (or a pair of) unaligned and overlapping load / store.
1055 bool Fast;
      // Need to get a VT equivalent for allowsMisalignedMemoryAccesses().
1057 MVT VT = getMVTForLLT(Ty);
1058 if (NumMemOps && Op.allowOverlap() && NewTySize < Size &&
1059 TLI.allowsMisalignedMemoryAccesses(
1060 VT, DstAS, Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0,
1061 MachineMemOperand::MONone, &Fast) &&
1062 Fast)
1063 TySize = Size;
1064 else {
1065 Ty = NewTy;
1066 TySize = NewTySize;
1067 }
1068 }
1069
1070 if (++NumMemOps > Limit)
1071 return false;
1072
1073 MemOps.push_back(Ty);
1074 Size -= TySize;
1075 }
1076
1077 return true;
1078 }
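
// For instance (illustrative), a 16-byte non-overlapping copy for which the
// target reports s64 as the optimal type is broken into MemOps = {s64, s64};
// a 12-byte copy becomes {s64, s32}, and the final piece may instead overlap
// the previous one when misaligned accesses are fast and allowed.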
1079
static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
1081 if (Ty.isVector())
1082 return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
1083 Ty.getNumElements());
1084 return IntegerType::get(C, Ty.getSizeInBits());
1085 }
1086
1087 // Get a vectorized representation of the memset value operand, GISel edition.
static Register getMemsetValue(Register Val, LLT Ty, MachineIRBuilder &MIB) {
1089 MachineRegisterInfo &MRI = *MIB.getMRI();
1090 unsigned NumBits = Ty.getScalarSizeInBits();
1091 auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
1092 if (!Ty.isVector() && ValVRegAndVal) {
1093 APInt Scalar = ValVRegAndVal->Value.truncOrSelf(8);
1094 APInt SplatVal = APInt::getSplat(NumBits, Scalar);
1095 return MIB.buildConstant(Ty, SplatVal).getReg(0);
1096 }
1097
1098 // Extend the byte value to the larger type, and then multiply by a magic
1099 // value 0x010101... in order to replicate it across every byte.
1100 // Unless it's zero, in which case just emit a larger G_CONSTANT 0.
1101 if (ValVRegAndVal && ValVRegAndVal->Value == 0) {
1102 return MIB.buildConstant(Ty, 0).getReg(0);
1103 }
1104
1105 LLT ExtType = Ty.getScalarType();
1106 auto ZExt = MIB.buildZExtOrTrunc(ExtType, Val);
1107 if (NumBits > 8) {
1108 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
1109 auto MagicMI = MIB.buildConstant(ExtType, Magic);
1110 Val = MIB.buildMul(ExtType, ZExt, MagicMI).getReg(0);
1111 }
1112
1113 // For vector types create a G_BUILD_VECTOR.
1114 if (Ty.isVector())
1115 Val = MIB.buildSplatVector(Ty, Val).getReg(0);
1116
1117 return Val;
1118 }
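
// For example (illustrative): memset value 0xAB replicated into an s32 store
// type becomes the constant 0xABABABAB; for a non-constant byte %v and s32,
// the code above emits zext(%v) * 0x01010101 to splat the byte, and wraps the
// result in a splat build_vector for vector store types.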
1119
bool CombinerHelper::optimizeMemset(MachineInstr &MI, Register Dst,
                                    Register Val, unsigned KnownLen,
                                    Align Alignment, bool IsVolatile) {
1123 auto &MF = *MI.getParent()->getParent();
1124 const auto &TLI = *MF.getSubtarget().getTargetLowering();
1125 auto &DL = MF.getDataLayout();
1126 LLVMContext &C = MF.getFunction().getContext();
1127
1128 assert(KnownLen != 0 && "Have a zero length memset length!");
1129
1130 bool DstAlignCanChange = false;
1131 MachineFrameInfo &MFI = MF.getFrameInfo();
1132 bool OptSize = shouldLowerMemFuncForSize(MF);
1133
1134 MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1135 if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1136 DstAlignCanChange = true;
1137
1138 unsigned Limit = TLI.getMaxStoresPerMemset(OptSize);
1139 std::vector<LLT> MemOps;
1140
1141 const auto &DstMMO = **MI.memoperands_begin();
1142 MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1143
1144 auto ValVRegAndVal = getConstantVRegValWithLookThrough(Val, MRI);
1145 bool IsZeroVal = ValVRegAndVal && ValVRegAndVal->Value == 0;
1146
1147 if (!findGISelOptimalMemOpLowering(MemOps, Limit,
1148 MemOp::Set(KnownLen, DstAlignCanChange,
1149 Alignment,
1150 /*IsZeroMemset=*/IsZeroVal,
1151 /*IsVolatile=*/IsVolatile),
1152 DstPtrInfo.getAddrSpace(), ~0u,
1153 MF.getFunction().getAttributes(), TLI))
1154 return false;
1155
1156 if (DstAlignCanChange) {
1157 // Get an estimate of the type from the LLT.
1158 Type *IRTy = getTypeForLLT(MemOps[0], C);
1159 Align NewAlign = DL.getABITypeAlign(IRTy);
1160 if (NewAlign > Alignment) {
1161 Alignment = NewAlign;
1162 unsigned FI = FIDef->getOperand(1).getIndex();
1163 // Give the stack frame object a larger alignment if needed.
1164 if (MFI.getObjectAlign(FI) < Alignment)
1165 MFI.setObjectAlignment(FI, Alignment);
1166 }
1167 }
1168
1169 MachineIRBuilder MIB(MI);
1170 // Find the largest store and generate the bit pattern for it.
1171 LLT LargestTy = MemOps[0];
1172 for (unsigned i = 1; i < MemOps.size(); i++)
1173 if (MemOps[i].getSizeInBits() > LargestTy.getSizeInBits())
1174 LargestTy = MemOps[i];
1175
1176 // The memset stored value is always defined as an s8, so in order to make it
1177 // work with larger store types we need to repeat the bit pattern across the
1178 // wider type.
1179 Register MemSetValue = getMemsetValue(Val, LargestTy, MIB);
1180
1181 if (!MemSetValue)
1182 return false;
1183
1184 // Generate the stores. For each store type in the list, we generate the
1185 // matching store of that type to the destination address.
1186 LLT PtrTy = MRI.getType(Dst);
1187 unsigned DstOff = 0;
1188 unsigned Size = KnownLen;
1189 for (unsigned I = 0; I < MemOps.size(); I++) {
1190 LLT Ty = MemOps[I];
1191 unsigned TySize = Ty.getSizeInBytes();
1192 if (TySize > Size) {
1193 // Issuing an unaligned load / store pair that overlaps with the previous
1194 // pair. Adjust the offset accordingly.
1195 assert(I == MemOps.size() - 1 && I != 0);
1196 DstOff -= TySize - Size;
1197 }
1198
1199 // If this store is smaller than the largest store see whether we can get
1200 // the smaller value for free with a truncate.
1201 Register Value = MemSetValue;
1202 if (Ty.getSizeInBits() < LargestTy.getSizeInBits()) {
1203 MVT VT = getMVTForLLT(Ty);
1204 MVT LargestVT = getMVTForLLT(LargestTy);
1205 if (!LargestTy.isVector() && !Ty.isVector() &&
1206 TLI.isTruncateFree(LargestVT, VT))
1207 Value = MIB.buildTrunc(Ty, MemSetValue).getReg(0);
1208 else
1209 Value = getMemsetValue(Val, Ty, MIB);
1210 if (!Value)
1211 return false;
1212 }
1213
1214 auto *StoreMMO =
1215 MF.getMachineMemOperand(&DstMMO, DstOff, Ty.getSizeInBytes());
1216
1217 Register Ptr = Dst;
1218 if (DstOff != 0) {
1219 auto Offset =
1220 MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), DstOff);
1221 Ptr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1222 }
1223
1224 MIB.buildStore(Value, Ptr, *StoreMMO);
1225 DstOff += Ty.getSizeInBytes();
1226 Size -= TySize;
1227 }
1228
1229 MI.eraseFromParent();
1230 return true;
1231 }
1232
bool CombinerHelper::optimizeMemcpy(MachineInstr &MI, Register Dst,
                                    Register Src, unsigned KnownLen,
                                    Align DstAlign, Align SrcAlign,
                                    bool IsVolatile) {
1237 auto &MF = *MI.getParent()->getParent();
1238 const auto &TLI = *MF.getSubtarget().getTargetLowering();
1239 auto &DL = MF.getDataLayout();
1240 LLVMContext &C = MF.getFunction().getContext();
1241
1242 assert(KnownLen != 0 && "Have a zero length memcpy length!");
1243
1244 bool DstAlignCanChange = false;
1245 MachineFrameInfo &MFI = MF.getFrameInfo();
1246 bool OptSize = shouldLowerMemFuncForSize(MF);
1247 Align Alignment = commonAlignment(DstAlign, SrcAlign);
1248
1249 MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1250 if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1251 DstAlignCanChange = true;
1252
1253 // FIXME: infer better src pointer alignment like SelectionDAG does here.
1254 // FIXME: also use the equivalent of isMemSrcFromConstant and alwaysinlining
1255 // if the memcpy is in a tail call position.
1256
1257 unsigned Limit = TLI.getMaxStoresPerMemcpy(OptSize);
1258 std::vector<LLT> MemOps;
1259
1260 const auto &DstMMO = **MI.memoperands_begin();
1261 const auto &SrcMMO = **std::next(MI.memoperands_begin());
1262 MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1263 MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1264
1265 if (!findGISelOptimalMemOpLowering(
1266 MemOps, Limit,
1267 MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
1268 IsVolatile),
1269 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
1270 MF.getFunction().getAttributes(), TLI))
1271 return false;
1272
1273 if (DstAlignCanChange) {
1274 // Get an estimate of the type from the LLT.
1275 Type *IRTy = getTypeForLLT(MemOps[0], C);
1276 Align NewAlign = DL.getABITypeAlign(IRTy);
1277
1278 // Don't promote to an alignment that would require dynamic stack
1279 // realignment.
1280 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1281 if (!TRI->needsStackRealignment(MF))
1282 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
1283 NewAlign = NewAlign / 2;
1284
1285 if (NewAlign > Alignment) {
1286 Alignment = NewAlign;
1287 unsigned FI = FIDef->getOperand(1).getIndex();
1288 // Give the stack frame object a larger alignment if needed.
1289 if (MFI.getObjectAlign(FI) < Alignment)
1290 MFI.setObjectAlignment(FI, Alignment);
1291 }
1292 }
1293
1294 LLVM_DEBUG(dbgs() << "Inlining memcpy: " << MI << " into loads & stores\n");
1295
1296 MachineIRBuilder MIB(MI);
  // Now we need to emit a load/store pair for each of the types we've
  // collected. I.e. for each type, generate a load from the source pointer of
  // that type width, and then generate a corresponding store to the dest buffer
  // of that value loaded. This can result in a sequence of loads and stores of
  // mixed types, depending on what the target specifies as good types to use.
1302 unsigned CurrOffset = 0;
1303 LLT PtrTy = MRI.getType(Src);
1304 unsigned Size = KnownLen;
1305 for (auto CopyTy : MemOps) {
1306 // Issuing an unaligned load / store pair that overlaps with the previous
1307 // pair. Adjust the offset accordingly.
1308 if (CopyTy.getSizeInBytes() > Size)
1309 CurrOffset -= CopyTy.getSizeInBytes() - Size;
1310
1311 // Construct MMOs for the accesses.
1312 auto *LoadMMO =
1313 MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1314 auto *StoreMMO =
1315 MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1316
1317 // Create the load.
1318 Register LoadPtr = Src;
1319 Register Offset;
1320 if (CurrOffset != 0) {
1321 Offset = MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset)
1322 .getReg(0);
1323 LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1324 }
1325 auto LdVal = MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO);
1326
1327 // Create the store.
1328 Register StorePtr =
1329 CurrOffset == 0 ? Dst : MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1330 MIB.buildStore(LdVal, StorePtr, *StoreMMO);
1331 CurrOffset += CopyTy.getSizeInBytes();
1332 Size -= CopyTy.getSizeInBytes();
1333 }
1334
1335 MI.eraseFromParent();
1336 return true;
1337 }
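
// As a concrete (illustrative) shape: a 16-byte memcpy lowered with
// MemOps = {s64, s64} becomes two G_LOADs from the source (at offsets 0 and 8,
// the second through a G_PTR_ADD) interleaved with two matching G_STOREs to
// the destination at the same offsets.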
1338
bool CombinerHelper::optimizeMemmove(MachineInstr &MI, Register Dst,
                                     Register Src, unsigned KnownLen,
                                     Align DstAlign, Align SrcAlign,
                                     bool IsVolatile) {
1343 auto &MF = *MI.getParent()->getParent();
1344 const auto &TLI = *MF.getSubtarget().getTargetLowering();
1345 auto &DL = MF.getDataLayout();
1346 LLVMContext &C = MF.getFunction().getContext();
1347
1348 assert(KnownLen != 0 && "Have a zero length memmove length!");
1349
1350 bool DstAlignCanChange = false;
1351 MachineFrameInfo &MFI = MF.getFrameInfo();
1352 bool OptSize = shouldLowerMemFuncForSize(MF);
1353 Align Alignment = commonAlignment(DstAlign, SrcAlign);
1354
1355 MachineInstr *FIDef = getOpcodeDef(TargetOpcode::G_FRAME_INDEX, Dst, MRI);
1356 if (FIDef && !MFI.isFixedObjectIndex(FIDef->getOperand(1).getIndex()))
1357 DstAlignCanChange = true;
1358
1359 unsigned Limit = TLI.getMaxStoresPerMemmove(OptSize);
1360 std::vector<LLT> MemOps;
1361
1362 const auto &DstMMO = **MI.memoperands_begin();
1363 const auto &SrcMMO = **std::next(MI.memoperands_begin());
1364 MachinePointerInfo DstPtrInfo = DstMMO.getPointerInfo();
1365 MachinePointerInfo SrcPtrInfo = SrcMMO.getPointerInfo();
1366
  // FIXME: SelectionDAG always passes false for 'AllowOverlap', apparently due
  // to a bug in its findOptimalMemOpLowering implementation. For now do the
  // same thing here.
1370 if (!findGISelOptimalMemOpLowering(
1371 MemOps, Limit,
1372 MemOp::Copy(KnownLen, DstAlignCanChange, Alignment, SrcAlign,
1373 /*IsVolatile*/ true),
1374 DstPtrInfo.getAddrSpace(), SrcPtrInfo.getAddrSpace(),
1375 MF.getFunction().getAttributes(), TLI))
1376 return false;
1377
1378 if (DstAlignCanChange) {
1379 // Get an estimate of the type from the LLT.
1380 Type *IRTy = getTypeForLLT(MemOps[0], C);
1381 Align NewAlign = DL.getABITypeAlign(IRTy);
1382
1383 // Don't promote to an alignment that would require dynamic stack
1384 // realignment.
1385 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1386 if (!TRI->needsStackRealignment(MF))
1387 while (NewAlign > Alignment && DL.exceedsNaturalStackAlignment(NewAlign))
1388 NewAlign = NewAlign / 2;
1389
1390 if (NewAlign > Alignment) {
1391 Alignment = NewAlign;
1392 unsigned FI = FIDef->getOperand(1).getIndex();
1393 // Give the stack frame object a larger alignment if needed.
1394 if (MFI.getObjectAlign(FI) < Alignment)
1395 MFI.setObjectAlignment(FI, Alignment);
1396 }
1397 }
1398
1399 LLVM_DEBUG(dbgs() << "Inlining memmove: " << MI << " into loads & stores\n");
1400
1401 MachineIRBuilder MIB(MI);
1402 // Memmove requires that we perform the loads first before issuing the stores.
1403 // Apart from that, this loop is pretty much doing the same thing as the
1404 // memcpy codegen function.
1405 unsigned CurrOffset = 0;
1406 LLT PtrTy = MRI.getType(Src);
1407 SmallVector<Register, 16> LoadVals;
1408 for (auto CopyTy : MemOps) {
1409 // Construct MMO for the load.
1410 auto *LoadMMO =
1411 MF.getMachineMemOperand(&SrcMMO, CurrOffset, CopyTy.getSizeInBytes());
1412
1413 // Create the load.
1414 Register LoadPtr = Src;
1415 if (CurrOffset != 0) {
1416 auto Offset =
1417 MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1418 LoadPtr = MIB.buildPtrAdd(PtrTy, Src, Offset).getReg(0);
1419 }
1420 LoadVals.push_back(MIB.buildLoad(CopyTy, LoadPtr, *LoadMMO).getReg(0));
1421 CurrOffset += CopyTy.getSizeInBytes();
1422 }
1423
1424 CurrOffset = 0;
1425 for (unsigned I = 0; I < MemOps.size(); ++I) {
1426 LLT CopyTy = MemOps[I];
1427 // Now store the values loaded.
1428 auto *StoreMMO =
1429 MF.getMachineMemOperand(&DstMMO, CurrOffset, CopyTy.getSizeInBytes());
1430
1431 Register StorePtr = Dst;
1432 if (CurrOffset != 0) {
1433 auto Offset =
1434 MIB.buildConstant(LLT::scalar(PtrTy.getSizeInBits()), CurrOffset);
1435 StorePtr = MIB.buildPtrAdd(PtrTy, Dst, Offset).getReg(0);
1436 }
1437 MIB.buildStore(LoadVals[I], StorePtr, *StoreMMO);
1438 CurrOffset += CopyTy.getSizeInBytes();
1439 }
1440 MI.eraseFromParent();
1441 return true;
1442 }
1443
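// Try to inline a G_MEMCPY, G_MEMMOVE or G_MEMSET whose length is a known
// constant as a sequence of loads and stores. Volatile operations,
// non-constant lengths, and lengths greater than MaxLen are left alone
// (e.g. for the legalizer to turn into a libcall).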
1444 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) {
1445 const unsigned Opc = MI.getOpcode();
1446 // This combine is fairly complex so it's not written with a separate
1447 // matcher function.
1448 assert((Opc == TargetOpcode::G_MEMCPY || Opc == TargetOpcode::G_MEMMOVE ||
1449 Opc == TargetOpcode::G_MEMSET) && "Expected a memcpy-like instruction");
1450
1451 auto MMOIt = MI.memoperands_begin();
1452 const MachineMemOperand *MemOp = *MMOIt;
1453 bool IsVolatile = MemOp->isVolatile();
1454 // Don't try to optimize volatile.
1455 if (IsVolatile)
1456 return false;
1457
1458 Align DstAlign = MemOp->getBaseAlign();
1459 Align SrcAlign;
1460 Register Dst = MI.getOperand(0).getReg();
1461 Register Src = MI.getOperand(1).getReg();
1462 Register Len = MI.getOperand(2).getReg();
1463
1464 if (Opc != TargetOpcode::G_MEMSET) {
1465 assert(MMOIt != MI.memoperands_end() && "Expected a second MMO on MI");
1466 MemOp = *(++MMOIt);
1467 SrcAlign = MemOp->getBaseAlign();
1468 }
1469
1470 // See if this is a constant length copy
1471 auto LenVRegAndVal = getConstantVRegValWithLookThrough(Len, MRI);
1472 if (!LenVRegAndVal)
1473 return false; // Leave it to the legalizer to lower it to a libcall.
1474 unsigned KnownLen = LenVRegAndVal->Value.getZExtValue();
1475
1476 if (KnownLen == 0) {
1477 MI.eraseFromParent();
1478 return true;
1479 }
1480
1481 if (MaxLen && KnownLen > MaxLen)
1482 return false;
1483
1484 if (Opc == TargetOpcode::G_MEMCPY)
1485 return optimizeMemcpy(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
1486 if (Opc == TargetOpcode::G_MEMMOVE)
1487 return optimizeMemmove(MI, Dst, Src, KnownLen, DstAlign, SrcAlign, IsVolatile);
1488 if (Opc == TargetOpcode::G_MEMSET)
1489 return optimizeMemset(MI, Dst, Src, KnownLen, DstAlign, IsVolatile);
1490 return false;
1491 }
1492
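/// Try to constant fold a floating-point unary operation (G_FNEG, G_FABS,
/// G_FPTRUNC, G_FSQRT or G_FLOG2) whose source is a constant.
/// \returns the folded value, or None if \p Op is not a G_FCONSTANT.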
1493 static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy,
1494 const Register Op,
1495 const MachineRegisterInfo &MRI) {
1496 const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI);
1497 if (!MaybeCst)
1498 return None;
1499
1500 APFloat V = MaybeCst->getValueAPF();
1501 switch (Opcode) {
1502 default:
1503 llvm_unreachable("Unexpected opcode!");
1504 case TargetOpcode::G_FNEG: {
1505 V.changeSign();
1506 return V;
1507 }
1508 case TargetOpcode::G_FABS: {
1509 V.clearSign();
1510 return V;
1511 }
1512 case TargetOpcode::G_FPTRUNC:
1513 break;
1514 case TargetOpcode::G_FSQRT: {
1515 bool Unused;
1516 V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
1517 V = APFloat(sqrt(V.convertToDouble()));
1518 break;
1519 }
1520 case TargetOpcode::G_FLOG2: {
1521 bool Unused;
1522 V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused);
1523 V = APFloat(log2(V.convertToDouble()));
1524 break;
1525 }
1526 }
1527 // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise,
1528 // `buildFConstant` will assert on size mismatch. Only `G_FPTRUNC`, `G_FSQRT`,
1529 // and `G_FLOG2` reach here.
1530 bool Unused;
1531 V.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven, &Unused);
1532 return V;
1533 }
1534
1535 bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI,
1536 Optional<APFloat> &Cst) {
1537 Register DstReg = MI.getOperand(0).getReg();
1538 Register SrcReg = MI.getOperand(1).getReg();
1539 LLT DstTy = MRI.getType(DstReg);
1540 Cst = constantFoldFpUnary(MI.getOpcode(), DstTy, SrcReg, MRI);
1541 return Cst.hasValue();
1542 }
1543
1544 bool CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI,
1545 Optional<APFloat> &Cst) {
1546 assert(Cst.hasValue() && "Optional is unexpectedly empty!");
1547 Builder.setInstrAndDebugLoc(MI);
1548 MachineFunction &MF = Builder.getMF();
1549 auto *FPVal = ConstantFP::get(MF.getFunction().getContext(), *Cst);
1550 Register DstReg = MI.getOperand(0).getReg();
1551 Builder.buildFConstant(DstReg, *FPVal);
1552 MI.eraseFromParent();
1553 return true;
1554 }
1555
1556 bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI,
1557 PtrAddChain &MatchInfo) {
1558 // We're trying to match the following pattern:
1559 // %t1 = G_PTR_ADD %base, G_CONSTANT imm1
1560 // %root = G_PTR_ADD %t1, G_CONSTANT imm2
1561 // -->
1562 // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2)
1563
1564 if (MI.getOpcode() != TargetOpcode::G_PTR_ADD)
1565 return false;
1566
1567 Register Add2 = MI.getOperand(1).getReg();
1568 Register Imm1 = MI.getOperand(2).getReg();
1569 auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI);
1570 if (!MaybeImmVal)
1571 return false;
1572
1573 MachineInstr *Add2Def = MRI.getUniqueVRegDef(Add2);
1574 if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD)
1575 return false;
1576
1577 Register Base = Add2Def->getOperand(1).getReg();
1578 Register Imm2 = Add2Def->getOperand(2).getReg();
1579 auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI);
1580 if (!MaybeImm2Val)
1581 return false;
1582
1583 // Pass the combined immediate to the apply function.
1584 MatchInfo.Imm = (MaybeImmVal->Value + MaybeImm2Val->Value).getSExtValue();
1585 MatchInfo.Base = Base;
1586 return true;
1587 }
1588
1589 bool CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI,
1590 PtrAddChain &MatchInfo) {
1591 assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD");
1592 MachineIRBuilder MIB(MI);
1593 LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg());
1594 auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm);
1595 Observer.changingInstr(MI);
1596 MI.getOperand(1).setReg(MatchInfo.Base);
1597 MI.getOperand(2).setReg(NewOffset.getReg(0));
1598 Observer.changedInstr(MI);
1599 return true;
1600 }
1601
1602 bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI,
1603 RegisterImmPair &MatchInfo) {
1604 // We're trying to match the following pattern with any of
1605 // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions:
1606 // %t1 = SHIFT %base, G_CONSTANT imm1
1607 // %root = SHIFT %t1, G_CONSTANT imm2
1608 // -->
1609 // %root = SHIFT %base, G_CONSTANT (imm1 + imm2)
1610
1611 unsigned Opcode = MI.getOpcode();
1612 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1613 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1614 Opcode == TargetOpcode::G_USHLSAT) &&
1615 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1616
1617 Register Shl2 = MI.getOperand(1).getReg();
1618 Register Imm1 = MI.getOperand(2).getReg();
1619 auto MaybeImmVal = getConstantVRegValWithLookThrough(Imm1, MRI);
1620 if (!MaybeImmVal)
1621 return false;
1622
1623 MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2);
1624 if (Shl2Def->getOpcode() != Opcode)
1625 return false;
1626
1627 Register Base = Shl2Def->getOperand(1).getReg();
1628 Register Imm2 = Shl2Def->getOperand(2).getReg();
1629 auto MaybeImm2Val = getConstantVRegValWithLookThrough(Imm2, MRI);
1630 if (!MaybeImm2Val)
1631 return false;
1632
1633 // Pass the combined immediate to the apply function.
1634 MatchInfo.Imm =
1635 (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue();
1636 MatchInfo.Reg = Base;
1637
1638 // There is no simple replacement for a saturating unsigned left shift that
1639 // exceeds the scalar size.
1640 if (Opcode == TargetOpcode::G_USHLSAT &&
1641 MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits())
1642 return false;
1643
1644 return true;
1645 }
1646
1647 bool CombinerHelper::applyShiftImmedChain(MachineInstr &MI,
1648 RegisterImmPair &MatchInfo) {
1649 unsigned Opcode = MI.getOpcode();
1650 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1651 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT ||
1652 Opcode == TargetOpcode::G_USHLSAT) &&
1653 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT");
1654
1655 Builder.setInstrAndDebugLoc(MI);
1656 LLT Ty = MRI.getType(MI.getOperand(1).getReg());
1657 unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits();
1658 auto Imm = MatchInfo.Imm;
1659
1660 if (Imm >= ScalarSizeInBits) {
1661 // Any logical shift that exceeds scalar size will produce zero.
1662 if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) {
1663 Builder.buildConstant(MI.getOperand(0), 0);
1664 MI.eraseFromParent();
1665 return true;
1666 }
1667 // Arithmetic shift and saturating signed left shift have no effect beyond
1668 // scalar size.
1669 Imm = ScalarSizeInBits - 1;
1670 }
1671
1672 LLT ImmTy = MRI.getType(MI.getOperand(2).getReg());
1673 Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0);
1674 Observer.changingInstr(MI);
1675 MI.getOperand(1).setReg(MatchInfo.Reg);
1676 MI.getOperand(2).setReg(NewImm);
1677 Observer.changedInstr(MI);
1678 return true;
1679 }
1680
1681 bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI,
1682 ShiftOfShiftedLogic &MatchInfo) {
1683 // We're trying to match the following pattern with any of
1684 // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination
1685 // with any of G_AND/G_OR/G_XOR logic instructions.
1686 // %t1 = SHIFT %X, G_CONSTANT C0
1687 // %t2 = LOGIC %t1, %Y
1688 // %root = SHIFT %t2, G_CONSTANT C1
1689 // -->
1690 // %t3 = SHIFT %X, G_CONSTANT (C0+C1)
1691 // %t4 = SHIFT %Y, G_CONSTANT C1
1692 // %root = LOGIC %t3, %t4
1693 unsigned ShiftOpcode = MI.getOpcode();
1694 assert((ShiftOpcode == TargetOpcode::G_SHL ||
1695 ShiftOpcode == TargetOpcode::G_ASHR ||
1696 ShiftOpcode == TargetOpcode::G_LSHR ||
1697 ShiftOpcode == TargetOpcode::G_USHLSAT ||
1698 ShiftOpcode == TargetOpcode::G_SSHLSAT) &&
1699 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1700
1701 // Match a one-use bitwise logic op.
1702 Register LogicDest = MI.getOperand(1).getReg();
1703 if (!MRI.hasOneNonDBGUse(LogicDest))
1704 return false;
1705
1706 MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest);
1707 unsigned LogicOpcode = LogicMI->getOpcode();
1708 if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR &&
1709 LogicOpcode != TargetOpcode::G_XOR)
1710 return false;
1711
1712 // Find a matching one-use shift by constant.
1713 const Register C1 = MI.getOperand(2).getReg();
1714 auto MaybeImmVal = getConstantVRegValWithLookThrough(C1, MRI);
1715 if (!MaybeImmVal)
1716 return false;
1717
1718 const uint64_t C1Val = MaybeImmVal->Value.getZExtValue();
1719
1720 auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) {
1721 // The shift should match the previous one and should have a single use.
1722 if (MI->getOpcode() != ShiftOpcode ||
1723 !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
1724 return false;
1725
1726 // Must be a constant.
1727 auto MaybeImmVal =
1728 getConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI);
1729 if (!MaybeImmVal)
1730 return false;
1731
1732 ShiftVal = MaybeImmVal->Value.getSExtValue();
1733 return true;
1734 };
1735
1736 // Logic ops are commutative, so check each operand for a match.
1737 Register LogicMIReg1 = LogicMI->getOperand(1).getReg();
1738 MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1);
1739 Register LogicMIReg2 = LogicMI->getOperand(2).getReg();
1740 MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2);
1741 uint64_t C0Val;
1742
1743 if (matchFirstShift(LogicMIOp1, C0Val)) {
1744 MatchInfo.LogicNonShiftReg = LogicMIReg2;
1745 MatchInfo.Shift2 = LogicMIOp1;
1746 } else if (matchFirstShift(LogicMIOp2, C0Val)) {
1747 MatchInfo.LogicNonShiftReg = LogicMIReg1;
1748 MatchInfo.Shift2 = LogicMIOp2;
1749 } else
1750 return false;
1751
1752 MatchInfo.ValSum = C0Val + C1Val;
1753
1754 // The fold is not valid if the sum of the shift values exceeds bitwidth.
1755 if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits())
1756 return false;
1757
1758 MatchInfo.Logic = LogicMI;
1759 return true;
1760 }
1761
1762 bool CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI,
1763 ShiftOfShiftedLogic &MatchInfo) {
1764 unsigned Opcode = MI.getOpcode();
1765 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR ||
1766 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT ||
1767 Opcode == TargetOpcode::G_SSHLSAT) &&
1768 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT");
1769
1770 LLT ShlType = MRI.getType(MI.getOperand(2).getReg());
1771 LLT DestType = MRI.getType(MI.getOperand(0).getReg());
1772 Builder.setInstrAndDebugLoc(MI);
1773
1774 Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0);
1775
1776 Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg();
1777 Register Shift1 =
1778 Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0);
1779
1780 Register Shift2Const = MI.getOperand(2).getReg();
1781 Register Shift2 = Builder
1782 .buildInstr(Opcode, {DestType},
1783 {MatchInfo.LogicNonShiftReg, Shift2Const})
1784 .getReg(0);
1785
1786 Register Dest = MI.getOperand(0).getReg();
1787 Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2});
1788
1789 // These were one use so it's safe to remove them.
1790 MatchInfo.Shift2->eraseFromParent();
1791 MatchInfo.Logic->eraseFromParent();
1792
1793 MI.eraseFromParent();
1794 return true;
1795 }
1796
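// Fold a multiplication by a power of two into a left shift, e.g.
//   %dst = G_MUL %x, 16  -->  %dst = G_SHL %x, 4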
1797 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI,
1798 unsigned &ShiftVal) {
1799 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1800 auto MaybeImmVal =
1801 getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
1802 if (!MaybeImmVal)
1803 return false;
1804
1805 ShiftVal = MaybeImmVal->Value.exactLogBase2();
1806 return (static_cast<int32_t>(ShiftVal) != -1);
1807 }
1808
1809 bool CombinerHelper::applyCombineMulToShl(MachineInstr &MI,
1810 unsigned &ShiftVal) {
1811 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
1812 MachineIRBuilder MIB(MI);
1813 LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg());
1814 auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal);
1815 Observer.changingInstr(MI);
1816 MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL));
1817 MI.getOperand(2).setReg(ShiftCst.getReg(0));
1818 Observer.changedInstr(MI);
1819 return true;
1820 }
1821
1822 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source
1823 bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI,
1824 RegisterImmPair &MatchData) {
1825 assert(MI.getOpcode() == TargetOpcode::G_SHL && KB);
1826
1827 Register LHS = MI.getOperand(1).getReg();
1828
1829 Register ExtSrc;
1830 if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&
1831 !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&
1832 !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))
1833 return false;
1834
1835 // TODO: Should handle vector splat.
1836 Register RHS = MI.getOperand(2).getReg();
1837 auto MaybeShiftAmtVal = getConstantVRegValWithLookThrough(RHS, MRI);
1838 if (!MaybeShiftAmtVal)
1839 return false;
1840
1841 if (LI) {
1842 LLT SrcTy = MRI.getType(ExtSrc);
1843
1844 // We only really care about the legality with the shifted value. We can
1845 // pick any type for the constant shift amount, so ask the target what to
1846 // use. Otherwise we would have to guess and hope it is reported as legal.
1847 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy);
1848 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}}))
1849 return false;
1850 }
1851
1852 int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue();
1853 MatchData.Reg = ExtSrc;
1854 MatchData.Imm = ShiftAmt;
1855
1856 unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countLeadingOnes();
1857 return MinLeadingZeros >= ShiftAmt;
1858 }
1859
1860 bool CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI,
1861 const RegisterImmPair &MatchData) {
1862 Register ExtSrcReg = MatchData.Reg;
1863 int64_t ShiftAmtVal = MatchData.Imm;
1864
1865 LLT ExtSrcTy = MRI.getType(ExtSrcReg);
1866 Builder.setInstrAndDebugLoc(MI);
1867 auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal);
1868 auto NarrowShift =
1869 Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags());
1870 Builder.buildZExt(MI.getOperand(0), NarrowShift);
1871 MI.eraseFromParent();
1872 return true;
1873 }
1874
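/// Look through a chain of G_BITCASTs and return the underlying register.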
1875 static Register peekThroughBitcast(Register Reg,
1876 const MachineRegisterInfo &MRI) {
1877 while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
1878 ;
1879
1880 return Reg;
1881 }
1882
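// Match a G_UNMERGE_VALUES whose source (looking through bitcasts) is a
// G_MERGE_VALUES, G_BUILD_VECTOR or G_CONCAT_VECTORS, so that each result of
// the unmerge can be replaced by (or cast from) the corresponding merge
// operand.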
1883 bool CombinerHelper::matchCombineUnmergeMergeToPlainValues(
1884 MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1885 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1886 "Expected an unmerge");
1887 Register SrcReg =
1888 peekThroughBitcast(MI.getOperand(MI.getNumOperands() - 1).getReg(), MRI);
1889
1890 MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1891 if (SrcInstr->getOpcode() != TargetOpcode::G_MERGE_VALUES &&
1892 SrcInstr->getOpcode() != TargetOpcode::G_BUILD_VECTOR &&
1893 SrcInstr->getOpcode() != TargetOpcode::G_CONCAT_VECTORS)
1894 return false;
1895
1896 // Check the source type of the merge.
1897 LLT SrcMergeTy = MRI.getType(SrcInstr->getOperand(1).getReg());
1898 LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1899 bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits();
1900 if (SrcMergeTy != Dst0Ty && !SameSize)
1901 return false;
1902 // They are the same now (modulo a bitcast).
1903 // We can collect all the src registers.
1904 for (unsigned Idx = 1, EndIdx = SrcInstr->getNumOperands(); Idx != EndIdx;
1905 ++Idx)
1906 Operands.push_back(SrcInstr->getOperand(Idx).getReg());
1907 return true;
1908 }
1909
1910 bool CombinerHelper::applyCombineUnmergeMergeToPlainValues(
1911 MachineInstr &MI, SmallVectorImpl<Register> &Operands) {
1912 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1913 "Expected an unmerge");
1914 assert((MI.getNumOperands() - 1 == Operands.size()) &&
1915 "Not enough operands to replace all defs");
1916 unsigned NumElems = MI.getNumOperands() - 1;
1917
1918 LLT SrcTy = MRI.getType(Operands[0]);
1919 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
1920 bool CanReuseInputDirectly = DstTy == SrcTy;
1921 Builder.setInstrAndDebugLoc(MI);
1922 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1923 Register DstReg = MI.getOperand(Idx).getReg();
1924 Register SrcReg = Operands[Idx];
1925 if (CanReuseInputDirectly)
1926 replaceRegWith(MRI, DstReg, SrcReg);
1927 else
1928 Builder.buildCast(DstReg, SrcReg);
1929 }
1930 MI.eraseFromParent();
1931 return true;
1932 }
1933
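// Match a G_UNMERGE_VALUES of a G_CONSTANT or G_FCONSTANT. The wide constant
// can be split into one narrow constant per result, starting with the lowest
// bits, e.g.
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %cst:_(s64)
// becomes two G_CONSTANTs holding the low and high halves of %cst.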
1934 bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI,
1935 SmallVectorImpl<APInt> &Csts) {
1936 unsigned SrcIdx = MI.getNumOperands() - 1;
1937 Register SrcReg = MI.getOperand(SrcIdx).getReg();
1938 MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg);
1939 if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT &&
1940 SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT)
1941 return false;
1942 // Break down the big constant into smaller ones.
1943 const MachineOperand &CstVal = SrcInstr->getOperand(1);
1944 APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT
1945 ? CstVal.getCImm()->getValue()
1946 : CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
1947
1948 LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg());
1949 unsigned ShiftAmt = Dst0Ty.getSizeInBits();
1950 // Unmerge a constant.
1951 for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) {
1952 Csts.emplace_back(Val.trunc(ShiftAmt));
1953 Val = Val.lshr(ShiftAmt);
1954 }
1955
1956 return true;
1957 }
1958
1959 bool CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI,
1960 SmallVectorImpl<APInt> &Csts) {
1961 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1962 "Expected an unmerge");
1963 assert((MI.getNumOperands() - 1 == Csts.size()) &&
1964 "Not enough operands to replace all defs");
1965 unsigned NumElems = MI.getNumOperands() - 1;
1966 Builder.setInstrAndDebugLoc(MI);
1967 for (unsigned Idx = 0; Idx < NumElems; ++Idx) {
1968 Register DstReg = MI.getOperand(Idx).getReg();
1969 Builder.buildConstant(DstReg, Csts[Idx]);
1970 }
1971
1972 MI.eraseFromParent();
1973 return true;
1974 }
1975
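// If all results of a G_UNMERGE_VALUES except the first one are dead, the
// unmerge can be replaced by a G_TRUNC of the source to the first result.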
1976 bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1977 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
1978 "Expected an unmerge");
1979 // Check that all the lanes are dead except the first one.
1980 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
1981 if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg()))
1982 return false;
1983 }
1984 return true;
1985 }
1986
1987 bool CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) {
1988 Builder.setInstrAndDebugLoc(MI);
1989 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
1990 // Truncating a vector is going to truncate every single lane,
1991 // whereas we want the full low bits.
1992 // Do the operation on a scalar instead.
1993 LLT SrcTy = MRI.getType(SrcReg);
1994 if (SrcTy.isVector())
1995 SrcReg =
1996 Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0);
1997
1998 Register Dst0Reg = MI.getOperand(0).getReg();
1999 LLT Dst0Ty = MRI.getType(Dst0Reg);
2000 if (Dst0Ty.isVector()) {
2001 auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg);
2002 Builder.buildCast(Dst0Reg, MIB);
2003 } else
2004 Builder.buildTrunc(Dst0Reg, SrcReg);
2005 MI.eraseFromParent();
2006 return true;
2007 }
2008
2009 bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) {
2010 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2011 "Expected an unmerge");
2012 Register Dst0Reg = MI.getOperand(0).getReg();
2013 LLT Dst0Ty = MRI.getType(Dst0Reg);
2014 // G_ZEXT on vector applies to each lane, so it will
2015 // affect all destinations. Therefore we won't be able
2016 // to simplify the unmerge to just the first definition.
2017 if (Dst0Ty.isVector())
2018 return false;
2019 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg();
2020 LLT SrcTy = MRI.getType(SrcReg);
2021 if (SrcTy.isVector())
2022 return false;
2023
2024 Register ZExtSrcReg;
2025 if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))
2026 return false;
2027
2028 // Finally we can replace the first definition with
2029 // a zext of the source if the definition is big enough to hold
2030 // all of the ZExtSrc bits.
2031 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
2032 return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits();
2033 }
2034
2035 bool CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) {
2036 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES &&
2037 "Expected an unmerge");
2038
2039 Register Dst0Reg = MI.getOperand(0).getReg();
2040
2041 MachineInstr *ZExtInstr =
2042 MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg());
2043 assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT &&
2044 "Expecting a G_ZEXT");
2045
2046 Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg();
2047 LLT Dst0Ty = MRI.getType(Dst0Reg);
2048 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg);
2049
2050 Builder.setInstrAndDebugLoc(MI);
2051
2052 if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) {
2053 Builder.buildZExt(Dst0Reg, ZExtSrcReg);
2054 } else {
2055 assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() &&
2056 "ZExt src doesn't fit in destination");
2057 replaceRegWith(MRI, Dst0Reg, ZExtSrcReg);
2058 }
2059
2060 Register ZeroReg;
2061 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) {
2062 if (!ZeroReg)
2063 ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0);
2064 replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg);
2065 }
2066 MI.eraseFromParent();
2067 return true;
2068 }
2069
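// Match a wide scalar shift whose constant amount is at least half the bit
// width (but still in range). Such a shift only depends on one half of the
// source, so it can be rewritten around a G_UNMERGE_VALUES/G_MERGE_VALUES
// pair; see applyCombineShiftToUnmerge below for the per-opcode rewrites.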
2070 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI,
2071 unsigned TargetShiftSize,
2072 unsigned &ShiftVal) {
2073 assert((MI.getOpcode() == TargetOpcode::G_SHL ||
2074 MI.getOpcode() == TargetOpcode::G_LSHR ||
2075 MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift");
2076
2077 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
2078 if (Ty.isVector()) // TODO:
2079 return false;
2080
2081 // Don't narrow further than the requested size.
2082 unsigned Size = Ty.getSizeInBits();
2083 if (Size <= TargetShiftSize)
2084 return false;
2085
2086 auto MaybeImmVal =
2087 getConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI);
2088 if (!MaybeImmVal)
2089 return false;
2090
2091 ShiftVal = MaybeImmVal->Value.getSExtValue();
2092 return ShiftVal >= Size / 2 && ShiftVal < Size;
2093 }
2094
2095 bool CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI,
2096 const unsigned &ShiftVal) {
2097 Register DstReg = MI.getOperand(0).getReg();
2098 Register SrcReg = MI.getOperand(1).getReg();
2099 LLT Ty = MRI.getType(SrcReg);
2100 unsigned Size = Ty.getSizeInBits();
2101 unsigned HalfSize = Size / 2;
2102 assert(ShiftVal >= HalfSize);
2103
2104 LLT HalfTy = LLT::scalar(HalfSize);
2105
2106 Builder.setInstr(MI);
2107 auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg);
2108 unsigned NarrowShiftAmt = ShiftVal - HalfSize;
2109
2110 if (MI.getOpcode() == TargetOpcode::G_LSHR) {
2111 Register Narrowed = Unmerge.getReg(1);
2112
2113 // dst = G_LSHR s64:x, C for C >= 32
2114 // =>
2115 // lo, hi = G_UNMERGE_VALUES x
2116 // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0
2117
2118 if (NarrowShiftAmt != 0) {
2119 Narrowed = Builder.buildLShr(HalfTy, Narrowed,
2120 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
2121 }
2122
2123 auto Zero = Builder.buildConstant(HalfTy, 0);
2124 Builder.buildMerge(DstReg, { Narrowed, Zero });
2125 } else if (MI.getOpcode() == TargetOpcode::G_SHL) {
2126 Register Narrowed = Unmerge.getReg(0);
2127 // dst = G_SHL s64:x, C for C >= 32
2128 // =>
2129 // lo, hi = G_UNMERGE_VALUES x
2130 // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32)
2131 if (NarrowShiftAmt != 0) {
2132 Narrowed = Builder.buildShl(HalfTy, Narrowed,
2133 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0);
2134 }
2135
2136 auto Zero = Builder.buildConstant(HalfTy, 0);
2137 Builder.buildMerge(DstReg, { Zero, Narrowed });
2138 } else {
2139 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2140 auto Hi = Builder.buildAShr(
2141 HalfTy, Unmerge.getReg(1),
2142 Builder.buildConstant(HalfTy, HalfSize - 1));
2143
2144 if (ShiftVal == HalfSize) {
2145 // (G_ASHR i64:x, 32) ->
2146 // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31)
2147 Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi });
2148 } else if (ShiftVal == Size - 1) {
2149 // Don't need a second shift.
2150 // (G_ASHR i64:x, 63) ->
2151 // %narrowed = (G_ASHR hi_32(x), 31)
2152 // G_MERGE_VALUES %narrowed, %narrowed
2153 Builder.buildMerge(DstReg, { Hi, Hi });
2154 } else {
2155 auto Lo = Builder.buildAShr(
2156 HalfTy, Unmerge.getReg(1),
2157 Builder.buildConstant(HalfTy, ShiftVal - HalfSize));
2158
2159 // (G_ASHR i64:x, C) ->, for C >= 32
2160 // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31)
2161 Builder.buildMerge(DstReg, { Lo, Hi });
2162 }
2163 }
2164
2165 MI.eraseFromParent();
2166 return true;
2167 }
2168
2169 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI,
2170 unsigned TargetShiftAmount) {
2171 unsigned ShiftAmt;
2172 if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) {
2173 applyCombineShiftToUnmerge(MI, ShiftAmt);
2174 return true;
2175 }
2176
2177 return false;
2178 }
2179
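// Fold G_INTTOPTR (G_PTRTOINT %p) -> %p when %p already has the destination
// pointer type; the round trip then becomes a plain COPY.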
2180 bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2181 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2182 Register DstReg = MI.getOperand(0).getReg();
2183 LLT DstTy = MRI.getType(DstReg);
2184 Register SrcReg = MI.getOperand(1).getReg();
2185 return mi_match(SrcReg, MRI,
2186 m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg))));
2187 }
2188
2189 bool CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) {
2190 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR");
2191 Register DstReg = MI.getOperand(0).getReg();
2192 Builder.setInstr(MI);
2193 Builder.buildCopy(DstReg, Reg);
2194 MI.eraseFromParent();
2195 return true;
2196 }
2197
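// Fold G_PTRTOINT (G_INTTOPTR %x) -> %x, inserting a zext or trunc if the
// integer types differ.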
2198 bool CombinerHelper::matchCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
2199 assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2200 Register SrcReg = MI.getOperand(1).getReg();
2201 return mi_match(SrcReg, MRI, m_GIntToPtr(m_Reg(Reg)));
2202 }
2203
2204 bool CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) {
2205 assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT");
2206 Register DstReg = MI.getOperand(0).getReg();
2207 Builder.setInstr(MI);
2208 Builder.buildZExtOrTrunc(DstReg, Reg);
2209 MI.eraseFromParent();
2210 return true;
2211 }
2212
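// Match a G_ADD where one operand is a G_PTRTOINT of a pointer with the same
// width as the add. The addition can then be done as a G_PTR_ADD followed by
// a G_PTRTOINT; PtrReg.second records whether the operands must be commuted
// so that the pointer ends up on the LHS.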
2213 bool CombinerHelper::matchCombineAddP2IToPtrAdd(
2214 MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2215 assert(MI.getOpcode() == TargetOpcode::G_ADD);
2216 Register LHS = MI.getOperand(1).getReg();
2217 Register RHS = MI.getOperand(2).getReg();
2218 LLT IntTy = MRI.getType(LHS);
2219
2220 // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the
2221 // instruction.
2222 PtrReg.second = false;
2223 for (Register SrcReg : {LHS, RHS}) {
2224 if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {
2225 // Don't handle cases where the integer is implicitly converted to the
2226 // pointer width.
2227 LLT PtrTy = MRI.getType(PtrReg.first);
2228 if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits())
2229 return true;
2230 }
2231
2232 PtrReg.second = true;
2233 }
2234
2235 return false;
2236 }
2237
2238 bool CombinerHelper::applyCombineAddP2IToPtrAdd(
2239 MachineInstr &MI, std::pair<Register, bool> &PtrReg) {
2240 Register Dst = MI.getOperand(0).getReg();
2241 Register LHS = MI.getOperand(1).getReg();
2242 Register RHS = MI.getOperand(2).getReg();
2243
2244 const bool DoCommute = PtrReg.second;
2245 if (DoCommute)
2246 std::swap(LHS, RHS);
2247 LHS = PtrReg.first;
2248
2249 LLT PtrTy = MRI.getType(LHS);
2250
2251 Builder.setInstrAndDebugLoc(MI);
2252 auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS);
2253 Builder.buildPtrToInt(Dst, PtrAdd);
2254 MI.eraseFromParent();
2255 return true;
2256 }
2257
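// Fold a G_PTR_ADD whose base is a G_INTTOPTR of a constant and whose offset
// is also constant into a single constant equal to the sum of the two.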
2258 bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI,
2259 int64_t &NewCst) {
2260 assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected a G_PTR_ADD");
2261 Register LHS = MI.getOperand(1).getReg();
2262 Register RHS = MI.getOperand(2).getReg();
2263 MachineRegisterInfo &MRI = Builder.getMF().getRegInfo();
2264
2265 if (auto RHSCst = getConstantVRegSExtVal(RHS, MRI)) {
2266 int64_t Cst;
2267 if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {
2268 NewCst = Cst + *RHSCst;
2269 return true;
2270 }
2271 }
2272
2273 return false;
2274 }
2275
2276 bool CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI,
2277 int64_t &NewCst) {
2278 assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected a G_PTR_ADD");
2279 Register Dst = MI.getOperand(0).getReg();
2280
2281 Builder.setInstrAndDebugLoc(MI);
2282 Builder.buildConstant(Dst, NewCst);
2283 MI.eraseFromParent();
2284 return true;
2285 }
2286
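// Fold G_ANYEXT (G_TRUNC %x) -> %x when %x already has the destination type.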
2287 bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
2288 assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2289 Register DstReg = MI.getOperand(0).getReg();
2290 Register SrcReg = MI.getOperand(1).getReg();
2291 LLT DstTy = MRI.getType(DstReg);
2292 return mi_match(SrcReg, MRI,
2293 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))));
2294 }
2295
2296 bool CombinerHelper::applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) {
2297 assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT");
2298 Register DstReg = MI.getOperand(0).getReg();
2299 MI.eraseFromParent();
2300 replaceRegWith(MRI, DstReg, Reg);
2301 return true;
2302 }
2303
2304 bool CombinerHelper::matchCombineExtOfExt(
2305 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2306 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2307 MI.getOpcode() == TargetOpcode::G_SEXT ||
2308 MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2309 "Expected a G_[ASZ]EXT");
2310 Register SrcReg = MI.getOperand(1).getReg();
2311 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2312 // Match exts with the same opcode, anyext([sz]ext) and sext(zext).
2313 unsigned Opc = MI.getOpcode();
2314 unsigned SrcOpc = SrcMI->getOpcode();
2315 if (Opc == SrcOpc ||
2316 (Opc == TargetOpcode::G_ANYEXT &&
2317 (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) ||
2318 (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) {
2319 MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc);
2320 return true;
2321 }
2322 return false;
2323 }
2324
2325 bool CombinerHelper::applyCombineExtOfExt(
2326 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) {
2327 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2328 MI.getOpcode() == TargetOpcode::G_SEXT ||
2329 MI.getOpcode() == TargetOpcode::G_ZEXT) &&
2330 "Expected a G_[ASZ]EXT");
2331
2332 Register Reg = std::get<0>(MatchInfo);
2333 unsigned SrcExtOp = std::get<1>(MatchInfo);
2334
2335 // Combine exts with the same opcode.
2336 if (MI.getOpcode() == SrcExtOp) {
2337 Observer.changingInstr(MI);
2338 MI.getOperand(1).setReg(Reg);
2339 Observer.changedInstr(MI);
2340 return true;
2341 }
2342
2343 // Combine:
2344 // - anyext([sz]ext x) to [sz]ext x
2345 // - sext(zext x) to zext x
2346 if (MI.getOpcode() == TargetOpcode::G_ANYEXT ||
2347 (MI.getOpcode() == TargetOpcode::G_SEXT &&
2348 SrcExtOp == TargetOpcode::G_ZEXT)) {
2349 Register DstReg = MI.getOperand(0).getReg();
2350 Builder.setInstrAndDebugLoc(MI);
2351 Builder.buildInstr(SrcExtOp, {DstReg}, {Reg});
2352 MI.eraseFromParent();
2353 return true;
2354 }
2355
2356 return false;
2357 }
2358
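// Rewrite a multiplication by -1 as a subtraction from zero, e.g.
//   %dst = G_MUL %x, -1  -->  %dst = G_SUB 0, %x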
2359 bool CombinerHelper::applyCombineMulByNegativeOne(MachineInstr &MI) {
2360 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL");
2361 Register DstReg = MI.getOperand(0).getReg();
2362 Register SrcReg = MI.getOperand(1).getReg();
2363 LLT DstTy = MRI.getType(DstReg);
2364
2365 Builder.setInstrAndDebugLoc(MI);
2366 Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg,
2367 MI.getFlags());
2368 MI.eraseFromParent();
2369 return true;
2370 }
2371
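// Fold G_FNEG (G_FNEG %x) -> %x.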
2372 bool CombinerHelper::matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg) {
2373 assert(MI.getOpcode() == TargetOpcode::G_FNEG && "Expected a G_FNEG");
2374 Register SrcReg = MI.getOperand(1).getReg();
2375 return mi_match(SrcReg, MRI, m_GFNeg(m_Reg(Reg)));
2376 }
2377
2378 bool CombinerHelper::matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src) {
2379 assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2380 Src = MI.getOperand(1).getReg();
2381 Register AbsSrc;
2382 return mi_match(Src, MRI, m_GFabs(m_Reg(AbsSrc)));
2383 }
2384
2385 bool CombinerHelper::applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src) {
2386 assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS");
2387 Register Dst = MI.getOperand(0).getReg();
2388 MI.eraseFromParent();
2389 replaceRegWith(MRI, Dst, Src);
2390 return true;
2391 }
2392
2393 bool CombinerHelper::matchCombineTruncOfExt(
2394 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2395 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2396 Register SrcReg = MI.getOperand(1).getReg();
2397 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2398 unsigned SrcOpc = SrcMI->getOpcode();
2399 if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT ||
2400 SrcOpc == TargetOpcode::G_ZEXT) {
2401 MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc);
2402 return true;
2403 }
2404 return false;
2405 }
2406
2407 bool CombinerHelper::applyCombineTruncOfExt(
2408 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) {
2409 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2410 Register SrcReg = MatchInfo.first;
2411 unsigned SrcExtOp = MatchInfo.second;
2412 Register DstReg = MI.getOperand(0).getReg();
2413 LLT SrcTy = MRI.getType(SrcReg);
2414 LLT DstTy = MRI.getType(DstReg);
2415 if (SrcTy == DstTy) {
2416 MI.eraseFromParent();
2417 replaceRegWith(MRI, DstReg, SrcReg);
2418 return true;
2419 }
2420 Builder.setInstrAndDebugLoc(MI);
2421 if (SrcTy.getSizeInBits() < DstTy.getSizeInBits())
2422 Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg});
2423 else
2424 Builder.buildTrunc(DstReg, SrcReg);
2425 MI.eraseFromParent();
2426 return true;
2427 }
2428
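// Match trunc (shl %x, %amt) where the shift amount is known to be less than
// the narrow destination width, so the shift can be performed in the narrow
// type instead:
//   trunc (shl %x, %amt) -> shl (trunc %x), %amt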
2429 bool CombinerHelper::matchCombineTruncOfShl(
2430 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2431 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2432 Register DstReg = MI.getOperand(0).getReg();
2433 Register SrcReg = MI.getOperand(1).getReg();
2434 LLT DstTy = MRI.getType(DstReg);
2435 Register ShiftSrc;
2436 Register ShiftAmt;
2437
2438 if (MRI.hasOneNonDBGUse(SrcReg) &&
2439 mi_match(SrcReg, MRI, m_GShl(m_Reg(ShiftSrc), m_Reg(ShiftAmt))) &&
2440 isLegalOrBeforeLegalizer(
2441 {TargetOpcode::G_SHL,
2442 {DstTy, getTargetLowering().getPreferredShiftAmountTy(DstTy)}})) {
2443 KnownBits Known = KB->getKnownBits(ShiftAmt);
2444 unsigned Size = DstTy.getSizeInBits();
2445 if (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size)) {
2446 MatchInfo = std::make_pair(ShiftSrc, ShiftAmt);
2447 return true;
2448 }
2449 }
2450 return false;
2451 }
2452
2453 bool CombinerHelper::applyCombineTruncOfShl(
2454 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
2455 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC");
2456 Register DstReg = MI.getOperand(0).getReg();
2457 Register SrcReg = MI.getOperand(1).getReg();
2458 LLT DstTy = MRI.getType(DstReg);
2459 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg);
2460
2461 Register ShiftSrc = MatchInfo.first;
2462 Register ShiftAmt = MatchInfo.second;
2463 Builder.setInstrAndDebugLoc(MI);
2464 auto TruncShiftSrc = Builder.buildTrunc(DstTy, ShiftSrc);
2465 Builder.buildShl(DstReg, TruncShiftSrc, ShiftAmt, SrcMI->getFlags());
2466 MI.eraseFromParent();
2467 return true;
2468 }
2469
2470 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) {
2471 return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2472 return MO.isReg() &&
2473 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2474 });
2475 }
2476
2477 bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) {
2478 return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) {
2479 return !MO.isReg() ||
2480 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2481 });
2482 }
2483
2484 bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) {
2485 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR);
2486 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
2487 return all_of(Mask, [](int Elt) { return Elt < 0; });
2488 }
2489
2490 bool CombinerHelper::matchUndefStore(MachineInstr &MI) {
2491 assert(MI.getOpcode() == TargetOpcode::G_STORE);
2492 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(),
2493 MRI);
2494 }
2495
2496 bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) {
2497 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2498 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(),
2499 MRI);
2500 }
2501
2502 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) {
2503 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2504 if (auto MaybeCstCmp =
2505 getConstantVRegValWithLookThrough(MI.getOperand(1).getReg(), MRI)) {
2506 OpIdx = MaybeCstCmp->Value.isNullValue() ? 3 : 2;
2507 return true;
2508 }
2509 return false;
2510 }
2511
2512 bool CombinerHelper::eraseInst(MachineInstr &MI) {
2513 MI.eraseFromParent();
2514 return true;
2515 }
2516
2517 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1,
2518 const MachineOperand &MOP2) {
2519 if (!MOP1.isReg() || !MOP2.isReg())
2520 return false;
2521 MachineInstr *I1 = getDefIgnoringCopies(MOP1.getReg(), MRI);
2522 if (!I1)
2523 return false;
2524 MachineInstr *I2 = getDefIgnoringCopies(MOP2.getReg(), MRI);
2525 if (!I2)
2526 return false;
2527
2528 // Handle a case like this:
2529 //
2530 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>)
2531 //
2532 // Even though %0 and %1 are produced by the same instruction they are not
2533 // the same values.
2534 if (I1 == I2)
2535 return MOP1.getReg() == MOP2.getReg();
2536
2537 // If we have an instruction which loads or stores, we can't guarantee that
2538 // it is identical.
2539 //
2540 // For example, we may have
2541 //
2542 // %x1 = G_LOAD %addr (load N from @somewhere)
2543 // ...
2544 // call @foo
2545 // ...
2546 // %x2 = G_LOAD %addr (load N from @somewhere)
2547 // ...
2548 // %or = G_OR %x1, %x2
2549 //
2550 // It's possible that @foo will modify whatever lives at the address we're
2551 // loading from. To be safe, let's just assume that all loads and stores
2552 // are different (unless we have something which is guaranteed to not
2553 // change.)
2554 if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr))
2555 return false;
2556
2557 // Check for physical registers on the instructions first to avoid cases
2558 // like this:
2559 //
2560 // %a = COPY $physreg
2561 // ...
2562 // SOMETHING implicit-def $physreg
2563 // ...
2564 // %b = COPY $physreg
2565 //
2566 // These copies are not equivalent.
2567 if (any_of(I1->uses(), [](const MachineOperand &MO) {
2568 return MO.isReg() && MO.getReg().isPhysical();
2569 })) {
2570 // Check if we have a case like this:
2571 //
2572 // %a = COPY $physreg
2573 // %b = COPY %a
2574 //
2575 // In this case, I1 and I2 will both be equal to %a = COPY $physreg.
2576 // From that, we know that they must have the same value, since they must
2577 // have come from the same COPY.
2578 return I1->isIdenticalTo(*I2);
2579 }
2580
2581 // We don't have any physical registers, so we don't necessarily need the
2582 // same vreg defs.
2583 //
2584 // On the off-chance that there's some target instruction feeding into the
2585 // instruction, let's use produceSameValue instead of isIdenticalTo.
2586 return Builder.getTII().produceSameValue(*I1, *I2, &MRI);
2587 }
2588
2589 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) {
2590 if (!MOP.isReg())
2591 return false;
2592 // MIPatternMatch doesn't let us look through G_ZEXT etc.
2593 auto ValAndVReg = getConstantVRegValWithLookThrough(MOP.getReg(), MRI);
2594 return ValAndVReg && ValAndVReg->Value == C;
2595 }
2596
2597 bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI,
2598 unsigned OpIdx) {
2599 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2600 Register OldReg = MI.getOperand(0).getReg();
2601 Register Replacement = MI.getOperand(OpIdx).getReg();
2602 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2603 MI.eraseFromParent();
2604 replaceRegWith(MRI, OldReg, Replacement);
2605 return true;
2606 }
2607
2608 bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI,
2609 Register Replacement) {
2610 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?");
2611 Register OldReg = MI.getOperand(0).getReg();
2612 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?");
2613 MI.eraseFromParent();
2614 replaceRegWith(MRI, OldReg, Replacement);
2615 return true;
2616 }
2617
2618 bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) {
2619 assert(MI.getOpcode() == TargetOpcode::G_SELECT);
2620 // Match (cond ? x : x)
2621 return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) &&
2622 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(),
2623 MRI);
2624 }
2625
2626 bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) {
2627 return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) &&
2628 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
2629 MRI);
2630 }
2631
2632 bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) {
2633 return matchConstantOp(MI.getOperand(OpIdx), 0) &&
2634 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
2635 MRI);
2636 }
2637
2638 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) {
2639 MachineOperand &MO = MI.getOperand(OpIdx);
2640 return MO.isReg() &&
2641 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI);
2642 }
2643
2644 bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI,
2645 unsigned OpIdx) {
2646 MachineOperand &MO = MI.getOperand(OpIdx);
2647 return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB);
2648 }
2649
2650 bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) {
2651 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2652 Builder.setInstr(MI);
2653 Builder.buildFConstant(MI.getOperand(0), C);
2654 MI.eraseFromParent();
2655 return true;
2656 }
2657
2658 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) {
2659 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2660 Builder.setInstr(MI);
2661 Builder.buildConstant(MI.getOperand(0), C);
2662 MI.eraseFromParent();
2663 return true;
2664 }
2665
2666 bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) {
2667 assert(MI.getNumDefs() == 1 && "Expected only one def?");
2668 Builder.setInstr(MI);
2669 Builder.buildUndef(MI.getOperand(0));
2670 MI.eraseFromParent();
2671 return true;
2672 }
2673
2674 bool CombinerHelper::matchSimplifyAddToSub(
2675 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2676 Register LHS = MI.getOperand(1).getReg();
2677 Register RHS = MI.getOperand(2).getReg();
2678 Register &NewLHS = std::get<0>(MatchInfo);
2679 Register &NewRHS = std::get<1>(MatchInfo);
2680
2681 // Helper lambda to check for opportunities for
2682 // ((0-A) + B) -> B - A
2683 // (A + (0-B)) -> A - B
2684 auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) {
2685 if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS))))
2686 return false;
2687 NewLHS = MaybeNewLHS;
2688 return true;
2689 };
2690
2691 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS);
2692 }
2693
2694 bool CombinerHelper::matchCombineInsertVecElts(
2695 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2696 assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT &&
2697 "Invalid opcode");
2698 Register DstReg = MI.getOperand(0).getReg();
2699 LLT DstTy = MRI.getType(DstReg);
2700 assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?");
2701 unsigned NumElts = DstTy.getNumElements();
2702 // If this MI is part of a sequence of insert_vec_elts, then
2703 // don't do the combine in the middle of the sequence.
2704 if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() ==
2705 TargetOpcode::G_INSERT_VECTOR_ELT)
2706 return false;
2707 MachineInstr *CurrInst = &MI;
2708 MachineInstr *TmpInst;
2709 int64_t IntImm;
2710 Register TmpReg;
2711 MatchInfo.resize(NumElts);
2712 while (mi_match(
2713 CurrInst->getOperand(0).getReg(), MRI,
2714 m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) {
2715 if (IntImm >= NumElts)
2716 return false;
2717 if (!MatchInfo[IntImm])
2718 MatchInfo[IntImm] = TmpReg;
2719 CurrInst = TmpInst;
2720 }
2721 // Variable index.
2722 if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT)
2723 return false;
2724 if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
2725 for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) {
2726 if (!MatchInfo[I - 1].isValid())
2727 MatchInfo[I - 1] = TmpInst->getOperand(I).getReg();
2728 }
2729 return true;
2730 }
2731 // If we didn't end in a G_IMPLICIT_DEF, bail out.
2732 return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
2733 }
2734
2735 bool CombinerHelper::applyCombineInsertVecElts(
2736 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) {
2737 Builder.setInstr(MI);
2738 Register UndefReg;
2739 auto GetUndef = [&]() {
2740 if (UndefReg)
2741 return UndefReg;
2742 LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
2743 UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0);
2744 return UndefReg;
2745 };
2746 for (unsigned I = 0; I < MatchInfo.size(); ++I) {
2747 if (!MatchInfo[I])
2748 MatchInfo[I] = GetUndef();
2749 }
2750 Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo);
2751 MI.eraseFromParent();
2752 return true;
2753 }
2754
2755 bool CombinerHelper::applySimplifyAddToSub(
2756 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) {
2757 Builder.setInstr(MI);
2758 Register SubLHS, SubRHS;
2759 std::tie(SubLHS, SubRHS) = MatchInfo;
2760 Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS);
2761 MI.eraseFromParent();
2762 return true;
2763 }
2764
2765 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
2766 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2767 // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
2768 //
2769 // Creates the new hand + logic instructions (but does not insert them).
2770 //
2771 // On success, MatchInfo is populated with the new instructions. These are
2772 // inserted in applyHoistLogicOpWithSameOpcodeHands.
2773 unsigned LogicOpcode = MI.getOpcode();
2774 assert(LogicOpcode == TargetOpcode::G_AND ||
2775 LogicOpcode == TargetOpcode::G_OR ||
2776 LogicOpcode == TargetOpcode::G_XOR);
2777 MachineIRBuilder MIB(MI);
2778 Register Dst = MI.getOperand(0).getReg();
2779 Register LHSReg = MI.getOperand(1).getReg();
2780 Register RHSReg = MI.getOperand(2).getReg();
2781
2782 // Don't recompute anything.
2783 if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
2784 return false;
2785
2786 // Make sure we have (hand x, ...), (hand y, ...)
2787 MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
2788 MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
2789 if (!LeftHandInst || !RightHandInst)
2790 return false;
2791 unsigned HandOpcode = LeftHandInst->getOpcode();
2792 if (HandOpcode != RightHandInst->getOpcode())
2793 return false;
2794 if (!LeftHandInst->getOperand(1).isReg() ||
2795 !RightHandInst->getOperand(1).isReg())
2796 return false;
2797
2798 // Make sure the types match up, and if we're doing this post-legalization,
2799 // we end up with legal types.
2800 Register X = LeftHandInst->getOperand(1).getReg();
2801 Register Y = RightHandInst->getOperand(1).getReg();
2802 LLT XTy = MRI.getType(X);
2803 LLT YTy = MRI.getType(Y);
2804 if (XTy != YTy)
2805 return false;
2806 if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
2807 return false;
2808
2809 // Optional extra source register.
2810 Register ExtraHandOpSrcReg;
2811 switch (HandOpcode) {
2812 default:
2813 return false;
2814 case TargetOpcode::G_ANYEXT:
2815 case TargetOpcode::G_SEXT:
2816 case TargetOpcode::G_ZEXT: {
2817 // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
2818 break;
2819 }
2820 case TargetOpcode::G_AND:
2821 case TargetOpcode::G_ASHR:
2822 case TargetOpcode::G_LSHR:
2823 case TargetOpcode::G_SHL: {
2824 // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
2825 MachineOperand &ZOp = LeftHandInst->getOperand(2);
2826 if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
2827 return false;
2828 ExtraHandOpSrcReg = ZOp.getReg();
2829 break;
2830 }
2831 }
2832
2833 // Record the steps to build the new instructions.
2834 //
2835 // Steps to build (logic x, y)
2836 auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
2837 OperandBuildSteps LogicBuildSteps = {
2838 [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
2839 [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
2840 [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
2841 InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
2842
2843 // Steps to build hand (logic x, y), ...z
2844 OperandBuildSteps HandBuildSteps = {
2845 [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
2846 [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
2847 if (ExtraHandOpSrcReg.isValid())
2848 HandBuildSteps.push_back(
2849 [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
2850 InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
2851
2852 MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
2853 return true;
2854 }
2855
2856 bool CombinerHelper::applyBuildInstructionSteps(
2857 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
2858 assert(MatchInfo.InstrsToBuild.size() &&
2859 "Expected at least one instr to build?");
2860 Builder.setInstr(MI);
2861 for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
2862 assert(InstrToBuild.Opcode && "Expected a valid opcode?");
2863 assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
2864 MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
2865 for (auto &OperandFn : InstrToBuild.OperandFns)
2866 OperandFn(Instr);
2867 }
2868 MI.eraseFromParent();
2869 return true;
2870 }
2871
2872 bool CombinerHelper::matchAshrShlToSextInreg(
2873 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2874 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
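  // Fold (ashr (shl x, C), C) -> (sext_inreg x, Width - C).
  //
  // E.g. (illustrative) for an s32 value with C = 24: shifting left and then
  // arithmetically right by 24 sign-extends the low 8 bits, which is exactly
  // G_SEXT_INREG %x, 8.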
2875 int64_t ShlCst, AshrCst;
2876 Register Src;
2877 // FIXME: detect splat constant vectors.
2878 if (!mi_match(MI.getOperand(0).getReg(), MRI,
2879 m_GAShr(m_GShl(m_Reg(Src), m_ICst(ShlCst)), m_ICst(AshrCst))))
2880 return false;
2881 if (ShlCst != AshrCst)
2882 return false;
2883 if (!isLegalOrBeforeLegalizer(
2884 {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}}))
2885 return false;
2886 MatchInfo = std::make_tuple(Src, ShlCst);
2887 return true;
2888 }
2889 bool CombinerHelper::applyAshShlToSextInreg(
2890 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) {
2891 assert(MI.getOpcode() == TargetOpcode::G_ASHR);
2892 Register Src;
2893 int64_t ShiftAmt;
2894 std::tie(Src, ShiftAmt) = MatchInfo;
2895 unsigned Size = MRI.getType(Src).getScalarSizeInBits();
2896 Builder.setInstrAndDebugLoc(MI);
2897 Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt);
2898 MI.eraseFromParent();
2899 return true;
2900 }
2901
2902 bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
2903 Register &Replacement) {
2904 // Given
2905 //
2906 // %y:_(sN) = G_SOMETHING
2907 // %x:_(sN) = G_SOMETHING
2908 // %res:_(sN) = G_AND %x, %y
2909 //
2910 // Eliminate the G_AND when it is known that x & y == x or x & y == y.
2911 //
2912 // Patterns like this can appear as a result of legalization. E.g.
2913 //
2914 // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y
2915 // %one:_(s32) = G_CONSTANT i32 1
2916 // %and:_(s32) = G_AND %cmp, %one
2917 //
2918 // In this case, G_ICMP only produces a single bit, so x & 1 == x.
2919 assert(MI.getOpcode() == TargetOpcode::G_AND);
2920 if (!KB)
2921 return false;
2922
2923 Register AndDst = MI.getOperand(0).getReg();
2924 LLT DstTy = MRI.getType(AndDst);
2925
2926 // FIXME: This should be removed once GISelKnownBits supports vectors.
2927 if (DstTy.isVector())
2928 return false;
2929
2930 Register LHS = MI.getOperand(1).getReg();
2931 Register RHS = MI.getOperand(2).getReg();
2932 KnownBits LHSBits = KB->getKnownBits(LHS);
2933 KnownBits RHSBits = KB->getKnownBits(RHS);
2934
2935 // Check that x & Mask == x.
2936 // x & 1 == x, always
2937 // x & 0 == x, only if x is also 0
2938 // Meaning Mask has no effect if every bit is either one in Mask or zero in x.
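  //
  // E.g. (illustrative, s32): if KB proves that the upper 24 bits of x are
  // zero and Mask is the constant 0xFF, then (LHSBits.Zero | RHSBits.One) is
  // all ones and the G_AND can be replaced with x.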
2939 //
2940 // Check if we can replace AndDst with the LHS of the G_AND
2941 if (canReplaceReg(AndDst, LHS, MRI) &&
2942 (LHSBits.Zero | RHSBits.One).isAllOnesValue()) {
2943 Replacement = LHS;
2944 return true;
2945 }
2946
2947 // Check if we can replace AndDst with the RHS of the G_AND
2948 if (canReplaceReg(AndDst, RHS, MRI) &&
2949 (LHSBits.One | RHSBits.Zero).isAllOnesValue()) {
2950 Replacement = RHS;
2951 return true;
2952 }
2953
2954 return false;
2955 }
2956
2957 bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
2958 // Given
2959 //
2960 // %y:_(sN) = G_SOMETHING
2961 // %x:_(sN) = G_SOMETHING
2962 // %res:_(sN) = G_OR %x, %y
2963 //
2964 // Eliminate the G_OR when it is known that x | y == x or x | y == y.
2965 assert(MI.getOpcode() == TargetOpcode::G_OR);
2966 if (!KB)
2967 return false;
2968
2969 Register OrDst = MI.getOperand(0).getReg();
2970 LLT DstTy = MRI.getType(OrDst);
2971
2972 // FIXME: This should be removed once GISelKnownBits supports vectors.
2973 if (DstTy.isVector())
2974 return false;
2975
2976 Register LHS = MI.getOperand(1).getReg();
2977 Register RHS = MI.getOperand(2).getReg();
2978 KnownBits LHSBits = KB->getKnownBits(LHS);
2979 KnownBits RHSBits = KB->getKnownBits(RHS);
2980
2981 // Check that x | Mask == x.
2982 // x | 0 == x, always
2983 // x | 1 == x, only if x is also 1
2984 // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
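  //
  // E.g. (illustrative, s32): if Mask is the constant 0xFF and KB proves that
  // the low 8 bits of x are already all ones, then (LHSBits.One | RHSBits.Zero)
  // is all ones and the G_OR can be replaced with x.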
2985 //
2986 // Check if we can replace OrDst with the LHS of the G_OR
2987 if (canReplaceReg(OrDst, LHS, MRI) &&
2988 (LHSBits.One | RHSBits.Zero).isAllOnesValue()) {
2989 Replacement = LHS;
2990 return true;
2991 }
2992
2993 // Check if we can replace OrDst with the RHS of the G_OR
2994 if (canReplaceReg(OrDst, RHS, MRI) &&
2995 (LHSBits.Zero | RHSBits.One).isAllOnesValue()) {
2996 Replacement = RHS;
2997 return true;
2998 }
2999
3000 return false;
3001 }
3002
3003 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
3004 // If the input is already sign extended, just drop the extension.
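  //
  // E.g. (illustrative): a G_SEXT_INREG to 8 bits of an s32 value is redundant
  // if the source has at least 32 - 8 + 1 = 25 sign bits, e.g. because it was
  // produced by a G_SEXT from s8.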
3005 Register Src = MI.getOperand(1).getReg();
3006 unsigned ExtBits = MI.getOperand(2).getImm();
3007 unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits();
3008 return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1);
3009 }
3010
3011 static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits,
3012 int64_t Cst, bool IsVector, bool IsFP) {
3013 // For i1, Cst will always be -1 regardless of boolean contents.
3014 return (ScalarSizeBits == 1 && Cst == -1) ||
3015 isConstTrueVal(TLI, Cst, IsVector, IsFP);
3016 }
3017
3018 bool CombinerHelper::matchNotCmp(MachineInstr &MI,
3019 SmallVectorImpl<Register> &RegsToNegate) {
3020 assert(MI.getOpcode() == TargetOpcode::G_XOR);
3021 LLT Ty = MRI.getType(MI.getOperand(0).getReg());
3022 const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering();
3023 Register XorSrc;
3024 Register CstReg;
3025 // We match xor(src, true) here.
3026 if (!mi_match(MI.getOperand(0).getReg(), MRI,
3027 m_GXor(m_Reg(XorSrc), m_Reg(CstReg))))
3028 return false;
3029
3030 if (!MRI.hasOneNonDBGUse(XorSrc))
3031 return false;
3032
3033 // Check that XorSrc is the root of a tree of comparisons combined with ANDs
3034 // and ORs. The suffix of RegsToNegate starting from index I is used as a
3035 // work list of tree nodes to visit.
3036 RegsToNegate.push_back(XorSrc);
3037 // Remember whether the comparisons are all integer or all floating point.
3038 bool IsInt = false;
3039 bool IsFP = false;
3040 for (unsigned I = 0; I < RegsToNegate.size(); ++I) {
3041 Register Reg = RegsToNegate[I];
3042 if (!MRI.hasOneNonDBGUse(Reg))
3043 return false;
3044 MachineInstr *Def = MRI.getVRegDef(Reg);
3045 switch (Def->getOpcode()) {
3046 default:
3047 // Don't match if the tree contains anything other than ANDs, ORs and
3048 // comparisons.
3049 return false;
3050 case TargetOpcode::G_ICMP:
3051 if (IsFP)
3052 return false;
3053 IsInt = true;
3054 // When we apply the combine we will invert the predicate.
3055 break;
3056 case TargetOpcode::G_FCMP:
3057 if (IsInt)
3058 return false;
3059 IsFP = true;
3060 // When we apply the combine we will invert the predicate.
3061 break;
3062 case TargetOpcode::G_AND:
3063 case TargetOpcode::G_OR:
3064 // Implement De Morgan's laws:
3065 // ~(x & y) -> ~x | ~y
3066 // ~(x | y) -> ~x & ~y
3067 // When we apply the combine we will change the opcode and recursively
3068 // negate the operands.
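      // E.g. (illustrative):
      //   ~((icmp eq a, b) & (icmp slt c, d))
      //     --> (icmp ne a, b) | (icmp sge c, d)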
3069 RegsToNegate.push_back(Def->getOperand(1).getReg());
3070 RegsToNegate.push_back(Def->getOperand(2).getReg());
3071 break;
3072 }
3073 }
3074
3075 // Now that we know whether the comparisons are integer or floating point,
3076 // check the constant in the xor.
3077 int64_t Cst;
3078 if (Ty.isVector()) {
3079 MachineInstr *CstDef = MRI.getVRegDef(CstReg);
3080 auto MaybeCst = getBuildVectorConstantSplat(*CstDef, MRI);
3081 if (!MaybeCst)
3082 return false;
3083 if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP))
3084 return false;
3085 } else {
3086 if (!mi_match(CstReg, MRI, m_ICst(Cst)))
3087 return false;
3088 if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP))
3089 return false;
3090 }
3091
3092 return true;
3093 }
3094
3095 bool CombinerHelper::applyNotCmp(MachineInstr &MI,
3096 SmallVectorImpl<Register> &RegsToNegate) {
3097 for (Register Reg : RegsToNegate) {
3098 MachineInstr *Def = MRI.getVRegDef(Reg);
3099 Observer.changingInstr(*Def);
3100 // For each comparison, invert the opcode. For each AND and OR, change the
3101 // opcode.
3102 switch (Def->getOpcode()) {
3103 default:
3104 llvm_unreachable("Unexpected opcode");
3105 case TargetOpcode::G_ICMP:
3106 case TargetOpcode::G_FCMP: {
3107 MachineOperand &PredOp = Def->getOperand(1);
3108 CmpInst::Predicate NewP = CmpInst::getInversePredicate(
3109 (CmpInst::Predicate)PredOp.getPredicate());
3110 PredOp.setPredicate(NewP);
3111 break;
3112 }
3113 case TargetOpcode::G_AND:
3114 Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR));
3115 break;
3116 case TargetOpcode::G_OR:
3117 Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3118 break;
3119 }
3120 Observer.changedInstr(*Def);
3121 }
3122
3123 replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg());
3124 MI.eraseFromParent();
3125 return true;
3126 }
3127
3128 bool CombinerHelper::matchXorOfAndWithSameReg(
3129 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3130 // Match (xor (and x, y), y) (or any of its commuted cases)
3131 assert(MI.getOpcode() == TargetOpcode::G_XOR);
3132 Register &X = MatchInfo.first;
3133 Register &Y = MatchInfo.second;
3134 Register AndReg = MI.getOperand(1).getReg();
3135 Register SharedReg = MI.getOperand(2).getReg();
3136
3137 // Find a G_AND on either side of the G_XOR.
3138 // Look for one of
3139 //
3140 // (xor (and x, y), SharedReg)
3141 // (xor SharedReg, (and x, y))
3142 if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) {
3143 std::swap(AndReg, SharedReg);
3144 if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y))))
3145 return false;
3146 }
3147
3148 // Only do this if we'll eliminate the G_AND.
3149 if (!MRI.hasOneNonDBGUse(AndReg))
3150 return false;
3151
3152 // We can combine if SharedReg is the same as either the LHS or RHS of the
3153 // G_AND.
3154 if (Y != SharedReg)
3155 std::swap(X, Y);
3156 return Y == SharedReg;
3157 }
3158
3159 bool CombinerHelper::applyXorOfAndWithSameReg(
3160 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) {
3161 // Fold (xor (and x, y), y) -> (and (not x), y)
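  //
  // This holds bitwise: where y is 0 both sides are 0, and where y is 1 the
  // result is x ^ 1 == ~x. E.g. (illustrative, low 4 bits shown):
  //   x = 0b1100, y = 0b1010: (x & y) ^ y == 0b0010 == (~x) & y.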
3162 Builder.setInstrAndDebugLoc(MI);
3163 Register X, Y;
3164 std::tie(X, Y) = MatchInfo;
3165 auto Not = Builder.buildNot(MRI.getType(X), X);
3166 Observer.changingInstr(MI);
3167 MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND));
3168 MI.getOperand(1).setReg(Not->getOperand(0).getReg());
3169 MI.getOperand(2).setReg(Y);
3170 Observer.changedInstr(MI);
3171 return true;
3172 }
3173
3174 bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) {
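  // Fold (G_PTR_ADD %zero_base, %offset) -> (G_INTTOPTR %offset) when the base
  // pointer is known to be zero: either a scalar constant 0 or an all-zeros
  // build_vector. Non-integral address spaces are skipped because an integer
  // offset cannot simply be reinterpreted as a pointer there.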
3175 Register DstReg = MI.getOperand(0).getReg();
3176 LLT Ty = MRI.getType(DstReg);
3177 const DataLayout &DL = Builder.getMF().getDataLayout();
3178
3179 if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace()))
3180 return false;
3181
3182 if (Ty.isPointer()) {
3183 auto ConstVal = getConstantVRegVal(MI.getOperand(1).getReg(), MRI);
3184 return ConstVal && *ConstVal == 0;
3185 }
3186
3187 assert(Ty.isVector() && "Expecting a vector type");
3188 const MachineInstr *VecMI = MRI.getVRegDef(MI.getOperand(1).getReg());
3189 return isBuildVectorAllZeros(*VecMI, MRI);
3190 }
3191
3192 bool CombinerHelper::applyPtrAddZero(MachineInstr &MI) {
3193 assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD);
3194 Builder.setInstrAndDebugLoc(MI);
3195 Builder.buildIntToPtr(MI.getOperand(0), MI.getOperand(2));
3196 MI.eraseFromParent();
3197 return true;
3198 }
3199
3200 /// The second source operand is known to be a power of 2.
3201 bool CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
3202 Register DstReg = MI.getOperand(0).getReg();
3203 Register Src0 = MI.getOperand(1).getReg();
3204 Register Pow2Src1 = MI.getOperand(2).getReg();
3205 LLT Ty = MRI.getType(DstReg);
3206 Builder.setInstrAndDebugLoc(MI);
3207
3208 // Fold (urem x, pow2) -> (and x, pow2-1)
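  //
  // E.g. (illustrative): x urem 8 == x & 7, so we emit a G_ADD of -1 to the
  // power-of-2 operand followed by a G_AND with the first operand.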
3209 auto NegOne = Builder.buildConstant(Ty, -1);
3210 auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne);
3211 Builder.buildAnd(DstReg, Src0, Add);
3212 MI.eraseFromParent();
3213 return true;
3214 }
3215
3216 Optional<SmallVector<Register, 8>>
3217 CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
3218 assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");
3219 // We want to detect if Root is part of a tree which represents a bunch
3220 // of loads being merged into a larger load. We'll try to recognize patterns
3221 // like, for example:
3222 //
3223 // Reg Reg
3224 // \ /
3225 // OR_1 Reg
3226 // \ /
3227 // OR_2
3228 // \ Reg
3229 // .. /
3230 // Root
3231 //
3232 // Reg Reg Reg Reg
3233 // \ / \ /
3234 // OR_1 OR_2
3235 // \ /
3236 // \ /
3237 // ...
3238 // Root
3239 //
3240 // Each "Reg" may have been produced by a load + some arithmetic. This
3241 // function will save each of them.
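  //
  // E.g. (illustrative) each leaf "Reg" might be defined as:
  //
  //   %byte:_(s32) = G_ZEXTLOAD %addr(p0) :: (load 1)
  //   %reg:_(s32) = G_SHL %byte, %cst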
3242 SmallVector<Register, 8> RegsToVisit;
3243 SmallVector<const MachineInstr *, 7> Ors = {Root};
3244
3245 // In the "worst" case, we're dealing with a load for each byte. So, there
3246 // are at most #bytes - 1 ORs.
3247 const unsigned MaxIter =
3248 MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1;
3249 for (unsigned Iter = 0; Iter < MaxIter; ++Iter) {
3250 if (Ors.empty())
3251 break;
3252 const MachineInstr *Curr = Ors.pop_back_val();
3253 Register OrLHS = Curr->getOperand(1).getReg();
3254 Register OrRHS = Curr->getOperand(2).getReg();
3255
3256 // In the combine, we want to eliminate the entire tree.
3257 if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS))
3258 return None;
3259
3260 // If it's a G_OR, save it and continue to walk. If it's not, then it's
3261 // something that may be a load + arithmetic.
3262 if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI))
3263 Ors.push_back(Or);
3264 else
3265 RegsToVisit.push_back(OrLHS);
3266 if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI))
3267 Ors.push_back(Or);
3268 else
3269 RegsToVisit.push_back(OrRHS);
3270 }
3271
3272 // We're going to try to merge each register into a wider power-of-2 type,
3273 // so we ought to have an even number of registers.
3274 if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0)
3275 return None;
3276 return RegsToVisit;
3277 }
3278
3279 /// Helper function for findLoadOffsetsForLoadOrCombine.
3280 ///
3281 /// Check if \p Reg is the result of loading a \p MemSizeInBits wide value,
3282 /// and then moving that value into a specific byte offset.
3283 ///
3284 /// e.g. x[i] << 24
3285 ///
3286 /// \returns The load instruction and the byte offset it is moved into.
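///
/// E.g. (illustrative) with \p MemSizeInBits = 8, an 8-bit G_ZEXTLOAD shifted
/// left by 24 bits is moved into byte offset 24 / 8 = 3.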
3287 static Optional<std::pair<MachineInstr *, int64_t>>
3288 matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits,
3289 const MachineRegisterInfo &MRI) {
3290 assert(MRI.hasOneNonDBGUse(Reg) &&
3291 "Expected Reg to only have one non-debug use?");
3292 Register MaybeLoad;
3293 int64_t Shift;
3294 if (!mi_match(Reg, MRI,
3295 m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) {
3296 Shift = 0;
3297 MaybeLoad = Reg;
3298 }
3299
3300 if (Shift % MemSizeInBits != 0)
3301 return None;
3302
3303 // TODO: Handle other types of loads.
3304 auto *Load = getOpcodeDef(TargetOpcode::G_ZEXTLOAD, MaybeLoad, MRI);
3305 if (!Load)
3306 return None;
3307
3308 const auto &MMO = **Load->memoperands_begin();
3309 if (!MMO.isUnordered() || MMO.getSizeInBits() != MemSizeInBits)
3310 return None;
3311
3312 return std::make_pair(Load, Shift / MemSizeInBits);
3313 }
3314
3315 Optional<std::pair<MachineInstr *, int64_t>>
3316 CombinerHelper::findLoadOffsetsForLoadOrCombine(
3317 SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
3318 const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) {
3319
3320 // Each load found for the pattern; there should be one per register in RegsToVisit.
3321 SmallSetVector<const MachineInstr *, 8> Loads;
3322
3323 // The lowest index used in any load. (The lowest "i" for each x[i].)
3324 int64_t LowestIdx = INT64_MAX;
3325
3326 // The load which uses the lowest index.
3327 MachineInstr *LowestIdxLoad = nullptr;
3328
3329 // Keeps track of the load indices we see. We shouldn't see any indices twice.
3330 SmallSet<int64_t, 8> SeenIdx;
3331
3332 // Ensure each load is in the same MBB.
3333 // TODO: Support multiple MachineBasicBlocks.
3334 MachineBasicBlock *MBB = nullptr;
3335 const MachineMemOperand *MMO = nullptr;
3336
3337 // Earliest instruction-order load in the pattern.
3338 MachineInstr *EarliestLoad = nullptr;
3339
3340 // Latest instruction-order load in the pattern.
3341 MachineInstr *LatestLoad = nullptr;
3342
3343 // Base pointer which every load should share.
3344 Register BasePtr;
3345
3346 // We want to find a load for each register. Each load should have some
3347 // appropriate bit twiddling arithmetic. During this loop, we will also keep
3348 // track of the load which uses the lowest index. Later, we will check if we
3349 // can use its pointer in the final, combined load.
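  //
  // E.g. (illustrative) for a little-endian pattern a[0] | (a[1] << 8), this
  // loop records MemOffset2Idx = {0 -> 0, 1 -> 1}, LowestIdx = 0, and requires
  // both loads to share the base pointer of "a".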
3350 for (auto Reg : RegsToVisit) {
3351 // Find the load, and find the position (byte offset) that its value will
3352 // end up at in the final, combined value.
3353 auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI);
3354 if (!LoadAndPos)
3355 return None;
3356 MachineInstr *Load;
3357 int64_t DstPos;
3358 std::tie(Load, DstPos) = *LoadAndPos;
3359
3360 // TODO: Handle multiple MachineBasicBlocks. Currently not handled because
3361 // it is difficult to check for stores/calls/etc between loads.
3362 MachineBasicBlock *LoadMBB = Load->getParent();
3363 if (!MBB)
3364 MBB = LoadMBB;
3365 if (LoadMBB != MBB)
3366 return None;
3367
3368 // Make sure that the MachineMemOperands of every seen load are compatible.
3369 const MachineMemOperand *LoadMMO = *Load->memoperands_begin();
3370 if (!MMO)
3371 MMO = LoadMMO;
3372 if (MMO->getAddrSpace() != LoadMMO->getAddrSpace())
3373 return None;
3374
3375 // Find out what the base pointer and index for the load are.
3376 Register LoadPtr;
3377 int64_t Idx;
3378 if (!mi_match(Load->getOperand(1).getReg(), MRI,
3379 m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) {
3380 LoadPtr = Load->getOperand(1).getReg();
3381 Idx = 0;
3382 }
3383
3384 // Don't combine things like a[i], a[i] -> a bigger load.
3385 if (!SeenIdx.insert(Idx).second)
3386 return None;
3387
3388 // Every load must share the same base pointer; don't combine things like:
3389 //
3390 // a[i], b[i + 1] -> a bigger load.
3391 if (!BasePtr.isValid())
3392 BasePtr = LoadPtr;
3393 if (BasePtr != LoadPtr)
3394 return None;
3395
3396 if (Idx < LowestIdx) {
3397 LowestIdx = Idx;
3398 LowestIdxLoad = Load;
3399 }
3400
3401 // Keep track of the byte offset that this load ends up at. If we have seen
3402 // the byte offset, then stop here. We do not want to combine:
3403 //
3404 // a[i] << 16, a[i + k] << 16 -> a bigger load.
3405 if (!MemOffset2Idx.try_emplace(DstPos, Idx).second)
3406 return None;
3407 Loads.insert(Load);
3408
3409 // Keep track of the position of the earliest/latest loads in the pattern.
3410 // We will check that there are no load fold barriers between them later
3411 // on.
3412 //
3413 // FIXME: Is there a better way to check for load fold barriers?
3414 if (!EarliestLoad || dominates(*Load, *EarliestLoad))
3415 EarliestLoad = Load;
3416 if (!LatestLoad || dominates(*LatestLoad, *Load))
3417 LatestLoad = Load;
3418 }
3419
3420 // We found a load for each register. Let's check if each load satisfies the
3421 // pattern.
3422 assert(Loads.size() == RegsToVisit.size() &&
3423 "Expected to find a load for each register?");
3424 assert(EarliestLoad != LatestLoad && EarliestLoad &&
3425 LatestLoad && "Expected at least two loads?");
3426
3427 // Check if there are any stores, calls, etc. between any of the loads. If
3428 // there are, then we can't safely perform the combine.
3429 //
3430 // MaxIter is chosen based off the (worst case) number of iterations it
3431 // typically takes to succeed in the LLVM test suite plus some padding.
3432 //
3433 // FIXME: Is there a better way to check for load fold barriers?
3434 const unsigned MaxIter = 20;
3435 unsigned Iter = 0;
3436 for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(),
3437 LatestLoad->getIterator())) {
3438 if (Loads.count(&MI))
3439 continue;
3440 if (MI.isLoadFoldBarrier())
3441 return None;
3442 if (Iter++ == MaxIter)
3443 return None;
3444 }
3445
3446 return std::make_pair(LowestIdxLoad, LowestIdx);
3447 }
3448
3449 bool CombinerHelper::matchLoadOrCombine(
3450 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3451 assert(MI.getOpcode() == TargetOpcode::G_OR);
3452 MachineFunction &MF = *MI.getMF();
3453 // Assuming a little-endian target, transform:
3454 // s8 *a = ...
3455 // s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24)
3456 // =>
3457 // s32 val = *((i32)a)
3458 //
3459 // s8 *a = ...
3460 // s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3]
3461 // =>
3462 // s32 val = BSWAP(*((s32)a))
3463 Register Dst = MI.getOperand(0).getReg();
3464 LLT Ty = MRI.getType(Dst);
3465 if (Ty.isVector())
3466 return false;
3467
3468 // We need to combine at least two loads into this type. Since the smallest
3469 // possible load is into a byte, we need at least a 16-bit wide type.
3470 const unsigned WideMemSizeInBits = Ty.getSizeInBits();
3471 if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0)
3472 return false;
3473
3474 // Match a collection of non-OR instructions in the pattern.
3475 auto RegsToVisit = findCandidatesForLoadOrCombine(&MI);
3476 if (!RegsToVisit)
3477 return false;
3478
3479 // We have a collection of non-OR instructions. Figure out how wide each of
3480 // the small loads should be based off of the number of potential loads we
3481 // found.
3482 const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size();
3483 if (NarrowMemSizeInBits % 8 != 0)
3484 return false;
3485
3486 // Check if each register feeding into each OR is a load from the same
3487 // base pointer + some arithmetic.
3488 //
3489 // e.g. a[0], a[1] << 8, a[2] << 16, etc.
3490 //
3491 // Also verify that each of these ends up putting a[i] into the same memory
3492 // offset as a load into a wide type would.
3493 SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx;
3494 MachineInstr *LowestIdxLoad;
3495 int64_t LowestIdx;
3496 auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine(
3497 MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits);
3498 if (!MaybeLoadInfo)
3499 return false;
3500 std::tie(LowestIdxLoad, LowestIdx) = *MaybeLoadInfo;
3501
3502 // We have a bunch of loads being OR'd together. Using the addresses + offsets
3503 // we found before, check if this corresponds to a big or little endian byte
3504 // pattern. If it does, then we can represent it using a load + possibly a
3505 // BSWAP.
3506 bool IsBigEndianTarget = MF.getDataLayout().isBigEndian();
3507 Optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx);
3508 if (!IsBigEndian.hasValue())
3509 return false;
3510 bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian;
3511 if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}}))
3512 return false;
3513
3514 // Make sure that the load from the lowest index produces offset 0 in the
3515 // final value.
3516 //
3517 // This ensures that we won't combine something like this:
3518 //
3519 // load x[i] -> byte 2
3520 // load x[i+1] -> byte 0 ---> wide_load x[i]
3521 // load x[i+2] -> byte 1
3522 const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits;
3523 const unsigned ZeroByteOffset =
3524 *IsBigEndian
3525 ? bigEndianByteAt(NumLoadsInTy, 0)
3526 : littleEndianByteAt(NumLoadsInTy, 0);
3527 auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset);
3528 if (ZeroOffsetIdx == MemOffset2Idx.end() ||
3529 ZeroOffsetIdx->second != LowestIdx)
3530 return false;
3531
3532 // We will reuse the pointer from the load which ends up at byte offset 0. It
3533 // may not use index 0.
3534 Register Ptr = LowestIdxLoad->getOperand(1).getReg();
3535 const MachineMemOperand &MMO = **LowestIdxLoad->memoperands_begin();
3536 LegalityQuery::MemDesc MMDesc;
3537 MMDesc.SizeInBits = WideMemSizeInBits;
3538 MMDesc.AlignInBits = MMO.getAlign().value() * 8;
3539 MMDesc.Ordering = MMO.getOrdering();
3540 if (!isLegalOrBeforeLegalizer(
3541 {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}}))
3542 return false;
3543 auto PtrInfo = MMO.getPointerInfo();
3544 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8);
3545
3546 // Load must be allowed and fast on the target.
3547 LLVMContext &C = MF.getFunction().getContext();
3548 auto &DL = MF.getDataLayout();
3549 bool Fast = false;
3550 if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) ||
3551 !Fast)
3552 return false;
3553
3554 MatchInfo = [=](MachineIRBuilder &MIB) {
3555 Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst;
3556 MIB.buildLoad(LoadDst, Ptr, *NewMMO);
3557 if (NeedsBSwap)
3558 MIB.buildBSwap(Dst, LoadDst);
3559 };
3560 return true;
3561 }
3562
3563 bool CombinerHelper::applyLoadOrCombine(
3564 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) {
3565 Builder.setInstrAndDebugLoc(MI);
3566 MatchInfo(Builder);
3567 MI.eraseFromParent();
3568 return true;
3569 }
3570
3571 bool CombinerHelper::tryCombine(MachineInstr &MI) {
3572 if (tryCombineCopy(MI))
3573 return true;
3574 if (tryCombineExtendingLoads(MI))
3575 return true;
3576 if (tryCombineIndexedLoadStore(MI))
3577 return true;
3578 return false;
3579 }
3580