1 //===-- lib/CodeGen/GlobalISel/GICombinerHelper.cpp -----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 #include "llvm/CodeGen/GlobalISel/CombinerHelper.h" 9 #include "llvm/ADT/SetVector.h" 10 #include "llvm/ADT/SmallBitVector.h" 11 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" 12 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" 13 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h" 14 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" 15 #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" 16 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" 17 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" 18 #include "llvm/CodeGen/GlobalISel/Utils.h" 19 #include "llvm/CodeGen/LowLevelType.h" 20 #include "llvm/CodeGen/MachineBasicBlock.h" 21 #include "llvm/CodeGen/MachineDominators.h" 22 #include "llvm/CodeGen/MachineInstr.h" 23 #include "llvm/CodeGen/MachineMemOperand.h" 24 #include "llvm/CodeGen/MachineRegisterInfo.h" 25 #include "llvm/CodeGen/RegisterBankInfo.h" 26 #include "llvm/CodeGen/TargetInstrInfo.h" 27 #include "llvm/CodeGen/TargetLowering.h" 28 #include "llvm/CodeGen/TargetOpcodes.h" 29 #include "llvm/IR/DataLayout.h" 30 #include "llvm/Support/Casting.h" 31 #include "llvm/Support/DivisionByConstantInfo.h" 32 #include "llvm/Support/MathExtras.h" 33 #include "llvm/Target/TargetMachine.h" 34 #include <tuple> 35 36 #define DEBUG_TYPE "gi-combiner" 37 38 using namespace llvm; 39 using namespace MIPatternMatch; 40 41 // Option to allow testing of the combiner while no targets know about indexed 42 // addressing. 43 static cl::opt<bool> 44 ForceLegalIndexing("force-legal-indexing", cl::Hidden, cl::init(false), 45 cl::desc("Force all indexed operations to be " 46 "legal for the GlobalISel combiner")); 47 48 CombinerHelper::CombinerHelper(GISelChangeObserver &Observer, 49 MachineIRBuilder &B, GISelKnownBits *KB, 50 MachineDominatorTree *MDT, 51 const LegalizerInfo *LI) 52 : Builder(B), MRI(Builder.getMF().getRegInfo()), Observer(Observer), KB(KB), 53 MDT(MDT), LI(LI), RBI(Builder.getMF().getSubtarget().getRegBankInfo()), 54 TRI(Builder.getMF().getSubtarget().getRegisterInfo()) { 55 (void)this->KB; 56 } 57 58 const TargetLowering &CombinerHelper::getTargetLowering() const { 59 return *Builder.getMF().getSubtarget().getTargetLowering(); 60 } 61 62 /// \returns The little endian in-memory byte position of byte \p I in a 63 /// \p ByteWidth bytes wide type. 64 /// 65 /// E.g. Given a 4-byte type x, x[0] -> byte 0 66 static unsigned littleEndianByteAt(const unsigned ByteWidth, const unsigned I) { 67 assert(I < ByteWidth && "I must be in [0, ByteWidth)"); 68 return I; 69 } 70 71 /// Determines the LogBase2 value for a non-null input value using the 72 /// transform: LogBase2(V) = (EltBits - 1) - ctlz(V). 73 static Register buildLogBase2(Register V, MachineIRBuilder &MIB) { 74 auto &MRI = *MIB.getMRI(); 75 LLT Ty = MRI.getType(V); 76 auto Ctlz = MIB.buildCTLZ(Ty, V); 77 auto Base = MIB.buildConstant(Ty, Ty.getScalarSizeInBits() - 1); 78 return MIB.buildSub(Ty, Base, Ctlz).getReg(0); 79 } 80 81 /// \returns The big endian in-memory byte position of byte \p I in a 82 /// \p ByteWidth bytes wide type. 83 /// 84 /// E.g. Given a 4-byte type x, x[0] -> byte 3 85 static unsigned bigEndianByteAt(const unsigned ByteWidth, const unsigned I) { 86 assert(I < ByteWidth && "I must be in [0, ByteWidth)"); 87 return ByteWidth - I - 1; 88 } 89 90 /// Given a map from byte offsets in memory to indices in a load/store, 91 /// determine if that map corresponds to a little or big endian byte pattern. 92 /// 93 /// \param MemOffset2Idx maps memory offsets to address offsets. 94 /// \param LowestIdx is the lowest index in \p MemOffset2Idx. 95 /// 96 /// \returns true if the map corresponds to a big endian byte pattern, false 97 /// if it corresponds to a little endian byte pattern, and None otherwise. 98 /// 99 /// E.g. given a 32-bit type x, and x[AddrOffset], the in-memory byte patterns 100 /// are as follows: 101 /// 102 /// AddrOffset Little endian Big endian 103 /// 0 0 3 104 /// 1 1 2 105 /// 2 2 1 106 /// 3 3 0 107 static Optional<bool> 108 isBigEndian(const SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx, 109 int64_t LowestIdx) { 110 // Need at least two byte positions to decide on endianness. 111 unsigned Width = MemOffset2Idx.size(); 112 if (Width < 2) 113 return None; 114 bool BigEndian = true, LittleEndian = true; 115 for (unsigned MemOffset = 0; MemOffset < Width; ++ MemOffset) { 116 auto MemOffsetAndIdx = MemOffset2Idx.find(MemOffset); 117 if (MemOffsetAndIdx == MemOffset2Idx.end()) 118 return None; 119 const int64_t Idx = MemOffsetAndIdx->second - LowestIdx; 120 assert(Idx >= 0 && "Expected non-negative byte offset?"); 121 LittleEndian &= Idx == littleEndianByteAt(Width, MemOffset); 122 BigEndian &= Idx == bigEndianByteAt(Width, MemOffset); 123 if (!BigEndian && !LittleEndian) 124 return None; 125 } 126 127 assert((BigEndian != LittleEndian) && 128 "Pattern cannot be both big and little endian!"); 129 return BigEndian; 130 } 131 132 bool CombinerHelper::isPreLegalize() const { return !LI; } 133 134 bool CombinerHelper::isLegal(const LegalityQuery &Query) const { 135 assert(LI && "Must have LegalizerInfo to query isLegal!"); 136 return LI->getAction(Query).Action == LegalizeActions::Legal; 137 } 138 139 bool CombinerHelper::isLegalOrBeforeLegalizer( 140 const LegalityQuery &Query) const { 141 return isPreLegalize() || isLegal(Query); 142 } 143 144 bool CombinerHelper::isConstantLegalOrBeforeLegalizer(const LLT Ty) const { 145 if (!Ty.isVector()) 146 return isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {Ty}}); 147 // Vector constants are represented as a G_BUILD_VECTOR of scalar G_CONSTANTs. 148 if (isPreLegalize()) 149 return true; 150 LLT EltTy = Ty.getElementType(); 151 return isLegal({TargetOpcode::G_BUILD_VECTOR, {Ty, EltTy}}) && 152 isLegal({TargetOpcode::G_CONSTANT, {EltTy}}); 153 } 154 155 void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, 156 Register ToReg) const { 157 Observer.changingAllUsesOfReg(MRI, FromReg); 158 159 if (MRI.constrainRegAttrs(ToReg, FromReg)) 160 MRI.replaceRegWith(FromReg, ToReg); 161 else 162 Builder.buildCopy(ToReg, FromReg); 163 164 Observer.finishedChangingAllUsesOfReg(); 165 } 166 167 void CombinerHelper::replaceRegOpWith(MachineRegisterInfo &MRI, 168 MachineOperand &FromRegOp, 169 Register ToReg) const { 170 assert(FromRegOp.getParent() && "Expected an operand in an MI"); 171 Observer.changingInstr(*FromRegOp.getParent()); 172 173 FromRegOp.setReg(ToReg); 174 175 Observer.changedInstr(*FromRegOp.getParent()); 176 } 177 178 void CombinerHelper::replaceOpcodeWith(MachineInstr &FromMI, 179 unsigned ToOpcode) const { 180 Observer.changingInstr(FromMI); 181 182 FromMI.setDesc(Builder.getTII().get(ToOpcode)); 183 184 Observer.changedInstr(FromMI); 185 } 186 187 const RegisterBank *CombinerHelper::getRegBank(Register Reg) const { 188 return RBI->getRegBank(Reg, MRI, *TRI); 189 } 190 191 void CombinerHelper::setRegBank(Register Reg, const RegisterBank *RegBank) { 192 if (RegBank) 193 MRI.setRegBank(Reg, *RegBank); 194 } 195 196 bool CombinerHelper::tryCombineCopy(MachineInstr &MI) { 197 if (matchCombineCopy(MI)) { 198 applyCombineCopy(MI); 199 return true; 200 } 201 return false; 202 } 203 bool CombinerHelper::matchCombineCopy(MachineInstr &MI) { 204 if (MI.getOpcode() != TargetOpcode::COPY) 205 return false; 206 Register DstReg = MI.getOperand(0).getReg(); 207 Register SrcReg = MI.getOperand(1).getReg(); 208 return canReplaceReg(DstReg, SrcReg, MRI); 209 } 210 void CombinerHelper::applyCombineCopy(MachineInstr &MI) { 211 Register DstReg = MI.getOperand(0).getReg(); 212 Register SrcReg = MI.getOperand(1).getReg(); 213 MI.eraseFromParent(); 214 replaceRegWith(MRI, DstReg, SrcReg); 215 } 216 217 bool CombinerHelper::tryCombineConcatVectors(MachineInstr &MI) { 218 bool IsUndef = false; 219 SmallVector<Register, 4> Ops; 220 if (matchCombineConcatVectors(MI, IsUndef, Ops)) { 221 applyCombineConcatVectors(MI, IsUndef, Ops); 222 return true; 223 } 224 return false; 225 } 226 227 bool CombinerHelper::matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef, 228 SmallVectorImpl<Register> &Ops) { 229 assert(MI.getOpcode() == TargetOpcode::G_CONCAT_VECTORS && 230 "Invalid instruction"); 231 IsUndef = true; 232 MachineInstr *Undef = nullptr; 233 234 // Walk over all the operands of concat vectors and check if they are 235 // build_vector themselves or undef. 236 // Then collect their operands in Ops. 237 for (const MachineOperand &MO : MI.uses()) { 238 Register Reg = MO.getReg(); 239 MachineInstr *Def = MRI.getVRegDef(Reg); 240 assert(Def && "Operand not defined"); 241 switch (Def->getOpcode()) { 242 case TargetOpcode::G_BUILD_VECTOR: 243 IsUndef = false; 244 // Remember the operands of the build_vector to fold 245 // them into the yet-to-build flattened concat vectors. 246 for (const MachineOperand &BuildVecMO : Def->uses()) 247 Ops.push_back(BuildVecMO.getReg()); 248 break; 249 case TargetOpcode::G_IMPLICIT_DEF: { 250 LLT OpType = MRI.getType(Reg); 251 // Keep one undef value for all the undef operands. 252 if (!Undef) { 253 Builder.setInsertPt(*MI.getParent(), MI); 254 Undef = Builder.buildUndef(OpType.getScalarType()); 255 } 256 assert(MRI.getType(Undef->getOperand(0).getReg()) == 257 OpType.getScalarType() && 258 "All undefs should have the same type"); 259 // Break the undef vector in as many scalar elements as needed 260 // for the flattening. 261 for (unsigned EltIdx = 0, EltEnd = OpType.getNumElements(); 262 EltIdx != EltEnd; ++EltIdx) 263 Ops.push_back(Undef->getOperand(0).getReg()); 264 break; 265 } 266 default: 267 return false; 268 } 269 } 270 return true; 271 } 272 void CombinerHelper::applyCombineConcatVectors( 273 MachineInstr &MI, bool IsUndef, const ArrayRef<Register> Ops) { 274 // We determined that the concat_vectors can be flatten. 275 // Generate the flattened build_vector. 276 Register DstReg = MI.getOperand(0).getReg(); 277 Builder.setInsertPt(*MI.getParent(), MI); 278 Register NewDstReg = MRI.cloneVirtualRegister(DstReg); 279 280 // Note: IsUndef is sort of redundant. We could have determine it by 281 // checking that at all Ops are undef. Alternatively, we could have 282 // generate a build_vector of undefs and rely on another combine to 283 // clean that up. For now, given we already gather this information 284 // in tryCombineConcatVectors, just save compile time and issue the 285 // right thing. 286 if (IsUndef) 287 Builder.buildUndef(NewDstReg); 288 else 289 Builder.buildBuildVector(NewDstReg, Ops); 290 MI.eraseFromParent(); 291 replaceRegWith(MRI, DstReg, NewDstReg); 292 } 293 294 bool CombinerHelper::tryCombineShuffleVector(MachineInstr &MI) { 295 SmallVector<Register, 4> Ops; 296 if (matchCombineShuffleVector(MI, Ops)) { 297 applyCombineShuffleVector(MI, Ops); 298 return true; 299 } 300 return false; 301 } 302 303 bool CombinerHelper::matchCombineShuffleVector(MachineInstr &MI, 304 SmallVectorImpl<Register> &Ops) { 305 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR && 306 "Invalid instruction kind"); 307 LLT DstType = MRI.getType(MI.getOperand(0).getReg()); 308 Register Src1 = MI.getOperand(1).getReg(); 309 LLT SrcType = MRI.getType(Src1); 310 // As bizarre as it may look, shuffle vector can actually produce 311 // scalar! This is because at the IR level a <1 x ty> shuffle 312 // vector is perfectly valid. 313 unsigned DstNumElts = DstType.isVector() ? DstType.getNumElements() : 1; 314 unsigned SrcNumElts = SrcType.isVector() ? SrcType.getNumElements() : 1; 315 316 // If the resulting vector is smaller than the size of the source 317 // vectors being concatenated, we won't be able to replace the 318 // shuffle vector into a concat_vectors. 319 // 320 // Note: We may still be able to produce a concat_vectors fed by 321 // extract_vector_elt and so on. It is less clear that would 322 // be better though, so don't bother for now. 323 // 324 // If the destination is a scalar, the size of the sources doesn't 325 // matter. we will lower the shuffle to a plain copy. This will 326 // work only if the source and destination have the same size. But 327 // that's covered by the next condition. 328 // 329 // TODO: If the size between the source and destination don't match 330 // we could still emit an extract vector element in that case. 331 if (DstNumElts < 2 * SrcNumElts && DstNumElts != 1) 332 return false; 333 334 // Check that the shuffle mask can be broken evenly between the 335 // different sources. 336 if (DstNumElts % SrcNumElts != 0) 337 return false; 338 339 // Mask length is a multiple of the source vector length. 340 // Check if the shuffle is some kind of concatenation of the input 341 // vectors. 342 unsigned NumConcat = DstNumElts / SrcNumElts; 343 SmallVector<int, 8> ConcatSrcs(NumConcat, -1); 344 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); 345 for (unsigned i = 0; i != DstNumElts; ++i) { 346 int Idx = Mask[i]; 347 // Undef value. 348 if (Idx < 0) 349 continue; 350 // Ensure the indices in each SrcType sized piece are sequential and that 351 // the same source is used for the whole piece. 352 if ((Idx % SrcNumElts != (i % SrcNumElts)) || 353 (ConcatSrcs[i / SrcNumElts] >= 0 && 354 ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) 355 return false; 356 // Remember which source this index came from. 357 ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts; 358 } 359 360 // The shuffle is concatenating multiple vectors together. 361 // Collect the different operands for that. 362 Register UndefReg; 363 Register Src2 = MI.getOperand(2).getReg(); 364 for (auto Src : ConcatSrcs) { 365 if (Src < 0) { 366 if (!UndefReg) { 367 Builder.setInsertPt(*MI.getParent(), MI); 368 UndefReg = Builder.buildUndef(SrcType).getReg(0); 369 } 370 Ops.push_back(UndefReg); 371 } else if (Src == 0) 372 Ops.push_back(Src1); 373 else 374 Ops.push_back(Src2); 375 } 376 return true; 377 } 378 379 void CombinerHelper::applyCombineShuffleVector(MachineInstr &MI, 380 const ArrayRef<Register> Ops) { 381 Register DstReg = MI.getOperand(0).getReg(); 382 Builder.setInsertPt(*MI.getParent(), MI); 383 Register NewDstReg = MRI.cloneVirtualRegister(DstReg); 384 385 if (Ops.size() == 1) 386 Builder.buildCopy(NewDstReg, Ops[0]); 387 else 388 Builder.buildMerge(NewDstReg, Ops); 389 390 MI.eraseFromParent(); 391 replaceRegWith(MRI, DstReg, NewDstReg); 392 } 393 394 namespace { 395 396 /// Select a preference between two uses. CurrentUse is the current preference 397 /// while *ForCandidate is attributes of the candidate under consideration. 398 PreferredTuple ChoosePreferredUse(PreferredTuple &CurrentUse, 399 const LLT TyForCandidate, 400 unsigned OpcodeForCandidate, 401 MachineInstr *MIForCandidate) { 402 if (!CurrentUse.Ty.isValid()) { 403 if (CurrentUse.ExtendOpcode == OpcodeForCandidate || 404 CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT) 405 return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; 406 return CurrentUse; 407 } 408 409 // We permit the extend to hoist through basic blocks but this is only 410 // sensible if the target has extending loads. If you end up lowering back 411 // into a load and extend during the legalizer then the end result is 412 // hoisting the extend up to the load. 413 414 // Prefer defined extensions to undefined extensions as these are more 415 // likely to reduce the number of instructions. 416 if (OpcodeForCandidate == TargetOpcode::G_ANYEXT && 417 CurrentUse.ExtendOpcode != TargetOpcode::G_ANYEXT) 418 return CurrentUse; 419 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ANYEXT && 420 OpcodeForCandidate != TargetOpcode::G_ANYEXT) 421 return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; 422 423 // Prefer sign extensions to zero extensions as sign-extensions tend to be 424 // more expensive. 425 if (CurrentUse.Ty == TyForCandidate) { 426 if (CurrentUse.ExtendOpcode == TargetOpcode::G_SEXT && 427 OpcodeForCandidate == TargetOpcode::G_ZEXT) 428 return CurrentUse; 429 else if (CurrentUse.ExtendOpcode == TargetOpcode::G_ZEXT && 430 OpcodeForCandidate == TargetOpcode::G_SEXT) 431 return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; 432 } 433 434 // This is potentially target specific. We've chosen the largest type 435 // because G_TRUNC is usually free. One potential catch with this is that 436 // some targets have a reduced number of larger registers than smaller 437 // registers and this choice potentially increases the live-range for the 438 // larger value. 439 if (TyForCandidate.getSizeInBits() > CurrentUse.Ty.getSizeInBits()) { 440 return {TyForCandidate, OpcodeForCandidate, MIForCandidate}; 441 } 442 return CurrentUse; 443 } 444 445 /// Find a suitable place to insert some instructions and insert them. This 446 /// function accounts for special cases like inserting before a PHI node. 447 /// The current strategy for inserting before PHI's is to duplicate the 448 /// instructions for each predecessor. However, while that's ok for G_TRUNC 449 /// on most targets since it generally requires no code, other targets/cases may 450 /// want to try harder to find a dominating block. 451 static void InsertInsnsWithoutSideEffectsBeforeUse( 452 MachineIRBuilder &Builder, MachineInstr &DefMI, MachineOperand &UseMO, 453 std::function<void(MachineBasicBlock *, MachineBasicBlock::iterator, 454 MachineOperand &UseMO)> 455 Inserter) { 456 MachineInstr &UseMI = *UseMO.getParent(); 457 458 MachineBasicBlock *InsertBB = UseMI.getParent(); 459 460 // If the use is a PHI then we want the predecessor block instead. 461 if (UseMI.isPHI()) { 462 MachineOperand *PredBB = std::next(&UseMO); 463 InsertBB = PredBB->getMBB(); 464 } 465 466 // If the block is the same block as the def then we want to insert just after 467 // the def instead of at the start of the block. 468 if (InsertBB == DefMI.getParent()) { 469 MachineBasicBlock::iterator InsertPt = &DefMI; 470 Inserter(InsertBB, std::next(InsertPt), UseMO); 471 return; 472 } 473 474 // Otherwise we want the start of the BB 475 Inserter(InsertBB, InsertBB->getFirstNonPHI(), UseMO); 476 } 477 } // end anonymous namespace 478 479 bool CombinerHelper::tryCombineExtendingLoads(MachineInstr &MI) { 480 PreferredTuple Preferred; 481 if (matchCombineExtendingLoads(MI, Preferred)) { 482 applyCombineExtendingLoads(MI, Preferred); 483 return true; 484 } 485 return false; 486 } 487 488 bool CombinerHelper::matchCombineExtendingLoads(MachineInstr &MI, 489 PreferredTuple &Preferred) { 490 // We match the loads and follow the uses to the extend instead of matching 491 // the extends and following the def to the load. This is because the load 492 // must remain in the same position for correctness (unless we also add code 493 // to find a safe place to sink it) whereas the extend is freely movable. 494 // It also prevents us from duplicating the load for the volatile case or just 495 // for performance. 496 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(&MI); 497 if (!LoadMI) 498 return false; 499 500 Register LoadReg = LoadMI->getDstReg(); 501 502 LLT LoadValueTy = MRI.getType(LoadReg); 503 if (!LoadValueTy.isScalar()) 504 return false; 505 506 // Most architectures are going to legalize <s8 loads into at least a 1 byte 507 // load, and the MMOs can only describe memory accesses in multiples of bytes. 508 // If we try to perform extload combining on those, we can end up with 509 // %a(s8) = extload %ptr (load 1 byte from %ptr) 510 // ... which is an illegal extload instruction. 511 if (LoadValueTy.getSizeInBits() < 8) 512 return false; 513 514 // For non power-of-2 types, they will very likely be legalized into multiple 515 // loads. Don't bother trying to match them into extending loads. 516 if (!isPowerOf2_32(LoadValueTy.getSizeInBits())) 517 return false; 518 519 // Find the preferred type aside from the any-extends (unless it's the only 520 // one) and non-extending ops. We'll emit an extending load to that type and 521 // and emit a variant of (extend (trunc X)) for the others according to the 522 // relative type sizes. At the same time, pick an extend to use based on the 523 // extend involved in the chosen type. 524 unsigned PreferredOpcode = 525 isa<GLoad>(&MI) 526 ? TargetOpcode::G_ANYEXT 527 : isa<GSExtLoad>(&MI) ? TargetOpcode::G_SEXT : TargetOpcode::G_ZEXT; 528 Preferred = {LLT(), PreferredOpcode, nullptr}; 529 for (auto &UseMI : MRI.use_nodbg_instructions(LoadReg)) { 530 if (UseMI.getOpcode() == TargetOpcode::G_SEXT || 531 UseMI.getOpcode() == TargetOpcode::G_ZEXT || 532 (UseMI.getOpcode() == TargetOpcode::G_ANYEXT)) { 533 const auto &MMO = LoadMI->getMMO(); 534 // For atomics, only form anyextending loads. 535 if (MMO.isAtomic() && UseMI.getOpcode() != TargetOpcode::G_ANYEXT) 536 continue; 537 // Check for legality. 538 if (LI) { 539 LegalityQuery::MemDesc MMDesc(MMO); 540 LLT UseTy = MRI.getType(UseMI.getOperand(0).getReg()); 541 LLT SrcTy = MRI.getType(LoadMI->getPointerReg()); 542 if (LI->getAction({LoadMI->getOpcode(), {UseTy, SrcTy}, {MMDesc}}) 543 .Action != LegalizeActions::Legal) 544 continue; 545 } 546 Preferred = ChoosePreferredUse(Preferred, 547 MRI.getType(UseMI.getOperand(0).getReg()), 548 UseMI.getOpcode(), &UseMI); 549 } 550 } 551 552 // There were no extends 553 if (!Preferred.MI) 554 return false; 555 // It should be impossible to chose an extend without selecting a different 556 // type since by definition the result of an extend is larger. 557 assert(Preferred.Ty != LoadValueTy && "Extending to same type?"); 558 559 LLVM_DEBUG(dbgs() << "Preferred use is: " << *Preferred.MI); 560 return true; 561 } 562 563 void CombinerHelper::applyCombineExtendingLoads(MachineInstr &MI, 564 PreferredTuple &Preferred) { 565 // Rewrite the load to the chosen extending load. 566 Register ChosenDstReg = Preferred.MI->getOperand(0).getReg(); 567 568 // Inserter to insert a truncate back to the original type at a given point 569 // with some basic CSE to limit truncate duplication to one per BB. 570 DenseMap<MachineBasicBlock *, MachineInstr *> EmittedInsns; 571 auto InsertTruncAt = [&](MachineBasicBlock *InsertIntoBB, 572 MachineBasicBlock::iterator InsertBefore, 573 MachineOperand &UseMO) { 574 MachineInstr *PreviouslyEmitted = EmittedInsns.lookup(InsertIntoBB); 575 if (PreviouslyEmitted) { 576 Observer.changingInstr(*UseMO.getParent()); 577 UseMO.setReg(PreviouslyEmitted->getOperand(0).getReg()); 578 Observer.changedInstr(*UseMO.getParent()); 579 return; 580 } 581 582 Builder.setInsertPt(*InsertIntoBB, InsertBefore); 583 Register NewDstReg = MRI.cloneVirtualRegister(MI.getOperand(0).getReg()); 584 MachineInstr *NewMI = Builder.buildTrunc(NewDstReg, ChosenDstReg); 585 EmittedInsns[InsertIntoBB] = NewMI; 586 replaceRegOpWith(MRI, UseMO, NewDstReg); 587 }; 588 589 Observer.changingInstr(MI); 590 MI.setDesc( 591 Builder.getTII().get(Preferred.ExtendOpcode == TargetOpcode::G_SEXT 592 ? TargetOpcode::G_SEXTLOAD 593 : Preferred.ExtendOpcode == TargetOpcode::G_ZEXT 594 ? TargetOpcode::G_ZEXTLOAD 595 : TargetOpcode::G_LOAD)); 596 597 // Rewrite all the uses to fix up the types. 598 auto &LoadValue = MI.getOperand(0); 599 SmallVector<MachineOperand *, 4> Uses; 600 for (auto &UseMO : MRI.use_operands(LoadValue.getReg())) 601 Uses.push_back(&UseMO); 602 603 for (auto *UseMO : Uses) { 604 MachineInstr *UseMI = UseMO->getParent(); 605 606 // If the extend is compatible with the preferred extend then we should fix 607 // up the type and extend so that it uses the preferred use. 608 if (UseMI->getOpcode() == Preferred.ExtendOpcode || 609 UseMI->getOpcode() == TargetOpcode::G_ANYEXT) { 610 Register UseDstReg = UseMI->getOperand(0).getReg(); 611 MachineOperand &UseSrcMO = UseMI->getOperand(1); 612 const LLT UseDstTy = MRI.getType(UseDstReg); 613 if (UseDstReg != ChosenDstReg) { 614 if (Preferred.Ty == UseDstTy) { 615 // If the use has the same type as the preferred use, then merge 616 // the vregs and erase the extend. For example: 617 // %1:_(s8) = G_LOAD ... 618 // %2:_(s32) = G_SEXT %1(s8) 619 // %3:_(s32) = G_ANYEXT %1(s8) 620 // ... = ... %3(s32) 621 // rewrites to: 622 // %2:_(s32) = G_SEXTLOAD ... 623 // ... = ... %2(s32) 624 replaceRegWith(MRI, UseDstReg, ChosenDstReg); 625 Observer.erasingInstr(*UseMO->getParent()); 626 UseMO->getParent()->eraseFromParent(); 627 } else if (Preferred.Ty.getSizeInBits() < UseDstTy.getSizeInBits()) { 628 // If the preferred size is smaller, then keep the extend but extend 629 // from the result of the extending load. For example: 630 // %1:_(s8) = G_LOAD ... 631 // %2:_(s32) = G_SEXT %1(s8) 632 // %3:_(s64) = G_ANYEXT %1(s8) 633 // ... = ... %3(s64) 634 /// rewrites to: 635 // %2:_(s32) = G_SEXTLOAD ... 636 // %3:_(s64) = G_ANYEXT %2:_(s32) 637 // ... = ... %3(s64) 638 replaceRegOpWith(MRI, UseSrcMO, ChosenDstReg); 639 } else { 640 // If the preferred size is large, then insert a truncate. For 641 // example: 642 // %1:_(s8) = G_LOAD ... 643 // %2:_(s64) = G_SEXT %1(s8) 644 // %3:_(s32) = G_ZEXT %1(s8) 645 // ... = ... %3(s32) 646 /// rewrites to: 647 // %2:_(s64) = G_SEXTLOAD ... 648 // %4:_(s8) = G_TRUNC %2:_(s32) 649 // %3:_(s64) = G_ZEXT %2:_(s8) 650 // ... = ... %3(s64) 651 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, 652 InsertTruncAt); 653 } 654 continue; 655 } 656 // The use is (one of) the uses of the preferred use we chose earlier. 657 // We're going to update the load to def this value later so just erase 658 // the old extend. 659 Observer.erasingInstr(*UseMO->getParent()); 660 UseMO->getParent()->eraseFromParent(); 661 continue; 662 } 663 664 // The use isn't an extend. Truncate back to the type we originally loaded. 665 // This is free on many targets. 666 InsertInsnsWithoutSideEffectsBeforeUse(Builder, MI, *UseMO, InsertTruncAt); 667 } 668 669 MI.getOperand(0).setReg(ChosenDstReg); 670 Observer.changedInstr(MI); 671 } 672 673 bool CombinerHelper::matchCombineLoadWithAndMask(MachineInstr &MI, 674 BuildFnTy &MatchInfo) { 675 assert(MI.getOpcode() == TargetOpcode::G_AND); 676 677 // If we have the following code: 678 // %mask = G_CONSTANT 255 679 // %ld = G_LOAD %ptr, (load s16) 680 // %and = G_AND %ld, %mask 681 // 682 // Try to fold it into 683 // %ld = G_ZEXTLOAD %ptr, (load s8) 684 685 Register Dst = MI.getOperand(0).getReg(); 686 if (MRI.getType(Dst).isVector()) 687 return false; 688 689 auto MaybeMask = 690 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); 691 if (!MaybeMask) 692 return false; 693 694 APInt MaskVal = MaybeMask->Value; 695 696 if (!MaskVal.isMask()) 697 return false; 698 699 Register SrcReg = MI.getOperand(1).getReg(); 700 // Don't use getOpcodeDef() here since intermediate instructions may have 701 // multiple users. 702 GAnyLoad *LoadMI = dyn_cast<GAnyLoad>(MRI.getVRegDef(SrcReg)); 703 if (!LoadMI || !MRI.hasOneNonDBGUse(LoadMI->getDstReg())) 704 return false; 705 706 Register LoadReg = LoadMI->getDstReg(); 707 LLT RegTy = MRI.getType(LoadReg); 708 Register PtrReg = LoadMI->getPointerReg(); 709 unsigned RegSize = RegTy.getSizeInBits(); 710 uint64_t LoadSizeBits = LoadMI->getMemSizeInBits(); 711 unsigned MaskSizeBits = MaskVal.countTrailingOnes(); 712 713 // The mask may not be larger than the in-memory type, as it might cover sign 714 // extended bits 715 if (MaskSizeBits > LoadSizeBits) 716 return false; 717 718 // If the mask covers the whole destination register, there's nothing to 719 // extend 720 if (MaskSizeBits >= RegSize) 721 return false; 722 723 // Most targets cannot deal with loads of size < 8 and need to re-legalize to 724 // at least byte loads. Avoid creating such loads here 725 if (MaskSizeBits < 8 || !isPowerOf2_32(MaskSizeBits)) 726 return false; 727 728 const MachineMemOperand &MMO = LoadMI->getMMO(); 729 LegalityQuery::MemDesc MemDesc(MMO); 730 731 // Don't modify the memory access size if this is atomic/volatile, but we can 732 // still adjust the opcode to indicate the high bit behavior. 733 if (LoadMI->isSimple()) 734 MemDesc.MemoryTy = LLT::scalar(MaskSizeBits); 735 else if (LoadSizeBits > MaskSizeBits || LoadSizeBits == RegSize) 736 return false; 737 738 // TODO: Could check if it's legal with the reduced or original memory size. 739 if (!isLegalOrBeforeLegalizer( 740 {TargetOpcode::G_ZEXTLOAD, {RegTy, MRI.getType(PtrReg)}, {MemDesc}})) 741 return false; 742 743 MatchInfo = [=](MachineIRBuilder &B) { 744 B.setInstrAndDebugLoc(*LoadMI); 745 auto &MF = B.getMF(); 746 auto PtrInfo = MMO.getPointerInfo(); 747 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, MemDesc.MemoryTy); 748 B.buildLoadInstr(TargetOpcode::G_ZEXTLOAD, Dst, PtrReg, *NewMMO); 749 LoadMI->eraseFromParent(); 750 }; 751 return true; 752 } 753 754 bool CombinerHelper::isPredecessor(const MachineInstr &DefMI, 755 const MachineInstr &UseMI) { 756 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() && 757 "shouldn't consider debug uses"); 758 assert(DefMI.getParent() == UseMI.getParent()); 759 if (&DefMI == &UseMI) 760 return true; 761 const MachineBasicBlock &MBB = *DefMI.getParent(); 762 auto DefOrUse = find_if(MBB, [&DefMI, &UseMI](const MachineInstr &MI) { 763 return &MI == &DefMI || &MI == &UseMI; 764 }); 765 if (DefOrUse == MBB.end()) 766 llvm_unreachable("Block must contain both DefMI and UseMI!"); 767 return &*DefOrUse == &DefMI; 768 } 769 770 bool CombinerHelper::dominates(const MachineInstr &DefMI, 771 const MachineInstr &UseMI) { 772 assert(!DefMI.isDebugInstr() && !UseMI.isDebugInstr() && 773 "shouldn't consider debug uses"); 774 if (MDT) 775 return MDT->dominates(&DefMI, &UseMI); 776 else if (DefMI.getParent() != UseMI.getParent()) 777 return false; 778 779 return isPredecessor(DefMI, UseMI); 780 } 781 782 bool CombinerHelper::matchSextTruncSextLoad(MachineInstr &MI) { 783 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); 784 Register SrcReg = MI.getOperand(1).getReg(); 785 Register LoadUser = SrcReg; 786 787 if (MRI.getType(SrcReg).isVector()) 788 return false; 789 790 Register TruncSrc; 791 if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) 792 LoadUser = TruncSrc; 793 794 uint64_t SizeInBits = MI.getOperand(2).getImm(); 795 // If the source is a G_SEXTLOAD from the same bit width, then we don't 796 // need any extend at all, just a truncate. 797 if (auto *LoadMI = getOpcodeDef<GSExtLoad>(LoadUser, MRI)) { 798 // If truncating more than the original extended value, abort. 799 auto LoadSizeBits = LoadMI->getMemSizeInBits(); 800 if (TruncSrc && MRI.getType(TruncSrc).getSizeInBits() < LoadSizeBits) 801 return false; 802 if (LoadSizeBits == SizeInBits) 803 return true; 804 } 805 return false; 806 } 807 808 void CombinerHelper::applySextTruncSextLoad(MachineInstr &MI) { 809 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); 810 Builder.setInstrAndDebugLoc(MI); 811 Builder.buildCopy(MI.getOperand(0).getReg(), MI.getOperand(1).getReg()); 812 MI.eraseFromParent(); 813 } 814 815 bool CombinerHelper::matchSextInRegOfLoad( 816 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) { 817 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); 818 819 Register DstReg = MI.getOperand(0).getReg(); 820 LLT RegTy = MRI.getType(DstReg); 821 822 // Only supports scalars for now. 823 if (RegTy.isVector()) 824 return false; 825 826 Register SrcReg = MI.getOperand(1).getReg(); 827 auto *LoadDef = getOpcodeDef<GLoad>(SrcReg, MRI); 828 if (!LoadDef || !MRI.hasOneNonDBGUse(DstReg)) 829 return false; 830 831 uint64_t MemBits = LoadDef->getMemSizeInBits(); 832 833 // If the sign extend extends from a narrower width than the load's width, 834 // then we can narrow the load width when we combine to a G_SEXTLOAD. 835 // Avoid widening the load at all. 836 unsigned NewSizeBits = std::min((uint64_t)MI.getOperand(2).getImm(), MemBits); 837 838 // Don't generate G_SEXTLOADs with a < 1 byte width. 839 if (NewSizeBits < 8) 840 return false; 841 // Don't bother creating a non-power-2 sextload, it will likely be broken up 842 // anyway for most targets. 843 if (!isPowerOf2_32(NewSizeBits)) 844 return false; 845 846 const MachineMemOperand &MMO = LoadDef->getMMO(); 847 LegalityQuery::MemDesc MMDesc(MMO); 848 849 // Don't modify the memory access size if this is atomic/volatile, but we can 850 // still adjust the opcode to indicate the high bit behavior. 851 if (LoadDef->isSimple()) 852 MMDesc.MemoryTy = LLT::scalar(NewSizeBits); 853 else if (MemBits > NewSizeBits || MemBits == RegTy.getSizeInBits()) 854 return false; 855 856 // TODO: Could check if it's legal with the reduced or original memory size. 857 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SEXTLOAD, 858 {MRI.getType(LoadDef->getDstReg()), 859 MRI.getType(LoadDef->getPointerReg())}, 860 {MMDesc}})) 861 return false; 862 863 MatchInfo = std::make_tuple(LoadDef->getDstReg(), NewSizeBits); 864 return true; 865 } 866 867 void CombinerHelper::applySextInRegOfLoad( 868 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) { 869 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); 870 Register LoadReg; 871 unsigned ScalarSizeBits; 872 std::tie(LoadReg, ScalarSizeBits) = MatchInfo; 873 GLoad *LoadDef = cast<GLoad>(MRI.getVRegDef(LoadReg)); 874 875 // If we have the following: 876 // %ld = G_LOAD %ptr, (load 2) 877 // %ext = G_SEXT_INREG %ld, 8 878 // ==> 879 // %ld = G_SEXTLOAD %ptr (load 1) 880 881 auto &MMO = LoadDef->getMMO(); 882 Builder.setInstrAndDebugLoc(*LoadDef); 883 auto &MF = Builder.getMF(); 884 auto PtrInfo = MMO.getPointerInfo(); 885 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, ScalarSizeBits / 8); 886 Builder.buildLoadInstr(TargetOpcode::G_SEXTLOAD, MI.getOperand(0).getReg(), 887 LoadDef->getPointerReg(), *NewMMO); 888 MI.eraseFromParent(); 889 } 890 891 bool CombinerHelper::findPostIndexCandidate(MachineInstr &MI, Register &Addr, 892 Register &Base, Register &Offset) { 893 auto &MF = *MI.getParent()->getParent(); 894 const auto &TLI = *MF.getSubtarget().getTargetLowering(); 895 896 #ifndef NDEBUG 897 unsigned Opcode = MI.getOpcode(); 898 assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD || 899 Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE); 900 #endif 901 902 Base = MI.getOperand(1).getReg(); 903 MachineInstr *BaseDef = MRI.getUniqueVRegDef(Base); 904 if (BaseDef && BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) 905 return false; 906 907 LLVM_DEBUG(dbgs() << "Searching for post-indexing opportunity for: " << MI); 908 // FIXME: The following use traversal needs a bail out for patholigical cases. 909 for (auto &Use : MRI.use_nodbg_instructions(Base)) { 910 if (Use.getOpcode() != TargetOpcode::G_PTR_ADD) 911 continue; 912 913 Offset = Use.getOperand(2).getReg(); 914 if (!ForceLegalIndexing && 915 !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ false, MRI)) { 916 LLVM_DEBUG(dbgs() << " Ignoring candidate with illegal addrmode: " 917 << Use); 918 continue; 919 } 920 921 // Make sure the offset calculation is before the potentially indexed op. 922 // FIXME: we really care about dependency here. The offset calculation might 923 // be movable. 924 MachineInstr *OffsetDef = MRI.getUniqueVRegDef(Offset); 925 if (!OffsetDef || !dominates(*OffsetDef, MI)) { 926 LLVM_DEBUG(dbgs() << " Ignoring candidate with offset after mem-op: " 927 << Use); 928 continue; 929 } 930 931 // FIXME: check whether all uses of Base are load/store with foldable 932 // addressing modes. If so, using the normal addr-modes is better than 933 // forming an indexed one. 934 935 bool MemOpDominatesAddrUses = true; 936 for (auto &PtrAddUse : 937 MRI.use_nodbg_instructions(Use.getOperand(0).getReg())) { 938 if (!dominates(MI, PtrAddUse)) { 939 MemOpDominatesAddrUses = false; 940 break; 941 } 942 } 943 944 if (!MemOpDominatesAddrUses) { 945 LLVM_DEBUG( 946 dbgs() << " Ignoring candidate as memop does not dominate uses: " 947 << Use); 948 continue; 949 } 950 951 LLVM_DEBUG(dbgs() << " Found match: " << Use); 952 Addr = Use.getOperand(0).getReg(); 953 return true; 954 } 955 956 return false; 957 } 958 959 bool CombinerHelper::findPreIndexCandidate(MachineInstr &MI, Register &Addr, 960 Register &Base, Register &Offset) { 961 auto &MF = *MI.getParent()->getParent(); 962 const auto &TLI = *MF.getSubtarget().getTargetLowering(); 963 964 #ifndef NDEBUG 965 unsigned Opcode = MI.getOpcode(); 966 assert(Opcode == TargetOpcode::G_LOAD || Opcode == TargetOpcode::G_SEXTLOAD || 967 Opcode == TargetOpcode::G_ZEXTLOAD || Opcode == TargetOpcode::G_STORE); 968 #endif 969 970 Addr = MI.getOperand(1).getReg(); 971 MachineInstr *AddrDef = getOpcodeDef(TargetOpcode::G_PTR_ADD, Addr, MRI); 972 if (!AddrDef || MRI.hasOneNonDBGUse(Addr)) 973 return false; 974 975 Base = AddrDef->getOperand(1).getReg(); 976 Offset = AddrDef->getOperand(2).getReg(); 977 978 LLVM_DEBUG(dbgs() << "Found potential pre-indexed load_store: " << MI); 979 980 if (!ForceLegalIndexing && 981 !TLI.isIndexingLegal(MI, Base, Offset, /*IsPre*/ true, MRI)) { 982 LLVM_DEBUG(dbgs() << " Skipping, not legal for target"); 983 return false; 984 } 985 986 MachineInstr *BaseDef = getDefIgnoringCopies(Base, MRI); 987 if (BaseDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) { 988 LLVM_DEBUG(dbgs() << " Skipping, frame index would need copy anyway."); 989 return false; 990 } 991 992 if (MI.getOpcode() == TargetOpcode::G_STORE) { 993 // Would require a copy. 994 if (Base == MI.getOperand(0).getReg()) { 995 LLVM_DEBUG(dbgs() << " Skipping, storing base so need copy anyway."); 996 return false; 997 } 998 999 // We're expecting one use of Addr in MI, but it could also be the 1000 // value stored, which isn't actually dominated by the instruction. 1001 if (MI.getOperand(0).getReg() == Addr) { 1002 LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses"); 1003 return false; 1004 } 1005 } 1006 1007 // FIXME: check whether all uses of the base pointer are constant PtrAdds. 1008 // That might allow us to end base's liveness here by adjusting the constant. 1009 1010 for (auto &UseMI : MRI.use_nodbg_instructions(Addr)) { 1011 if (!dominates(MI, UseMI)) { 1012 LLVM_DEBUG(dbgs() << " Skipping, does not dominate all addr uses."); 1013 return false; 1014 } 1015 } 1016 1017 return true; 1018 } 1019 1020 bool CombinerHelper::tryCombineIndexedLoadStore(MachineInstr &MI) { 1021 IndexedLoadStoreMatchInfo MatchInfo; 1022 if (matchCombineIndexedLoadStore(MI, MatchInfo)) { 1023 applyCombineIndexedLoadStore(MI, MatchInfo); 1024 return true; 1025 } 1026 return false; 1027 } 1028 1029 bool CombinerHelper::matchCombineIndexedLoadStore(MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) { 1030 unsigned Opcode = MI.getOpcode(); 1031 if (Opcode != TargetOpcode::G_LOAD && Opcode != TargetOpcode::G_SEXTLOAD && 1032 Opcode != TargetOpcode::G_ZEXTLOAD && Opcode != TargetOpcode::G_STORE) 1033 return false; 1034 1035 // For now, no targets actually support these opcodes so don't waste time 1036 // running these unless we're forced to for testing. 1037 if (!ForceLegalIndexing) 1038 return false; 1039 1040 MatchInfo.IsPre = findPreIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base, 1041 MatchInfo.Offset); 1042 if (!MatchInfo.IsPre && 1043 !findPostIndexCandidate(MI, MatchInfo.Addr, MatchInfo.Base, 1044 MatchInfo.Offset)) 1045 return false; 1046 1047 return true; 1048 } 1049 1050 void CombinerHelper::applyCombineIndexedLoadStore( 1051 MachineInstr &MI, IndexedLoadStoreMatchInfo &MatchInfo) { 1052 MachineInstr &AddrDef = *MRI.getUniqueVRegDef(MatchInfo.Addr); 1053 MachineIRBuilder MIRBuilder(MI); 1054 unsigned Opcode = MI.getOpcode(); 1055 bool IsStore = Opcode == TargetOpcode::G_STORE; 1056 unsigned NewOpcode; 1057 switch (Opcode) { 1058 case TargetOpcode::G_LOAD: 1059 NewOpcode = TargetOpcode::G_INDEXED_LOAD; 1060 break; 1061 case TargetOpcode::G_SEXTLOAD: 1062 NewOpcode = TargetOpcode::G_INDEXED_SEXTLOAD; 1063 break; 1064 case TargetOpcode::G_ZEXTLOAD: 1065 NewOpcode = TargetOpcode::G_INDEXED_ZEXTLOAD; 1066 break; 1067 case TargetOpcode::G_STORE: 1068 NewOpcode = TargetOpcode::G_INDEXED_STORE; 1069 break; 1070 default: 1071 llvm_unreachable("Unknown load/store opcode"); 1072 } 1073 1074 auto MIB = MIRBuilder.buildInstr(NewOpcode); 1075 if (IsStore) { 1076 MIB.addDef(MatchInfo.Addr); 1077 MIB.addUse(MI.getOperand(0).getReg()); 1078 } else { 1079 MIB.addDef(MI.getOperand(0).getReg()); 1080 MIB.addDef(MatchInfo.Addr); 1081 } 1082 1083 MIB.addUse(MatchInfo.Base); 1084 MIB.addUse(MatchInfo.Offset); 1085 MIB.addImm(MatchInfo.IsPre); 1086 MI.eraseFromParent(); 1087 AddrDef.eraseFromParent(); 1088 1089 LLVM_DEBUG(dbgs() << " Combinined to indexed operation"); 1090 } 1091 1092 bool CombinerHelper::matchCombineDivRem(MachineInstr &MI, 1093 MachineInstr *&OtherMI) { 1094 unsigned Opcode = MI.getOpcode(); 1095 bool IsDiv, IsSigned; 1096 1097 switch (Opcode) { 1098 default: 1099 llvm_unreachable("Unexpected opcode!"); 1100 case TargetOpcode::G_SDIV: 1101 case TargetOpcode::G_UDIV: { 1102 IsDiv = true; 1103 IsSigned = Opcode == TargetOpcode::G_SDIV; 1104 break; 1105 } 1106 case TargetOpcode::G_SREM: 1107 case TargetOpcode::G_UREM: { 1108 IsDiv = false; 1109 IsSigned = Opcode == TargetOpcode::G_SREM; 1110 break; 1111 } 1112 } 1113 1114 Register Src1 = MI.getOperand(1).getReg(); 1115 unsigned DivOpcode, RemOpcode, DivremOpcode; 1116 if (IsSigned) { 1117 DivOpcode = TargetOpcode::G_SDIV; 1118 RemOpcode = TargetOpcode::G_SREM; 1119 DivremOpcode = TargetOpcode::G_SDIVREM; 1120 } else { 1121 DivOpcode = TargetOpcode::G_UDIV; 1122 RemOpcode = TargetOpcode::G_UREM; 1123 DivremOpcode = TargetOpcode::G_UDIVREM; 1124 } 1125 1126 if (!isLegalOrBeforeLegalizer({DivremOpcode, {MRI.getType(Src1)}})) 1127 return false; 1128 1129 // Combine: 1130 // %div:_ = G_[SU]DIV %src1:_, %src2:_ 1131 // %rem:_ = G_[SU]REM %src1:_, %src2:_ 1132 // into: 1133 // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_ 1134 1135 // Combine: 1136 // %rem:_ = G_[SU]REM %src1:_, %src2:_ 1137 // %div:_ = G_[SU]DIV %src1:_, %src2:_ 1138 // into: 1139 // %div:_, %rem:_ = G_[SU]DIVREM %src1:_, %src2:_ 1140 1141 for (auto &UseMI : MRI.use_nodbg_instructions(Src1)) { 1142 if (MI.getParent() == UseMI.getParent() && 1143 ((IsDiv && UseMI.getOpcode() == RemOpcode) || 1144 (!IsDiv && UseMI.getOpcode() == DivOpcode)) && 1145 matchEqualDefs(MI.getOperand(2), UseMI.getOperand(2))) { 1146 OtherMI = &UseMI; 1147 return true; 1148 } 1149 } 1150 1151 return false; 1152 } 1153 1154 void CombinerHelper::applyCombineDivRem(MachineInstr &MI, 1155 MachineInstr *&OtherMI) { 1156 unsigned Opcode = MI.getOpcode(); 1157 assert(OtherMI && "OtherMI shouldn't be empty."); 1158 1159 Register DestDivReg, DestRemReg; 1160 if (Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_UDIV) { 1161 DestDivReg = MI.getOperand(0).getReg(); 1162 DestRemReg = OtherMI->getOperand(0).getReg(); 1163 } else { 1164 DestDivReg = OtherMI->getOperand(0).getReg(); 1165 DestRemReg = MI.getOperand(0).getReg(); 1166 } 1167 1168 bool IsSigned = 1169 Opcode == TargetOpcode::G_SDIV || Opcode == TargetOpcode::G_SREM; 1170 1171 // Check which instruction is first in the block so we don't break def-use 1172 // deps by "moving" the instruction incorrectly. 1173 if (dominates(MI, *OtherMI)) 1174 Builder.setInstrAndDebugLoc(MI); 1175 else 1176 Builder.setInstrAndDebugLoc(*OtherMI); 1177 1178 Builder.buildInstr(IsSigned ? TargetOpcode::G_SDIVREM 1179 : TargetOpcode::G_UDIVREM, 1180 {DestDivReg, DestRemReg}, 1181 {MI.getOperand(1).getReg(), MI.getOperand(2).getReg()}); 1182 MI.eraseFromParent(); 1183 OtherMI->eraseFromParent(); 1184 } 1185 1186 bool CombinerHelper::matchOptBrCondByInvertingCond(MachineInstr &MI, 1187 MachineInstr *&BrCond) { 1188 assert(MI.getOpcode() == TargetOpcode::G_BR); 1189 1190 // Try to match the following: 1191 // bb1: 1192 // G_BRCOND %c1, %bb2 1193 // G_BR %bb3 1194 // bb2: 1195 // ... 1196 // bb3: 1197 1198 // The above pattern does not have a fall through to the successor bb2, always 1199 // resulting in a branch no matter which path is taken. Here we try to find 1200 // and replace that pattern with conditional branch to bb3 and otherwise 1201 // fallthrough to bb2. This is generally better for branch predictors. 1202 1203 MachineBasicBlock *MBB = MI.getParent(); 1204 MachineBasicBlock::iterator BrIt(MI); 1205 if (BrIt == MBB->begin()) 1206 return false; 1207 assert(std::next(BrIt) == MBB->end() && "expected G_BR to be a terminator"); 1208 1209 BrCond = &*std::prev(BrIt); 1210 if (BrCond->getOpcode() != TargetOpcode::G_BRCOND) 1211 return false; 1212 1213 // Check that the next block is the conditional branch target. Also make sure 1214 // that it isn't the same as the G_BR's target (otherwise, this will loop.) 1215 MachineBasicBlock *BrCondTarget = BrCond->getOperand(1).getMBB(); 1216 return BrCondTarget != MI.getOperand(0).getMBB() && 1217 MBB->isLayoutSuccessor(BrCondTarget); 1218 } 1219 1220 void CombinerHelper::applyOptBrCondByInvertingCond(MachineInstr &MI, 1221 MachineInstr *&BrCond) { 1222 MachineBasicBlock *BrTarget = MI.getOperand(0).getMBB(); 1223 Builder.setInstrAndDebugLoc(*BrCond); 1224 LLT Ty = MRI.getType(BrCond->getOperand(0).getReg()); 1225 // FIXME: Does int/fp matter for this? If so, we might need to restrict 1226 // this to i1 only since we might not know for sure what kind of 1227 // compare generated the condition value. 1228 auto True = Builder.buildConstant( 1229 Ty, getICmpTrueVal(getTargetLowering(), false, false)); 1230 auto Xor = Builder.buildXor(Ty, BrCond->getOperand(0), True); 1231 1232 auto *FallthroughBB = BrCond->getOperand(1).getMBB(); 1233 Observer.changingInstr(MI); 1234 MI.getOperand(0).setMBB(FallthroughBB); 1235 Observer.changedInstr(MI); 1236 1237 // Change the conditional branch to use the inverted condition and 1238 // new target block. 1239 Observer.changingInstr(*BrCond); 1240 BrCond->getOperand(0).setReg(Xor.getReg(0)); 1241 BrCond->getOperand(1).setMBB(BrTarget); 1242 Observer.changedInstr(*BrCond); 1243 } 1244 1245 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) { 1246 if (Ty.isVector()) 1247 return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()), 1248 Ty.getNumElements()); 1249 return IntegerType::get(C, Ty.getSizeInBits()); 1250 } 1251 1252 bool CombinerHelper::tryEmitMemcpyInline(MachineInstr &MI) { 1253 MachineIRBuilder HelperBuilder(MI); 1254 GISelObserverWrapper DummyObserver; 1255 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder); 1256 return Helper.lowerMemcpyInline(MI) == 1257 LegalizerHelper::LegalizeResult::Legalized; 1258 } 1259 1260 bool CombinerHelper::tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen) { 1261 MachineIRBuilder HelperBuilder(MI); 1262 GISelObserverWrapper DummyObserver; 1263 LegalizerHelper Helper(HelperBuilder.getMF(), DummyObserver, HelperBuilder); 1264 return Helper.lowerMemCpyFamily(MI, MaxLen) == 1265 LegalizerHelper::LegalizeResult::Legalized; 1266 } 1267 1268 static Optional<APFloat> constantFoldFpUnary(unsigned Opcode, LLT DstTy, 1269 const Register Op, 1270 const MachineRegisterInfo &MRI) { 1271 const ConstantFP *MaybeCst = getConstantFPVRegVal(Op, MRI); 1272 if (!MaybeCst) 1273 return None; 1274 1275 APFloat V = MaybeCst->getValueAPF(); 1276 switch (Opcode) { 1277 default: 1278 llvm_unreachable("Unexpected opcode!"); 1279 case TargetOpcode::G_FNEG: { 1280 V.changeSign(); 1281 return V; 1282 } 1283 case TargetOpcode::G_FABS: { 1284 V.clearSign(); 1285 return V; 1286 } 1287 case TargetOpcode::G_FPTRUNC: 1288 break; 1289 case TargetOpcode::G_FSQRT: { 1290 bool Unused; 1291 V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused); 1292 V = APFloat(sqrt(V.convertToDouble())); 1293 break; 1294 } 1295 case TargetOpcode::G_FLOG2: { 1296 bool Unused; 1297 V.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &Unused); 1298 V = APFloat(log2(V.convertToDouble())); 1299 break; 1300 } 1301 } 1302 // Convert `APFloat` to appropriate IEEE type depending on `DstTy`. Otherwise, 1303 // `buildFConstant` will assert on size mismatch. Only `G_FPTRUNC`, `G_FSQRT`, 1304 // and `G_FLOG2` reach here. 1305 bool Unused; 1306 V.convert(getFltSemanticForLLT(DstTy), APFloat::rmNearestTiesToEven, &Unused); 1307 return V; 1308 } 1309 1310 bool CombinerHelper::matchCombineConstantFoldFpUnary(MachineInstr &MI, 1311 Optional<APFloat> &Cst) { 1312 Register DstReg = MI.getOperand(0).getReg(); 1313 Register SrcReg = MI.getOperand(1).getReg(); 1314 LLT DstTy = MRI.getType(DstReg); 1315 Cst = constantFoldFpUnary(MI.getOpcode(), DstTy, SrcReg, MRI); 1316 return Cst.has_value(); 1317 } 1318 1319 void CombinerHelper::applyCombineConstantFoldFpUnary(MachineInstr &MI, 1320 Optional<APFloat> &Cst) { 1321 assert(Cst && "Optional is unexpectedly empty!"); 1322 Builder.setInstrAndDebugLoc(MI); 1323 MachineFunction &MF = Builder.getMF(); 1324 auto *FPVal = ConstantFP::get(MF.getFunction().getContext(), *Cst); 1325 Register DstReg = MI.getOperand(0).getReg(); 1326 Builder.buildFConstant(DstReg, *FPVal); 1327 MI.eraseFromParent(); 1328 } 1329 1330 bool CombinerHelper::matchPtrAddImmedChain(MachineInstr &MI, 1331 PtrAddChain &MatchInfo) { 1332 // We're trying to match the following pattern: 1333 // %t1 = G_PTR_ADD %base, G_CONSTANT imm1 1334 // %root = G_PTR_ADD %t1, G_CONSTANT imm2 1335 // --> 1336 // %root = G_PTR_ADD %base, G_CONSTANT (imm1 + imm2) 1337 1338 if (MI.getOpcode() != TargetOpcode::G_PTR_ADD) 1339 return false; 1340 1341 Register Add2 = MI.getOperand(1).getReg(); 1342 Register Imm1 = MI.getOperand(2).getReg(); 1343 auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI); 1344 if (!MaybeImmVal) 1345 return false; 1346 1347 MachineInstr *Add2Def = MRI.getVRegDef(Add2); 1348 if (!Add2Def || Add2Def->getOpcode() != TargetOpcode::G_PTR_ADD) 1349 return false; 1350 1351 Register Base = Add2Def->getOperand(1).getReg(); 1352 Register Imm2 = Add2Def->getOperand(2).getReg(); 1353 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI); 1354 if (!MaybeImm2Val) 1355 return false; 1356 1357 // Check if the new combined immediate forms an illegal addressing mode. 1358 // Do not combine if it was legal before but would get illegal. 1359 // To do so, we need to find a load/store user of the pointer to get 1360 // the access type. 1361 Type *AccessTy = nullptr; 1362 auto &MF = *MI.getMF(); 1363 for (auto &UseMI : MRI.use_nodbg_instructions(MI.getOperand(0).getReg())) { 1364 if (auto *LdSt = dyn_cast<GLoadStore>(&UseMI)) { 1365 AccessTy = getTypeForLLT(MRI.getType(LdSt->getReg(0)), 1366 MF.getFunction().getContext()); 1367 break; 1368 } 1369 } 1370 TargetLoweringBase::AddrMode AMNew; 1371 APInt CombinedImm = MaybeImmVal->Value + MaybeImm2Val->Value; 1372 AMNew.BaseOffs = CombinedImm.getSExtValue(); 1373 if (AccessTy) { 1374 AMNew.HasBaseReg = true; 1375 TargetLoweringBase::AddrMode AMOld; 1376 AMOld.BaseOffs = MaybeImm2Val->Value.getSExtValue(); 1377 AMOld.HasBaseReg = true; 1378 unsigned AS = MRI.getType(Add2).getAddressSpace(); 1379 const auto &TLI = *MF.getSubtarget().getTargetLowering(); 1380 if (TLI.isLegalAddressingMode(MF.getDataLayout(), AMOld, AccessTy, AS) && 1381 !TLI.isLegalAddressingMode(MF.getDataLayout(), AMNew, AccessTy, AS)) 1382 return false; 1383 } 1384 1385 // Pass the combined immediate to the apply function. 1386 MatchInfo.Imm = AMNew.BaseOffs; 1387 MatchInfo.Base = Base; 1388 MatchInfo.Bank = getRegBank(Imm2); 1389 return true; 1390 } 1391 1392 void CombinerHelper::applyPtrAddImmedChain(MachineInstr &MI, 1393 PtrAddChain &MatchInfo) { 1394 assert(MI.getOpcode() == TargetOpcode::G_PTR_ADD && "Expected G_PTR_ADD"); 1395 MachineIRBuilder MIB(MI); 1396 LLT OffsetTy = MRI.getType(MI.getOperand(2).getReg()); 1397 auto NewOffset = MIB.buildConstant(OffsetTy, MatchInfo.Imm); 1398 setRegBank(NewOffset.getReg(0), MatchInfo.Bank); 1399 Observer.changingInstr(MI); 1400 MI.getOperand(1).setReg(MatchInfo.Base); 1401 MI.getOperand(2).setReg(NewOffset.getReg(0)); 1402 Observer.changedInstr(MI); 1403 } 1404 1405 bool CombinerHelper::matchShiftImmedChain(MachineInstr &MI, 1406 RegisterImmPair &MatchInfo) { 1407 // We're trying to match the following pattern with any of 1408 // G_SHL/G_ASHR/G_LSHR/G_SSHLSAT/G_USHLSAT shift instructions: 1409 // %t1 = SHIFT %base, G_CONSTANT imm1 1410 // %root = SHIFT %t1, G_CONSTANT imm2 1411 // --> 1412 // %root = SHIFT %base, G_CONSTANT (imm1 + imm2) 1413 1414 unsigned Opcode = MI.getOpcode(); 1415 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR || 1416 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT || 1417 Opcode == TargetOpcode::G_USHLSAT) && 1418 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT"); 1419 1420 Register Shl2 = MI.getOperand(1).getReg(); 1421 Register Imm1 = MI.getOperand(2).getReg(); 1422 auto MaybeImmVal = getIConstantVRegValWithLookThrough(Imm1, MRI); 1423 if (!MaybeImmVal) 1424 return false; 1425 1426 MachineInstr *Shl2Def = MRI.getUniqueVRegDef(Shl2); 1427 if (Shl2Def->getOpcode() != Opcode) 1428 return false; 1429 1430 Register Base = Shl2Def->getOperand(1).getReg(); 1431 Register Imm2 = Shl2Def->getOperand(2).getReg(); 1432 auto MaybeImm2Val = getIConstantVRegValWithLookThrough(Imm2, MRI); 1433 if (!MaybeImm2Val) 1434 return false; 1435 1436 // Pass the combined immediate to the apply function. 1437 MatchInfo.Imm = 1438 (MaybeImmVal->Value.getSExtValue() + MaybeImm2Val->Value).getSExtValue(); 1439 MatchInfo.Reg = Base; 1440 1441 // There is no simple replacement for a saturating unsigned left shift that 1442 // exceeds the scalar size. 1443 if (Opcode == TargetOpcode::G_USHLSAT && 1444 MatchInfo.Imm >= MRI.getType(Shl2).getScalarSizeInBits()) 1445 return false; 1446 1447 return true; 1448 } 1449 1450 void CombinerHelper::applyShiftImmedChain(MachineInstr &MI, 1451 RegisterImmPair &MatchInfo) { 1452 unsigned Opcode = MI.getOpcode(); 1453 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR || 1454 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_SSHLSAT || 1455 Opcode == TargetOpcode::G_USHLSAT) && 1456 "Expected G_SHL, G_ASHR, G_LSHR, G_SSHLSAT or G_USHLSAT"); 1457 1458 Builder.setInstrAndDebugLoc(MI); 1459 LLT Ty = MRI.getType(MI.getOperand(1).getReg()); 1460 unsigned const ScalarSizeInBits = Ty.getScalarSizeInBits(); 1461 auto Imm = MatchInfo.Imm; 1462 1463 if (Imm >= ScalarSizeInBits) { 1464 // Any logical shift that exceeds scalar size will produce zero. 1465 if (Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_LSHR) { 1466 Builder.buildConstant(MI.getOperand(0), 0); 1467 MI.eraseFromParent(); 1468 return; 1469 } 1470 // Arithmetic shift and saturating signed left shift have no effect beyond 1471 // scalar size. 1472 Imm = ScalarSizeInBits - 1; 1473 } 1474 1475 LLT ImmTy = MRI.getType(MI.getOperand(2).getReg()); 1476 Register NewImm = Builder.buildConstant(ImmTy, Imm).getReg(0); 1477 Observer.changingInstr(MI); 1478 MI.getOperand(1).setReg(MatchInfo.Reg); 1479 MI.getOperand(2).setReg(NewImm); 1480 Observer.changedInstr(MI); 1481 } 1482 1483 bool CombinerHelper::matchShiftOfShiftedLogic(MachineInstr &MI, 1484 ShiftOfShiftedLogic &MatchInfo) { 1485 // We're trying to match the following pattern with any of 1486 // G_SHL/G_ASHR/G_LSHR/G_USHLSAT/G_SSHLSAT shift instructions in combination 1487 // with any of G_AND/G_OR/G_XOR logic instructions. 1488 // %t1 = SHIFT %X, G_CONSTANT C0 1489 // %t2 = LOGIC %t1, %Y 1490 // %root = SHIFT %t2, G_CONSTANT C1 1491 // --> 1492 // %t3 = SHIFT %X, G_CONSTANT (C0+C1) 1493 // %t4 = SHIFT %Y, G_CONSTANT C1 1494 // %root = LOGIC %t3, %t4 1495 unsigned ShiftOpcode = MI.getOpcode(); 1496 assert((ShiftOpcode == TargetOpcode::G_SHL || 1497 ShiftOpcode == TargetOpcode::G_ASHR || 1498 ShiftOpcode == TargetOpcode::G_LSHR || 1499 ShiftOpcode == TargetOpcode::G_USHLSAT || 1500 ShiftOpcode == TargetOpcode::G_SSHLSAT) && 1501 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT"); 1502 1503 // Match a one-use bitwise logic op. 1504 Register LogicDest = MI.getOperand(1).getReg(); 1505 if (!MRI.hasOneNonDBGUse(LogicDest)) 1506 return false; 1507 1508 MachineInstr *LogicMI = MRI.getUniqueVRegDef(LogicDest); 1509 unsigned LogicOpcode = LogicMI->getOpcode(); 1510 if (LogicOpcode != TargetOpcode::G_AND && LogicOpcode != TargetOpcode::G_OR && 1511 LogicOpcode != TargetOpcode::G_XOR) 1512 return false; 1513 1514 // Find a matching one-use shift by constant. 1515 const Register C1 = MI.getOperand(2).getReg(); 1516 auto MaybeImmVal = getIConstantVRegValWithLookThrough(C1, MRI); 1517 if (!MaybeImmVal) 1518 return false; 1519 1520 const uint64_t C1Val = MaybeImmVal->Value.getZExtValue(); 1521 1522 auto matchFirstShift = [&](const MachineInstr *MI, uint64_t &ShiftVal) { 1523 // Shift should match previous one and should be a one-use. 1524 if (MI->getOpcode() != ShiftOpcode || 1525 !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg())) 1526 return false; 1527 1528 // Must be a constant. 1529 auto MaybeImmVal = 1530 getIConstantVRegValWithLookThrough(MI->getOperand(2).getReg(), MRI); 1531 if (!MaybeImmVal) 1532 return false; 1533 1534 ShiftVal = MaybeImmVal->Value.getSExtValue(); 1535 return true; 1536 }; 1537 1538 // Logic ops are commutative, so check each operand for a match. 1539 Register LogicMIReg1 = LogicMI->getOperand(1).getReg(); 1540 MachineInstr *LogicMIOp1 = MRI.getUniqueVRegDef(LogicMIReg1); 1541 Register LogicMIReg2 = LogicMI->getOperand(2).getReg(); 1542 MachineInstr *LogicMIOp2 = MRI.getUniqueVRegDef(LogicMIReg2); 1543 uint64_t C0Val; 1544 1545 if (matchFirstShift(LogicMIOp1, C0Val)) { 1546 MatchInfo.LogicNonShiftReg = LogicMIReg2; 1547 MatchInfo.Shift2 = LogicMIOp1; 1548 } else if (matchFirstShift(LogicMIOp2, C0Val)) { 1549 MatchInfo.LogicNonShiftReg = LogicMIReg1; 1550 MatchInfo.Shift2 = LogicMIOp2; 1551 } else 1552 return false; 1553 1554 MatchInfo.ValSum = C0Val + C1Val; 1555 1556 // The fold is not valid if the sum of the shift values exceeds bitwidth. 1557 if (MatchInfo.ValSum >= MRI.getType(LogicDest).getScalarSizeInBits()) 1558 return false; 1559 1560 MatchInfo.Logic = LogicMI; 1561 return true; 1562 } 1563 1564 void CombinerHelper::applyShiftOfShiftedLogic(MachineInstr &MI, 1565 ShiftOfShiftedLogic &MatchInfo) { 1566 unsigned Opcode = MI.getOpcode(); 1567 assert((Opcode == TargetOpcode::G_SHL || Opcode == TargetOpcode::G_ASHR || 1568 Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_USHLSAT || 1569 Opcode == TargetOpcode::G_SSHLSAT) && 1570 "Expected G_SHL, G_ASHR, G_LSHR, G_USHLSAT and G_SSHLSAT"); 1571 1572 LLT ShlType = MRI.getType(MI.getOperand(2).getReg()); 1573 LLT DestType = MRI.getType(MI.getOperand(0).getReg()); 1574 Builder.setInstrAndDebugLoc(MI); 1575 1576 Register Const = Builder.buildConstant(ShlType, MatchInfo.ValSum).getReg(0); 1577 1578 Register Shift1Base = MatchInfo.Shift2->getOperand(1).getReg(); 1579 Register Shift1 = 1580 Builder.buildInstr(Opcode, {DestType}, {Shift1Base, Const}).getReg(0); 1581 1582 Register Shift2Const = MI.getOperand(2).getReg(); 1583 Register Shift2 = Builder 1584 .buildInstr(Opcode, {DestType}, 1585 {MatchInfo.LogicNonShiftReg, Shift2Const}) 1586 .getReg(0); 1587 1588 Register Dest = MI.getOperand(0).getReg(); 1589 Builder.buildInstr(MatchInfo.Logic->getOpcode(), {Dest}, {Shift1, Shift2}); 1590 1591 // These were one use so it's safe to remove them. 1592 MatchInfo.Shift2->eraseFromParent(); 1593 MatchInfo.Logic->eraseFromParent(); 1594 1595 MI.eraseFromParent(); 1596 } 1597 1598 bool CombinerHelper::matchCombineMulToShl(MachineInstr &MI, 1599 unsigned &ShiftVal) { 1600 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL"); 1601 auto MaybeImmVal = 1602 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); 1603 if (!MaybeImmVal) 1604 return false; 1605 1606 ShiftVal = MaybeImmVal->Value.exactLogBase2(); 1607 return (static_cast<int32_t>(ShiftVal) != -1); 1608 } 1609 1610 void CombinerHelper::applyCombineMulToShl(MachineInstr &MI, 1611 unsigned &ShiftVal) { 1612 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL"); 1613 MachineIRBuilder MIB(MI); 1614 LLT ShiftTy = MRI.getType(MI.getOperand(0).getReg()); 1615 auto ShiftCst = MIB.buildConstant(ShiftTy, ShiftVal); 1616 Observer.changingInstr(MI); 1617 MI.setDesc(MIB.getTII().get(TargetOpcode::G_SHL)); 1618 MI.getOperand(2).setReg(ShiftCst.getReg(0)); 1619 Observer.changedInstr(MI); 1620 } 1621 1622 // shl ([sza]ext x), y => zext (shl x, y), if shift does not overflow source 1623 bool CombinerHelper::matchCombineShlOfExtend(MachineInstr &MI, 1624 RegisterImmPair &MatchData) { 1625 assert(MI.getOpcode() == TargetOpcode::G_SHL && KB); 1626 1627 Register LHS = MI.getOperand(1).getReg(); 1628 1629 Register ExtSrc; 1630 if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) && 1631 !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) && 1632 !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc)))) 1633 return false; 1634 1635 // TODO: Should handle vector splat. 1636 Register RHS = MI.getOperand(2).getReg(); 1637 auto MaybeShiftAmtVal = getIConstantVRegValWithLookThrough(RHS, MRI); 1638 if (!MaybeShiftAmtVal) 1639 return false; 1640 1641 if (LI) { 1642 LLT SrcTy = MRI.getType(ExtSrc); 1643 1644 // We only really care about the legality with the shifted value. We can 1645 // pick any type the constant shift amount, so ask the target what to 1646 // use. Otherwise we would have to guess and hope it is reported as legal. 1647 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(SrcTy); 1648 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_SHL, {SrcTy, ShiftAmtTy}})) 1649 return false; 1650 } 1651 1652 int64_t ShiftAmt = MaybeShiftAmtVal->Value.getSExtValue(); 1653 MatchData.Reg = ExtSrc; 1654 MatchData.Imm = ShiftAmt; 1655 1656 unsigned MinLeadingZeros = KB->getKnownZeroes(ExtSrc).countLeadingOnes(); 1657 return MinLeadingZeros >= ShiftAmt; 1658 } 1659 1660 void CombinerHelper::applyCombineShlOfExtend(MachineInstr &MI, 1661 const RegisterImmPair &MatchData) { 1662 Register ExtSrcReg = MatchData.Reg; 1663 int64_t ShiftAmtVal = MatchData.Imm; 1664 1665 LLT ExtSrcTy = MRI.getType(ExtSrcReg); 1666 Builder.setInstrAndDebugLoc(MI); 1667 auto ShiftAmt = Builder.buildConstant(ExtSrcTy, ShiftAmtVal); 1668 auto NarrowShift = 1669 Builder.buildShl(ExtSrcTy, ExtSrcReg, ShiftAmt, MI.getFlags()); 1670 Builder.buildZExt(MI.getOperand(0), NarrowShift); 1671 MI.eraseFromParent(); 1672 } 1673 1674 bool CombinerHelper::matchCombineMergeUnmerge(MachineInstr &MI, 1675 Register &MatchInfo) { 1676 GMerge &Merge = cast<GMerge>(MI); 1677 SmallVector<Register, 16> MergedValues; 1678 for (unsigned I = 0; I < Merge.getNumSources(); ++I) 1679 MergedValues.emplace_back(Merge.getSourceReg(I)); 1680 1681 auto *Unmerge = getOpcodeDef<GUnmerge>(MergedValues[0], MRI); 1682 if (!Unmerge || Unmerge->getNumDefs() != Merge.getNumSources()) 1683 return false; 1684 1685 for (unsigned I = 0; I < MergedValues.size(); ++I) 1686 if (MergedValues[I] != Unmerge->getReg(I)) 1687 return false; 1688 1689 MatchInfo = Unmerge->getSourceReg(); 1690 return true; 1691 } 1692 1693 static Register peekThroughBitcast(Register Reg, 1694 const MachineRegisterInfo &MRI) { 1695 while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg)))) 1696 ; 1697 1698 return Reg; 1699 } 1700 1701 bool CombinerHelper::matchCombineUnmergeMergeToPlainValues( 1702 MachineInstr &MI, SmallVectorImpl<Register> &Operands) { 1703 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 1704 "Expected an unmerge"); 1705 auto &Unmerge = cast<GUnmerge>(MI); 1706 Register SrcReg = peekThroughBitcast(Unmerge.getSourceReg(), MRI); 1707 1708 auto *SrcInstr = getOpcodeDef<GMergeLikeOp>(SrcReg, MRI); 1709 if (!SrcInstr) 1710 return false; 1711 1712 // Check the source type of the merge. 1713 LLT SrcMergeTy = MRI.getType(SrcInstr->getSourceReg(0)); 1714 LLT Dst0Ty = MRI.getType(Unmerge.getReg(0)); 1715 bool SameSize = Dst0Ty.getSizeInBits() == SrcMergeTy.getSizeInBits(); 1716 if (SrcMergeTy != Dst0Ty && !SameSize) 1717 return false; 1718 // They are the same now (modulo a bitcast). 1719 // We can collect all the src registers. 1720 for (unsigned Idx = 0; Idx < SrcInstr->getNumSources(); ++Idx) 1721 Operands.push_back(SrcInstr->getSourceReg(Idx)); 1722 return true; 1723 } 1724 1725 void CombinerHelper::applyCombineUnmergeMergeToPlainValues( 1726 MachineInstr &MI, SmallVectorImpl<Register> &Operands) { 1727 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 1728 "Expected an unmerge"); 1729 assert((MI.getNumOperands() - 1 == Operands.size()) && 1730 "Not enough operands to replace all defs"); 1731 unsigned NumElems = MI.getNumOperands() - 1; 1732 1733 LLT SrcTy = MRI.getType(Operands[0]); 1734 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 1735 bool CanReuseInputDirectly = DstTy == SrcTy; 1736 Builder.setInstrAndDebugLoc(MI); 1737 for (unsigned Idx = 0; Idx < NumElems; ++Idx) { 1738 Register DstReg = MI.getOperand(Idx).getReg(); 1739 Register SrcReg = Operands[Idx]; 1740 if (CanReuseInputDirectly) 1741 replaceRegWith(MRI, DstReg, SrcReg); 1742 else 1743 Builder.buildCast(DstReg, SrcReg); 1744 } 1745 MI.eraseFromParent(); 1746 } 1747 1748 bool CombinerHelper::matchCombineUnmergeConstant(MachineInstr &MI, 1749 SmallVectorImpl<APInt> &Csts) { 1750 unsigned SrcIdx = MI.getNumOperands() - 1; 1751 Register SrcReg = MI.getOperand(SrcIdx).getReg(); 1752 MachineInstr *SrcInstr = MRI.getVRegDef(SrcReg); 1753 if (SrcInstr->getOpcode() != TargetOpcode::G_CONSTANT && 1754 SrcInstr->getOpcode() != TargetOpcode::G_FCONSTANT) 1755 return false; 1756 // Break down the big constant in smaller ones. 1757 const MachineOperand &CstVal = SrcInstr->getOperand(1); 1758 APInt Val = SrcInstr->getOpcode() == TargetOpcode::G_CONSTANT 1759 ? CstVal.getCImm()->getValue() 1760 : CstVal.getFPImm()->getValueAPF().bitcastToAPInt(); 1761 1762 LLT Dst0Ty = MRI.getType(MI.getOperand(0).getReg()); 1763 unsigned ShiftAmt = Dst0Ty.getSizeInBits(); 1764 // Unmerge a constant. 1765 for (unsigned Idx = 0; Idx != SrcIdx; ++Idx) { 1766 Csts.emplace_back(Val.trunc(ShiftAmt)); 1767 Val = Val.lshr(ShiftAmt); 1768 } 1769 1770 return true; 1771 } 1772 1773 void CombinerHelper::applyCombineUnmergeConstant(MachineInstr &MI, 1774 SmallVectorImpl<APInt> &Csts) { 1775 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 1776 "Expected an unmerge"); 1777 assert((MI.getNumOperands() - 1 == Csts.size()) && 1778 "Not enough operands to replace all defs"); 1779 unsigned NumElems = MI.getNumOperands() - 1; 1780 Builder.setInstrAndDebugLoc(MI); 1781 for (unsigned Idx = 0; Idx < NumElems; ++Idx) { 1782 Register DstReg = MI.getOperand(Idx).getReg(); 1783 Builder.buildConstant(DstReg, Csts[Idx]); 1784 } 1785 1786 MI.eraseFromParent(); 1787 } 1788 1789 bool CombinerHelper::matchCombineUnmergeUndef( 1790 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 1791 unsigned SrcIdx = MI.getNumOperands() - 1; 1792 Register SrcReg = MI.getOperand(SrcIdx).getReg(); 1793 MatchInfo = [&MI](MachineIRBuilder &B) { 1794 unsigned NumElems = MI.getNumOperands() - 1; 1795 for (unsigned Idx = 0; Idx < NumElems; ++Idx) { 1796 Register DstReg = MI.getOperand(Idx).getReg(); 1797 B.buildUndef(DstReg); 1798 } 1799 }; 1800 return isa<GImplicitDef>(MRI.getVRegDef(SrcReg)); 1801 } 1802 1803 bool CombinerHelper::matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) { 1804 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 1805 "Expected an unmerge"); 1806 // Check that all the lanes are dead except the first one. 1807 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) { 1808 if (!MRI.use_nodbg_empty(MI.getOperand(Idx).getReg())) 1809 return false; 1810 } 1811 return true; 1812 } 1813 1814 void CombinerHelper::applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI) { 1815 Builder.setInstrAndDebugLoc(MI); 1816 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg(); 1817 // Truncating a vector is going to truncate every single lane, 1818 // whereas we want the full lowbits. 1819 // Do the operation on a scalar instead. 1820 LLT SrcTy = MRI.getType(SrcReg); 1821 if (SrcTy.isVector()) 1822 SrcReg = 1823 Builder.buildCast(LLT::scalar(SrcTy.getSizeInBits()), SrcReg).getReg(0); 1824 1825 Register Dst0Reg = MI.getOperand(0).getReg(); 1826 LLT Dst0Ty = MRI.getType(Dst0Reg); 1827 if (Dst0Ty.isVector()) { 1828 auto MIB = Builder.buildTrunc(LLT::scalar(Dst0Ty.getSizeInBits()), SrcReg); 1829 Builder.buildCast(Dst0Reg, MIB); 1830 } else 1831 Builder.buildTrunc(Dst0Reg, SrcReg); 1832 MI.eraseFromParent(); 1833 } 1834 1835 bool CombinerHelper::matchCombineUnmergeZExtToZExt(MachineInstr &MI) { 1836 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 1837 "Expected an unmerge"); 1838 Register Dst0Reg = MI.getOperand(0).getReg(); 1839 LLT Dst0Ty = MRI.getType(Dst0Reg); 1840 // G_ZEXT on vector applies to each lane, so it will 1841 // affect all destinations. Therefore we won't be able 1842 // to simplify the unmerge to just the first definition. 1843 if (Dst0Ty.isVector()) 1844 return false; 1845 Register SrcReg = MI.getOperand(MI.getNumDefs()).getReg(); 1846 LLT SrcTy = MRI.getType(SrcReg); 1847 if (SrcTy.isVector()) 1848 return false; 1849 1850 Register ZExtSrcReg; 1851 if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg)))) 1852 return false; 1853 1854 // Finally we can replace the first definition with 1855 // a zext of the source if the definition is big enough to hold 1856 // all of ZExtSrc bits. 1857 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg); 1858 return ZExtSrcTy.getSizeInBits() <= Dst0Ty.getSizeInBits(); 1859 } 1860 1861 void CombinerHelper::applyCombineUnmergeZExtToZExt(MachineInstr &MI) { 1862 assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES && 1863 "Expected an unmerge"); 1864 1865 Register Dst0Reg = MI.getOperand(0).getReg(); 1866 1867 MachineInstr *ZExtInstr = 1868 MRI.getVRegDef(MI.getOperand(MI.getNumDefs()).getReg()); 1869 assert(ZExtInstr && ZExtInstr->getOpcode() == TargetOpcode::G_ZEXT && 1870 "Expecting a G_ZEXT"); 1871 1872 Register ZExtSrcReg = ZExtInstr->getOperand(1).getReg(); 1873 LLT Dst0Ty = MRI.getType(Dst0Reg); 1874 LLT ZExtSrcTy = MRI.getType(ZExtSrcReg); 1875 1876 Builder.setInstrAndDebugLoc(MI); 1877 1878 if (Dst0Ty.getSizeInBits() > ZExtSrcTy.getSizeInBits()) { 1879 Builder.buildZExt(Dst0Reg, ZExtSrcReg); 1880 } else { 1881 assert(Dst0Ty.getSizeInBits() == ZExtSrcTy.getSizeInBits() && 1882 "ZExt src doesn't fit in destination"); 1883 replaceRegWith(MRI, Dst0Reg, ZExtSrcReg); 1884 } 1885 1886 Register ZeroReg; 1887 for (unsigned Idx = 1, EndIdx = MI.getNumDefs(); Idx != EndIdx; ++Idx) { 1888 if (!ZeroReg) 1889 ZeroReg = Builder.buildConstant(Dst0Ty, 0).getReg(0); 1890 replaceRegWith(MRI, MI.getOperand(Idx).getReg(), ZeroReg); 1891 } 1892 MI.eraseFromParent(); 1893 } 1894 1895 bool CombinerHelper::matchCombineShiftToUnmerge(MachineInstr &MI, 1896 unsigned TargetShiftSize, 1897 unsigned &ShiftVal) { 1898 assert((MI.getOpcode() == TargetOpcode::G_SHL || 1899 MI.getOpcode() == TargetOpcode::G_LSHR || 1900 MI.getOpcode() == TargetOpcode::G_ASHR) && "Expected a shift"); 1901 1902 LLT Ty = MRI.getType(MI.getOperand(0).getReg()); 1903 if (Ty.isVector()) // TODO: 1904 return false; 1905 1906 // Don't narrow further than the requested size. 1907 unsigned Size = Ty.getSizeInBits(); 1908 if (Size <= TargetShiftSize) 1909 return false; 1910 1911 auto MaybeImmVal = 1912 getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); 1913 if (!MaybeImmVal) 1914 return false; 1915 1916 ShiftVal = MaybeImmVal->Value.getSExtValue(); 1917 return ShiftVal >= Size / 2 && ShiftVal < Size; 1918 } 1919 1920 void CombinerHelper::applyCombineShiftToUnmerge(MachineInstr &MI, 1921 const unsigned &ShiftVal) { 1922 Register DstReg = MI.getOperand(0).getReg(); 1923 Register SrcReg = MI.getOperand(1).getReg(); 1924 LLT Ty = MRI.getType(SrcReg); 1925 unsigned Size = Ty.getSizeInBits(); 1926 unsigned HalfSize = Size / 2; 1927 assert(ShiftVal >= HalfSize); 1928 1929 LLT HalfTy = LLT::scalar(HalfSize); 1930 1931 Builder.setInstr(MI); 1932 auto Unmerge = Builder.buildUnmerge(HalfTy, SrcReg); 1933 unsigned NarrowShiftAmt = ShiftVal - HalfSize; 1934 1935 if (MI.getOpcode() == TargetOpcode::G_LSHR) { 1936 Register Narrowed = Unmerge.getReg(1); 1937 1938 // dst = G_LSHR s64:x, C for C >= 32 1939 // => 1940 // lo, hi = G_UNMERGE_VALUES x 1941 // dst = G_MERGE_VALUES (G_LSHR hi, C - 32), 0 1942 1943 if (NarrowShiftAmt != 0) { 1944 Narrowed = Builder.buildLShr(HalfTy, Narrowed, 1945 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0); 1946 } 1947 1948 auto Zero = Builder.buildConstant(HalfTy, 0); 1949 Builder.buildMerge(DstReg, { Narrowed, Zero }); 1950 } else if (MI.getOpcode() == TargetOpcode::G_SHL) { 1951 Register Narrowed = Unmerge.getReg(0); 1952 // dst = G_SHL s64:x, C for C >= 32 1953 // => 1954 // lo, hi = G_UNMERGE_VALUES x 1955 // dst = G_MERGE_VALUES 0, (G_SHL hi, C - 32) 1956 if (NarrowShiftAmt != 0) { 1957 Narrowed = Builder.buildShl(HalfTy, Narrowed, 1958 Builder.buildConstant(HalfTy, NarrowShiftAmt)).getReg(0); 1959 } 1960 1961 auto Zero = Builder.buildConstant(HalfTy, 0); 1962 Builder.buildMerge(DstReg, { Zero, Narrowed }); 1963 } else { 1964 assert(MI.getOpcode() == TargetOpcode::G_ASHR); 1965 auto Hi = Builder.buildAShr( 1966 HalfTy, Unmerge.getReg(1), 1967 Builder.buildConstant(HalfTy, HalfSize - 1)); 1968 1969 if (ShiftVal == HalfSize) { 1970 // (G_ASHR i64:x, 32) -> 1971 // G_MERGE_VALUES hi_32(x), (G_ASHR hi_32(x), 31) 1972 Builder.buildMerge(DstReg, { Unmerge.getReg(1), Hi }); 1973 } else if (ShiftVal == Size - 1) { 1974 // Don't need a second shift. 1975 // (G_ASHR i64:x, 63) -> 1976 // %narrowed = (G_ASHR hi_32(x), 31) 1977 // G_MERGE_VALUES %narrowed, %narrowed 1978 Builder.buildMerge(DstReg, { Hi, Hi }); 1979 } else { 1980 auto Lo = Builder.buildAShr( 1981 HalfTy, Unmerge.getReg(1), 1982 Builder.buildConstant(HalfTy, ShiftVal - HalfSize)); 1983 1984 // (G_ASHR i64:x, C) ->, for C >= 32 1985 // G_MERGE_VALUES (G_ASHR hi_32(x), C - 32), (G_ASHR hi_32(x), 31) 1986 Builder.buildMerge(DstReg, { Lo, Hi }); 1987 } 1988 } 1989 1990 MI.eraseFromParent(); 1991 } 1992 1993 bool CombinerHelper::tryCombineShiftToUnmerge(MachineInstr &MI, 1994 unsigned TargetShiftAmount) { 1995 unsigned ShiftAmt; 1996 if (matchCombineShiftToUnmerge(MI, TargetShiftAmount, ShiftAmt)) { 1997 applyCombineShiftToUnmerge(MI, ShiftAmt); 1998 return true; 1999 } 2000 2001 return false; 2002 } 2003 2004 bool CombinerHelper::matchCombineI2PToP2I(MachineInstr &MI, Register &Reg) { 2005 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR"); 2006 Register DstReg = MI.getOperand(0).getReg(); 2007 LLT DstTy = MRI.getType(DstReg); 2008 Register SrcReg = MI.getOperand(1).getReg(); 2009 return mi_match(SrcReg, MRI, 2010 m_GPtrToInt(m_all_of(m_SpecificType(DstTy), m_Reg(Reg)))); 2011 } 2012 2013 void CombinerHelper::applyCombineI2PToP2I(MachineInstr &MI, Register &Reg) { 2014 assert(MI.getOpcode() == TargetOpcode::G_INTTOPTR && "Expected a G_INTTOPTR"); 2015 Register DstReg = MI.getOperand(0).getReg(); 2016 Builder.setInstr(MI); 2017 Builder.buildCopy(DstReg, Reg); 2018 MI.eraseFromParent(); 2019 } 2020 2021 bool CombinerHelper::matchCombineP2IToI2P(MachineInstr &MI, Register &Reg) { 2022 assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT"); 2023 Register SrcReg = MI.getOperand(1).getReg(); 2024 return mi_match(SrcReg, MRI, m_GIntToPtr(m_Reg(Reg))); 2025 } 2026 2027 void CombinerHelper::applyCombineP2IToI2P(MachineInstr &MI, Register &Reg) { 2028 assert(MI.getOpcode() == TargetOpcode::G_PTRTOINT && "Expected a G_PTRTOINT"); 2029 Register DstReg = MI.getOperand(0).getReg(); 2030 Builder.setInstr(MI); 2031 Builder.buildZExtOrTrunc(DstReg, Reg); 2032 MI.eraseFromParent(); 2033 } 2034 2035 bool CombinerHelper::matchCombineAddP2IToPtrAdd( 2036 MachineInstr &MI, std::pair<Register, bool> &PtrReg) { 2037 assert(MI.getOpcode() == TargetOpcode::G_ADD); 2038 Register LHS = MI.getOperand(1).getReg(); 2039 Register RHS = MI.getOperand(2).getReg(); 2040 LLT IntTy = MRI.getType(LHS); 2041 2042 // G_PTR_ADD always has the pointer in the LHS, so we may need to commute the 2043 // instruction. 2044 PtrReg.second = false; 2045 for (Register SrcReg : {LHS, RHS}) { 2046 if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) { 2047 // Don't handle cases where the integer is implicitly converted to the 2048 // pointer width. 2049 LLT PtrTy = MRI.getType(PtrReg.first); 2050 if (PtrTy.getScalarSizeInBits() == IntTy.getScalarSizeInBits()) 2051 return true; 2052 } 2053 2054 PtrReg.second = true; 2055 } 2056 2057 return false; 2058 } 2059 2060 void CombinerHelper::applyCombineAddP2IToPtrAdd( 2061 MachineInstr &MI, std::pair<Register, bool> &PtrReg) { 2062 Register Dst = MI.getOperand(0).getReg(); 2063 Register LHS = MI.getOperand(1).getReg(); 2064 Register RHS = MI.getOperand(2).getReg(); 2065 2066 const bool DoCommute = PtrReg.second; 2067 if (DoCommute) 2068 std::swap(LHS, RHS); 2069 LHS = PtrReg.first; 2070 2071 LLT PtrTy = MRI.getType(LHS); 2072 2073 Builder.setInstrAndDebugLoc(MI); 2074 auto PtrAdd = Builder.buildPtrAdd(PtrTy, LHS, RHS); 2075 Builder.buildPtrToInt(Dst, PtrAdd); 2076 MI.eraseFromParent(); 2077 } 2078 2079 bool CombinerHelper::matchCombineConstPtrAddToI2P(MachineInstr &MI, 2080 APInt &NewCst) { 2081 auto &PtrAdd = cast<GPtrAdd>(MI); 2082 Register LHS = PtrAdd.getBaseReg(); 2083 Register RHS = PtrAdd.getOffsetReg(); 2084 MachineRegisterInfo &MRI = Builder.getMF().getRegInfo(); 2085 2086 if (auto RHSCst = getIConstantVRegVal(RHS, MRI)) { 2087 APInt Cst; 2088 if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) { 2089 auto DstTy = MRI.getType(PtrAdd.getReg(0)); 2090 // G_INTTOPTR uses zero-extension 2091 NewCst = Cst.zextOrTrunc(DstTy.getSizeInBits()); 2092 NewCst += RHSCst->sextOrTrunc(DstTy.getSizeInBits()); 2093 return true; 2094 } 2095 } 2096 2097 return false; 2098 } 2099 2100 void CombinerHelper::applyCombineConstPtrAddToI2P(MachineInstr &MI, 2101 APInt &NewCst) { 2102 auto &PtrAdd = cast<GPtrAdd>(MI); 2103 Register Dst = PtrAdd.getReg(0); 2104 2105 Builder.setInstrAndDebugLoc(MI); 2106 Builder.buildConstant(Dst, NewCst); 2107 PtrAdd.eraseFromParent(); 2108 } 2109 2110 bool CombinerHelper::matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg) { 2111 assert(MI.getOpcode() == TargetOpcode::G_ANYEXT && "Expected a G_ANYEXT"); 2112 Register DstReg = MI.getOperand(0).getReg(); 2113 Register SrcReg = MI.getOperand(1).getReg(); 2114 LLT DstTy = MRI.getType(DstReg); 2115 return mi_match(SrcReg, MRI, 2116 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy)))); 2117 } 2118 2119 bool CombinerHelper::matchCombineZextTrunc(MachineInstr &MI, Register &Reg) { 2120 assert(MI.getOpcode() == TargetOpcode::G_ZEXT && "Expected a G_ZEXT"); 2121 Register DstReg = MI.getOperand(0).getReg(); 2122 Register SrcReg = MI.getOperand(1).getReg(); 2123 LLT DstTy = MRI.getType(DstReg); 2124 if (mi_match(SrcReg, MRI, 2125 m_GTrunc(m_all_of(m_Reg(Reg), m_SpecificType(DstTy))))) { 2126 unsigned DstSize = DstTy.getScalarSizeInBits(); 2127 unsigned SrcSize = MRI.getType(SrcReg).getScalarSizeInBits(); 2128 return KB->getKnownBits(Reg).countMinLeadingZeros() >= DstSize - SrcSize; 2129 } 2130 return false; 2131 } 2132 2133 bool CombinerHelper::matchCombineExtOfExt( 2134 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) { 2135 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT || 2136 MI.getOpcode() == TargetOpcode::G_SEXT || 2137 MI.getOpcode() == TargetOpcode::G_ZEXT) && 2138 "Expected a G_[ASZ]EXT"); 2139 Register SrcReg = MI.getOperand(1).getReg(); 2140 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg); 2141 // Match exts with the same opcode, anyext([sz]ext) and sext(zext). 2142 unsigned Opc = MI.getOpcode(); 2143 unsigned SrcOpc = SrcMI->getOpcode(); 2144 if (Opc == SrcOpc || 2145 (Opc == TargetOpcode::G_ANYEXT && 2146 (SrcOpc == TargetOpcode::G_SEXT || SrcOpc == TargetOpcode::G_ZEXT)) || 2147 (Opc == TargetOpcode::G_SEXT && SrcOpc == TargetOpcode::G_ZEXT)) { 2148 MatchInfo = std::make_tuple(SrcMI->getOperand(1).getReg(), SrcOpc); 2149 return true; 2150 } 2151 return false; 2152 } 2153 2154 void CombinerHelper::applyCombineExtOfExt( 2155 MachineInstr &MI, std::tuple<Register, unsigned> &MatchInfo) { 2156 assert((MI.getOpcode() == TargetOpcode::G_ANYEXT || 2157 MI.getOpcode() == TargetOpcode::G_SEXT || 2158 MI.getOpcode() == TargetOpcode::G_ZEXT) && 2159 "Expected a G_[ASZ]EXT"); 2160 2161 Register Reg = std::get<0>(MatchInfo); 2162 unsigned SrcExtOp = std::get<1>(MatchInfo); 2163 2164 // Combine exts with the same opcode. 2165 if (MI.getOpcode() == SrcExtOp) { 2166 Observer.changingInstr(MI); 2167 MI.getOperand(1).setReg(Reg); 2168 Observer.changedInstr(MI); 2169 return; 2170 } 2171 2172 // Combine: 2173 // - anyext([sz]ext x) to [sz]ext x 2174 // - sext(zext x) to zext x 2175 if (MI.getOpcode() == TargetOpcode::G_ANYEXT || 2176 (MI.getOpcode() == TargetOpcode::G_SEXT && 2177 SrcExtOp == TargetOpcode::G_ZEXT)) { 2178 Register DstReg = MI.getOperand(0).getReg(); 2179 Builder.setInstrAndDebugLoc(MI); 2180 Builder.buildInstr(SrcExtOp, {DstReg}, {Reg}); 2181 MI.eraseFromParent(); 2182 } 2183 } 2184 2185 void CombinerHelper::applyCombineMulByNegativeOne(MachineInstr &MI) { 2186 assert(MI.getOpcode() == TargetOpcode::G_MUL && "Expected a G_MUL"); 2187 Register DstReg = MI.getOperand(0).getReg(); 2188 Register SrcReg = MI.getOperand(1).getReg(); 2189 LLT DstTy = MRI.getType(DstReg); 2190 2191 Builder.setInstrAndDebugLoc(MI); 2192 Builder.buildSub(DstReg, Builder.buildConstant(DstTy, 0), SrcReg, 2193 MI.getFlags()); 2194 MI.eraseFromParent(); 2195 } 2196 2197 bool CombinerHelper::matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg) { 2198 assert(MI.getOpcode() == TargetOpcode::G_FNEG && "Expected a G_FNEG"); 2199 Register SrcReg = MI.getOperand(1).getReg(); 2200 return mi_match(SrcReg, MRI, m_GFNeg(m_Reg(Reg))); 2201 } 2202 2203 bool CombinerHelper::matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src) { 2204 assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS"); 2205 Src = MI.getOperand(1).getReg(); 2206 Register AbsSrc; 2207 return mi_match(Src, MRI, m_GFabs(m_Reg(AbsSrc))); 2208 } 2209 2210 bool CombinerHelper::matchCombineFAbsOfFNeg(MachineInstr &MI, 2211 BuildFnTy &MatchInfo) { 2212 assert(MI.getOpcode() == TargetOpcode::G_FABS && "Expected a G_FABS"); 2213 Register Src = MI.getOperand(1).getReg(); 2214 Register NegSrc; 2215 2216 if (!mi_match(Src, MRI, m_GFNeg(m_Reg(NegSrc)))) 2217 return false; 2218 2219 MatchInfo = [=, &MI](MachineIRBuilder &B) { 2220 Observer.changingInstr(MI); 2221 MI.getOperand(1).setReg(NegSrc); 2222 Observer.changedInstr(MI); 2223 }; 2224 return true; 2225 } 2226 2227 bool CombinerHelper::matchCombineTruncOfExt( 2228 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) { 2229 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC"); 2230 Register SrcReg = MI.getOperand(1).getReg(); 2231 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg); 2232 unsigned SrcOpc = SrcMI->getOpcode(); 2233 if (SrcOpc == TargetOpcode::G_ANYEXT || SrcOpc == TargetOpcode::G_SEXT || 2234 SrcOpc == TargetOpcode::G_ZEXT) { 2235 MatchInfo = std::make_pair(SrcMI->getOperand(1).getReg(), SrcOpc); 2236 return true; 2237 } 2238 return false; 2239 } 2240 2241 void CombinerHelper::applyCombineTruncOfExt( 2242 MachineInstr &MI, std::pair<Register, unsigned> &MatchInfo) { 2243 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC"); 2244 Register SrcReg = MatchInfo.first; 2245 unsigned SrcExtOp = MatchInfo.second; 2246 Register DstReg = MI.getOperand(0).getReg(); 2247 LLT SrcTy = MRI.getType(SrcReg); 2248 LLT DstTy = MRI.getType(DstReg); 2249 if (SrcTy == DstTy) { 2250 MI.eraseFromParent(); 2251 replaceRegWith(MRI, DstReg, SrcReg); 2252 return; 2253 } 2254 Builder.setInstrAndDebugLoc(MI); 2255 if (SrcTy.getSizeInBits() < DstTy.getSizeInBits()) 2256 Builder.buildInstr(SrcExtOp, {DstReg}, {SrcReg}); 2257 else 2258 Builder.buildTrunc(DstReg, SrcReg); 2259 MI.eraseFromParent(); 2260 } 2261 2262 bool CombinerHelper::matchCombineTruncOfShl( 2263 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) { 2264 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC"); 2265 Register DstReg = MI.getOperand(0).getReg(); 2266 Register SrcReg = MI.getOperand(1).getReg(); 2267 LLT DstTy = MRI.getType(DstReg); 2268 Register ShiftSrc; 2269 Register ShiftAmt; 2270 2271 if (MRI.hasOneNonDBGUse(SrcReg) && 2272 mi_match(SrcReg, MRI, m_GShl(m_Reg(ShiftSrc), m_Reg(ShiftAmt))) && 2273 isLegalOrBeforeLegalizer( 2274 {TargetOpcode::G_SHL, 2275 {DstTy, getTargetLowering().getPreferredShiftAmountTy(DstTy)}})) { 2276 KnownBits Known = KB->getKnownBits(ShiftAmt); 2277 unsigned Size = DstTy.getSizeInBits(); 2278 if (Known.countMaxActiveBits() <= Log2_32(Size)) { 2279 MatchInfo = std::make_pair(ShiftSrc, ShiftAmt); 2280 return true; 2281 } 2282 } 2283 return false; 2284 } 2285 2286 void CombinerHelper::applyCombineTruncOfShl( 2287 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) { 2288 assert(MI.getOpcode() == TargetOpcode::G_TRUNC && "Expected a G_TRUNC"); 2289 Register DstReg = MI.getOperand(0).getReg(); 2290 Register SrcReg = MI.getOperand(1).getReg(); 2291 LLT DstTy = MRI.getType(DstReg); 2292 MachineInstr *SrcMI = MRI.getVRegDef(SrcReg); 2293 2294 Register ShiftSrc = MatchInfo.first; 2295 Register ShiftAmt = MatchInfo.second; 2296 Builder.setInstrAndDebugLoc(MI); 2297 auto TruncShiftSrc = Builder.buildTrunc(DstTy, ShiftSrc); 2298 Builder.buildShl(DstReg, TruncShiftSrc, ShiftAmt, SrcMI->getFlags()); 2299 MI.eraseFromParent(); 2300 } 2301 2302 bool CombinerHelper::matchAnyExplicitUseIsUndef(MachineInstr &MI) { 2303 return any_of(MI.explicit_uses(), [this](const MachineOperand &MO) { 2304 return MO.isReg() && 2305 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI); 2306 }); 2307 } 2308 2309 bool CombinerHelper::matchAllExplicitUsesAreUndef(MachineInstr &MI) { 2310 return all_of(MI.explicit_uses(), [this](const MachineOperand &MO) { 2311 return !MO.isReg() || 2312 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI); 2313 }); 2314 } 2315 2316 bool CombinerHelper::matchUndefShuffleVectorMask(MachineInstr &MI) { 2317 assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR); 2318 ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask(); 2319 return all_of(Mask, [](int Elt) { return Elt < 0; }); 2320 } 2321 2322 bool CombinerHelper::matchUndefStore(MachineInstr &MI) { 2323 assert(MI.getOpcode() == TargetOpcode::G_STORE); 2324 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(0).getReg(), 2325 MRI); 2326 } 2327 2328 bool CombinerHelper::matchUndefSelectCmp(MachineInstr &MI) { 2329 assert(MI.getOpcode() == TargetOpcode::G_SELECT); 2330 return getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MI.getOperand(1).getReg(), 2331 MRI); 2332 } 2333 2334 bool CombinerHelper::matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx) { 2335 GSelect &SelMI = cast<GSelect>(MI); 2336 auto Cst = 2337 isConstantOrConstantSplatVector(*MRI.getVRegDef(SelMI.getCondReg()), MRI); 2338 if (!Cst) 2339 return false; 2340 OpIdx = Cst->isZero() ? 3 : 2; 2341 return true; 2342 } 2343 2344 bool CombinerHelper::eraseInst(MachineInstr &MI) { 2345 MI.eraseFromParent(); 2346 return true; 2347 } 2348 2349 bool CombinerHelper::matchEqualDefs(const MachineOperand &MOP1, 2350 const MachineOperand &MOP2) { 2351 if (!MOP1.isReg() || !MOP2.isReg()) 2352 return false; 2353 auto InstAndDef1 = getDefSrcRegIgnoringCopies(MOP1.getReg(), MRI); 2354 if (!InstAndDef1) 2355 return false; 2356 auto InstAndDef2 = getDefSrcRegIgnoringCopies(MOP2.getReg(), MRI); 2357 if (!InstAndDef2) 2358 return false; 2359 MachineInstr *I1 = InstAndDef1->MI; 2360 MachineInstr *I2 = InstAndDef2->MI; 2361 2362 // Handle a case like this: 2363 // 2364 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<2 x s64>) 2365 // 2366 // Even though %0 and %1 are produced by the same instruction they are not 2367 // the same values. 2368 if (I1 == I2) 2369 return MOP1.getReg() == MOP2.getReg(); 2370 2371 // If we have an instruction which loads or stores, we can't guarantee that 2372 // it is identical. 2373 // 2374 // For example, we may have 2375 // 2376 // %x1 = G_LOAD %addr (load N from @somewhere) 2377 // ... 2378 // call @foo 2379 // ... 2380 // %x2 = G_LOAD %addr (load N from @somewhere) 2381 // ... 2382 // %or = G_OR %x1, %x2 2383 // 2384 // It's possible that @foo will modify whatever lives at the address we're 2385 // loading from. To be safe, let's just assume that all loads and stores 2386 // are different (unless we have something which is guaranteed to not 2387 // change.) 2388 if (I1->mayLoadOrStore() && !I1->isDereferenceableInvariantLoad(nullptr)) 2389 return false; 2390 2391 // If both instructions are loads or stores, they are equal only if both 2392 // are dereferenceable invariant loads with the same number of bits. 2393 if (I1->mayLoadOrStore() && I2->mayLoadOrStore()) { 2394 GLoadStore *LS1 = dyn_cast<GLoadStore>(I1); 2395 GLoadStore *LS2 = dyn_cast<GLoadStore>(I2); 2396 if (!LS1 || !LS2) 2397 return false; 2398 2399 if (!I2->isDereferenceableInvariantLoad(nullptr) || 2400 (LS1->getMemSizeInBits() != LS2->getMemSizeInBits())) 2401 return false; 2402 } 2403 2404 // Check for physical registers on the instructions first to avoid cases 2405 // like this: 2406 // 2407 // %a = COPY $physreg 2408 // ... 2409 // SOMETHING implicit-def $physreg 2410 // ... 2411 // %b = COPY $physreg 2412 // 2413 // These copies are not equivalent. 2414 if (any_of(I1->uses(), [](const MachineOperand &MO) { 2415 return MO.isReg() && MO.getReg().isPhysical(); 2416 })) { 2417 // Check if we have a case like this: 2418 // 2419 // %a = COPY $physreg 2420 // %b = COPY %a 2421 // 2422 // In this case, I1 and I2 will both be equal to %a = COPY $physreg. 2423 // From that, we know that they must have the same value, since they must 2424 // have come from the same COPY. 2425 return I1->isIdenticalTo(*I2); 2426 } 2427 2428 // We don't have any physical registers, so we don't necessarily need the 2429 // same vreg defs. 2430 // 2431 // On the off-chance that there's some target instruction feeding into the 2432 // instruction, let's use produceSameValue instead of isIdenticalTo. 2433 if (Builder.getTII().produceSameValue(*I1, *I2, &MRI)) { 2434 // Handle instructions with multiple defs that produce same values. Values 2435 // are same for operands with same index. 2436 // %0:_(s8), %1:_(s8), %2:_(s8), %3:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>) 2437 // %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8) = G_UNMERGE_VALUES %4:_(<4 x s8>) 2438 // I1 and I2 are different instructions but produce same values, 2439 // %1 and %6 are same, %1 and %7 are not the same value. 2440 return I1->findRegisterDefOperandIdx(InstAndDef1->Reg) == 2441 I2->findRegisterDefOperandIdx(InstAndDef2->Reg); 2442 } 2443 return false; 2444 } 2445 2446 bool CombinerHelper::matchConstantOp(const MachineOperand &MOP, int64_t C) { 2447 if (!MOP.isReg()) 2448 return false; 2449 auto *MI = MRI.getVRegDef(MOP.getReg()); 2450 auto MaybeCst = isConstantOrConstantSplatVector(*MI, MRI); 2451 return MaybeCst && MaybeCst->getBitWidth() <= 64 && 2452 MaybeCst->getSExtValue() == C; 2453 } 2454 2455 bool CombinerHelper::replaceSingleDefInstWithOperand(MachineInstr &MI, 2456 unsigned OpIdx) { 2457 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?"); 2458 Register OldReg = MI.getOperand(0).getReg(); 2459 Register Replacement = MI.getOperand(OpIdx).getReg(); 2460 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?"); 2461 MI.eraseFromParent(); 2462 replaceRegWith(MRI, OldReg, Replacement); 2463 return true; 2464 } 2465 2466 bool CombinerHelper::replaceSingleDefInstWithReg(MachineInstr &MI, 2467 Register Replacement) { 2468 assert(MI.getNumExplicitDefs() == 1 && "Expected one explicit def?"); 2469 Register OldReg = MI.getOperand(0).getReg(); 2470 assert(canReplaceReg(OldReg, Replacement, MRI) && "Cannot replace register?"); 2471 MI.eraseFromParent(); 2472 replaceRegWith(MRI, OldReg, Replacement); 2473 return true; 2474 } 2475 2476 bool CombinerHelper::matchSelectSameVal(MachineInstr &MI) { 2477 assert(MI.getOpcode() == TargetOpcode::G_SELECT); 2478 // Match (cond ? x : x) 2479 return matchEqualDefs(MI.getOperand(2), MI.getOperand(3)) && 2480 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(2).getReg(), 2481 MRI); 2482 } 2483 2484 bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) { 2485 return matchEqualDefs(MI.getOperand(1), MI.getOperand(2)) && 2486 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), 2487 MRI); 2488 } 2489 2490 bool CombinerHelper::matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) { 2491 return matchConstantOp(MI.getOperand(OpIdx), 0) && 2492 canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(), 2493 MRI); 2494 } 2495 2496 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) { 2497 MachineOperand &MO = MI.getOperand(OpIdx); 2498 return MO.isReg() && 2499 getOpcodeDef(TargetOpcode::G_IMPLICIT_DEF, MO.getReg(), MRI); 2500 } 2501 2502 bool CombinerHelper::matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, 2503 unsigned OpIdx) { 2504 MachineOperand &MO = MI.getOperand(OpIdx); 2505 return isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB); 2506 } 2507 2508 bool CombinerHelper::replaceInstWithFConstant(MachineInstr &MI, double C) { 2509 assert(MI.getNumDefs() == 1 && "Expected only one def?"); 2510 Builder.setInstr(MI); 2511 Builder.buildFConstant(MI.getOperand(0), C); 2512 MI.eraseFromParent(); 2513 return true; 2514 } 2515 2516 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, int64_t C) { 2517 assert(MI.getNumDefs() == 1 && "Expected only one def?"); 2518 Builder.setInstr(MI); 2519 Builder.buildConstant(MI.getOperand(0), C); 2520 MI.eraseFromParent(); 2521 return true; 2522 } 2523 2524 bool CombinerHelper::replaceInstWithConstant(MachineInstr &MI, APInt C) { 2525 assert(MI.getNumDefs() == 1 && "Expected only one def?"); 2526 Builder.setInstr(MI); 2527 Builder.buildConstant(MI.getOperand(0), C); 2528 MI.eraseFromParent(); 2529 return true; 2530 } 2531 2532 bool CombinerHelper::replaceInstWithUndef(MachineInstr &MI) { 2533 assert(MI.getNumDefs() == 1 && "Expected only one def?"); 2534 Builder.setInstr(MI); 2535 Builder.buildUndef(MI.getOperand(0)); 2536 MI.eraseFromParent(); 2537 return true; 2538 } 2539 2540 bool CombinerHelper::matchSimplifyAddToSub( 2541 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) { 2542 Register LHS = MI.getOperand(1).getReg(); 2543 Register RHS = MI.getOperand(2).getReg(); 2544 Register &NewLHS = std::get<0>(MatchInfo); 2545 Register &NewRHS = std::get<1>(MatchInfo); 2546 2547 // Helper lambda to check for opportunities for 2548 // ((0-A) + B) -> B - A 2549 // (A + (0-B)) -> A - B 2550 auto CheckFold = [&](Register &MaybeSub, Register &MaybeNewLHS) { 2551 if (!mi_match(MaybeSub, MRI, m_Neg(m_Reg(NewRHS)))) 2552 return false; 2553 NewLHS = MaybeNewLHS; 2554 return true; 2555 }; 2556 2557 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS); 2558 } 2559 2560 bool CombinerHelper::matchCombineInsertVecElts( 2561 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) { 2562 assert(MI.getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT && 2563 "Invalid opcode"); 2564 Register DstReg = MI.getOperand(0).getReg(); 2565 LLT DstTy = MRI.getType(DstReg); 2566 assert(DstTy.isVector() && "Invalid G_INSERT_VECTOR_ELT?"); 2567 unsigned NumElts = DstTy.getNumElements(); 2568 // If this MI is part of a sequence of insert_vec_elts, then 2569 // don't do the combine in the middle of the sequence. 2570 if (MRI.hasOneUse(DstReg) && MRI.use_instr_begin(DstReg)->getOpcode() == 2571 TargetOpcode::G_INSERT_VECTOR_ELT) 2572 return false; 2573 MachineInstr *CurrInst = &MI; 2574 MachineInstr *TmpInst; 2575 int64_t IntImm; 2576 Register TmpReg; 2577 MatchInfo.resize(NumElts); 2578 while (mi_match( 2579 CurrInst->getOperand(0).getReg(), MRI, 2580 m_GInsertVecElt(m_MInstr(TmpInst), m_Reg(TmpReg), m_ICst(IntImm)))) { 2581 if (IntImm >= NumElts) 2582 return false; 2583 if (!MatchInfo[IntImm]) 2584 MatchInfo[IntImm] = TmpReg; 2585 CurrInst = TmpInst; 2586 } 2587 // Variable index. 2588 if (CurrInst->getOpcode() == TargetOpcode::G_INSERT_VECTOR_ELT) 2589 return false; 2590 if (TmpInst->getOpcode() == TargetOpcode::G_BUILD_VECTOR) { 2591 for (unsigned I = 1; I < TmpInst->getNumOperands(); ++I) { 2592 if (!MatchInfo[I - 1].isValid()) 2593 MatchInfo[I - 1] = TmpInst->getOperand(I).getReg(); 2594 } 2595 return true; 2596 } 2597 // If we didn't end in a G_IMPLICIT_DEF, bail out. 2598 return TmpInst->getOpcode() == TargetOpcode::G_IMPLICIT_DEF; 2599 } 2600 2601 void CombinerHelper::applyCombineInsertVecElts( 2602 MachineInstr &MI, SmallVectorImpl<Register> &MatchInfo) { 2603 Builder.setInstr(MI); 2604 Register UndefReg; 2605 auto GetUndef = [&]() { 2606 if (UndefReg) 2607 return UndefReg; 2608 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 2609 UndefReg = Builder.buildUndef(DstTy.getScalarType()).getReg(0); 2610 return UndefReg; 2611 }; 2612 for (unsigned I = 0; I < MatchInfo.size(); ++I) { 2613 if (!MatchInfo[I]) 2614 MatchInfo[I] = GetUndef(); 2615 } 2616 Builder.buildBuildVector(MI.getOperand(0).getReg(), MatchInfo); 2617 MI.eraseFromParent(); 2618 } 2619 2620 void CombinerHelper::applySimplifyAddToSub( 2621 MachineInstr &MI, std::tuple<Register, Register> &MatchInfo) { 2622 Builder.setInstr(MI); 2623 Register SubLHS, SubRHS; 2624 std::tie(SubLHS, SubRHS) = MatchInfo; 2625 Builder.buildSub(MI.getOperand(0).getReg(), SubLHS, SubRHS); 2626 MI.eraseFromParent(); 2627 } 2628 2629 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands( 2630 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) { 2631 // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ... 2632 // 2633 // Creates the new hand + logic instruction (but does not insert them.) 2634 // 2635 // On success, MatchInfo is populated with the new instructions. These are 2636 // inserted in applyHoistLogicOpWithSameOpcodeHands. 2637 unsigned LogicOpcode = MI.getOpcode(); 2638 assert(LogicOpcode == TargetOpcode::G_AND || 2639 LogicOpcode == TargetOpcode::G_OR || 2640 LogicOpcode == TargetOpcode::G_XOR); 2641 MachineIRBuilder MIB(MI); 2642 Register Dst = MI.getOperand(0).getReg(); 2643 Register LHSReg = MI.getOperand(1).getReg(); 2644 Register RHSReg = MI.getOperand(2).getReg(); 2645 2646 // Don't recompute anything. 2647 if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg)) 2648 return false; 2649 2650 // Make sure we have (hand x, ...), (hand y, ...) 2651 MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI); 2652 MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI); 2653 if (!LeftHandInst || !RightHandInst) 2654 return false; 2655 unsigned HandOpcode = LeftHandInst->getOpcode(); 2656 if (HandOpcode != RightHandInst->getOpcode()) 2657 return false; 2658 if (!LeftHandInst->getOperand(1).isReg() || 2659 !RightHandInst->getOperand(1).isReg()) 2660 return false; 2661 2662 // Make sure the types match up, and if we're doing this post-legalization, 2663 // we end up with legal types. 2664 Register X = LeftHandInst->getOperand(1).getReg(); 2665 Register Y = RightHandInst->getOperand(1).getReg(); 2666 LLT XTy = MRI.getType(X); 2667 LLT YTy = MRI.getType(Y); 2668 if (XTy != YTy) 2669 return false; 2670 if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}})) 2671 return false; 2672 2673 // Optional extra source register. 2674 Register ExtraHandOpSrcReg; 2675 switch (HandOpcode) { 2676 default: 2677 return false; 2678 case TargetOpcode::G_ANYEXT: 2679 case TargetOpcode::G_SEXT: 2680 case TargetOpcode::G_ZEXT: { 2681 // Match: logic (ext X), (ext Y) --> ext (logic X, Y) 2682 break; 2683 } 2684 case TargetOpcode::G_AND: 2685 case TargetOpcode::G_ASHR: 2686 case TargetOpcode::G_LSHR: 2687 case TargetOpcode::G_SHL: { 2688 // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z 2689 MachineOperand &ZOp = LeftHandInst->getOperand(2); 2690 if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2))) 2691 return false; 2692 ExtraHandOpSrcReg = ZOp.getReg(); 2693 break; 2694 } 2695 } 2696 2697 // Record the steps to build the new instructions. 2698 // 2699 // Steps to build (logic x, y) 2700 auto NewLogicDst = MRI.createGenericVirtualRegister(XTy); 2701 OperandBuildSteps LogicBuildSteps = { 2702 [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); }, 2703 [=](MachineInstrBuilder &MIB) { MIB.addReg(X); }, 2704 [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }}; 2705 InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps); 2706 2707 // Steps to build hand (logic x, y), ...z 2708 OperandBuildSteps HandBuildSteps = { 2709 [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); }, 2710 [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }}; 2711 if (ExtraHandOpSrcReg.isValid()) 2712 HandBuildSteps.push_back( 2713 [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); }); 2714 InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps); 2715 2716 MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps}); 2717 return true; 2718 } 2719 2720 void CombinerHelper::applyBuildInstructionSteps( 2721 MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) { 2722 assert(MatchInfo.InstrsToBuild.size() && 2723 "Expected at least one instr to build?"); 2724 Builder.setInstr(MI); 2725 for (auto &InstrToBuild : MatchInfo.InstrsToBuild) { 2726 assert(InstrToBuild.Opcode && "Expected a valid opcode?"); 2727 assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?"); 2728 MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode); 2729 for (auto &OperandFn : InstrToBuild.OperandFns) 2730 OperandFn(Instr); 2731 } 2732 MI.eraseFromParent(); 2733 } 2734 2735 bool CombinerHelper::matchAshrShlToSextInreg( 2736 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) { 2737 assert(MI.getOpcode() == TargetOpcode::G_ASHR); 2738 int64_t ShlCst, AshrCst; 2739 Register Src; 2740 // FIXME: detect splat constant vectors. 2741 if (!mi_match(MI.getOperand(0).getReg(), MRI, 2742 m_GAShr(m_GShl(m_Reg(Src), m_ICst(ShlCst)), m_ICst(AshrCst)))) 2743 return false; 2744 if (ShlCst != AshrCst) 2745 return false; 2746 if (!isLegalOrBeforeLegalizer( 2747 {TargetOpcode::G_SEXT_INREG, {MRI.getType(Src)}})) 2748 return false; 2749 MatchInfo = std::make_tuple(Src, ShlCst); 2750 return true; 2751 } 2752 2753 void CombinerHelper::applyAshShlToSextInreg( 2754 MachineInstr &MI, std::tuple<Register, int64_t> &MatchInfo) { 2755 assert(MI.getOpcode() == TargetOpcode::G_ASHR); 2756 Register Src; 2757 int64_t ShiftAmt; 2758 std::tie(Src, ShiftAmt) = MatchInfo; 2759 unsigned Size = MRI.getType(Src).getScalarSizeInBits(); 2760 Builder.setInstrAndDebugLoc(MI); 2761 Builder.buildSExtInReg(MI.getOperand(0).getReg(), Src, Size - ShiftAmt); 2762 MI.eraseFromParent(); 2763 } 2764 2765 /// and(and(x, C1), C2) -> C1&C2 ? and(x, C1&C2) : 0 2766 bool CombinerHelper::matchOverlappingAnd( 2767 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 2768 assert(MI.getOpcode() == TargetOpcode::G_AND); 2769 2770 Register Dst = MI.getOperand(0).getReg(); 2771 LLT Ty = MRI.getType(Dst); 2772 2773 Register R; 2774 int64_t C1; 2775 int64_t C2; 2776 if (!mi_match( 2777 Dst, MRI, 2778 m_GAnd(m_GAnd(m_Reg(R), m_ICst(C1)), m_ICst(C2)))) 2779 return false; 2780 2781 MatchInfo = [=](MachineIRBuilder &B) { 2782 if (C1 & C2) { 2783 B.buildAnd(Dst, R, B.buildConstant(Ty, C1 & C2)); 2784 return; 2785 } 2786 auto Zero = B.buildConstant(Ty, 0); 2787 replaceRegWith(MRI, Dst, Zero->getOperand(0).getReg()); 2788 }; 2789 return true; 2790 } 2791 2792 bool CombinerHelper::matchRedundantAnd(MachineInstr &MI, 2793 Register &Replacement) { 2794 // Given 2795 // 2796 // %y:_(sN) = G_SOMETHING 2797 // %x:_(sN) = G_SOMETHING 2798 // %res:_(sN) = G_AND %x, %y 2799 // 2800 // Eliminate the G_AND when it is known that x & y == x or x & y == y. 2801 // 2802 // Patterns like this can appear as a result of legalization. E.g. 2803 // 2804 // %cmp:_(s32) = G_ICMP intpred(pred), %x(s32), %y 2805 // %one:_(s32) = G_CONSTANT i32 1 2806 // %and:_(s32) = G_AND %cmp, %one 2807 // 2808 // In this case, G_ICMP only produces a single bit, so x & 1 == x. 2809 assert(MI.getOpcode() == TargetOpcode::G_AND); 2810 if (!KB) 2811 return false; 2812 2813 Register AndDst = MI.getOperand(0).getReg(); 2814 LLT DstTy = MRI.getType(AndDst); 2815 2816 // FIXME: This should be removed once GISelKnownBits supports vectors. 2817 if (DstTy.isVector()) 2818 return false; 2819 2820 Register LHS = MI.getOperand(1).getReg(); 2821 Register RHS = MI.getOperand(2).getReg(); 2822 KnownBits LHSBits = KB->getKnownBits(LHS); 2823 KnownBits RHSBits = KB->getKnownBits(RHS); 2824 2825 // Check that x & Mask == x. 2826 // x & 1 == x, always 2827 // x & 0 == x, only if x is also 0 2828 // Meaning Mask has no effect if every bit is either one in Mask or zero in x. 2829 // 2830 // Check if we can replace AndDst with the LHS of the G_AND 2831 if (canReplaceReg(AndDst, LHS, MRI) && 2832 (LHSBits.Zero | RHSBits.One).isAllOnes()) { 2833 Replacement = LHS; 2834 return true; 2835 } 2836 2837 // Check if we can replace AndDst with the RHS of the G_AND 2838 if (canReplaceReg(AndDst, RHS, MRI) && 2839 (LHSBits.One | RHSBits.Zero).isAllOnes()) { 2840 Replacement = RHS; 2841 return true; 2842 } 2843 2844 return false; 2845 } 2846 2847 bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) { 2848 // Given 2849 // 2850 // %y:_(sN) = G_SOMETHING 2851 // %x:_(sN) = G_SOMETHING 2852 // %res:_(sN) = G_OR %x, %y 2853 // 2854 // Eliminate the G_OR when it is known that x | y == x or x | y == y. 2855 assert(MI.getOpcode() == TargetOpcode::G_OR); 2856 if (!KB) 2857 return false; 2858 2859 Register OrDst = MI.getOperand(0).getReg(); 2860 LLT DstTy = MRI.getType(OrDst); 2861 2862 // FIXME: This should be removed once GISelKnownBits supports vectors. 2863 if (DstTy.isVector()) 2864 return false; 2865 2866 Register LHS = MI.getOperand(1).getReg(); 2867 Register RHS = MI.getOperand(2).getReg(); 2868 KnownBits LHSBits = KB->getKnownBits(LHS); 2869 KnownBits RHSBits = KB->getKnownBits(RHS); 2870 2871 // Check that x | Mask == x. 2872 // x | 0 == x, always 2873 // x | 1 == x, only if x is also 1 2874 // Meaning Mask has no effect if every bit is either zero in Mask or one in x. 2875 // 2876 // Check if we can replace OrDst with the LHS of the G_OR 2877 if (canReplaceReg(OrDst, LHS, MRI) && 2878 (LHSBits.One | RHSBits.Zero).isAllOnes()) { 2879 Replacement = LHS; 2880 return true; 2881 } 2882 2883 // Check if we can replace OrDst with the RHS of the G_OR 2884 if (canReplaceReg(OrDst, RHS, MRI) && 2885 (LHSBits.Zero | RHSBits.One).isAllOnes()) { 2886 Replacement = RHS; 2887 return true; 2888 } 2889 2890 return false; 2891 } 2892 2893 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) { 2894 // If the input is already sign extended, just drop the extension. 2895 Register Src = MI.getOperand(1).getReg(); 2896 unsigned ExtBits = MI.getOperand(2).getImm(); 2897 unsigned TypeSize = MRI.getType(Src).getScalarSizeInBits(); 2898 return KB->computeNumSignBits(Src) >= (TypeSize - ExtBits + 1); 2899 } 2900 2901 static bool isConstValidTrue(const TargetLowering &TLI, unsigned ScalarSizeBits, 2902 int64_t Cst, bool IsVector, bool IsFP) { 2903 // For i1, Cst will always be -1 regardless of boolean contents. 2904 return (ScalarSizeBits == 1 && Cst == -1) || 2905 isConstTrueVal(TLI, Cst, IsVector, IsFP); 2906 } 2907 2908 bool CombinerHelper::matchNotCmp(MachineInstr &MI, 2909 SmallVectorImpl<Register> &RegsToNegate) { 2910 assert(MI.getOpcode() == TargetOpcode::G_XOR); 2911 LLT Ty = MRI.getType(MI.getOperand(0).getReg()); 2912 const auto &TLI = *Builder.getMF().getSubtarget().getTargetLowering(); 2913 Register XorSrc; 2914 Register CstReg; 2915 // We match xor(src, true) here. 2916 if (!mi_match(MI.getOperand(0).getReg(), MRI, 2917 m_GXor(m_Reg(XorSrc), m_Reg(CstReg)))) 2918 return false; 2919 2920 if (!MRI.hasOneNonDBGUse(XorSrc)) 2921 return false; 2922 2923 // Check that XorSrc is the root of a tree of comparisons combined with ANDs 2924 // and ORs. The suffix of RegsToNegate starting from index I is used a work 2925 // list of tree nodes to visit. 2926 RegsToNegate.push_back(XorSrc); 2927 // Remember whether the comparisons are all integer or all floating point. 2928 bool IsInt = false; 2929 bool IsFP = false; 2930 for (unsigned I = 0; I < RegsToNegate.size(); ++I) { 2931 Register Reg = RegsToNegate[I]; 2932 if (!MRI.hasOneNonDBGUse(Reg)) 2933 return false; 2934 MachineInstr *Def = MRI.getVRegDef(Reg); 2935 switch (Def->getOpcode()) { 2936 default: 2937 // Don't match if the tree contains anything other than ANDs, ORs and 2938 // comparisons. 2939 return false; 2940 case TargetOpcode::G_ICMP: 2941 if (IsFP) 2942 return false; 2943 IsInt = true; 2944 // When we apply the combine we will invert the predicate. 2945 break; 2946 case TargetOpcode::G_FCMP: 2947 if (IsInt) 2948 return false; 2949 IsFP = true; 2950 // When we apply the combine we will invert the predicate. 2951 break; 2952 case TargetOpcode::G_AND: 2953 case TargetOpcode::G_OR: 2954 // Implement De Morgan's laws: 2955 // ~(x & y) -> ~x | ~y 2956 // ~(x | y) -> ~x & ~y 2957 // When we apply the combine we will change the opcode and recursively 2958 // negate the operands. 2959 RegsToNegate.push_back(Def->getOperand(1).getReg()); 2960 RegsToNegate.push_back(Def->getOperand(2).getReg()); 2961 break; 2962 } 2963 } 2964 2965 // Now we know whether the comparisons are integer or floating point, check 2966 // the constant in the xor. 2967 int64_t Cst; 2968 if (Ty.isVector()) { 2969 MachineInstr *CstDef = MRI.getVRegDef(CstReg); 2970 auto MaybeCst = getIConstantSplatSExtVal(*CstDef, MRI); 2971 if (!MaybeCst) 2972 return false; 2973 if (!isConstValidTrue(TLI, Ty.getScalarSizeInBits(), *MaybeCst, true, IsFP)) 2974 return false; 2975 } else { 2976 if (!mi_match(CstReg, MRI, m_ICst(Cst))) 2977 return false; 2978 if (!isConstValidTrue(TLI, Ty.getSizeInBits(), Cst, false, IsFP)) 2979 return false; 2980 } 2981 2982 return true; 2983 } 2984 2985 void CombinerHelper::applyNotCmp(MachineInstr &MI, 2986 SmallVectorImpl<Register> &RegsToNegate) { 2987 for (Register Reg : RegsToNegate) { 2988 MachineInstr *Def = MRI.getVRegDef(Reg); 2989 Observer.changingInstr(*Def); 2990 // For each comparison, invert the opcode. For each AND and OR, change the 2991 // opcode. 2992 switch (Def->getOpcode()) { 2993 default: 2994 llvm_unreachable("Unexpected opcode"); 2995 case TargetOpcode::G_ICMP: 2996 case TargetOpcode::G_FCMP: { 2997 MachineOperand &PredOp = Def->getOperand(1); 2998 CmpInst::Predicate NewP = CmpInst::getInversePredicate( 2999 (CmpInst::Predicate)PredOp.getPredicate()); 3000 PredOp.setPredicate(NewP); 3001 break; 3002 } 3003 case TargetOpcode::G_AND: 3004 Def->setDesc(Builder.getTII().get(TargetOpcode::G_OR)); 3005 break; 3006 case TargetOpcode::G_OR: 3007 Def->setDesc(Builder.getTII().get(TargetOpcode::G_AND)); 3008 break; 3009 } 3010 Observer.changedInstr(*Def); 3011 } 3012 3013 replaceRegWith(MRI, MI.getOperand(0).getReg(), MI.getOperand(1).getReg()); 3014 MI.eraseFromParent(); 3015 } 3016 3017 bool CombinerHelper::matchXorOfAndWithSameReg( 3018 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) { 3019 // Match (xor (and x, y), y) (or any of its commuted cases) 3020 assert(MI.getOpcode() == TargetOpcode::G_XOR); 3021 Register &X = MatchInfo.first; 3022 Register &Y = MatchInfo.second; 3023 Register AndReg = MI.getOperand(1).getReg(); 3024 Register SharedReg = MI.getOperand(2).getReg(); 3025 3026 // Find a G_AND on either side of the G_XOR. 3027 // Look for one of 3028 // 3029 // (xor (and x, y), SharedReg) 3030 // (xor SharedReg, (and x, y)) 3031 if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) { 3032 std::swap(AndReg, SharedReg); 3033 if (!mi_match(AndReg, MRI, m_GAnd(m_Reg(X), m_Reg(Y)))) 3034 return false; 3035 } 3036 3037 // Only do this if we'll eliminate the G_AND. 3038 if (!MRI.hasOneNonDBGUse(AndReg)) 3039 return false; 3040 3041 // We can combine if SharedReg is the same as either the LHS or RHS of the 3042 // G_AND. 3043 if (Y != SharedReg) 3044 std::swap(X, Y); 3045 return Y == SharedReg; 3046 } 3047 3048 void CombinerHelper::applyXorOfAndWithSameReg( 3049 MachineInstr &MI, std::pair<Register, Register> &MatchInfo) { 3050 // Fold (xor (and x, y), y) -> (and (not x), y) 3051 Builder.setInstrAndDebugLoc(MI); 3052 Register X, Y; 3053 std::tie(X, Y) = MatchInfo; 3054 auto Not = Builder.buildNot(MRI.getType(X), X); 3055 Observer.changingInstr(MI); 3056 MI.setDesc(Builder.getTII().get(TargetOpcode::G_AND)); 3057 MI.getOperand(1).setReg(Not->getOperand(0).getReg()); 3058 MI.getOperand(2).setReg(Y); 3059 Observer.changedInstr(MI); 3060 } 3061 3062 bool CombinerHelper::matchPtrAddZero(MachineInstr &MI) { 3063 auto &PtrAdd = cast<GPtrAdd>(MI); 3064 Register DstReg = PtrAdd.getReg(0); 3065 LLT Ty = MRI.getType(DstReg); 3066 const DataLayout &DL = Builder.getMF().getDataLayout(); 3067 3068 if (DL.isNonIntegralAddressSpace(Ty.getScalarType().getAddressSpace())) 3069 return false; 3070 3071 if (Ty.isPointer()) { 3072 auto ConstVal = getIConstantVRegVal(PtrAdd.getBaseReg(), MRI); 3073 return ConstVal && *ConstVal == 0; 3074 } 3075 3076 assert(Ty.isVector() && "Expecting a vector type"); 3077 const MachineInstr *VecMI = MRI.getVRegDef(PtrAdd.getBaseReg()); 3078 return isBuildVectorAllZeros(*VecMI, MRI); 3079 } 3080 3081 void CombinerHelper::applyPtrAddZero(MachineInstr &MI) { 3082 auto &PtrAdd = cast<GPtrAdd>(MI); 3083 Builder.setInstrAndDebugLoc(PtrAdd); 3084 Builder.buildIntToPtr(PtrAdd.getReg(0), PtrAdd.getOffsetReg()); 3085 PtrAdd.eraseFromParent(); 3086 } 3087 3088 /// The second source operand is known to be a power of 2. 3089 void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) { 3090 Register DstReg = MI.getOperand(0).getReg(); 3091 Register Src0 = MI.getOperand(1).getReg(); 3092 Register Pow2Src1 = MI.getOperand(2).getReg(); 3093 LLT Ty = MRI.getType(DstReg); 3094 Builder.setInstrAndDebugLoc(MI); 3095 3096 // Fold (urem x, pow2) -> (and x, pow2-1) 3097 auto NegOne = Builder.buildConstant(Ty, -1); 3098 auto Add = Builder.buildAdd(Ty, Pow2Src1, NegOne); 3099 Builder.buildAnd(DstReg, Src0, Add); 3100 MI.eraseFromParent(); 3101 } 3102 3103 bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI, 3104 unsigned &SelectOpNo) { 3105 Register LHS = MI.getOperand(1).getReg(); 3106 Register RHS = MI.getOperand(2).getReg(); 3107 3108 Register OtherOperandReg = RHS; 3109 SelectOpNo = 1; 3110 MachineInstr *Select = MRI.getVRegDef(LHS); 3111 3112 // Don't do this unless the old select is going away. We want to eliminate the 3113 // binary operator, not replace a binop with a select. 3114 if (Select->getOpcode() != TargetOpcode::G_SELECT || 3115 !MRI.hasOneNonDBGUse(LHS)) { 3116 OtherOperandReg = LHS; 3117 SelectOpNo = 2; 3118 Select = MRI.getVRegDef(RHS); 3119 if (Select->getOpcode() != TargetOpcode::G_SELECT || 3120 !MRI.hasOneNonDBGUse(RHS)) 3121 return false; 3122 } 3123 3124 MachineInstr *SelectLHS = MRI.getVRegDef(Select->getOperand(2).getReg()); 3125 MachineInstr *SelectRHS = MRI.getVRegDef(Select->getOperand(3).getReg()); 3126 3127 if (!isConstantOrConstantVector(*SelectLHS, MRI, 3128 /*AllowFP*/ true, 3129 /*AllowOpaqueConstants*/ false)) 3130 return false; 3131 if (!isConstantOrConstantVector(*SelectRHS, MRI, 3132 /*AllowFP*/ true, 3133 /*AllowOpaqueConstants*/ false)) 3134 return false; 3135 3136 unsigned BinOpcode = MI.getOpcode(); 3137 3138 // We know know one of the operands is a select of constants. Now verify that 3139 // the other binary operator operand is either a constant, or we can handle a 3140 // variable. 3141 bool CanFoldNonConst = 3142 (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) && 3143 (isNullOrNullSplat(*SelectLHS, MRI) || 3144 isAllOnesOrAllOnesSplat(*SelectLHS, MRI)) && 3145 (isNullOrNullSplat(*SelectRHS, MRI) || 3146 isAllOnesOrAllOnesSplat(*SelectRHS, MRI)); 3147 if (CanFoldNonConst) 3148 return true; 3149 3150 return isConstantOrConstantVector(*MRI.getVRegDef(OtherOperandReg), MRI, 3151 /*AllowFP*/ true, 3152 /*AllowOpaqueConstants*/ false); 3153 } 3154 3155 /// \p SelectOperand is the operand in binary operator \p MI that is the select 3156 /// to fold. 3157 bool CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI, 3158 const unsigned &SelectOperand) { 3159 Builder.setInstrAndDebugLoc(MI); 3160 3161 Register Dst = MI.getOperand(0).getReg(); 3162 Register LHS = MI.getOperand(1).getReg(); 3163 Register RHS = MI.getOperand(2).getReg(); 3164 MachineInstr *Select = MRI.getVRegDef(MI.getOperand(SelectOperand).getReg()); 3165 3166 Register SelectCond = Select->getOperand(1).getReg(); 3167 Register SelectTrue = Select->getOperand(2).getReg(); 3168 Register SelectFalse = Select->getOperand(3).getReg(); 3169 3170 LLT Ty = MRI.getType(Dst); 3171 unsigned BinOpcode = MI.getOpcode(); 3172 3173 Register FoldTrue, FoldFalse; 3174 3175 // We have a select-of-constants followed by a binary operator with a 3176 // constant. Eliminate the binop by pulling the constant math into the select. 3177 // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO 3178 if (SelectOperand == 1) { 3179 // TODO: SelectionDAG verifies this actually constant folds before 3180 // committing to the combine. 3181 3182 FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0); 3183 FoldFalse = 3184 Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0); 3185 } else { 3186 FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0); 3187 FoldFalse = 3188 Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0); 3189 } 3190 3191 Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags()); 3192 Observer.erasingInstr(*Select); 3193 Select->eraseFromParent(); 3194 MI.eraseFromParent(); 3195 3196 return true; 3197 } 3198 3199 Optional<SmallVector<Register, 8>> 3200 CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const { 3201 assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!"); 3202 // We want to detect if Root is part of a tree which represents a bunch 3203 // of loads being merged into a larger load. We'll try to recognize patterns 3204 // like, for example: 3205 // 3206 // Reg Reg 3207 // \ / 3208 // OR_1 Reg 3209 // \ / 3210 // OR_2 3211 // \ Reg 3212 // .. / 3213 // Root 3214 // 3215 // Reg Reg Reg Reg 3216 // \ / \ / 3217 // OR_1 OR_2 3218 // \ / 3219 // \ / 3220 // ... 3221 // Root 3222 // 3223 // Each "Reg" may have been produced by a load + some arithmetic. This 3224 // function will save each of them. 3225 SmallVector<Register, 8> RegsToVisit; 3226 SmallVector<const MachineInstr *, 7> Ors = {Root}; 3227 3228 // In the "worst" case, we're dealing with a load for each byte. So, there 3229 // are at most #bytes - 1 ORs. 3230 const unsigned MaxIter = 3231 MRI.getType(Root->getOperand(0).getReg()).getSizeInBytes() - 1; 3232 for (unsigned Iter = 0; Iter < MaxIter; ++Iter) { 3233 if (Ors.empty()) 3234 break; 3235 const MachineInstr *Curr = Ors.pop_back_val(); 3236 Register OrLHS = Curr->getOperand(1).getReg(); 3237 Register OrRHS = Curr->getOperand(2).getReg(); 3238 3239 // In the combine, we want to elimate the entire tree. 3240 if (!MRI.hasOneNonDBGUse(OrLHS) || !MRI.hasOneNonDBGUse(OrRHS)) 3241 return None; 3242 3243 // If it's a G_OR, save it and continue to walk. If it's not, then it's 3244 // something that may be a load + arithmetic. 3245 if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrLHS, MRI)) 3246 Ors.push_back(Or); 3247 else 3248 RegsToVisit.push_back(OrLHS); 3249 if (const MachineInstr *Or = getOpcodeDef(TargetOpcode::G_OR, OrRHS, MRI)) 3250 Ors.push_back(Or); 3251 else 3252 RegsToVisit.push_back(OrRHS); 3253 } 3254 3255 // We're going to try and merge each register into a wider power-of-2 type, 3256 // so we ought to have an even number of registers. 3257 if (RegsToVisit.empty() || RegsToVisit.size() % 2 != 0) 3258 return None; 3259 return RegsToVisit; 3260 } 3261 3262 /// Helper function for findLoadOffsetsForLoadOrCombine. 3263 /// 3264 /// Check if \p Reg is the result of loading a \p MemSizeInBits wide value, 3265 /// and then moving that value into a specific byte offset. 3266 /// 3267 /// e.g. x[i] << 24 3268 /// 3269 /// \returns The load instruction and the byte offset it is moved into. 3270 static Optional<std::pair<GZExtLoad *, int64_t>> 3271 matchLoadAndBytePosition(Register Reg, unsigned MemSizeInBits, 3272 const MachineRegisterInfo &MRI) { 3273 assert(MRI.hasOneNonDBGUse(Reg) && 3274 "Expected Reg to only have one non-debug use?"); 3275 Register MaybeLoad; 3276 int64_t Shift; 3277 if (!mi_match(Reg, MRI, 3278 m_OneNonDBGUse(m_GShl(m_Reg(MaybeLoad), m_ICst(Shift))))) { 3279 Shift = 0; 3280 MaybeLoad = Reg; 3281 } 3282 3283 if (Shift % MemSizeInBits != 0) 3284 return None; 3285 3286 // TODO: Handle other types of loads. 3287 auto *Load = getOpcodeDef<GZExtLoad>(MaybeLoad, MRI); 3288 if (!Load) 3289 return None; 3290 3291 if (!Load->isUnordered() || Load->getMemSizeInBits() != MemSizeInBits) 3292 return None; 3293 3294 return std::make_pair(Load, Shift / MemSizeInBits); 3295 } 3296 3297 Optional<std::tuple<GZExtLoad *, int64_t, GZExtLoad *>> 3298 CombinerHelper::findLoadOffsetsForLoadOrCombine( 3299 SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx, 3300 const SmallVector<Register, 8> &RegsToVisit, const unsigned MemSizeInBits) { 3301 3302 // Each load found for the pattern. There should be one for each RegsToVisit. 3303 SmallSetVector<const MachineInstr *, 8> Loads; 3304 3305 // The lowest index used in any load. (The lowest "i" for each x[i].) 3306 int64_t LowestIdx = INT64_MAX; 3307 3308 // The load which uses the lowest index. 3309 GZExtLoad *LowestIdxLoad = nullptr; 3310 3311 // Keeps track of the load indices we see. We shouldn't see any indices twice. 3312 SmallSet<int64_t, 8> SeenIdx; 3313 3314 // Ensure each load is in the same MBB. 3315 // TODO: Support multiple MachineBasicBlocks. 3316 MachineBasicBlock *MBB = nullptr; 3317 const MachineMemOperand *MMO = nullptr; 3318 3319 // Earliest instruction-order load in the pattern. 3320 GZExtLoad *EarliestLoad = nullptr; 3321 3322 // Latest instruction-order load in the pattern. 3323 GZExtLoad *LatestLoad = nullptr; 3324 3325 // Base pointer which every load should share. 3326 Register BasePtr; 3327 3328 // We want to find a load for each register. Each load should have some 3329 // appropriate bit twiddling arithmetic. During this loop, we will also keep 3330 // track of the load which uses the lowest index. Later, we will check if we 3331 // can use its pointer in the final, combined load. 3332 for (auto Reg : RegsToVisit) { 3333 // Find the load, and find the position that it will end up in (e.g. a 3334 // shifted) value. 3335 auto LoadAndPos = matchLoadAndBytePosition(Reg, MemSizeInBits, MRI); 3336 if (!LoadAndPos) 3337 return None; 3338 GZExtLoad *Load; 3339 int64_t DstPos; 3340 std::tie(Load, DstPos) = *LoadAndPos; 3341 3342 // TODO: Handle multiple MachineBasicBlocks. Currently not handled because 3343 // it is difficult to check for stores/calls/etc between loads. 3344 MachineBasicBlock *LoadMBB = Load->getParent(); 3345 if (!MBB) 3346 MBB = LoadMBB; 3347 if (LoadMBB != MBB) 3348 return None; 3349 3350 // Make sure that the MachineMemOperands of every seen load are compatible. 3351 auto &LoadMMO = Load->getMMO(); 3352 if (!MMO) 3353 MMO = &LoadMMO; 3354 if (MMO->getAddrSpace() != LoadMMO.getAddrSpace()) 3355 return None; 3356 3357 // Find out what the base pointer and index for the load is. 3358 Register LoadPtr; 3359 int64_t Idx; 3360 if (!mi_match(Load->getOperand(1).getReg(), MRI, 3361 m_GPtrAdd(m_Reg(LoadPtr), m_ICst(Idx)))) { 3362 LoadPtr = Load->getOperand(1).getReg(); 3363 Idx = 0; 3364 } 3365 3366 // Don't combine things like a[i], a[i] -> a bigger load. 3367 if (!SeenIdx.insert(Idx).second) 3368 return None; 3369 3370 // Every load must share the same base pointer; don't combine things like: 3371 // 3372 // a[i], b[i + 1] -> a bigger load. 3373 if (!BasePtr.isValid()) 3374 BasePtr = LoadPtr; 3375 if (BasePtr != LoadPtr) 3376 return None; 3377 3378 if (Idx < LowestIdx) { 3379 LowestIdx = Idx; 3380 LowestIdxLoad = Load; 3381 } 3382 3383 // Keep track of the byte offset that this load ends up at. If we have seen 3384 // the byte offset, then stop here. We do not want to combine: 3385 // 3386 // a[i] << 16, a[i + k] << 16 -> a bigger load. 3387 if (!MemOffset2Idx.try_emplace(DstPos, Idx).second) 3388 return None; 3389 Loads.insert(Load); 3390 3391 // Keep track of the position of the earliest/latest loads in the pattern. 3392 // We will check that there are no load fold barriers between them later 3393 // on. 3394 // 3395 // FIXME: Is there a better way to check for load fold barriers? 3396 if (!EarliestLoad || dominates(*Load, *EarliestLoad)) 3397 EarliestLoad = Load; 3398 if (!LatestLoad || dominates(*LatestLoad, *Load)) 3399 LatestLoad = Load; 3400 } 3401 3402 // We found a load for each register. Let's check if each load satisfies the 3403 // pattern. 3404 assert(Loads.size() == RegsToVisit.size() && 3405 "Expected to find a load for each register?"); 3406 assert(EarliestLoad != LatestLoad && EarliestLoad && 3407 LatestLoad && "Expected at least two loads?"); 3408 3409 // Check if there are any stores, calls, etc. between any of the loads. If 3410 // there are, then we can't safely perform the combine. 3411 // 3412 // MaxIter is chosen based off the (worst case) number of iterations it 3413 // typically takes to succeed in the LLVM test suite plus some padding. 3414 // 3415 // FIXME: Is there a better way to check for load fold barriers? 3416 const unsigned MaxIter = 20; 3417 unsigned Iter = 0; 3418 for (const auto &MI : instructionsWithoutDebug(EarliestLoad->getIterator(), 3419 LatestLoad->getIterator())) { 3420 if (Loads.count(&MI)) 3421 continue; 3422 if (MI.isLoadFoldBarrier()) 3423 return None; 3424 if (Iter++ == MaxIter) 3425 return None; 3426 } 3427 3428 return std::make_tuple(LowestIdxLoad, LowestIdx, LatestLoad); 3429 } 3430 3431 bool CombinerHelper::matchLoadOrCombine( 3432 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 3433 assert(MI.getOpcode() == TargetOpcode::G_OR); 3434 MachineFunction &MF = *MI.getMF(); 3435 // Assuming a little-endian target, transform: 3436 // s8 *a = ... 3437 // s32 val = a[0] | (a[1] << 8) | (a[2] << 16) | (a[3] << 24) 3438 // => 3439 // s32 val = *((i32)a) 3440 // 3441 // s8 *a = ... 3442 // s32 val = (a[0] << 24) | (a[1] << 16) | (a[2] << 8) | a[3] 3443 // => 3444 // s32 val = BSWAP(*((s32)a)) 3445 Register Dst = MI.getOperand(0).getReg(); 3446 LLT Ty = MRI.getType(Dst); 3447 if (Ty.isVector()) 3448 return false; 3449 3450 // We need to combine at least two loads into this type. Since the smallest 3451 // possible load is into a byte, we need at least a 16-bit wide type. 3452 const unsigned WideMemSizeInBits = Ty.getSizeInBits(); 3453 if (WideMemSizeInBits < 16 || WideMemSizeInBits % 8 != 0) 3454 return false; 3455 3456 // Match a collection of non-OR instructions in the pattern. 3457 auto RegsToVisit = findCandidatesForLoadOrCombine(&MI); 3458 if (!RegsToVisit) 3459 return false; 3460 3461 // We have a collection of non-OR instructions. Figure out how wide each of 3462 // the small loads should be based off of the number of potential loads we 3463 // found. 3464 const unsigned NarrowMemSizeInBits = WideMemSizeInBits / RegsToVisit->size(); 3465 if (NarrowMemSizeInBits % 8 != 0) 3466 return false; 3467 3468 // Check if each register feeding into each OR is a load from the same 3469 // base pointer + some arithmetic. 3470 // 3471 // e.g. a[0], a[1] << 8, a[2] << 16, etc. 3472 // 3473 // Also verify that each of these ends up putting a[i] into the same memory 3474 // offset as a load into a wide type would. 3475 SmallDenseMap<int64_t, int64_t, 8> MemOffset2Idx; 3476 GZExtLoad *LowestIdxLoad, *LatestLoad; 3477 int64_t LowestIdx; 3478 auto MaybeLoadInfo = findLoadOffsetsForLoadOrCombine( 3479 MemOffset2Idx, *RegsToVisit, NarrowMemSizeInBits); 3480 if (!MaybeLoadInfo) 3481 return false; 3482 std::tie(LowestIdxLoad, LowestIdx, LatestLoad) = *MaybeLoadInfo; 3483 3484 // We have a bunch of loads being OR'd together. Using the addresses + offsets 3485 // we found before, check if this corresponds to a big or little endian byte 3486 // pattern. If it does, then we can represent it using a load + possibly a 3487 // BSWAP. 3488 bool IsBigEndianTarget = MF.getDataLayout().isBigEndian(); 3489 Optional<bool> IsBigEndian = isBigEndian(MemOffset2Idx, LowestIdx); 3490 if (!IsBigEndian) 3491 return false; 3492 bool NeedsBSwap = IsBigEndianTarget != *IsBigEndian; 3493 if (NeedsBSwap && !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {Ty}})) 3494 return false; 3495 3496 // Make sure that the load from the lowest index produces offset 0 in the 3497 // final value. 3498 // 3499 // This ensures that we won't combine something like this: 3500 // 3501 // load x[i] -> byte 2 3502 // load x[i+1] -> byte 0 ---> wide_load x[i] 3503 // load x[i+2] -> byte 1 3504 const unsigned NumLoadsInTy = WideMemSizeInBits / NarrowMemSizeInBits; 3505 const unsigned ZeroByteOffset = 3506 *IsBigEndian 3507 ? bigEndianByteAt(NumLoadsInTy, 0) 3508 : littleEndianByteAt(NumLoadsInTy, 0); 3509 auto ZeroOffsetIdx = MemOffset2Idx.find(ZeroByteOffset); 3510 if (ZeroOffsetIdx == MemOffset2Idx.end() || 3511 ZeroOffsetIdx->second != LowestIdx) 3512 return false; 3513 3514 // We wil reuse the pointer from the load which ends up at byte offset 0. It 3515 // may not use index 0. 3516 Register Ptr = LowestIdxLoad->getPointerReg(); 3517 const MachineMemOperand &MMO = LowestIdxLoad->getMMO(); 3518 LegalityQuery::MemDesc MMDesc(MMO); 3519 MMDesc.MemoryTy = Ty; 3520 if (!isLegalOrBeforeLegalizer( 3521 {TargetOpcode::G_LOAD, {Ty, MRI.getType(Ptr)}, {MMDesc}})) 3522 return false; 3523 auto PtrInfo = MMO.getPointerInfo(); 3524 auto *NewMMO = MF.getMachineMemOperand(&MMO, PtrInfo, WideMemSizeInBits / 8); 3525 3526 // Load must be allowed and fast on the target. 3527 LLVMContext &C = MF.getFunction().getContext(); 3528 auto &DL = MF.getDataLayout(); 3529 bool Fast = false; 3530 if (!getTargetLowering().allowsMemoryAccess(C, DL, Ty, *NewMMO, &Fast) || 3531 !Fast) 3532 return false; 3533 3534 MatchInfo = [=](MachineIRBuilder &MIB) { 3535 MIB.setInstrAndDebugLoc(*LatestLoad); 3536 Register LoadDst = NeedsBSwap ? MRI.cloneVirtualRegister(Dst) : Dst; 3537 MIB.buildLoad(LoadDst, Ptr, *NewMMO); 3538 if (NeedsBSwap) 3539 MIB.buildBSwap(Dst, LoadDst); 3540 }; 3541 return true; 3542 } 3543 3544 /// Check if the store \p Store is a truncstore that can be merged. That is, 3545 /// it's a store of a shifted value of \p SrcVal. If \p SrcVal is an empty 3546 /// Register then it does not need to match and SrcVal is set to the source 3547 /// value found. 3548 /// On match, returns the start byte offset of the \p SrcVal that is being 3549 /// stored. 3550 static Optional<int64_t> getTruncStoreByteOffset(GStore &Store, Register &SrcVal, 3551 MachineRegisterInfo &MRI) { 3552 Register TruncVal; 3553 if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal)))) 3554 return None; 3555 3556 // The shift amount must be a constant multiple of the narrow type. 3557 // It is translated to the offset address in the wide source value "y". 3558 // 3559 // x = G_LSHR y, ShiftAmtC 3560 // s8 z = G_TRUNC x 3561 // store z, ... 3562 Register FoundSrcVal; 3563 int64_t ShiftAmt; 3564 if (!mi_match(TruncVal, MRI, 3565 m_any_of(m_GLShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt)), 3566 m_GAShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt))))) { 3567 if (!SrcVal.isValid() || TruncVal == SrcVal) { 3568 if (!SrcVal.isValid()) 3569 SrcVal = TruncVal; 3570 return 0; // If it's the lowest index store. 3571 } 3572 return None; 3573 } 3574 3575 unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits(); 3576 if (ShiftAmt % NarrowBits!= 0) 3577 return None; 3578 const unsigned Offset = ShiftAmt / NarrowBits; 3579 3580 if (SrcVal.isValid() && FoundSrcVal != SrcVal) 3581 return None; 3582 3583 if (!SrcVal.isValid()) 3584 SrcVal = FoundSrcVal; 3585 else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal)) 3586 return None; 3587 return Offset; 3588 } 3589 3590 /// Match a pattern where a wide type scalar value is stored by several narrow 3591 /// stores. Fold it into a single store or a BSWAP and a store if the targets 3592 /// supports it. 3593 /// 3594 /// Assuming little endian target: 3595 /// i8 *p = ... 3596 /// i32 val = ... 3597 /// p[0] = (val >> 0) & 0xFF; 3598 /// p[1] = (val >> 8) & 0xFF; 3599 /// p[2] = (val >> 16) & 0xFF; 3600 /// p[3] = (val >> 24) & 0xFF; 3601 /// => 3602 /// *((i32)p) = val; 3603 /// 3604 /// i8 *p = ... 3605 /// i32 val = ... 3606 /// p[0] = (val >> 24) & 0xFF; 3607 /// p[1] = (val >> 16) & 0xFF; 3608 /// p[2] = (val >> 8) & 0xFF; 3609 /// p[3] = (val >> 0) & 0xFF; 3610 /// => 3611 /// *((i32)p) = BSWAP(val); 3612 bool CombinerHelper::matchTruncStoreMerge(MachineInstr &MI, 3613 MergeTruncStoresInfo &MatchInfo) { 3614 auto &StoreMI = cast<GStore>(MI); 3615 LLT MemTy = StoreMI.getMMO().getMemoryType(); 3616 3617 // We only handle merging simple stores of 1-4 bytes. 3618 if (!MemTy.isScalar()) 3619 return false; 3620 switch (MemTy.getSizeInBits()) { 3621 case 8: 3622 case 16: 3623 case 32: 3624 break; 3625 default: 3626 return false; 3627 } 3628 if (!StoreMI.isSimple()) 3629 return false; 3630 3631 // We do a simple search for mergeable stores prior to this one. 3632 // Any potential alias hazard along the way terminates the search. 3633 SmallVector<GStore *> FoundStores; 3634 3635 // We're looking for: 3636 // 1) a (store(trunc(...))) 3637 // 2) of an LSHR/ASHR of a single wide value, by the appropriate shift to get 3638 // the partial value stored. 3639 // 3) where the offsets form either a little or big-endian sequence. 3640 3641 auto &LastStore = StoreMI; 3642 3643 // The single base pointer that all stores must use. 3644 Register BaseReg; 3645 int64_t LastOffset; 3646 if (!mi_match(LastStore.getPointerReg(), MRI, 3647 m_GPtrAdd(m_Reg(BaseReg), m_ICst(LastOffset)))) { 3648 BaseReg = LastStore.getPointerReg(); 3649 LastOffset = 0; 3650 } 3651 3652 GStore *LowestIdxStore = &LastStore; 3653 int64_t LowestIdxOffset = LastOffset; 3654 3655 Register WideSrcVal; 3656 auto LowestShiftAmt = getTruncStoreByteOffset(LastStore, WideSrcVal, MRI); 3657 if (!LowestShiftAmt) 3658 return false; // Didn't match a trunc. 3659 assert(WideSrcVal.isValid()); 3660 3661 LLT WideStoreTy = MRI.getType(WideSrcVal); 3662 // The wide type might not be a multiple of the memory type, e.g. s48 and s32. 3663 if (WideStoreTy.getSizeInBits() % MemTy.getSizeInBits() != 0) 3664 return false; 3665 const unsigned NumStoresRequired = 3666 WideStoreTy.getSizeInBits() / MemTy.getSizeInBits(); 3667 3668 SmallVector<int64_t, 8> OffsetMap(NumStoresRequired, INT64_MAX); 3669 OffsetMap[*LowestShiftAmt] = LastOffset; 3670 FoundStores.emplace_back(&LastStore); 3671 3672 // Search the block up for more stores. 3673 // We use a search threshold of 10 instructions here because the combiner 3674 // works top-down within a block, and we don't want to search an unbounded 3675 // number of predecessor instructions trying to find matching stores. 3676 // If we moved this optimization into a separate pass then we could probably 3677 // use a more efficient search without having a hard-coded threshold. 3678 const int MaxInstsToCheck = 10; 3679 int NumInstsChecked = 0; 3680 for (auto II = ++LastStore.getReverseIterator(); 3681 II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck; 3682 ++II) { 3683 NumInstsChecked++; 3684 GStore *NewStore; 3685 if ((NewStore = dyn_cast<GStore>(&*II))) { 3686 if (NewStore->getMMO().getMemoryType() != MemTy || !NewStore->isSimple()) 3687 break; 3688 } else if (II->isLoadFoldBarrier() || II->mayLoad()) { 3689 break; 3690 } else { 3691 continue; // This is a safe instruction we can look past. 3692 } 3693 3694 Register NewBaseReg; 3695 int64_t MemOffset; 3696 // Check we're storing to the same base + some offset. 3697 if (!mi_match(NewStore->getPointerReg(), MRI, 3698 m_GPtrAdd(m_Reg(NewBaseReg), m_ICst(MemOffset)))) { 3699 NewBaseReg = NewStore->getPointerReg(); 3700 MemOffset = 0; 3701 } 3702 if (BaseReg != NewBaseReg) 3703 break; 3704 3705 auto ShiftByteOffset = getTruncStoreByteOffset(*NewStore, WideSrcVal, MRI); 3706 if (!ShiftByteOffset) 3707 break; 3708 if (MemOffset < LowestIdxOffset) { 3709 LowestIdxOffset = MemOffset; 3710 LowestIdxStore = NewStore; 3711 } 3712 3713 // Map the offset in the store and the offset in the combined value, and 3714 // early return if it has been set before. 3715 if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired || 3716 OffsetMap[*ShiftByteOffset] != INT64_MAX) 3717 break; 3718 OffsetMap[*ShiftByteOffset] = MemOffset; 3719 3720 FoundStores.emplace_back(NewStore); 3721 // Reset counter since we've found a matching inst. 3722 NumInstsChecked = 0; 3723 if (FoundStores.size() == NumStoresRequired) 3724 break; 3725 } 3726 3727 if (FoundStores.size() != NumStoresRequired) { 3728 return false; 3729 } 3730 3731 const auto &DL = LastStore.getMF()->getDataLayout(); 3732 auto &C = LastStore.getMF()->getFunction().getContext(); 3733 // Check that a store of the wide type is both allowed and fast on the target 3734 bool Fast = false; 3735 bool Allowed = getTargetLowering().allowsMemoryAccess( 3736 C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast); 3737 if (!Allowed || !Fast) 3738 return false; 3739 3740 // Check if the pieces of the value are going to the expected places in memory 3741 // to merge the stores. 3742 unsigned NarrowBits = MemTy.getScalarSizeInBits(); 3743 auto checkOffsets = [&](bool MatchLittleEndian) { 3744 if (MatchLittleEndian) { 3745 for (unsigned i = 0; i != NumStoresRequired; ++i) 3746 if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset) 3747 return false; 3748 } else { // MatchBigEndian by reversing loop counter. 3749 for (unsigned i = 0, j = NumStoresRequired - 1; i != NumStoresRequired; 3750 ++i, --j) 3751 if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset) 3752 return false; 3753 } 3754 return true; 3755 }; 3756 3757 // Check if the offsets line up for the native data layout of this target. 3758 bool NeedBswap = false; 3759 bool NeedRotate = false; 3760 if (!checkOffsets(DL.isLittleEndian())) { 3761 // Special-case: check if byte offsets line up for the opposite endian. 3762 if (NarrowBits == 8 && checkOffsets(DL.isBigEndian())) 3763 NeedBswap = true; 3764 else if (NumStoresRequired == 2 && checkOffsets(DL.isBigEndian())) 3765 NeedRotate = true; 3766 else 3767 return false; 3768 } 3769 3770 if (NeedBswap && 3771 !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}})) 3772 return false; 3773 if (NeedRotate && 3774 !isLegalOrBeforeLegalizer({TargetOpcode::G_ROTR, {WideStoreTy}})) 3775 return false; 3776 3777 MatchInfo.NeedBSwap = NeedBswap; 3778 MatchInfo.NeedRotate = NeedRotate; 3779 MatchInfo.LowestIdxStore = LowestIdxStore; 3780 MatchInfo.WideSrcVal = WideSrcVal; 3781 MatchInfo.FoundStores = std::move(FoundStores); 3782 return true; 3783 } 3784 3785 void CombinerHelper::applyTruncStoreMerge(MachineInstr &MI, 3786 MergeTruncStoresInfo &MatchInfo) { 3787 3788 Builder.setInstrAndDebugLoc(MI); 3789 Register WideSrcVal = MatchInfo.WideSrcVal; 3790 LLT WideStoreTy = MRI.getType(WideSrcVal); 3791 3792 if (MatchInfo.NeedBSwap) { 3793 WideSrcVal = Builder.buildBSwap(WideStoreTy, WideSrcVal).getReg(0); 3794 } else if (MatchInfo.NeedRotate) { 3795 assert(WideStoreTy.getSizeInBits() % 2 == 0 && 3796 "Unexpected type for rotate"); 3797 auto RotAmt = 3798 Builder.buildConstant(WideStoreTy, WideStoreTy.getSizeInBits() / 2); 3799 WideSrcVal = 3800 Builder.buildRotateRight(WideStoreTy, WideSrcVal, RotAmt).getReg(0); 3801 } 3802 3803 Builder.buildStore(WideSrcVal, MatchInfo.LowestIdxStore->getPointerReg(), 3804 MatchInfo.LowestIdxStore->getMMO().getPointerInfo(), 3805 MatchInfo.LowestIdxStore->getMMO().getAlign()); 3806 3807 // Erase the old stores. 3808 for (auto *ST : MatchInfo.FoundStores) 3809 ST->eraseFromParent(); 3810 } 3811 3812 bool CombinerHelper::matchExtendThroughPhis(MachineInstr &MI, 3813 MachineInstr *&ExtMI) { 3814 assert(MI.getOpcode() == TargetOpcode::G_PHI); 3815 3816 Register DstReg = MI.getOperand(0).getReg(); 3817 3818 // TODO: Extending a vector may be expensive, don't do this until heuristics 3819 // are better. 3820 if (MRI.getType(DstReg).isVector()) 3821 return false; 3822 3823 // Try to match a phi, whose only use is an extend. 3824 if (!MRI.hasOneNonDBGUse(DstReg)) 3825 return false; 3826 ExtMI = &*MRI.use_instr_nodbg_begin(DstReg); 3827 switch (ExtMI->getOpcode()) { 3828 case TargetOpcode::G_ANYEXT: 3829 return true; // G_ANYEXT is usually free. 3830 case TargetOpcode::G_ZEXT: 3831 case TargetOpcode::G_SEXT: 3832 break; 3833 default: 3834 return false; 3835 } 3836 3837 // If the target is likely to fold this extend away, don't propagate. 3838 if (Builder.getTII().isExtendLikelyToBeFolded(*ExtMI, MRI)) 3839 return false; 3840 3841 // We don't want to propagate the extends unless there's a good chance that 3842 // they'll be optimized in some way. 3843 // Collect the unique incoming values. 3844 SmallPtrSet<MachineInstr *, 4> InSrcs; 3845 for (unsigned Idx = 1; Idx < MI.getNumOperands(); Idx += 2) { 3846 auto *DefMI = getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI); 3847 switch (DefMI->getOpcode()) { 3848 case TargetOpcode::G_LOAD: 3849 case TargetOpcode::G_TRUNC: 3850 case TargetOpcode::G_SEXT: 3851 case TargetOpcode::G_ZEXT: 3852 case TargetOpcode::G_ANYEXT: 3853 case TargetOpcode::G_CONSTANT: 3854 InSrcs.insert(getDefIgnoringCopies(MI.getOperand(Idx).getReg(), MRI)); 3855 // Don't try to propagate if there are too many places to create new 3856 // extends, chances are it'll increase code size. 3857 if (InSrcs.size() > 2) 3858 return false; 3859 break; 3860 default: 3861 return false; 3862 } 3863 } 3864 return true; 3865 } 3866 3867 void CombinerHelper::applyExtendThroughPhis(MachineInstr &MI, 3868 MachineInstr *&ExtMI) { 3869 assert(MI.getOpcode() == TargetOpcode::G_PHI); 3870 Register DstReg = ExtMI->getOperand(0).getReg(); 3871 LLT ExtTy = MRI.getType(DstReg); 3872 3873 // Propagate the extension into the block of each incoming reg's block. 3874 // Use a SetVector here because PHIs can have duplicate edges, and we want 3875 // deterministic iteration order. 3876 SmallSetVector<MachineInstr *, 8> SrcMIs; 3877 SmallDenseMap<MachineInstr *, MachineInstr *, 8> OldToNewSrcMap; 3878 for (unsigned SrcIdx = 1; SrcIdx < MI.getNumOperands(); SrcIdx += 2) { 3879 auto *SrcMI = MRI.getVRegDef(MI.getOperand(SrcIdx).getReg()); 3880 if (!SrcMIs.insert(SrcMI)) 3881 continue; 3882 3883 // Build an extend after each src inst. 3884 auto *MBB = SrcMI->getParent(); 3885 MachineBasicBlock::iterator InsertPt = ++SrcMI->getIterator(); 3886 if (InsertPt != MBB->end() && InsertPt->isPHI()) 3887 InsertPt = MBB->getFirstNonPHI(); 3888 3889 Builder.setInsertPt(*SrcMI->getParent(), InsertPt); 3890 Builder.setDebugLoc(MI.getDebugLoc()); 3891 auto NewExt = Builder.buildExtOrTrunc(ExtMI->getOpcode(), ExtTy, 3892 SrcMI->getOperand(0).getReg()); 3893 OldToNewSrcMap[SrcMI] = NewExt; 3894 } 3895 3896 // Create a new phi with the extended inputs. 3897 Builder.setInstrAndDebugLoc(MI); 3898 auto NewPhi = Builder.buildInstrNoInsert(TargetOpcode::G_PHI); 3899 NewPhi.addDef(DstReg); 3900 for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) { 3901 if (!MO.isReg()) { 3902 NewPhi.addMBB(MO.getMBB()); 3903 continue; 3904 } 3905 auto *NewSrc = OldToNewSrcMap[MRI.getVRegDef(MO.getReg())]; 3906 NewPhi.addUse(NewSrc->getOperand(0).getReg()); 3907 } 3908 Builder.insertInstr(NewPhi); 3909 ExtMI->eraseFromParent(); 3910 } 3911 3912 bool CombinerHelper::matchExtractVecEltBuildVec(MachineInstr &MI, 3913 Register &Reg) { 3914 assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_VECTOR_ELT); 3915 // If we have a constant index, look for a G_BUILD_VECTOR source 3916 // and find the source register that the index maps to. 3917 Register SrcVec = MI.getOperand(1).getReg(); 3918 LLT SrcTy = MRI.getType(SrcVec); 3919 if (!isLegalOrBeforeLegalizer( 3920 {TargetOpcode::G_BUILD_VECTOR, {SrcTy, SrcTy.getElementType()}})) 3921 return false; 3922 3923 auto Cst = getIConstantVRegValWithLookThrough(MI.getOperand(2).getReg(), MRI); 3924 if (!Cst || Cst->Value.getZExtValue() >= SrcTy.getNumElements()) 3925 return false; 3926 3927 unsigned VecIdx = Cst->Value.getZExtValue(); 3928 MachineInstr *BuildVecMI = 3929 getOpcodeDef(TargetOpcode::G_BUILD_VECTOR, SrcVec, MRI); 3930 if (!BuildVecMI) { 3931 BuildVecMI = getOpcodeDef(TargetOpcode::G_BUILD_VECTOR_TRUNC, SrcVec, MRI); 3932 if (!BuildVecMI) 3933 return false; 3934 LLT ScalarTy = MRI.getType(BuildVecMI->getOperand(1).getReg()); 3935 if (!isLegalOrBeforeLegalizer( 3936 {TargetOpcode::G_BUILD_VECTOR_TRUNC, {SrcTy, ScalarTy}})) 3937 return false; 3938 } 3939 3940 EVT Ty(getMVTForLLT(SrcTy)); 3941 if (!MRI.hasOneNonDBGUse(SrcVec) && 3942 !getTargetLowering().aggressivelyPreferBuildVectorSources(Ty)) 3943 return false; 3944 3945 Reg = BuildVecMI->getOperand(VecIdx + 1).getReg(); 3946 return true; 3947 } 3948 3949 void CombinerHelper::applyExtractVecEltBuildVec(MachineInstr &MI, 3950 Register &Reg) { 3951 // Check the type of the register, since it may have come from a 3952 // G_BUILD_VECTOR_TRUNC. 3953 LLT ScalarTy = MRI.getType(Reg); 3954 Register DstReg = MI.getOperand(0).getReg(); 3955 LLT DstTy = MRI.getType(DstReg); 3956 3957 Builder.setInstrAndDebugLoc(MI); 3958 if (ScalarTy != DstTy) { 3959 assert(ScalarTy.getSizeInBits() > DstTy.getSizeInBits()); 3960 Builder.buildTrunc(DstReg, Reg); 3961 MI.eraseFromParent(); 3962 return; 3963 } 3964 replaceSingleDefInstWithReg(MI, Reg); 3965 } 3966 3967 bool CombinerHelper::matchExtractAllEltsFromBuildVector( 3968 MachineInstr &MI, 3969 SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) { 3970 assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR); 3971 // This combine tries to find build_vector's which have every source element 3972 // extracted using G_EXTRACT_VECTOR_ELT. This can happen when transforms like 3973 // the masked load scalarization is run late in the pipeline. There's already 3974 // a combine for a similar pattern starting from the extract, but that 3975 // doesn't attempt to do it if there are multiple uses of the build_vector, 3976 // which in this case is true. Starting the combine from the build_vector 3977 // feels more natural than trying to find sibling nodes of extracts. 3978 // E.g. 3979 // %vec(<4 x s32>) = G_BUILD_VECTOR %s1(s32), %s2, %s3, %s4 3980 // %ext1 = G_EXTRACT_VECTOR_ELT %vec, 0 3981 // %ext2 = G_EXTRACT_VECTOR_ELT %vec, 1 3982 // %ext3 = G_EXTRACT_VECTOR_ELT %vec, 2 3983 // %ext4 = G_EXTRACT_VECTOR_ELT %vec, 3 3984 // ==> 3985 // replace ext{1,2,3,4} with %s{1,2,3,4} 3986 3987 Register DstReg = MI.getOperand(0).getReg(); 3988 LLT DstTy = MRI.getType(DstReg); 3989 unsigned NumElts = DstTy.getNumElements(); 3990 3991 SmallBitVector ExtractedElts(NumElts); 3992 for (MachineInstr &II : MRI.use_nodbg_instructions(DstReg)) { 3993 if (II.getOpcode() != TargetOpcode::G_EXTRACT_VECTOR_ELT) 3994 return false; 3995 auto Cst = getIConstantVRegVal(II.getOperand(2).getReg(), MRI); 3996 if (!Cst) 3997 return false; 3998 unsigned Idx = Cst->getZExtValue(); 3999 if (Idx >= NumElts) 4000 return false; // Out of range. 4001 ExtractedElts.set(Idx); 4002 SrcDstPairs.emplace_back( 4003 std::make_pair(MI.getOperand(Idx + 1).getReg(), &II)); 4004 } 4005 // Match if every element was extracted. 4006 return ExtractedElts.all(); 4007 } 4008 4009 void CombinerHelper::applyExtractAllEltsFromBuildVector( 4010 MachineInstr &MI, 4011 SmallVectorImpl<std::pair<Register, MachineInstr *>> &SrcDstPairs) { 4012 assert(MI.getOpcode() == TargetOpcode::G_BUILD_VECTOR); 4013 for (auto &Pair : SrcDstPairs) { 4014 auto *ExtMI = Pair.second; 4015 replaceRegWith(MRI, ExtMI->getOperand(0).getReg(), Pair.first); 4016 ExtMI->eraseFromParent(); 4017 } 4018 MI.eraseFromParent(); 4019 } 4020 4021 void CombinerHelper::applyBuildFn( 4022 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4023 Builder.setInstrAndDebugLoc(MI); 4024 MatchInfo(Builder); 4025 MI.eraseFromParent(); 4026 } 4027 4028 void CombinerHelper::applyBuildFnNoErase( 4029 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4030 Builder.setInstrAndDebugLoc(MI); 4031 MatchInfo(Builder); 4032 } 4033 4034 bool CombinerHelper::matchOrShiftToFunnelShift(MachineInstr &MI, 4035 BuildFnTy &MatchInfo) { 4036 assert(MI.getOpcode() == TargetOpcode::G_OR); 4037 4038 Register Dst = MI.getOperand(0).getReg(); 4039 LLT Ty = MRI.getType(Dst); 4040 unsigned BitWidth = Ty.getScalarSizeInBits(); 4041 4042 Register ShlSrc, ShlAmt, LShrSrc, LShrAmt, Amt; 4043 unsigned FshOpc = 0; 4044 4045 // Match (or (shl ...), (lshr ...)). 4046 if (!mi_match(Dst, MRI, 4047 // m_GOr() handles the commuted version as well. 4048 m_GOr(m_GShl(m_Reg(ShlSrc), m_Reg(ShlAmt)), 4049 m_GLShr(m_Reg(LShrSrc), m_Reg(LShrAmt))))) 4050 return false; 4051 4052 // Given constants C0 and C1 such that C0 + C1 is bit-width: 4053 // (or (shl x, C0), (lshr y, C1)) -> (fshl x, y, C0) or (fshr x, y, C1) 4054 int64_t CstShlAmt, CstLShrAmt; 4055 if (mi_match(ShlAmt, MRI, m_ICstOrSplat(CstShlAmt)) && 4056 mi_match(LShrAmt, MRI, m_ICstOrSplat(CstLShrAmt)) && 4057 CstShlAmt + CstLShrAmt == BitWidth) { 4058 FshOpc = TargetOpcode::G_FSHR; 4059 Amt = LShrAmt; 4060 4061 } else if (mi_match(LShrAmt, MRI, 4062 m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) && 4063 ShlAmt == Amt) { 4064 // (or (shl x, amt), (lshr y, (sub bw, amt))) -> (fshl x, y, amt) 4065 FshOpc = TargetOpcode::G_FSHL; 4066 4067 } else if (mi_match(ShlAmt, MRI, 4068 m_GSub(m_SpecificICstOrSplat(BitWidth), m_Reg(Amt))) && 4069 LShrAmt == Amt) { 4070 // (or (shl x, (sub bw, amt)), (lshr y, amt)) -> (fshr x, y, amt) 4071 FshOpc = TargetOpcode::G_FSHR; 4072 4073 } else { 4074 return false; 4075 } 4076 4077 LLT AmtTy = MRI.getType(Amt); 4078 if (!isLegalOrBeforeLegalizer({FshOpc, {Ty, AmtTy}})) 4079 return false; 4080 4081 MatchInfo = [=](MachineIRBuilder &B) { 4082 B.buildInstr(FshOpc, {Dst}, {ShlSrc, LShrSrc, Amt}); 4083 }; 4084 return true; 4085 } 4086 4087 /// Match an FSHL or FSHR that can be combined to a ROTR or ROTL rotate. 4088 bool CombinerHelper::matchFunnelShiftToRotate(MachineInstr &MI) { 4089 unsigned Opc = MI.getOpcode(); 4090 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR); 4091 Register X = MI.getOperand(1).getReg(); 4092 Register Y = MI.getOperand(2).getReg(); 4093 if (X != Y) 4094 return false; 4095 unsigned RotateOpc = 4096 Opc == TargetOpcode::G_FSHL ? TargetOpcode::G_ROTL : TargetOpcode::G_ROTR; 4097 return isLegalOrBeforeLegalizer({RotateOpc, {MRI.getType(X), MRI.getType(Y)}}); 4098 } 4099 4100 void CombinerHelper::applyFunnelShiftToRotate(MachineInstr &MI) { 4101 unsigned Opc = MI.getOpcode(); 4102 assert(Opc == TargetOpcode::G_FSHL || Opc == TargetOpcode::G_FSHR); 4103 bool IsFSHL = Opc == TargetOpcode::G_FSHL; 4104 Observer.changingInstr(MI); 4105 MI.setDesc(Builder.getTII().get(IsFSHL ? TargetOpcode::G_ROTL 4106 : TargetOpcode::G_ROTR)); 4107 MI.removeOperand(2); 4108 Observer.changedInstr(MI); 4109 } 4110 4111 // Fold (rot x, c) -> (rot x, c % BitSize) 4112 bool CombinerHelper::matchRotateOutOfRange(MachineInstr &MI) { 4113 assert(MI.getOpcode() == TargetOpcode::G_ROTL || 4114 MI.getOpcode() == TargetOpcode::G_ROTR); 4115 unsigned Bitsize = 4116 MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits(); 4117 Register AmtReg = MI.getOperand(2).getReg(); 4118 bool OutOfRange = false; 4119 auto MatchOutOfRange = [Bitsize, &OutOfRange](const Constant *C) { 4120 if (auto *CI = dyn_cast<ConstantInt>(C)) 4121 OutOfRange |= CI->getValue().uge(Bitsize); 4122 return true; 4123 }; 4124 return matchUnaryPredicate(MRI, AmtReg, MatchOutOfRange) && OutOfRange; 4125 } 4126 4127 void CombinerHelper::applyRotateOutOfRange(MachineInstr &MI) { 4128 assert(MI.getOpcode() == TargetOpcode::G_ROTL || 4129 MI.getOpcode() == TargetOpcode::G_ROTR); 4130 unsigned Bitsize = 4131 MRI.getType(MI.getOperand(0).getReg()).getScalarSizeInBits(); 4132 Builder.setInstrAndDebugLoc(MI); 4133 Register Amt = MI.getOperand(2).getReg(); 4134 LLT AmtTy = MRI.getType(Amt); 4135 auto Bits = Builder.buildConstant(AmtTy, Bitsize); 4136 Amt = Builder.buildURem(AmtTy, MI.getOperand(2).getReg(), Bits).getReg(0); 4137 Observer.changingInstr(MI); 4138 MI.getOperand(2).setReg(Amt); 4139 Observer.changedInstr(MI); 4140 } 4141 4142 bool CombinerHelper::matchICmpToTrueFalseKnownBits(MachineInstr &MI, 4143 int64_t &MatchInfo) { 4144 assert(MI.getOpcode() == TargetOpcode::G_ICMP); 4145 auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); 4146 auto KnownLHS = KB->getKnownBits(MI.getOperand(2).getReg()); 4147 auto KnownRHS = KB->getKnownBits(MI.getOperand(3).getReg()); 4148 Optional<bool> KnownVal; 4149 switch (Pred) { 4150 default: 4151 llvm_unreachable("Unexpected G_ICMP predicate?"); 4152 case CmpInst::ICMP_EQ: 4153 KnownVal = KnownBits::eq(KnownLHS, KnownRHS); 4154 break; 4155 case CmpInst::ICMP_NE: 4156 KnownVal = KnownBits::ne(KnownLHS, KnownRHS); 4157 break; 4158 case CmpInst::ICMP_SGE: 4159 KnownVal = KnownBits::sge(KnownLHS, KnownRHS); 4160 break; 4161 case CmpInst::ICMP_SGT: 4162 KnownVal = KnownBits::sgt(KnownLHS, KnownRHS); 4163 break; 4164 case CmpInst::ICMP_SLE: 4165 KnownVal = KnownBits::sle(KnownLHS, KnownRHS); 4166 break; 4167 case CmpInst::ICMP_SLT: 4168 KnownVal = KnownBits::slt(KnownLHS, KnownRHS); 4169 break; 4170 case CmpInst::ICMP_UGE: 4171 KnownVal = KnownBits::uge(KnownLHS, KnownRHS); 4172 break; 4173 case CmpInst::ICMP_UGT: 4174 KnownVal = KnownBits::ugt(KnownLHS, KnownRHS); 4175 break; 4176 case CmpInst::ICMP_ULE: 4177 KnownVal = KnownBits::ule(KnownLHS, KnownRHS); 4178 break; 4179 case CmpInst::ICMP_ULT: 4180 KnownVal = KnownBits::ult(KnownLHS, KnownRHS); 4181 break; 4182 } 4183 if (!KnownVal) 4184 return false; 4185 MatchInfo = 4186 *KnownVal 4187 ? getICmpTrueVal(getTargetLowering(), 4188 /*IsVector = */ 4189 MRI.getType(MI.getOperand(0).getReg()).isVector(), 4190 /* IsFP = */ false) 4191 : 0; 4192 return true; 4193 } 4194 4195 bool CombinerHelper::matchICmpToLHSKnownBits( 4196 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4197 assert(MI.getOpcode() == TargetOpcode::G_ICMP); 4198 // Given: 4199 // 4200 // %x = G_WHATEVER (... x is known to be 0 or 1 ...) 4201 // %cmp = G_ICMP ne %x, 0 4202 // 4203 // Or: 4204 // 4205 // %x = G_WHATEVER (... x is known to be 0 or 1 ...) 4206 // %cmp = G_ICMP eq %x, 1 4207 // 4208 // We can replace %cmp with %x assuming true is 1 on the target. 4209 auto Pred = static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate()); 4210 if (!CmpInst::isEquality(Pred)) 4211 return false; 4212 Register Dst = MI.getOperand(0).getReg(); 4213 LLT DstTy = MRI.getType(Dst); 4214 if (getICmpTrueVal(getTargetLowering(), DstTy.isVector(), 4215 /* IsFP = */ false) != 1) 4216 return false; 4217 int64_t OneOrZero = Pred == CmpInst::ICMP_EQ; 4218 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICst(OneOrZero))) 4219 return false; 4220 Register LHS = MI.getOperand(2).getReg(); 4221 auto KnownLHS = KB->getKnownBits(LHS); 4222 if (KnownLHS.getMinValue() != 0 || KnownLHS.getMaxValue() != 1) 4223 return false; 4224 // Make sure replacing Dst with the LHS is a legal operation. 4225 LLT LHSTy = MRI.getType(LHS); 4226 unsigned LHSSize = LHSTy.getSizeInBits(); 4227 unsigned DstSize = DstTy.getSizeInBits(); 4228 unsigned Op = TargetOpcode::COPY; 4229 if (DstSize != LHSSize) 4230 Op = DstSize < LHSSize ? TargetOpcode::G_TRUNC : TargetOpcode::G_ZEXT; 4231 if (!isLegalOrBeforeLegalizer({Op, {DstTy, LHSTy}})) 4232 return false; 4233 MatchInfo = [=](MachineIRBuilder &B) { B.buildInstr(Op, {Dst}, {LHS}); }; 4234 return true; 4235 } 4236 4237 // Replace (and (or x, c1), c2) with (and x, c2) iff c1 & c2 == 0 4238 bool CombinerHelper::matchAndOrDisjointMask( 4239 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4240 assert(MI.getOpcode() == TargetOpcode::G_AND); 4241 4242 // Ignore vector types to simplify matching the two constants. 4243 // TODO: do this for vectors and scalars via a demanded bits analysis. 4244 LLT Ty = MRI.getType(MI.getOperand(0).getReg()); 4245 if (Ty.isVector()) 4246 return false; 4247 4248 Register Src; 4249 Register AndMaskReg; 4250 int64_t AndMaskBits; 4251 int64_t OrMaskBits; 4252 if (!mi_match(MI, MRI, 4253 m_GAnd(m_GOr(m_Reg(Src), m_ICst(OrMaskBits)), 4254 m_all_of(m_ICst(AndMaskBits), m_Reg(AndMaskReg))))) 4255 return false; 4256 4257 // Check if OrMask could turn on any bits in Src. 4258 if (AndMaskBits & OrMaskBits) 4259 return false; 4260 4261 MatchInfo = [=, &MI](MachineIRBuilder &B) { 4262 Observer.changingInstr(MI); 4263 // Canonicalize the result to have the constant on the RHS. 4264 if (MI.getOperand(1).getReg() == AndMaskReg) 4265 MI.getOperand(2).setReg(AndMaskReg); 4266 MI.getOperand(1).setReg(Src); 4267 Observer.changedInstr(MI); 4268 }; 4269 return true; 4270 } 4271 4272 /// Form a G_SBFX from a G_SEXT_INREG fed by a right shift. 4273 bool CombinerHelper::matchBitfieldExtractFromSExtInReg( 4274 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4275 assert(MI.getOpcode() == TargetOpcode::G_SEXT_INREG); 4276 Register Dst = MI.getOperand(0).getReg(); 4277 Register Src = MI.getOperand(1).getReg(); 4278 LLT Ty = MRI.getType(Src); 4279 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4280 if (!LI || !LI->isLegalOrCustom({TargetOpcode::G_SBFX, {Ty, ExtractTy}})) 4281 return false; 4282 int64_t Width = MI.getOperand(2).getImm(); 4283 Register ShiftSrc; 4284 int64_t ShiftImm; 4285 if (!mi_match( 4286 Src, MRI, 4287 m_OneNonDBGUse(m_any_of(m_GAShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)), 4288 m_GLShr(m_Reg(ShiftSrc), m_ICst(ShiftImm)))))) 4289 return false; 4290 if (ShiftImm < 0 || ShiftImm + Width > Ty.getScalarSizeInBits()) 4291 return false; 4292 4293 MatchInfo = [=](MachineIRBuilder &B) { 4294 auto Cst1 = B.buildConstant(ExtractTy, ShiftImm); 4295 auto Cst2 = B.buildConstant(ExtractTy, Width); 4296 B.buildSbfx(Dst, ShiftSrc, Cst1, Cst2); 4297 }; 4298 return true; 4299 } 4300 4301 /// Form a G_UBFX from "(a srl b) & mask", where b and mask are constants. 4302 bool CombinerHelper::matchBitfieldExtractFromAnd( 4303 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4304 assert(MI.getOpcode() == TargetOpcode::G_AND); 4305 Register Dst = MI.getOperand(0).getReg(); 4306 LLT Ty = MRI.getType(Dst); 4307 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4308 if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal( 4309 TargetOpcode::G_UBFX, Ty, ExtractTy)) 4310 return false; 4311 4312 int64_t AndImm, LSBImm; 4313 Register ShiftSrc; 4314 const unsigned Size = Ty.getScalarSizeInBits(); 4315 if (!mi_match(MI.getOperand(0).getReg(), MRI, 4316 m_GAnd(m_OneNonDBGUse(m_GLShr(m_Reg(ShiftSrc), m_ICst(LSBImm))), 4317 m_ICst(AndImm)))) 4318 return false; 4319 4320 // The mask is a mask of the low bits iff imm & (imm+1) == 0. 4321 auto MaybeMask = static_cast<uint64_t>(AndImm); 4322 if (MaybeMask & (MaybeMask + 1)) 4323 return false; 4324 4325 // LSB must fit within the register. 4326 if (static_cast<uint64_t>(LSBImm) >= Size) 4327 return false; 4328 4329 uint64_t Width = APInt(Size, AndImm).countTrailingOnes(); 4330 MatchInfo = [=](MachineIRBuilder &B) { 4331 auto WidthCst = B.buildConstant(ExtractTy, Width); 4332 auto LSBCst = B.buildConstant(ExtractTy, LSBImm); 4333 B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {ShiftSrc, LSBCst, WidthCst}); 4334 }; 4335 return true; 4336 } 4337 4338 bool CombinerHelper::matchBitfieldExtractFromShr( 4339 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4340 const unsigned Opcode = MI.getOpcode(); 4341 assert(Opcode == TargetOpcode::G_ASHR || Opcode == TargetOpcode::G_LSHR); 4342 4343 const Register Dst = MI.getOperand(0).getReg(); 4344 4345 const unsigned ExtrOpcode = Opcode == TargetOpcode::G_ASHR 4346 ? TargetOpcode::G_SBFX 4347 : TargetOpcode::G_UBFX; 4348 4349 // Check if the type we would use for the extract is legal 4350 LLT Ty = MRI.getType(Dst); 4351 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4352 if (!LI || !LI->isLegalOrCustom({ExtrOpcode, {Ty, ExtractTy}})) 4353 return false; 4354 4355 Register ShlSrc; 4356 int64_t ShrAmt; 4357 int64_t ShlAmt; 4358 const unsigned Size = Ty.getScalarSizeInBits(); 4359 4360 // Try to match shr (shl x, c1), c2 4361 if (!mi_match(Dst, MRI, 4362 m_BinOp(Opcode, 4363 m_OneNonDBGUse(m_GShl(m_Reg(ShlSrc), m_ICst(ShlAmt))), 4364 m_ICst(ShrAmt)))) 4365 return false; 4366 4367 // Make sure that the shift sizes can fit a bitfield extract 4368 if (ShlAmt < 0 || ShlAmt > ShrAmt || ShrAmt >= Size) 4369 return false; 4370 4371 // Skip this combine if the G_SEXT_INREG combine could handle it 4372 if (Opcode == TargetOpcode::G_ASHR && ShlAmt == ShrAmt) 4373 return false; 4374 4375 // Calculate start position and width of the extract 4376 const int64_t Pos = ShrAmt - ShlAmt; 4377 const int64_t Width = Size - ShrAmt; 4378 4379 MatchInfo = [=](MachineIRBuilder &B) { 4380 auto WidthCst = B.buildConstant(ExtractTy, Width); 4381 auto PosCst = B.buildConstant(ExtractTy, Pos); 4382 B.buildInstr(ExtrOpcode, {Dst}, {ShlSrc, PosCst, WidthCst}); 4383 }; 4384 return true; 4385 } 4386 4387 bool CombinerHelper::matchBitfieldExtractFromShrAnd( 4388 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4389 const unsigned Opcode = MI.getOpcode(); 4390 assert(Opcode == TargetOpcode::G_LSHR || Opcode == TargetOpcode::G_ASHR); 4391 4392 const Register Dst = MI.getOperand(0).getReg(); 4393 LLT Ty = MRI.getType(Dst); 4394 LLT ExtractTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4395 if (!getTargetLowering().isConstantUnsignedBitfieldExtractLegal( 4396 TargetOpcode::G_UBFX, Ty, ExtractTy)) 4397 return false; 4398 4399 // Try to match shr (and x, c1), c2 4400 Register AndSrc; 4401 int64_t ShrAmt; 4402 int64_t SMask; 4403 if (!mi_match(Dst, MRI, 4404 m_BinOp(Opcode, 4405 m_OneNonDBGUse(m_GAnd(m_Reg(AndSrc), m_ICst(SMask))), 4406 m_ICst(ShrAmt)))) 4407 return false; 4408 4409 const unsigned Size = Ty.getScalarSizeInBits(); 4410 if (ShrAmt < 0 || ShrAmt >= Size) 4411 return false; 4412 4413 // If the shift subsumes the mask, emit the 0 directly. 4414 if (0 == (SMask >> ShrAmt)) { 4415 MatchInfo = [=](MachineIRBuilder &B) { 4416 B.buildConstant(Dst, 0); 4417 }; 4418 return true; 4419 } 4420 4421 // Check that ubfx can do the extraction, with no holes in the mask. 4422 uint64_t UMask = SMask; 4423 UMask |= maskTrailingOnes<uint64_t>(ShrAmt); 4424 UMask &= maskTrailingOnes<uint64_t>(Size); 4425 if (!isMask_64(UMask)) 4426 return false; 4427 4428 // Calculate start position and width of the extract. 4429 const int64_t Pos = ShrAmt; 4430 const int64_t Width = countTrailingOnes(UMask) - ShrAmt; 4431 4432 // It's preferable to keep the shift, rather than form G_SBFX. 4433 // TODO: remove the G_AND via demanded bits analysis. 4434 if (Opcode == TargetOpcode::G_ASHR && Width + ShrAmt == Size) 4435 return false; 4436 4437 MatchInfo = [=](MachineIRBuilder &B) { 4438 auto WidthCst = B.buildConstant(ExtractTy, Width); 4439 auto PosCst = B.buildConstant(ExtractTy, Pos); 4440 B.buildInstr(TargetOpcode::G_UBFX, {Dst}, {AndSrc, PosCst, WidthCst}); 4441 }; 4442 return true; 4443 } 4444 4445 bool CombinerHelper::reassociationCanBreakAddressingModePattern( 4446 MachineInstr &PtrAdd) { 4447 assert(PtrAdd.getOpcode() == TargetOpcode::G_PTR_ADD); 4448 4449 Register Src1Reg = PtrAdd.getOperand(1).getReg(); 4450 MachineInstr *Src1Def = getOpcodeDef(TargetOpcode::G_PTR_ADD, Src1Reg, MRI); 4451 if (!Src1Def) 4452 return false; 4453 4454 Register Src2Reg = PtrAdd.getOperand(2).getReg(); 4455 4456 if (MRI.hasOneNonDBGUse(Src1Reg)) 4457 return false; 4458 4459 auto C1 = getIConstantVRegVal(Src1Def->getOperand(2).getReg(), MRI); 4460 if (!C1) 4461 return false; 4462 auto C2 = getIConstantVRegVal(Src2Reg, MRI); 4463 if (!C2) 4464 return false; 4465 4466 const APInt &C1APIntVal = *C1; 4467 const APInt &C2APIntVal = *C2; 4468 const int64_t CombinedValue = (C1APIntVal + C2APIntVal).getSExtValue(); 4469 4470 for (auto &UseMI : MRI.use_nodbg_instructions(Src1Reg)) { 4471 // This combine may end up running before ptrtoint/inttoptr combines 4472 // manage to eliminate redundant conversions, so try to look through them. 4473 MachineInstr *ConvUseMI = &UseMI; 4474 unsigned ConvUseOpc = ConvUseMI->getOpcode(); 4475 while (ConvUseOpc == TargetOpcode::G_INTTOPTR || 4476 ConvUseOpc == TargetOpcode::G_PTRTOINT) { 4477 Register DefReg = ConvUseMI->getOperand(0).getReg(); 4478 if (!MRI.hasOneNonDBGUse(DefReg)) 4479 break; 4480 ConvUseMI = &*MRI.use_instr_nodbg_begin(DefReg); 4481 ConvUseOpc = ConvUseMI->getOpcode(); 4482 } 4483 auto LoadStore = ConvUseOpc == TargetOpcode::G_LOAD || 4484 ConvUseOpc == TargetOpcode::G_STORE; 4485 if (!LoadStore) 4486 continue; 4487 // Is x[offset2] already not a legal addressing mode? If so then 4488 // reassociating the constants breaks nothing (we test offset2 because 4489 // that's the one we hope to fold into the load or store). 4490 TargetLoweringBase::AddrMode AM; 4491 AM.HasBaseReg = true; 4492 AM.BaseOffs = C2APIntVal.getSExtValue(); 4493 unsigned AS = 4494 MRI.getType(ConvUseMI->getOperand(1).getReg()).getAddressSpace(); 4495 Type *AccessTy = 4496 getTypeForLLT(MRI.getType(ConvUseMI->getOperand(0).getReg()), 4497 PtrAdd.getMF()->getFunction().getContext()); 4498 const auto &TLI = *PtrAdd.getMF()->getSubtarget().getTargetLowering(); 4499 if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM, 4500 AccessTy, AS)) 4501 continue; 4502 4503 // Would x[offset1+offset2] still be a legal addressing mode? 4504 AM.BaseOffs = CombinedValue; 4505 if (!TLI.isLegalAddressingMode(PtrAdd.getMF()->getDataLayout(), AM, 4506 AccessTy, AS)) 4507 return true; 4508 } 4509 4510 return false; 4511 } 4512 4513 bool CombinerHelper::matchReassocConstantInnerRHS(GPtrAdd &MI, 4514 MachineInstr *RHS, 4515 BuildFnTy &MatchInfo) { 4516 // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C) 4517 Register Src1Reg = MI.getOperand(1).getReg(); 4518 if (RHS->getOpcode() != TargetOpcode::G_ADD) 4519 return false; 4520 auto C2 = getIConstantVRegVal(RHS->getOperand(2).getReg(), MRI); 4521 if (!C2) 4522 return false; 4523 4524 MatchInfo = [=, &MI](MachineIRBuilder &B) { 4525 LLT PtrTy = MRI.getType(MI.getOperand(0).getReg()); 4526 4527 auto NewBase = 4528 Builder.buildPtrAdd(PtrTy, Src1Reg, RHS->getOperand(1).getReg()); 4529 Observer.changingInstr(MI); 4530 MI.getOperand(1).setReg(NewBase.getReg(0)); 4531 MI.getOperand(2).setReg(RHS->getOperand(2).getReg()); 4532 Observer.changedInstr(MI); 4533 }; 4534 return !reassociationCanBreakAddressingModePattern(MI); 4535 } 4536 4537 bool CombinerHelper::matchReassocConstantInnerLHS(GPtrAdd &MI, 4538 MachineInstr *LHS, 4539 MachineInstr *RHS, 4540 BuildFnTy &MatchInfo) { 4541 // G_PTR_ADD (G_PTR_ADD X, C), Y) -> (G_PTR_ADD (G_PTR_ADD(X, Y), C) 4542 // if and only if (G_PTR_ADD X, C) has one use. 4543 Register LHSBase; 4544 Optional<ValueAndVReg> LHSCstOff; 4545 if (!mi_match(MI.getBaseReg(), MRI, 4546 m_OneNonDBGUse(m_GPtrAdd(m_Reg(LHSBase), m_GCst(LHSCstOff))))) 4547 return false; 4548 4549 auto *LHSPtrAdd = cast<GPtrAdd>(LHS); 4550 MatchInfo = [=, &MI](MachineIRBuilder &B) { 4551 // When we change LHSPtrAdd's offset register we might cause it to use a reg 4552 // before its def. Sink the instruction so the outer PTR_ADD to ensure this 4553 // doesn't happen. 4554 LHSPtrAdd->moveBefore(&MI); 4555 Register RHSReg = MI.getOffsetReg(); 4556 Observer.changingInstr(MI); 4557 MI.getOperand(2).setReg(LHSCstOff->VReg); 4558 Observer.changedInstr(MI); 4559 Observer.changingInstr(*LHSPtrAdd); 4560 LHSPtrAdd->getOperand(2).setReg(RHSReg); 4561 Observer.changedInstr(*LHSPtrAdd); 4562 }; 4563 return !reassociationCanBreakAddressingModePattern(MI); 4564 } 4565 4566 bool CombinerHelper::matchReassocFoldConstantsInSubTree(GPtrAdd &MI, 4567 MachineInstr *LHS, 4568 MachineInstr *RHS, 4569 BuildFnTy &MatchInfo) { 4570 // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2) 4571 auto *LHSPtrAdd = dyn_cast<GPtrAdd>(LHS); 4572 if (!LHSPtrAdd) 4573 return false; 4574 4575 Register Src2Reg = MI.getOperand(2).getReg(); 4576 Register LHSSrc1 = LHSPtrAdd->getBaseReg(); 4577 Register LHSSrc2 = LHSPtrAdd->getOffsetReg(); 4578 auto C1 = getIConstantVRegVal(LHSSrc2, MRI); 4579 if (!C1) 4580 return false; 4581 auto C2 = getIConstantVRegVal(Src2Reg, MRI); 4582 if (!C2) 4583 return false; 4584 4585 MatchInfo = [=, &MI](MachineIRBuilder &B) { 4586 auto NewCst = B.buildConstant(MRI.getType(Src2Reg), *C1 + *C2); 4587 Observer.changingInstr(MI); 4588 MI.getOperand(1).setReg(LHSSrc1); 4589 MI.getOperand(2).setReg(NewCst.getReg(0)); 4590 Observer.changedInstr(MI); 4591 }; 4592 return !reassociationCanBreakAddressingModePattern(MI); 4593 } 4594 4595 bool CombinerHelper::matchReassocPtrAdd(MachineInstr &MI, 4596 BuildFnTy &MatchInfo) { 4597 auto &PtrAdd = cast<GPtrAdd>(MI); 4598 // We're trying to match a few pointer computation patterns here for 4599 // re-association opportunities. 4600 // 1) Isolating a constant operand to be on the RHS, e.g.: 4601 // G_PTR_ADD(BASE, G_ADD(X, C)) -> G_PTR_ADD(G_PTR_ADD(BASE, X), C) 4602 // 4603 // 2) Folding two constants in each sub-tree as long as such folding 4604 // doesn't break a legal addressing mode. 4605 // G_PTR_ADD(G_PTR_ADD(BASE, C1), C2) -> G_PTR_ADD(BASE, C1+C2) 4606 // 4607 // 3) Move a constant from the LHS of an inner op to the RHS of the outer. 4608 // G_PTR_ADD (G_PTR_ADD X, C), Y) -> G_PTR_ADD (G_PTR_ADD(X, Y), C) 4609 // iif (G_PTR_ADD X, C) has one use. 4610 MachineInstr *LHS = MRI.getVRegDef(PtrAdd.getBaseReg()); 4611 MachineInstr *RHS = MRI.getVRegDef(PtrAdd.getOffsetReg()); 4612 4613 // Try to match example 2. 4614 if (matchReassocFoldConstantsInSubTree(PtrAdd, LHS, RHS, MatchInfo)) 4615 return true; 4616 4617 // Try to match example 3. 4618 if (matchReassocConstantInnerLHS(PtrAdd, LHS, RHS, MatchInfo)) 4619 return true; 4620 4621 // Try to match example 1. 4622 if (matchReassocConstantInnerRHS(PtrAdd, RHS, MatchInfo)) 4623 return true; 4624 4625 return false; 4626 } 4627 4628 bool CombinerHelper::matchConstantFold(MachineInstr &MI, APInt &MatchInfo) { 4629 Register Op1 = MI.getOperand(1).getReg(); 4630 Register Op2 = MI.getOperand(2).getReg(); 4631 auto MaybeCst = ConstantFoldBinOp(MI.getOpcode(), Op1, Op2, MRI); 4632 if (!MaybeCst) 4633 return false; 4634 MatchInfo = *MaybeCst; 4635 return true; 4636 } 4637 4638 bool CombinerHelper::matchNarrowBinopFeedingAnd( 4639 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 4640 // Look for a binop feeding into an AND with a mask: 4641 // 4642 // %add = G_ADD %lhs, %rhs 4643 // %and = G_AND %add, 000...11111111 4644 // 4645 // Check if it's possible to perform the binop at a narrower width and zext 4646 // back to the original width like so: 4647 // 4648 // %narrow_lhs = G_TRUNC %lhs 4649 // %narrow_rhs = G_TRUNC %rhs 4650 // %narrow_add = G_ADD %narrow_lhs, %narrow_rhs 4651 // %new_add = G_ZEXT %narrow_add 4652 // %and = G_AND %new_add, 000...11111111 4653 // 4654 // This can allow later combines to eliminate the G_AND if it turns out 4655 // that the mask is irrelevant. 4656 assert(MI.getOpcode() == TargetOpcode::G_AND); 4657 Register Dst = MI.getOperand(0).getReg(); 4658 Register AndLHS = MI.getOperand(1).getReg(); 4659 Register AndRHS = MI.getOperand(2).getReg(); 4660 LLT WideTy = MRI.getType(Dst); 4661 4662 // If the potential binop has more than one use, then it's possible that one 4663 // of those uses will need its full width. 4664 if (!WideTy.isScalar() || !MRI.hasOneNonDBGUse(AndLHS)) 4665 return false; 4666 4667 // Check if the LHS feeding the AND is impacted by the high bits that we're 4668 // masking out. 4669 // 4670 // e.g. for 64-bit x, y: 4671 // 4672 // add_64(x, y) & 65535 == zext(add_16(trunc(x), trunc(y))) & 65535 4673 MachineInstr *LHSInst = getDefIgnoringCopies(AndLHS, MRI); 4674 if (!LHSInst) 4675 return false; 4676 unsigned LHSOpc = LHSInst->getOpcode(); 4677 switch (LHSOpc) { 4678 default: 4679 return false; 4680 case TargetOpcode::G_ADD: 4681 case TargetOpcode::G_SUB: 4682 case TargetOpcode::G_MUL: 4683 case TargetOpcode::G_AND: 4684 case TargetOpcode::G_OR: 4685 case TargetOpcode::G_XOR: 4686 break; 4687 } 4688 4689 // Find the mask on the RHS. 4690 auto Cst = getIConstantVRegValWithLookThrough(AndRHS, MRI); 4691 if (!Cst) 4692 return false; 4693 auto Mask = Cst->Value; 4694 if (!Mask.isMask()) 4695 return false; 4696 4697 // No point in combining if there's nothing to truncate. 4698 unsigned NarrowWidth = Mask.countTrailingOnes(); 4699 if (NarrowWidth == WideTy.getSizeInBits()) 4700 return false; 4701 LLT NarrowTy = LLT::scalar(NarrowWidth); 4702 4703 // Check if adding the zext + truncates could be harmful. 4704 auto &MF = *MI.getMF(); 4705 const auto &TLI = getTargetLowering(); 4706 LLVMContext &Ctx = MF.getFunction().getContext(); 4707 auto &DL = MF.getDataLayout(); 4708 if (!TLI.isTruncateFree(WideTy, NarrowTy, DL, Ctx) || 4709 !TLI.isZExtFree(NarrowTy, WideTy, DL, Ctx)) 4710 return false; 4711 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_TRUNC, {NarrowTy, WideTy}}) || 4712 !isLegalOrBeforeLegalizer({TargetOpcode::G_ZEXT, {WideTy, NarrowTy}})) 4713 return false; 4714 Register BinOpLHS = LHSInst->getOperand(1).getReg(); 4715 Register BinOpRHS = LHSInst->getOperand(2).getReg(); 4716 MatchInfo = [=, &MI](MachineIRBuilder &B) { 4717 auto NarrowLHS = Builder.buildTrunc(NarrowTy, BinOpLHS); 4718 auto NarrowRHS = Builder.buildTrunc(NarrowTy, BinOpRHS); 4719 auto NarrowBinOp = 4720 Builder.buildInstr(LHSOpc, {NarrowTy}, {NarrowLHS, NarrowRHS}); 4721 auto Ext = Builder.buildZExt(WideTy, NarrowBinOp); 4722 Observer.changingInstr(MI); 4723 MI.getOperand(1).setReg(Ext.getReg(0)); 4724 Observer.changedInstr(MI); 4725 }; 4726 return true; 4727 } 4728 4729 bool CombinerHelper::matchMulOBy2(MachineInstr &MI, BuildFnTy &MatchInfo) { 4730 unsigned Opc = MI.getOpcode(); 4731 assert(Opc == TargetOpcode::G_UMULO || Opc == TargetOpcode::G_SMULO); 4732 4733 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(2))) 4734 return false; 4735 4736 MatchInfo = [=, &MI](MachineIRBuilder &B) { 4737 Observer.changingInstr(MI); 4738 unsigned NewOpc = Opc == TargetOpcode::G_UMULO ? TargetOpcode::G_UADDO 4739 : TargetOpcode::G_SADDO; 4740 MI.setDesc(Builder.getTII().get(NewOpc)); 4741 MI.getOperand(3).setReg(MI.getOperand(2).getReg()); 4742 Observer.changedInstr(MI); 4743 }; 4744 return true; 4745 } 4746 4747 bool CombinerHelper::matchMulOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) { 4748 // (G_*MULO x, 0) -> 0 + no carry out 4749 assert(MI.getOpcode() == TargetOpcode::G_UMULO || 4750 MI.getOpcode() == TargetOpcode::G_SMULO); 4751 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0))) 4752 return false; 4753 Register Dst = MI.getOperand(0).getReg(); 4754 Register Carry = MI.getOperand(1).getReg(); 4755 if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Dst)) || 4756 !isConstantLegalOrBeforeLegalizer(MRI.getType(Carry))) 4757 return false; 4758 MatchInfo = [=](MachineIRBuilder &B) { 4759 B.buildConstant(Dst, 0); 4760 B.buildConstant(Carry, 0); 4761 }; 4762 return true; 4763 } 4764 4765 bool CombinerHelper::matchAddOBy0(MachineInstr &MI, BuildFnTy &MatchInfo) { 4766 // (G_*ADDO x, 0) -> x + no carry out 4767 assert(MI.getOpcode() == TargetOpcode::G_UADDO || 4768 MI.getOpcode() == TargetOpcode::G_SADDO); 4769 if (!mi_match(MI.getOperand(3).getReg(), MRI, m_SpecificICstOrSplat(0))) 4770 return false; 4771 Register Carry = MI.getOperand(1).getReg(); 4772 if (!isConstantLegalOrBeforeLegalizer(MRI.getType(Carry))) 4773 return false; 4774 Register Dst = MI.getOperand(0).getReg(); 4775 Register LHS = MI.getOperand(2).getReg(); 4776 MatchInfo = [=](MachineIRBuilder &B) { 4777 B.buildCopy(Dst, LHS); 4778 B.buildConstant(Carry, 0); 4779 }; 4780 return true; 4781 } 4782 4783 MachineInstr *CombinerHelper::buildUDivUsingMul(MachineInstr &MI) { 4784 assert(MI.getOpcode() == TargetOpcode::G_UDIV); 4785 auto &UDiv = cast<GenericMachineInstr>(MI); 4786 Register Dst = UDiv.getReg(0); 4787 Register LHS = UDiv.getReg(1); 4788 Register RHS = UDiv.getReg(2); 4789 LLT Ty = MRI.getType(Dst); 4790 LLT ScalarTy = Ty.getScalarType(); 4791 const unsigned EltBits = ScalarTy.getScalarSizeInBits(); 4792 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4793 LLT ScalarShiftAmtTy = ShiftAmtTy.getScalarType(); 4794 auto &MIB = Builder; 4795 MIB.setInstrAndDebugLoc(MI); 4796 4797 bool UseNPQ = false; 4798 SmallVector<Register, 16> PreShifts, PostShifts, MagicFactors, NPQFactors; 4799 4800 auto BuildUDIVPattern = [&](const Constant *C) { 4801 auto *CI = cast<ConstantInt>(C); 4802 const APInt &Divisor = CI->getValue(); 4803 UnsignedDivisonByConstantInfo magics = 4804 UnsignedDivisonByConstantInfo::get(Divisor); 4805 unsigned PreShift = 0, PostShift = 0; 4806 4807 // If the divisor is even, we can avoid using the expensive fixup by 4808 // shifting the divided value upfront. 4809 if (magics.IsAdd != 0 && !Divisor[0]) { 4810 PreShift = Divisor.countTrailingZeros(); 4811 // Get magic number for the shifted divisor. 4812 magics = 4813 UnsignedDivisonByConstantInfo::get(Divisor.lshr(PreShift), PreShift); 4814 assert(magics.IsAdd == 0 && "Should use cheap fixup now"); 4815 } 4816 4817 APInt Magic = magics.Magic; 4818 4819 unsigned SelNPQ; 4820 if (magics.IsAdd == 0 || Divisor.isOneValue()) { 4821 assert(magics.ShiftAmount < Divisor.getBitWidth() && 4822 "We shouldn't generate an undefined shift!"); 4823 PostShift = magics.ShiftAmount; 4824 SelNPQ = false; 4825 } else { 4826 PostShift = magics.ShiftAmount - 1; 4827 SelNPQ = true; 4828 } 4829 4830 PreShifts.push_back( 4831 MIB.buildConstant(ScalarShiftAmtTy, PreShift).getReg(0)); 4832 MagicFactors.push_back(MIB.buildConstant(ScalarTy, Magic).getReg(0)); 4833 NPQFactors.push_back( 4834 MIB.buildConstant(ScalarTy, 4835 SelNPQ ? APInt::getOneBitSet(EltBits, EltBits - 1) 4836 : APInt::getZero(EltBits)) 4837 .getReg(0)); 4838 PostShifts.push_back( 4839 MIB.buildConstant(ScalarShiftAmtTy, PostShift).getReg(0)); 4840 UseNPQ |= SelNPQ; 4841 return true; 4842 }; 4843 4844 // Collect the shifts/magic values from each element. 4845 bool Matched = matchUnaryPredicate(MRI, RHS, BuildUDIVPattern); 4846 (void)Matched; 4847 assert(Matched && "Expected unary predicate match to succeed"); 4848 4849 Register PreShift, PostShift, MagicFactor, NPQFactor; 4850 auto *RHSDef = getOpcodeDef<GBuildVector>(RHS, MRI); 4851 if (RHSDef) { 4852 PreShift = MIB.buildBuildVector(ShiftAmtTy, PreShifts).getReg(0); 4853 MagicFactor = MIB.buildBuildVector(Ty, MagicFactors).getReg(0); 4854 NPQFactor = MIB.buildBuildVector(Ty, NPQFactors).getReg(0); 4855 PostShift = MIB.buildBuildVector(ShiftAmtTy, PostShifts).getReg(0); 4856 } else { 4857 assert(MRI.getType(RHS).isScalar() && 4858 "Non-build_vector operation should have been a scalar"); 4859 PreShift = PreShifts[0]; 4860 MagicFactor = MagicFactors[0]; 4861 PostShift = PostShifts[0]; 4862 } 4863 4864 Register Q = LHS; 4865 Q = MIB.buildLShr(Ty, Q, PreShift).getReg(0); 4866 4867 // Multiply the numerator (operand 0) by the magic value. 4868 Q = MIB.buildUMulH(Ty, Q, MagicFactor).getReg(0); 4869 4870 if (UseNPQ) { 4871 Register NPQ = MIB.buildSub(Ty, LHS, Q).getReg(0); 4872 4873 // For vectors we might have a mix of non-NPQ/NPQ paths, so use 4874 // G_UMULH to act as a SRL-by-1 for NPQ, else multiply by zero. 4875 if (Ty.isVector()) 4876 NPQ = MIB.buildUMulH(Ty, NPQ, NPQFactor).getReg(0); 4877 else 4878 NPQ = MIB.buildLShr(Ty, NPQ, MIB.buildConstant(ShiftAmtTy, 1)).getReg(0); 4879 4880 Q = MIB.buildAdd(Ty, NPQ, Q).getReg(0); 4881 } 4882 4883 Q = MIB.buildLShr(Ty, Q, PostShift).getReg(0); 4884 auto One = MIB.buildConstant(Ty, 1); 4885 auto IsOne = MIB.buildICmp( 4886 CmpInst::Predicate::ICMP_EQ, 4887 Ty.isScalar() ? LLT::scalar(1) : Ty.changeElementSize(1), RHS, One); 4888 return MIB.buildSelect(Ty, IsOne, LHS, Q); 4889 } 4890 4891 bool CombinerHelper::matchUDivByConst(MachineInstr &MI) { 4892 assert(MI.getOpcode() == TargetOpcode::G_UDIV); 4893 Register Dst = MI.getOperand(0).getReg(); 4894 Register RHS = MI.getOperand(2).getReg(); 4895 LLT DstTy = MRI.getType(Dst); 4896 auto *RHSDef = MRI.getVRegDef(RHS); 4897 if (!isConstantOrConstantVector(*RHSDef, MRI)) 4898 return false; 4899 4900 auto &MF = *MI.getMF(); 4901 AttributeList Attr = MF.getFunction().getAttributes(); 4902 const auto &TLI = getTargetLowering(); 4903 LLVMContext &Ctx = MF.getFunction().getContext(); 4904 auto &DL = MF.getDataLayout(); 4905 if (TLI.isIntDivCheap(getApproximateEVTForLLT(DstTy, DL, Ctx), Attr)) 4906 return false; 4907 4908 // Don't do this for minsize because the instruction sequence is usually 4909 // larger. 4910 if (MF.getFunction().hasMinSize()) 4911 return false; 4912 4913 // Don't do this if the types are not going to be legal. 4914 if (LI) { 4915 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_MUL, {DstTy, DstTy}})) 4916 return false; 4917 if (!isLegalOrBeforeLegalizer({TargetOpcode::G_UMULH, {DstTy}})) 4918 return false; 4919 if (!isLegalOrBeforeLegalizer( 4920 {TargetOpcode::G_ICMP, 4921 {DstTy.isVector() ? DstTy.changeElementSize(1) : LLT::scalar(1), 4922 DstTy}})) 4923 return false; 4924 } 4925 4926 auto CheckEltValue = [&](const Constant *C) { 4927 if (auto *CI = dyn_cast_or_null<ConstantInt>(C)) 4928 return !CI->isZero(); 4929 return false; 4930 }; 4931 return matchUnaryPredicate(MRI, RHS, CheckEltValue); 4932 } 4933 4934 void CombinerHelper::applyUDivByConst(MachineInstr &MI) { 4935 auto *NewMI = buildUDivUsingMul(MI); 4936 replaceSingleDefInstWithReg(MI, NewMI->getOperand(0).getReg()); 4937 } 4938 4939 bool CombinerHelper::matchUMulHToLShr(MachineInstr &MI) { 4940 assert(MI.getOpcode() == TargetOpcode::G_UMULH); 4941 Register RHS = MI.getOperand(2).getReg(); 4942 Register Dst = MI.getOperand(0).getReg(); 4943 LLT Ty = MRI.getType(Dst); 4944 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4945 auto MatchPow2ExceptOne = [&](const Constant *C) { 4946 if (auto *CI = dyn_cast<ConstantInt>(C)) 4947 return CI->getValue().isPowerOf2() && !CI->getValue().isOne(); 4948 return false; 4949 }; 4950 if (!matchUnaryPredicate(MRI, RHS, MatchPow2ExceptOne, false)) 4951 return false; 4952 return isLegalOrBeforeLegalizer({TargetOpcode::G_LSHR, {Ty, ShiftAmtTy}}); 4953 } 4954 4955 void CombinerHelper::applyUMulHToLShr(MachineInstr &MI) { 4956 Register LHS = MI.getOperand(1).getReg(); 4957 Register RHS = MI.getOperand(2).getReg(); 4958 Register Dst = MI.getOperand(0).getReg(); 4959 LLT Ty = MRI.getType(Dst); 4960 LLT ShiftAmtTy = getTargetLowering().getPreferredShiftAmountTy(Ty); 4961 unsigned NumEltBits = Ty.getScalarSizeInBits(); 4962 4963 Builder.setInstrAndDebugLoc(MI); 4964 auto LogBase2 = buildLogBase2(RHS, Builder); 4965 auto ShiftAmt = 4966 Builder.buildSub(Ty, Builder.buildConstant(Ty, NumEltBits), LogBase2); 4967 auto Trunc = Builder.buildZExtOrTrunc(ShiftAmtTy, ShiftAmt); 4968 Builder.buildLShr(Dst, LHS, Trunc); 4969 MI.eraseFromParent(); 4970 } 4971 4972 bool CombinerHelper::matchRedundantNegOperands(MachineInstr &MI, 4973 BuildFnTy &MatchInfo) { 4974 unsigned Opc = MI.getOpcode(); 4975 assert(Opc == TargetOpcode::G_FADD || Opc == TargetOpcode::G_FSUB || 4976 Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV || 4977 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA); 4978 4979 Register Dst = MI.getOperand(0).getReg(); 4980 Register X = MI.getOperand(1).getReg(); 4981 Register Y = MI.getOperand(2).getReg(); 4982 LLT Type = MRI.getType(Dst); 4983 4984 // fold (fadd x, fneg(y)) -> (fsub x, y) 4985 // fold (fadd fneg(y), x) -> (fsub x, y) 4986 // G_ADD is commutative so both cases are checked by m_GFAdd 4987 if (mi_match(Dst, MRI, m_GFAdd(m_Reg(X), m_GFNeg(m_Reg(Y)))) && 4988 isLegalOrBeforeLegalizer({TargetOpcode::G_FSUB, {Type}})) { 4989 Opc = TargetOpcode::G_FSUB; 4990 } 4991 /// fold (fsub x, fneg(y)) -> (fadd x, y) 4992 else if (mi_match(Dst, MRI, m_GFSub(m_Reg(X), m_GFNeg(m_Reg(Y)))) && 4993 isLegalOrBeforeLegalizer({TargetOpcode::G_FADD, {Type}})) { 4994 Opc = TargetOpcode::G_FADD; 4995 } 4996 // fold (fmul fneg(x), fneg(y)) -> (fmul x, y) 4997 // fold (fdiv fneg(x), fneg(y)) -> (fdiv x, y) 4998 // fold (fmad fneg(x), fneg(y), z) -> (fmad x, y, z) 4999 // fold (fma fneg(x), fneg(y), z) -> (fma x, y, z) 5000 else if ((Opc == TargetOpcode::G_FMUL || Opc == TargetOpcode::G_FDIV || 5001 Opc == TargetOpcode::G_FMAD || Opc == TargetOpcode::G_FMA) && 5002 mi_match(X, MRI, m_GFNeg(m_Reg(X))) && 5003 mi_match(Y, MRI, m_GFNeg(m_Reg(Y)))) { 5004 // no opcode change 5005 } else 5006 return false; 5007 5008 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5009 Observer.changingInstr(MI); 5010 MI.setDesc(B.getTII().get(Opc)); 5011 MI.getOperand(1).setReg(X); 5012 MI.getOperand(2).setReg(Y); 5013 Observer.changedInstr(MI); 5014 }; 5015 return true; 5016 } 5017 5018 /// Checks if \p MI is TargetOpcode::G_FMUL and contractable either 5019 /// due to global flags or MachineInstr flags. 5020 static bool isContractableFMul(MachineInstr &MI, bool AllowFusionGlobally) { 5021 if (MI.getOpcode() != TargetOpcode::G_FMUL) 5022 return false; 5023 return AllowFusionGlobally || MI.getFlag(MachineInstr::MIFlag::FmContract); 5024 } 5025 5026 static bool hasMoreUses(const MachineInstr &MI0, const MachineInstr &MI1, 5027 const MachineRegisterInfo &MRI) { 5028 return std::distance(MRI.use_instr_nodbg_begin(MI0.getOperand(0).getReg()), 5029 MRI.use_instr_nodbg_end()) > 5030 std::distance(MRI.use_instr_nodbg_begin(MI1.getOperand(0).getReg()), 5031 MRI.use_instr_nodbg_end()); 5032 } 5033 5034 bool CombinerHelper::canCombineFMadOrFMA(MachineInstr &MI, 5035 bool &AllowFusionGlobally, 5036 bool &HasFMAD, bool &Aggressive, 5037 bool CanReassociate) { 5038 5039 auto *MF = MI.getMF(); 5040 const auto &TLI = *MF->getSubtarget().getTargetLowering(); 5041 const TargetOptions &Options = MF->getTarget().Options; 5042 LLT DstType = MRI.getType(MI.getOperand(0).getReg()); 5043 5044 if (CanReassociate && 5045 !(Options.UnsafeFPMath || MI.getFlag(MachineInstr::MIFlag::FmReassoc))) 5046 return false; 5047 5048 // Floating-point multiply-add with intermediate rounding. 5049 HasFMAD = (LI && TLI.isFMADLegal(MI, DstType)); 5050 // Floating-point multiply-add without intermediate rounding. 5051 bool HasFMA = TLI.isFMAFasterThanFMulAndFAdd(*MF, DstType) && 5052 isLegalOrBeforeLegalizer({TargetOpcode::G_FMA, {DstType}}); 5053 // No valid opcode, do not combine. 5054 if (!HasFMAD && !HasFMA) 5055 return false; 5056 5057 AllowFusionGlobally = Options.AllowFPOpFusion == FPOpFusion::Fast || 5058 Options.UnsafeFPMath || HasFMAD; 5059 // If the addition is not contractable, do not combine. 5060 if (!AllowFusionGlobally && !MI.getFlag(MachineInstr::MIFlag::FmContract)) 5061 return false; 5062 5063 Aggressive = TLI.enableAggressiveFMAFusion(DstType); 5064 return true; 5065 } 5066 5067 bool CombinerHelper::matchCombineFAddFMulToFMadOrFMA( 5068 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5069 assert(MI.getOpcode() == TargetOpcode::G_FADD); 5070 5071 bool AllowFusionGlobally, HasFMAD, Aggressive; 5072 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5073 return false; 5074 5075 Register Op1 = MI.getOperand(1).getReg(); 5076 Register Op2 = MI.getOperand(2).getReg(); 5077 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1}; 5078 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2}; 5079 unsigned PreferredFusedOpcode = 5080 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5081 5082 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 5083 // prefer to fold the multiply with fewer uses. 5084 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5085 isContractableFMul(*RHS.MI, AllowFusionGlobally)) { 5086 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI)) 5087 std::swap(LHS, RHS); 5088 } 5089 5090 // fold (fadd (fmul x, y), z) -> (fma x, y, z) 5091 if (isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5092 (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg))) { 5093 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5094 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5095 {LHS.MI->getOperand(1).getReg(), 5096 LHS.MI->getOperand(2).getReg(), RHS.Reg}); 5097 }; 5098 return true; 5099 } 5100 5101 // fold (fadd x, (fmul y, z)) -> (fma y, z, x) 5102 if (isContractableFMul(*RHS.MI, AllowFusionGlobally) && 5103 (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg))) { 5104 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5105 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5106 {RHS.MI->getOperand(1).getReg(), 5107 RHS.MI->getOperand(2).getReg(), LHS.Reg}); 5108 }; 5109 return true; 5110 } 5111 5112 return false; 5113 } 5114 5115 bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMA( 5116 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5117 assert(MI.getOpcode() == TargetOpcode::G_FADD); 5118 5119 bool AllowFusionGlobally, HasFMAD, Aggressive; 5120 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5121 return false; 5122 5123 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering(); 5124 Register Op1 = MI.getOperand(1).getReg(); 5125 Register Op2 = MI.getOperand(2).getReg(); 5126 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1}; 5127 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2}; 5128 LLT DstType = MRI.getType(MI.getOperand(0).getReg()); 5129 5130 unsigned PreferredFusedOpcode = 5131 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5132 5133 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 5134 // prefer to fold the multiply with fewer uses. 5135 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5136 isContractableFMul(*RHS.MI, AllowFusionGlobally)) { 5137 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI)) 5138 std::swap(LHS, RHS); 5139 } 5140 5141 // fold (fadd (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), z) 5142 MachineInstr *FpExtSrc; 5143 if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) && 5144 isContractableFMul(*FpExtSrc, AllowFusionGlobally) && 5145 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5146 MRI.getType(FpExtSrc->getOperand(1).getReg()))) { 5147 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5148 auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg()); 5149 auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg()); 5150 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5151 {FpExtX.getReg(0), FpExtY.getReg(0), RHS.Reg}); 5152 }; 5153 return true; 5154 } 5155 5156 // fold (fadd z, (fpext (fmul x, y))) -> (fma (fpext x), (fpext y), z) 5157 // Note: Commutes FADD operands. 5158 if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FpExtSrc))) && 5159 isContractableFMul(*FpExtSrc, AllowFusionGlobally) && 5160 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5161 MRI.getType(FpExtSrc->getOperand(1).getReg()))) { 5162 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5163 auto FpExtX = B.buildFPExt(DstType, FpExtSrc->getOperand(1).getReg()); 5164 auto FpExtY = B.buildFPExt(DstType, FpExtSrc->getOperand(2).getReg()); 5165 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5166 {FpExtX.getReg(0), FpExtY.getReg(0), LHS.Reg}); 5167 }; 5168 return true; 5169 } 5170 5171 return false; 5172 } 5173 5174 bool CombinerHelper::matchCombineFAddFMAFMulToFMadOrFMA( 5175 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5176 assert(MI.getOpcode() == TargetOpcode::G_FADD); 5177 5178 bool AllowFusionGlobally, HasFMAD, Aggressive; 5179 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive, true)) 5180 return false; 5181 5182 Register Op1 = MI.getOperand(1).getReg(); 5183 Register Op2 = MI.getOperand(2).getReg(); 5184 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1}; 5185 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2}; 5186 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 5187 5188 unsigned PreferredFusedOpcode = 5189 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5190 5191 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 5192 // prefer to fold the multiply with fewer uses. 5193 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5194 isContractableFMul(*RHS.MI, AllowFusionGlobally)) { 5195 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI)) 5196 std::swap(LHS, RHS); 5197 } 5198 5199 MachineInstr *FMA = nullptr; 5200 Register Z; 5201 // fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y, (fma u, v, z)) 5202 if (LHS.MI->getOpcode() == PreferredFusedOpcode && 5203 (MRI.getVRegDef(LHS.MI->getOperand(3).getReg())->getOpcode() == 5204 TargetOpcode::G_FMUL) && 5205 MRI.hasOneNonDBGUse(LHS.MI->getOperand(0).getReg()) && 5206 MRI.hasOneNonDBGUse(LHS.MI->getOperand(3).getReg())) { 5207 FMA = LHS.MI; 5208 Z = RHS.Reg; 5209 } 5210 // fold (fadd z, (fma x, y, (fmul u, v))) -> (fma x, y, (fma u, v, z)) 5211 else if (RHS.MI->getOpcode() == PreferredFusedOpcode && 5212 (MRI.getVRegDef(RHS.MI->getOperand(3).getReg())->getOpcode() == 5213 TargetOpcode::G_FMUL) && 5214 MRI.hasOneNonDBGUse(RHS.MI->getOperand(0).getReg()) && 5215 MRI.hasOneNonDBGUse(RHS.MI->getOperand(3).getReg())) { 5216 Z = LHS.Reg; 5217 FMA = RHS.MI; 5218 } 5219 5220 if (FMA) { 5221 MachineInstr *FMulMI = MRI.getVRegDef(FMA->getOperand(3).getReg()); 5222 Register X = FMA->getOperand(1).getReg(); 5223 Register Y = FMA->getOperand(2).getReg(); 5224 Register U = FMulMI->getOperand(1).getReg(); 5225 Register V = FMulMI->getOperand(2).getReg(); 5226 5227 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5228 Register InnerFMA = MRI.createGenericVirtualRegister(DstTy); 5229 B.buildInstr(PreferredFusedOpcode, {InnerFMA}, {U, V, Z}); 5230 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5231 {X, Y, InnerFMA}); 5232 }; 5233 return true; 5234 } 5235 5236 return false; 5237 } 5238 5239 bool CombinerHelper::matchCombineFAddFpExtFMulToFMadOrFMAAggressive( 5240 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5241 assert(MI.getOpcode() == TargetOpcode::G_FADD); 5242 5243 bool AllowFusionGlobally, HasFMAD, Aggressive; 5244 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5245 return false; 5246 5247 if (!Aggressive) 5248 return false; 5249 5250 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering(); 5251 LLT DstType = MRI.getType(MI.getOperand(0).getReg()); 5252 Register Op1 = MI.getOperand(1).getReg(); 5253 Register Op2 = MI.getOperand(2).getReg(); 5254 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1}; 5255 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2}; 5256 5257 unsigned PreferredFusedOpcode = 5258 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5259 5260 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 5261 // prefer to fold the multiply with fewer uses. 5262 if (Aggressive && isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5263 isContractableFMul(*RHS.MI, AllowFusionGlobally)) { 5264 if (hasMoreUses(*LHS.MI, *RHS.MI, MRI)) 5265 std::swap(LHS, RHS); 5266 } 5267 5268 // Builds: (fma x, y, (fma (fpext u), (fpext v), z)) 5269 auto buildMatchInfo = [=, &MI](Register U, Register V, Register Z, Register X, 5270 Register Y, MachineIRBuilder &B) { 5271 Register FpExtU = B.buildFPExt(DstType, U).getReg(0); 5272 Register FpExtV = B.buildFPExt(DstType, V).getReg(0); 5273 Register InnerFMA = 5274 B.buildInstr(PreferredFusedOpcode, {DstType}, {FpExtU, FpExtV, Z}) 5275 .getReg(0); 5276 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5277 {X, Y, InnerFMA}); 5278 }; 5279 5280 MachineInstr *FMulMI, *FMAMI; 5281 // fold (fadd (fma x, y, (fpext (fmul u, v))), z) 5282 // -> (fma x, y, (fma (fpext u), (fpext v), z)) 5283 if (LHS.MI->getOpcode() == PreferredFusedOpcode && 5284 mi_match(LHS.MI->getOperand(3).getReg(), MRI, 5285 m_GFPExt(m_MInstr(FMulMI))) && 5286 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5287 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5288 MRI.getType(FMulMI->getOperand(0).getReg()))) { 5289 MatchInfo = [=](MachineIRBuilder &B) { 5290 buildMatchInfo(FMulMI->getOperand(1).getReg(), 5291 FMulMI->getOperand(2).getReg(), RHS.Reg, 5292 LHS.MI->getOperand(1).getReg(), 5293 LHS.MI->getOperand(2).getReg(), B); 5294 }; 5295 return true; 5296 } 5297 5298 // fold (fadd (fpext (fma x, y, (fmul u, v))), z) 5299 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z)) 5300 // FIXME: This turns two single-precision and one double-precision 5301 // operation into two double-precision operations, which might not be 5302 // interesting for all targets, especially GPUs. 5303 if (mi_match(LHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) && 5304 FMAMI->getOpcode() == PreferredFusedOpcode) { 5305 MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg()); 5306 if (isContractableFMul(*FMulMI, AllowFusionGlobally) && 5307 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5308 MRI.getType(FMAMI->getOperand(0).getReg()))) { 5309 MatchInfo = [=](MachineIRBuilder &B) { 5310 Register X = FMAMI->getOperand(1).getReg(); 5311 Register Y = FMAMI->getOperand(2).getReg(); 5312 X = B.buildFPExt(DstType, X).getReg(0); 5313 Y = B.buildFPExt(DstType, Y).getReg(0); 5314 buildMatchInfo(FMulMI->getOperand(1).getReg(), 5315 FMulMI->getOperand(2).getReg(), RHS.Reg, X, Y, B); 5316 }; 5317 5318 return true; 5319 } 5320 } 5321 5322 // fold (fadd z, (fma x, y, (fpext (fmul u, v))) 5323 // -> (fma x, y, (fma (fpext u), (fpext v), z)) 5324 if (RHS.MI->getOpcode() == PreferredFusedOpcode && 5325 mi_match(RHS.MI->getOperand(3).getReg(), MRI, 5326 m_GFPExt(m_MInstr(FMulMI))) && 5327 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5328 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5329 MRI.getType(FMulMI->getOperand(0).getReg()))) { 5330 MatchInfo = [=](MachineIRBuilder &B) { 5331 buildMatchInfo(FMulMI->getOperand(1).getReg(), 5332 FMulMI->getOperand(2).getReg(), LHS.Reg, 5333 RHS.MI->getOperand(1).getReg(), 5334 RHS.MI->getOperand(2).getReg(), B); 5335 }; 5336 return true; 5337 } 5338 5339 // fold (fadd z, (fpext (fma x, y, (fmul u, v))) 5340 // -> (fma (fpext x), (fpext y), (fma (fpext u), (fpext v), z)) 5341 // FIXME: This turns two single-precision and one double-precision 5342 // operation into two double-precision operations, which might not be 5343 // interesting for all targets, especially GPUs. 5344 if (mi_match(RHS.Reg, MRI, m_GFPExt(m_MInstr(FMAMI))) && 5345 FMAMI->getOpcode() == PreferredFusedOpcode) { 5346 MachineInstr *FMulMI = MRI.getVRegDef(FMAMI->getOperand(3).getReg()); 5347 if (isContractableFMul(*FMulMI, AllowFusionGlobally) && 5348 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstType, 5349 MRI.getType(FMAMI->getOperand(0).getReg()))) { 5350 MatchInfo = [=](MachineIRBuilder &B) { 5351 Register X = FMAMI->getOperand(1).getReg(); 5352 Register Y = FMAMI->getOperand(2).getReg(); 5353 X = B.buildFPExt(DstType, X).getReg(0); 5354 Y = B.buildFPExt(DstType, Y).getReg(0); 5355 buildMatchInfo(FMulMI->getOperand(1).getReg(), 5356 FMulMI->getOperand(2).getReg(), LHS.Reg, X, Y, B); 5357 }; 5358 return true; 5359 } 5360 } 5361 5362 return false; 5363 } 5364 5365 bool CombinerHelper::matchCombineFSubFMulToFMadOrFMA( 5366 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5367 assert(MI.getOpcode() == TargetOpcode::G_FSUB); 5368 5369 bool AllowFusionGlobally, HasFMAD, Aggressive; 5370 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5371 return false; 5372 5373 Register Op1 = MI.getOperand(1).getReg(); 5374 Register Op2 = MI.getOperand(2).getReg(); 5375 DefinitionAndSourceRegister LHS = {MRI.getVRegDef(Op1), Op1}; 5376 DefinitionAndSourceRegister RHS = {MRI.getVRegDef(Op2), Op2}; 5377 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 5378 5379 // If we have two choices trying to fold (fadd (fmul u, v), (fmul x, y)), 5380 // prefer to fold the multiply with fewer uses. 5381 int FirstMulHasFewerUses = true; 5382 if (isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5383 isContractableFMul(*RHS.MI, AllowFusionGlobally) && 5384 hasMoreUses(*LHS.MI, *RHS.MI, MRI)) 5385 FirstMulHasFewerUses = false; 5386 5387 unsigned PreferredFusedOpcode = 5388 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5389 5390 // fold (fsub (fmul x, y), z) -> (fma x, y, -z) 5391 if (FirstMulHasFewerUses && 5392 (isContractableFMul(*LHS.MI, AllowFusionGlobally) && 5393 (Aggressive || MRI.hasOneNonDBGUse(LHS.Reg)))) { 5394 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5395 Register NegZ = B.buildFNeg(DstTy, RHS.Reg).getReg(0); 5396 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5397 {LHS.MI->getOperand(1).getReg(), 5398 LHS.MI->getOperand(2).getReg(), NegZ}); 5399 }; 5400 return true; 5401 } 5402 // fold (fsub x, (fmul y, z)) -> (fma -y, z, x) 5403 else if ((isContractableFMul(*RHS.MI, AllowFusionGlobally) && 5404 (Aggressive || MRI.hasOneNonDBGUse(RHS.Reg)))) { 5405 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5406 Register NegY = 5407 B.buildFNeg(DstTy, RHS.MI->getOperand(1).getReg()).getReg(0); 5408 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5409 {NegY, RHS.MI->getOperand(2).getReg(), LHS.Reg}); 5410 }; 5411 return true; 5412 } 5413 5414 return false; 5415 } 5416 5417 bool CombinerHelper::matchCombineFSubFNegFMulToFMadOrFMA( 5418 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5419 assert(MI.getOpcode() == TargetOpcode::G_FSUB); 5420 5421 bool AllowFusionGlobally, HasFMAD, Aggressive; 5422 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5423 return false; 5424 5425 Register LHSReg = MI.getOperand(1).getReg(); 5426 Register RHSReg = MI.getOperand(2).getReg(); 5427 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 5428 5429 unsigned PreferredFusedOpcode = 5430 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5431 5432 MachineInstr *FMulMI; 5433 // fold (fsub (fneg (fmul x, y)), z) -> (fma (fneg x), y, (fneg z)) 5434 if (mi_match(LHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) && 5435 (Aggressive || (MRI.hasOneNonDBGUse(LHSReg) && 5436 MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) && 5437 isContractableFMul(*FMulMI, AllowFusionGlobally)) { 5438 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5439 Register NegX = 5440 B.buildFNeg(DstTy, FMulMI->getOperand(1).getReg()).getReg(0); 5441 Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0); 5442 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5443 {NegX, FMulMI->getOperand(2).getReg(), NegZ}); 5444 }; 5445 return true; 5446 } 5447 5448 // fold (fsub x, (fneg (fmul, y, z))) -> (fma y, z, x) 5449 if (mi_match(RHSReg, MRI, m_GFNeg(m_MInstr(FMulMI))) && 5450 (Aggressive || (MRI.hasOneNonDBGUse(RHSReg) && 5451 MRI.hasOneNonDBGUse(FMulMI->getOperand(0).getReg()))) && 5452 isContractableFMul(*FMulMI, AllowFusionGlobally)) { 5453 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5454 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5455 {FMulMI->getOperand(1).getReg(), 5456 FMulMI->getOperand(2).getReg(), LHSReg}); 5457 }; 5458 return true; 5459 } 5460 5461 return false; 5462 } 5463 5464 bool CombinerHelper::matchCombineFSubFpExtFMulToFMadOrFMA( 5465 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5466 assert(MI.getOpcode() == TargetOpcode::G_FSUB); 5467 5468 bool AllowFusionGlobally, HasFMAD, Aggressive; 5469 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5470 return false; 5471 5472 Register LHSReg = MI.getOperand(1).getReg(); 5473 Register RHSReg = MI.getOperand(2).getReg(); 5474 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 5475 5476 unsigned PreferredFusedOpcode = 5477 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5478 5479 MachineInstr *FMulMI; 5480 // fold (fsub (fpext (fmul x, y)), z) -> (fma (fpext x), (fpext y), (fneg z)) 5481 if (mi_match(LHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) && 5482 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5483 (Aggressive || MRI.hasOneNonDBGUse(LHSReg))) { 5484 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5485 Register FpExtX = 5486 B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0); 5487 Register FpExtY = 5488 B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0); 5489 Register NegZ = B.buildFNeg(DstTy, RHSReg).getReg(0); 5490 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5491 {FpExtX, FpExtY, NegZ}); 5492 }; 5493 return true; 5494 } 5495 5496 // fold (fsub x, (fpext (fmul y, z))) -> (fma (fneg (fpext y)), (fpext z), x) 5497 if (mi_match(RHSReg, MRI, m_GFPExt(m_MInstr(FMulMI))) && 5498 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5499 (Aggressive || MRI.hasOneNonDBGUse(RHSReg))) { 5500 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5501 Register FpExtY = 5502 B.buildFPExt(DstTy, FMulMI->getOperand(1).getReg()).getReg(0); 5503 Register NegY = B.buildFNeg(DstTy, FpExtY).getReg(0); 5504 Register FpExtZ = 5505 B.buildFPExt(DstTy, FMulMI->getOperand(2).getReg()).getReg(0); 5506 B.buildInstr(PreferredFusedOpcode, {MI.getOperand(0).getReg()}, 5507 {NegY, FpExtZ, LHSReg}); 5508 }; 5509 return true; 5510 } 5511 5512 return false; 5513 } 5514 5515 bool CombinerHelper::matchCombineFSubFpExtFNegFMulToFMadOrFMA( 5516 MachineInstr &MI, std::function<void(MachineIRBuilder &)> &MatchInfo) { 5517 assert(MI.getOpcode() == TargetOpcode::G_FSUB); 5518 5519 bool AllowFusionGlobally, HasFMAD, Aggressive; 5520 if (!canCombineFMadOrFMA(MI, AllowFusionGlobally, HasFMAD, Aggressive)) 5521 return false; 5522 5523 const auto &TLI = *MI.getMF()->getSubtarget().getTargetLowering(); 5524 LLT DstTy = MRI.getType(MI.getOperand(0).getReg()); 5525 Register LHSReg = MI.getOperand(1).getReg(); 5526 Register RHSReg = MI.getOperand(2).getReg(); 5527 5528 unsigned PreferredFusedOpcode = 5529 HasFMAD ? TargetOpcode::G_FMAD : TargetOpcode::G_FMA; 5530 5531 auto buildMatchInfo = [=](Register Dst, Register X, Register Y, Register Z, 5532 MachineIRBuilder &B) { 5533 Register FpExtX = B.buildFPExt(DstTy, X).getReg(0); 5534 Register FpExtY = B.buildFPExt(DstTy, Y).getReg(0); 5535 B.buildInstr(PreferredFusedOpcode, {Dst}, {FpExtX, FpExtY, Z}); 5536 }; 5537 5538 MachineInstr *FMulMI; 5539 // fold (fsub (fpext (fneg (fmul x, y))), z) -> 5540 // (fneg (fma (fpext x), (fpext y), z)) 5541 // fold (fsub (fneg (fpext (fmul x, y))), z) -> 5542 // (fneg (fma (fpext x), (fpext y), z)) 5543 if ((mi_match(LHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) || 5544 mi_match(LHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) && 5545 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5546 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy, 5547 MRI.getType(FMulMI->getOperand(0).getReg()))) { 5548 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5549 Register FMAReg = MRI.createGenericVirtualRegister(DstTy); 5550 buildMatchInfo(FMAReg, FMulMI->getOperand(1).getReg(), 5551 FMulMI->getOperand(2).getReg(), RHSReg, B); 5552 B.buildFNeg(MI.getOperand(0).getReg(), FMAReg); 5553 }; 5554 return true; 5555 } 5556 5557 // fold (fsub x, (fpext (fneg (fmul y, z)))) -> (fma (fpext y), (fpext z), x) 5558 // fold (fsub x, (fneg (fpext (fmul y, z)))) -> (fma (fpext y), (fpext z), x) 5559 if ((mi_match(RHSReg, MRI, m_GFPExt(m_GFNeg(m_MInstr(FMulMI)))) || 5560 mi_match(RHSReg, MRI, m_GFNeg(m_GFPExt(m_MInstr(FMulMI))))) && 5561 isContractableFMul(*FMulMI, AllowFusionGlobally) && 5562 TLI.isFPExtFoldable(MI, PreferredFusedOpcode, DstTy, 5563 MRI.getType(FMulMI->getOperand(0).getReg()))) { 5564 MatchInfo = [=, &MI](MachineIRBuilder &B) { 5565 buildMatchInfo(MI.getOperand(0).getReg(), FMulMI->getOperand(1).getReg(), 5566 FMulMI->getOperand(2).getReg(), LHSReg, B); 5567 }; 5568 return true; 5569 } 5570 5571 return false; 5572 } 5573 5574 bool CombinerHelper::matchSelectToLogical(MachineInstr &MI, 5575 BuildFnTy &MatchInfo) { 5576 GSelect &Sel = cast<GSelect>(MI); 5577 Register DstReg = Sel.getReg(0); 5578 Register Cond = Sel.getCondReg(); 5579 Register TrueReg = Sel.getTrueReg(); 5580 Register FalseReg = Sel.getFalseReg(); 5581 5582 auto *TrueDef = getDefIgnoringCopies(TrueReg, MRI); 5583 auto *FalseDef = getDefIgnoringCopies(FalseReg, MRI); 5584 5585 const LLT CondTy = MRI.getType(Cond); 5586 const LLT OpTy = MRI.getType(TrueReg); 5587 if (CondTy != OpTy || OpTy.getScalarSizeInBits() != 1) 5588 return false; 5589 5590 // We have a boolean select. 5591 5592 // select Cond, Cond, F --> or Cond, F 5593 // select Cond, 1, F --> or Cond, F 5594 auto MaybeCstTrue = isConstantOrConstantSplatVector(*TrueDef, MRI); 5595 if (Cond == TrueReg || (MaybeCstTrue && MaybeCstTrue->isOne())) { 5596 MatchInfo = [=](MachineIRBuilder &MIB) { 5597 MIB.buildOr(DstReg, Cond, FalseReg); 5598 }; 5599 return true; 5600 } 5601 5602 // select Cond, T, Cond --> and Cond, T 5603 // select Cond, T, 0 --> and Cond, T 5604 auto MaybeCstFalse = isConstantOrConstantSplatVector(*FalseDef, MRI); 5605 if (Cond == FalseReg || (MaybeCstFalse && MaybeCstFalse->isZero())) { 5606 MatchInfo = [=](MachineIRBuilder &MIB) { 5607 MIB.buildAnd(DstReg, Cond, TrueReg); 5608 }; 5609 return true; 5610 } 5611 5612 // select Cond, T, 1 --> or (not Cond), T 5613 if (MaybeCstFalse && MaybeCstFalse->isOne()) { 5614 MatchInfo = [=](MachineIRBuilder &MIB) { 5615 MIB.buildOr(DstReg, MIB.buildNot(OpTy, Cond), TrueReg); 5616 }; 5617 return true; 5618 } 5619 5620 // select Cond, 0, F --> and (not Cond), F 5621 if (MaybeCstTrue && MaybeCstTrue->isZero()) { 5622 MatchInfo = [=](MachineIRBuilder &MIB) { 5623 MIB.buildAnd(DstReg, MIB.buildNot(OpTy, Cond), FalseReg); 5624 }; 5625 return true; 5626 } 5627 return false; 5628 } 5629 5630 bool CombinerHelper::matchCombineFMinMaxNaN(MachineInstr &MI, 5631 unsigned &IdxToPropagate) { 5632 bool PropagateNaN; 5633 switch (MI.getOpcode()) { 5634 default: 5635 return false; 5636 case TargetOpcode::G_FMINNUM: 5637 case TargetOpcode::G_FMAXNUM: 5638 PropagateNaN = false; 5639 break; 5640 case TargetOpcode::G_FMINIMUM: 5641 case TargetOpcode::G_FMAXIMUM: 5642 PropagateNaN = true; 5643 break; 5644 } 5645 5646 auto MatchNaN = [&](unsigned Idx) { 5647 Register MaybeNaNReg = MI.getOperand(Idx).getReg(); 5648 const ConstantFP *MaybeCst = getConstantFPVRegVal(MaybeNaNReg, MRI); 5649 if (!MaybeCst || !MaybeCst->getValueAPF().isNaN()) 5650 return false; 5651 IdxToPropagate = PropagateNaN ? Idx : (Idx == 1 ? 2 : 1); 5652 return true; 5653 }; 5654 5655 return MatchNaN(1) || MatchNaN(2); 5656 } 5657 5658 bool CombinerHelper::matchAddSubSameReg(MachineInstr &MI, Register &Src) { 5659 assert(MI.getOpcode() == TargetOpcode::G_ADD && "Expected a G_ADD"); 5660 Register LHS = MI.getOperand(1).getReg(); 5661 Register RHS = MI.getOperand(2).getReg(); 5662 5663 // Helper lambda to check for opportunities for 5664 // A + (B - A) -> B 5665 // (B - A) + A -> B 5666 auto CheckFold = [&](Register MaybeSub, Register MaybeSameReg) { 5667 Register Reg; 5668 return mi_match(MaybeSub, MRI, m_GSub(m_Reg(Src), m_Reg(Reg))) && 5669 Reg == MaybeSameReg; 5670 }; 5671 return CheckFold(LHS, RHS) || CheckFold(RHS, LHS); 5672 } 5673 5674 bool CombinerHelper::tryCombine(MachineInstr &MI) { 5675 if (tryCombineCopy(MI)) 5676 return true; 5677 if (tryCombineExtendingLoads(MI)) 5678 return true; 5679 if (tryCombineIndexedLoadStore(MI)) 5680 return true; 5681 return false; 5682 } 5683