//===-- PeepholeOptimizer.cpp - Peephole Optimizations -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Perform peephole optimizations on the machine code:
//
// - Optimize Extensions
//
//     Optimization of sign / zero extension instructions. It may be extended
//     to handle other instructions with similar properties.
//
//     On some targets, some instructions, e.g. X86 sign / zero extension, may
//     leave the source value in the lower part of the result. This
//     optimization will replace some uses of the pre-extension value with
//     uses of the sub-register of the result.
//
// - Optimize Comparisons
//
//     Optimization of comparison instructions. For instance, in this code:
//
//       sub r1, 1
//       cmp r1, 0
//       bz  L1
//
//     If the "sub" instruction already sets (or could be modified to set) the
//     same flag that the "cmp" instruction sets and that "bz" uses, then we
//     can eliminate the "cmp" instruction.
//
//     Another example, in this code:
//
//       sub r1, r3 | sub r1, imm
//       cmp r3, r1 or cmp r1, r3 | cmp r1, imm
//       bge L1
//
//     If the branch instruction can use the flag from "sub", then we can
//     replace "sub" with "subs" and eliminate the "cmp" instruction.
//
// - Optimize Loads:
//
//     Loads that can be folded into a later instruction. A load is foldable
//     if it defines a virtual register and that register has a single use.
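//
//     E.g., on x86 (illustrative opcodes; the folding itself is performed by
//     the target's optimizeLoadInstr hook, and <mem> stands for some memory
//     operand):
//
//       %reg1 = MOV32rm <mem>
//       %reg2 = ADD32rr %reg0, %reg1
//     =>
//       %reg2 = ADD32rm %reg0, <mem>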
//
// - Optimize Copies and Bitcasts:
//
//     Rewrite copies and bitcasts to avoid cross register bank copies
//     when possible.
//     E.g., consider the following example, where capital and lowercase
//     letters denote different register files:
//       b = copy A    <-- cross-bank copy
//       C = copy b    <-- cross-bank copy
//     =>
//       b = copy A    <-- cross-bank copy
//       C = copy A    <-- same-bank copy
//
//     E.g., for bitcasts:
//       b = bitcast A <-- cross-bank copy
//       C = bitcast b <-- cross-bank copy
//     =>
//       b = bitcast A <-- cross-bank copy
//       C = copy A    <-- same-bank copy
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "peephole-opt"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

// Optimize Extensions
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
           cl::desc("Aggressive extension optimization"));

static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
                cl::desc("Disable the peephole optimizer"));

STATISTIC(NumReuse,          "Number of extension results reused");
STATISTIC(NumCmps,           "Number of compares eliminated");
STATISTIC(NumImmFold,        "Number of move immediates folded");
STATISTIC(NumLoadFold,       "Number of loads folded");
STATISTIC(NumSelects,        "Number of selects optimized");
STATISTIC(NumCopiesBitcasts, "Number of copies/bitcasts optimized");

namespace {
  class PeepholeOptimizer : public MachineFunctionPass {
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    MachineRegisterInfo   *MRI;
    MachineDominatorTree  *DT;  // Machine dominator tree

  public:
    static char ID; // Pass identification
    PeepholeOptimizer() : MachineFunctionPass(ID) {
      initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      if (Aggressive) {
        AU.addRequired<MachineDominatorTree>();
        AU.addPreserved<MachineDominatorTree>();
      }
    }

  private:
    bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                          SmallPtrSet<MachineInstr*, 8> &LocalMIs);
    bool optimizeSelect(MachineInstr *MI);
    bool optimizeCopyOrBitcast(MachineInstr *MI);
    bool isMoveImmediate(MachineInstr *MI,
                         SmallSet<unsigned, 4> &ImmDefRegs,
                         DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                       SmallSet<unsigned, 4> &ImmDefRegs,
                       DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool isLoadFoldable(MachineInstr *MI, unsigned &FoldAsLoadDefReg);
  };
}

char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
                      "Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
                    "Peephole Optimizations", false, false)

/// optimizeExtInstr - If the instruction is a copy-like instruction, i.e. it
/// reads a single register and writes a single register and it does not
/// modify the source, and if the source value is preserved as a sub-register
/// of the result, then replace all reachable uses of the source with the
/// subreg of the result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this
/// changes the code. Since this code does not currently share EXTRACTs, just
/// ignore all debug uses.
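///
/// For example, in the notation used by the comments below, where :4 names a
/// sub-register index (illustrative register numbers):
///   %reg1025 = <sext> %reg1024
///   ...
///            = use %reg1024
/// =>
///   %reg1025 = <sext> %reg1024
///   %reg1027 = COPY %reg1025:4
///            = use %reg1027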
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;

  // The source has other uses. See if we can replace them with uses of the
  // result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB as uses of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineRegisterInfo::use_nodbg_iterator
       UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in
      // order to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend the live
      // range of the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result; we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions downstream.
    // A PHI use is expected to be the kill of its source values.
    for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
         UI != UE; ++UI)
      if (UI->isPHI())
        PHIBBs.insert(UI->getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed) {
        MRI->clearKillFlags(DstReg);
        MRI->constrainRegClass(DstReg, DstRC);
      }

      unsigned NewVR = MRI->createVirtualRegister(RC);
      MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);
      // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
      if (UseSrcSubIdx) {
        Copy->getOperand(0).setSubReg(SubIdx);
        Copy->getOperand(0).setIsUndef();
      }
      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}

/// optimizeCmpInstr - If the instruction is a compare and the previous
/// instruction it's comparing against already sets (or could be modified to
/// set) the same flag as the compare, then we can remove the comparison and
/// use the flag from the previous instruction.
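///
/// For example (illustrative; the actual analysis and rewrite are performed
/// by the target's analyzeCompare and optimizeCompareInstr hooks):
///   subs r1, r1, 1   ; already sets the condition flags
///   cmp  r1, 0
///   bz   L1
/// =>
///   subs r1, r1, 1
///   bz   L1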
bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
                                         MachineBasicBlock *MBB) {
  // If this instruction is a comparison against zero and isn't comparing a
  // physical register, we can try to optimize it.
  unsigned SrcReg, SrcReg2;
  int CmpMask, CmpValue;
  if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
      (SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
    return false;

  // Attempt to optimize the comparison instruction.
  if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
    ++NumCmps;
    return true;
  }

  return false;
}

/// Optimize a select instruction.
bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI) {
  unsigned TrueOp = 0;
  unsigned FalseOp = 0;
  bool Optimizable = false;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable))
    return false;
  if (!Optimizable)
    return false;
  if (!TII->optimizeSelect(MI))
    return false;
  MI->eraseFromParent();
  ++NumSelects;
  return true;
}

/// \brief Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                  const TargetRegisterClass *DefRC,
                                  unsigned DefSubReg,
                                  const TargetRegisterClass *SrcRC,
                                  unsigned SrcSubReg) {
  // Same register class.
  if (DefRC == SrcRC)
    return true;

  // Both operands are sub-registers. Check if they share a register class.
  unsigned SrcIdx, DefIdx;
  if (SrcSubReg && DefSubReg)
    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
                                      SrcIdx, DefIdx) != NULL;
  // At most one of the registers is a sub-register. Make it Src to avoid
  // duplicating the test.
  if (!SrcSubReg) {
    std::swap(DefSubReg, SrcSubReg);
    std::swap(DefRC, SrcRC);
  }

  // One of the registers is a sub-register. Check if we can get a
  // super-class.
  if (SrcSubReg)
    return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != NULL;
  // Plain copy.
  return TRI.getCommonSubClass(DefRC, SrcRC) != NULL;
}

/// \brief Get the index of the definition and source for \p Copy
/// instruction.
/// \pre Copy.isCopy() or Copy.isBitcast().
/// \return True if the Copy instruction has only one register source
/// and one register definition. Otherwise, \p DefIdx and \p SrcIdx
/// are invalid.
static bool getCopyOrBitcastDefUseIdx(const MachineInstr &Copy,
                                      unsigned &DefIdx, unsigned &SrcIdx) {
  assert((Copy.isCopy() || Copy.isBitcast()) && "Wrong operation type.");
  if (Copy.isCopy()) {
    // Copy instructions are supposed to be: Def = Src.
    if (Copy.getDesc().getNumOperands() != 2)
      return false;
    DefIdx = 0;
    SrcIdx = 1;
    assert(Copy.getOperand(DefIdx).isDef() && "Use comes before def!");
    return true;
  }
  // Bitcast case.
  // Bitcasts with more than one def are not supported.
  if (Copy.getDesc().getNumDefs() != 1)
    return false;
  // Initialize SrcIdx to an undefined operand.
  SrcIdx = Copy.getDesc().getNumOperands();
  for (unsigned OpIdx = 0, EndOpIdx = SrcIdx; OpIdx != EndOpIdx; ++OpIdx) {
    const MachineOperand &MO = Copy.getOperand(OpIdx);
    if (!MO.isReg() || !MO.getReg())
      continue;
    if (MO.isDef())
      DefIdx = OpIdx;
    else {
      // Multiple sources?
      if (SrcIdx != EndOpIdx)
        return false;
      SrcIdx = OpIdx;
    }
  }
  return true;
}

/// \brief Optimize a copy or bitcast instruction to avoid cross
/// register bank copies. The optimization looks through a chain of
/// copies and tries to find a source that has a compatible register
/// class.
/// Two register classes are considered to be compatible if they share
/// the same register bank.
/// New copies issued by this optimization are register allocator
/// friendly. This optimization does not remove any copy, as that may
/// over-constrain the register allocator; instead, it rewrites some
/// copies when possible.
/// \pre \p MI is a Copy or a Bitcast (MI->isCopy() or MI->isBitcast()
/// is true).
/// \return True, when \p MI has been optimized. In that case, \p MI has
/// been removed from its parent.
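///
/// For example, where A0/B0 live in one register bank and x1 in another
/// (illustrative):
///   x1 = COPY A0   <-- cross-bank copy
///   B0 = COPY x1   <-- cross-bank copy
/// is rewritten as:
///   x1 = COPY A0   <-- cross-bank copy; becomes dead if x1 has no
///                      other use, and is left to later cleanup
///   B0 = COPY A0   <-- same-bank copy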
bool PeepholeOptimizer::optimizeCopyOrBitcast(MachineInstr *MI) {
  unsigned DefIdx, SrcIdx;
  if (!MI || !getCopyOrBitcastDefUseIdx(*MI, DefIdx, SrcIdx))
    return false;

  const MachineOperand &MODef = MI->getOperand(DefIdx);
  assert(MODef.isReg() && "Copies must be between registers.");
  unsigned Def = MODef.getReg();

  if (TargetRegisterInfo::isPhysicalRegister(Def))
    return false;

  const TargetRegisterClass *DefRC = MRI->getRegClass(Def);
  unsigned DefSubReg = MODef.getSubReg();

  unsigned Src;
  unsigned SrcSubReg;
  bool ShouldRewrite = false;
  MachineInstr *Copy = MI;
  const TargetRegisterInfo &TRI = *TM->getRegisterInfo();

  // Follow the chain of copies until we reach the top of the chain or find a
  // more suitable source.
  do {
    unsigned CopyDefIdx, CopySrcIdx;
    if (!getCopyOrBitcastDefUseIdx(*Copy, CopyDefIdx, CopySrcIdx))
      break;
    const MachineOperand &MO = Copy->getOperand(CopySrcIdx);
    assert(MO.isReg() && "Copies must be between registers.");
    Src = MO.getReg();

    if (TargetRegisterInfo::isPhysicalRegister(Src))
      break;

    const TargetRegisterClass *SrcRC = MRI->getRegClass(Src);
    SrcSubReg = MO.getSubReg();

    // If this source does not incur a cross register bank copy, use it.
    ShouldRewrite = shareSameRegisterFile(TRI, DefRC, DefSubReg, SrcRC,
                                          SrcSubReg);
    // Follow the chain of copies: get the definition of Src.
    Copy = MRI->getVRegDef(Src);
  } while (!ShouldRewrite && Copy && (Copy->isCopy() || Copy->isBitcast()));

  // If we did not find a more suitable source, there is nothing to optimize.
  if (!ShouldRewrite || Src == MI->getOperand(SrcIdx).getReg())
    return false;

  // Rewrite the copy to avoid a cross register bank penalty.
  unsigned NewVR = TargetRegisterInfo::isPhysicalRegister(Def) ? Def :
    MRI->createVirtualRegister(DefRC);
  MachineInstr *NewCopy = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                                  TII->get(TargetOpcode::COPY), NewVR)
    .addReg(Src, 0, SrcSubReg);
  NewCopy->getOperand(0).setSubReg(DefSubReg);

  MRI->replaceRegWith(Def, NewVR);
  MRI->clearKillFlags(NewVR);
  MI->eraseFromParent();
  ++NumCopiesBitcasts;
  return true;
}

/// isLoadFoldable - Check whether MI is a candidate for folding into a later
/// instruction. We only fold loads that define a virtual register, and only
/// when that register has a single use.
bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
                                       unsigned &FoldAsLoadDefReg) {
  if (!MI->canFoldAsLoad() || !MI->mayLoad())
    return false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getNumDefs() != 1)
    return false;

  unsigned Reg = MI->getOperand(0).getReg();
  // To reduce compilation time, we check MRI->hasOneUse when inserting load
  // candidates. Strictly speaking, it should be re-checked when processing
  // the uses of the load, since uses can be removed during the peephole pass.
  if (!MI->getOperand(0).getSubReg() &&
      TargetRegisterInfo::isVirtualRegister(Reg) &&
      MRI->hasOneUse(Reg)) {
    FoldAsLoadDefReg = Reg;
    return true;
  }
  return false;
}

bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isMoveImmediate())
    return false;
  if (MCID.getNumDefs() != 1)
    return false;
  unsigned Reg = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    ImmDefMIs.insert(std::make_pair(Reg, MI));
    ImmDefRegs.insert(Reg);
    return true;
  }

  return false;
}

/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, but
/// only when the def and the use are in the same BB.
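///
/// For example (illustrative x86-like machine code; the rewrite is performed
/// by the target's FoldImmediate hook):
///   %reg1 = MOV32ri 7
///   %reg2 = ADD32rr %reg0, %reg1
/// =>
///   %reg2 = ADD32ri %reg0, 7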
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                                      SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (ImmDefRegs.count(Reg) == 0)
      continue;
    DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
    assert(II != ImmDefMIs.end() && "couldn't find immediate definition");
    if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
      ++NumImmFold;
      return true;
    }
  }
  return false;
}

bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
  DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');

  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  SmallSet<unsigned, 4> ImmDefRegs;
  DenseMap<unsigned, MachineInstr*> ImmDefMIs;
  unsigned FoldAsLoadDefReg;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    LocalMIs.clear();
    ImmDefRegs.clear();
    ImmDefMIs.clear();
    FoldAsLoadDefReg = 0;

    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      // We may be erasing MI below, increment MII now.
      ++MII;
      LocalMIs.insert(MI);

      // If we run into an instruction in one of the following categories,
      // discard the current load candidate.
      if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
          MI->hasUnmodeledSideEffects()) {
        FoldAsLoadDefReg = 0;
        continue;
      }
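      // A store or a call may clobber the memory that the candidate load
      // reads. Folding the load into a later user would move the actual
      // memory read across this instruction, so drop the candidate.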
      if (MI->mayStore() || MI->isCall())
        FoldAsLoadDefReg = 0;

      if (((MI->isBitcast() || MI->isCopy()) && optimizeCopyOrBitcast(MI)) ||
          (MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
          (MI->isSelect() && optimizeSelect(MI))) {
        // MI is deleted.
        LocalMIs.erase(MI);
        Changed = true;
        continue;
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        // optimizeExtInstr might have created new instructions after MI
        // and before the already incremented MII. Adjust MII so that the
        // next iteration sees the new instructions.
        MII = MI;
        ++MII;
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      // Check whether MI is a load candidate for folding into a later
      // instruction. If MI is not a candidate, check whether we can fold an
      // earlier load into MI.
      if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
        // We need to fold the load after optimizeCmpInstr, since
        // optimizeCmpInstr can enable folding by converting SUB to CMP.
        MachineInstr *DefMI = 0;
        MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
                                                      FoldAsLoadDefReg, DefMI);
        if (FoldMI) {
          // Update LocalMIs since we replaced MI with FoldMI and deleted
          // DefMI.
          DEBUG(dbgs() << "Replacing: " << *MI);
          DEBUG(dbgs() << "     With: " << *FoldMI);
          LocalMIs.erase(MI);
          LocalMIs.erase(DefMI);
          LocalMIs.insert(FoldMI);
          MI->eraseFromParent();
          DefMI->eraseFromParent();
          ++NumLoadFold;

          // MI is replaced with FoldMI.
          Changed = true;
          continue;
        }
      }
    }
  }

  return Changed;
}