//===- X86OptimizeLEAs.cpp - optimize usage of LEA instructions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the pass that performs some optimizations with LEA
// instructions in order to improve performance and code size.
// Currently, it does two things:
// 1) If there are two LEA instructions inside a basic block calculating
//    addresses which differ only by displacement, one of them is removed.
// 2) Address calculations in load and store instructions are replaced by
//    existing LEA def registers where possible.
//
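// For illustration only (a hypothetical sketch in simplified MIR-like
// notation, not taken from an actual test case): given
//
//   %1 = LEA64r %base, 1, %idx, 16, $noreg
//   %2 = LEA64r %base, 1, %idx, 24, $noreg
//   MOV64mr %2, 1, $noreg, 0, $noreg, %val
//
// the second LEA differs from the first only by displacement, so it is
// removed and its user is rewritten against %1 with the displacement shifted
// by 8:
//
//   %1 = LEA64r %base, 1, %idx, 16, $noreg
//   MOV64mr %1, 1, $noreg, 8, $noreg, %val
//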
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/LazyMachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineSizeOpts.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define DEBUG_TYPE "x86-optimize-LEAs"

static cl::opt<bool>
    DisableX86LEAOpt("disable-x86-lea-opt", cl::Hidden,
                     cl::desc("X86: Disable LEA optimizations."),
                     cl::init(false));

STATISTIC(NumSubstLEAs, "Number of LEA instruction substitutions");
STATISTIC(NumRedundantLEAs, "Number of redundant LEA instructions removed");

/// Returns true if two machine operands are identical and they are not
/// physical registers.
static inline bool isIdenticalOp(const MachineOperand &MO1,
                                 const MachineOperand &MO2);

/// Returns true if two address displacement operands are of the same
/// type and use the same symbol/index/address regardless of the offset.
static bool isSimilarDispOp(const MachineOperand &MO1,
                            const MachineOperand &MO2);

/// Returns true if the instruction is LEA.
static inline bool isLEA(const MachineInstr &MI);
namespace {

/// A key based on instruction's memory operands.
class MemOpKey {
public:
  MemOpKey(const MachineOperand *Base, const MachineOperand *Scale,
           const MachineOperand *Index, const MachineOperand *Segment,
           const MachineOperand *Disp)
      : Disp(Disp) {
    Operands[0] = Base;
    Operands[1] = Scale;
    Operands[2] = Index;
    Operands[3] = Segment;
  }

  bool operator==(const MemOpKey &Other) const {
    // Addresses' bases, scales, indices and segments must be identical.
    for (int i = 0; i < 4; ++i)
      if (!isIdenticalOp(*Operands[i], *Other.Operands[i]))
        return false;

    // Addresses' displacements don't have to be exactly the same. It only
    // matters that they use the same symbol/index/address. Immediates' or
    // offsets' differences will be taken care of during instruction
    // substitution.
    return isSimilarDispOp(*Disp, *Other.Disp);
  }

  // Address' base, scale, index and segment operands.
  const MachineOperand *Operands[4];

  // Address' displacement operand.
  const MachineOperand *Disp;
};

} // end anonymous namespace

namespace llvm {

/// Provide DenseMapInfo for MemOpKey.
template <> struct DenseMapInfo<MemOpKey> {
  using PtrInfo = DenseMapInfo<const MachineOperand *>;

  static inline MemOpKey getEmptyKey() {
    return MemOpKey(PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
                    PtrInfo::getEmptyKey(), PtrInfo::getEmptyKey(),
                    PtrInfo::getEmptyKey());
  }

  static inline MemOpKey getTombstoneKey() {
    return MemOpKey(PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
                    PtrInfo::getTombstoneKey(), PtrInfo::getTombstoneKey(),
                    PtrInfo::getTombstoneKey());
  }

  static unsigned getHashValue(const MemOpKey &Val) {
    // Checking any field of MemOpKey is enough to determine if the key is
    // empty or tombstone.
    assert(Val.Disp != PtrInfo::getEmptyKey() && "Cannot hash the empty key");
    assert(Val.Disp != PtrInfo::getTombstoneKey() &&
           "Cannot hash the tombstone key");

    hash_code Hash = hash_combine(*Val.Operands[0], *Val.Operands[1],
                                  *Val.Operands[2], *Val.Operands[3]);

    // If the address displacement is an immediate, it should not affect the
    // hash so that memory operands which differ only by immediate displacement
    // would have the same hash. If the address displacement is something else,
    // we should reflect the symbol/index/address in the hash.
    switch (Val.Disp->getType()) {
    case MachineOperand::MO_Immediate:
      break;
    case MachineOperand::MO_ConstantPoolIndex:
    case MachineOperand::MO_JumpTableIndex:
      Hash = hash_combine(Hash, Val.Disp->getIndex());
      break;
    case MachineOperand::MO_ExternalSymbol:
      Hash = hash_combine(Hash, Val.Disp->getSymbolName());
      break;
    case MachineOperand::MO_GlobalAddress:
      Hash = hash_combine(Hash, Val.Disp->getGlobal());
      break;
    case MachineOperand::MO_BlockAddress:
      Hash = hash_combine(Hash, Val.Disp->getBlockAddress());
      break;
    case MachineOperand::MO_MCSymbol:
      Hash = hash_combine(Hash, Val.Disp->getMCSymbol());
      break;
    case MachineOperand::MO_MachineBasicBlock:
      Hash = hash_combine(Hash, Val.Disp->getMBB());
      break;
    default:
      llvm_unreachable("Invalid address displacement operand");
    }

    return (unsigned)Hash;
  }

  static bool isEqual(const MemOpKey &LHS, const MemOpKey &RHS) {
    // Checking any field of MemOpKey is enough to determine if the key is
    // empty or tombstone.
    if (RHS.Disp == PtrInfo::getEmptyKey())
      return LHS.Disp == PtrInfo::getEmptyKey();
    if (RHS.Disp == PtrInfo::getTombstoneKey())
      return LHS.Disp == PtrInfo::getTombstoneKey();
    return LHS == RHS;
  }
};

} // end namespace llvm

/// Returns a hash table key based on memory operands of \p MI. The
/// number of the first memory operand of \p MI is specified through \p N.
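/// For illustration only (an assumed example, not tied to a particular
/// instruction definition): for a load such as
///   %dst = MOV32rm %base, 1, %idx, 8, $noreg
/// with \p N == 1, the key is built from the base, scale, index and segment
/// operands together with the displacement operand (here the immediate 8).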
static inline MemOpKey getMemOpKey(const MachineInstr &MI, unsigned N) {
  assert((isLEA(MI) || MI.mayLoadOrStore()) &&
         "The instruction must be a LEA, a load or a store");
  return MemOpKey(&MI.getOperand(N + X86::AddrBaseReg),
                  &MI.getOperand(N + X86::AddrScaleAmt),
                  &MI.getOperand(N + X86::AddrIndexReg),
                  &MI.getOperand(N + X86::AddrSegmentReg),
                  &MI.getOperand(N + X86::AddrDisp));
}

static inline bool isIdenticalOp(const MachineOperand &MO1,
                                 const MachineOperand &MO2) {
  return MO1.isIdenticalTo(MO2) && (!MO1.isReg() || !MO1.getReg().isPhysical());
}

#ifndef NDEBUG
static bool isValidDispOp(const MachineOperand &MO) {
  return MO.isImm() || MO.isCPI() || MO.isJTI() || MO.isSymbol() ||
         MO.isGlobal() || MO.isBlockAddress() || MO.isMCSymbol() || MO.isMBB();
}
#endif

static bool isSimilarDispOp(const MachineOperand &MO1,
                            const MachineOperand &MO2) {
  assert(isValidDispOp(MO1) && isValidDispOp(MO2) &&
         "Address displacement operand is not valid");
  return (MO1.isImm() && MO2.isImm()) ||
         (MO1.isCPI() && MO2.isCPI() && MO1.getIndex() == MO2.getIndex()) ||
         (MO1.isJTI() && MO2.isJTI() && MO1.getIndex() == MO2.getIndex()) ||
         (MO1.isSymbol() && MO2.isSymbol() &&
          MO1.getSymbolName() == MO2.getSymbolName()) ||
         (MO1.isGlobal() && MO2.isGlobal() &&
          MO1.getGlobal() == MO2.getGlobal()) ||
         (MO1.isBlockAddress() && MO2.isBlockAddress() &&
          MO1.getBlockAddress() == MO2.getBlockAddress()) ||
         (MO1.isMCSymbol() && MO2.isMCSymbol() &&
          MO1.getMCSymbol() == MO2.getMCSymbol()) ||
         (MO1.isMBB() && MO2.isMBB() && MO1.getMBB() == MO2.getMBB());
}

static inline bool isLEA(const MachineInstr &MI) {
  unsigned Opcode = MI.getOpcode();
  return Opcode == X86::LEA16r || Opcode == X86::LEA32r ||
         Opcode == X86::LEA64r || Opcode == X86::LEA64_32r;
}

namespace {

class X86OptimizeLEAPass : public MachineFunctionPass {
public:
  X86OptimizeLEAPass() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 LEA Optimize"; }

  /// Loop over all of the basic blocks, replacing address
  /// calculations in load and store instructions when the address has
  /// already been calculated by an LEA. Also, remove redundant LEAs.
  bool runOnMachineFunction(MachineFunction &MF) override;

  static char ID;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<ProfileSummaryInfoWrapperPass>();
    AU.addRequired<LazyMachineBlockFrequencyInfoPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

private:
  using MemOpMap = DenseMap<MemOpKey, SmallVector<MachineInstr *, 16>>;

  /// Returns the distance between two instructions inside one basic block.
  /// A negative result means that the instructions occur in reverse order.
  int calcInstrDist(const MachineInstr &First, const MachineInstr &Last);

  /// Choose the best \p LEA instruction from the \p List to replace
  /// address calculation in \p MI instruction. Return the address displacement
  /// and the distance between \p MI and the chosen \p BestLEA in
  /// \p AddrDispShift and \p Dist.
  bool chooseBestLEA(const SmallVectorImpl<MachineInstr *> &List,
                     const MachineInstr &MI, MachineInstr *&BestLEA,
                     int64_t &AddrDispShift, int &Dist);

  /// Returns the difference between addresses' displacements of \p MI1
  /// and \p MI2. The numbers of the first memory operands for the instructions
  /// are specified through \p N1 and \p N2.
  int64_t getAddrDispShift(const MachineInstr &MI1, unsigned N1,
                           const MachineInstr &MI2, unsigned N2) const;

  /// Returns true if the \p Last LEA instruction can be replaced by the
  /// \p First. The difference between displacements of the addresses calculated
  /// by these LEAs is returned in \p AddrDispShift. It'll be used for proper
  /// replacement of the \p Last LEA's uses with the \p First's def register.
  bool isReplaceable(const MachineInstr &First, const MachineInstr &Last,
                     int64_t &AddrDispShift) const;

  /// Find all LEA instructions in the basic block. Also, assign position
  /// numbers to all instructions in the basic block to speed up calculation of
  /// distance between them.
  void findLEAs(const MachineBasicBlock &MBB, MemOpMap &LEAs);

  /// Removes redundant address calculations.
  bool removeRedundantAddrCalc(MemOpMap &LEAs);

  /// Replace debug value MI with a new debug value instruction using register
  /// NewReg with an appropriate offset and DIExpression to incorporate the
  /// address displacement AddrDispShift. Return the new debug value instruction.
  MachineInstr *replaceDebugValue(MachineInstr &MI, unsigned OldReg,
                                  unsigned NewReg, int64_t AddrDispShift);

  /// Removes LEAs which calculate similar addresses.
  bool removeRedundantLEAs(MemOpMap &LEAs);

  DenseMap<const MachineInstr *, unsigned> InstrPos;

  MachineRegisterInfo *MRI = nullptr;
  const X86InstrInfo *TII = nullptr;
  const X86RegisterInfo *TRI = nullptr;
};

} // end anonymous namespace

char X86OptimizeLEAPass::ID = 0;

FunctionPass *llvm::createX86OptimizeLEAs() { return new X86OptimizeLEAPass(); }
INITIALIZE_PASS(X86OptimizeLEAPass, DEBUG_TYPE, "X86 optimize LEA pass", false,
                false)

int X86OptimizeLEAPass::calcInstrDist(const MachineInstr &First,
                                      const MachineInstr &Last) {
  // Both instructions must be in the same basic block and they must be
  // present in InstrPos.
  assert(Last.getParent() == First.getParent() &&
         "Instructions are in different basic blocks");
  assert(InstrPos.find(&First) != InstrPos.end() &&
         InstrPos.find(&Last) != InstrPos.end() &&
         "Instructions' positions are undefined");

  return InstrPos[&Last] - InstrPos[&First];
}

// Find the best LEA instruction in the List to replace address recalculation in
// MI. Such LEA must meet these requirements:
// 1) The address calculated by the LEA differs only by the displacement from
//    the address used in MI.
// 2) The register class of the definition of the LEA is compatible with the
//    register class of the address base register of MI.
// 3) Displacement of the new memory operand should fit in 1 byte if possible.
// 4) The LEA should be as close to MI as possible, and prior to it if
//    possible.
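//
// As an illustrative (hypothetical) case: if two LEAs in the list both match
// MI's address, the one whose resulting displacement fits in 1 byte is
// preferred, and among equally good candidates the closest LEA prior to MI
// wins.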
bool X86OptimizeLEAPass::chooseBestLEA(
    const SmallVectorImpl<MachineInstr *> &List, const MachineInstr &MI,
    MachineInstr *&BestLEA, int64_t &AddrDispShift, int &Dist) {
  const MachineFunction *MF = MI.getParent()->getParent();
  const MCInstrDesc &Desc = MI.getDesc();
  int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags) +
                X86II::getOperandBias(Desc);

  BestLEA = nullptr;

  // Loop over all LEA instructions.
  for (auto *DefMI : List) {
    // Get new address displacement.
    int64_t AddrDispShiftTemp = getAddrDispShift(MI, MemOpNo, *DefMI, 1);

    // Make sure the address displacement fits in 4 bytes.
    if (!isInt<32>(AddrDispShiftTemp))
      continue;

    // Check that the LEA def register can be used as the MI address base.
    // Some instructions can use a limited set of registers as address base,
    // for example MOV8mr_NOREX. We could constrain the register class of the
    // LEA def to suit MI; however, since this case is very rare and hard to
    // reproduce in a test, it's more reliable to simply skip the LEA.
    if (TII->getRegClass(Desc, MemOpNo + X86::AddrBaseReg, TRI, *MF) !=
        MRI->getRegClass(DefMI->getOperand(0).getReg()))
      continue;

    // Choose the closest LEA instruction from the list, prior to MI if
    // possible. Note that we take the resulting address displacement into
    // account as well. Also note that the list is sorted by the order in
    // which the LEAs occur, so the break condition is pretty simple.
    int DistTemp = calcInstrDist(*DefMI, MI);
    assert(DistTemp != 0 &&
           "The distance between two different instructions cannot be zero");
    if (DistTemp > 0 || BestLEA == nullptr) {
      // Do not update the returned LEA if the current one provides a
      // displacement which fits in 1 byte, while the new candidate does not.
      if (BestLEA != nullptr && !isInt<8>(AddrDispShiftTemp) &&
          isInt<8>(AddrDispShift))
        continue;

      BestLEA = DefMI;
      AddrDispShift = AddrDispShiftTemp;
      Dist = DistTemp;
    }

    // FIXME: Maybe we should not always stop at the first LEA after MI.
    if (DistTemp < 0)
      break;
  }

  return BestLEA != nullptr;
}

// Get the difference between the addresses' displacements of the two
// instructions \p MI1 and \p MI2. The numbers of the first memory operands are
// passed through \p N1 and \p N2.
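//
// For example (hypothetical values): for immediate displacements 24 and 16 the
// result is 8; for global-address displacements @g+16 and @g+4 it is 12; for
// jump-table displacements it is always 0.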
int64_t X86OptimizeLEAPass::getAddrDispShift(const MachineInstr &MI1,
                                             unsigned N1,
                                             const MachineInstr &MI2,
                                             unsigned N2) const {
  const MachineOperand &Op1 = MI1.getOperand(N1 + X86::AddrDisp);
  const MachineOperand &Op2 = MI2.getOperand(N2 + X86::AddrDisp);

  assert(isSimilarDispOp(Op1, Op2) &&
         "Address displacement operands are not compatible");

  // After the assert above we can be sure that both operands are of the same
  // valid type and use the same symbol/index/address, thus displacement shift
  // calculation is rather simple.
  if (Op1.isJTI())
    return 0;
  return Op1.isImm() ? Op1.getImm() - Op2.getImm()
                     : Op1.getOffset() - Op2.getOffset();
}

// Check that the Last LEA can be replaced by the First LEA. To be so,
// these requirements must be met:
// 1) Addresses calculated by LEAs differ only by displacement.
// 2) Def registers of LEAs belong to the same class.
// 3) All uses of the Last LEA def register are replaceable, thus the
//    register is used only as address base.
bool X86OptimizeLEAPass::isReplaceable(const MachineInstr &First,
                                       const MachineInstr &Last,
                                       int64_t &AddrDispShift) const {
  assert(isLEA(First) && isLEA(Last) &&
         "The function works only with LEA instructions");

  // Make sure that LEA def registers belong to the same class. There may be
  // instructions (like MOV8mr_NOREX) which allow a limited set of registers to
  // be used as their operands, so we must be sure that replacing one LEA
  // with another won't lead to putting a wrong register in the instruction.
  if (MRI->getRegClass(First.getOperand(0).getReg()) !=
      MRI->getRegClass(Last.getOperand(0).getReg()))
    return false;

  // Get new address displacement.
  AddrDispShift = getAddrDispShift(Last, 1, First, 1);

  // Loop over all uses of the Last LEA to check that its def register is
  // used only as an address base for memory accesses. If so, it can be
  // replaced; otherwise it cannot.
  for (auto &MO : MRI->use_nodbg_operands(Last.getOperand(0).getReg())) {
    MachineInstr &MI = *MO.getParent();

    // Get the number of the first memory operand.
    const MCInstrDesc &Desc = MI.getDesc();
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);

    // If the use instruction has no memory operand, the LEA is not
    // replaceable.
    if (MemOpNo < 0)
      return false;

    MemOpNo += X86II::getOperandBias(Desc);

    // If the address base of the use instruction is not the LEA def register,
    // the LEA is not replaceable.
    if (!isIdenticalOp(MI.getOperand(MemOpNo + X86::AddrBaseReg), MO))
      return false;

    // If the LEA def register is used as any other operand of the use
    // instruction, the LEA is not replaceable.
    for (unsigned i = 0; i < MI.getNumOperands(); i++)
      if (i != (unsigned)(MemOpNo + X86::AddrBaseReg) &&
          isIdenticalOp(MI.getOperand(i), MO))
        return false;

    // Check that the new address displacement will fit in 4 bytes.
    if (MI.getOperand(MemOpNo + X86::AddrDisp).isImm() &&
        !isInt<32>(MI.getOperand(MemOpNo + X86::AddrDisp).getImm() +
                   AddrDispShift))
      return false;
  }

  return true;
}

void X86OptimizeLEAPass::findLEAs(const MachineBasicBlock &MBB,
                                  MemOpMap &LEAs) {
  unsigned Pos = 0;
  for (auto &MI : MBB) {
    // Assign the position number to the instruction. Note that we are going to
    // move some instructions during the optimization; however, there will
    // never be a need to move two instructions before any selected instruction.
    // So, to avoid multiple position updates during moves, we just increase the
    // position counter by two, leaving free space for instructions that will be
    // moved.
    InstrPos[&MI] = Pos += 2;

    if (isLEA(MI))
      LEAs[getMemOpKey(MI, 1)].push_back(const_cast<MachineInstr *>(&MI));
  }
}

// Try to find load and store instructions which recalculate addresses already
// calculated by some LEA and replace their memory operands with its def
// register.
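//
// Shape of the rewrite, using an assumed (hypothetical) example: a load
//   %val = MOV32rm %base, 4, %idx, 24, $noreg
// preceded by "%lea = LEA64r %base, 4, %idx, 16, $noreg" becomes
//   %val = MOV32rm %lea, 1, $noreg, 8, $noreg
// i.e. the LEA def becomes the base and only the displacement difference
// remains; scale, index and segment are cleared.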
bool X86OptimizeLEAPass::removeRedundantAddrCalc(MemOpMap &LEAs) {
  bool Changed = false;

  assert(!LEAs.empty());
  MachineBasicBlock *MBB = (*LEAs.begin()->second.begin())->getParent();

  // Process all instructions in the basic block.
  for (MachineInstr &MI : llvm::make_early_inc_range(*MBB)) {
    // Instruction must be load or store.
    if (!MI.mayLoadOrStore())
      continue;

    // Get the number of the first memory operand.
    const MCInstrDesc &Desc = MI.getDesc();
    int MemOpNo = X86II::getMemoryOperandNo(Desc.TSFlags);

    // If the instruction has no memory operand, skip it.
    if (MemOpNo < 0)
      continue;

    MemOpNo += X86II::getOperandBias(Desc);

    // Do not call chooseBestLEA if there was no matching LEA.
    auto Insns = LEAs.find(getMemOpKey(MI, MemOpNo));
    if (Insns == LEAs.end())
      continue;

    // Get the best LEA instruction to replace address calculation.
    MachineInstr *DefMI;
    int64_t AddrDispShift;
    int Dist;
    if (!chooseBestLEA(Insns->second, MI, DefMI, AddrDispShift, Dist))
      continue;

    // If the LEA occurs before the current instruction, we can freely replace
    // the instruction. If the LEA occurs after it, we can lift the LEA above
    // the instruction and this way be able to replace it. Since the LEA and the
    // instruction have similar memory operands (and thus the same def
    // instructions for those operands), we can always do that without the
    // risk of using registers before their defs.
    if (Dist < 0) {
      DefMI->removeFromParent();
      MBB->insert(MachineBasicBlock::iterator(&MI), DefMI);
      InstrPos[DefMI] = InstrPos[&MI] - 1;

      // Make sure the instructions' position numbers are sane.
      assert(((InstrPos[DefMI] == 1 &&
               MachineBasicBlock::iterator(DefMI) == MBB->begin()) ||
              InstrPos[DefMI] >
                  InstrPos[&*std::prev(MachineBasicBlock::iterator(DefMI))]) &&
             "Instruction positioning is broken");
    }

    // Since we can possibly extend register lifetime, clear kill flags.
    MRI->clearKillFlags(DefMI->getOperand(0).getReg());

    ++NumSubstLEAs;
    LLVM_DEBUG(dbgs() << "OptimizeLEAs: Candidate to replace: "; MI.dump(););

    // Change instruction operands.
    MI.getOperand(MemOpNo + X86::AddrBaseReg)
        .ChangeToRegister(DefMI->getOperand(0).getReg(), false);
    MI.getOperand(MemOpNo + X86::AddrScaleAmt).ChangeToImmediate(1);
    MI.getOperand(MemOpNo + X86::AddrIndexReg)
        .ChangeToRegister(X86::NoRegister, false);
    MI.getOperand(MemOpNo + X86::AddrDisp).ChangeToImmediate(AddrDispShift);
    MI.getOperand(MemOpNo + X86::AddrSegmentReg)
        .ChangeToRegister(X86::NoRegister, false);

    LLVM_DEBUG(dbgs() << "OptimizeLEAs: Replaced by: "; MI.dump(););

    Changed = true;
  }

  return Changed;
}

MachineInstr *X86OptimizeLEAPass::replaceDebugValue(MachineInstr &MI,
                                                    unsigned OldReg,
                                                    unsigned NewReg,
                                                    int64_t AddrDispShift) {
  const DIExpression *Expr = MI.getDebugExpression();
  if (AddrDispShift != 0) {
    if (MI.isNonListDebugValue()) {
      Expr =
          DIExpression::prepend(Expr, DIExpression::StackValue, AddrDispShift);
    } else {
      // Update the Expression, appending an offset of `AddrDispShift` to the
      // Op corresponding to `OldReg`.
      SmallVector<uint64_t, 3> Ops;
      DIExpression::appendOffset(Ops, AddrDispShift);
      for (MachineOperand &Op : MI.getDebugOperandsForReg(OldReg)) {
        unsigned OpIdx = MI.getDebugOperandIndex(&Op);
        Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
      }
    }
  }

  // Replace DBG_VALUE instruction with modified version.
  MachineBasicBlock *MBB = MI.getParent();
  DebugLoc DL = MI.getDebugLoc();
  bool IsIndirect = MI.isIndirectDebugValue();
  const MDNode *Var = MI.getDebugVariable();
  unsigned Opcode = MI.isNonListDebugValue() ? TargetOpcode::DBG_VALUE
                                             : TargetOpcode::DBG_VALUE_LIST;
  if (IsIndirect)
    assert(MI.getDebugOffset().getImm() == 0 &&
           "DBG_VALUE with nonzero offset");
  SmallVector<MachineOperand, 4> NewOps;
  // If we encounter an operand using the old register, replace it with an
  // operand that uses the new register; otherwise keep the old operand.
  auto replaceOldReg = [OldReg, NewReg](const MachineOperand &Op) {
    if (Op.isReg() && Op.getReg() == OldReg)
      return MachineOperand::CreateReg(NewReg, false, false, false, false,
                                       false, false, false, false, false,
                                       /*IsRenamable*/ true);
    return Op;
  };
  for (const MachineOperand &Op : MI.debug_operands())
    NewOps.push_back(replaceOldReg(Op));
  return BuildMI(*MBB, MBB->erase(&MI), DL, TII->get(Opcode), IsIndirect,
                 NewOps, Var, Expr);
}

// Try to find similar LEAs in the list and replace one with another.
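//
// With hypothetical displacements 16 and 24 for the first and the later LEA,
// AddrDispShift is 8: each non-debug use of the later LEA is rewritten to use
// the first LEA's def register with its memory displacement increased by 8,
// while DBG_VALUE users get the offset folded into their DIExpression via
// replaceDebugValue.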
bool X86OptimizeLEAPass::removeRedundantLEAs(MemOpMap &LEAs) {
  bool Changed = false;

  // Loop over all entries in the table.
  for (auto &E : LEAs) {
    auto &List = E.second;

    // Loop over all LEA pairs.
    auto I1 = List.begin();
    while (I1 != List.end()) {
      MachineInstr &First = **I1;
      auto I2 = std::next(I1);
      while (I2 != List.end()) {
        MachineInstr &Last = **I2;
        int64_t AddrDispShift;

        // LEAs should be in occurrence order in the list, so we can freely
        // replace later LEAs with earlier ones.
        assert(calcInstrDist(First, Last) > 0 &&
               "LEAs must be in occurrence order in the list");

        // Check that the Last LEA instruction can be replaced by the First.
        if (!isReplaceable(First, Last, AddrDispShift)) {
          ++I2;
          continue;
        }

        // Loop over all uses of the Last LEA and update their operands. Note
        // that the correctness of this has already been checked in the
        // isReplaceable function.
        Register FirstVReg = First.getOperand(0).getReg();
        Register LastVReg = Last.getOperand(0).getReg();
        // We use MRI->use_empty here instead of the combination of
        // llvm::make_early_inc_range and MRI->use_operands because we could
        // replace two or more uses in a debug instruction in one iteration, and
        // that would deeply confuse llvm::make_early_inc_range.
        while (!MRI->use_empty(LastVReg)) {
          MachineOperand &MO = *MRI->use_begin(LastVReg);
          MachineInstr &MI = *MO.getParent();

          if (MI.isDebugValue()) {
            // Replace DBG_VALUE instruction with modified version using the
            // register from the replacing LEA and the address displacement
            // between the LEA instructions.
            replaceDebugValue(MI, LastVReg, FirstVReg, AddrDispShift);
            continue;
          }

          // Get the number of the first memory operand.
          const MCInstrDesc &Desc = MI.getDesc();
          int MemOpNo =
              X86II::getMemoryOperandNo(Desc.TSFlags) +
              X86II::getOperandBias(Desc);

          // Update address base.
          MO.setReg(FirstVReg);

          // Update address disp.
          MachineOperand &Op = MI.getOperand(MemOpNo + X86::AddrDisp);
          if (Op.isImm())
            Op.setImm(Op.getImm() + AddrDispShift);
          else if (!Op.isJTI())
            Op.setOffset(Op.getOffset() + AddrDispShift);
        }

        // Since we can possibly extend register lifetime, clear kill flags.
        MRI->clearKillFlags(FirstVReg);

        ++NumRedundantLEAs;
        LLVM_DEBUG(dbgs() << "OptimizeLEAs: Remove redundant LEA: ";
                   Last.dump(););

        // By this point, all of the Last LEA's uses have been replaced, so we
        // can freely remove it.
        assert(MRI->use_empty(LastVReg) &&
               "The LEA's def register must have no uses");
        Last.eraseFromParent();

        // Erase removed LEA from the list.
        I2 = List.erase(I2);

        Changed = true;
      }
      ++I1;
    }
  }

  return Changed;
}

bool X86OptimizeLEAPass::runOnMachineFunction(MachineFunction &MF) {
  bool Changed = false;

  if (DisableX86LEAOpt || skipFunction(MF.getFunction()))
    return false;

  MRI = &MF.getRegInfo();
  TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
  TRI = MF.getSubtarget<X86Subtarget>().getRegisterInfo();
  auto *PSI =
      &getAnalysis<ProfileSummaryInfoWrapperPass>().getPSI();
  auto *MBFI = (PSI && PSI->hasProfileSummary()) ?
               &getAnalysis<LazyMachineBlockFrequencyInfoPass>().getBFI() :
               nullptr;

  // Process all basic blocks.
  for (auto &MBB : MF) {
    MemOpMap LEAs;
    InstrPos.clear();

    // Find all LEA instructions in basic block.
    findLEAs(MBB, LEAs);

    // If current basic block has no LEAs, move on to the next one.
    if (LEAs.empty())
      continue;

    // Remove redundant LEA instructions.
    Changed |= removeRedundantLEAs(LEAs);

    // Remove redundant address calculations. Do it only for -Os/-Oz since only
    // a code size gain is expected from this part of the pass.
    bool OptForSize = MF.getFunction().hasOptSize() ||
                      llvm::shouldOptimizeForSize(&MBB, PSI, MBFI);
    if (OptForSize)
      Changed |= removeRedundantAddrCalc(LEAs);
  }

  return Changed;
}