//===- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
// The pass runs after the PrologEpilogInserter, where we emit the CFI
// instructions. To preserve the correctness of the unwind information, the
// pass must either not change the order of any two instructions, one of which
// has the FrameSetup/FrameDestroy flag, or, alternatively, apply an ad-hoc
// fix to the unwind information.
//
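// For example (an illustrative sketch, not an exhaustive list), two adjacent
// loads are combined into a single paired load:
//   ldr x0, [x2]
//   ldr x1, [x2, #8]     =>   ldp x0, x1, [x2]
// and a base register update is folded into the addressing mode:
//   add x8, x8, #16
//   str x0, [x8]         =>   str x0, [x8, #16]!
//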
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>
#include <optional>

using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pairs generated from unscaled instructions");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

DEBUG_COUNTER(RegRenamingCounter, DEBUG_TYPE "-reg-renaming",
              "Controls which pairs are considered for renaming");

// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we
// form pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

// Enable register renaming to find additional store pairing opportunities.
static cl::opt<bool> EnableRenaming("aarch64-load-store-renaming",
                                    cl::init(true), cl::Hidden);

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward = false;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx = -1;

  // If not none, RenameReg can be used to rename the result register of the
  // first store in a pair. Currently this only works when merging stores
  // forward.
  std::optional<MCPhysReg> RenameReg;

  LdStPairFlags() = default;

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }

  void setRenameReg(MCPhysReg R) { RenameReg = R; }
  void clearRenameReg() { RenameReg = std::nullopt; }
  std::optional<MCPhysReg> getRenameReg() const { return RenameReg; }
};

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;

  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  AliasAnalysis *AA;
  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which register units have been modified and used.
  LiveRegUnits ModifiedRegUnits, UsedRegUnits;
  LiveRegUnits DefinedInBB;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit,
                                               bool FindNarrowMerge);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two narrow store instructions indicated into a single wider
  // store instruction.
  MachineBasicBlock::iterator
  mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                        MachineBasicBlock::iterator MergeMI,
                        const LdStPairFlags &Flags);

  // Merge the two instructions indicated into a single pair-wise instruction.
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge zero store instructions.
  bool tryToMergeZeroStInst(MachineBasicBlock::iterator &MBBI);

  // Find and pair ldr/str instructions.
  bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Find and merge base register updates before or after a ld/st instruction.
  bool tryToMergeLdStUpdate(MachineBasicBlock::iterator &MBBI);

  bool optimizeBlock(MachineBasicBlock &MBB, bool EnableNarrowZeroStOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override { return AARCH64_LOAD_STORE_OPT_NAME; }
};

char AArch64LoadStoreOpt::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

// These instructions set the memory tag and either keep the memory contents
// unchanged or set them to zero, ignoring the address part of the source
// register.
static bool isTagStore(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::STGi:
  case AArch64::STZGi:
  case AArch64::ST2Gi:
  case AArch64::STZ2Gi:
    return true;
  }
}

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return std::numeric_limits<unsigned>::max();
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRDpre:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRQpre:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STRWpre:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STRXpre:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRDpre:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRQpre:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRWpre:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::LDRXpre:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::STRSpre:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
  case AArch64::LDRSpre:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  case AArch64::LDRSWpre:
    return AArch64::LDRWpre;
  }
}

static unsigned getMatchingWideOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no wide equivalent!");
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::STURWi:
    return AArch64::STURXi;
  case AArch64::STRWui:
    return AArch64::STRXui;
  }
}
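
// For example, the wide-opcode mapping above enables merging two adjacent
// narrow zero stores into one wider store (a sketch of the transformation
// performed by mergeNarrowZeroStores below):
//   strh wzr, [x0]
//   strh wzr, [x0, #2]   =>   str wzr, [x0]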

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRSpre:
    return AArch64::STPSpre;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRDpre:
    return AArch64::STPDpre;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRQpre:
    return AArch64::STPQpre;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRWpre:
    return AArch64::STPWpre;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::STRXpre:
    return AArch64::STPXpre;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRSpre:
    return AArch64::LDPSpre;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRDpre:
    return AArch64::LDPDpre;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRQpre:
    return AArch64::LDPQpre;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRWpre:
    return AArch64::LDPWpre;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRXpre:
    return AArch64::LDPXpre;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  case AArch64::LDRSWpre:
    return AArch64::LDPSWpre;
  }
}

static bool isMatchingStore(MachineInstr &LoadInst,
                            MachineInstr &StoreInst) {
  unsigned LdOpc = LoadInst.getOpcode();
  unsigned StOpc = StoreInst.getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

static unsigned getPreIndexedOpcode(unsigned Opc) {
  // FIXME: We don't currently support creating pre-indexed loads/stores when
  // the load or store is the unscaled version.  If we decide to perform such
  // an optimization in the future, the cases for the unscaled loads/stores
  // will need to be added here.
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  case AArch64::STGi:
    return AArch64::STGPreIndex;
  case AArch64::STZGi:
    return AArch64::STZGPreIndex;
  case AArch64::ST2Gi:
    return AArch64::ST2GPreIndex;
  case AArch64::STZ2Gi:
    return AArch64::STZ2GPreIndex;
  case AArch64::STGPi:
    return AArch64::STGPpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STRSpost;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STRDpost;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STRWpost;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  case AArch64::STGi:
    return AArch64::STGPostIndex;
  case AArch64::STZGi:
    return AArch64::STZGPostIndex;
  case AArch64::ST2Gi:
    return AArch64::ST2GPostIndex;
  case AArch64::STZ2Gi:
    return AArch64::STZ2GPostIndex;
  case AArch64::STGPi:
    return AArch64::STGPpost;
  }
}
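
// For example, the pre/post-indexed mappings above enable folding a base
// register update into the memory access itself (a sketch):
//   add x8, x8, #8
//   ldr x0, [x8]         =>   ldr x0, [x8, #8]!    (pre-index)
//
//   ldr x0, [x8]
//   add x8, x8, #8       =>   ldr x0, [x8], #8     (post-index)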

static bool isPreLdStPairCandidate(MachineInstr &FirstMI, MachineInstr &MI) {

  unsigned OpcA = FirstMI.getOpcode();
  unsigned OpcB = MI.getOpcode();

  switch (OpcA) {
  default:
    return false;
  case AArch64::STRSpre:
    return (OpcB == AArch64::STRSui) || (OpcB == AArch64::STURSi);
  case AArch64::STRDpre:
    return (OpcB == AArch64::STRDui) || (OpcB == AArch64::STURDi);
  case AArch64::STRQpre:
    return (OpcB == AArch64::STRQui) || (OpcB == AArch64::STURQi);
  case AArch64::STRWpre:
    return (OpcB == AArch64::STRWui) || (OpcB == AArch64::STURWi);
  case AArch64::STRXpre:
    return (OpcB == AArch64::STRXui) || (OpcB == AArch64::STURXi);
  case AArch64::LDRSpre:
    return (OpcB == AArch64::LDRSui) || (OpcB == AArch64::LDURSi);
  case AArch64::LDRDpre:
    return (OpcB == AArch64::LDRDui) || (OpcB == AArch64::LDURDi);
  case AArch64::LDRQpre:
    return (OpcB == AArch64::LDRQui) || (OpcB == AArch64::LDURQi);
  case AArch64::LDRWpre:
    return (OpcB == AArch64::LDRWui) || (OpcB == AArch64::LDURWi);
  case AArch64::LDRXpre:
    return (OpcB == AArch64::LDRXui) || (OpcB == AArch64::LDURXi);
  case AArch64::LDRSWpre:
    return (OpcB == AArch64::LDRSWui) || (OpcB == AArch64::LDURSWi);
  }
}
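
// For example (a sketch), a pre-indexed store followed by a plain store to
// the next slot can later be merged into a pre-indexed store pair:
//   str x0, [x8, #8]!
//   str x1, [x8, #8]     =>   stp x0, x1, [x8, #8]!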

// Returns the scale and offset range of pre/post indexed variants of MI.
static void getPrePostIndexedMemOpInfo(const MachineInstr &MI, int &Scale,
                                       int &MinOffset, int &MaxOffset) {
  bool IsPaired = AArch64InstrInfo::isPairedLdSt(MI);
  bool IsTagStore = isTagStore(MI);
  // ST*G and all paired ldst have the same scale in pre/post-indexed variants
  // as in the "unsigned offset" variant.
  // All other pre/post indexed ldst instructions are unscaled.
  Scale = (IsTagStore || IsPaired) ? AArch64InstrInfo::getMemScale(MI) : 1;

  if (IsPaired) {
    MinOffset = -64;
    MaxOffset = 63;
  } else {
    MinOffset = -256;
    MaxOffset = 255;
  }
}
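
// For example, this yields byte offsets in [-256, 255] for an unscaled single
// access such as STRXpre (scale 1), and element offsets in [-64, 63] (i.e.
// byte offsets in [-512, 504]) for an 8-byte paired access such as STPXi.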

static MachineOperand &getLdStRegOp(MachineInstr &MI,
                                    unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  bool IsPreLdSt = AArch64InstrInfo::isPreLdSt(MI);
  if (IsPreLdSt)
    PairedRegOp += 1;
  unsigned Idx =
      AArch64InstrInfo::isPairedLdSt(MI) || IsPreLdSt ? PairedRegOp : 0;
  return MI.getOperand(Idx);
}

static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst,
                                  MachineInstr &StoreInst,
                                  const AArch64InstrInfo *TII) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = TII->getMemScale(LoadInst);
  int StoreSize = TII->getMemScale(StoreInst);
  int UnscaledStOffset =
      TII->hasUnscaledLdStOffset(StoreInst)
          ? AArch64InstrInfo::getLdStOffsetOp(StoreInst).getImm()
          : AArch64InstrInfo::getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset =
      TII->hasUnscaledLdStOffset(LoadInst)
          ? AArch64InstrInfo::getLdStOffsetOp(LoadInst).getImm()
          : AArch64InstrInfo::getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}
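
// For example, with the containment check above, a 4-byte load at byte offset
// 4 lies entirely within an 8-byte store at byte offset 0 ([4, 8) is inside
// [0, 8)), so the loaded value can be recovered from the stored register.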

static bool isPromotableZeroStoreInst(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AArch64::STRWui || Opc == AArch64::STURWi ||
          isNarrowStore(Opc)) &&
         getLdStRegOp(MI).getReg() == AArch64::WZR;
}

static bool isPromotableLoadFromStore(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  // Scaled instructions.
  case AArch64::LDRBBui:
  case AArch64::LDRHHui:
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  // Unscaled instructions.
  case AArch64::LDURBBi:
  case AArch64::LDURHHi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
    return true;
  }
}

static bool isMergeableLdStUpdate(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
  default:
    return false;
  // Scaled instructions.
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::STRHHui:
  case AArch64::STRBBui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui:
  case AArch64::LDRHHui:
  case AArch64::LDRBBui:
  case AArch64::STGi:
  case AArch64::STZGi:
  case AArch64::ST2Gi:
  case AArch64::STZ2Gi:
  case AArch64::STGPi:
  // Unscaled instructions.
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  // Paired instructions.
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    // Make sure this is a reg+imm (as opposed to an address reloc).
    if (!AArch64InstrInfo::getLdStOffsetOp(MI).isImm())
      return false;

    return true;
  }
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                                           MachineBasicBlock::iterator MergeMI,
                                           const LdStPairFlags &Flags) {
  assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) &&
         "Expected promotable zero stores.");

  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator NextI = next_nodbg(I, E);
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way, the merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == MergeMI)
    NextI = next_nodbg(NextI, E);

  unsigned Opc = I->getOpcode();
  unsigned MergeMIOpc = MergeMI->getOpcode();
  bool IsScaled = !TII->hasUnscaledLdStOffset(Opc);
  bool IsMergedMIScaled = !TII->hasUnscaledLdStOffset(MergeMIOpc);
  int OffsetStride = IsScaled ? TII->getMemScale(*I) : 1;
  int MergeMIOffsetStride = IsMergedMIScaled ? TII->getMemScale(*MergeMI) : 1;

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I;
  // MergeForward also determines from which instruction we copy the base
  // register operand, so that we get flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? AArch64InstrInfo::getLdStBaseOp(*MergeMI)
                   : AArch64InstrInfo::getLdStBaseOp(*I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  int64_t IOffsetInBytes =
      AArch64InstrInfo::getLdStOffsetOp(*I).getImm() * OffsetStride;
  int64_t MIOffsetInBytes =
      AArch64InstrInfo::getLdStOffsetOp(*MergeMI).getImm() *
      MergeMIOffsetStride;
  // Select the final offset based on the offset order: the merged store
  // covers both locations, so it uses the lower of the two offsets.
  int64_t OffsetImm;
  if (IOffsetInBytes > MIOffsetInBytes)
    OffsetImm = MIOffsetInBytes;
  else
    OffsetImm = IOffsetInBytes;

  unsigned NewOpcode = getMatchingWideOpcode(Opc);
  bool FinalIsScaled = !TII->hasUnscaledLdStOffset(NewOpcode);

  // Adjust the final offset if the result opcode is a scaled store.
  if (FinalIsScaled) {
    int NewOffsetStride = TII->getMemScale(NewOpcode);
    assert(((OffsetImm % NewOffsetStride) == 0) &&
           "Offset should be a multiple of the store memory scale");
    OffsetImm = OffsetImm / NewOffsetStride;
  }

  // Construct the new instruction.
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineInstrBuilder MIB;
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(NewOpcode))
            .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .cloneMergedMemRefs({&*I, &*MergeMI})
            .setMIFlags(I->mergeFlagsWith(*MergeMI));
  (void)MIB;

  LLVM_DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n    ");
  LLVM_DEBUG(I->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(MergeMI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
  LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  MergeMI->eraseFromParent();
  return NextI;
}

// Apply Fn to all instructions between MI and the beginning of the block, until
// a def for DefReg is reached. Returns true iff Fn returns true for all
// visited instructions. Stops after visiting Limit instructions.
static bool forAllMIsUntilDef(MachineInstr &MI, MCPhysReg DefReg,
                              const TargetRegisterInfo *TRI, unsigned Limit,
                              std::function<bool(MachineInstr &, bool)> &Fn) {
  auto MBB = MI.getParent();
  for (MachineInstr &I :
       instructionsWithoutDebug(MI.getReverseIterator(), MBB->instr_rend())) {
    if (!Limit)
      return false;
    --Limit;

    bool isDef = any_of(I.operands(), [DefReg, TRI](MachineOperand &MOP) {
      return MOP.isReg() && MOP.isDef() && !MOP.isDebug() && MOP.getReg() &&
             TRI->regsOverlap(MOP.getReg(), DefReg);
    });
    if (!Fn(I, isDef))
      return false;
    if (isDef)
      break;
  }
  return true;
}

// Remove the register units killed by MI from Units, then add the units of
// all remaining register operands (defs and non-killed uses).
static void updateDefinedRegisters(MachineInstr &MI, LiveRegUnits &Units,
                                   const TargetRegisterInfo *TRI) {

  for (const MachineOperand &MOP : phys_regs_and_masks(MI))
    if (MOP.isReg() && MOP.isKill())
      Units.removeReg(MOP.getReg());

  for (const MachineOperand &MOP : phys_regs_and_masks(MI))
    if (MOP.isReg() && !MOP.isKill())
      Units.addReg(MOP.getReg());
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator NextI = next_nodbg(I, E);
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way, the merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    NextI = next_nodbg(NextI, E);

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = TII->hasUnscaledLdStOffset(Opc);
  int OffsetStride = IsUnscaled ? TII->getMemScale(*I) : 1;

  bool MergeForward = Flags.getMergeForward();

  std::optional<MCPhysReg> RenameReg = Flags.getRenameReg();
  if (MergeForward && RenameReg) {
    MCRegister RegToRename = getLdStRegOp(*I).getReg();
    DefinedInBB.addReg(*RenameReg);

    // Return the sub/super register for RenameReg, matching the size of
    // OriginalReg.
    auto GetMatchingSubReg = [this,
                              RenameReg](MCPhysReg OriginalReg) -> MCPhysReg {
      for (MCPhysReg SubOrSuper : TRI->sub_and_superregs_inclusive(*RenameReg))
        if (TRI->getMinimalPhysRegClass(OriginalReg) ==
            TRI->getMinimalPhysRegClass(SubOrSuper))
          return SubOrSuper;
      llvm_unreachable("Should have found matching sub or super register!");
    };

    std::function<bool(MachineInstr &, bool)> UpdateMIs =
        [this, RegToRename, GetMatchingSubReg](MachineInstr &MI, bool IsDef) {
          if (IsDef) {
            bool SeenDef = false;
            for (auto &MOP : MI.operands()) {
              // Rename the first explicit definition and all implicit
              // definitions matching RegToRename.
              if (MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
                  (!SeenDef || (MOP.isDef() && MOP.isImplicit())) &&
                  TRI->regsOverlap(MOP.getReg(), RegToRename)) {
                assert((MOP.isImplicit() ||
                        (MOP.isRenamable() && !MOP.isEarlyClobber())) &&
                       "Need renamable operands");
                MOP.setReg(GetMatchingSubReg(MOP.getReg()));
                SeenDef = true;
              }
            }
          } else {
            for (auto &MOP : MI.operands()) {
              if (MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
                  TRI->regsOverlap(MOP.getReg(), RegToRename)) {
                assert((MOP.isImplicit() ||
                        (MOP.isRenamable() && !MOP.isEarlyClobber())) &&
                       "Need renamable operands");
                MOP.setReg(GetMatchingSubReg(MOP.getReg()));
              }
            }
          }
          LLVM_DEBUG(dbgs() << "Renamed " << MI << "\n");
          return true;
        };
    forAllMIsUntilDef(*I, RegToRename, TRI, LdStLimit, UpdateMIs);

#if !defined(NDEBUG)
    // Make sure the register used for renaming is not used between the paired
    // instructions. That would trash the contents before the new paired
    // instruction.
    for (auto &MI :
         iterator_range<MachineInstrBundleIterator<llvm::MachineInstr>>(
             std::next(I), std::next(Paired)))
      assert(all_of(MI.operands(),
                    [this, &RenameReg](const MachineOperand &MOP) {
                      return !MOP.isReg() || MOP.isDebug() || !MOP.getReg() ||
                             MOP.isUndef() ||
                             !TRI->regsOverlap(MOP.getReg(), *RenameReg);
                    }) &&
             "Rename register used between paired instructions, trashing the "
             "contents");
#endif
  }

  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // MergeForward also determines from which instruction we copy the base
  // register operand, so that we get flags compatible with the input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? AArch64InstrInfo::getLdStBaseOp(*Paired)
                   : AArch64InstrInfo::getLdStBaseOp(*I);

  int Offset = AArch64InstrInfo::getLdStOffsetOp(*I).getImm();
  int PairedOffset = AArch64InstrInfo::getLdStOffsetOp(*Paired).getImm();
  bool PairedIsUnscaled = TII->hasUnscaledLdStOffset(Paired->getOpcode());
  if (IsUnscaled != PairedIsUnscaled) {
    // We're trying to pair instructions that differ in how they are scaled.  If
    // I is scaled then scale the offset of Paired accordingly.  Otherwise, do
    // the opposite (i.e., make Paired's offset unscaled).
    int MemSize = TII->getMemScale(*Paired);
    if (PairedIsUnscaled) {
      // If the unscaled offset isn't a multiple of the MemSize, we can't
      // pair the operations together.
      assert(!(PairedOffset % TII->getMemScale(*Paired)) &&
             "Offset should be a multiple of the stride!");
      PairedOffset /= MemSize;
    } else {
      PairedOffset *= MemSize;
    }
  }

  // Which register is Rt and which is Rt2 depends on the offset order.
  // However, for pre-indexed load/stores, Rt should be the register of the
  // pre load/store.
  MachineInstr *RtMI, *Rt2MI;
  if (Offset == PairedOffset + OffsetStride &&
      !AArch64InstrInfo::isPreLdSt(*I)) {
    RtMI = &*Paired;
    Rt2MI = &*I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = &*I;
    Rt2MI = &*Paired;
  }
  int OffsetImm = AArch64InstrInfo::getLdStOffsetOp(*RtMI).getImm();
  // Scale the immediate offset, if necessary.
  if (TII->hasUnscaledLdStOffset(RtMI->getOpcode())) {
    assert(!(OffsetImm % TII->getMemScale(*RtMI)) &&
           "Unscaled offset cannot be scaled.");
    OffsetImm /= TII->getMemScale(*RtMI);
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineOperand RegOp0 = getLdStRegOp(*RtMI);
  MachineOperand RegOp1 = getLdStRegOp(*Rt2MI);
  // Kill flags may become invalid when moving stores for pairing.
  if (RegOp0.isUse()) {
    if (!MergeForward) {
      // Clear kill flags on store if moving upwards. Example:
      //   STRWui %w0, ...
      //   USE %w1
      //   STRWui kill %w1  ; need to clear kill flag when moving STRWui upwards
      RegOp0.setIsKill(false);
      RegOp1.setIsKill(false);
    } else {
      // Clear kill flags of the first store's register. Example:
      //   STRWui %w1, ...
      //   USE kill %w1   ; need to clear kill flag when moving STRWui downwards
      //   STRW %w0
      Register Reg = getLdStRegOp(*I).getReg();
      for (MachineInstr &MI : make_range(std::next(I), Paired))
        MI.clearRegisterKills(Reg, TRI);
    }
  }

  unsigned MatchPairOpcode = getMatchingPairOpcode(Opc);
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(MatchPairOpcode));

  // Add the pre-index operand for pre-indexed ld/st pairs.
  if (AArch64InstrInfo::isPreLdSt(*RtMI))
    MIB.addReg(BaseRegOp.getReg(), RegState::Define);

  MIB.add(RegOp0)
      .add(RegOp1)
      .add(BaseRegOp)
      .addImm(OffsetImm)
      .cloneMergedMemRefs({&*I, &*Paired})
      .setMIFlags(I->mergeFlagsWith(*Paired));

  (void)MIB;

  LLVM_DEBUG(
      dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  LLVM_DEBUG(I->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(Paired->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %w1 = KILL %w1, implicit-def %x1
    // %x1 = SBFMXri killed %x1, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    Register DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    Register DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    LLVM_DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    LLVM_DEBUG(dbgs() << "  Extend operand:\n    ");
    LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
  } else {
    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  }
  LLVM_DEBUG(dbgs() << "\n");

  if (MergeForward)
    for (const MachineOperand &MOP : phys_regs_and_masks(*I))
      if (MOP.isReg() && MOP.isKill())
        DefinedInBB.addReg(MOP.getReg());

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}

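// For example (a sketch), a load that reads back the full stored value is
// replaced by a register move, while a narrower load contained in the stored
// value is replaced by a bitfield extract (AND/UBFM) of the stored register:
//   str w0, [x2, #4]          str w0, [x2, #4]
//   ldr w1, [x2, #4]     =>   mov w1, w0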
MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI =
      next_nodbg(LoadI, LoadI->getParent()->end());

  int LoadSize = TII->getMemScale(*LoadI);
  int StoreSize = TII->getMemScale(*StoreI);
  Register LdRt = getLdStRegOp(*LoadI).getReg();
  const MachineOperand &StMO = getLdStRegOp(*StoreI);
  Register StRt = getLdStRegOp(*StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load if the destination register of the load is the same as
    // the register holding the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      for (MachineInstr &MI : make_range(StoreI->getIterator(),
                                         LoadI->getIterator())) {
        if (MI.killsRegister(StRt, TRI)) {
          MI.clearRegisterKills(StRt, TRI);
          break;
        }
      }
      LLVM_DEBUG(dbgs() << "Remove load instruction:\n    ");
      LLVM_DEBUG(LoadI->print(dbgs()));
      LLVM_DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store have the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .add(StMO)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .setMIFlags(LoadI->getFlags());
  } else {
    // FIXME: Currently we disable this transformation in big-endian targets as
    // performance and correctness are verified only in little-endian.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = TII->hasUnscaledLdStOffset(*LoadI);
    assert(IsUnscaled == TII->hasUnscaledLdStOffset(*StoreI) &&
           "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset =
        IsUnscaled
            ? AArch64InstrInfo::getLdStOffsetOp(*LoadI).getImm()
            : AArch64InstrInfo::getLdStOffsetOp(*LoadI).getImm() * LoadSize;
    int UnscaledStOffset =
        IsUnscaled
            ? AArch64InstrInfo::getLdStOffsetOp(*StoreI).getImm()
            : AArch64InstrInfo::getLdStOffsetOp(*StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    Register DestReg =
        IsStoreXReg ? Register(TRI->getMatchingSuperReg(
                          LdRt, AArch64::sub_32, &AArch64::GPR64RegClass))
                    : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | (Immr << 6)                 // immr
                                | (Imms << 0);                // imms

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .add(StMO)
              .addImm(AndMaskEncoded)
              .setMIFlags(LoadI->getFlags());
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .add(StMO)
              .addImm(Immr)
              .addImm(Imms)
              .setMIFlags(LoadI->getFlags());
    }
  }

  // Clear kill flags between store and load.
  for (MachineInstr &MI : make_range(StoreI->getIterator(),
                                     BitExtMI->getIterator()))
    if (MI.killsRegister(StRt, TRI)) {
      MI.clearRegisterKills(StRt, TRI);
      break;
    }

  LLVM_DEBUG(dbgs() << "Promoting load by replacing:\n    ");
  LLVM_DEBUG(StoreI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(LoadI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instructions:\n    ");
  LLVM_DEBUG(StoreI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG((BitExtMI)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled load/stores into an "element"
  // offset used by the scaled pair load/store instructions.
  if (IsUnscaled) {
    // If the byte-offset isn't a multiple of the stride, there's no point
    // trying to match it.
    if (Offset % OffsetStride)
      return false;
    Offset /= OffsetStride;
  }
  return Offset <= 63 && Offset >= -64;
}
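
// For example, with an 8-byte stride an unscaled byte offset of 24 maps to
// element offset 3, which fits the signed 7-bit immediate range [-64, 63],
// while a byte offset of 512 maps to 64, which does not.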

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}
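
// For example, alignTo(5, 4) == 8, alignTo(8, 4) == 8, and
// alignTo(-3, 4) == 0 (rounding toward positive infinity).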

static bool mayAlias(MachineInstr &MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     AliasAnalysis *AA) {
  for (MachineInstr *MIb : MemInsns)
    if (MIa.mayAlias(AA, *MIb, /*UseTBAA*/ false))
      return true;

  return false;
}

bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr &LoadMI = *I;
  Register BaseReg = AArch64InstrInfo::getLdStBaseOp(LoadMI).getReg();

  // If the load is the first instruction in the block, there's obviously
  // not any matching store.
  if (MBBI == B)
    return false;

  // Track which register units have been modified and used between the first
  // insn and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();

  unsigned Count = 0;
  do {
    MBBI = prev_nodbg(MBBI, B);
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    // Also, we can't handle stores without an immediate offset operand, as
    // the operand might be the address of a global variable.
    if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
        BaseReg == AArch64InstrInfo::getLdStBaseOp(MI).getReg() &&
        AArch64InstrInfo::getLdStOffsetOp(MI).isImm() &&
        isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
        ModifiedRegUnits.available(getLdStRegOp(MI).getReg())) {
      StoreI = MBBI;
      return true;
    }

    if (MI.isCall())
      return false;

    // Update modified / used register units.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (!ModifiedRegUnits.available(BaseReg))
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI.mayStore() && LoadMI.mayAlias(AA, MI, /*UseTBAA*/ false))
      return false;
  } while (MBBI != B && Count < Limit);
  return false;
}

static bool needsWinCFI(const MachineFunction *MF) {
  return MF->getTarget().getMCAsmInfo()->usesWindowsCFI() &&
         MF->getFunction().needsUnwindTableEntry();
}

// Returns true if FirstMI and MI are candidates for merging or pairing.
// Otherwise, returns false.
static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
                                       LdStPairFlags &Flags,
                                       const AArch64InstrInfo *TII) {
  // If this is volatile or if pairing is suppressed, not a candidate.
  if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
    return false;

  // We should have already checked FirstMI for pair suppression and volatility.
  assert(!FirstMI.hasOrderedMemoryRef() &&
         !TII->isLdStPairSuppressed(FirstMI) &&
         "FirstMI shouldn't get here if either of these checks are true.");

  if (needsWinCFI(MI.getMF()) && (MI.getFlag(MachineInstr::FrameSetup) ||
                                  MI.getFlag(MachineInstr::FrameDestroy)))
    return false;

  unsigned OpcA = FirstMI.getOpcode();
  unsigned OpcB = MI.getOpcode();

  // Opcodes match: If the opcodes are pre ld/st, there is nothing more to
  // check.
  if (OpcA == OpcB)
    return !AArch64InstrInfo::isPreLdSt(FirstMI);

  // Try to match a sign-extended load/store with a zero-extended load/store.
  bool IsValidLdStrOpc, PairIsValidLdStrOpc;
  unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc);
  assert(IsValidLdStrOpc &&
         "Given Opc should be a Load or Store with an immediate");
  // OpcA will be the first instruction in the pair.
  if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) {
    Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
    return true;
  }

  // If the second instruction isn't even a mergeable/pairable load/store, bail
  // out.
  if (!PairIsValidLdStrOpc)
    return false;

  // FIXME: We don't support merging narrow stores with mixed scaled/unscaled
  // offsets.
  if (isNarrowStore(OpcA) || isNarrowStore(OpcB))
    return false;

  // The STR<S,D,Q,W,X>pre - STR<S,D,Q,W,X>ui and
  // LDR<S,D,Q,W,X,SW>pre - LDR<S,D,Q,W,X,SW>ui
  // are candidate pairs that can be merged.
  if (isPreLdStPairCandidate(FirstMI, MI))
    return true;

  // Try to match an unscaled load/store with a scaled load/store.
  return TII->hasUnscaledLdStOffset(OpcA) != TII->hasUnscaledLdStOffset(OpcB) &&
         getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);

  // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
}
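
// For example (a sketch), a scaled and an unscaled store to adjacent slots
// can be paired once their offsets are brought to a common scale:
//   str x0, [x8, #8]          ; STRXui, scaled element offset 1 (8 bytes)
//   stur x1, [x8, #16]   =>   stp x0, x1, [x8, #8]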
1360 
1361 static bool
1362 canRenameUpToDef(MachineInstr &FirstMI, LiveRegUnits &UsedInBetween,
1363                  SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
1364                  const TargetRegisterInfo *TRI) {
1365   if (!FirstMI.mayStore())
1366     return false;
1367 
1368   // Check if we can find an unused register which we can use to rename
1369   // the register used by the first load/store.
1370   auto *RegClass = TRI->getMinimalPhysRegClass(getLdStRegOp(FirstMI).getReg());
1371   MachineFunction &MF = *FirstMI.getParent()->getParent();
1372   if (!RegClass || !MF.getRegInfo().tracksLiveness())
1373     return false;
1374 
1375   auto RegToRename = getLdStRegOp(FirstMI).getReg();
1376   // For now, we only rename if the store operand gets killed at the store.
1377   if (!getLdStRegOp(FirstMI).isKill() &&
1378       !any_of(FirstMI.operands(),
1379               [TRI, RegToRename](const MachineOperand &MOP) {
1380                 return MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
1381                        MOP.isImplicit() && MOP.isKill() &&
1382                        TRI->regsOverlap(RegToRename, MOP.getReg());
1383               })) {
1384     LLVM_DEBUG(dbgs() << "  Operand not killed at " << FirstMI << "\n");
1385     return false;
1386   }
1387   auto canRenameMOP = [TRI](const MachineOperand &MOP) {
1388     if (MOP.isReg()) {
1389       auto *RegClass = TRI->getMinimalPhysRegClass(MOP.getReg());
1390       // Renaming registers with multiple disjunct sub-registers (e.g. the
1391       // result of a LD3) means that all sub-registers are renamed, potentially
1392       // impacting other instructions we did not check. Bail out.
1393       // Note that this relies on the structure of the AArch64 register file. In
1394       // particular, a subregister cannot be written without overwriting the
1395       // whole register.
1396       if (RegClass->HasDisjunctSubRegs) {
1397         LLVM_DEBUG(
1398             dbgs()
1399             << "  Cannot rename operands with multiple disjunct subregisters ("
1400             << MOP << ")\n");
1401         return false;
1402       }
1403     }
1404     return MOP.isImplicit() ||
1405            (MOP.isRenamable() && !MOP.isEarlyClobber() && !MOP.isTied());
1406   };
1407 
1408   bool FoundDef = false;
1409 
  // For each instruction between FirstMI and the previous def for
  // RegToRename, we:
  // * check if we can rename RegToRename in this instruction
  // * collect the registers used and required register classes for
  //   RegToRename.
1414   std::function<bool(MachineInstr &, bool)> CheckMIs = [&](MachineInstr &MI,
1415                                                            bool IsDef) {
1416     LLVM_DEBUG(dbgs() << "Checking " << MI << "\n");
1417     // Currently we do not try to rename across frame-setup instructions.
1418     if (MI.getFlag(MachineInstr::FrameSetup)) {
1419       LLVM_DEBUG(dbgs() << "  Cannot rename framesetup instructions currently ("
1420                         << MI << ")\n");
1421       return false;
1422     }
1423 
1424     UsedInBetween.accumulate(MI);
1425 
1426     // For a definition, check that we can rename the definition and exit the
1427     // loop.
1428     FoundDef = IsDef;
1429 
1430     // For defs, check if we can rename the first def of RegToRename.
1431     if (FoundDef) {
1432       // For some pseudo instructions, we might not generate code in the end
1433       // (e.g. KILL) and we would end up without a correct def for the rename
1434       // register.
1435       // TODO: This might be overly conservative and we could handle those cases
1436       // in multiple ways:
1437       //       1. Insert an extra copy, to materialize the def.
      //       2. Skip pseudo-defs until we find a non-pseudo def.
1439       if (MI.isPseudo()) {
1440         LLVM_DEBUG(dbgs() << "  Cannot rename pseudo instruction " << MI
1441                           << "\n");
1442         return false;
1443       }
1444 
1445       for (auto &MOP : MI.operands()) {
1446         if (!MOP.isReg() || !MOP.isDef() || MOP.isDebug() || !MOP.getReg() ||
1447             !TRI->regsOverlap(MOP.getReg(), RegToRename))
1448           continue;
1449         if (!canRenameMOP(MOP)) {
1450           LLVM_DEBUG(dbgs()
1451                      << "  Cannot rename " << MOP << " in " << MI << "\n");
1452           return false;
1453         }
1454         RequiredClasses.insert(TRI->getMinimalPhysRegClass(MOP.getReg()));
1455       }
1456       return true;
1457     } else {
1458       for (auto &MOP : MI.operands()) {
1459         if (!MOP.isReg() || MOP.isDebug() || !MOP.getReg() ||
1460             !TRI->regsOverlap(MOP.getReg(), RegToRename))
1461           continue;
1462 
1463         if (!canRenameMOP(MOP)) {
1464           LLVM_DEBUG(dbgs()
1465                      << "  Cannot rename " << MOP << " in " << MI << "\n");
1466           return false;
1467         }
1468         RequiredClasses.insert(TRI->getMinimalPhysRegClass(MOP.getReg()));
1469       }
1470     }
1471     return true;
1472   };
1473 
1474   if (!forAllMIsUntilDef(FirstMI, RegToRename, TRI, LdStLimit, CheckMIs))
1475     return false;
1476 
1477   if (!FoundDef) {
1478     LLVM_DEBUG(dbgs() << "  Did not find definition for register in BB\n");
1479     return false;
1480   }
1481   return true;
1482 }
1483 
// Check if we can find a physical register for renaming \p Reg. This register
// must:
// * not be defined already in \p DefinedInBB; DefinedInBB must contain all
//   defined registers up to the point where the renamed register will be used,
// * not be used in \p UsedInBetween; UsedInBetween must contain all accessed
//   registers in the range in which the rename register will be used,
// * be available in all required register classes (checked using
//   RequiredClasses).
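// For example, when renaming a W register, a candidate such as W19 would be
// rejected because its super-register X19 is callee-saved; reserved registers
// and any register whose units are set in DefinedInBB or UsedInBetween are
// skipped as well.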
1491 static std::optional<MCPhysReg> tryToFindRegisterToRename(
1492     const MachineFunction &MF, Register Reg, LiveRegUnits &DefinedInBB,
1493     LiveRegUnits &UsedInBetween,
1494     SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
1495     const TargetRegisterInfo *TRI) {
1496   const MachineRegisterInfo &RegInfo = MF.getRegInfo();
1497 
1498   // Checks if any sub- or super-register of PR is callee saved.
1499   auto AnySubOrSuperRegCalleePreserved = [&MF, TRI](MCPhysReg PR) {
1500     return any_of(TRI->sub_and_superregs_inclusive(PR),
1501                   [&MF, TRI](MCPhysReg SubOrSuper) {
1502                     return TRI->isCalleeSavedPhysReg(SubOrSuper, MF);
1503                   });
1504   };
1505 
1506   // Check if PR or one of its sub- or super-registers can be used for all
1507   // required register classes.
1508   auto CanBeUsedForAllClasses = [&RequiredClasses, TRI](MCPhysReg PR) {
1509     return all_of(RequiredClasses, [PR, TRI](const TargetRegisterClass *C) {
1510       return any_of(TRI->sub_and_superregs_inclusive(PR),
1511                     [C, TRI](MCPhysReg SubOrSuper) {
1512                       return C == TRI->getMinimalPhysRegClass(SubOrSuper);
1513                     });
1514     });
1515   };
1516 
1517   auto *RegClass = TRI->getMinimalPhysRegClass(Reg);
1518   for (const MCPhysReg &PR : *RegClass) {
1519     if (DefinedInBB.available(PR) && UsedInBetween.available(PR) &&
1520         !RegInfo.isReserved(PR) && !AnySubOrSuperRegCalleePreserved(PR) &&
1521         CanBeUsedForAllClasses(PR)) {
1522       DefinedInBB.addReg(PR);
1523       LLVM_DEBUG(dbgs() << "Found rename register " << printReg(PR, TRI)
1524                         << "\n");
1525       return {PR};
1526     }
1527   }
1528   LLVM_DEBUG(dbgs() << "No rename register found from "
1529                     << TRI->getRegClassName(RegClass) << "\n");
1530   return std::nullopt;
1531 }
1532 
1533 /// Scan the instructions looking for a load/store that can be combined with the
1534 /// current instruction into a wider equivalent or a load/store pair.
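/// For example (illustrative registers), given
///   ldr w0, [x2]
///   ...
///   ldr w1, [x2, #4]
/// the scan can return the second load so that ldp w0, w1, [x2] can be
/// formed, provided nothing in between modifies x2 or w1, uses w1, or may
/// alias the memory accesses.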
1535 MachineBasicBlock::iterator
1536 AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
1537                                       LdStPairFlags &Flags, unsigned Limit,
1538                                       bool FindNarrowMerge) {
1539   MachineBasicBlock::iterator E = I->getParent()->end();
1540   MachineBasicBlock::iterator MBBI = I;
1541   MachineBasicBlock::iterator MBBIWithRenameReg;
1542   MachineInstr &FirstMI = *I;
1543   MBBI = next_nodbg(MBBI, E);
1544 
1545   bool MayLoad = FirstMI.mayLoad();
1546   bool IsUnscaled = TII->hasUnscaledLdStOffset(FirstMI);
1547   Register Reg = getLdStRegOp(FirstMI).getReg();
1548   Register BaseReg = AArch64InstrInfo::getLdStBaseOp(FirstMI).getReg();
1549   int Offset = AArch64InstrInfo::getLdStOffsetOp(FirstMI).getImm();
1550   int OffsetStride = IsUnscaled ? TII->getMemScale(FirstMI) : 1;
1551   bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);
1552 
1553   std::optional<bool> MaybeCanRename;
1554   if (!EnableRenaming)
1555     MaybeCanRename = {false};
1556 
1557   SmallPtrSet<const TargetRegisterClass *, 5> RequiredClasses;
1558   LiveRegUnits UsedInBetween;
1559   UsedInBetween.init(*TRI);
1560 
1561   Flags.clearRenameReg();
1562 
1563   // Track which register units have been modified and used between the first
1564   // insn (inclusive) and the second insn.
1565   ModifiedRegUnits.clear();
1566   UsedRegUnits.clear();
1567 
1568   // Remember any instructions that read/write memory between FirstMI and MI.
1569   SmallVector<MachineInstr *, 4> MemInsns;
1570 
1571   for (unsigned Count = 0; MBBI != E && Count < Limit;
1572        MBBI = next_nodbg(MBBI, E)) {
1573     MachineInstr &MI = *MBBI;
1574 
1575     UsedInBetween.accumulate(MI);
1576 
1577     // Don't count transient instructions towards the search limit since there
1578     // may be different numbers of them if e.g. debug information is present.
1579     if (!MI.isTransient())
1580       ++Count;
1581 
1582     Flags.setSExtIdx(-1);
1583     if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) &&
1584         AArch64InstrInfo::getLdStOffsetOp(MI).isImm()) {
1585       assert(MI.mayLoadOrStore() && "Expected memory operation.");
1586       // If we've found another instruction with the same opcode, check to see
1587       // if the base and offset are compatible with our starting instruction.
1588       // These instructions all have scaled immediate operands, so we just
1589       // check for +1/-1. Make sure to check the new instruction offset is
1590       // actually an immediate and not a symbolic reference destined for
1591       // a relocation.
1592       Register MIBaseReg = AArch64InstrInfo::getLdStBaseOp(MI).getReg();
1593       int MIOffset = AArch64InstrInfo::getLdStOffsetOp(MI).getImm();
1594       bool MIIsUnscaled = TII->hasUnscaledLdStOffset(MI);
1595       if (IsUnscaled != MIIsUnscaled) {
1596         // We're trying to pair instructions that differ in how they are scaled.
1597         // If FirstMI is scaled then scale the offset of MI accordingly.
1598         // Otherwise, do the opposite (i.e., make MI's offset unscaled).
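        // E.g., if FirstMI is ldr w0, [x2] (scaled) and MI is
        // ldur w1, [x2, #4] (unscaled), MI's byte offset of 4 is divided by
        // the mem size of 4 so that both offsets are compared in scaled units.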
1599         int MemSize = TII->getMemScale(MI);
1600         if (MIIsUnscaled) {
1601           // If the unscaled offset isn't a multiple of the MemSize, we can't
1602           // pair the operations together: bail and keep looking.
1603           if (MIOffset % MemSize) {
1604             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1605                                               UsedRegUnits, TRI);
1606             MemInsns.push_back(&MI);
1607             continue;
1608           }
1609           MIOffset /= MemSize;
1610         } else {
1611           MIOffset *= MemSize;
1612         }
1613       }
1614 
1615       bool IsPreLdSt = isPreLdStPairCandidate(FirstMI, MI);
1616 
1617       if (BaseReg == MIBaseReg) {
        // If the offset of the second ld/st is not equal to the size of the
        // destination register, it can't be merged into a pre-indexed ld/st
        // pair. Additionally, if the base reg is used or modified, the
        // operations can't be paired: bail and keep looking.
1622         if (IsPreLdSt) {
1623           bool IsOutOfBounds = MIOffset != TII->getMemScale(MI);
1624           bool IsBaseRegUsed = !UsedRegUnits.available(
1625               AArch64InstrInfo::getLdStBaseOp(MI).getReg());
1626           bool IsBaseRegModified = !ModifiedRegUnits.available(
1627               AArch64InstrInfo::getLdStBaseOp(MI).getReg());
          // If the stored value and the base address of the second
          // instruction are the same, the store needs to use the updated
          // register value, so the instructions must not be paired.
1631           bool IsMIRegTheSame =
1632               TRI->regsOverlap(getLdStRegOp(MI).getReg(),
1633                                AArch64InstrInfo::getLdStBaseOp(MI).getReg());
1634           if (IsOutOfBounds || IsBaseRegUsed || IsBaseRegModified ||
1635               IsMIRegTheSame) {
1636             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1637                                               UsedRegUnits, TRI);
1638             MemInsns.push_back(&MI);
1639             continue;
1640           }
1641         } else {
1642           if ((Offset != MIOffset + OffsetStride) &&
1643               (Offset + OffsetStride != MIOffset)) {
1644             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1645                                               UsedRegUnits, TRI);
1646             MemInsns.push_back(&MI);
1647             continue;
1648           }
1649         }
1650 
1651         int MinOffset = Offset < MIOffset ? Offset : MIOffset;
1652         if (FindNarrowMerge) {
1653           // If the alignment requirements of the scaled wide load/store
1654           // instruction can't express the offset of the scaled narrow input,
1655           // bail and keep looking. For promotable zero stores, allow only when
1656           // the stored value is the same (i.e., WZR).
1657           if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) ||
1658               (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
1659             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1660                                               UsedRegUnits, TRI);
1661             MemInsns.push_back(&MI);
1662             continue;
1663           }
1664         } else {
1665           // Pairwise instructions have a 7-bit signed offset field. Single
1666           // insns have a 12-bit unsigned offset field.  If the resultant
1667           // immediate offset of merging these instructions is out of range for
1668           // a pairwise instruction, bail and keep looking.
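          // E.g., a 64-bit ldp/stp can encode byte offsets from -512 to 504,
          // in multiples of 8.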
1669           if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
1670             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1671                                               UsedRegUnits, TRI);
1672             MemInsns.push_back(&MI);
1673             continue;
1674           }
1675           // If the alignment requirements of the paired (scaled) instruction
1676           // can't express the offset of the unscaled input, bail and keep
1677           // looking.
1678           if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
1679             LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
1680                                               UsedRegUnits, TRI);
1681             MemInsns.push_back(&MI);
1682             continue;
1683           }
1684         }
1685         // If the destination register of one load is the same register or a
1686         // sub/super register of the other load, bail and keep looking. A
1687         // load-pair instruction with both destination registers the same is
1688         // UNPREDICTABLE and will result in an exception.
1689         if (MayLoad &&
1690             TRI->isSuperOrSubRegisterEq(Reg, getLdStRegOp(MI).getReg())) {
1691           LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
1692                                             TRI);
1693           MemInsns.push_back(&MI);
1694           continue;
1695         }
1696 
1697         // If the BaseReg has been modified, then we cannot do the optimization.
1698         // For example, in the following pattern
1699         //   ldr x1 [x2]
1700         //   ldr x2 [x3]
1701         //   ldr x4 [x2, #8],
1702         // the first and third ldr cannot be converted to ldp x1, x4, [x2]
1703         if (!ModifiedRegUnits.available(BaseReg))
1704           return E;
1705 
1706         // If the Rt of the second instruction was not modified or used between
1707         // the two instructions and none of the instructions between the second
1708         // and first alias with the second, we can combine the second into the
1709         // first.
1710         if (ModifiedRegUnits.available(getLdStRegOp(MI).getReg()) &&
1711             !(MI.mayLoad() &&
1712               !UsedRegUnits.available(getLdStRegOp(MI).getReg())) &&
1713             !mayAlias(MI, MemInsns, AA)) {
1714 
1715           Flags.setMergeForward(false);
1716           Flags.clearRenameReg();
1717           return MBBI;
1718         }
1719 
1720         // Likewise, if the Rt of the first instruction is not modified or used
1721         // between the two instructions and none of the instructions between the
1722         // first and the second alias with the first, we can combine the first
1723         // into the second.
1724         if (!(MayLoad &&
1725               !UsedRegUnits.available(getLdStRegOp(FirstMI).getReg())) &&
1726             !mayAlias(FirstMI, MemInsns, AA)) {
1727 
1728           if (ModifiedRegUnits.available(getLdStRegOp(FirstMI).getReg())) {
1729             Flags.setMergeForward(true);
1730             Flags.clearRenameReg();
1731             return MBBI;
1732           }
1733 
1734           if (DebugCounter::shouldExecute(RegRenamingCounter)) {
1735             if (!MaybeCanRename)
1736               MaybeCanRename = {canRenameUpToDef(FirstMI, UsedInBetween,
1737                                                  RequiredClasses, TRI)};
1738 
1739             if (*MaybeCanRename) {
1740               std::optional<MCPhysReg> MaybeRenameReg =
1741                   tryToFindRegisterToRename(*FirstMI.getParent()->getParent(),
1742                                             Reg, DefinedInBB, UsedInBetween,
1743                                             RequiredClasses, TRI);
1744               if (MaybeRenameReg) {
1745                 Flags.setRenameReg(*MaybeRenameReg);
1746                 Flags.setMergeForward(true);
1747                 MBBIWithRenameReg = MBBI;
1748               }
1749             }
1750           }
1751         }
1752         // Unable to combine these instructions due to interference in between.
1753         // Keep looking.
1754       }
1755     }
1756 
1757     if (Flags.getRenameReg())
1758       return MBBIWithRenameReg;
1759 
    // The instruction wasn't a matching load or store. Stop searching if we
    // encounter a call instruction that might modify memory.
1762     if (MI.isCall())
1763       return E;
1764 
1765     // Update modified / uses register units.
1766     LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);
1767 
1768     // Otherwise, if the base register is modified, we have no match, so
1769     // return early.
1770     if (!ModifiedRegUnits.available(BaseReg))
1771       return E;
1772 
1773     // Update list of instructions that read/write memory.
1774     if (MI.mayLoadOrStore())
1775       MemInsns.push_back(&MI);
1776   }
1777   return E;
1778 }
1779 
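// If the base register update being merged into a pre-indexed load/store is a
// stack adjustment immediately followed by a CFA-related CFI instruction,
// e.g.
//   sub sp, sp, #16
//   .cfi_def_cfa_offset 16
//   str x0, [sp]
// return that CFI instruction so it can be moved after the merged instruction
//   str x0, [sp, #-16]!
//   .cfi_def_cfa_offset 16
// Otherwise return the end iterator.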
1780 static MachineBasicBlock::iterator
1781 maybeMoveCFI(MachineInstr &MI, MachineBasicBlock::iterator MaybeCFI) {
1782   auto End = MI.getParent()->end();
1783   if (MaybeCFI == End ||
1784       MaybeCFI->getOpcode() != TargetOpcode::CFI_INSTRUCTION ||
1785       !(MI.getFlag(MachineInstr::FrameSetup) ||
1786         MI.getFlag(MachineInstr::FrameDestroy)) ||
1787       AArch64InstrInfo::getLdStBaseOp(MI).getReg() != AArch64::SP)
1788     return End;
1789 
1790   const MachineFunction &MF = *MI.getParent()->getParent();
1791   unsigned CFIIndex = MaybeCFI->getOperand(0).getCFIIndex();
1792   const MCCFIInstruction &CFI = MF.getFrameInstructions()[CFIIndex];
1793   switch (CFI.getOperation()) {
1794   case MCCFIInstruction::OpDefCfa:
1795   case MCCFIInstruction::OpDefCfaOffset:
1796     return MaybeCFI;
1797   default:
1798     return End;
1799   }
1800 }
1801 
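// For example (illustrative), merging
//   ldp x0, x1, [sp]
//   add sp, sp, #32
// produces the post-indexed
//   ldp x0, x1, [sp], #32
// where the encoded immediate is scaled by the memory size: 32 / 8 = 4.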
1802 MachineBasicBlock::iterator
1803 AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
1804                                      MachineBasicBlock::iterator Update,
1805                                      bool IsPreIdx) {
1806   assert((Update->getOpcode() == AArch64::ADDXri ||
1807           Update->getOpcode() == AArch64::SUBXri) &&
1808          "Unexpected base register update instruction to merge!");
1809   MachineBasicBlock::iterator E = I->getParent()->end();
1810   MachineBasicBlock::iterator NextI = next_nodbg(I, E);
1811 
  // If updating the SP and the following instruction is a CFA offset related
  // CFI instruction, move it after the merged instruction.
1814   MachineBasicBlock::iterator CFI =
1815       IsPreIdx ? maybeMoveCFI(*Update, next_nodbg(Update, E)) : E;
1816 
1817   // Return the instruction following the merged instruction, which is
1818   // the instruction following our unmerged load. Unless that's the add/sub
1819   // instruction we're merging, in which case it's the one after that.
1820   if (NextI == Update)
1821     NextI = next_nodbg(NextI, E);
1822 
1823   int Value = Update->getOperand(2).getImm();
1824   assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
1825          "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
1826   if (Update->getOpcode() == AArch64::SUBXri)
1827     Value = -Value;
1828 
1829   unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
1830                              : getPostIndexedOpcode(I->getOpcode());
1831   MachineInstrBuilder MIB;
1832   int Scale, MinOffset, MaxOffset;
1833   getPrePostIndexedMemOpInfo(*I, Scale, MinOffset, MaxOffset);
1834   if (!AArch64InstrInfo::isPairedLdSt(*I)) {
1835     // Non-paired instruction.
1836     MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1837               .add(getLdStRegOp(*Update))
1838               .add(getLdStRegOp(*I))
1839               .add(AArch64InstrInfo::getLdStBaseOp(*I))
1840               .addImm(Value / Scale)
1841               .setMemRefs(I->memoperands())
1842               .setMIFlags(I->mergeFlagsWith(*Update));
1843   } else {
1844     // Paired instruction.
1845     MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
1846               .add(getLdStRegOp(*Update))
1847               .add(getLdStRegOp(*I, 0))
1848               .add(getLdStRegOp(*I, 1))
1849               .add(AArch64InstrInfo::getLdStBaseOp(*I))
1850               .addImm(Value / Scale)
1851               .setMemRefs(I->memoperands())
1852               .setMIFlags(I->mergeFlagsWith(*Update));
1853   }
1854   if (CFI != E) {
1855     MachineBasicBlock *MBB = I->getParent();
1856     MBB->splice(std::next(MIB.getInstr()->getIterator()), MBB, CFI);
1857   }
1858 
1859   if (IsPreIdx) {
1860     ++NumPreFolded;
1861     LLVM_DEBUG(dbgs() << "Creating pre-indexed load/store.");
1862   } else {
1863     ++NumPostFolded;
1864     LLVM_DEBUG(dbgs() << "Creating post-indexed load/store.");
1865   }
1866   LLVM_DEBUG(dbgs() << "    Replacing instructions:\n    ");
1867   LLVM_DEBUG(I->print(dbgs()));
1868   LLVM_DEBUG(dbgs() << "    ");
1869   LLVM_DEBUG(Update->print(dbgs()));
1870   LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
1871   LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
1872   LLVM_DEBUG(dbgs() << "\n");
1873 
1874   // Erase the old instructions for the block.
1875   I->eraseFromParent();
1876   Update->eraseFromParent();
1877 
1878   return NextI;
1879 }
1880 
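// Check whether MI is an add/sub of an immediate to \p BaseReg that can be
// folded into the load/store \p MemMI as a base register writeback, e.g.
//   ldr x0, [x2]
//   add x2, x2, #8
// matches when Offset is 8. A zero \p Offset matches an update by any
// (suitably scaled, in-range) amount.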
1881 bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
1882                                                MachineInstr &MI,
1883                                                unsigned BaseReg, int Offset) {
1884   switch (MI.getOpcode()) {
1885   default:
1886     break;
1887   case AArch64::SUBXri:
1888   case AArch64::ADDXri:
1889     // Make sure it's a vanilla immediate operand, not a relocation or
1890     // anything else we can't handle.
1891     if (!MI.getOperand(2).isImm())
1892       break;
1893     // Watch out for 1 << 12 shifted value.
1894     if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm()))
1895       break;
1896 
1897     // The update instruction source and destination register must be the
1898     // same as the load/store base register.
1899     if (MI.getOperand(0).getReg() != BaseReg ||
1900         MI.getOperand(1).getReg() != BaseReg)
1901       break;
1902 
1903     int UpdateOffset = MI.getOperand(2).getImm();
1904     if (MI.getOpcode() == AArch64::SUBXri)
1905       UpdateOffset = -UpdateOffset;
1906 
1907     // The immediate must be a multiple of the scaling factor of the pre/post
1908     // indexed instruction.
1909     int Scale, MinOffset, MaxOffset;
1910     getPrePostIndexedMemOpInfo(MemMI, Scale, MinOffset, MaxOffset);
1911     if (UpdateOffset % Scale != 0)
1912       break;
1913 
1914     // Scaled offset must fit in the instruction immediate.
1915     int ScaledOffset = UpdateOffset / Scale;
1916     if (ScaledOffset > MaxOffset || ScaledOffset < MinOffset)
1917       break;
1918 
    // If we have a non-zero Offset, check that it matches the amount we're
    // adding to the register; a zero Offset matches any update amount.
1921     if (!Offset || Offset == UpdateOffset)
1922       return true;
1923     break;
1924   }
1925   return false;
1926 }
1927 
1928 MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
1929     MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
1930   MachineBasicBlock::iterator E = I->getParent()->end();
1931   MachineInstr &MemMI = *I;
1932   MachineBasicBlock::iterator MBBI = I;
1933 
1934   Register BaseReg = AArch64InstrInfo::getLdStBaseOp(MemMI).getReg();
1935   int MIUnscaledOffset = AArch64InstrInfo::getLdStOffsetOp(MemMI).getImm() *
1936                          TII->getMemScale(MemMI);
1937 
  // Scan forward looking for post-index opportunities. An update instruction
  // can't be folded if the memory instruction doesn't have the offset we're
  // looking for.
1941   if (MIUnscaledOffset != UnscaledOffset)
1942     return E;
1943 
  // If the base register overlaps a source/destination register, we can't
  // merge the update. This does not apply to tag store instructions, which
  // ignore the address part of the source register.
  // Nor does it apply to STGPi, which, unlike normal stores, has no
  // unpredictable behavior in this case and always performs writeback after
  // reading the source register value.
1950   if (!isTagStore(MemMI) && MemMI.getOpcode() != AArch64::STGPi) {
1951     bool IsPairedInsn = AArch64InstrInfo::isPairedLdSt(MemMI);
1952     for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
1953       Register DestReg = getLdStRegOp(MemMI, i).getReg();
1954       if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
1955         return E;
1956     }
1957   }
1958 
1959   // Track which register units have been modified and used between the first
1960   // insn (inclusive) and the second insn.
1961   ModifiedRegUnits.clear();
1962   UsedRegUnits.clear();
1963   MBBI = next_nodbg(MBBI, E);
1964 
1965   // We can't post-increment the stack pointer if any instruction between
1966   // the memory access (I) and the increment (MBBI) can access the memory
1967   // region defined by [SP, MBBI].
1968   const bool BaseRegSP = BaseReg == AArch64::SP;
1969   if (BaseRegSP && needsWinCFI(I->getMF())) {
    // FIXME: For now, we always block the optimization over SP on Windows
    // targets as it would require adjusting the unwind/debug info; messing up
    // the unwind info can actually cause a miscompile.
1973     return E;
1974   }
1975 
1976   for (unsigned Count = 0; MBBI != E && Count < Limit;
1977        MBBI = next_nodbg(MBBI, E)) {
1978     MachineInstr &MI = *MBBI;
1979 
1980     // Don't count transient instructions towards the search limit since there
1981     // may be different numbers of them if e.g. debug information is present.
1982     if (!MI.isTransient())
1983       ++Count;
1984 
1985     // If we found a match, return it.
1986     if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset))
1987       return MBBI;
1988 
1989     // Update the status of what the instruction clobbered and used.
1990     LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);
1991 
1992     // Otherwise, if the base register is used or modified, we have no match, so
1993     // return early.
1994     // If we are optimizing SP, do not allow instructions that may load or store
1995     // in between the load and the optimized value update.
1996     if (!ModifiedRegUnits.available(BaseReg) ||
1997         !UsedRegUnits.available(BaseReg) ||
1998         (BaseRegSP && MBBI->mayLoadOrStore()))
1999       return E;
2000   }
2001   return E;
2002 }
2003 
2004 MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
2005     MachineBasicBlock::iterator I, unsigned Limit) {
2006   MachineBasicBlock::iterator B = I->getParent()->begin();
2007   MachineBasicBlock::iterator E = I->getParent()->end();
2008   MachineInstr &MemMI = *I;
2009   MachineBasicBlock::iterator MBBI = I;
2010   MachineFunction &MF = *MemMI.getMF();
2011 
2012   Register BaseReg = AArch64InstrInfo::getLdStBaseOp(MemMI).getReg();
2013   int Offset = AArch64InstrInfo::getLdStOffsetOp(MemMI).getImm();
2014 
  // If the load/store is the first instruction in the block, there's obviously
  // no matching update. Ditto if the memory offset isn't zero.
2017   if (MBBI == B || Offset != 0)
2018     return E;
2019   // If the base register overlaps a destination register, we can't
2020   // merge the update.
2021   if (!isTagStore(MemMI)) {
2022     bool IsPairedInsn = AArch64InstrInfo::isPairedLdSt(MemMI);
2023     for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
2024       Register DestReg = getLdStRegOp(MemMI, i).getReg();
2025       if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
2026         return E;
2027     }
2028   }
2029 
2030   const bool BaseRegSP = BaseReg == AArch64::SP;
2031   if (BaseRegSP && needsWinCFI(I->getMF())) {
    // FIXME: For now, we always block the optimization over SP on Windows
    // targets as it would require adjusting the unwind/debug info; messing up
    // the unwind info can actually cause a miscompile.
2035     return E;
2036   }
2037 
2038   const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
2039   unsigned RedZoneSize =
2040       Subtarget.getTargetLowering()->getRedZoneSize(MF.getFunction());
2041 
2042   // Track which register units have been modified and used between the first
2043   // insn (inclusive) and the second insn.
2044   ModifiedRegUnits.clear();
2045   UsedRegUnits.clear();
2046   unsigned Count = 0;
  bool MemAccessBeforeSPPreInc = false;
2048   do {
2049     MBBI = prev_nodbg(MBBI, B);
2050     MachineInstr &MI = *MBBI;
2051 
2052     // Don't count transient instructions towards the search limit since there
2053     // may be different numbers of them if e.g. debug information is present.
2054     if (!MI.isTransient())
2055       ++Count;
2056 
2057     // If we found a match, return it.
2058     if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset)) {
2059       // Check that the update value is within our red zone limit (which may be
2060       // zero).
      if (MemAccessBeforeSPPreInc && MBBI->getOperand(2).getImm() > RedZoneSize)
2062         return E;
2063       return MBBI;
2064     }
2065 
2066     // Update the status of what the instruction clobbered and used.
2067     LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);
2068 
2069     // Otherwise, if the base register is used or modified, we have no match, so
2070     // return early.
2071     if (!ModifiedRegUnits.available(BaseReg) ||
2072         !UsedRegUnits.available(BaseReg))
2073       return E;
    // Keep track of whether we have a memory access before an SP
    // pre-increment; in this case we need to validate later that the update
    // amount respects the red zone.
    if (BaseRegSP && MBBI->mayLoadOrStore())
      MemAccessBeforeSPPreInc = true;
2079   } while (MBBI != B && Count < Limit);
2080   return E;
2081 }
2082 
2083 bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
2084     MachineBasicBlock::iterator &MBBI) {
2085   MachineInstr &MI = *MBBI;
2086   // If this is a volatile load, don't mess with it.
2087   if (MI.hasOrderedMemoryRef())
2088     return false;
2089 
2090   if (needsWinCFI(MI.getMF()) && MI.getFlag(MachineInstr::FrameDestroy))
2091     return false;
2092 
2093   // Make sure this is a reg+imm.
2094   // FIXME: It is possible to extend it to handle reg+reg cases.
2095   if (!AArch64InstrInfo::getLdStOffsetOp(MI).isImm())
2096     return false;
2097 
2098   // Look backward up to LdStLimit instructions.
2099   MachineBasicBlock::iterator StoreI;
2100   if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
2101     ++NumLoadsFromStoresPromoted;
2102     // Promote the load. Keeping the iterator straight is a
2103     // pain, so we let the merge routine tell us what the next instruction
2104     // is after it's done mucking about.
2105     MBBI = promoteLoadFromStore(MBBI, StoreI);
2106     return true;
2107   }
2108   return false;
2109 }
2110 
2111 // Merge adjacent zero stores into a wider store.
2112 bool AArch64LoadStoreOpt::tryToMergeZeroStInst(
2113     MachineBasicBlock::iterator &MBBI) {
2114   assert(isPromotableZeroStoreInst(*MBBI) && "Expected narrow store.");
2115   MachineInstr &MI = *MBBI;
2116   MachineBasicBlock::iterator E = MI.getParent()->end();
2117 
2118   if (!TII->isCandidateToMergeOrPair(MI))
2119     return false;
2120 
  // Look ahead up to LdStLimit instructions for a mergeable instruction.
2122   LdStPairFlags Flags;
2123   MachineBasicBlock::iterator MergeMI =
2124       findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true);
2125   if (MergeMI != E) {
2126     ++NumZeroStoresPromoted;
2127 
2128     // Keeping the iterator straight is a pain, so we let the merge routine tell
2129     // us what the next instruction is after it's done mucking about.
2130     MBBI = mergeNarrowZeroStores(MBBI, MergeMI, Flags);
2131     return true;
2132   }
2133   return false;
2134 }
2135 
2136 // Find loads and stores that can be merged into a single load or store pair
2137 // instruction.
2138 bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
2139   MachineInstr &MI = *MBBI;
2140   MachineBasicBlock::iterator E = MI.getParent()->end();
2141 
2142   if (!TII->isCandidateToMergeOrPair(MI))
2143     return false;
2144 
2145   // Early exit if the offset is not possible to match. (6 bits of positive
2146   // range, plus allow an extra one in case we find a later insn that matches
2147   // with Offset-1)
2148   bool IsUnscaled = TII->hasUnscaledLdStOffset(MI);
2149   int Offset = AArch64InstrInfo::getLdStOffsetOp(MI).getImm();
2150   int OffsetStride = IsUnscaled ? TII->getMemScale(MI) : 1;
2151   // Allow one more for offset.
2152   if (Offset > 0)
2153     Offset -= OffsetStride;
2154   if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
2155     return false;
2156 
2157   // Look ahead up to LdStLimit instructions for a pairable instruction.
2158   LdStPairFlags Flags;
2159   MachineBasicBlock::iterator Paired =
2160       findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
2161   if (Paired != E) {
2162     ++NumPairCreated;
2163     if (TII->hasUnscaledLdStOffset(MI))
2164       ++NumUnscaledPairCreated;
2165     // Keeping the iterator straight is a pain, so we let the merge routine tell
2166     // us what the next instruction is after it's done mucking about.
2167     auto Prev = std::prev(MBBI);
2168     MBBI = mergePairedInsns(MBBI, Paired, Flags);
2169     // Collect liveness info for instructions between Prev and the new position
2170     // MBBI.
2171     for (auto I = std::next(Prev); I != MBBI; I++)
2172       updateDefinedRegisters(*I, DefinedInBB, TRI);
2173 
2174     return true;
2175   }
2176   return false;
2177 }
2178 
bool AArch64LoadStoreOpt::tryToMergeLdStUpdate(
    MachineBasicBlock::iterator &MBBI) {
2181   MachineInstr &MI = *MBBI;
2182   MachineBasicBlock::iterator E = MI.getParent()->end();
2183   MachineBasicBlock::iterator Update;
2184 
2185   // Look forward to try to form a post-index instruction. For example,
2186   // ldr x0, [x20]
2187   // add x20, x20, #32
2188   //   merged into:
2189   // ldr x0, [x20], #32
2190   Update = findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
2191   if (Update != E) {
2192     // Merge the update into the ld/st.
2193     MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
2194     return true;
2195   }
2196 
2197   // Don't know how to handle unscaled pre/post-index versions below, so bail.
2198   if (TII->hasUnscaledLdStOffset(MI.getOpcode()))
2199     return false;
2200 
2201   // Look back to try to find a pre-index instruction. For example,
2202   // add x0, x0, #8
2203   // ldr x1, [x0]
2204   //   merged into:
2205   // ldr x1, [x0, #8]!
2206   Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
2207   if (Update != E) {
2208     // Merge the update into the ld/st.
2209     MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
2210     return true;
2211   }
2212 
2213   // The immediate in the load/store is scaled by the size of the memory
2214   // operation. The immediate in the add we're looking for,
2215   // however, is not, so adjust here.
2216   int UnscaledOffset =
2217       AArch64InstrInfo::getLdStOffsetOp(MI).getImm() * TII->getMemScale(MI);
2218 
2219   // Look forward to try to find a pre-index instruction. For example,
2220   // ldr x1, [x0, #64]
2221   // add x0, x0, #64
2222   //   merged into:
2223   // ldr x1, [x0, #64]!
2224   Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
2225   if (Update != E) {
2226     // Merge the update into the ld/st.
2227     MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
2228     return true;
2229   }
2230 
2231   return false;
2232 }
2233 
2234 bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
2235                                         bool EnableNarrowZeroStOpt) {
2236 
2237   bool Modified = false;
  // Four transformations to do here:
2239   // 1) Find loads that directly read from stores and promote them by
2240   //    replacing with mov instructions. If the store is wider than the load,
2241   //    the load will be replaced with a bitfield extract.
2242   //      e.g.,
2243   //        str w1, [x0, #4]
2244   //        ldrh w2, [x0, #6]
2245   //        ; becomes
2246   //        str w1, [x0, #4]
2247   //        lsr w2, w1, #16
2248   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
2249        MBBI != E;) {
2250     if (isPromotableLoadFromStore(*MBBI) && tryToPromoteLoadFromStore(MBBI))
2251       Modified = true;
2252     else
2253       ++MBBI;
2254   }
2255   // 2) Merge adjacent zero stores into a wider store.
2256   //      e.g.,
2257   //        strh wzr, [x0]
2258   //        strh wzr, [x0, #2]
2259   //        ; becomes
2260   //        str wzr, [x0]
2261   //      e.g.,
2262   //        str wzr, [x0]
2263   //        str wzr, [x0, #4]
2264   //        ; becomes
2265   //        str xzr, [x0]
2266   if (EnableNarrowZeroStOpt)
2267     for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
2268          MBBI != E;) {
2269       if (isPromotableZeroStoreInst(*MBBI) && tryToMergeZeroStInst(MBBI))
2270         Modified = true;
2271       else
2272         ++MBBI;
2273     }
2274   // 3) Find loads and stores that can be merged into a single load or store
2275   //    pair instruction.
2276   //      e.g.,
2277   //        ldr x0, [x2]
2278   //        ldr x1, [x2, #8]
2279   //        ; becomes
2280   //        ldp x0, x1, [x2]
2281 
2282   if (MBB.getParent()->getRegInfo().tracksLiveness()) {
2283     DefinedInBB.clear();
2284     DefinedInBB.addLiveIns(MBB);
2285   }
2286 
2287   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
2288        MBBI != E;) {
2289     // Track currently live registers up to this point, to help with
2290     // searching for a rename register on demand.
2291     updateDefinedRegisters(*MBBI, DefinedInBB, TRI);
2292     if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
2293       Modified = true;
2294     else
2295       ++MBBI;
2296   }
2297   // 4) Find base register updates that can be merged into the load or store
2298   //    as a base-reg writeback.
2299   //      e.g.,
2300   //        ldr x0, [x2]
2301   //        add x2, x2, #4
2302   //        ; becomes
2303   //        ldr x0, [x2], #4
2304   for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
2305        MBBI != E;) {
2306     if (isMergeableLdStUpdate(*MBBI) && tryToMergeLdStUpdate(MBBI))
2307       Modified = true;
2308     else
2309       ++MBBI;
2310   }
2311 
2312   return Modified;
2313 }
2314 
2315 bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
2316   if (skipFunction(Fn.getFunction()))
2317     return false;
2318 
2319   Subtarget = &Fn.getSubtarget<AArch64Subtarget>();
2320   TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
2321   TRI = Subtarget->getRegisterInfo();
2322   AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2323 
2324   // Resize the modified and used register unit trackers.  We do this once
2325   // per function and then clear the register units each time we optimize a load
2326   // or store.
2327   ModifiedRegUnits.init(*TRI);
2328   UsedRegUnits.init(*TRI);
2329   DefinedInBB.init(*TRI);
2330 
2331   bool Modified = false;
2332   bool enableNarrowZeroStOpt = !Subtarget->requiresStrictAlign();
  for (auto &MBB : Fn)
    Modified |= optimizeBlock(MBB, enableNarrowZeroStOpt);
2337 
2338   return Modified;
2339 }
2340 
2341 // FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep loads and
2342 // stores near one another?  Note: The pre-RA instruction scheduler already has
2343 // hooks to try and schedule pairable loads/stores together to improve pairing
// opportunities.  Thus, a pre-RA pairing pass may not be worth the effort.
2345 
2346 // FIXME: When pairing store instructions it's very possible for this pass to
2347 // hoist a store with a KILL marker above another use (without a KILL marker).
2348 // The resulting IR is invalid, but nothing uses the KILL markers after this
2349 // pass, so it's never caused a problem in practice.
2350 
2351 /// createAArch64LoadStoreOptimizationPass - returns an instance of the
2352 /// load / store optimization pass.
2353 FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
2354   return new AArch64LoadStoreOpt();
2355 }
2356