//===- AArch64LoadStoreOptimizer.cpp - AArch64 load/store opt. pass -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that performs load / store related peephole
// optimizations. This pass should be run after register allocation.
//
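//
// For illustration (an editorial sketch, not part of the original header),
// typical rewrites performed here are:
//   ldr w0, [x2]   +   ldr w1, [x2, #4]   =>   ldp w0, w1, [x2]
//   str x0, [x3]   +   add x3, x3, #8     =>   str x0, [x3], #8   (post-index)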
//===----------------------------------------------------------------------===//

#include "AArch64InstrInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <functional>
#include <iterator>
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "aarch64-ldst-opt"

STATISTIC(NumPairCreated, "Number of load/store pair instructions generated");
STATISTIC(NumPostFolded, "Number of post-index updates folded");
STATISTIC(NumPreFolded, "Number of pre-index updates folded");
STATISTIC(NumUnscaledPairCreated,
          "Number of load/store pairs created from unscaled instructions");
STATISTIC(NumZeroStoresPromoted, "Number of narrow zero stores promoted");
STATISTIC(NumLoadsFromStoresPromoted, "Number of loads from stores promoted");

DEBUG_COUNTER(RegRenamingCounter, DEBUG_TYPE "-reg-renaming",
              "Controls which pairs are considered for renaming");

// The LdStLimit limits how far we search for load/store pairs.
static cl::opt<unsigned> LdStLimit("aarch64-load-store-scan-limit",
                                   cl::init(20), cl::Hidden);

// The UpdateLimit limits how far we search for update instructions when we form
// pre-/post-index instructions.
static cl::opt<unsigned> UpdateLimit("aarch64-update-scan-limit", cl::init(100),
                                     cl::Hidden);

// Enable register renaming to find additional store pairing opportunities.
static cl::opt<bool> EnableRenaming("aarch64-load-store-renaming",
                                    cl::init(true), cl::Hidden);

#define AARCH64_LOAD_STORE_OPT_NAME "AArch64 load / store optimization pass"

namespace {

using LdStPairFlags = struct LdStPairFlags {
  // If a matching instruction is found, MergeForward is set to true if the
  // merge is to remove the first instruction and replace the second with
  // a pair-wise insn, and false if the reverse is true.
  bool MergeForward = false;

  // SExtIdx gives the index of the result of the load pair that must be
  // extended. The value of SExtIdx assumes that the paired load produces the
  // value in this order: (I, returned iterator), i.e., -1 means no value has
  // to be extended, 0 means I, and 1 means the returned iterator.
  int SExtIdx = -1;
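
  // For example (illustrative, not from the original source): when pairing
  // "ldr w1, [sp]" with "ldrsw x0, [sp, #4]", the pair is formed on W
  // registers and SExtIdx records which of the two results must then be
  // sign-extended to 64 bits.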

  // If not none, RenameReg can be used to rename the result register of the
  // first store in a pair. Currently this only works when merging stores
  // forward.
  Optional<MCPhysReg> RenameReg = None;

  LdStPairFlags() = default;

  void setMergeForward(bool V = true) { MergeForward = V; }
  bool getMergeForward() const { return MergeForward; }

  void setSExtIdx(int V) { SExtIdx = V; }
  int getSExtIdx() const { return SExtIdx; }

  void setRenameReg(MCPhysReg R) { RenameReg = R; }
  void clearRenameReg() { RenameReg = None; }
  Optional<MCPhysReg> getRenameReg() const { return RenameReg; }
};

struct AArch64LoadStoreOpt : public MachineFunctionPass {
  static char ID;

  AArch64LoadStoreOpt() : MachineFunctionPass(ID) {
    initializeAArch64LoadStoreOptPass(*PassRegistry::getPassRegistry());
  }

  AliasAnalysis *AA;
  const AArch64InstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const AArch64Subtarget *Subtarget;

  // Track which register units have been modified and used.
  LiveRegUnits ModifiedRegUnits, UsedRegUnits;
  LiveRegUnits DefinedInBB;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AAResultsWrapperPass>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  // Scan the instructions looking for a load/store that can be combined
  // with the current instruction into a load/store pair.
  // Return the matching instruction if one is found, else MBB->end().
  MachineBasicBlock::iterator findMatchingInsn(MachineBasicBlock::iterator I,
                                               LdStPairFlags &Flags,
                                               unsigned Limit,
                                               bool FindNarrowMerge);

  // Scan the instructions looking for a store that writes to the address from
  // which the current load instruction reads. Return true if one is found.
  bool findMatchingStore(MachineBasicBlock::iterator I, unsigned Limit,
                         MachineBasicBlock::iterator &StoreI);

  // Merge the two narrow zero-store instructions indicated into a single
  // wider store instruction.
  MachineBasicBlock::iterator
  mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                        MachineBasicBlock::iterator MergeMI,
                        const LdStPairFlags &Flags);

  // Merge the two instructions indicated into a single pair-wise instruction.
  MachineBasicBlock::iterator
  mergePairedInsns(MachineBasicBlock::iterator I,
                   MachineBasicBlock::iterator Paired,
                   const LdStPairFlags &Flags);

  // Promote the load that reads directly from the address stored to.
  MachineBasicBlock::iterator
  promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                       MachineBasicBlock::iterator StoreI);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan forwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnForward(MachineBasicBlock::iterator I,
                                int UnscaledOffset, unsigned Limit);

  // Scan the instruction list to find a base register update that can
  // be combined with the current instruction (a load or store) using
  // pre or post indexed addressing with writeback. Scan backwards.
  MachineBasicBlock::iterator
  findMatchingUpdateInsnBackward(MachineBasicBlock::iterator I, unsigned Limit);

  // Find an instruction that updates the base register of the ld/st
  // instruction.
  bool isMatchingUpdateInsn(MachineInstr &MemMI, MachineInstr &MI,
                            unsigned BaseReg, int Offset);

  // Merge a pre- or post-index base register update into a ld/st instruction.
  MachineBasicBlock::iterator
  mergeUpdateInsn(MachineBasicBlock::iterator I,
                  MachineBasicBlock::iterator Update, bool IsPreIdx);

  // Find and merge zero store instructions.
  bool tryToMergeZeroStInst(MachineBasicBlock::iterator &MBBI);

  // Find and pair ldr/str instructions.
  bool tryToPairLdStInst(MachineBasicBlock::iterator &MBBI);

  // Find and promote load instructions which read directly from a store.
  bool tryToPromoteLoadFromStore(MachineBasicBlock::iterator &MBBI);

  // Find and merge base register updates before or after a ld/st instruction.
  bool tryToMergeLdStUpdate(MachineBasicBlock::iterator &MBBI);

  bool optimizeBlock(MachineBasicBlock &MBB, bool EnableNarrowZeroStOpt);

  bool runOnMachineFunction(MachineFunction &Fn) override;

  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

  StringRef getPassName() const override { return AARCH64_LOAD_STORE_OPT_NAME; }
};

char AArch64LoadStoreOpt::ID = 0;

} // end anonymous namespace

INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                AARCH64_LOAD_STORE_OPT_NAME, false, false)

static bool isNarrowStore(unsigned Opc) {
  switch (Opc) {
  default:
    return false;
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
    return true;
  }
}

// These instructions set the memory tag and either keep the memory contents
// unchanged or set them to zero, ignoring the address part of the source
// register.
static bool isTagStore(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::STGOffset:
  case AArch64::STZGOffset:
  case AArch64::ST2GOffset:
  case AArch64::STZ2GOffset:
    return true;
  }
}

static unsigned getMatchingNonSExtOpcode(unsigned Opc,
                                         bool *IsValidLdStrOpc = nullptr) {
  if (IsValidLdStrOpc)
    *IsValidLdStrOpc = true;
  switch (Opc) {
  default:
    if (IsValidLdStrOpc)
      *IsValidLdStrOpc = false;
    return std::numeric_limits<unsigned>::max();
  case AArch64::STRDui:
  case AArch64::STURDi:
  case AArch64::STRQui:
  case AArch64::STURQi:
  case AArch64::STRBBui:
  case AArch64::STURBBi:
  case AArch64::STRHHui:
  case AArch64::STURHHi:
  case AArch64::STRWui:
  case AArch64::STURWi:
  case AArch64::STRXui:
  case AArch64::STURXi:
  case AArch64::LDRDui:
  case AArch64::LDURDi:
  case AArch64::LDRQui:
  case AArch64::LDURQi:
  case AArch64::LDRWui:
  case AArch64::LDURWi:
  case AArch64::LDRXui:
  case AArch64::LDURXi:
  case AArch64::STRSui:
  case AArch64::STURSi:
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return Opc;
  case AArch64::LDRSWui:
    return AArch64::LDRWui;
  case AArch64::LDURSWi:
    return AArch64::LDURWi;
  }
}

static unsigned getMatchingWideOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no wide equivalent!");
  case AArch64::STRBBui:
    return AArch64::STRHHui;
  case AArch64::STRHHui:
    return AArch64::STRWui;
  case AArch64::STURBBi:
    return AArch64::STURHHi;
  case AArch64::STURHHi:
    return AArch64::STURWi;
  case AArch64::STURWi:
    return AArch64::STURXi;
  case AArch64::STRWui:
    return AArch64::STRXui;
  }
}

static unsigned getMatchingPairOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pairwise equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STPSi;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STPDi;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STPQi;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STPWi;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STPXi;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDPSi;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDPDi;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDPQi;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDPWi;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDPXi;
  case AArch64::LDRSWui:
  case AArch64::LDURSWi:
    return AArch64::LDPSWi;
  }
}

static unsigned isMatchingStore(MachineInstr &LoadInst,
                                MachineInstr &StoreInst) {
  unsigned LdOpc = LoadInst.getOpcode();
  unsigned StOpc = StoreInst.getOpcode();
  switch (LdOpc) {
  default:
    llvm_unreachable("Unsupported load instruction!");
  case AArch64::LDRBBui:
    return StOpc == AArch64::STRBBui || StOpc == AArch64::STRHHui ||
           StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURBBi:
    return StOpc == AArch64::STURBBi || StOpc == AArch64::STURHHi ||
           StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRHHui:
    return StOpc == AArch64::STRHHui || StOpc == AArch64::STRWui ||
           StOpc == AArch64::STRXui;
  case AArch64::LDURHHi:
    return StOpc == AArch64::STURHHi || StOpc == AArch64::STURWi ||
           StOpc == AArch64::STURXi;
  case AArch64::LDRWui:
    return StOpc == AArch64::STRWui || StOpc == AArch64::STRXui;
  case AArch64::LDURWi:
    return StOpc == AArch64::STURWi || StOpc == AArch64::STURXi;
  case AArch64::LDRXui:
    return StOpc == AArch64::STRXui;
  case AArch64::LDURXi:
    return StOpc == AArch64::STURXi;
  }
}

static unsigned getPreIndexedOpcode(unsigned Opc) {
  // FIXME: We don't currently support creating pre-indexed loads/stores when
  // the load or store is the unscaled version. If we decide to perform such an
  // optimization in the future the cases for the unscaled loads/stores will
  // need to be added here.
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no pre-indexed equivalent!");
  case AArch64::STRSui:
    return AArch64::STRSpre;
  case AArch64::STRDui:
    return AArch64::STRDpre;
  case AArch64::STRQui:
    return AArch64::STRQpre;
  case AArch64::STRBBui:
    return AArch64::STRBBpre;
  case AArch64::STRHHui:
    return AArch64::STRHHpre;
  case AArch64::STRWui:
    return AArch64::STRWpre;
  case AArch64::STRXui:
    return AArch64::STRXpre;
  case AArch64::LDRSui:
    return AArch64::LDRSpre;
  case AArch64::LDRDui:
    return AArch64::LDRDpre;
  case AArch64::LDRQui:
    return AArch64::LDRQpre;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpre;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpre;
  case AArch64::LDRWui:
    return AArch64::LDRWpre;
  case AArch64::LDRXui:
    return AArch64::LDRXpre;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpre;
  case AArch64::LDPSi:
    return AArch64::LDPSpre;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpre;
  case AArch64::LDPDi:
    return AArch64::LDPDpre;
  case AArch64::LDPQi:
    return AArch64::LDPQpre;
  case AArch64::LDPWi:
    return AArch64::LDPWpre;
  case AArch64::LDPXi:
    return AArch64::LDPXpre;
  case AArch64::STPSi:
    return AArch64::STPSpre;
  case AArch64::STPDi:
    return AArch64::STPDpre;
  case AArch64::STPQi:
    return AArch64::STPQpre;
  case AArch64::STPWi:
    return AArch64::STPWpre;
  case AArch64::STPXi:
    return AArch64::STPXpre;
  case AArch64::STGOffset:
    return AArch64::STGPreIndex;
  case AArch64::STZGOffset:
    return AArch64::STZGPreIndex;
  case AArch64::ST2GOffset:
    return AArch64::ST2GPreIndex;
  case AArch64::STZ2GOffset:
    return AArch64::STZ2GPreIndex;
  case AArch64::STGPi:
    return AArch64::STGPpre;
  }
}

static unsigned getPostIndexedOpcode(unsigned Opc) {
  switch (Opc) {
  default:
    llvm_unreachable("Opcode has no post-indexed equivalent!");
  case AArch64::STRSui:
  case AArch64::STURSi:
    return AArch64::STRSpost;
  case AArch64::STRDui:
  case AArch64::STURDi:
    return AArch64::STRDpost;
  case AArch64::STRQui:
  case AArch64::STURQi:
    return AArch64::STRQpost;
  case AArch64::STRBBui:
    return AArch64::STRBBpost;
  case AArch64::STRHHui:
    return AArch64::STRHHpost;
  case AArch64::STRWui:
  case AArch64::STURWi:
    return AArch64::STRWpost;
  case AArch64::STRXui:
  case AArch64::STURXi:
    return AArch64::STRXpost;
  case AArch64::LDRSui:
  case AArch64::LDURSi:
    return AArch64::LDRSpost;
  case AArch64::LDRDui:
  case AArch64::LDURDi:
    return AArch64::LDRDpost;
  case AArch64::LDRQui:
  case AArch64::LDURQi:
    return AArch64::LDRQpost;
  case AArch64::LDRBBui:
    return AArch64::LDRBBpost;
  case AArch64::LDRHHui:
    return AArch64::LDRHHpost;
  case AArch64::LDRWui:
  case AArch64::LDURWi:
    return AArch64::LDRWpost;
  case AArch64::LDRXui:
  case AArch64::LDURXi:
    return AArch64::LDRXpost;
  case AArch64::LDRSWui:
    return AArch64::LDRSWpost;
  case AArch64::LDPSi:
    return AArch64::LDPSpost;
  case AArch64::LDPSWi:
    return AArch64::LDPSWpost;
  case AArch64::LDPDi:
    return AArch64::LDPDpost;
  case AArch64::LDPQi:
    return AArch64::LDPQpost;
  case AArch64::LDPWi:
    return AArch64::LDPWpost;
  case AArch64::LDPXi:
    return AArch64::LDPXpost;
  case AArch64::STPSi:
    return AArch64::STPSpost;
  case AArch64::STPDi:
    return AArch64::STPDpost;
  case AArch64::STPQi:
    return AArch64::STPQpost;
  case AArch64::STPWi:
    return AArch64::STPWpost;
  case AArch64::STPXi:
    return AArch64::STPXpost;
  case AArch64::STGOffset:
    return AArch64::STGPostIndex;
  case AArch64::STZGOffset:
    return AArch64::STZGPostIndex;
  case AArch64::ST2GOffset:
    return AArch64::ST2GPostIndex;
  case AArch64::STZ2GOffset:
    return AArch64::STZ2GPostIndex;
  case AArch64::STGPi:
    return AArch64::STGPpost;
  }
}

static bool isPairedLdSt(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
  case AArch64::STGPi:
    return true;
  }
}

// Returns the scale and offset range of pre/post indexed variants of MI.
static void getPrePostIndexedMemOpInfo(const MachineInstr &MI, int &Scale,
                                       int &MinOffset, int &MaxOffset) {
  bool IsPaired = isPairedLdSt(MI);
  bool IsTagStore = isTagStore(MI);
  // ST*G and all paired ldst have the same scale in pre/post-indexed variants
  // as in the "unsigned offset" variant.
  // All other pre/post indexed ldst instructions are unscaled.
  Scale = (IsTagStore || IsPaired) ? AArch64InstrInfo::getMemScale(MI) : 1;

  if (IsPaired) {
    MinOffset = -64;
    MaxOffset = 63;
  } else {
    MinOffset = -256;
    MaxOffset = 255;
  }
}
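
// For example (illustrative): STPXi has Scale == 8, so its pre/post-indexed
// variants encode byte offsets in [-64 * 8, 63 * 8] = [-512, 504], while an
// unpaired STRXpre/STRXpost encodes unscaled byte offsets in [-256, 255].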

static MachineOperand &getLdStRegOp(MachineInstr &MI,
                                    unsigned PairedRegOp = 0) {
  assert(PairedRegOp < 2 && "Unexpected register operand idx.");
  unsigned Idx = isPairedLdSt(MI) ? PairedRegOp : 0;
  return MI.getOperand(Idx);
}

static const MachineOperand &getLdStBaseOp(const MachineInstr &MI) {
  unsigned Idx = isPairedLdSt(MI) ? 2 : 1;
  return MI.getOperand(Idx);
}

static const MachineOperand &getLdStOffsetOp(const MachineInstr &MI) {
  unsigned Idx = isPairedLdSt(MI) ? 3 : 2;
  return MI.getOperand(Idx);
}

static bool isLdOffsetInRangeOfSt(MachineInstr &LoadInst,
                                  MachineInstr &StoreInst,
                                  const AArch64InstrInfo *TII) {
  assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
  int LoadSize = TII->getMemScale(LoadInst);
  int StoreSize = TII->getMemScale(StoreInst);
  int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
                             ? getLdStOffsetOp(StoreInst).getImm()
                             : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
  int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
  return (UnscaledStOffset <= UnscaledLdOffset) &&
         (UnscaledLdOffset + LoadSize <= (UnscaledStOffset + StoreSize));
}
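
// Worked example (illustrative): an 8-byte store at unscaled offset 0 covers
// a 4-byte load at unscaled offset 4, since 0 <= 4 and 4 + 4 <= 0 + 8.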

static bool isPromotableZeroStoreInst(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  return (Opc == AArch64::STRWui || Opc == AArch64::STURWi ||
          isNarrowStore(Opc)) &&
         getLdStRegOp(MI).getReg() == AArch64::WZR;
}

static bool isPromotableLoadFromStore(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  // Scaled instructions.
  case AArch64::LDRBBui:
  case AArch64::LDRHHui:
  case AArch64::LDRWui:
  case AArch64::LDRXui:
  // Unscaled instructions.
  case AArch64::LDURBBi:
  case AArch64::LDURHHi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
    return true;
  }
}

static bool isMergeableLdStUpdate(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  switch (Opc) {
  default:
    return false;
  // Scaled instructions.
  case AArch64::STRSui:
  case AArch64::STRDui:
  case AArch64::STRQui:
  case AArch64::STRXui:
  case AArch64::STRWui:
  case AArch64::STRHHui:
  case AArch64::STRBBui:
  case AArch64::LDRSui:
  case AArch64::LDRDui:
  case AArch64::LDRQui:
  case AArch64::LDRXui:
  case AArch64::LDRWui:
  case AArch64::LDRHHui:
  case AArch64::LDRBBui:
  case AArch64::STGOffset:
  case AArch64::STZGOffset:
  case AArch64::ST2GOffset:
  case AArch64::STZ2GOffset:
  case AArch64::STGPi:
  // Unscaled instructions.
  case AArch64::STURSi:
  case AArch64::STURDi:
  case AArch64::STURQi:
  case AArch64::STURWi:
  case AArch64::STURXi:
  case AArch64::LDURSi:
  case AArch64::LDURDi:
  case AArch64::LDURQi:
  case AArch64::LDURWi:
  case AArch64::LDURXi:
  // Paired instructions.
  case AArch64::LDPSi:
  case AArch64::LDPSWi:
  case AArch64::LDPDi:
  case AArch64::LDPQi:
  case AArch64::LDPWi:
  case AArch64::LDPXi:
  case AArch64::STPSi:
  case AArch64::STPDi:
  case AArch64::STPQi:
  case AArch64::STPWi:
  case AArch64::STPXi:
    // Make sure this is a reg+imm (as opposed to an address reloc).
    if (!getLdStOffsetOp(MI).isImm())
      return false;

    return true;
  }
}
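
// For illustration (an editorial example, not from the original source),
// mergeable updates look like:
//   str x0, [x1]    then  add x1, x1, #8   =>  str x0, [x1], #8    (post-idx)
//   add x1, x1, #16 then  ldr x0, [x1]     =>  ldr x0, [x1, #16]!  (pre-idx)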

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeNarrowZeroStores(MachineBasicBlock::iterator I,
                                           MachineBasicBlock::iterator MergeMI,
                                           const LdStPairFlags &Flags) {
  assert(isPromotableZeroStoreInst(*I) && isPromotableZeroStoreInst(*MergeMI) &&
         "Expected promotable zero stores.");

  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator NextI = next_nodbg(I, E);
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way, the merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == MergeMI)
    NextI = next_nodbg(NextI, E);

  unsigned Opc = I->getOpcode();
  bool IsScaled = !TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsScaled ? 1 : TII->getMemScale(*I);

  bool MergeForward = Flags.getMergeForward();
  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? MergeMI : I;
  // MergeForward also determines from which instruction we copy the base
  // register operand, so that the resulting flags are compatible with the
  // input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(*MergeMI) : getLdStBaseOp(*I);

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI;
  if (getLdStOffsetOp(*I).getImm() ==
      getLdStOffsetOp(*MergeMI).getImm() + OffsetStride)
    RtMI = &*MergeMI;
  else
    RtMI = &*I;

  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
  // Change the scaled offset from small to large type.
  if (IsScaled) {
    assert(((OffsetImm & 1) == 0) && "Unexpected offset to merge");
    OffsetImm /= 2;
  }

  // Construct the new instruction.
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineInstrBuilder MIB;
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingWideOpcode(Opc)))
            .addReg(isNarrowStore(Opc) ? AArch64::WZR : AArch64::XZR)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .cloneMergedMemRefs({&*I, &*MergeMI})
            .setMIFlags(I->mergeFlagsWith(*MergeMI));
  (void)MIB;

  LLVM_DEBUG(dbgs() << "Creating wider store. Replacing instructions:\n    ");
  LLVM_DEBUG(I->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(MergeMI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
  LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  I->eraseFromParent();
  MergeMI->eraseFromParent();
  return NextI;
}
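
// For illustration (an editorial example, not from the original source):
//   strh wzr, [x0]   +   strh wzr, [x0, #2]   =>   str wzr, [x0]
// i.e. two adjacent narrow zero stores become one wider zero store.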

// Apply Fn to all instructions between MI and the beginning of the block, until
// a def for DefReg is reached. Returns true iff Fn returns true for all
// visited instructions. Stops after visiting Limit instructions.
static bool forAllMIsUntilDef(MachineInstr &MI, MCPhysReg DefReg,
                              const TargetRegisterInfo *TRI, unsigned Limit,
                              std::function<bool(MachineInstr &, bool)> &Fn) {
  auto MBB = MI.getParent();
  for (MachineInstr &I :
       instructionsWithoutDebug(MI.getReverseIterator(), MBB->instr_rend())) {
    if (!Limit)
      return false;
    --Limit;

    bool isDef = any_of(I.operands(), [DefReg, TRI](MachineOperand &MOP) {
      return MOP.isReg() && MOP.isDef() && !MOP.isDebug() && MOP.getReg() &&
             TRI->regsOverlap(MOP.getReg(), DefReg);
    });
    if (!Fn(I, isDef))
      return false;
    if (isDef)
      break;
  }
  return true;
}

static void updateDefinedRegisters(MachineInstr &MI, LiveRegUnits &Units,
                                   const TargetRegisterInfo *TRI) {

  for (const MachineOperand &MOP : phys_regs_and_masks(MI))
    if (MOP.isReg() && MOP.isKill())
      Units.removeReg(MOP.getReg());

  for (const MachineOperand &MOP : phys_regs_and_masks(MI))
    if (MOP.isReg() && !MOP.isKill())
      Units.addReg(MOP.getReg());
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Paired,
                                      const LdStPairFlags &Flags) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator NextI = next_nodbg(I, E);
  // If NextI is the second of the two instructions to be merged, we need
  // to skip one further. Either way, the merge will invalidate the iterator,
  // and we don't need to scan the new instruction, as it's a pairwise
  // instruction, which we're not considering for further action anyway.
  if (NextI == Paired)
    NextI = next_nodbg(NextI, E);

  int SExtIdx = Flags.getSExtIdx();
  unsigned Opc =
      SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
  bool IsUnscaled = TII->isUnscaledLdSt(Opc);
  int OffsetStride = IsUnscaled ? TII->getMemScale(*I) : 1;

  bool MergeForward = Flags.getMergeForward();

  Optional<MCPhysReg> RenameReg = Flags.getRenameReg();
  if (MergeForward && RenameReg) {
    MCRegister RegToRename = getLdStRegOp(*I).getReg();
    DefinedInBB.addReg(*RenameReg);

    // Return the sub/super register for RenameReg, matching the size of
    // OriginalReg.
    auto GetMatchingSubReg = [this,
                              RenameReg](MCPhysReg OriginalReg) -> MCPhysReg {
      for (MCPhysReg SubOrSuper : TRI->sub_and_superregs_inclusive(*RenameReg))
        if (TRI->getMinimalPhysRegClass(OriginalReg) ==
            TRI->getMinimalPhysRegClass(SubOrSuper))
          return SubOrSuper;
      llvm_unreachable("Should have found matching sub or super register!");
    };

    std::function<bool(MachineInstr &, bool)> UpdateMIs =
        [this, RegToRename, GetMatchingSubReg](MachineInstr &MI, bool IsDef) {
          if (IsDef) {
            bool SeenDef = false;
            for (auto &MOP : MI.operands()) {
              // Rename the first explicit definition and all implicit
              // definitions matching RegToRename.
              if (MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
                  (!SeenDef || (MOP.isDef() && MOP.isImplicit())) &&
                  TRI->regsOverlap(MOP.getReg(), RegToRename)) {
                assert((MOP.isImplicit() ||
                        (MOP.isRenamable() && !MOP.isEarlyClobber())) &&
                       "Need renamable operands");
                MOP.setReg(GetMatchingSubReg(MOP.getReg()));
                SeenDef = true;
              }
            }
          } else {
            for (auto &MOP : MI.operands()) {
              if (MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
                  TRI->regsOverlap(MOP.getReg(), RegToRename)) {
                assert((MOP.isImplicit() ||
                        (MOP.isRenamable() && !MOP.isEarlyClobber())) &&
                       "Need renamable operands");
                MOP.setReg(GetMatchingSubReg(MOP.getReg()));
              }
            }
          }
          LLVM_DEBUG(dbgs() << "Renamed " << MI << "\n");
          return true;
        };
    forAllMIsUntilDef(*I, RegToRename, TRI, LdStLimit, UpdateMIs);

#if !defined(NDEBUG)
    // Make sure the register used for renaming is not used between the paired
    // instructions. That would trash the content before the new paired
    // instruction.
    for (auto &MI :
         iterator_range<MachineInstrBundleIterator<llvm::MachineInstr>>(
             std::next(I), std::next(Paired)))
      assert(all_of(MI.operands(),
                    [this, &RenameReg](const MachineOperand &MOP) {
                      return !MOP.isReg() || MOP.isDebug() || !MOP.getReg() ||
                             !TRI->regsOverlap(MOP.getReg(), *RenameReg);
                    }) &&
             "Rename register used between paired instruction, trashing the "
             "content");
#endif
  }

  // Insert our new paired instruction after whichever of the paired
  // instructions MergeForward indicates.
  MachineBasicBlock::iterator InsertionPoint = MergeForward ? Paired : I;
  // MergeForward also determines from which instruction we copy the base
  // register operand, so that the resulting flags are compatible with the
  // input code.
  const MachineOperand &BaseRegOp =
      MergeForward ? getLdStBaseOp(*Paired) : getLdStBaseOp(*I);

  int Offset = getLdStOffsetOp(*I).getImm();
  int PairedOffset = getLdStOffsetOp(*Paired).getImm();
  bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
  if (IsUnscaled != PairedIsUnscaled) {
    // We're trying to pair instructions that differ in how they are scaled. If
    // I is scaled then scale the offset of Paired accordingly. Otherwise, do
    // the opposite (i.e., make Paired's offset unscaled).
    int MemSize = TII->getMemScale(*Paired);
    if (PairedIsUnscaled) {
      // If the unscaled offset isn't a multiple of the MemSize, we can't
      // pair the operations together.
      assert(!(PairedOffset % TII->getMemScale(*Paired)) &&
             "Offset should be a multiple of the stride!");
      PairedOffset /= MemSize;
    } else {
      PairedOffset *= MemSize;
    }
  }

  // Which register is Rt and which is Rt2 depends on the offset order.
  MachineInstr *RtMI, *Rt2MI;
  if (Offset == PairedOffset + OffsetStride) {
    RtMI = &*Paired;
    Rt2MI = &*I;
    // Here we swapped the assumption made for SExtIdx.
    // I.e., we turn ldp I, Paired into ldp Paired, I.
    // Update the index accordingly.
    if (SExtIdx != -1)
      SExtIdx = (SExtIdx + 1) % 2;
  } else {
    RtMI = &*I;
    Rt2MI = &*Paired;
  }
  int OffsetImm = getLdStOffsetOp(*RtMI).getImm();
  // Scale the immediate offset, if necessary.
  if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
    assert(!(OffsetImm % TII->getMemScale(*RtMI)) &&
           "Unscaled offset cannot be scaled.");
    OffsetImm /= TII->getMemScale(*RtMI);
  }

  // Construct the new instruction.
  MachineInstrBuilder MIB;
  DebugLoc DL = I->getDebugLoc();
  MachineBasicBlock *MBB = I->getParent();
  MachineOperand RegOp0 = getLdStRegOp(*RtMI);
  MachineOperand RegOp1 = getLdStRegOp(*Rt2MI);
  // Kill flags may become invalid when moving stores for pairing.
  if (RegOp0.isUse()) {
    if (!MergeForward) {
      // Clear kill flags on store if moving upwards. Example:
      //   STRWui %w0, ...
      //   USE %w1
      //   STRWui kill %w1  ; need to clear kill flag when moving STRWui upwards
      RegOp0.setIsKill(false);
      RegOp1.setIsKill(false);
    } else {
      // Clear kill flags of the first store's register. Example:
      //   STRWui %w1, ...
      //   USE kill %w1  ; need to clear kill flag when moving STRWui downwards
      //   STRW %w0
      Register Reg = getLdStRegOp(*I).getReg();
      for (MachineInstr &MI : make_range(std::next(I), Paired))
        MI.clearRegisterKills(Reg, TRI);
    }
  }
  MIB = BuildMI(*MBB, InsertionPoint, DL, TII->get(getMatchingPairOpcode(Opc)))
            .add(RegOp0)
            .add(RegOp1)
            .add(BaseRegOp)
            .addImm(OffsetImm)
            .cloneMergedMemRefs({&*I, &*Paired})
            .setMIFlags(I->mergeFlagsWith(*Paired));

  (void)MIB;

  LLVM_DEBUG(
      dbgs() << "Creating pair load/store. Replacing instructions:\n    ");
  LLVM_DEBUG(I->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(Paired->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
  if (SExtIdx != -1) {
    // Generate the sign extension for the proper result of the ldp.
    // I.e., with X1, that would be:
    // %w1 = KILL %w1, implicit-def %x1
    // %x1 = SBFMXri killed %x1, 0, 31
    MachineOperand &DstMO = MIB->getOperand(SExtIdx);
    // Right now, DstMO has the extended register, since it comes from an
    // extended opcode.
    Register DstRegX = DstMO.getReg();
    // Get the W variant of that register.
    Register DstRegW = TRI->getSubReg(DstRegX, AArch64::sub_32);
    // Update the result of LDP to use the W instead of the X variant.
    DstMO.setReg(DstRegW);
    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
    LLVM_DEBUG(dbgs() << "\n");
    // Make the machine verifier happy by providing a definition for
    // the X register.
    // Insert this definition right after the generated LDP, i.e., before
    // InsertionPoint.
    MachineInstrBuilder MIBKill =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(TargetOpcode::KILL), DstRegW)
            .addReg(DstRegW)
            .addReg(DstRegX, RegState::Define);
    MIBKill->getOperand(2).setImplicit();
    // Create the sign extension.
    MachineInstrBuilder MIBSXTW =
        BuildMI(*MBB, InsertionPoint, DL, TII->get(AArch64::SBFMXri), DstRegX)
            .addReg(DstRegX)
            .addImm(0)
            .addImm(31);
    (void)MIBSXTW;
    LLVM_DEBUG(dbgs() << "  Extend operand:\n    ");
    LLVM_DEBUG(((MachineInstr *)MIBSXTW)->print(dbgs()));
  } else {
    LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  }
  LLVM_DEBUG(dbgs() << "\n");

  if (MergeForward)
    for (const MachineOperand &MOP : phys_regs_and_masks(*I))
      if (MOP.isReg() && MOP.isKill())
        DefinedInBB.addReg(MOP.getReg());

  // Erase the old instructions.
  I->eraseFromParent();
  Paired->eraseFromParent();

  return NextI;
}
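
// For illustration (an editorial example, not from the original source):
// pairing a zero-extending and a sign-extending load such as
//   ldr w1, [sp]   +   ldrsw x0, [sp, #4]
// yields "ldp w1, w0, [sp]" followed by a KILL and an SBFMXri that
// materialize the 64-bit sign extension of the second result.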

MachineBasicBlock::iterator
AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
                                          MachineBasicBlock::iterator StoreI) {
  MachineBasicBlock::iterator NextI =
      next_nodbg(LoadI, LoadI->getParent()->end());

  int LoadSize = TII->getMemScale(*LoadI);
  int StoreSize = TII->getMemScale(*StoreI);
  Register LdRt = getLdStRegOp(*LoadI).getReg();
  const MachineOperand &StMO = getLdStRegOp(*StoreI);
  Register StRt = getLdStRegOp(*StoreI).getReg();
  bool IsStoreXReg = TRI->getRegClass(AArch64::GPR64RegClassID)->contains(StRt);

  assert((IsStoreXReg ||
          TRI->getRegClass(AArch64::GPR32RegClassID)->contains(StRt)) &&
         "Unexpected RegClass");

  MachineInstr *BitExtMI;
  if (LoadSize == StoreSize && (LoadSize == 4 || LoadSize == 8)) {
    // Remove the load if the destination register of the load is the same
    // register that holds the stored value.
    if (StRt == LdRt && LoadSize == 8) {
      for (MachineInstr &MI : make_range(StoreI->getIterator(),
                                         LoadI->getIterator())) {
        if (MI.killsRegister(StRt, TRI)) {
          MI.clearRegisterKills(StRt, TRI);
          break;
        }
      }
      LLVM_DEBUG(dbgs() << "Remove load instruction:\n    ");
      LLVM_DEBUG(LoadI->print(dbgs()));
      LLVM_DEBUG(dbgs() << "\n");
      LoadI->eraseFromParent();
      return NextI;
    }
    // Replace the load with a mov if the load and store have the same size.
    BitExtMI =
        BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                TII->get(IsStoreXReg ? AArch64::ORRXrs : AArch64::ORRWrs), LdRt)
            .addReg(IsStoreXReg ? AArch64::XZR : AArch64::WZR)
            .add(StMO)
            .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
            .setMIFlags(LoadI->getFlags());
  } else {
    // FIXME: Currently we disable this transformation on big-endian targets
    // because performance and correctness have been verified only on
    // little-endian targets.
    if (!Subtarget->isLittleEndian())
      return NextI;
    bool IsUnscaled = TII->isUnscaledLdSt(*LoadI);
    assert(IsUnscaled == TII->isUnscaledLdSt(*StoreI) &&
           "Unsupported ld/st match");
    assert(LoadSize <= StoreSize && "Invalid load size");
    int UnscaledLdOffset = IsUnscaled
                               ? getLdStOffsetOp(*LoadI).getImm()
                               : getLdStOffsetOp(*LoadI).getImm() * LoadSize;
    int UnscaledStOffset = IsUnscaled
                               ? getLdStOffsetOp(*StoreI).getImm()
                               : getLdStOffsetOp(*StoreI).getImm() * StoreSize;
    int Width = LoadSize * 8;
    unsigned DestReg =
        IsStoreXReg ? Register(TRI->getMatchingSuperReg(
                          LdRt, AArch64::sub_32, &AArch64::GPR64RegClass))
                    : LdRt;

    assert((UnscaledLdOffset >= UnscaledStOffset &&
            (UnscaledLdOffset + LoadSize) <= UnscaledStOffset + StoreSize) &&
           "Invalid offset");

    int Immr = 8 * (UnscaledLdOffset - UnscaledStOffset);
    int Imms = Immr + Width - 1;
    if (UnscaledLdOffset == UnscaledStOffset) {
      uint32_t AndMaskEncoded = ((IsStoreXReg ? 1 : 0) << 12) // N
                                | ((Immr) << 6)               // immr
                                | ((Imms) << 0)               // imms
          ;

      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::ANDXri : AArch64::ANDWri),
                  DestReg)
              .add(StMO)
              .addImm(AndMaskEncoded)
              .setMIFlags(LoadI->getFlags());
    } else {
      BitExtMI =
          BuildMI(*LoadI->getParent(), LoadI, LoadI->getDebugLoc(),
                  TII->get(IsStoreXReg ? AArch64::UBFMXri : AArch64::UBFMWri),
                  DestReg)
              .add(StMO)
              .addImm(Immr)
              .addImm(Imms)
              .setMIFlags(LoadI->getFlags());
    }
  }
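
  // Worked example (illustrative, little-endian, not from the original
  // source): for "str x0, [x1]" followed by "ldrh w2, [x1, #2]",
  // Immr = 8 * (2 - 0) = 16 and Imms = 16 + 16 - 1 = 31, so the load is
  // rewritten to "ubfm x2, x0, #16, #31", extracting bits [31:16] of the
  // stored value, i.e. the halfword at byte offset 2.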

  // Clear kill flags between store and load.
  for (MachineInstr &MI : make_range(StoreI->getIterator(),
                                     BitExtMI->getIterator()))
    if (MI.killsRegister(StRt, TRI)) {
      MI.clearRegisterKills(StRt, TRI);
      break;
    }

  LLVM_DEBUG(dbgs() << "Promoting load by replacing:\n    ");
  LLVM_DEBUG(StoreI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(LoadI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instructions:\n    ");
  LLVM_DEBUG(StoreI->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG((BitExtMI)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions.
  LoadI->eraseFromParent();
  return NextI;
}

static bool inBoundsForPair(bool IsUnscaled, int Offset, int OffsetStride) {
  // Convert the byte-offset used by unscaled into an "element" offset used
  // by the scaled pair load/store instructions.
  if (IsUnscaled) {
    // If the byte-offset isn't a multiple of the stride, there's no point
    // trying to match it.
    if (Offset % OffsetStride)
      return false;
    Offset /= OffsetStride;
  }
  return Offset <= 63 && Offset >= -64;
}
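
// For example (illustrative): with 8-byte elements the pairwise imm7 field
// covers element offsets [-64, 63], i.e. byte offsets [-512, 504].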

// Do alignment, specialized to power of 2 and for signed ints,
// avoiding having to do a C-style cast from uint64_t to int when
// using alignTo from include/llvm/Support/MathExtras.h.
// FIXME: Move this function to include/MathExtras.h?
static int alignTo(int Num, int PowOf2) {
  return (Num + PowOf2 - 1) & ~(PowOf2 - 1);
}
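
// E.g. (illustrative) alignTo(10, 8) == 16 and alignTo(-9, 8) == -8; rounding
// is toward positive infinity.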

static bool mayAlias(MachineInstr &MIa,
                     SmallVectorImpl<MachineInstr *> &MemInsns,
                     AliasAnalysis *AA) {
  for (MachineInstr *MIb : MemInsns)
    if (MIa.mayAlias(AA, *MIb, /*UseTBAA*/ false))
      return true;

  return false;
}

bool AArch64LoadStoreOpt::findMatchingStore(
    MachineBasicBlock::iterator I, unsigned Limit,
    MachineBasicBlock::iterator &StoreI) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator MBBI = I;
  MachineInstr &LoadMI = *I;
  Register BaseReg = getLdStBaseOp(LoadMI).getReg();

  // If the load is the first instruction in the block, there's obviously
  // no matching store to find.
  if (MBBI == B)
    return false;

  // Track which register units have been modified and used between the first
  // insn and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();

  unsigned Count = 0;
  do {
    MBBI = prev_nodbg(MBBI, B);
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If the load instruction reads directly from the address to which the
    // store instruction writes and the stored value is not modified, we can
    // promote the load. Since we do not handle stores with pre-/post-index,
    // it's unnecessary to check if BaseReg is modified by the store itself.
    // Also, we can't handle stores without an immediate offset operand,
    // since the operand might be the address of a global variable.
    if (MI.mayStore() && isMatchingStore(LoadMI, MI) &&
        BaseReg == getLdStBaseOp(MI).getReg() && getLdStOffsetOp(MI).isImm() &&
        isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
        ModifiedRegUnits.available(getLdStRegOp(MI).getReg())) {
      StoreI = MBBI;
      return true;
    }

    if (MI.isCall())
      return false;

    // Update modified / uses register units.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (!ModifiedRegUnits.available(BaseReg))
      return false;

    // If we encounter a store aliased with the load, return early.
    if (MI.mayStore() && LoadMI.mayAlias(AA, MI, /*UseTBAA*/ false))
      return false;
  } while (MBBI != B && Count < Limit);
  return false;
}
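
// For illustration (an editorial example, not from the original source),
// findMatchingStore() recognizes patterns such as
//   str w0, [x1, #4]
//   ...              ; no redefinition of x1 or w0, no aliasing store, no call
//   ldr w2, [x1, #4]
// so that promoteLoadFromStore() can replace the load with a register move.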

// Returns true if FirstMI and MI are candidates for merging or pairing.
// Otherwise, returns false.
static bool areCandidatesToMergeOrPair(MachineInstr &FirstMI, MachineInstr &MI,
                                       LdStPairFlags &Flags,
                                       const AArch64InstrInfo *TII) {
  // If this is volatile or if pairing is suppressed, not a candidate.
  if (MI.hasOrderedMemoryRef() || TII->isLdStPairSuppressed(MI))
    return false;

  // We should have already checked FirstMI for pair suppression and volatility.
  assert(!FirstMI.hasOrderedMemoryRef() &&
         !TII->isLdStPairSuppressed(FirstMI) &&
         "FirstMI shouldn't get here if either of these checks are true.");

  unsigned OpcA = FirstMI.getOpcode();
  unsigned OpcB = MI.getOpcode();

  // Opcodes match: nothing more to check.
  if (OpcA == OpcB)
    return true;

  // Try to match a sign-extended load/store with a zero-extended load/store.
  bool IsValidLdStrOpc, PairIsValidLdStrOpc;
  unsigned NonSExtOpc = getMatchingNonSExtOpcode(OpcA, &IsValidLdStrOpc);
  assert(IsValidLdStrOpc &&
         "Given Opc should be a Load or Store with an immediate");
  // OpcA will be the first instruction in the pair.
  if (NonSExtOpc == getMatchingNonSExtOpcode(OpcB, &PairIsValidLdStrOpc)) {
    Flags.setSExtIdx(NonSExtOpc == (unsigned)OpcA ? 1 : 0);
    return true;
  }

  // If the second instruction isn't even a mergeable/pairable load/store,
  // bail out.
  if (!PairIsValidLdStrOpc)
    return false;

  // FIXME: We don't support merging narrow stores with mixed scaled/unscaled
  // offsets.
  if (isNarrowStore(OpcA) || isNarrowStore(OpcB))
    return false;

  // Try to match an unscaled load/store with a scaled load/store.
  return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) &&
         getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);

  // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
}

static bool
canRenameUpToDef(MachineInstr &FirstMI, LiveRegUnits &UsedInBetween,
                 SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
                 const TargetRegisterInfo *TRI) {
  if (!FirstMI.mayStore())
    return false;

  // Check if we can find an unused register which we can use to rename
  // the register used by the first load/store.
  auto *RegClass = TRI->getMinimalPhysRegClass(getLdStRegOp(FirstMI).getReg());
  MachineFunction &MF = *FirstMI.getParent()->getParent();
  if (!RegClass || !MF.getRegInfo().tracksLiveness())
    return false;

  auto RegToRename = getLdStRegOp(FirstMI).getReg();
  // For now, we only rename if the store operand gets killed at the store.
  if (!getLdStRegOp(FirstMI).isKill() &&
      !any_of(FirstMI.operands(),
              [TRI, RegToRename](const MachineOperand &MOP) {
                return MOP.isReg() && !MOP.isDebug() && MOP.getReg() &&
                       MOP.isImplicit() && MOP.isKill() &&
                       TRI->regsOverlap(RegToRename, MOP.getReg());
              })) {
    LLVM_DEBUG(dbgs() << "  Operand not killed at " << FirstMI << "\n");
    return false;
  }
  auto canRenameMOP = [TRI](const MachineOperand &MOP) {
    if (MOP.isReg()) {
      auto *RegClass = TRI->getMinimalPhysRegClass(MOP.getReg());
      // Renaming registers with multiple disjunct sub-registers (e.g. the
      // result of a LD3) means that all sub-registers are renamed, potentially
      // impacting other instructions we did not check. Bail out.
      // Note that this relies on the structure of the AArch64 register file. In
      // particular, a subregister cannot be written without overwriting the
      // whole register.
      if (RegClass->HasDisjunctSubRegs) {
        LLVM_DEBUG(
            dbgs()
            << "  Cannot rename operands with multiple disjunct subregisters ("
            << MOP << ")\n");
        return false;
      }
    }
    return MOP.isImplicit() ||
           (MOP.isRenamable() && !MOP.isEarlyClobber() && !MOP.isTied());
  };

  bool FoundDef = false;

  // For each instruction between FirstMI and the previous def for RegToRename,
  // we
  // * check if we can rename RegToRename in this instruction
  // * collect the registers used and required register classes for RegToRename.
  std::function<bool(MachineInstr &, bool)> CheckMIs = [&](MachineInstr &MI,
                                                           bool IsDef) {
    LLVM_DEBUG(dbgs() << "Checking " << MI << "\n");
    // Currently we do not try to rename across frame-setup instructions.
    if (MI.getFlag(MachineInstr::FrameSetup)) {
      LLVM_DEBUG(dbgs() << "  Cannot rename framesetup instructions currently ("
                        << MI << ")\n");
      return false;
    }

    UsedInBetween.accumulate(MI);

    // For a definition, check that we can rename the definition and exit the
    // loop.
    FoundDef = IsDef;

    // For defs, check if we can rename the first def of RegToRename.
    if (FoundDef) {
      // For some pseudo instructions, we might not generate code in the end
      // (e.g. KILL) and we would end up without a correct def for the rename
      // register.
      // TODO: This might be overly conservative and we could handle those cases
      // in multiple ways:
      //       1. Insert an extra copy, to materialize the def.
      //       2. Skip pseudo-defs until we find a non-pseudo def.
      if (MI.isPseudo()) {
        LLVM_DEBUG(dbgs() << "  Cannot rename pseudo instruction " << MI
                          << "\n");
        return false;
      }

      for (auto &MOP : MI.operands()) {
        if (!MOP.isReg() || !MOP.isDef() || MOP.isDebug() || !MOP.getReg() ||
            !TRI->regsOverlap(MOP.getReg(), RegToRename))
          continue;
        if (!canRenameMOP(MOP)) {
          LLVM_DEBUG(dbgs()
                     << "  Cannot rename " << MOP << " in " << MI << "\n");
          return false;
        }
        RequiredClasses.insert(TRI->getMinimalPhysRegClass(MOP.getReg()));
      }
      return true;
    } else {
      for (auto &MOP : MI.operands()) {
        if (!MOP.isReg() || MOP.isDebug() || !MOP.getReg() ||
            !TRI->regsOverlap(MOP.getReg(), RegToRename))
          continue;

        if (!canRenameMOP(MOP)) {
          LLVM_DEBUG(dbgs()
                     << "  Cannot rename " << MOP << " in " << MI << "\n");
          return false;
        }
        RequiredClasses.insert(TRI->getMinimalPhysRegClass(MOP.getReg()));
      }
    }
    return true;
  };

  if (!forAllMIsUntilDef(FirstMI, RegToRename, TRI, LdStLimit, CheckMIs))
    return false;

  if (!FoundDef) {
    LLVM_DEBUG(dbgs() << "  Did not find definition for register in BB\n");
    return false;
  }
  return true;
}

// Check if we can find a physical register for renaming. This register must:
// * not be defined up to FirstMI (checking DefinedInBB)
// * not used between the MI and the defining instruction of the register to
//   rename (checked using UsedInBetween).
// * is available in all used register classes (checked using RequiredClasses).
static Optional<MCPhysReg> tryToFindRegisterToRename(
    MachineInstr &FirstMI, MachineInstr &MI, LiveRegUnits &DefinedInBB,
    LiveRegUnits &UsedInBetween,
    SmallPtrSetImpl<const TargetRegisterClass *> &RequiredClasses,
    const TargetRegisterInfo *TRI) {
  auto &MF = *FirstMI.getParent()->getParent();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Checks if any sub- or super-register of PR is callee saved.
  auto AnySubOrSuperRegCalleePreserved = [&MF, TRI](MCPhysReg PR) {
    return any_of(TRI->sub_and_superregs_inclusive(PR),
                  [&MF, TRI](MCPhysReg SubOrSuper) {
                    return TRI->isCalleeSavedPhysReg(SubOrSuper, MF);
                  });
  };

  // Check if PR or one of its sub- or super-registers can be used for all
  // required register classes.
  auto CanBeUsedForAllClasses = [&RequiredClasses, TRI](MCPhysReg PR) {
    return all_of(RequiredClasses, [PR, TRI](const TargetRegisterClass *C) {
      return any_of(TRI->sub_and_superregs_inclusive(PR),
                    [C, TRI](MCPhysReg SubOrSuper) {
                      return C == TRI->getMinimalPhysRegClass(SubOrSuper);
                    });
    });
  };

  auto *RegClass = TRI->getMinimalPhysRegClass(getLdStRegOp(FirstMI).getReg());
  for (const MCPhysReg &PR : *RegClass) {
    if (DefinedInBB.available(PR) && UsedInBetween.available(PR) &&
        !RegInfo.isReserved(PR) && !AnySubOrSuperRegCalleePreserved(PR) &&
        CanBeUsedForAllClasses(PR)) {
      DefinedInBB.addReg(PR);
      LLVM_DEBUG(dbgs() << "Found rename register " << printReg(PR, TRI)
                        << "\n");
      return {PR};
    }
  }
  LLVM_DEBUG(dbgs() << "No rename register found from "
                    << TRI->getRegClassName(RegClass) << "\n");
  return None;
}
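
// Renaming example (illustrative, not from the original source): in
//   str w8, [x0]     ; w8 killed here
//   mov w8, #1
//   str w8, [x0, #4]
// the redefinition of w8 blocks moving the first store down to form a pair.
// Renaming the first store's w8 (and its def) to an unused register, say w9,
// allows the merged "stp w9, w8, [x0]".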
1436
1437 /// Scan the instructions looking for a load/store that can be combined with the
1438 /// current instruction into a wider equivalent or a load/store pair.
1439 MachineBasicBlock::iterator
findMatchingInsn(MachineBasicBlock::iterator I,LdStPairFlags & Flags,unsigned Limit,bool FindNarrowMerge)1440 AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
1441 LdStPairFlags &Flags, unsigned Limit,
1442 bool FindNarrowMerge) {
1443 MachineBasicBlock::iterator E = I->getParent()->end();
1444 MachineBasicBlock::iterator MBBI = I;
1445 MachineBasicBlock::iterator MBBIWithRenameReg;
1446 MachineInstr &FirstMI = *I;
1447 MBBI = next_nodbg(MBBI, E);
1448
1449 bool MayLoad = FirstMI.mayLoad();
1450 bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
1451 Register Reg = getLdStRegOp(FirstMI).getReg();
1452 Register BaseReg = getLdStBaseOp(FirstMI).getReg();
1453 int Offset = getLdStOffsetOp(FirstMI).getImm();
1454 int OffsetStride = IsUnscaled ? TII->getMemScale(FirstMI) : 1;
1455 bool IsPromotableZeroStore = isPromotableZeroStoreInst(FirstMI);
1456
1457 Optional<bool> MaybeCanRename = None;
1458 if (!EnableRenaming)
1459 MaybeCanRename = {false};
1460
1461 SmallPtrSet<const TargetRegisterClass *, 5> RequiredClasses;
1462 LiveRegUnits UsedInBetween;
1463 UsedInBetween.init(*TRI);
1464
1465 Flags.clearRenameReg();
1466
1467 // Track which register units have been modified and used between the first
1468 // insn (inclusive) and the second insn.
1469 ModifiedRegUnits.clear();
1470 UsedRegUnits.clear();
1471
1472 // Remember any instructions that read/write memory between FirstMI and MI.
1473 SmallVector<MachineInstr *, 4> MemInsns;
1474
1475 for (unsigned Count = 0; MBBI != E && Count < Limit;
1476 MBBI = next_nodbg(MBBI, E)) {
1477 MachineInstr &MI = *MBBI;
1478
1479 UsedInBetween.accumulate(MI);
1480
1481 // Don't count transient instructions towards the search limit since there
1482 // may be different numbers of them if e.g. debug information is present.
1483 if (!MI.isTransient())
1484 ++Count;
1485
1486 Flags.setSExtIdx(-1);
1487 if (areCandidatesToMergeOrPair(FirstMI, MI, Flags, TII) &&
1488 getLdStOffsetOp(MI).isImm()) {
1489 assert(MI.mayLoadOrStore() && "Expected memory operation.");
1490 // If we've found another instruction with the same opcode, check to see
1491 // if the base and offset are compatible with our starting instruction.
1492 // These instructions all have scaled immediate operands, so we just
1493 // check for +1/-1. Make sure to check the new instruction offset is
1494 // actually an immediate and not a symbolic reference destined for
1495 // a relocation.
1496 Register MIBaseReg = getLdStBaseOp(MI).getReg();
1497 int MIOffset = getLdStOffsetOp(MI).getImm();
1498 bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
1499 if (IsUnscaled != MIIsUnscaled) {
1500 // We're trying to pair instructions that differ in how they are scaled.
1501 // If FirstMI is scaled then scale the offset of MI accordingly.
1502 // Otherwise, do the opposite (i.e., make MI's offset unscaled).
        int MemSize = TII->getMemScale(MI);
        if (MIIsUnscaled) {
          // If the unscaled offset isn't a multiple of the MemSize, we can't
          // pair the operations together: bail and keep looking.
          if (MIOffset % MemSize) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
          MIOffset /= MemSize;
        } else {
          MIOffset *= MemSize;
        }
      }

      if (BaseReg == MIBaseReg && ((Offset == MIOffset + OffsetStride) ||
                                   (Offset + OffsetStride == MIOffset))) {
        int MinOffset = Offset < MIOffset ? Offset : MIOffset;
        if (FindNarrowMerge) {
          // If the alignment requirements of the scaled wide load/store
          // instruction can't express the offset of the scaled narrow input,
          // bail and keep looking. For promotable zero stores, allow only when
          // the stored value is the same (i.e., WZR).
          if ((!IsUnscaled && alignTo(MinOffset, 2) != MinOffset) ||
              (IsPromotableZeroStore && Reg != getLdStRegOp(MI).getReg())) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
        } else {
          // Pairwise instructions have a 7-bit signed offset field. Single
          // insns have a 12-bit unsigned offset field. If the resultant
          // immediate offset of merging these instructions is out of range for
          // a pairwise instruction, bail and keep looking.
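          // For example, two scaled loads at offsets #64 and #65 cannot be
          // paired: the resulting LDP would need an immediate of 64, outside
          // the signed 7-bit range [-64, 63].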
          if (!inBoundsForPair(IsUnscaled, MinOffset, OffsetStride)) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
          // If the alignment requirements of the paired (scaled) instruction
          // can't express the offset of the unscaled input, bail and keep
          // looking.
          if (IsUnscaled && (alignTo(MinOffset, OffsetStride) != MinOffset)) {
            LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits,
                                              UsedRegUnits, TRI);
            MemInsns.push_back(&MI);
            continue;
          }
        }
        // If the destination register of one load is the same register or a
        // sub/super register of the other load, bail and keep looking. A
        // load-pair instruction with both destination registers the same is
        // UNPREDICTABLE and will result in an exception.
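        // For example, "ldp x0, x0, [x2]" is architecturally UNPREDICTABLE
        // and must never be created by this pass.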
        if (MayLoad &&
            TRI->isSuperOrSubRegisterEq(Reg, getLdStRegOp(MI).getReg())) {
          LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits,
                                            TRI);
          MemInsns.push_back(&MI);
          continue;
        }

        // If the BaseReg has been modified, then we cannot do the
        // optimization. For example, in the following pattern:
        //   ldr x1, [x2]
        //   ldr x2, [x3]
        //   ldr x4, [x2, #8]
        // the first and third ldr cannot be converted to ldp x1, x4, [x2].
        if (!ModifiedRegUnits.available(BaseReg))
          return E;

        // If the Rt of the second instruction was not modified or used between
        // the two instructions and none of the instructions between the second
        // and first alias with the second, we can combine the second into the
        // first.
        if (ModifiedRegUnits.available(getLdStRegOp(MI).getReg()) &&
            !(MI.mayLoad() &&
              !UsedRegUnits.available(getLdStRegOp(MI).getReg())) &&
            !mayAlias(MI, MemInsns, AA)) {

          Flags.setMergeForward(false);
          Flags.clearRenameReg();
          return MBBI;
        }

        // Likewise, if the Rt of the first instruction is not modified or used
        // between the two instructions and none of the instructions between the
        // first and the second alias with the first, we can combine the first
        // into the second.
        if (!(MayLoad &&
              !UsedRegUnits.available(getLdStRegOp(FirstMI).getReg())) &&
            !mayAlias(FirstMI, MemInsns, AA)) {

          if (ModifiedRegUnits.available(getLdStRegOp(FirstMI).getReg())) {
            Flags.setMergeForward(true);
            Flags.clearRenameReg();
            return MBBI;
          }

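          // FirstMI's Rt is live somewhere between the two instructions, so a
          // plain forward merge is not possible. It may still be possible to
          // pair the two by renaming Rt (and its uses up to the next
          // definition) to a register that is free throughout the gap.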
          if (DebugCounter::shouldExecute(RegRenamingCounter)) {
            if (!MaybeCanRename)
              MaybeCanRename = {canRenameUpToDef(FirstMI, UsedInBetween,
                                                 RequiredClasses, TRI)};

            if (*MaybeCanRename) {
              Optional<MCPhysReg> MaybeRenameReg = tryToFindRegisterToRename(
                  FirstMI, MI, DefinedInBB, UsedInBetween, RequiredClasses,
                  TRI);
              if (MaybeRenameReg) {
                Flags.setRenameReg(*MaybeRenameReg);
                Flags.setMergeForward(true);
                MBBIWithRenameReg = MBBI;
              }
            }
          }
        }
        // Unable to combine these instructions due to interference in between.
        // Keep looking.
      }
    }

    if (Flags.getRenameReg())
      return MBBIWithRenameReg;

    // The instruction wasn't a matching load or store. Stop searching if we
    // encounter a call instruction that might modify memory.
    if (MI.isCall())
      return E;

    // Update modified / used register units.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is modified, we have no match, so
    // return early.
    if (!ModifiedRegUnits.available(BaseReg))
      return E;

    // Update list of instructions that read/write memory.
    if (MI.mayLoadOrStore())
      MemInsns.push_back(&MI);
  }
  return E;
}

MachineBasicBlock::iterator
AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                     MachineBasicBlock::iterator Update,
                                     bool IsPreIdx) {
  assert((Update->getOpcode() == AArch64::ADDXri ||
          Update->getOpcode() == AArch64::SUBXri) &&
         "Unexpected base register update instruction to merge!");
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator NextI = next_nodbg(I, E);
  // Return the instruction following the merged instruction, which is
  // the instruction following our unmerged load. Unless that's the add/sub
  // instruction we're merging, in which case it's the one after that.
  if (NextI == Update)
    NextI = next_nodbg(NextI, E);

  int Value = Update->getOperand(2).getImm();
  assert(AArch64_AM::getShiftValue(Update->getOperand(3).getImm()) == 0 &&
         "Can't merge 1 << 12 offset into pre-/post-indexed load / store");
  if (Update->getOpcode() == AArch64::SUBXri)
    Value = -Value;

  unsigned NewOpc = IsPreIdx ? getPreIndexedOpcode(I->getOpcode())
                             : getPostIndexedOpcode(I->getOpcode());
  MachineInstrBuilder MIB;
  int Scale, MinOffset, MaxOffset;
  getPrePostIndexedMemOpInfo(*I, Scale, MinOffset, MaxOffset);
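  // Note: Value is the byte offset taken from the ADD/SUB, while the
  // writeback immediate of the new instruction is scaled by Scale (e.g., 1
  // for the single-register imm9 forms, the access size for paired forms),
  // hence the Value / Scale below.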
  if (!isPairedLdSt(*I)) {
    // Non-paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .add(getLdStRegOp(*Update))
              .add(getLdStRegOp(*I))
              .add(getLdStBaseOp(*I))
              .addImm(Value / Scale)
              .setMemRefs(I->memoperands())
              .setMIFlags(I->mergeFlagsWith(*Update));
  } else {
    // Paired instruction.
    MIB = BuildMI(*I->getParent(), I, I->getDebugLoc(), TII->get(NewOpc))
              .add(getLdStRegOp(*Update))
              .add(getLdStRegOp(*I, 0))
              .add(getLdStRegOp(*I, 1))
              .add(getLdStBaseOp(*I))
              .addImm(Value / Scale)
              .setMemRefs(I->memoperands())
              .setMIFlags(I->mergeFlagsWith(*Update));
  }
  (void)MIB;

  if (IsPreIdx) {
    ++NumPreFolded;
    LLVM_DEBUG(dbgs() << "Creating pre-indexed load/store.");
  } else {
    ++NumPostFolded;
    LLVM_DEBUG(dbgs() << "Creating post-indexed load/store.");
  }
  LLVM_DEBUG(dbgs() << "    Replacing instructions:\n    ");
  LLVM_DEBUG(I->print(dbgs()));
  LLVM_DEBUG(dbgs() << "    ");
  LLVM_DEBUG(Update->print(dbgs()));
  LLVM_DEBUG(dbgs() << "  with instruction:\n    ");
  LLVM_DEBUG(((MachineInstr *)MIB)->print(dbgs()));
  LLVM_DEBUG(dbgs() << "\n");

  // Erase the old instructions for the block.
  I->eraseFromParent();
  Update->eraseFromParent();

  return NextI;
}

bool AArch64LoadStoreOpt::isMatchingUpdateInsn(MachineInstr &MemMI,
                                               MachineInstr &MI,
                                               unsigned BaseReg, int Offset) {
  switch (MI.getOpcode()) {
  default:
    break;
  case AArch64::SUBXri:
  case AArch64::ADDXri:
    // Make sure it's a vanilla immediate operand, not a relocation or
    // anything else we can't handle.
    if (!MI.getOperand(2).isImm())
      break;
    // Watch out for 1 << 12 shifted value.
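    // (e.g., "add x0, x0, #1, lsl #12" actually adds 4096, which no
    // pre-/post-indexed immediate can represent).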
    if (AArch64_AM::getShiftValue(MI.getOperand(3).getImm()))
      break;

    // The update instruction source and destination register must be the
    // same as the load/store base register.
    if (MI.getOperand(0).getReg() != BaseReg ||
        MI.getOperand(1).getReg() != BaseReg)
      break;

    int UpdateOffset = MI.getOperand(2).getImm();
    if (MI.getOpcode() == AArch64::SUBXri)
      UpdateOffset = -UpdateOffset;

    // The immediate must be a multiple of the scaling factor of the pre/post
    // indexed instruction.
    int Scale, MinOffset, MaxOffset;
    getPrePostIndexedMemOpInfo(MemMI, Scale, MinOffset, MaxOffset);
    if (UpdateOffset % Scale != 0)
      break;

    // Scaled offset must fit in the instruction immediate.
    int ScaledOffset = UpdateOffset / Scale;
    if (ScaledOffset > MaxOffset || ScaledOffset < MinOffset)
      break;

    // If we have a non-zero Offset, we check that it matches the amount
    // we're adding to the register.
    if (!Offset || Offset == UpdateOffset)
      return true;
    break;
  }
  return false;
}

static bool needsWinCFI(const MachineFunction *MF) {
  return MF->getTarget().getMCAsmInfo()->usesWindowsCFI() &&
         MF->getFunction().needsUnwindTableEntry();
}

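/// Scan forward from a memory operation, looking for a base-register add/sub
/// whose immediate matches \p UnscaledOffset and that can be folded into the
/// memory operation as a writeback. With UnscaledOffset == 0 this finds
/// post-index candidates; with a nonzero offset it finds pre-index candidates
/// whose increment equals the access's existing offset.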
MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnForward(
    MachineBasicBlock::iterator I, int UnscaledOffset, unsigned Limit) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr &MemMI = *I;
  MachineBasicBlock::iterator MBBI = I;

  Register BaseReg = getLdStBaseOp(MemMI).getReg();
  int MIUnscaledOffset =
      getLdStOffsetOp(MemMI).getImm() * TII->getMemScale(MemMI);

  // Scan forward looking for post-index opportunities. Updating instructions
  // can't be formed if the memory instruction doesn't have the offset we're
  // looking for.
  if (MIUnscaledOffset != UnscaledOffset)
    return E;

  // If the base register overlaps a source/destination register, we can't
  // merge the update. This does not apply to tag store instructions, which
  // ignore the address part of the source register. Nor does it apply to
  // STGPi, which, unlike normal stores, has no unpredictable behavior in this
  // case and always performs the writeback after reading the source register
  // value.
  if (!isTagStore(MemMI) && MemMI.getOpcode() != AArch64::STGPi) {
    bool IsPairedInsn = isPairedLdSt(MemMI);
    for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
      Register DestReg = getLdStRegOp(MemMI, i).getReg();
      if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
        return E;
    }
  }

  // Track which register units have been modified and used between the first
  // insn (inclusive) and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();
  MBBI = next_nodbg(MBBI, E);

  // We can't post-increment the stack pointer if any instruction between
  // the memory access (I) and the increment (MBBI) can access the memory
  // region defined by [SP, MBBI].
  const bool BaseRegSP = BaseReg == AArch64::SP;
  if (BaseRegSP && needsWinCFI(I->getMF())) {
    // FIXME: For now, we always block the optimization over SP in Windows
    // targets as it would require adjusting the unwind/debug info; messing
    // up the unwind info can actually cause a miscompile.
    return E;
  }

  for (unsigned Count = 0; MBBI != E && Count < Limit;
       MBBI = next_nodbg(MBBI, E)) {
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(*I, MI, BaseReg, UnscaledOffset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early. If we are optimizing SP, do not allow instructions
    // that may load or store in between the load and the optimized value
    // update.
    if (!ModifiedRegUnits.available(BaseReg) ||
        !UsedRegUnits.available(BaseReg) ||
        (BaseRegSP && MBBI->mayLoadOrStore()))
      return E;
  }
  return E;
}

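/// Scan backward from a memory operation with a zero offset, looking for a
/// base-register add/sub that can be folded in as a pre-index writeback,
/// e.g. turning "add x0, x0, #8; ldr x1, [x0]" into "ldr x1, [x0, #8]!".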
MachineBasicBlock::iterator AArch64LoadStoreOpt::findMatchingUpdateInsnBackward(
    MachineBasicBlock::iterator I, unsigned Limit) {
  MachineBasicBlock::iterator B = I->getParent()->begin();
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineInstr &MemMI = *I;
  MachineBasicBlock::iterator MBBI = I;

  Register BaseReg = getLdStBaseOp(MemMI).getReg();
  int Offset = getLdStOffsetOp(MemMI).getImm();

  // If the load/store is the first instruction in the block, there's obviously
  // not any matching update. Ditto if the memory offset isn't zero.
  if (MBBI == B || Offset != 0)
    return E;
  // If the base register overlaps a destination register, we can't
  // merge the update.
  if (!isTagStore(MemMI)) {
    bool IsPairedInsn = isPairedLdSt(MemMI);
    for (unsigned i = 0, e = IsPairedInsn ? 2 : 1; i != e; ++i) {
      Register DestReg = getLdStRegOp(MemMI, i).getReg();
      if (DestReg == BaseReg || TRI->isSubRegister(BaseReg, DestReg))
        return E;
    }
  }

  const bool BaseRegSP = BaseReg == AArch64::SP;
  if (BaseRegSP && needsWinCFI(I->getMF())) {
    // FIXME: For now, we always block the optimization over SP in Windows
    // targets as it would require adjusting the unwind/debug info; messing
    // up the unwind info can actually cause a miscompile.
    return E;
  }

  // Track which register units have been modified and used between the first
  // insn (inclusive) and the second insn.
  ModifiedRegUnits.clear();
  UsedRegUnits.clear();
  unsigned Count = 0;
  do {
    MBBI = prev_nodbg(MBBI, B);
    MachineInstr &MI = *MBBI;

    // Don't count transient instructions towards the search limit since there
    // may be different numbers of them if e.g. debug information is present.
    if (!MI.isTransient())
      ++Count;

    // If we found a match, return it.
    if (isMatchingUpdateInsn(*I, MI, BaseReg, Offset))
      return MBBI;

    // Update the status of what the instruction clobbered and used.
    LiveRegUnits::accumulateUsedDefed(MI, ModifiedRegUnits, UsedRegUnits, TRI);

    // Otherwise, if the base register is used or modified, we have no match,
    // so return early.
    if (!ModifiedRegUnits.available(BaseReg) ||
        !UsedRegUnits.available(BaseReg))
      return E;
  } while (MBBI != B && Count < Limit);
  return E;
}

bool AArch64LoadStoreOpt::tryToPromoteLoadFromStore(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  // If this is a volatile load, don't mess with it.
  if (MI.hasOrderedMemoryRef())
    return false;

  // Make sure this is a reg+imm.
  // FIXME: It is possible to extend it to handle reg+reg cases.
  if (!getLdStOffsetOp(MI).isImm())
    return false;

  // Look backward up to LdStLimit instructions.
  MachineBasicBlock::iterator StoreI;
  if (findMatchingStore(MBBI, LdStLimit, StoreI)) {
    ++NumLoadsFromStoresPromoted;
    // Promote the load. Keeping the iterator straight is a
    // pain, so we let the merge routine tell us what the next instruction
    // is after it's done mucking about.
    MBBI = promoteLoadFromStore(MBBI, StoreI);
    return true;
  }
  return false;
}

// Merge adjacent zero stores into a wider store.
bool AArch64LoadStoreOpt::tryToMergeZeroStInst(
    MachineBasicBlock::iterator &MBBI) {
  assert(isPromotableZeroStoreInst(*MBBI) && "Expected narrow store.");
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();

  if (!TII->isCandidateToMergeOrPair(MI))
    return false;

  // Look ahead up to LdStLimit instructions for a mergeable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator MergeMI =
      findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ true);
  if (MergeMI != E) {
    ++NumZeroStoresPromoted;

    // Keeping the iterator straight is a pain, so we let the merge routine
    // tell us what the next instruction is after it's done mucking about.
    MBBI = mergeNarrowZeroStores(MBBI, MergeMI, Flags);
    return true;
  }
  return false;
}

// Find loads and stores that can be merged into a single load or store pair
// instruction.
bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();

  if (!TII->isCandidateToMergeOrPair(MI))
    return false;

  // Early exit if the offset is not possible to match. (6 bits of positive
  // range, plus allow an extra one in case we find a later insn that matches
  // with Offset-1.)
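  // For example, a scaled load at offset #64 cannot anchor a pair by itself,
  // but it can still pair with a later match at offset #63, so the bounds
  // check below is done at Offset - OffsetStride for positive offsets.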
  bool IsUnscaled = TII->isUnscaledLdSt(MI);
  int Offset = getLdStOffsetOp(MI).getImm();
  int OffsetStride = IsUnscaled ? TII->getMemScale(MI) : 1;
  // Allow one more for offset.
  if (Offset > 0)
    Offset -= OffsetStride;
  if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
    return false;

  // Look ahead up to LdStLimit instructions for a pairable instruction.
  LdStPairFlags Flags;
  MachineBasicBlock::iterator Paired =
      findMatchingInsn(MBBI, Flags, LdStLimit, /* FindNarrowMerge = */ false);
  if (Paired != E) {
    ++NumPairCreated;
    if (TII->isUnscaledLdSt(MI))
      ++NumUnscaledPairCreated;
    // Keeping the iterator straight is a pain, so we let the merge routine
    // tell us what the next instruction is after it's done mucking about.
    auto Prev = std::prev(MBBI);
    MBBI = mergePairedInsns(MBBI, Paired, Flags);
    // Collect liveness info for instructions between Prev and the new position
    // MBBI.
    for (auto I = std::next(Prev); I != MBBI; I++)
      updateDefinedRegisters(*I, DefinedInBB, TRI);

    return true;
  }
  return false;
}

bool AArch64LoadStoreOpt::tryToMergeLdStUpdate(
    MachineBasicBlock::iterator &MBBI) {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock::iterator E = MI.getParent()->end();
  MachineBasicBlock::iterator Update;

  // Look forward to try to form a post-index instruction. For example,
  //   ldr x0, [x20]
  //   add x20, x20, #32
  // merged into:
  //   ldr x0, [x20], #32
  Update = findMatchingUpdateInsnForward(MBBI, 0, UpdateLimit);
  if (Update != E) {
    // Merge the update into the ld/st.
    MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/false);
    return true;
  }

  // Don't know how to handle unscaled pre/post-index versions below, so bail.
  if (TII->isUnscaledLdSt(MI.getOpcode()))
    return false;

  // Look back to try to find a pre-index instruction. For example,
  //   add x0, x0, #8
  //   ldr x1, [x0]
  // merged into:
  //   ldr x1, [x0, #8]!
  Update = findMatchingUpdateInsnBackward(MBBI, UpdateLimit);
  if (Update != E) {
    // Merge the update into the ld/st.
    MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
    return true;
  }

  // The immediate in the load/store is scaled by the size of the memory
  // operation. The immediate in the add we're looking for,
  // however, is not, so adjust here.
  int UnscaledOffset = getLdStOffsetOp(MI).getImm() * TII->getMemScale(MI);

  // Look forward to try to find a pre-index instruction. For example,
  //   ldr x1, [x0, #64]
  //   add x0, x0, #64
  // merged into:
  //   ldr x1, [x0, #64]!
  Update = findMatchingUpdateInsnForward(MBBI, UnscaledOffset, UpdateLimit);
  if (Update != E) {
    // Merge the update into the ld/st.
    MBBI = mergeUpdateInsn(MBBI, Update, /*IsPreIdx=*/true);
    return true;
  }

  return false;
}

bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
                                        bool EnableNarrowZeroStOpt) {

  bool Modified = false;
  // Four transformations to do here:
  // 1) Find loads that directly read from stores and promote them by
  //    replacing with mov instructions. If the store is wider than the load,
  //    the load will be replaced with a bitfield extract.
  //      e.g.,
  //        str w1, [x0, #4]
  //        ldrh w2, [x0, #6]
  //        ; becomes
  //        str w1, [x0, #4]
  //        lsr w2, w1, #16
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    if (isPromotableLoadFromStore(*MBBI) && tryToPromoteLoadFromStore(MBBI))
      Modified = true;
    else
      ++MBBI;
  }
  // 2) Merge adjacent zero stores into a wider store.
  //      e.g.,
  //        strh wzr, [x0]
  //        strh wzr, [x0, #2]
  //        ; becomes
  //        str wzr, [x0]
  //      e.g.,
  //        str wzr, [x0]
  //        str wzr, [x0, #4]
  //        ; becomes
  //        str xzr, [x0]
  if (EnableNarrowZeroStOpt)
    for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
         MBBI != E;) {
      if (isPromotableZeroStoreInst(*MBBI) && tryToMergeZeroStInst(MBBI))
        Modified = true;
      else
        ++MBBI;
    }
  // 3) Find loads and stores that can be merged into a single load or store
  //    pair instruction.
  //      e.g.,
  //        ldr x0, [x2]
  //        ldr x1, [x2, #8]
  //        ; becomes
  //        ldp x0, x1, [x2]

  if (MBB.getParent()->getRegInfo().tracksLiveness()) {
    DefinedInBB.clear();
    DefinedInBB.addLiveIns(MBB);
  }

  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    // Track currently live registers up to this point, to help with
    // searching for a rename register on demand.
    updateDefinedRegisters(*MBBI, DefinedInBB, TRI);
    if (TII->isPairableLdStInst(*MBBI) && tryToPairLdStInst(MBBI))
      Modified = true;
    else
      ++MBBI;
  }
  // 4) Find base register updates that can be merged into the load or store
  //    as a base-reg writeback.
  //      e.g.,
  //        ldr x0, [x2]
  //        add x2, x2, #4
  //        ; becomes
  //        ldr x0, [x2], #4
  for (MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
       MBBI != E;) {
    if (isMergeableLdStUpdate(*MBBI) && tryToMergeLdStUpdate(MBBI))
      Modified = true;
    else
      ++MBBI;
  }

  return Modified;
}

bool AArch64LoadStoreOpt::runOnMachineFunction(MachineFunction &Fn) {
  if (skipFunction(Fn.getFunction()))
    return false;

  Subtarget = &static_cast<const AArch64Subtarget &>(Fn.getSubtarget());
  TII = static_cast<const AArch64InstrInfo *>(Subtarget->getInstrInfo());
  TRI = Subtarget->getRegisterInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  // Resize the modified and used register unit trackers. We do this once
  // per function and then clear the register units each time we optimize a
  // load or store.
  ModifiedRegUnits.init(*TRI);
  UsedRegUnits.init(*TRI);
  DefinedInBB.init(*TRI);

  bool Modified = false;
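  // Merging two adjacent narrow zero stores produces a wider store that may
  // be less aligned than its own access size, so the optimization must be
  // skipped when the target requires strict alignment.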
  bool enableNarrowZeroStOpt = !Subtarget->requiresStrictAlign();
  for (auto &MBB : Fn) {
    auto M = optimizeBlock(MBB, enableNarrowZeroStOpt);
    Modified |= M;
  }

  return Modified;
}

// FIXME: Do we need/want a pre-alloc pass like ARM has to try to keep loads
// and stores near one another? Note: The pre-RA instruction scheduler already
// has hooks to try and schedule pairable loads/stores together to improve
// pairing opportunities. Thus, a pre-RA pairing pass may not be worth the
// effort.

// FIXME: When pairing store instructions it's very possible for this pass to
// hoist a store with a KILL marker above another use (without a KILL marker).
// The resulting IR is invalid, but nothing uses the KILL markers after this
// pass, so it's never caused a problem in practice.

/// createAArch64LoadStoreOptimizationPass - returns an instance of the
/// load / store optimization pass.
FunctionPass *llvm::createAArch64LoadStoreOptimizationPass() {
  return new AArch64LoadStoreOpt();
}