1 //===- SILoadStoreOptimizer.cpp -------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass tries to fuse DS instructions with nearby immediate offsets.
10 // This will fuse operations such as
11 // ds_read_b32 v0, v2 offset:16
12 // ds_read_b32 v1, v2 offset:32
13 // ==>
14 // ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
15 //
16 // The same is done for certain SMEM and VMEM opcodes, e.g.:
17 // s_buffer_load_dword s4, s[0:3], 4
18 // s_buffer_load_dword s5, s[0:3], 8
19 // ==>
20 // s_buffer_load_dwordx2 s[4:5], s[0:3], 4
21 //
22 // This pass also tries to promote constant offsets to the immediate by
23 // adjusting the base. It tries to use a base from nearby instructions that
24 // allows it to have a 13-bit constant offset, which is then promoted to the
25 // immediate.
26 // E.g.
27 // s_movk_i32 s0, 0x1800
28 // v_add_co_u32_e32 v0, vcc, s0, v2
29 // v_addc_co_u32_e32 v1, vcc, 0, v6, vcc
30 //
31 // s_movk_i32 s0, 0x1000
32 // v_add_co_u32_e32 v5, vcc, s0, v2
33 // v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
34 // global_load_dwordx2 v[5:6], v[5:6], off
35 // global_load_dwordx2 v[0:1], v[0:1], off
36 // =>
37 // s_movk_i32 s0, 0x1000
38 // v_add_co_u32_e32 v5, vcc, s0, v2
39 // v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
40 // global_load_dwordx2 v[5:6], v[5:6], off
41 // global_load_dwordx2 v[0:1], v[5:6], off offset:2048
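// (Note on the arithmetic in this example: the two bases differ by
// 0x1800 - 0x1000 = 0x800 = 2048 bytes, which fits in the immediate, so the
// second load can reuse v[5:6] with offset:2048.)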
42 //
43 // Future improvements:
44 //
45 // - This is currently missing stores of constants because loading
46 // the constant into the data register is placed between the stores, although
47 // this is arguably a scheduling problem.
48 //
49 // - Recomputing live intervals seems inefficient. This currently matches one
50 // pair at a time, recomputes live intervals, and then moves on to the next
51 // pair. It would be better to compute a list of all merges that need to occur.
52 //
53 // - With a list of instructions to process, we can also merge more. If a
54 // cluster of loads has offsets that are too large to fit in the 8-bit
55 // offset fields, but are close enough together, we can add to the base
56 // pointer and use the new, reduced offsets.
57 //
58 //===----------------------------------------------------------------------===//
59
60 #include "AMDGPU.h"
61 #include "AMDGPUSubtarget.h"
62 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
63 #include "SIInstrInfo.h"
64 #include "SIRegisterInfo.h"
65 #include "Utils/AMDGPUBaseInfo.h"
66 #include "llvm/ADT/ArrayRef.h"
67 #include "llvm/ADT/SmallVector.h"
68 #include "llvm/ADT/StringRef.h"
69 #include "llvm/Analysis/AliasAnalysis.h"
70 #include "llvm/CodeGen/MachineBasicBlock.h"
71 #include "llvm/CodeGen/MachineFunction.h"
72 #include "llvm/CodeGen/MachineFunctionPass.h"
73 #include "llvm/CodeGen/MachineInstr.h"
74 #include "llvm/CodeGen/MachineInstrBuilder.h"
75 #include "llvm/CodeGen/MachineOperand.h"
76 #include "llvm/CodeGen/MachineRegisterInfo.h"
77 #include "llvm/IR/DebugLoc.h"
78 #include "llvm/InitializePasses.h"
79 #include "llvm/Pass.h"
80 #include "llvm/Support/Debug.h"
81 #include "llvm/Support/MathExtras.h"
82 #include "llvm/Support/raw_ostream.h"
83 #include <algorithm>
84 #include <cassert>
85 #include <cstdlib>
86 #include <iterator>
87 #include <utility>
88
89 using namespace llvm;
90
91 #define DEBUG_TYPE "si-load-store-opt"
92
93 namespace {
94 enum InstClassEnum {
95 UNKNOWN,
96 DS_READ,
97 DS_WRITE,
98 S_BUFFER_LOAD_IMM,
99 BUFFER_LOAD,
100 BUFFER_STORE,
101 MIMG,
102 TBUFFER_LOAD,
103 TBUFFER_STORE,
104 };
105
106 enum RegisterEnum {
107 SBASE = 0x1,
108 SRSRC = 0x2,
109 SOFFSET = 0x4,
110 VADDR = 0x8,
111 ADDR = 0x10,
112 SSAMP = 0x20,
113 };
114
115 class SILoadStoreOptimizer : public MachineFunctionPass {
116 struct CombineInfo {
117 MachineBasicBlock::iterator I;
118 unsigned EltSize;
119 unsigned Offset;
120 unsigned Width;
121 unsigned Format;
122 unsigned BaseOff;
123 unsigned DMask;
124 InstClassEnum InstClass;
125 bool GLC;
126 bool SLC;
127 bool DLC;
128 bool UseST64;
129 SmallVector<MachineInstr *, 8> InstsToMove;
130 int AddrIdx[5];
131 const MachineOperand *AddrReg[5];
132 unsigned NumAddresses;
133
134 bool hasSameBaseAddress(const MachineInstr &MI) {
135 for (unsigned i = 0; i < NumAddresses; i++) {
136 const MachineOperand &AddrRegNext = MI.getOperand(AddrIdx[i]);
137
138 if (AddrReg[i]->isImm() || AddrRegNext.isImm()) {
139 if (AddrReg[i]->isImm() != AddrRegNext.isImm() ||
140 AddrReg[i]->getImm() != AddrRegNext.getImm()) {
141 return false;
142 }
143 continue;
144 }
145
146 // Check same base pointer. Be careful of subregisters, which can occur
147 // with vectors of pointers.
148 if (AddrReg[i]->getReg() != AddrRegNext.getReg() ||
149 AddrReg[i]->getSubReg() != AddrRegNext.getSubReg()) {
150 return false;
151 }
152 }
153 return true;
154 }
155
156 bool hasMergeableAddress(const MachineRegisterInfo &MRI) {
157 for (unsigned i = 0; i < NumAddresses; ++i) {
158 const MachineOperand *AddrOp = AddrReg[i];
159 // Immediates are always OK.
160 if (AddrOp->isImm())
161 continue;
162
163 // Don't try to merge addresses that aren't either immediates or registers.
164 // TODO: Should be possible to merge FrameIndexes and maybe some other
165 // non-register operands.
166 if (!AddrOp->isReg())
167 return false;
168
169 // TODO: We should be able to merge physical reg addresses.
170 if (Register::isPhysicalRegister(AddrOp->getReg()))
171 return false;
172
173 // If an address has only one use then there will be no other
174 // instructions with the same address, so we can't merge this one.
175 if (MRI.hasOneNonDBGUse(AddrOp->getReg()))
176 return false;
177 }
178 return true;
179 }
180
181 void setMI(MachineBasicBlock::iterator MI, const SIInstrInfo &TII,
182 const GCNSubtarget &STM);
183 };
184
185 struct BaseRegisters {
186 unsigned LoReg = 0;
187 unsigned HiReg = 0;
188
189 unsigned LoSubReg = 0;
190 unsigned HiSubReg = 0;
191 };
192
193 struct MemAddress {
194 BaseRegisters Base;
195 int64_t Offset = 0;
196 };
197
198 using MemInfoMap = DenseMap<MachineInstr *, MemAddress>;
199
200 private:
201 const GCNSubtarget *STM = nullptr;
202 const SIInstrInfo *TII = nullptr;
203 const SIRegisterInfo *TRI = nullptr;
204 const MCSubtargetInfo *STI = nullptr;
205 MachineRegisterInfo *MRI = nullptr;
206 AliasAnalysis *AA = nullptr;
207 bool OptimizeAgain;
208
209 static bool dmasksCanBeCombined(const CombineInfo &CI,
210 const SIInstrInfo &TII,
211 const CombineInfo &Paired);
212 static bool offsetsCanBeCombined(CombineInfo &CI, const MCSubtargetInfo &STI,
213 CombineInfo &Paired);
214 static bool widthsFit(const GCNSubtarget &STM, const CombineInfo &CI,
215 const CombineInfo &Paired);
216 static unsigned getNewOpcode(const CombineInfo &CI, const CombineInfo &Paired);
217 static std::pair<unsigned, unsigned> getSubRegIdxs(const CombineInfo &CI,
218 const CombineInfo &Paired);
219 const TargetRegisterClass *getTargetRegisterClass(const CombineInfo &CI,
220 const CombineInfo &Paired);
221
222 bool findMatchingInst(CombineInfo &CI, CombineInfo &Paired);
223
224 unsigned read2Opcode(unsigned EltSize) const;
225 unsigned read2ST64Opcode(unsigned EltSize) const;
226 MachineBasicBlock::iterator mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired);
227
228 unsigned write2Opcode(unsigned EltSize) const;
229 unsigned write2ST64Opcode(unsigned EltSize) const;
230 MachineBasicBlock::iterator mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired);
231 MachineBasicBlock::iterator mergeImagePair(CombineInfo &CI, CombineInfo &Paired);
232 MachineBasicBlock::iterator mergeSBufferLoadImmPair(CombineInfo &CI, CombineInfo &Paired);
233 MachineBasicBlock::iterator mergeBufferLoadPair(CombineInfo &CI, CombineInfo &Paired);
234 MachineBasicBlock::iterator mergeBufferStorePair(CombineInfo &CI, CombineInfo &Paired);
235 MachineBasicBlock::iterator mergeTBufferLoadPair(CombineInfo &CI, CombineInfo &Paired);
236 MachineBasicBlock::iterator mergeTBufferStorePair(CombineInfo &CI, CombineInfo &Paired);
237
238 void updateBaseAndOffset(MachineInstr &I, unsigned NewBase,
239 int32_t NewOffset) const;
240 unsigned computeBase(MachineInstr &MI, const MemAddress &Addr) const;
241 MachineOperand createRegOrImm(int32_t Val, MachineInstr &MI) const;
242 Optional<int32_t> extractConstOffset(const MachineOperand &Op) const;
243 void processBaseWithConstOffset(const MachineOperand &Base, MemAddress &Addr) const;
244 /// Promotes constant offset to the immediate by adjusting the base. It
245 /// tries to use a base from the nearby instructions that allows it to have
246 /// a 13bit constant offset which gets promoted to the immediate.
247 bool promoteConstantOffsetToImm(MachineInstr &CI,
248 MemInfoMap &Visited,
249 SmallPtrSet<MachineInstr *, 4> &Promoted) const;
250 void addInstToMergeableList(const CombineInfo &CI,
251 std::list<std::list<CombineInfo> > &MergeableInsts) const;
252 bool collectMergeableInsts(MachineBasicBlock &MBB,
253 std::list<std::list<CombineInfo> > &MergeableInsts) const;
254
255 public:
256 static char ID;
257
258 SILoadStoreOptimizer() : MachineFunctionPass(ID) {
259 initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
260 }
261
262 void removeCombinedInst(std::list<CombineInfo> &MergeList,
263 const MachineInstr &MI);
264 bool optimizeInstsWithSameBaseAddr(std::list<CombineInfo> &MergeList,
265 bool &OptimizeListAgain);
266 bool optimizeBlock(std::list<std::list<CombineInfo> > &MergeableInsts);
267
268 bool runOnMachineFunction(MachineFunction &MF) override;
269
270 StringRef getPassName() const override { return "SI Load Store Optimizer"; }
271
272 void getAnalysisUsage(AnalysisUsage &AU) const override {
273 AU.setPreservesCFG();
274 AU.addRequired<AAResultsWrapperPass>();
275
276 MachineFunctionPass::getAnalysisUsage(AU);
277 }
278 };
279
280 static unsigned getOpcodeWidth(const MachineInstr &MI, const SIInstrInfo &TII) {
281 const unsigned Opc = MI.getOpcode();
282
283 if (TII.isMUBUF(Opc)) {
284 // FIXME: Handle d16 correctly
285 return AMDGPU::getMUBUFElements(Opc);
286 }
287 if (TII.isMIMG(MI)) {
288 uint64_t DMaskImm =
289 TII.getNamedOperand(MI, AMDGPU::OpName::dmask)->getImm();
290 return countPopulation(DMaskImm);
291 }
292 if (TII.isMTBUF(Opc)) {
293 return AMDGPU::getMTBUFElements(Opc);
294 }
295
296 switch (Opc) {
297 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
298 return 1;
299 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
300 return 2;
301 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
302 return 4;
303 default:
304 return 0;
305 }
306 }
307
308 /// Maps instruction opcode to enum InstClassEnum.
309 static InstClassEnum getInstClass(unsigned Opc, const SIInstrInfo &TII) {
310 switch (Opc) {
311 default:
312 if (TII.isMUBUF(Opc)) {
313 switch (AMDGPU::getMUBUFBaseOpcode(Opc)) {
314 default:
315 return UNKNOWN;
316 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN:
317 case AMDGPU::BUFFER_LOAD_DWORD_OFFEN_exact:
318 case AMDGPU::BUFFER_LOAD_DWORD_OFFSET:
319 case AMDGPU::BUFFER_LOAD_DWORD_OFFSET_exact:
320 return BUFFER_LOAD;
321 case AMDGPU::BUFFER_STORE_DWORD_OFFEN:
322 case AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact:
323 case AMDGPU::BUFFER_STORE_DWORD_OFFSET:
324 case AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact:
325 return BUFFER_STORE;
326 }
327 }
328 if (TII.isMIMG(Opc)) {
329 // Ignore instructions encoded without vaddr.
330 if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr) == -1)
331 return UNKNOWN;
332 // TODO: Support IMAGE_GET_RESINFO and IMAGE_GET_LOD.
333 if (TII.get(Opc).mayStore() || !TII.get(Opc).mayLoad() ||
334 TII.isGather4(Opc))
335 return UNKNOWN;
336 return MIMG;
337 }
338 if (TII.isMTBUF(Opc)) {
339 switch (AMDGPU::getMTBUFBaseOpcode(Opc)) {
340 default:
341 return UNKNOWN;
342 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN:
343 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFEN_exact:
344 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET:
345 case AMDGPU::TBUFFER_LOAD_FORMAT_X_OFFSET_exact:
346 return TBUFFER_LOAD;
347 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN:
348 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFEN_exact:
349 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET:
350 case AMDGPU::TBUFFER_STORE_FORMAT_X_OFFSET_exact:
351 return TBUFFER_STORE;
352 }
353 }
354 return UNKNOWN;
355 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
356 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
357 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
358 return S_BUFFER_LOAD_IMM;
359 case AMDGPU::DS_READ_B32:
360 case AMDGPU::DS_READ_B32_gfx9:
361 case AMDGPU::DS_READ_B64:
362 case AMDGPU::DS_READ_B64_gfx9:
363 return DS_READ;
364 case AMDGPU::DS_WRITE_B32:
365 case AMDGPU::DS_WRITE_B32_gfx9:
366 case AMDGPU::DS_WRITE_B64:
367 case AMDGPU::DS_WRITE_B64_gfx9:
368 return DS_WRITE;
369 }
370 }
371
372 /// Determines instruction subclass from opcode. Only instructions
373 /// of the same subclass can be merged together.
374 static unsigned getInstSubclass(unsigned Opc, const SIInstrInfo &TII) {
375 switch (Opc) {
376 default:
377 if (TII.isMUBUF(Opc))
378 return AMDGPU::getMUBUFBaseOpcode(Opc);
379 if (TII.isMIMG(Opc)) {
380 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
381 assert(Info);
382 return Info->BaseOpcode;
383 }
384 if (TII.isMTBUF(Opc))
385 return AMDGPU::getMTBUFBaseOpcode(Opc);
386 return -1;
387 case AMDGPU::DS_READ_B32:
388 case AMDGPU::DS_READ_B32_gfx9:
389 case AMDGPU::DS_READ_B64:
390 case AMDGPU::DS_READ_B64_gfx9:
391 case AMDGPU::DS_WRITE_B32:
392 case AMDGPU::DS_WRITE_B32_gfx9:
393 case AMDGPU::DS_WRITE_B64:
394 case AMDGPU::DS_WRITE_B64_gfx9:
395 return Opc;
396 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
397 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
398 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
399 return AMDGPU::S_BUFFER_LOAD_DWORD_IMM;
400 }
401 }
402
403 static unsigned getRegs(unsigned Opc, const SIInstrInfo &TII) {
404 if (TII.isMUBUF(Opc)) {
405 unsigned result = 0;
406
407 if (AMDGPU::getMUBUFHasVAddr(Opc)) {
408 result |= VADDR;
409 }
410
411 if (AMDGPU::getMUBUFHasSrsrc(Opc)) {
412 result |= SRSRC;
413 }
414
415 if (AMDGPU::getMUBUFHasSoffset(Opc)) {
416 result |= SOFFSET;
417 }
418
419 return result;
420 }
421
422 if (TII.isMIMG(Opc)) {
423 unsigned result = VADDR | SRSRC;
424 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opc);
425 if (Info && AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode)->Sampler)
426 result |= SSAMP;
427
428 return result;
429 }
430 if (TII.isMTBUF(Opc)) {
431 unsigned result = 0;
432
433 if (AMDGPU::getMTBUFHasVAddr(Opc)) {
434 result |= VADDR;
435 }
436
437 if (AMDGPU::getMTBUFHasSrsrc(Opc)) {
438 result |= SRSRC;
439 }
440
441 if (AMDGPU::getMTBUFHasSoffset(Opc)) {
442 result |= SOFFSET;
443 }
444
445 return result;
446 }
447
448 switch (Opc) {
449 default:
450 return 0;
451 case AMDGPU::S_BUFFER_LOAD_DWORD_IMM:
452 case AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM:
453 case AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM:
454 return SBASE;
455 case AMDGPU::DS_READ_B32:
456 case AMDGPU::DS_READ_B64:
457 case AMDGPU::DS_READ_B32_gfx9:
458 case AMDGPU::DS_READ_B64_gfx9:
459 case AMDGPU::DS_WRITE_B32:
460 case AMDGPU::DS_WRITE_B64:
461 case AMDGPU::DS_WRITE_B32_gfx9:
462 case AMDGPU::DS_WRITE_B64_gfx9:
463 return ADDR;
464 }
465 }
466
467 void SILoadStoreOptimizer::CombineInfo::setMI(MachineBasicBlock::iterator MI,
468 const SIInstrInfo &TII,
469 const GCNSubtarget &STM) {
470 I = MI;
471 unsigned Opc = MI->getOpcode();
472 InstClass = getInstClass(Opc, TII);
473
474 if (InstClass == UNKNOWN)
475 return;
476
477 switch (InstClass) {
478 case DS_READ:
479 EltSize =
480 (Opc == AMDGPU::DS_READ_B64 || Opc == AMDGPU::DS_READ_B64_gfx9) ? 8
481 : 4;
482 break;
483 case DS_WRITE:
484 EltSize =
485 (Opc == AMDGPU::DS_WRITE_B64 || Opc == AMDGPU::DS_WRITE_B64_gfx9) ? 8
486 : 4;
487 break;
488 case S_BUFFER_LOAD_IMM:
489 EltSize = AMDGPU::getSMRDEncodedOffset(STM, 4);
490 break;
491 default:
492 EltSize = 4;
493 break;
494 }
495
496 if (InstClass == MIMG) {
497 DMask = TII.getNamedOperand(*I, AMDGPU::OpName::dmask)->getImm();
498 } else {
499 int OffsetIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::offset);
500 Offset = I->getOperand(OffsetIdx).getImm();
501 }
502
503 if (InstClass == TBUFFER_LOAD || InstClass == TBUFFER_STORE)
504 Format = TII.getNamedOperand(*I, AMDGPU::OpName::format)->getImm();
505
506 Width = getOpcodeWidth(*I, TII);
507
508 if ((InstClass == DS_READ) || (InstClass == DS_WRITE)) {
509 Offset &= 0xffff;
510 } else if (InstClass != MIMG) {
511 GLC = TII.getNamedOperand(*I, AMDGPU::OpName::glc)->getImm();
512 if (InstClass != S_BUFFER_LOAD_IMM) {
513 SLC = TII.getNamedOperand(*I, AMDGPU::OpName::slc)->getImm();
514 }
515 DLC = TII.getNamedOperand(*I, AMDGPU::OpName::dlc)->getImm();
516 }
517
518 unsigned AddrOpName[5] = {0};
519 NumAddresses = 0;
520 const unsigned Regs = getRegs(I->getOpcode(), TII);
521
522 if (Regs & ADDR) {
523 AddrOpName[NumAddresses++] = AMDGPU::OpName::addr;
524 }
525
526 if (Regs & SBASE) {
527 AddrOpName[NumAddresses++] = AMDGPU::OpName::sbase;
528 }
529
530 if (Regs & SRSRC) {
531 AddrOpName[NumAddresses++] = AMDGPU::OpName::srsrc;
532 }
533
534 if (Regs & SOFFSET) {
535 AddrOpName[NumAddresses++] = AMDGPU::OpName::soffset;
536 }
537
538 if (Regs & VADDR) {
539 AddrOpName[NumAddresses++] = AMDGPU::OpName::vaddr;
540 }
541
542 if (Regs & SSAMP) {
543 AddrOpName[NumAddresses++] = AMDGPU::OpName::ssamp;
544 }
545
546 for (unsigned i = 0; i < NumAddresses; i++) {
547 AddrIdx[i] = AMDGPU::getNamedOperandIdx(I->getOpcode(), AddrOpName[i]);
548 AddrReg[i] = &I->getOperand(AddrIdx[i]);
549 }
550
551 InstsToMove.clear();
552 }
553
554 } // end anonymous namespace.
555
556 INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
557 "SI Load Store Optimizer", false, false)
558 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
559 INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE, "SI Load Store Optimizer",
560 false, false)
561
562 char SILoadStoreOptimizer::ID = 0;
563
564 char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;
565
566 FunctionPass *llvm::createSILoadStoreOptimizerPass() {
567 return new SILoadStoreOptimizer();
568 }
569
570 static void moveInstsAfter(MachineBasicBlock::iterator I,
571 ArrayRef<MachineInstr *> InstsToMove) {
572 MachineBasicBlock *MBB = I->getParent();
573 ++I;
574 for (MachineInstr *MI : InstsToMove) {
575 MI->removeFromParent();
576 MBB->insert(I, MI);
577 }
578 }
579
580 static void addDefsUsesToList(const MachineInstr &MI,
581 DenseSet<unsigned> &RegDefs,
582 DenseSet<unsigned> &PhysRegUses) {
583 for (const MachineOperand &Op : MI.operands()) {
584 if (Op.isReg()) {
585 if (Op.isDef())
586 RegDefs.insert(Op.getReg());
587 else if (Op.readsReg() && Register::isPhysicalRegister(Op.getReg()))
588 PhysRegUses.insert(Op.getReg());
589 }
590 }
591 }
592
593 static bool memAccessesCanBeReordered(MachineBasicBlock::iterator A,
594 MachineBasicBlock::iterator B,
595 AliasAnalysis *AA) {
596 // RAW or WAR - cannot reorder
597 // WAW - cannot reorder
598 // RAR - safe to reorder
599 return !(A->mayStore() || B->mayStore()) || !A->mayAlias(AA, *B, true);
600 }
601
602 // Add MI and its defs to the lists if MI reads one of the defs that are
603 // already in the list. Returns true in that case.
604 static bool addToListsIfDependent(MachineInstr &MI, DenseSet<unsigned> &RegDefs,
605 DenseSet<unsigned> &PhysRegUses,
606 SmallVectorImpl<MachineInstr *> &Insts) {
607 for (MachineOperand &Use : MI.operands()) {
608 // If one of the defs is read, then there is a use of Def between I and the
609 // instruction that I will potentially be merged with. We will need to move
610 // this instruction after the merged instructions.
611 //
612 // Similarly, if there is a def which is read by an instruction that is to
613 // be moved for merging, then we need to move the def-instruction as well.
614 // This can only happen for physical registers such as M0; virtual
615 // registers are in SSA form.
616 if (Use.isReg() &&
617 ((Use.readsReg() && RegDefs.count(Use.getReg())) ||
618 (Use.isDef() && RegDefs.count(Use.getReg())) ||
619 (Use.isDef() && Register::isPhysicalRegister(Use.getReg()) &&
620 PhysRegUses.count(Use.getReg())))) {
621 Insts.push_back(&MI);
622 addDefsUsesToList(MI, RegDefs, PhysRegUses);
623 return true;
624 }
625 }
626
627 return false;
628 }
629
630 static bool canMoveInstsAcrossMemOp(MachineInstr &MemOp,
631 ArrayRef<MachineInstr *> InstsToMove,
632 AliasAnalysis *AA) {
633 assert(MemOp.mayLoadOrStore());
634
635 for (MachineInstr *InstToMove : InstsToMove) {
636 if (!InstToMove->mayLoadOrStore())
637 continue;
638 if (!memAccessesCanBeReordered(MemOp, *InstToMove, AA))
639 return false;
640 }
641 return true;
642 }
643
644 // This function assumes that \p A and \p B are identical except for
645 // size and offset, and that they reference adjacent memory.
646 static MachineMemOperand *combineKnownAdjacentMMOs(MachineFunction &MF,
647 const MachineMemOperand *A,
648 const MachineMemOperand *B) {
649 unsigned MinOffset = std::min(A->getOffset(), B->getOffset());
650 unsigned Size = A->getSize() + B->getSize();
651 // This function adds the offset parameter to the existing offset for A,
652 // so we pass 0 here as the offset and then manually set it to the correct
653 // value after the call.
654 MachineMemOperand *MMO = MF.getMachineMemOperand(A, 0, Size);
655 MMO->setOffset(MinOffset);
656 return MMO;
657 }
658
659 bool SILoadStoreOptimizer::dmasksCanBeCombined(const CombineInfo &CI,
660 const SIInstrInfo &TII,
661 const CombineInfo &Paired) {
662 assert(CI.InstClass == MIMG);
663
664 // Ignore instructions with tfe/lwe set.
665 const auto *TFEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::tfe);
666 const auto *LWEOp = TII.getNamedOperand(*CI.I, AMDGPU::OpName::lwe);
667
668 if ((TFEOp && TFEOp->getImm()) || (LWEOp && LWEOp->getImm()))
669 return false;
670
671 // Check other optional immediate operands for equality.
672 unsigned OperandsToMatch[] = {AMDGPU::OpName::glc, AMDGPU::OpName::slc,
673 AMDGPU::OpName::d16, AMDGPU::OpName::unorm,
674 AMDGPU::OpName::da, AMDGPU::OpName::r128};
675
676 for (auto op : OperandsToMatch) {
677 int Idx = AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), op);
678 if (AMDGPU::getNamedOperandIdx(Paired.I->getOpcode(), op) != Idx)
679 return false;
680 if (Idx != -1 &&
681 CI.I->getOperand(Idx).getImm() != Paired.I->getOperand(Idx).getImm())
682 return false;
683 }
684
685 // Check DMask for overlaps.
686 unsigned MaxMask = std::max(CI.DMask, Paired.DMask);
687 unsigned MinMask = std::min(CI.DMask, Paired.DMask);
688
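  // Combining requires that every set bit of the smaller dmask lies below the
  // lowest set bit of the larger dmask (which also rules out any overlap).
  // E.g. 0b0011 and 0b1100 can be combined, while 0b0101 and 0b1010 cannot.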
689 unsigned AllowedBitsForMin = llvm::countTrailingZeros(MaxMask);
690 if ((1u << AllowedBitsForMin) <= MinMask)
691 return false;
692
693 return true;
694 }
695
696 static unsigned getBufferFormatWithCompCount(unsigned OldFormat,
697 unsigned ComponentCount,
698 const MCSubtargetInfo &STI) {
699 if (ComponentCount > 4)
700 return 0;
701
702 const llvm::AMDGPU::GcnBufferFormatInfo *OldFormatInfo =
703 llvm::AMDGPU::getGcnBufferFormatInfo(OldFormat, STI);
704 if (!OldFormatInfo)
705 return 0;
706
707 const llvm::AMDGPU::GcnBufferFormatInfo *NewFormatInfo =
708 llvm::AMDGPU::getGcnBufferFormatInfo(OldFormatInfo->BitsPerComp,
709 ComponentCount,
710 OldFormatInfo->NumFormat, STI);
711
712 if (!NewFormatInfo)
713 return 0;
714
715 assert(NewFormatInfo->NumFormat == OldFormatInfo->NumFormat &&
716 NewFormatInfo->BitsPerComp == OldFormatInfo->BitsPerComp);
717
718 return NewFormatInfo->Format;
719 }
720
721 bool SILoadStoreOptimizer::offsetsCanBeCombined(CombineInfo &CI,
722 const MCSubtargetInfo &STI,
723 CombineInfo &Paired) {
724 assert(CI.InstClass != MIMG);
725
726 // XXX - Would the same offset be OK? Is there any reason this would happen or
727 // be useful?
728 if (CI.Offset == Paired.Offset)
729 return false;
730
731 // This won't be valid if the offset isn't aligned.
732 if ((CI.Offset % CI.EltSize != 0) || (Paired.Offset % CI.EltSize != 0))
733 return false;
734
735 if (CI.InstClass == TBUFFER_LOAD || CI.InstClass == TBUFFER_STORE) {
736
737 const llvm::AMDGPU::GcnBufferFormatInfo *Info0 =
738 llvm::AMDGPU::getGcnBufferFormatInfo(CI.Format, STI);
739 if (!Info0)
740 return false;
741 const llvm::AMDGPU::GcnBufferFormatInfo *Info1 =
742 llvm::AMDGPU::getGcnBufferFormatInfo(Paired.Format, STI);
743 if (!Info1)
744 return false;
745
746 if (Info0->BitsPerComp != Info1->BitsPerComp ||
747 Info0->NumFormat != Info1->NumFormat)
748 return false;
749
750 // TODO: Should be possible to support more formats, but if format loads
751 // are not dword-aligned, the merged load might not be valid.
752 if (Info0->BitsPerComp != 32)
753 return false;
754
755 if (getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, STI) == 0)
756 return false;
757 }
758
759 unsigned EltOffset0 = CI.Offset / CI.EltSize;
760 unsigned EltOffset1 = Paired.Offset / CI.EltSize;
761 CI.UseST64 = false;
762 CI.BaseOff = 0;
763
764 // Handle SMEM and VMEM instructions.
765 if ((CI.InstClass != DS_READ) && (CI.InstClass != DS_WRITE)) {
766 return (EltOffset0 + CI.Width == EltOffset1 ||
767 EltOffset1 + Paired.Width == EltOffset0) &&
768 CI.GLC == Paired.GLC && CI.DLC == Paired.DLC &&
769 (CI.InstClass == S_BUFFER_LOAD_IMM || CI.SLC == Paired.SLC);
770 }
771
772 // If the offset in elements doesn't fit in 8 bits, we might be able to use
773 // the stride 64 versions.
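  // For example, with EltSize == 4 and byte offsets 0x1000 and 0x1100, the
  // element offsets 1024 and 1088 are too large for 8 bits, but both are
  // multiples of 64, so the ST64 forms can encode them as 16 and 17.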
774 if ((EltOffset0 % 64 == 0) && (EltOffset1 % 64) == 0 &&
775 isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64)) {
776 CI.Offset = EltOffset0 / 64;
777 Paired.Offset = EltOffset1 / 64;
778 CI.UseST64 = true;
779 return true;
780 }
781
782 // Check if the new offsets fit in the reduced 8-bit range.
783 if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1)) {
784 CI.Offset = EltOffset0;
785 Paired.Offset = EltOffset1;
786 return true;
787 }
788
789 // Try to shift base address to decrease offsets.
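  // For example, byte offsets 0x1000 and 0x1008 with EltSize == 4 give element
  // offsets 1024 and 1026; neither fits in 8 bits, but with BaseOff = 0x1000
  // the merged instruction can use offsets 0 and 2.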
790 unsigned OffsetDiff = std::abs((int)EltOffset1 - (int)EltOffset0);
791 CI.BaseOff = std::min(CI.Offset, Paired.Offset);
792
793 if ((OffsetDiff % 64 == 0) && isUInt<8>(OffsetDiff / 64)) {
794 CI.Offset = (EltOffset0 - CI.BaseOff / CI.EltSize) / 64;
795 Paired.Offset = (EltOffset1 - CI.BaseOff / CI.EltSize) / 64;
796 CI.UseST64 = true;
797 return true;
798 }
799
800 if (isUInt<8>(OffsetDiff)) {
801 CI.Offset = EltOffset0 - CI.BaseOff / CI.EltSize;
802 Paired.Offset = EltOffset1 - CI.BaseOff / CI.EltSize;
803 return true;
804 }
805
806 return false;
807 }
808
809 bool SILoadStoreOptimizer::widthsFit(const GCNSubtarget &STM,
810 const CombineInfo &CI,
811 const CombineInfo &Paired) {
812 const unsigned Width = (CI.Width + Paired.Width);
813 switch (CI.InstClass) {
814 default:
815 return (Width <= 4) && (STM.hasDwordx3LoadStores() || (Width != 3));
816 case S_BUFFER_LOAD_IMM:
817 switch (Width) {
818 default:
819 return false;
820 case 2:
821 case 4:
822 return true;
823 }
824 }
825 }
826
827 bool SILoadStoreOptimizer::findMatchingInst(CombineInfo &CI,
828 CombineInfo &Paired) {
829 MachineBasicBlock *MBB = CI.I->getParent();
830 MachineBasicBlock::iterator E = MBB->end();
831 MachineBasicBlock::iterator MBBI = CI.I;
832
833 const unsigned Opc = CI.I->getOpcode();
834 const InstClassEnum InstClass = getInstClass(Opc, *TII);
835
836 if (InstClass == UNKNOWN) {
837 return false;
838 }
839 const unsigned InstSubclass = getInstSubclass(Opc, *TII);
840
841 // Do not merge VMEM buffer instructions with "swizzled" bit set.
842 int Swizzled =
843 AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::swz);
844 if (Swizzled != -1 && CI.I->getOperand(Swizzled).getImm())
845 return false;
846
847 ++MBBI;
848
849 DenseSet<unsigned> RegDefsToMove;
850 DenseSet<unsigned> PhysRegUsesToMove;
851 addDefsUsesToList(*CI.I, RegDefsToMove, PhysRegUsesToMove);
852
853 for (; MBBI != E; ++MBBI) {
854
855 if ((getInstClass(MBBI->getOpcode(), *TII) != InstClass) ||
856 (getInstSubclass(MBBI->getOpcode(), *TII) != InstSubclass)) {
857 // This is not a matching instruction, but we can keep looking as
858 // long as one of these conditions is met:
859 // 1. It is safe to move I down past MBBI.
860 // 2. It is safe to move MBBI down past the instruction that I will
861 // be merged into.
862
863 if (MBBI->hasUnmodeledSideEffects()) {
864 // We can't re-order this instruction with respect to other memory
865 // operations, so we fail both conditions mentioned above.
866 return false;
867 }
868
869 if (MBBI->mayLoadOrStore() &&
870 (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
871 !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))) {
872 // We fail condition #1, but we may still be able to satisfy condition
873 // #2. Add this instruction to the move list and then we will check
874 // if condition #2 holds once we have selected the matching instruction.
875 CI.InstsToMove.push_back(&*MBBI);
876 addDefsUsesToList(*MBBI, RegDefsToMove, PhysRegUsesToMove);
877 continue;
878 }
879
880 // When we match I with another DS instruction we will be moving I down
881 // to the location of the matched instruction, so any uses of I will need
882 // to be moved down as well.
883 addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
884 CI.InstsToMove);
885 continue;
886 }
887
888 // Don't merge volatiles.
889 if (MBBI->hasOrderedMemoryRef())
890 return false;
891
892 int Swizzled =
893 AMDGPU::getNamedOperandIdx(MBBI->getOpcode(), AMDGPU::OpName::swz);
894 if (Swizzled != -1 && MBBI->getOperand(Swizzled).getImm())
895 return false;
896
897 // Handle a case like
898 // DS_WRITE_B32 addr, v, idx0
899 // w = DS_READ_B32 addr, idx0
900 // DS_WRITE_B32 addr, f(w), idx1
901 // where the DS_READ_B32 ends up in InstsToMove and therefore prevents
902 // merging of the two writes.
903 if (addToListsIfDependent(*MBBI, RegDefsToMove, PhysRegUsesToMove,
904 CI.InstsToMove))
905 continue;
906
907 bool Match = CI.hasSameBaseAddress(*MBBI);
908
909 if (Match) {
910 Paired.setMI(MBBI, *TII, *STM);
911
912 // Check both offsets (or masks for MIMG) can be combined and fit in the
913 // reduced range.
914 bool canBeCombined =
915 CI.InstClass == MIMG
916 ? dmasksCanBeCombined(CI, *TII, Paired)
917 : widthsFit(*STM, CI, Paired) && offsetsCanBeCombined(CI, *STI, Paired);
918
919 // We also need to go through the list of instructions that we plan to
920 // move and make sure they are all safe to move down past the merged
921 // instruction.
922 if (canBeCombined && canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
923 return true;
924 }
925
926 // We've found a load/store that we couldn't merge for some reason.
927 // We could potentially keep looking, but we'd need to make sure that
928 // it was safe to move I and also all the instructions in InstsToMove
929 // down past this instruction.
930 // Check if we can move I across MBBI and if we can move all of I's users.
931 if (!memAccessesCanBeReordered(*CI.I, *MBBI, AA) ||
932 !canMoveInstsAcrossMemOp(*MBBI, CI.InstsToMove, AA))
933 break;
934 }
935 return false;
936 }
937
938 unsigned SILoadStoreOptimizer::read2Opcode(unsigned EltSize) const {
939 if (STM->ldsRequiresM0Init())
940 return (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;
941 return (EltSize == 4) ? AMDGPU::DS_READ2_B32_gfx9 : AMDGPU::DS_READ2_B64_gfx9;
942 }
943
944 unsigned SILoadStoreOptimizer::read2ST64Opcode(unsigned EltSize) const {
945 if (STM->ldsRequiresM0Init())
946 return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
947
948 return (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32_gfx9
949 : AMDGPU::DS_READ2ST64_B64_gfx9;
950 }
951
952 MachineBasicBlock::iterator
953 SILoadStoreOptimizer::mergeRead2Pair(CombineInfo &CI, CombineInfo &Paired) {
954 MachineBasicBlock *MBB = CI.I->getParent();
955
956 // Be careful, since the addresses could be subregisters themselves in weird
957 // cases, like vectors of pointers.
958 const auto *AddrReg = TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
959
960 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdst);
961 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdst);
962
963 unsigned NewOffset0 = CI.Offset;
964 unsigned NewOffset1 = Paired.Offset;
965 unsigned Opc =
966 CI.UseST64 ? read2ST64Opcode(CI.EltSize) : read2Opcode(CI.EltSize);
967
968 unsigned SubRegIdx0 = (CI.EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
969 unsigned SubRegIdx1 = (CI.EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
970
971 if (NewOffset0 > NewOffset1) {
972 // Canonicalize the merged instruction so the smaller offset comes first.
973 std::swap(NewOffset0, NewOffset1);
974 std::swap(SubRegIdx0, SubRegIdx1);
975 }
976
977 assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
978 (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
979
980 const MCInstrDesc &Read2Desc = TII->get(Opc);
981
982 const TargetRegisterClass *SuperRC =
983 (CI.EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
984 Register DestReg = MRI->createVirtualRegister(SuperRC);
985
986 DebugLoc DL = CI.I->getDebugLoc();
987
988 Register BaseReg = AddrReg->getReg();
989 unsigned BaseSubReg = AddrReg->getSubReg();
990 unsigned BaseRegFlags = 0;
991 if (CI.BaseOff) {
992 Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
993 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
994 .addImm(CI.BaseOff);
995
996 BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
997 BaseRegFlags = RegState::Kill;
998
999 TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg)
1000 .addReg(ImmReg)
1001 .addReg(AddrReg->getReg(), 0, BaseSubReg)
1002 .addImm(0); // clamp bit
1003 BaseSubReg = 0;
1004 }
1005
1006 MachineInstrBuilder Read2 =
1007 BuildMI(*MBB, Paired.I, DL, Read2Desc, DestReg)
1008 .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
1009 .addImm(NewOffset0) // offset0
1010 .addImm(NewOffset1) // offset1
1011 .addImm(0) // gds
1012 .cloneMergedMemRefs({&*CI.I, &*Paired.I});
1013
1014 (void)Read2;
1015
1016 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1017
1018 // Copy to the old destination registers.
1019 BuildMI(*MBB, Paired.I, DL, CopyDesc)
1020 .add(*Dest0) // Copy to same destination including flags and sub reg.
1021 .addReg(DestReg, 0, SubRegIdx0);
1022 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1023 .add(*Dest1)
1024 .addReg(DestReg, RegState::Kill, SubRegIdx1);
1025
1026 moveInstsAfter(Copy1, CI.InstsToMove);
1027
1028 CI.I->eraseFromParent();
1029 Paired.I->eraseFromParent();
1030
1031 LLVM_DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
1032 return Read2;
1033 }
1034
1035 unsigned SILoadStoreOptimizer::write2Opcode(unsigned EltSize) const {
1036 if (STM->ldsRequiresM0Init())
1037 return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;
1038 return (EltSize == 4) ? AMDGPU::DS_WRITE2_B32_gfx9
1039 : AMDGPU::DS_WRITE2_B64_gfx9;
1040 }
1041
1042 unsigned SILoadStoreOptimizer::write2ST64Opcode(unsigned EltSize) const {
1043 if (STM->ldsRequiresM0Init())
1044 return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32
1045 : AMDGPU::DS_WRITE2ST64_B64;
1046
1047 return (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32_gfx9
1048 : AMDGPU::DS_WRITE2ST64_B64_gfx9;
1049 }
1050
1051 MachineBasicBlock::iterator
1052 SILoadStoreOptimizer::mergeWrite2Pair(CombineInfo &CI, CombineInfo &Paired) {
1053 MachineBasicBlock *MBB = CI.I->getParent();
1054
1055 // Be sure to use .add() with these, and not .addReg(). We want to be
1056 // sure we preserve the subregister index and any register flags set on them.
1057 const MachineOperand *AddrReg =
1058 TII->getNamedOperand(*CI.I, AMDGPU::OpName::addr);
1059 const MachineOperand *Data0 =
1060 TII->getNamedOperand(*CI.I, AMDGPU::OpName::data0);
1061 const MachineOperand *Data1 =
1062 TII->getNamedOperand(*Paired.I, AMDGPU::OpName::data0);
1063
1064 unsigned NewOffset0 = CI.Offset;
1065 unsigned NewOffset1 = Paired.Offset;
1066 unsigned Opc =
1067 CI.UseST64 ? write2ST64Opcode(CI.EltSize) : write2Opcode(CI.EltSize);
1068
1069 if (NewOffset0 > NewOffset1) {
1070 // Canonicalize the merged instruction so the smaller offset comes first.
1071 std::swap(NewOffset0, NewOffset1);
1072 std::swap(Data0, Data1);
1073 }
1074
1075 assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
1076 (NewOffset0 != NewOffset1) && "Computed offset doesn't fit");
1077
1078 const MCInstrDesc &Write2Desc = TII->get(Opc);
1079 DebugLoc DL = CI.I->getDebugLoc();
1080
1081 Register BaseReg = AddrReg->getReg();
1082 unsigned BaseSubReg = AddrReg->getSubReg();
1083 unsigned BaseRegFlags = 0;
1084 if (CI.BaseOff) {
1085 Register ImmReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1086 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::S_MOV_B32), ImmReg)
1087 .addImm(CI.BaseOff);
1088
1089 BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1090 BaseRegFlags = RegState::Kill;
1091
1092 TII->getAddNoCarry(*MBB, Paired.I, DL, BaseReg)
1093 .addReg(ImmReg)
1094 .addReg(AddrReg->getReg(), 0, BaseSubReg)
1095 .addImm(0); // clamp bit
1096 BaseSubReg = 0;
1097 }
1098
1099 MachineInstrBuilder Write2 =
1100 BuildMI(*MBB, Paired.I, DL, Write2Desc)
1101 .addReg(BaseReg, BaseRegFlags, BaseSubReg) // addr
1102 .add(*Data0) // data0
1103 .add(*Data1) // data1
1104 .addImm(NewOffset0) // offset0
1105 .addImm(NewOffset1) // offset1
1106 .addImm(0) // gds
1107 .cloneMergedMemRefs({&*CI.I, &*Paired.I});
1108
1109 moveInstsAfter(Write2, CI.InstsToMove);
1110
1111 CI.I->eraseFromParent();
1112 Paired.I->eraseFromParent();
1113
1114 LLVM_DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
1115 return Write2;
1116 }
1117
1118 MachineBasicBlock::iterator
1119 SILoadStoreOptimizer::mergeImagePair(CombineInfo &CI, CombineInfo &Paired) {
1120 MachineBasicBlock *MBB = CI.I->getParent();
1121 DebugLoc DL = CI.I->getDebugLoc();
1122 const unsigned Opcode = getNewOpcode(CI, Paired);
1123
1124 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1125
1126 Register DestReg = MRI->createVirtualRegister(SuperRC);
1127 unsigned MergedDMask = CI.DMask | Paired.DMask;
1128 unsigned DMaskIdx =
1129 AMDGPU::getNamedOperandIdx(CI.I->getOpcode(), AMDGPU::OpName::dmask);
1130
1131 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
1132 for (unsigned I = 1, E = (*CI.I).getNumOperands(); I != E; ++I) {
1133 if (I == DMaskIdx)
1134 MIB.addImm(MergedDMask);
1135 else
1136 MIB.add((*CI.I).getOperand(I));
1137 }
1138
1139 // It shouldn't be possible to get this far if the two instructions
1140 // don't have a single memoperand, because MachineInstr::mayAlias()
1141 // will return true if this is the case.
1142 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1143
1144 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1145 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1146
1147 MachineInstr *New = MIB.addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1148
1149 unsigned SubRegIdx0, SubRegIdx1;
1150 std::tie(SubRegIdx0, SubRegIdx1) = getSubRegIdxs(CI, Paired);
1151
1152 // Copy to the old destination registers.
1153 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1154 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1155 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1156
1157 BuildMI(*MBB, Paired.I, DL, CopyDesc)
1158 .add(*Dest0) // Copy to same destination including flags and sub reg.
1159 .addReg(DestReg, 0, SubRegIdx0);
1160 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1161 .add(*Dest1)
1162 .addReg(DestReg, RegState::Kill, SubRegIdx1);
1163
1164 moveInstsAfter(Copy1, CI.InstsToMove);
1165
1166 CI.I->eraseFromParent();
1167 Paired.I->eraseFromParent();
1168 return New;
1169 }
1170
1171 MachineBasicBlock::iterator
1172 SILoadStoreOptimizer::mergeSBufferLoadImmPair(CombineInfo &CI, CombineInfo &Paired) {
1173 MachineBasicBlock *MBB = CI.I->getParent();
1174 DebugLoc DL = CI.I->getDebugLoc();
1175 const unsigned Opcode = getNewOpcode(CI, Paired);
1176
1177 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1178
1179 Register DestReg = MRI->createVirtualRegister(SuperRC);
1180 unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1181
1182 // It shouldn't be possible to get this far if the two instructions
1183 // don't have a single memoperand, because MachineInstr::mayAlias()
1184 // will return true if this is the case.
1185 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1186
1187 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1188 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1189
1190 MachineInstr *New =
1191 BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg)
1192 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::sbase))
1193 .addImm(MergedOffset) // offset
1194 .addImm(CI.GLC) // glc
1195 .addImm(CI.DLC) // dlc
1196 .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1197
1198 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1199 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1200 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1201
1202 // Copy to the old destination registers.
1203 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1204 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::sdst);
1205 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::sdst);
1206
1207 BuildMI(*MBB, Paired.I, DL, CopyDesc)
1208 .add(*Dest0) // Copy to same destination including flags and sub reg.
1209 .addReg(DestReg, 0, SubRegIdx0);
1210 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1211 .add(*Dest1)
1212 .addReg(DestReg, RegState::Kill, SubRegIdx1);
1213
1214 moveInstsAfter(Copy1, CI.InstsToMove);
1215
1216 CI.I->eraseFromParent();
1217 Paired.I->eraseFromParent();
1218 return New;
1219 }
1220
1221 MachineBasicBlock::iterator
1222 SILoadStoreOptimizer::mergeBufferLoadPair(CombineInfo &CI, CombineInfo &Paired) {
1223 MachineBasicBlock *MBB = CI.I->getParent();
1224 DebugLoc DL = CI.I->getDebugLoc();
1225
1226 const unsigned Opcode = getNewOpcode(CI, Paired);
1227
1228 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1229
1230 // Create the new, wider destination register.
1231 Register DestReg = MRI->createVirtualRegister(SuperRC);
1232 unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1233
1234 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
1235
1236 const unsigned Regs = getRegs(Opcode, *TII);
1237
1238 if (Regs & VADDR)
1239 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1240
1241 // It shouldn't be possible to get this far if the two instructions
1242 // don't have a single memoperand, because MachineInstr::mayAlias()
1243 // will return true if this is the case.
1244 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1245
1246 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1247 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1248
1249 MachineInstr *New =
1250 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1251 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1252 .addImm(MergedOffset) // offset
1253 .addImm(CI.GLC) // glc
1254 .addImm(CI.SLC) // slc
1255 .addImm(0) // tfe
1256 .addImm(CI.DLC) // dlc
1257 .addImm(0) // swz
1258 .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1259
1260 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1261 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1262 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1263
1264 // Copy to the old destination registers.
1265 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1266 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1267 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1268
1269 BuildMI(*MBB, Paired.I, DL, CopyDesc)
1270 .add(*Dest0) // Copy to same destination including flags and sub reg.
1271 .addReg(DestReg, 0, SubRegIdx0);
1272 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1273 .add(*Dest1)
1274 .addReg(DestReg, RegState::Kill, SubRegIdx1);
1275
1276 moveInstsAfter(Copy1, CI.InstsToMove);
1277
1278 CI.I->eraseFromParent();
1279 Paired.I->eraseFromParent();
1280 return New;
1281 }
1282
1283 MachineBasicBlock::iterator
1284 SILoadStoreOptimizer::mergeTBufferLoadPair(CombineInfo &CI, CombineInfo &Paired) {
1285 MachineBasicBlock *MBB = CI.I->getParent();
1286 DebugLoc DL = CI.I->getDebugLoc();
1287
1288 const unsigned Opcode = getNewOpcode(CI, Paired);
1289
1290 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1291
1292 // Create the new, wider destination register.
1293 Register DestReg = MRI->createVirtualRegister(SuperRC);
1294 unsigned MergedOffset = std::min(CI.Offset, Paired.Offset);
1295
1296 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode), DestReg);
1297
1298 const unsigned Regs = getRegs(Opcode, *TII);
1299
1300 if (Regs & VADDR)
1301 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1302
1303 unsigned JoinedFormat =
1304 getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STI);
1305
1306 // It shouldn't be possible to get this far if the two instructions
1307 // don't have a single memoperand, because MachineInstr::mayAlias()
1308 // will return true if this is the case.
1309 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1310
1311 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1312 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1313
1314 MachineInstr *New =
1315 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1316 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1317 .addImm(MergedOffset) // offset
1318 .addImm(JoinedFormat) // format
1319 .addImm(CI.GLC) // glc
1320 .addImm(CI.SLC) // slc
1321 .addImm(0) // tfe
1322 .addImm(CI.DLC) // dlc
1323 .addImm(0) // swz
1324 .addMemOperand(
1325 combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1326
1327 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1328 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1329 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1330
1331 // Copy to the old destination registers.
1332 const MCInstrDesc &CopyDesc = TII->get(TargetOpcode::COPY);
1333 const auto *Dest0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1334 const auto *Dest1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1335
1336 BuildMI(*MBB, Paired.I, DL, CopyDesc)
1337 .add(*Dest0) // Copy to same destination including flags and sub reg.
1338 .addReg(DestReg, 0, SubRegIdx0);
1339 MachineInstr *Copy1 = BuildMI(*MBB, Paired.I, DL, CopyDesc)
1340 .add(*Dest1)
1341 .addReg(DestReg, RegState::Kill, SubRegIdx1);
1342
1343 moveInstsAfter(Copy1, CI.InstsToMove);
1344
1345 CI.I->eraseFromParent();
1346 Paired.I->eraseFromParent();
1347 return New;
1348 }
1349
1350 MachineBasicBlock::iterator
1351 SILoadStoreOptimizer::mergeTBufferStorePair(CombineInfo &CI, CombineInfo &Paired) {
1352 MachineBasicBlock *MBB = CI.I->getParent();
1353 DebugLoc DL = CI.I->getDebugLoc();
1354
1355 const unsigned Opcode = getNewOpcode(CI, Paired);
1356
1357 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1358 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1359 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1360
1361 // Copy to the new source register.
1362 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1363 Register SrcReg = MRI->createVirtualRegister(SuperRC);
1364
1365 const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1366 const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1367
1368 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
1369 .add(*Src0)
1370 .addImm(SubRegIdx0)
1371 .add(*Src1)
1372 .addImm(SubRegIdx1);
1373
1374 auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
1375 .addReg(SrcReg, RegState::Kill);
1376
1377 const unsigned Regs = getRegs(Opcode, *TII);
1378
1379 if (Regs & VADDR)
1380 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));
1381
1382 unsigned JoinedFormat =
1383 getBufferFormatWithCompCount(CI.Format, CI.Width + Paired.Width, *STI);
1384
1385 // It shouldn't be possible to get this far if the two instructions
1386 // don't have a single memoperand, because MachineInstr::mayAlias()
1387 // will return true if this is the case.
1388 assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());
1389
1390 const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
1391 const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();
1392
1393 MachineInstr *New =
1394 MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
1395 .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
1396 .addImm(std::min(CI.Offset, Paired.Offset)) // offset
1397 .addImm(JoinedFormat) // format
1398 .addImm(CI.GLC) // glc
1399 .addImm(CI.SLC) // slc
1400 .addImm(0) // tfe
1401 .addImm(CI.DLC) // dlc
1402 .addImm(0) // swz
1403 .addMemOperand(
1404 combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));
1405
1406 moveInstsAfter(MIB, CI.InstsToMove);
1407
1408 CI.I->eraseFromParent();
1409 Paired.I->eraseFromParent();
1410 return New;
1411 }
1412
1413 unsigned SILoadStoreOptimizer::getNewOpcode(const CombineInfo &CI,
1414 const CombineInfo &Paired) {
1415 const unsigned Width = CI.Width + Paired.Width;
1416
1417 switch (CI.InstClass) {
1418 default:
1419 assert(CI.InstClass == BUFFER_LOAD || CI.InstClass == BUFFER_STORE);
1420 // FIXME: Handle d16 correctly
1421 return AMDGPU::getMUBUFOpcode(AMDGPU::getMUBUFBaseOpcode(CI.I->getOpcode()),
1422 Width);
1423 case TBUFFER_LOAD:
1424 case TBUFFER_STORE:
1425 return AMDGPU::getMTBUFOpcode(AMDGPU::getMTBUFBaseOpcode(CI.I->getOpcode()),
1426 Width);
1427
1428 case UNKNOWN:
1429 llvm_unreachable("Unknown instruction class");
1430 case S_BUFFER_LOAD_IMM:
1431 switch (Width) {
1432 default:
1433 return 0;
1434 case 2:
1435 return AMDGPU::S_BUFFER_LOAD_DWORDX2_IMM;
1436 case 4:
1437 return AMDGPU::S_BUFFER_LOAD_DWORDX4_IMM;
1438 }
1439 case MIMG:
1440 assert("No overlaps" && (countPopulation(CI.DMask | Paired.DMask) == Width));
1441 return AMDGPU::getMaskedMIMGOp(CI.I->getOpcode(), Width);
1442 }
1443 }
1444
1445 std::pair<unsigned, unsigned>
1446 SILoadStoreOptimizer::getSubRegIdxs(const CombineInfo &CI, const CombineInfo &Paired) {
1447
1448 if (CI.Width == 0 || Paired.Width == 0 || CI.Width + Paired.Width > 4)
1449 return std::make_pair(0, 0);
1450
1451 bool ReverseOrder;
1452 if (CI.InstClass == MIMG) {
1453 assert((countPopulation(CI.DMask | Paired.DMask) == CI.Width + Paired.Width) &&
1454 "No overlaps");
1455 ReverseOrder = CI.DMask > Paired.DMask;
1456 } else
1457 ReverseOrder = CI.Offset > Paired.Offset;
1458
1459 static const unsigned Idxs[4][4] = {
1460 {AMDGPU::sub0, AMDGPU::sub0_sub1, AMDGPU::sub0_sub1_sub2, AMDGPU::sub0_sub1_sub2_sub3},
1461 {AMDGPU::sub1, AMDGPU::sub1_sub2, AMDGPU::sub1_sub2_sub3, 0},
1462 {AMDGPU::sub2, AMDGPU::sub2_sub3, 0, 0},
1463 {AMDGPU::sub3, 0, 0, 0},
1464 };
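  // Idxs[Row][Width - 1] is the subregister covering Width dwords starting at
  // dword Row of the merged register. For example, CI.Width == 2 and
  // Paired.Width == 1 (in order) yields sub0_sub1 for CI and sub2 for Paired.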
1465 unsigned Idx0;
1466 unsigned Idx1;
1467
1468 assert(CI.Width >= 1 && CI.Width <= 3);
1469 assert(Paired.Width >= 1 && Paired.Width <= 3);
1470
1471 if (ReverseOrder) {
1472 Idx1 = Idxs[0][Paired.Width - 1];
1473 Idx0 = Idxs[Paired.Width][CI.Width - 1];
1474 } else {
1475 Idx0 = Idxs[0][CI.Width - 1];
1476 Idx1 = Idxs[CI.Width][Paired.Width - 1];
1477 }
1478
1479 return std::make_pair(Idx0, Idx1);
1480 }
1481
1482 const TargetRegisterClass *
1483 SILoadStoreOptimizer::getTargetRegisterClass(const CombineInfo &CI,
1484 const CombineInfo &Paired) {
1485 if (CI.InstClass == S_BUFFER_LOAD_IMM) {
1486 switch (CI.Width + Paired.Width) {
1487 default:
1488 return nullptr;
1489 case 2:
1490 return &AMDGPU::SReg_64_XEXECRegClass;
1491 case 4:
1492 return &AMDGPU::SGPR_128RegClass;
1493 case 8:
1494 return &AMDGPU::SReg_256RegClass;
1495 case 16:
1496 return &AMDGPU::SReg_512RegClass;
1497 }
1498 } else {
1499 switch (CI.Width + Paired.Width) {
1500 default:
1501 return nullptr;
1502 case 2:
1503 return &AMDGPU::VReg_64RegClass;
1504 case 3:
1505 return &AMDGPU::VReg_96RegClass;
1506 case 4:
1507 return &AMDGPU::VReg_128RegClass;
1508 }
1509 }
1510 }
1511
1512 MachineBasicBlock::iterator
1513 SILoadStoreOptimizer::mergeBufferStorePair(CombineInfo &CI, CombineInfo &Paired) {
1514 MachineBasicBlock *MBB = CI.I->getParent();
1515 DebugLoc DL = CI.I->getDebugLoc();
1516
1517 const unsigned Opcode = getNewOpcode(CI, Paired);
1518
1519 std::pair<unsigned, unsigned> SubRegIdx = getSubRegIdxs(CI, Paired);
1520 const unsigned SubRegIdx0 = std::get<0>(SubRegIdx);
1521 const unsigned SubRegIdx1 = std::get<1>(SubRegIdx);
1522
1523 // Copy to the new source register.
1524 const TargetRegisterClass *SuperRC = getTargetRegisterClass(CI, Paired);
1525 Register SrcReg = MRI->createVirtualRegister(SuperRC);
1526
1527 const auto *Src0 = TII->getNamedOperand(*CI.I, AMDGPU::OpName::vdata);
1528 const auto *Src1 = TII->getNamedOperand(*Paired.I, AMDGPU::OpName::vdata);
1529
1530 BuildMI(*MBB, Paired.I, DL, TII->get(AMDGPU::REG_SEQUENCE), SrcReg)
1531 .add(*Src0)
1532 .addImm(SubRegIdx0)
1533 .add(*Src1)
1534 .addImm(SubRegIdx1);
1535
  auto MIB = BuildMI(*MBB, Paired.I, DL, TII->get(Opcode))
                 .addReg(SrcReg, RegState::Kill);

  const unsigned Regs = getRegs(Opcode, *TII);

  if (Regs & VADDR)
    MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::vaddr));

  // It shouldn't be possible to get this far if the two instructions
  // don't have a single memoperand, because MachineInstr::mayAlias()
  // will return true if this is the case.
  assert(CI.I->hasOneMemOperand() && Paired.I->hasOneMemOperand());

  const MachineMemOperand *MMOa = *CI.I->memoperands_begin();
  const MachineMemOperand *MMOb = *Paired.I->memoperands_begin();

  MachineInstr *New =
      MIB.add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::srsrc))
          .add(*TII->getNamedOperand(*CI.I, AMDGPU::OpName::soffset))
          .addImm(std::min(CI.Offset, Paired.Offset)) // offset
          .addImm(CI.GLC)                             // glc
          .addImm(CI.SLC)                             // slc
          .addImm(0)                                  // tfe
          .addImm(CI.DLC)                             // dlc
          .addImm(0)                                  // swz
          .addMemOperand(combineKnownAdjacentMMOs(*MBB->getParent(), MMOa, MMOb));

  moveInstsAfter(MIB, CI.InstsToMove);

  CI.I->eraseFromParent();
  Paired.I->eraseFromParent();
  return New;
}

MachineOperand
SILoadStoreOptimizer::createRegOrImm(int32_t Val, MachineInstr &MI) const {
  APInt V(32, Val, true);
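  // Values the hardware can encode as inline constants (small integers,
  // typically in the range -16..64) may stay as plain immediates.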
  if (TII->isInlineConstant(V))
    return MachineOperand::CreateImm(Val);

  Register Reg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  MachineInstr *Mov =
      BuildMI(*MI.getParent(), MI.getIterator(), MI.getDebugLoc(),
              TII->get(AMDGPU::S_MOV_B32), Reg)
          .addImm(Val);
  (void)Mov;
  LLVM_DEBUG(dbgs() << " "; Mov->dump());
  return MachineOperand::CreateReg(Reg, false);
}

// Compute base address using Addr and return the final register.
unsigned SILoadStoreOptimizer::computeBase(MachineInstr &MI,
                                           const MemAddress &Addr) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  DebugLoc DL = MI.getDebugLoc();

  assert((TRI->getRegSizeInBits(Addr.Base.LoReg, *MRI) == 32 ||
          Addr.Base.LoSubReg) &&
         "Expected 32-bit Base-Register-Low!!");

  assert((TRI->getRegSizeInBits(Addr.Base.HiReg, *MRI) == 32 ||
          Addr.Base.HiSubReg) &&
         "Expected 32-bit Base-Register-Hi!!");

  LLVM_DEBUG(dbgs() << " Re-Computed Anchor-Base:\n");
  MachineOperand OffsetLo = createRegOrImm(static_cast<int32_t>(Addr.Offset), MI);
  MachineOperand OffsetHi =
      createRegOrImm(static_cast<int32_t>(Addr.Offset >> 32), MI);

  const auto *CarryRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  Register CarryReg = MRI->createVirtualRegister(CarryRC);
  Register DeadCarryReg = MRI->createVirtualRegister(CarryRC);

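  // Split the 64-bit add into a 32-bit add of the low halves and an
  // add-with-carry of the high halves, then rebuild the 64-bit base with a
  // REG_SEQUENCE of the two results.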
  Register DestSub0 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineInstr *LoHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADD_I32_e64), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .addReg(Addr.Base.LoReg, 0, Addr.Base.LoSubReg)
          .add(OffsetLo)
          .addImm(0); // clamp bit
  (void)LoHalf;
  LLVM_DEBUG(dbgs() << " "; LoHalf->dump(););

  MachineInstr *HiHalf =
      BuildMI(*MBB, MBBI, DL, TII->get(AMDGPU::V_ADDC_U32_e64), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .addReg(Addr.Base.HiReg, 0, Addr.Base.HiSubReg)
          .add(OffsetHi)
          .addReg(CarryReg, RegState::Kill)
          .addImm(0); // clamp bit
  (void)HiHalf;
  LLVM_DEBUG(dbgs() << " "; HiHalf->dump(););

  Register FullDestReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass);
  MachineInstr *FullBase =
      BuildMI(*MBB, MBBI, DL, TII->get(TargetOpcode::REG_SEQUENCE), FullDestReg)
          .addReg(DestSub0)
          .addImm(AMDGPU::sub0)
          .addReg(DestSub1)
          .addImm(AMDGPU::sub1);
  (void)FullBase;
  LLVM_DEBUG(dbgs() << " "; FullBase->dump(); dbgs() << "\n";);

  return FullDestReg;
}

// Update base and offset with the NewBase and NewOffset in MI.
void SILoadStoreOptimizer::updateBaseAndOffset(MachineInstr &MI,
                                               unsigned NewBase,
                                               int32_t NewOffset) const {
  auto Base = TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  Base->setReg(NewBase);
  Base->setIsKill(false);
  TII->getNamedOperand(MI, AMDGPU::OpName::offset)->setImm(NewOffset);
}

Optional<int32_t>
SILoadStoreOptimizer::extractConstOffset(const MachineOperand &Op) const {
  if (Op.isImm())
    return Op.getImm();

  if (!Op.isReg())
    return None;

  MachineInstr *Def = MRI->getUniqueVRegDef(Op.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::S_MOV_B32 ||
      !Def->getOperand(1).isImm())
    return None;

  return Def->getOperand(1).getImm();
}

// Analyzes Base and extracts:
//  - the 32-bit base registers and subregisters
//  - the 64-bit constant offset
// It expects the base computation to look like:
//  %OFFSET0:sgpr_32 = S_MOV_B32 8000
//  %LO:vgpr_32, %c:sreg_64_xexec =
//      V_ADD_I32_e64 %BASE_LO:vgpr_32, %103:sgpr_32,
//  %HI:vgpr_32, = V_ADDC_U32_e64 %BASE_HI:vgpr_32, 0, killed %c:sreg_64_xexec
//  %Base:vreg_64 =
//      REG_SEQUENCE %LO:vgpr_32, %subreg.sub0, %HI:vgpr_32, %subreg.sub1
void SILoadStoreOptimizer::processBaseWithConstOffset(const MachineOperand &Base,
                                                      MemAddress &Addr) const {
  if (!Base.isReg())
    return;

  MachineInstr *Def = MRI->getUniqueVRegDef(Base.getReg());
  if (!Def || Def->getOpcode() != AMDGPU::REG_SEQUENCE
      || Def->getNumOperands() != 5)
    return;

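  // A REG_SEQUENCE lists (value, subreg index) pairs after the def, so
  // operands 1 and 3 are the low and high 32-bit halves of the base.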
  MachineOperand BaseLo = Def->getOperand(1);
  MachineOperand BaseHi = Def->getOperand(3);
  if (!BaseLo.isReg() || !BaseHi.isReg())
    return;

  MachineInstr *BaseLoDef = MRI->getUniqueVRegDef(BaseLo.getReg());
  MachineInstr *BaseHiDef = MRI->getUniqueVRegDef(BaseHi.getReg());

  if (!BaseLoDef || BaseLoDef->getOpcode() != AMDGPU::V_ADD_I32_e64 ||
      !BaseHiDef || BaseHiDef->getOpcode() != AMDGPU::V_ADDC_U32_e64)
    return;

  const auto *Src0 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src0);
  const auto *Src1 = TII->getNamedOperand(*BaseLoDef, AMDGPU::OpName::src1);

  auto Offset0P = extractConstOffset(*Src0);
  if (Offset0P)
    BaseLo = *Src1;
  else {
    if (!(Offset0P = extractConstOffset(*Src1)))
      return;
    BaseLo = *Src0;
  }

  Src0 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src0);
  Src1 = TII->getNamedOperand(*BaseHiDef, AMDGPU::OpName::src1);

  if (Src0->isImm())
    std::swap(Src0, Src1);

  if (!Src1->isImm())
    return;

  uint64_t Offset1 = Src1->getImm();
  BaseHi = *Src0;

  Addr.Base.LoReg = BaseLo.getReg();
  Addr.Base.HiReg = BaseHi.getReg();
  Addr.Base.LoSubReg = BaseLo.getSubReg();
  Addr.Base.HiSubReg = BaseHi.getSubReg();
  Addr.Offset = (*Offset0P & 0x00000000ffffffff) | (Offset1 << 32);
}

bool SILoadStoreOptimizer::promoteConstantOffsetToImm(
    MachineInstr &MI,
    MemInfoMap &Visited,
    SmallPtrSet<MachineInstr *, 4> &AnchorList) const {

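  // Only handle pure loads or pure stores; instructions that both read and
  // write memory (or do neither) fail the XOR check below.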
  if (!(MI.mayLoad() ^ MI.mayStore()))
    return false;

  // TODO: Support flat and scratch.
  if (AMDGPU::getGlobalSaddrOp(MI.getOpcode()) < 0)
    return false;

  if (MI.mayLoad() && TII->getNamedOperand(MI, AMDGPU::OpName::vdata) != NULL)
    return false;

  if (AnchorList.count(&MI))
    return false;

  LLVM_DEBUG(dbgs() << "\nTryToPromoteConstantOffsetToImmFor "; MI.dump());

  if (TII->getNamedOperand(MI, AMDGPU::OpName::offset)->getImm()) {
    LLVM_DEBUG(dbgs() << " Const-offset is already promoted.\n";);
    return false;
  }

  // Step1: Find the base registers and the 64-bit constant offset.
  MachineOperand &Base = *TII->getNamedOperand(MI, AMDGPU::OpName::vaddr);
  MemAddress MAddr;
  if (Visited.find(&MI) == Visited.end()) {
    processBaseWithConstOffset(Base, MAddr);
    Visited[&MI] = MAddr;
  } else
    MAddr = Visited[&MI];

  if (MAddr.Offset == 0) {
    LLVM_DEBUG(dbgs() << " Failed to extract constant-offset or there are no"
               " constant offsets that can be promoted.\n";);
    return false;
  }

  LLVM_DEBUG(dbgs() << " BASE: {" << MAddr.Base.HiReg << ", "
             << MAddr.Base.LoReg << "} Offset: " << MAddr.Offset << "\n\n";);

  // Step2: Traverse MI's basic block and find an anchor (an instruction with
  // the same base registers) whose offset is at the largest distance from
  // MI's offset that still fits in the 13-bit immediate.
  // E.g. (64-bit loads)
  // bb:
  //   addr1 = &a + 4096;   load1 = load(addr1, 0)
  //   addr2 = &a + 6144;   load2 = load(addr2, 0)
  //   addr3 = &a + 8192;   load3 = load(addr3, 0)
  //   addr4 = &a + 10240;  load4 = load(addr4, 0)
  //   addr5 = &a + 12288;  load5 = load(addr5, 0)
  //
  // Starting from the first load, the optimization tries to find a new base
  // from which (&a + 4096) has a 13-bit distance. Both &a + 6144 and &a + 8192
  // have a 13-bit distance from &a + 4096. The heuristic picks &a + 8192 as
  // the new base (anchor) because the maximum distance can presumably
  // accommodate more intermediate bases.
  //
  // Step3: Move (&a + 8192) above load1. Compute and promote offsets from
  //        (&a + 8192) for load1, load2, load4.
  //   addr = &a + 8192
  //   load1 = load(addr, -4096)
  //   load2 = load(addr, -2048)
  //   load3 = load(addr, 0)
  //   load4 = load(addr, 2048)
  //   addr5 = &a + 12288;  load5 = load(addr5, 0)
  //
  MachineInstr *AnchorInst = nullptr;
  MemAddress AnchorAddr;
  uint32_t MaxDist = std::numeric_limits<uint32_t>::min();
  SmallVector<std::pair<MachineInstr *, int64_t>, 4> InstsWCommonBase;

  MachineBasicBlock *MBB = MI.getParent();
  MachineBasicBlock::iterator E = MBB->end();
  MachineBasicBlock::iterator MBBI = MI.getIterator();
  ++MBBI;
  const SITargetLowering *TLI =
      static_cast<const SITargetLowering *>(STM->getTargetLowering());

  for ( ; MBBI != E; ++MBBI) {
    MachineInstr &MINext = *MBBI;
    // TODO: Support finding an anchor (with the same base) from store
    // addresses or any other load addresses where the opcodes are different.
    if (MINext.getOpcode() != MI.getOpcode() ||
        TII->getNamedOperand(MINext, AMDGPU::OpName::offset)->getImm())
      continue;

    const MachineOperand &BaseNext =
        *TII->getNamedOperand(MINext, AMDGPU::OpName::vaddr);
    MemAddress MAddrNext;
    if (Visited.find(&MINext) == Visited.end()) {
      processBaseWithConstOffset(BaseNext, MAddrNext);
      Visited[&MINext] = MAddrNext;
    } else
      MAddrNext = Visited[&MINext];

    if (MAddrNext.Base.LoReg != MAddr.Base.LoReg ||
        MAddrNext.Base.HiReg != MAddr.Base.HiReg ||
        MAddrNext.Base.LoSubReg != MAddr.Base.LoSubReg ||
        MAddrNext.Base.HiSubReg != MAddr.Base.HiSubReg)
      continue;

    InstsWCommonBase.push_back(std::make_pair(&MINext, MAddrNext.Offset));

    int64_t Dist = MAddr.Offset - MAddrNext.Offset;
    TargetLoweringBase::AddrMode AM;
    AM.HasBaseReg = true;
    AM.BaseOffs = Dist;
    if (TLI->isLegalGlobalAddressingMode(AM) &&
        (uint32_t)std::abs(Dist) > MaxDist) {
      MaxDist = std::abs(Dist);

      AnchorAddr = MAddrNext;
      AnchorInst = &MINext;
    }
  }

  if (AnchorInst) {
    LLVM_DEBUG(dbgs() << " Anchor-Inst(with max-distance from Offset): ";
               AnchorInst->dump());
    LLVM_DEBUG(dbgs() << " Anchor-Offset from BASE: "
               << AnchorAddr.Offset << "\n\n");

    // Instead of moving up, just re-compute anchor-instruction's base address.
    unsigned Base = computeBase(MI, AnchorAddr);

    updateBaseAndOffset(MI, Base, MAddr.Offset - AnchorAddr.Offset);
    LLVM_DEBUG(dbgs() << " After promotion: "; MI.dump(););

    for (auto P : InstsWCommonBase) {
      TargetLoweringBase::AddrMode AM;
      AM.HasBaseReg = true;
      AM.BaseOffs = P.second - AnchorAddr.Offset;

      if (TLI->isLegalGlobalAddressingMode(AM)) {
        LLVM_DEBUG(dbgs() << " Promote Offset(" << P.second;
                   dbgs() << ")"; P.first->dump());
        updateBaseAndOffset(*P.first, Base, P.second - AnchorAddr.Offset);
        LLVM_DEBUG(dbgs() << " After promotion: "; P.first->dump());
      }
    }
    AnchorList.insert(AnchorInst);
    return true;
  }

  return false;
}

void SILoadStoreOptimizer::addInstToMergeableList(const CombineInfo &CI,
                 std::list<std::list<CombineInfo> > &MergeableInsts) const {
  for (std::list<CombineInfo> &AddrList : MergeableInsts) {
    if (AddrList.front().InstClass == CI.InstClass &&
        AddrList.front().hasSameBaseAddress(*CI.I)) {
      AddrList.emplace_back(CI);
      return;
    }
  }

  // Base address not found, so add a new list.
  MergeableInsts.emplace_back(1, CI);
}

bool SILoadStoreOptimizer::collectMergeableInsts(MachineBasicBlock &MBB,
                 std::list<std::list<CombineInfo> > &MergeableInsts) const {
  bool Modified = false;
  // Caches the analyzed base address of each visited memory instruction.
  MemInfoMap Visited;
  // Contains the list of instructions for which constant offsets are being
  // promoted to the IMM.
  SmallPtrSet<MachineInstr *, 4> AnchorList;

  // Sort potentially mergeable instructions into lists, one per base address.
  for (MachineInstr &MI : MBB.instrs()) {
    // We run this before checking if an address is mergeable, because it can
    // produce better code even if the instructions aren't mergeable.
    if (promoteConstantOffsetToImm(MI, Visited, AnchorList))
      Modified = true;

    const InstClassEnum InstClass = getInstClass(MI.getOpcode(), *TII);
    if (InstClass == UNKNOWN)
      continue;

    // Don't combine if volatile.
    if (MI.hasOrderedMemoryRef())
      continue;

    CombineInfo CI;
    CI.setMI(MI, *TII, *STM);

    if (!CI.hasMergeableAddress(*MRI))
      continue;

    addInstToMergeableList(CI, MergeableInsts);
  }
  return Modified;
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(
                       std::list<std::list<CombineInfo> > &MergeableInsts) {
  bool Modified = false;

  for (std::list<CombineInfo> &MergeList : MergeableInsts) {
    if (MergeList.size() < 2)
      continue;

    bool OptimizeListAgain = false;
    if (!optimizeInstsWithSameBaseAddr(MergeList, OptimizeListAgain)) {
      // We weren't able to make any changes, so clear the list so we don't
      // process the same instructions the next time we try to optimize this
      // block.
      MergeList.clear();
      continue;
    }

    // We made changes, but also determined that there were no more
    // optimization opportunities, so we don't need to reprocess the list.
    if (!OptimizeListAgain)
      MergeList.clear();

    OptimizeAgain |= OptimizeListAgain;
    Modified = true;
  }
  return Modified;
}

void
SILoadStoreOptimizer::removeCombinedInst(std::list<CombineInfo> &MergeList,
                                         const MachineInstr &MI) {

  for (auto CI = MergeList.begin(), E = MergeList.end(); CI != E; ++CI) {
    if (&*CI->I == &MI) {
      MergeList.erase(CI);
      return;
    }
  }
}

bool
SILoadStoreOptimizer::optimizeInstsWithSameBaseAddr(
                                         std::list<CombineInfo> &MergeList,
                                         bool &OptimizeListAgain) {
  bool Modified = false;
  for (auto I = MergeList.begin(); I != MergeList.end(); ++I) {
    CombineInfo &CI = *I;
    CombineInfo Paired;

    if (CI.InstClass == UNKNOWN)
      continue;

    if (!findMatchingInst(CI, Paired))
      goto done;

    Modified = true;
    removeCombinedInst(MergeList, *Paired.I);

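    // Merge the pair. The width thresholds below mirror the widest result
    // each class supports (16 dwords for S_BUFFER_LOAD_IMM, 4 for the VMEM
    // classes); if the merged width is still smaller, the result may be
    // mergeable again.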
    switch (CI.InstClass) {
    default:
      llvm_unreachable("unknown InstClass");
      break;
    case DS_READ: {
      MachineBasicBlock::iterator NewMI = mergeRead2Pair(CI, Paired);
      CI.setMI(NewMI, *TII, *STM);
      break;
    }
    case DS_WRITE: {
      MachineBasicBlock::iterator NewMI = mergeWrite2Pair(CI, Paired);
      CI.setMI(NewMI, *TII, *STM);
      break;
    }
    case S_BUFFER_LOAD_IMM: {
      MachineBasicBlock::iterator NewMI = mergeSBufferLoadImmPair(CI, Paired);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 16;
      break;
    }
    case BUFFER_LOAD: {
      MachineBasicBlock::iterator NewMI = mergeBufferLoadPair(CI, Paired);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
      break;
    }
    case BUFFER_STORE: {
      MachineBasicBlock::iterator NewMI = mergeBufferStorePair(CI, Paired);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
      break;
    }
    case MIMG: {
      MachineBasicBlock::iterator NewMI = mergeImagePair(CI, Paired);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
      break;
    }
    case TBUFFER_LOAD: {
      MachineBasicBlock::iterator NewMI = mergeTBufferLoadPair(CI, Paired);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
      break;
    }
    case TBUFFER_STORE: {
      MachineBasicBlock::iterator NewMI = mergeTBufferStorePair(CI, Paired);
      CI.setMI(NewMI, *TII, *STM);
      OptimizeListAgain |= (CI.Width + Paired.Width) < 4;
      break;
    }
    }

done:
    // Clear the InstsToMove after we have finished searching so we don't have
    // stale values left over if we search for this CI again in another pass
    // over the block.
    CI.InstsToMove.clear();
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  STM = &MF.getSubtarget<GCNSubtarget>();
  if (!STM->loadStoreOptEnabled())
    return false;

  TII = STM->getInstrInfo();
  TRI = &TII->getRegisterInfo();
  STI = &MF.getSubtarget<MCSubtargetInfo>();

  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  assert(MRI->isSSA() && "Must be run on SSA");

  LLVM_DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF) {
    std::list<std::list<CombineInfo> > MergeableInsts;
    // First pass: Collect list of all instructions we know how to merge.
    Modified |= collectMergeableInsts(MBB, MergeableInsts);
    do {
      OptimizeAgain = false;
      Modified |= optimizeBlock(MergeableInsts);
    } while (OptimizeAgain);
  }

  return Modified;
}