//===- LoadStoreOpt.cpp ----------- Generic memory optimizations -*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the LoadStoreOpt optimization pass.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/LoadStoreOpt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>

#define DEBUG_TYPE "loadstore-opt"

using namespace llvm;
using namespace ore;
using namespace MIPatternMatch;

STATISTIC(NumStoresMerged, "Number of stores merged");

const unsigned MaxStoreSizeToForm = 128;

char LoadStoreOpt::ID = 0;
INITIALIZE_PASS_BEGIN(LoadStoreOpt, DEBUG_TYPE, "Generic memory optimizations",
                      false, false)
INITIALIZE_PASS_END(LoadStoreOpt, DEBUG_TYPE, "Generic memory optimizations",
                    false, false)

LoadStoreOpt::LoadStoreOpt(std::function<bool(const MachineFunction &)> F)
    : MachineFunctionPass(ID), DoNotRunPass(F) {}

LoadStoreOpt::LoadStoreOpt()
    : LoadStoreOpt([](const MachineFunction &) { return false; }) {}

void LoadStoreOpt::init(MachineFunction &MF) {
  this->MF = &MF;
  MRI = &MF.getRegInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  TLI = MF.getSubtarget().getTargetLowering();
  LI = MF.getSubtarget().getLegalizerInfo();
  Builder.setMF(MF);
  IsPreLegalizer = !MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Legalized);
  InstsToErase.clear();
}

void LoadStoreOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AAResultsWrapperPass>();
  AU.setPreservesAll();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

BaseIndexOffset GISelAddressing::getPointerInfo(Register Ptr,
                                                MachineRegisterInfo &MRI) {
  BaseIndexOffset Info;
  Register PtrAddRHS;
  Register BaseReg;
  if (!mi_match(Ptr, MRI, m_GPtrAdd(m_Reg(BaseReg), m_Reg(PtrAddRHS)))) {
    Info.setBase(Ptr);
    Info.setOffset(0);
    return Info;
  }
  Info.setBase(BaseReg);
  auto RHSCst = getIConstantVRegValWithLookThrough(PtrAddRHS, MRI);
  if (RHSCst)
    Info.setOffset(RHSCst->Value.getSExtValue());

  // Just recognize a simple case for now. In future we'll need to match
  // indexing patterns for base + index + constant.
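  // For example (illustrative MIR; register names are hypothetical):
  //   %off:_(s64) = G_CONSTANT i64 16
  //   %ptr:_(p0) = G_PTR_ADD %base, %off
  // yields Base = %base, Index = %off, Offset = 16 for %ptr.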
  Info.setIndex(PtrAddRHS);
  return Info;
}

bool GISelAddressing::aliasIsKnownForLoadStore(const MachineInstr &MI1,
                                               const MachineInstr &MI2,
                                               bool &IsAlias,
                                               MachineRegisterInfo &MRI) {
  auto *LdSt1 = dyn_cast<GLoadStore>(&MI1);
  auto *LdSt2 = dyn_cast<GLoadStore>(&MI2);
  if (!LdSt1 || !LdSt2)
    return false;

  BaseIndexOffset BasePtr0 = getPointerInfo(LdSt1->getPointerReg(), MRI);
  BaseIndexOffset BasePtr1 = getPointerInfo(LdSt2->getPointerReg(), MRI);

  if (!BasePtr0.getBase().isValid() || !BasePtr1.getBase().isValid())
    return false;

  int64_t Size1 = LdSt1->getMemSize();
  int64_t Size2 = LdSt2->getMemSize();

  int64_t PtrDiff;
  if (BasePtr0.getBase() == BasePtr1.getBase() && BasePtr0.hasValidOffset() &&
      BasePtr1.hasValidOffset()) {
    PtrDiff = BasePtr1.getOffset() - BasePtr0.getOffset();
    // If the size of a memory access is unknown, do not use it in the
    // analysis. One example of an unknown-size memory access is a load/store
    // of a scalable vector object on the stack.
    // BasePtr1 is PtrDiff away from BasePtr0. They alias if none of the
    // following situations arise:
    if (PtrDiff >= 0 &&
        Size1 != static_cast<int64_t>(MemoryLocation::UnknownSize)) {
      // [----BasePtr0----]
      //                         [---BasePtr1--]
      // ========PtrDiff========>
      IsAlias = !(Size1 <= PtrDiff);
      return true;
    }
    if (PtrDiff < 0 &&
        Size2 != static_cast<int64_t>(MemoryLocation::UnknownSize)) {
      //                     [----BasePtr0----]
      // [---BasePtr1--]
      // =====(-PtrDiff)====>
      IsAlias = !((PtrDiff + Size2) <= 0);
      return true;
    }
    return false;
  }

  // If both BasePtr0 and BasePtr1 are FrameIndexes, we will not be
  // able to calculate their relative offset if at least one arises
  // from an alloca. However, these allocas cannot overlap and we
  // can infer there is no alias.
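  // E.g. two distinct frame indices where at least one is a non-fixed
  // (alloca) stack object occupy disjoint slots, so IsAlias can safely be
  // set to false below.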
  auto *Base0Def = getDefIgnoringCopies(BasePtr0.getBase(), MRI);
  auto *Base1Def = getDefIgnoringCopies(BasePtr1.getBase(), MRI);
  if (!Base0Def || !Base1Def)
    return false; // Couldn't tell anything.

  if (Base0Def->getOpcode() != Base1Def->getOpcode())
    return false;

  if (Base0Def->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    MachineFrameInfo &MFI = Base0Def->getMF()->getFrameInfo();
    // If the bases have the same frame index but we couldn't find a
    // constant offset (the indices are different), be conservative.
    if (Base0Def != Base1Def &&
        (!MFI.isFixedObjectIndex(Base0Def->getOperand(1).getIndex()) ||
         !MFI.isFixedObjectIndex(Base1Def->getOperand(1).getIndex()))) {
      IsAlias = false;
      return true;
    }
  }

  // This implementation is a lot more primitive than the SDAG one for now.
  // FIXME: what about constant pools?
  if (Base0Def->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
    auto *GV0 = Base0Def->getOperand(1).getGlobal();
    auto *GV1 = Base1Def->getOperand(1).getGlobal();
    if (GV0 != GV1) {
      IsAlias = false;
      return true;
    }
  }

  // Can't tell anything about aliasing.
  return false;
}

bool GISelAddressing::instMayAlias(const MachineInstr &MI,
                                   const MachineInstr &Other,
                                   MachineRegisterInfo &MRI,
                                   AliasAnalysis *AA) {
  struct MemUseCharacteristics {
    bool IsVolatile;
    bool IsAtomic;
    Register BasePtr;
    int64_t Offset;
    uint64_t NumBytes;
    MachineMemOperand *MMO;
  };

  auto getCharacteristics =
      [&](const MachineInstr *MI) -> MemUseCharacteristics {
    if (const auto *LS = dyn_cast<GLoadStore>(MI)) {
      Register BaseReg;
      int64_t Offset = 0;
      // No pre/post-inc addressing modes are considered here, unlike in SDAG.
      if (!mi_match(LS->getPointerReg(), MRI,
                    m_GPtrAdd(m_Reg(BaseReg), m_ICst(Offset)))) {
        BaseReg = LS->getPointerReg();
        Offset = 0;
      }

      uint64_t Size = MemoryLocation::getSizeOrUnknown(
          LS->getMMO().getMemoryType().getSizeInBytes());
      return {LS->isVolatile(), LS->isAtomic(), BaseReg,
              Offset /*base offset*/, Size, &LS->getMMO()};
    }
    // FIXME: support recognizing lifetime instructions.
    // Default.
    return {false /*isvolatile*/,
            /*isAtomic*/ false, Register(),
            (int64_t)0 /*offset*/, 0 /*size*/,
            (MachineMemOperand *)nullptr};
  };
  MemUseCharacteristics MUC0 = getCharacteristics(&MI),
                        MUC1 = getCharacteristics(&Other);

  // If they are to the same address, then they must be aliases.
  if (MUC0.BasePtr.isValid() && MUC0.BasePtr == MUC1.BasePtr &&
      MUC0.Offset == MUC1.Offset)
    return true;

  // If they are both volatile then they cannot be reordered.
  if (MUC0.IsVolatile && MUC1.IsVolatile)
    return true;

  // Be conservative about atomics for the moment.
  // TODO: This is way overconservative for unordered atomics (see D66309).
  if (MUC0.IsAtomic && MUC1.IsAtomic)
    return true;

  // If one operation reads from invariant memory and the other may store,
  // they cannot alias.
  if (MUC0.MMO && MUC1.MMO) {
    if ((MUC0.MMO->isInvariant() && MUC1.MMO->isStore()) ||
        (MUC1.MMO->isInvariant() && MUC0.MMO->isStore()))
      return false;
  }

  // Try to prove that there is aliasing, or that there is no aliasing. Either
  // way, we can return now. If nothing can be proved, proceed with more tests.
  bool IsAlias;
  if (GISelAddressing::aliasIsKnownForLoadStore(MI, Other, IsAlias, MRI))
    return IsAlias;

  // The following all rely on MMO0 and MMO1 being valid.
  if (!MUC0.MMO || !MUC1.MMO)
    return true;

  // FIXME: port the alignment based alias analysis from SDAG's isAlias().
  int64_t SrcValOffset0 = MUC0.MMO->getOffset();
  int64_t SrcValOffset1 = MUC1.MMO->getOffset();
  uint64_t Size0 = MUC0.NumBytes;
  uint64_t Size1 = MUC1.NumBytes;
  if (AA && MUC0.MMO->getValue() && MUC1.MMO->getValue() &&
      Size0 != MemoryLocation::UnknownSize &&
      Size1 != MemoryLocation::UnknownSize) {
    // Use alias analysis information.
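    // Overlap0/1 extend each access's size so that both MemoryLocations are
    // measured from the same (minimum) source-value offset, giving AA a
    // conservative extent for each access.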
    int64_t MinOffset = std::min(SrcValOffset0, SrcValOffset1);
    int64_t Overlap0 = Size0 + SrcValOffset0 - MinOffset;
    int64_t Overlap1 = Size1 + SrcValOffset1 - MinOffset;
    if (AA->isNoAlias(MemoryLocation(MUC0.MMO->getValue(), Overlap0,
                                     MUC0.MMO->getAAInfo()),
                      MemoryLocation(MUC1.MMO->getValue(), Overlap1,
                                     MUC1.MMO->getAAInfo())))
      return false;
  }

  // Otherwise we have to assume they alias.
  return true;
}

/// Returns true if the instruction creates an unavoidable hazard that
/// forces a boundary between store merge candidates.
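/// Examples include inline asm with unmodeled side effects and volatile or
/// ordered-atomic memory accesses.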
static bool isInstHardMergeHazard(MachineInstr &MI) {
  return MI.hasUnmodeledSideEffects() || MI.hasOrderedMemoryRef();
}

bool LoadStoreOpt::mergeStores(SmallVectorImpl<GStore *> &StoresToMerge) {
  // Try to merge all the stores in the vector, splitting into separate
  // segments as necessary.
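  // E.g. (a sketch, assuming s16 and s32 stores are legal): given 7 adjacent
  // s8 stores, we first form one s32 store from 4 of them, then an s16 store
  // from the next 2, and leave the final s8 store alone.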
  assert(StoresToMerge.size() > 1 && "Expected multiple stores to merge");
  LLT OrigTy = MRI->getType(StoresToMerge[0]->getValueReg());
  LLT PtrTy = MRI->getType(StoresToMerge[0]->getPointerReg());
  unsigned AS = PtrTy.getAddressSpace();
  // Ensure the legal store info is computed for this address space.
  initializeStoreMergeTargetInfo(AS);
  const auto &LegalSizes = LegalStoreSizes[AS];

#ifndef NDEBUG
  for (auto *StoreMI : StoresToMerge)
    assert(MRI->getType(StoreMI->getValueReg()) == OrigTy);
#endif

  const auto &DL = MF->getFunction().getParent()->getDataLayout();
  bool AnyMerged = false;
  do {
    unsigned NumPow2 = llvm::bit_floor(StoresToMerge.size());
    unsigned MaxSizeBits = NumPow2 * OrigTy.getSizeInBits().getFixedValue();
    // Compute the biggest store we can generate to handle the number of
    // stores.
    unsigned MergeSizeBits;
    for (MergeSizeBits = MaxSizeBits; MergeSizeBits > 1; MergeSizeBits /= 2) {
      LLT StoreTy = LLT::scalar(MergeSizeBits);
      EVT StoreEVT =
          getApproximateEVTForLLT(StoreTy, DL, MF->getFunction().getContext());
      if (LegalSizes.size() > MergeSizeBits && LegalSizes[MergeSizeBits] &&
          TLI->canMergeStoresTo(AS, StoreEVT, *MF) &&
          (TLI->isTypeLegal(StoreEVT)))
        break; // We can generate a MergeSize bits store.
    }
    if (MergeSizeBits <= OrigTy.getSizeInBits())
      return AnyMerged; // No greater merge.

    unsigned NumStoresToMerge = MergeSizeBits / OrigTy.getSizeInBits();
    // Perform the actual merging.
    SmallVector<GStore *, 8> SingleMergeStores(
        StoresToMerge.begin(), StoresToMerge.begin() + NumStoresToMerge);
    AnyMerged |= doSingleStoreMerge(SingleMergeStores);
    StoresToMerge.erase(StoresToMerge.begin(),
                        StoresToMerge.begin() + NumStoresToMerge);
  } while (StoresToMerge.size() > 1);
  return AnyMerged;
}

bool LoadStoreOpt::isLegalOrBeforeLegalizer(const LegalityQuery &Query,
                                            MachineFunction &MF) const {
  auto Action = LI->getAction(Query).Action;
  // If the instruction is unsupported, it can't be legalized at all.
  if (Action == LegalizeActions::Unsupported)
    return false;
  return IsPreLegalizer || Action == LegalizeAction::Legal;
}

bool LoadStoreOpt::doSingleStoreMerge(SmallVectorImpl<GStore *> &Stores) {
  assert(Stores.size() > 1);
  // We know that all the stores are consecutive and there are no aliasing
  // operations in the range. However, the values that are being stored may be
  // generated anywhere before each store. To ensure we have the values
  // available, we materialize the wide value and new store at the place of the
  // final store in the merge sequence.
  GStore *FirstStore = Stores[0];
  const unsigned NumStores = Stores.size();
  LLT SmallTy = MRI->getType(FirstStore->getValueReg());
  LLT WideValueTy =
      LLT::scalar(NumStores * SmallTy.getSizeInBits().getFixedValue());

  // For each store, compute pairwise merged debug locs.
  DebugLoc MergedLoc = Stores.front()->getDebugLoc();
  for (auto *Store : drop_begin(Stores))
    MergedLoc = DILocation::getMergedLocation(MergedLoc, Store->getDebugLoc());

  Builder.setInstr(*Stores.back());
  Builder.setDebugLoc(MergedLoc);

  // If all of the store values are constants, then create a wide constant
  // directly. Otherwise, we need to generate some instructions to merge the
  // existing values together into a wider type.
  SmallVector<APInt, 8> ConstantVals;
  for (auto *Store : Stores) {
    auto MaybeCst =
        getIConstantVRegValWithLookThrough(Store->getValueReg(), *MRI);
    if (!MaybeCst) {
      ConstantVals.clear();
      break;
    }
    ConstantVals.emplace_back(MaybeCst->Value);
  }

  Register WideReg;
  auto *WideMMO =
      MF->getMachineMemOperand(&FirstStore->getMMO(), 0, WideValueTy);
  if (ConstantVals.empty()) {
    // Mimic the SDAG behaviour here and don't try to do anything for unknown
    // values. In future, we should also support the cases of loads and
    // extracted vector elements.
    return false;
  }

  assert(ConstantVals.size() == NumStores);
  // Check if our wide constant is legal.
  if (!isLegalOrBeforeLegalizer({TargetOpcode::G_CONSTANT, {WideValueTy}}, *MF))
    return false;
  APInt WideConst(WideValueTy.getSizeInBits(), 0);
  for (unsigned Idx = 0; Idx < ConstantVals.size(); ++Idx) {
    // Insert the smaller constant into the corresponding position in the
    // wider one.
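    // E.g. merging two s8 constants 0x12 (Stores[0], the lowest address) and
    // 0x34 produces the s16 constant 0x3412; on a little-endian target the
    // stored bytes then match the original stores.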
    WideConst.insertBits(ConstantVals[Idx], Idx * SmallTy.getSizeInBits());
  }
  WideReg = Builder.buildConstant(WideValueTy, WideConst).getReg(0);
  auto NewStore =
      Builder.buildStore(WideReg, FirstStore->getPointerReg(), *WideMMO);
  (void)NewStore;
  LLVM_DEBUG(dbgs() << "Merged " << Stores.size()
                    << " stores into merged store: " << *NewStore);
  LLVM_DEBUG(for (auto *MI : Stores) dbgs() << " " << *MI;);
  NumStoresMerged += Stores.size();

  MachineOptimizationRemarkEmitter MORE(*MF, nullptr);
  MORE.emit([&]() {
    MachineOptimizationRemark R(DEBUG_TYPE, "MergedStore",
                                FirstStore->getDebugLoc(),
                                FirstStore->getParent());
    R << "Merged " << NV("NumMerged", Stores.size()) << " stores of "
      << NV("OrigWidth", SmallTy.getSizeInBytes())
      << " bytes into a single store of "
      << NV("NewWidth", WideValueTy.getSizeInBytes()) << " bytes";
    return R;
  });

  for (auto *MI : Stores)
    InstsToErase.insert(MI);
  return true;
}

bool LoadStoreOpt::processMergeCandidate(StoreMergeCandidate &C) {
  if (C.Stores.size() < 2) {
    C.reset();
    return false;
  }

  LLVM_DEBUG(dbgs() << "Checking store merge candidate with "
                    << C.Stores.size() << " stores, starting with "
                    << *C.Stores[0]);
  // We know that the stores in the candidate are adjacent.
  // Now we need to check if any potential aliasing instructions recorded
  // during the search alias with load/stores added to the candidate
  // afterwards. For example, if we have the candidate:
  //   C.Stores = [ST1, ST2, ST3, ST4]
  // and after seeing ST2 we saw a load LD1, which did not alias with ST1 or
  // ST2, then we would have recorded it into the PotentialAliases structure
  // with the associated index value of "1". Then we see ST3 and ST4 and add
  // them to the candidate group. We know that LD1 does not alias with ST1 or
  // ST2, since we already did that check. However we don't yet know if it
  // may alias ST3 and ST4, so we perform those checks now.
  SmallVector<GStore *> StoresToMerge;

  auto DoesStoreAliasWithPotential = [&](unsigned Idx, GStore &CheckStore) {
    for (auto AliasInfo : reverse(C.PotentialAliases)) {
      MachineInstr *PotentialAliasOp = AliasInfo.first;
      unsigned PreCheckedIdx = AliasInfo.second;
      if (Idx < PreCheckedIdx) {
        // Once our store index is lower than the index associated with the
        // potential alias, we know that we've already checked for this alias
        // and all of the earlier potential aliases too.
        return false;
      }
      // Need to check this alias.
      if (GISelAddressing::instMayAlias(CheckStore, *PotentialAliasOp, *MRI,
                                        AA)) {
        LLVM_DEBUG(dbgs() << "Potential alias " << *PotentialAliasOp
                          << " detected\n");
        return true;
      }
    }
    return false;
  };
  // Start from the last store in the group, and check if it aliases with any
  // of the potential aliasing operations in the list.
  for (int StoreIdx = C.Stores.size() - 1; StoreIdx >= 0; --StoreIdx) {
    auto *CheckStore = C.Stores[StoreIdx];
    if (DoesStoreAliasWithPotential(StoreIdx, *CheckStore))
      continue;
    StoresToMerge.emplace_back(CheckStore);
  }

  LLVM_DEBUG(dbgs() << StoresToMerge.size()
                    << " stores remaining after alias checks. Merging...\n");

  // Now that we've checked for aliasing hazards, merge any stores left.
  C.reset();
  if (StoresToMerge.size() < 2)
    return false;
  return mergeStores(StoresToMerge);
}

bool LoadStoreOpt::operationAliasesWithCandidate(MachineInstr &MI,
                                                 StoreMergeCandidate &C) {
  if (C.Stores.empty())
    return false;
  return llvm::any_of(C.Stores, [&](MachineInstr *OtherMI) {
    return instMayAlias(MI, *OtherMI, *MRI, AA);
  });
}

void LoadStoreOpt::StoreMergeCandidate::addPotentialAlias(MachineInstr &MI) {
  PotentialAliases.emplace_back(std::make_pair(&MI, Stores.size() - 1));
}

bool LoadStoreOpt::addStoreToCandidate(GStore &StoreMI,
                                       StoreMergeCandidate &C) {
  // Check if the given store writes to an adjacent address, and other
  // requirements.
  LLT ValueTy = MRI->getType(StoreMI.getValueReg());
  LLT PtrTy = MRI->getType(StoreMI.getPointerReg());

  // Only handle scalars.
  if (!ValueTy.isScalar())
    return false;

  // Don't allow truncating stores for now.
  if (StoreMI.getMemSizeInBits() != ValueTy.getSizeInBits())
    return false;

  // Avoid adding volatile or ordered stores to the candidate. We already have
  // a check for this in instMayAlias(), but that only gets called later,
  // between potential aliasing hazards.
  if (!StoreMI.isSimple())
    return false;

  Register StoreAddr = StoreMI.getPointerReg();
  auto BIO = getPointerInfo(StoreAddr, *MRI);
  Register StoreBase = BIO.getBase();
  if (C.Stores.empty()) {
    C.BasePtr = StoreBase;
    if (!BIO.hasValidOffset()) {
      C.CurrentLowestOffset = 0;
    } else {
      C.CurrentLowestOffset = BIO.getOffset();
    }
    // This is the first store of the candidate.
    // If the offset can't possibly allow for a lower addressed store with the
    // same base, don't bother adding it.
    if (BIO.hasValidOffset() &&
        BIO.getOffset() < static_cast<int64_t>(ValueTy.getSizeInBytes()))
      return false;
    C.Stores.emplace_back(&StoreMI);
    LLVM_DEBUG(dbgs() << "Starting a new merge candidate group with: "
                      << StoreMI);
    return true;
  }

  // Check the store is the same size as the existing ones in the candidate.
  if (MRI->getType(C.Stores[0]->getValueReg()).getSizeInBits() !=
      ValueTy.getSizeInBits())
    return false;

  if (MRI->getType(C.Stores[0]->getPointerReg()).getAddressSpace() !=
      PtrTy.getAddressSpace())
    return false;

  // There are other stores in the candidate. Check that the store address
  // writes to the next lowest adjacent address.
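  // E.g. for s32 stores with C.CurrentLowestOffset == 16, only a store at
  // offset 12 (16 minus 4 bytes) can extend this candidate.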
  if (C.BasePtr != StoreBase)
    return false;
  // If we don't have a valid offset, we can't guarantee that this store is
  // adjacent.
  if (!BIO.hasValidOffset())
    return false;
  if ((C.CurrentLowestOffset -
       static_cast<int64_t>(ValueTy.getSizeInBytes())) != BIO.getOffset())
    return false;

  // This writes to an adjacent address. Allow it.
  C.Stores.emplace_back(&StoreMI);
  C.CurrentLowestOffset = C.CurrentLowestOffset - ValueTy.getSizeInBytes();
  LLVM_DEBUG(dbgs() << "Candidate added store: " << StoreMI);
  return true;
}

bool LoadStoreOpt::mergeBlockStores(MachineBasicBlock &MBB) {
  bool Changed = false;
  // Walk through the block bottom-up, looking for merging candidates.
  StoreMergeCandidate Candidate;
  for (MachineInstr &MI : llvm::reverse(MBB)) {
    if (InstsToErase.contains(&MI))
      continue;

    if (auto *StoreMI = dyn_cast<GStore>(&MI)) {
      // We have a G_STORE. Add it to the candidate if it writes to an adjacent
      // address.
      if (!addStoreToCandidate(*StoreMI, Candidate)) {
        // The store wasn't eligible to be added. We may need to record it as a
        // potential alias.
        if (operationAliasesWithCandidate(*StoreMI, Candidate)) {
          Changed |= processMergeCandidate(Candidate);
          continue;
        }
        Candidate.addPotentialAlias(*StoreMI);
      }
      continue;
    }

    // If we don't have any stores yet, this instruction can't pose a problem.
    if (Candidate.Stores.empty())
      continue;

    // We're dealing with some other kind of instruction.
    if (isInstHardMergeHazard(MI)) {
      Changed |= processMergeCandidate(Candidate);
      Candidate.Stores.clear();
      continue;
    }

    if (!MI.mayLoadOrStore())
      continue;

    if (operationAliasesWithCandidate(MI, Candidate)) {
      // We have a potential alias, so process the current candidate if we can
      // and then continue looking for a new candidate.
      Changed |= processMergeCandidate(Candidate);
      continue;
    }

    // Record this instruction as a potential alias for future stores that are
    // added to the candidate.
    Candidate.addPotentialAlias(MI);
  }

  // Process any candidate left after finishing searching the entire block.
  Changed |= processMergeCandidate(Candidate);

  // Erase instructions now that we're no longer iterating over the block.
  for (auto *MI : InstsToErase)
    MI->eraseFromParent();
  InstsToErase.clear();
  return Changed;
}

/// Check if the store \p Store is a truncstore that can be merged. That is,
/// it's a store of a shifted value of \p SrcVal. If \p SrcVal is an empty
/// Register then it does not need to match, and SrcVal is set to the source
/// value found.
/// On match, returns the start byte offset of the \p SrcVal that is being
/// stored.
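/// E.g. for s8 narrow stores of a wide s32 value, a G_LSHR shift amount of 16
/// corresponds to byte offset 2 of the wide value.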
static std::optional<int64_t>
getTruncStoreByteOffset(GStore &Store, Register &SrcVal,
                        MachineRegisterInfo &MRI) {
  Register TruncVal;
  if (!mi_match(Store.getValueReg(), MRI, m_GTrunc(m_Reg(TruncVal))))
    return std::nullopt;

  // The shift amount must be a constant multiple of the narrow type.
  // It is translated to the offset address in the wide source value "y".
  //
  // x = G_LSHR y, ShiftAmtC
  // s8 z = G_TRUNC x
  // store z, ...
  Register FoundSrcVal;
  int64_t ShiftAmt;
  if (!mi_match(TruncVal, MRI,
                m_any_of(m_GLShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt)),
                         m_GAShr(m_Reg(FoundSrcVal), m_ICst(ShiftAmt))))) {
    if (!SrcVal.isValid() || TruncVal == SrcVal) {
      if (!SrcVal.isValid())
        SrcVal = TruncVal;
      return 0; // If it's the lowest index store.
    }
    return std::nullopt;
  }

  unsigned NarrowBits = Store.getMMO().getMemoryType().getScalarSizeInBits();
  if (ShiftAmt % NarrowBits != 0)
    return std::nullopt;
  const unsigned Offset = ShiftAmt / NarrowBits;

  if (SrcVal.isValid() && FoundSrcVal != SrcVal)
    return std::nullopt;

  if (!SrcVal.isValid())
    SrcVal = FoundSrcVal;
  else if (MRI.getType(SrcVal) != MRI.getType(FoundSrcVal))
    return std::nullopt;
  return Offset;
}

/// Match a pattern where a wide type scalar value is stored by several narrow
/// stores. Fold it into a single store or a BSWAP and a store if the target
/// supports it.
///
/// Assuming little endian target:
///  i8 *p = ...
///  i32 val = ...
///  p[0] = (val >> 0) & 0xFF;
///  p[1] = (val >> 8) & 0xFF;
///  p[2] = (val >> 16) & 0xFF;
///  p[3] = (val >> 24) & 0xFF;
/// =>
///  *((i32)p) = val;
///
///  i8 *p = ...
///  i32 val = ...
///  p[0] = (val >> 24) & 0xFF;
///  p[1] = (val >> 16) & 0xFF;
///  p[2] = (val >> 8) & 0xFF;
///  p[3] = (val >> 0) & 0xFF;
/// =>
///  *((i32)p) = BSWAP(val);
bool LoadStoreOpt::mergeTruncStore(GStore &StoreMI,
                                   SmallPtrSetImpl<GStore *> &DeletedStores) {
  LLT MemTy = StoreMI.getMMO().getMemoryType();

  // We only handle merging simple stores of 1-4 bytes.
  if (!MemTy.isScalar())
    return false;
  switch (MemTy.getSizeInBits()) {
  case 8:
  case 16:
  case 32:
    break;
  default:
    return false;
  }
  if (!StoreMI.isSimple())
    return false;

  // We do a simple search for mergeable stores prior to this one.
  // Any potential alias hazard along the way terminates the search.
  SmallVector<GStore *> FoundStores;

  // We're looking for:
  // 1) a (store(trunc(...)))
  // 2) of an LSHR/ASHR of a single wide value, by the appropriate shift to get
  //    the partial value stored.
  // 3) where the offsets form either a little or big-endian sequence.

  auto &LastStore = StoreMI;

  // The single base pointer that all stores must use.
  Register BaseReg;
  int64_t LastOffset;
  if (!mi_match(LastStore.getPointerReg(), *MRI,
                m_GPtrAdd(m_Reg(BaseReg), m_ICst(LastOffset)))) {
    BaseReg = LastStore.getPointerReg();
    LastOffset = 0;
  }

  GStore *LowestIdxStore = &LastStore;
  int64_t LowestIdxOffset = LastOffset;

  Register WideSrcVal;
  auto LowestShiftAmt = getTruncStoreByteOffset(LastStore, WideSrcVal, *MRI);
  if (!LowestShiftAmt)
    return false; // Didn't match a trunc.
  assert(WideSrcVal.isValid());

  LLT WideStoreTy = MRI->getType(WideSrcVal);
  // The wide type might not be a multiple of the memory type, e.g. s48 and
  // s32.
  if (WideStoreTy.getSizeInBits() % MemTy.getSizeInBits() != 0)
    return false;
  const unsigned NumStoresRequired =
      WideStoreTy.getSizeInBits() / MemTy.getSizeInBits();

  SmallVector<int64_t, 8> OffsetMap(NumStoresRequired, INT64_MAX);
  OffsetMap[*LowestShiftAmt] = LastOffset;
  FoundStores.emplace_back(&LastStore);

  const int MaxInstsToCheck = 10;
  int NumInstsChecked = 0;
  for (auto II = ++LastStore.getReverseIterator();
       II != LastStore.getParent()->rend() && NumInstsChecked < MaxInstsToCheck;
       ++II) {
    NumInstsChecked++;
    GStore *NewStore;
    if ((NewStore = dyn_cast<GStore>(&*II))) {
      if (NewStore->getMMO().getMemoryType() != MemTy || !NewStore->isSimple())
        break;
    } else if (II->isLoadFoldBarrier() || II->mayLoad()) {
      break;
    } else {
      continue; // This is a safe instruction we can look past.
    }

    Register NewBaseReg;
    int64_t MemOffset;
    // Check that we're storing to the same base + some offset.
    if (!mi_match(NewStore->getPointerReg(), *MRI,
                  m_GPtrAdd(m_Reg(NewBaseReg), m_ICst(MemOffset)))) {
      NewBaseReg = NewStore->getPointerReg();
      MemOffset = 0;
    }
    if (BaseReg != NewBaseReg)
      break;

    auto ShiftByteOffset = getTruncStoreByteOffset(*NewStore, WideSrcVal, *MRI);
    if (!ShiftByteOffset)
      break;
    if (MemOffset < LowestIdxOffset) {
      LowestIdxOffset = MemOffset;
      LowestIdxStore = NewStore;
    }

    // Map the offset in the store to the offset in the combined value, and
    // bail out if it has been set before.
    if (*ShiftByteOffset < 0 || *ShiftByteOffset >= NumStoresRequired ||
        OffsetMap[*ShiftByteOffset] != INT64_MAX)
      break;
    OffsetMap[*ShiftByteOffset] = MemOffset;

    FoundStores.emplace_back(NewStore);
    // Reset the counter since we've found a matching inst.
    NumInstsChecked = 0;
    if (FoundStores.size() == NumStoresRequired)
      break;
  }

  if (FoundStores.size() != NumStoresRequired) {
    if (FoundStores.size() == 1)
      return false;
    // We didn't find enough stores to merge into the size of the original
    // source value, but we may be able to generate a smaller store if we
    // truncate the source value.
    WideStoreTy = LLT::scalar(FoundStores.size() * MemTy.getScalarSizeInBits());
  }

  unsigned NumStoresFound = FoundStores.size();

  const auto &DL = LastStore.getMF()->getDataLayout();
  auto &C = LastStore.getMF()->getFunction().getContext();
  // Check that a store of the wide type is both allowed and fast on the
  // target.
  unsigned Fast = 0;
  bool Allowed = TLI->allowsMemoryAccess(
      C, DL, WideStoreTy, LowestIdxStore->getMMO(), &Fast);
  if (!Allowed || !Fast)
    return false;

  // Check if the pieces of the value are going to the expected places in
  // memory to merge the stores.
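  // E.g. for a 4 x s8 merge with LowestIdxOffset == 0, a little-endian layout
  // expects OffsetMap == [0, 1, 2, 3] (value byte i stored at address i) and
  // a big-endian layout expects [3, 2, 1, 0].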
  unsigned NarrowBits = MemTy.getScalarSizeInBits();
  auto checkOffsets = [&](bool MatchLittleEndian) {
    if (MatchLittleEndian) {
      for (unsigned i = 0; i != NumStoresFound; ++i)
        if (OffsetMap[i] != i * (NarrowBits / 8) + LowestIdxOffset)
          return false;
    } else { // MatchBigEndian by reversing loop counter.
      for (unsigned i = 0, j = NumStoresFound - 1; i != NumStoresFound;
           ++i, --j)
        if (OffsetMap[j] != i * (NarrowBits / 8) + LowestIdxOffset)
          return false;
    }
    return true;
  };

  // Check if the offsets line up for the native data layout of this target.
  bool NeedBswap = false;
  bool NeedRotate = false;
  if (!checkOffsets(DL.isLittleEndian())) {
    // Special-case: check if the byte offsets line up for the opposite endian.
    if (NarrowBits == 8 && checkOffsets(DL.isBigEndian()))
      NeedBswap = true;
    else if (NumStoresFound == 2 && checkOffsets(DL.isBigEndian()))
      NeedRotate = true;
    else
      return false;
  }

  if (NeedBswap &&
      !isLegalOrBeforeLegalizer({TargetOpcode::G_BSWAP, {WideStoreTy}}, *MF))
    return false;
  if (NeedRotate &&
      !isLegalOrBeforeLegalizer(
          {TargetOpcode::G_ROTR, {WideStoreTy, WideStoreTy}}, *MF))
    return false;

  Builder.setInstrAndDebugLoc(StoreMI);

  if (WideStoreTy != MRI->getType(WideSrcVal))
    WideSrcVal = Builder.buildTrunc(WideStoreTy, WideSrcVal).getReg(0);

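  // When the bytes appear in exactly reversed order, a G_BSWAP fixes them up.
  // A reversed two-part merge is likewise equivalent to rotating the wide
  // value by half its width, e.g. G_ROTR %val, 16 for an s32 built from two
  // s16 halves.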
  if (NeedBswap) {
    WideSrcVal = Builder.buildBSwap(WideStoreTy, WideSrcVal).getReg(0);
  } else if (NeedRotate) {
    assert(WideStoreTy.getSizeInBits() % 2 == 0 &&
           "Unexpected type for rotate");
    auto RotAmt =
        Builder.buildConstant(WideStoreTy, WideStoreTy.getSizeInBits() / 2);
    WideSrcVal =
        Builder.buildRotateRight(WideStoreTy, WideSrcVal, RotAmt).getReg(0);
  }

  Builder.buildStore(WideSrcVal, LowestIdxStore->getPointerReg(),
                     LowestIdxStore->getMMO().getPointerInfo(),
                     LowestIdxStore->getMMO().getAlign());

  // Erase the old stores.
  for (auto *ST : FoundStores) {
    ST->eraseFromParent();
    DeletedStores.insert(ST);
  }
  return true;
}

bool LoadStoreOpt::mergeTruncStoresBlock(MachineBasicBlock &BB) {
  bool Changed = false;
  SmallVector<GStore *, 16> Stores;
  SmallPtrSet<GStore *, 8> DeletedStores;
  // Walk up the block so we can see the most eligible stores.
  for (MachineInstr &MI : llvm::reverse(BB))
    if (auto *StoreMI = dyn_cast<GStore>(&MI))
      Stores.emplace_back(StoreMI);

  for (auto *StoreMI : Stores) {
    if (DeletedStores.count(StoreMI))
      continue;
    if (mergeTruncStore(*StoreMI, DeletedStores))
      Changed = true;
  }
  return Changed;
}

bool LoadStoreOpt::mergeFunctionStores(MachineFunction &MF) {
  bool Changed = false;
  for (auto &BB : MF) {
    Changed |= mergeBlockStores(BB);
    Changed |= mergeTruncStoresBlock(BB);
  }

  // Erase all dead instructions left over by the merging.
  if (Changed) {
    for (auto &BB : MF) {
      for (auto &I : make_early_inc_range(make_range(BB.rbegin(), BB.rend()))) {
        if (isTriviallyDead(I, *MRI))
          I.eraseFromParent();
      }
    }
  }

  return Changed;
}

void LoadStoreOpt::initializeStoreMergeTargetInfo(unsigned AddrSpace) {
  // Query the legalizer info to record what store types are legal.
  // We record this because we don't want to bother trying to merge stores into
  // illegal ones, which would just result in being split again.
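  // E.g. on a typical 64-bit target this will usually leave just the 8, 16,
  // 32 and 64-bit (and perhaps 128-bit) entries set, though the exact set
  // depends on the target's legalization rules.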

  if (LegalStoreSizes.count(AddrSpace)) {
    assert(LegalStoreSizes[AddrSpace].any());
    return; // Already cached sizes for this address space.
  }

  // Need to reserve at least MaxStoreSizeToForm + 1 bits.
  BitVector LegalSizes(MaxStoreSizeToForm * 2);
  const auto &LI = *MF->getSubtarget().getLegalizerInfo();
  const auto &DL = MF->getFunction().getParent()->getDataLayout();
  Type *IRPtrTy = PointerType::get(MF->getFunction().getContext(), AddrSpace);
  LLT PtrTy = getLLTForType(*IRPtrTy, DL);
  // We assume that we're not going to be generating any stores wider than
  // MaxStoreSizeToForm bits for now.
  for (unsigned Size = 2; Size <= MaxStoreSizeToForm; Size *= 2) {
    LLT Ty = LLT::scalar(Size);
    SmallVector<LegalityQuery::MemDesc, 2> MemDescrs(
        {{Ty, Ty.getSizeInBits(), AtomicOrdering::NotAtomic}});
    SmallVector<LLT> StoreTys({Ty, PtrTy});
    LegalityQuery Q(TargetOpcode::G_STORE, StoreTys, MemDescrs);
    LegalizeActionStep ActionStep = LI.getAction(Q);
    if (ActionStep.Action == LegalizeActions::Legal)
      LegalSizes.set(Size);
  }
  assert(LegalSizes.any() && "Expected some store sizes to be legal!");
  LegalStoreSizes[AddrSpace] = LegalSizes;
}

bool LoadStoreOpt::runOnMachineFunction(MachineFunction &MF) {
  // If the ISel pipeline failed, do not bother running this pass.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::FailedISel))
    return false;

  LLVM_DEBUG(dbgs() << "Begin memory optimizations for: " << MF.getName()
                    << '\n');

  init(MF);
  bool Changed = false;
  Changed |= mergeFunctionStores(MF);

  LegalStoreSizes.clear();
  return Changed;
}