//===-- llvm/CodeGen/GlobalISel/CombinerHelper.h --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===--------------------------------------------------------------------===//
//
/// This contains common combine transformations that may be used in a combine
/// pass, or by the target elsewhere.
/// Targets can pick individual opcode transformations from the helper or use
/// tryCombine which invokes all transformations. All of the transformations
/// return true if the MachineInstr changed and false otherwise.
//
//===--------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H
#define LLVM_CODEGEN_GLOBALISEL_COMBINER_HELPER_H

#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/Support/Alignment.h"
#include <functional>

namespace llvm {

class GISelChangeObserver;
class MachineIRBuilder;
class MachineInstrBuilder;
class MachineRegisterInfo;
class MachineInstr;
class MachineOperand;
class GISelKnownBits;
class MachineDominatorTree;
class LegalizerInfo;
struct LegalityQuery;
class TargetLowering;

struct PreferredTuple {
  LLT Ty;                // The result type of the extend.
  unsigned ExtendOpcode; // G_ANYEXT/G_SEXT/G_ZEXT
  MachineInstr *MI;
};

struct IndexedLoadStoreMatchInfo {
  Register Addr;
  Register Base;
  Register Offset;
  bool IsPre;
};

struct PtrAddChain {
  int64_t Imm;
  Register Base;
};

struct RegisterImmPair {
  Register Reg;
  int64_t Imm;
};

struct ShiftOfShiftedLogic {
  MachineInstr *Logic;
  MachineInstr *Shift2;
  Register LogicNonShiftReg;
  uint64_t ValSum;
};

using OperandBuildSteps =
    SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
struct InstructionBuildSteps {
  unsigned Opcode = 0;          ///< The opcode for the produced instruction.
  OperandBuildSteps OperandFns; ///< Operands to be added to the instruction.
  InstructionBuildSteps() = default;
  InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
      : Opcode(Opcode), OperandFns(OperandFns) {}
};

struct InstructionStepsMatchInfo {
  /// Describes instructions to be built during a combine.
  SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
  InstructionStepsMatchInfo() = default;
  InstructionStepsMatchInfo(
      std::initializer_list<InstructionBuildSteps> InstrsToBuild)
      : InstrsToBuild(InstrsToBuild) {}
};

class CombinerHelper {
protected:
  MachineIRBuilder &Builder;
  MachineRegisterInfo &MRI;
  GISelChangeObserver &Observer;
  GISelKnownBits *KB;
  MachineDominatorTree *MDT;
  const LegalizerInfo *LI;

public:
  CombinerHelper(GISelChangeObserver &Observer, MachineIRBuilder &B,
                 GISelKnownBits *KB = nullptr,
                 MachineDominatorTree *MDT = nullptr,
                 const LegalizerInfo *LI = nullptr);

  GISelKnownBits *getKnownBits() const { return KB; }

  const TargetLowering &getTargetLowering() const;

  /// \return true if the combine is running prior to legalization, or if \p
  /// Query is legal on the target.
  bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;

  /// Call MachineRegisterInfo::replaceRegWith() and inform the observer of
  /// the change.
  void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
                      Register ToReg) const;

  /// Replace a single register operand with a new register and inform the
  /// observer of the changes.
  void replaceRegOpWith(MachineRegisterInfo &MRI, MachineOperand &FromRegOp,
                        Register ToReg) const;

  /// If \p MI is a COPY, try to combine it.
  /// Returns true if MI changed.
  bool tryCombineCopy(MachineInstr &MI);
  bool matchCombineCopy(MachineInstr &MI);
  void applyCombineCopy(MachineInstr &MI);

  /// Returns true if \p DefMI precedes \p UseMI or they are the same
  /// instruction. Both must be in the same basic block.
  bool isPredecessor(const MachineInstr &DefMI, const MachineInstr &UseMI);

  /// Returns true if \p DefMI dominates \p UseMI. By definition an
  /// instruction dominates itself.
  ///
  /// If we haven't been provided with a MachineDominatorTree during
  /// construction, this function returns a conservative result that tracks
  /// just a single basic block.
  bool dominates(const MachineInstr &DefMI, const MachineInstr &UseMI);

  /// If \p MI is an extend that consumes the result of a load, try to combine
  /// it. Returns true if MI changed.
  bool tryCombineExtendingLoads(MachineInstr &MI);
  bool matchCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);
  void applyCombineExtendingLoads(MachineInstr &MI, PreferredTuple &MatchInfo);

  /// Combine \p MI into a pre-indexed or post-indexed load/store operation if
  /// legal and the surrounding code makes it useful.
  ///
  /// For example (pre-indexed):
  ///
  ///     $addr = G_PTR_ADD $base, $offset
  ///     [...]
  ///     $val = G_LOAD $addr
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// -->
  ///
  ///     $val, $addr = G_INDEXED_LOAD $base, $offset, 1 (IsPre)
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// or (post-indexed):
  ///
  ///     G_STORE $val, $base
  ///     [...]
  ///     $addr = G_PTR_ADD $base, $offset
  ///     [...]
  ///     $whatever = COPY $addr
  ///
  /// -->
  ///
  ///     $addr = G_INDEXED_STORE $val, $base, $offset
  ///     [...]
  ///     $whatever = COPY $addr
  bool tryCombineIndexedLoadStore(MachineInstr &MI);
  bool matchCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo);
  void applyCombineIndexedLoadStore(MachineInstr &MI,
                                    IndexedLoadStoreMatchInfo &MatchInfo);

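  /// Fold away a redundant sign extension of a sign-extending load, i.e.
  /// sext(trunc(sextload x)), when the load already provides the needed sign
  /// bits.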
  bool matchSextTruncSextLoad(MachineInstr &MI);
  bool applySextTruncSextLoad(MachineInstr &MI);

  /// Match sext_inreg(load p), imm -> sextload p
  bool matchSextInRegOfLoad(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);
  bool applySextInRegOfLoad(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);

  /// If a brcond's true block is not the fallthrough, make it so by inverting
  /// the condition and swapping operands.
  bool matchOptBrCondByInvertingCond(MachineInstr &MI);
  void applyOptBrCondByInvertingCond(MachineInstr &MI);

  /// If \p MI is G_CONCAT_VECTORS, try to combine it.
  /// Returns true if MI changed.
  /// Right now, we support:
  /// - concat_vector(undef, undef) => undef
  /// - concat_vector(build_vector(A, B), build_vector(C, D)) =>
  ///   build_vector(A, B, C, D)
  ///
  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
  bool tryCombineConcatVectors(MachineInstr &MI);
  /// Check if the G_CONCAT_VECTORS \p MI is undef or if it
  /// can be flattened into a build_vector.
  /// In the first case \p IsUndef will be true.
  /// In the second case \p Ops will contain the operands needed
  /// to produce the flattened build_vector.
  ///
  /// \pre MI.getOpcode() == G_CONCAT_VECTORS.
  bool matchCombineConcatVectors(MachineInstr &MI, bool &IsUndef,
                                 SmallVectorImpl<Register> &Ops);
  /// Replace \p MI with a flattened build_vector with \p Ops or an
  /// implicit_def if IsUndef is true.
  void applyCombineConcatVectors(MachineInstr &MI, bool IsUndef,
                                 const ArrayRef<Register> Ops);

  /// Try to combine G_SHUFFLE_VECTOR into G_CONCAT_VECTORS.
  /// Returns true if MI changed.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool tryCombineShuffleVector(MachineInstr &MI);
  /// Check if the G_SHUFFLE_VECTOR \p MI can be replaced by a
  /// concat_vectors.
  /// \p Ops will contain the operands needed to produce the flattened
  /// concat_vectors.
  ///
  /// \pre MI.getOpcode() == G_SHUFFLE_VECTOR.
  bool matchCombineShuffleVector(MachineInstr &MI,
                                 SmallVectorImpl<Register> &Ops);
  /// Replace \p MI with a concat_vectors with \p Ops.
  void applyCombineShuffleVector(MachineInstr &MI,
                                 const ArrayRef<Register> Ops);

  /// Optimize memcpy intrinsics et al, e.g. constant length calls.
  /// \p MaxLen, if non-zero, specifies the maximum length of a mem libcall to
  /// inline.
  bool tryCombineMemCpyFamily(MachineInstr &MI, unsigned MaxLen = 0);

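  /// Fold (ptr_add (ptr_add base, C1), C2) -> (ptr_add base, C1 + C2).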
  bool matchPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);
  bool applyPtrAddImmedChain(MachineInstr &MI, PtrAddChain &MatchInfo);

  /// Fold (shift (shift base, x), y) -> (shift base, (x + y)).
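  /// For example, (shl (shl x, 2), 3) becomes (shl x, 5).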
  bool matchShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);
  bool applyShiftImmedChain(MachineInstr &MI, RegisterImmPair &MatchInfo);

  /// If we have a shift-by-constant of a bitwise logic op that itself has a
  /// shift-by-constant operand with identical opcode, we may be able to
  /// convert that into 2 independent shifts followed by the logic op.
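  /// For example:
  ///   (shl (and (shl x, C1), y), C2) -> (and (shl x, C1 + C2), (shl y, C2))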
  bool matchShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo);
  bool applyShiftOfShiftedLogic(MachineInstr &MI,
                                ShiftOfShiftedLogic &MatchInfo);

  /// Transform a multiply by a power-of-2 value to a left shift.
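  /// For example, (G_MUL x, 8) becomes (G_SHL x, 3).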
  bool matchCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);
  bool applyCombineMulToShl(MachineInstr &MI, unsigned &ShiftVal);

  /// Transform a G_SHL with an extended source into a narrower shift if
  /// possible.
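  /// For example, (G_SHL (G_ZEXT x), C) may become (G_ZEXT (G_SHL x, C)) on
  /// the narrower type of x when the shifted value still fits.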
  bool matchCombineShlOfExtend(MachineInstr &MI, RegisterImmPair &MatchData);
  bool applyCombineShlOfExtend(MachineInstr &MI,
                               const RegisterImmPair &MatchData);

  /// Reduce a shift by a constant to an unmerge and a shift on a half sized
  /// type. This will not produce a shift smaller than \p TargetShiftSize.
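  /// For example, a 64-bit logical shift right by 40 can be rewritten as an
  /// unmerge into 32-bit halves followed by an 8-bit shift of the high half.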
  bool matchCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftSize,
                                  unsigned &ShiftVal);
  bool applyCombineShiftToUnmerge(MachineInstr &MI, const unsigned &ShiftVal);
  bool tryCombineShiftToUnmerge(MachineInstr &MI, unsigned TargetShiftAmount);

  /// Transform <ty,...> G_UNMERGE(G_MERGE ty X, Y, Z) -> ty X, Y, Z.
  bool
  matchCombineUnmergeMergeToPlainValues(MachineInstr &MI,
                                        SmallVectorImpl<Register> &Operands);
  bool
  applyCombineUnmergeMergeToPlainValues(MachineInstr &MI,
                                        SmallVectorImpl<Register> &Operands);

  /// Transform G_UNMERGE Constant -> Constant1, Constant2, ...
  bool matchCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts);
  bool applyCombineUnmergeConstant(MachineInstr &MI,
                                   SmallVectorImpl<APInt> &Csts);

  /// Transform X, Y<dead> = G_UNMERGE Z -> X = G_TRUNC Z.
  bool matchCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);
  bool applyCombineUnmergeWithDeadLanesToTrunc(MachineInstr &MI);

  /// Transform X, Y = G_UNMERGE(G_ZEXT(Z)) -> X = G_ZEXT(Z); Y = G_CONSTANT 0
  bool matchCombineUnmergeZExtToZExt(MachineInstr &MI);
  bool applyCombineUnmergeZExtToZExt(MachineInstr &MI);

  /// Transform fp_instr(cst) to constant result of the fp operation.
  bool matchCombineConstantFoldFpUnary(MachineInstr &MI,
                                       Optional<APFloat> &Cst);
  bool applyCombineConstantFoldFpUnary(MachineInstr &MI,
                                       Optional<APFloat> &Cst);

  /// Transform IntToPtr(PtrToInt(x)) to x if cast is in the same address
  /// space.
  bool matchCombineI2PToP2I(MachineInstr &MI, Register &Reg);
  bool applyCombineI2PToP2I(MachineInstr &MI, Register &Reg);

  /// Transform PtrToInt(IntToPtr(x)) to x.
  bool matchCombineP2IToI2P(MachineInstr &MI, Register &Reg);
  bool applyCombineP2IToI2P(MachineInstr &MI, Register &Reg);

  /// Transform G_ADD (G_PTRTOINT x), y -> G_PTRTOINT (G_PTR_ADD x, y)
  /// Transform G_ADD y, (G_PTRTOINT x) -> G_PTRTOINT (G_PTR_ADD x, y)
  bool matchCombineAddP2IToPtrAdd(MachineInstr &MI,
                                  std::pair<Register, bool> &PtrRegAndCommute);
  bool applyCombineAddP2IToPtrAdd(MachineInstr &MI,
                                  std::pair<Register, bool> &PtrRegAndCommute);

  /// Transform G_PTR_ADD (G_INTTOPTR C1), C2 -> G_INTTOPTR (C1 + C2).
  bool matchCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);
  bool applyCombineConstPtrAddToI2P(MachineInstr &MI, int64_t &NewCst);

  /// Transform anyext(trunc(x)) to x.
  bool matchCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);
  bool applyCombineAnyExtTrunc(MachineInstr &MI, Register &Reg);

  /// Transform [asz]ext([asz]ext(x)) to [asz]ext x.
  bool matchCombineExtOfExt(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);
  bool applyCombineExtOfExt(MachineInstr &MI,
                            std::tuple<Register, unsigned> &MatchInfo);

  /// Transform fneg(fneg(x)) to x.
  bool matchCombineFNegOfFNeg(MachineInstr &MI, Register &Reg);

  /// Match fabs(fabs(x)) to fabs(x).
  bool matchCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);
  bool applyCombineFAbsOfFAbs(MachineInstr &MI, Register &Src);

  /// Transform trunc ([asz]ext x) to x or ([asz]ext x) or (trunc x).
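  /// The result depends on how the truncated width compares with the width of
  /// x: for example, with x:s8, trunc (zext x to s32) to s16 becomes
  /// (zext x to s16), to s8 becomes x, and to s4 becomes (trunc x to s4).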
  bool matchCombineTruncOfExt(MachineInstr &MI,
                              std::pair<Register, unsigned> &MatchInfo);
  bool applyCombineTruncOfExt(MachineInstr &MI,
                              std::pair<Register, unsigned> &MatchInfo);

  /// Transform trunc (shl x, K) to shl (trunc x), K, provided
  /// K < VT.getScalarSizeInBits() for the truncated type VT.
  bool matchCombineTruncOfShl(MachineInstr &MI,
                              std::pair<Register, Register> &MatchInfo);
  bool applyCombineTruncOfShl(MachineInstr &MI,
                              std::pair<Register, Register> &MatchInfo);

  /// Transform G_MUL(x, -1) to G_SUB(0, x)
  bool applyCombineMulByNegativeOne(MachineInstr &MI);

  /// Return true if any explicit use operand on \p MI is defined by a
  /// G_IMPLICIT_DEF.
  bool matchAnyExplicitUseIsUndef(MachineInstr &MI);

  /// Return true if all register explicit use operands on \p MI are defined by
  /// a G_IMPLICIT_DEF.
  bool matchAllExplicitUsesAreUndef(MachineInstr &MI);

  /// Return true if a G_SHUFFLE_VECTOR instruction \p MI has an undef mask.
  bool matchUndefShuffleVectorMask(MachineInstr &MI);

  /// Return true if a G_STORE instruction \p MI is storing an undef value.
  bool matchUndefStore(MachineInstr &MI);

  /// Return true if a G_SELECT instruction \p MI has an undef comparison.
  bool matchUndefSelectCmp(MachineInstr &MI);

  /// Return true if a G_SELECT instruction \p MI has a constant comparison. If
  /// true, \p OpIdx will store the operand index of the known selected value.
  bool matchConstantSelectCmp(MachineInstr &MI, unsigned &OpIdx);

  /// Replace an instruction with a G_FCONSTANT with value \p C.
  bool replaceInstWithFConstant(MachineInstr &MI, double C);

  /// Replace an instruction with a G_CONSTANT with value \p C.
  bool replaceInstWithConstant(MachineInstr &MI, int64_t C);

  /// Replace an instruction with a G_IMPLICIT_DEF.
  bool replaceInstWithUndef(MachineInstr &MI);

  /// Delete \p MI and replace all of its uses with its \p OpIdx-th operand.
  bool replaceSingleDefInstWithOperand(MachineInstr &MI, unsigned OpIdx);

  /// Delete \p MI and replace all of its uses with \p Replacement.
  bool replaceSingleDefInstWithReg(MachineInstr &MI, Register Replacement);

  /// Return true if \p MOP1 and \p MOP2 are register operands that are defined
  /// by equivalent instructions.
  bool matchEqualDefs(const MachineOperand &MOP1, const MachineOperand &MOP2);

  /// Return true if \p MOP is defined by a G_CONSTANT with a value equal to
  /// \p C.
  bool matchConstantOp(const MachineOperand &MOP, int64_t C);

  /// Optimize (cond ? x : x) -> x
  bool matchSelectSameVal(MachineInstr &MI);

  /// Optimize (x op x) -> x
  bool matchBinOpSameVal(MachineInstr &MI);

  /// Check if operand \p OpIdx is zero.
  bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx);

  /// Check if operand \p OpIdx is undef.
  bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx);

  /// Check if operand \p OpIdx is known to be a power of 2.
  bool matchOperandIsKnownToBeAPowerOfTwo(MachineInstr &MI, unsigned OpIdx);

  /// Erase \p MI
  bool eraseInst(MachineInstr &MI);

  /// Return true if MI is a G_ADD which can be simplified to a G_SUB.
  bool matchSimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo);
  bool applySimplifyAddToSub(MachineInstr &MI,
                             std::tuple<Register, Register> &MatchInfo);

  /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
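  /// For example, (and (sext a), (sext b)) can become (sext (and a, b)).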
  bool
  matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
                                       InstructionStepsMatchInfo &MatchInfo);

  /// Replace \p MI with a series of instructions described in \p MatchInfo.
  bool applyBuildInstructionSteps(MachineInstr &MI,
                                  InstructionStepsMatchInfo &MatchInfo);

  /// Match ashr (shl x, C), C -> sext_inreg x, (bitwidth of x) - C
  bool matchAshrShlToSextInreg(MachineInstr &MI,
                               std::tuple<Register, int64_t> &MatchInfo);
  bool applyAshShlToSextInreg(MachineInstr &MI,
                              std::tuple<Register, int64_t> &MatchInfo);

  /// \return true if \p MI is a G_AND instruction whose operands are x and y
  /// where x & y == x or x & y == y. (E.g., one of the operands is an all-ones
  /// value.)
  ///
  /// \param [in] MI - The G_AND instruction.
  /// \param [out] Replacement - A register the G_AND should be replaced with
  /// on success.
  bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);

  /// \return true if \p MI is a G_OR instruction whose operands are x and y
  /// where x | y == x or x | y == y. (E.g., one of the operands is an
  /// all-zeros value.)
  ///
  /// \param [in] MI - The G_OR instruction.
  /// \param [out] Replacement - A register the G_OR should be replaced with
  /// on success.
  bool matchRedundantOr(MachineInstr &MI, Register &Replacement);

  /// \return true if \p MI is a G_SEXT_INREG that can be erased.
  bool matchRedundantSExtInReg(MachineInstr &MI);

  /// Combine inverting a result of a compare into the opposite cond code.
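  /// For example, (G_XOR (G_ICMP intpred(eq), x, y), 1) with an s1 result
  /// becomes (G_ICMP intpred(ne), x, y).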
  bool matchNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);
  bool applyNotCmp(MachineInstr &MI, SmallVectorImpl<Register> &RegsToNegate);

  /// Fold (xor (and x, y), y) -> (and (not x), y)
  ///{
  bool matchXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo);
  bool applyXorOfAndWithSameReg(MachineInstr &MI,
                                std::pair<Register, Register> &MatchInfo);
  ///}

  /// Combine G_PTR_ADD with nullptr to G_INTTOPTR
  bool matchPtrAddZero(MachineInstr &MI);
  bool applyPtrAddZero(MachineInstr &MI);

  /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
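  /// For example, (G_UREM x, y), with y known to be a power of 2, can become
  /// (G_AND x, (G_ADD y, -1)).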
  bool applySimplifyURemByPow2(MachineInstr &MI);

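  /// Combine a chain of G_INSERT_VECTOR_ELT instructions into a single
  /// G_BUILD_VECTOR; \p MatchInfo collects the inserted element registers.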
  bool matchCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo);
  bool applyCombineInsertVecElts(MachineInstr &MI,
                                 SmallVectorImpl<Register> &MatchInfo);

  /// Match expression trees of the form
  ///
  /// \code
  ///  sN *a = ...
  ///  sM val = a[0] | (a[1] << N) | (a[2] << 2N) | (a[3] << 3N) ...
  /// \endcode
  ///
  /// And check if the tree can be replaced with an M-bit load + possibly a
  /// bswap.
  bool matchLoadOrCombine(MachineInstr &MI,
                          std::function<void(MachineIRBuilder &)> &MatchInfo);
  bool applyLoadOrCombine(MachineInstr &MI,
                          std::function<void(MachineIRBuilder &)> &MatchInfo);

  /// Try to transform \p MI by using all of the above
  /// combine functions. Returns true if changed.
  bool tryCombine(MachineInstr &MI);

private:
  // Memcpy family optimization helpers.
  bool optimizeMemcpy(MachineInstr &MI, Register Dst, Register Src,
                      unsigned KnownLen, Align DstAlign, Align SrcAlign,
                      bool IsVolatile);
  bool optimizeMemmove(MachineInstr &MI, Register Dst, Register Src,
                       unsigned KnownLen, Align DstAlign, Align SrcAlign,
                       bool IsVolatile);
  bool optimizeMemset(MachineInstr &MI, Register Dst, Register Val,
                      unsigned KnownLen, Align DstAlign, bool IsVolatile);

  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a post-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPostIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
                              Register &Offset);

  /// Given a non-indexed load or store instruction \p MI, find an offset that
  /// can be usefully and legally folded into it as a pre-indexing operation.
  ///
  /// \returns true if a candidate is found.
  bool findPreIndexCandidate(MachineInstr &MI, Register &Addr, Register &Base,
                             Register &Offset);

  /// Helper function for matchLoadOrCombine. Searches for Registers
  /// which may have been produced by a load instruction + some arithmetic.
  ///
  /// \param [in] Root - The search root.
  ///
  /// \returns The Registers found during the search.
  Optional<SmallVector<Register, 8>>
  findCandidatesForLoadOrCombine(const MachineInstr *Root) const;

  /// Helper function for matchLoadOrCombine.
  ///
  /// Checks if every register in \p RegsToVisit is defined by a load
  /// instruction + some arithmetic.
  ///
  /// \param [out] MemOffset2Idx - Maps the byte positions each load ends up
  /// at to the index of the load.
  /// \param [in] MemSizeInBits - The number of bits each load should produce.
  ///
  /// \returns The lowest-index load found and the lowest index on success.
  Optional<std::pair<MachineInstr *, int64_t>> findLoadOffsetsForLoadOrCombine(
      SmallDenseMap<int64_t, int64_t, 8> &MemOffset2Idx,
      const SmallVector<Register, 8> &RegsToVisit,
      const unsigned MemSizeInBits);
};
} // namespace llvm

#endif