1 //===- LowerMatrixIntrinsics.cpp -  Lower matrix intrinsics -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Lower matrix intrinsics to vector operations.
10 //
11 // TODO:
12 //  * Improve fusion:
13 //   * Support more cases, e.g. multiply-add, multiply-sub, operands/results
14 //     transposed.
15 //   * Improve cost-modeling, e.g. choose different number of rows/columns
16 //     columns for tiles, consider cost of copies on alias.
17 //
18 //===----------------------------------------------------------------------===//
19 
20 #include "llvm/Transforms/Scalar/LowerMatrixIntrinsics.h"
21 #include "llvm/ADT/GraphTraits.h"
22 #include "llvm/ADT/PostOrderIterator.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/DomTreeUpdater.h"
26 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
27 #include "llvm/Analysis/TargetTransformInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/Analysis/VectorUtils.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/DebugInfoMetadata.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/MatrixBuilder.h"
38 #include "llvm/IR/PatternMatch.h"
39 #include "llvm/InitializePasses.h"
40 #include "llvm/Pass.h"
41 #include "llvm/Support/Alignment.h"
42 #include "llvm/Support/CommandLine.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Transforms/Scalar.h"
45 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
46 #include "llvm/Transforms/Utils/LoopUtils.h"
47 #include "llvm/Transforms/Utils/MatrixUtils.h"
48 
49 using namespace llvm;
50 using namespace PatternMatch;
51 
52 #define DEBUG_TYPE "lower-matrix-intrinsics"
53 
54 static cl::opt<bool>
55     FuseMatrix("fuse-matrix", cl::init(true), cl::Hidden,
56                cl::desc("Enable/disable fusing matrix instructions."));
57 // TODO: Allow and use non-square tiles.
58 static cl::opt<unsigned> TileSize(
59     "fuse-matrix-tile-size", cl::init(4), cl::Hidden,
60     cl::desc(
61         "Tile size for matrix instruction fusion using square-shaped tiles."));
62 static cl::opt<bool> TileUseLoops("fuse-matrix-use-loops", cl::init(false),
63                                   cl::Hidden,
64                                   cl::desc("Generate loop nest for tiling."));
65 static cl::opt<bool> ForceFusion(
66     "force-fuse-matrix", cl::init(false), cl::Hidden,
67     cl::desc("Force matrix instruction fusion even if not profitable."));
68 static cl::opt<bool> AllowContractEnabled(
69     "matrix-allow-contract", cl::init(false), cl::Hidden,
70     cl::desc("Allow the use of FMAs if available and profitable. This may "
71              "result in different results, due to less rounding error."));
72 
73 enum class MatrixLayoutTy { ColumnMajor, RowMajor };
74 
75 static cl::opt<MatrixLayoutTy> MatrixLayout(
76     "matrix-default-layout", cl::init(MatrixLayoutTy::ColumnMajor),
77     cl::desc("Sets the default matrix layout"),
78     cl::values(clEnumValN(MatrixLayoutTy::ColumnMajor, "column-major",
79                           "Use column-major layout"),
80                clEnumValN(MatrixLayoutTy::RowMajor, "row-major",
81                           "Use row-major layout")));
82 
83 /// Helper function to either return Scope, if it is a subprogram or the
84 /// attached subprogram for a local scope.
getSubprogram(DIScope * Scope)85 static DISubprogram *getSubprogram(DIScope *Scope) {
86   if (auto *Subprogram = dyn_cast<DISubprogram>(Scope))
87     return Subprogram;
88   return cast<DILocalScope>(Scope)->getSubprogram();
89 }
90 
91 namespace {
92 
93 // Given an element pointer \p BasePtr to the start of a (sub) matrix, compute
94 // the start address of vector \p VecIdx with type (\p EltType x \p NumElements)
95 // assuming \p Stride elements between start two consecutive vectors.
96 // \p Stride must be >= \p NumElements.
97 // For column-major matrixes, the function computes the address of a column
98 // vectors and \p NumElements must be set to the number of elements in a column
99 // (= number of rows of the matrix). For row-major matrixes, the function
100 // computes the address of a row vector and \p NumElements must be set to the
101 // number of elements in a column (= number of columns of the matrix).
102 //
103 // Consider a 4x4 matrix in column-mjaor layout like below
104 //
105 //      0       1      2      3
106 // 0   v_0_0  v_0_1  v_0_2  v_0_3
107 // 1   v_1_0  v_1_1  v_1_2  v_1_3
108 // 2   v_2_0  v_2_1  v_2_2  v_2_3
109 // 3   v_3_0  v_3_1  v_3_2  v_3_3
110 
111 // To compute the column addresses for a 2x3 sub-matrix at row 1 and column 1,
112 // we need a pointer to the first element of the submatrix as base pointer.
113 // Then we can use computeVectorAddr to compute the addresses for the columns
114 // of the sub-matrix.
115 //
116 // Column 0: computeVectorAddr(Base, 0 (column), 4 (stride), 2 (num rows), ..)
117 //           -> just returns Base
118 // Column 1: computeVectorAddr(Base, 1 (column), 4 (stride), 2 (num rows), ..)
119 //           -> returns Base + (1 * 4)
120 // Column 2: computeVectorAddr(Base, 2 (column), 4 (stride), 2 (num rows), ..)
121 //           -> returns Base + (2 * 4)
122 //
123 // The graphic below illustrates the number of elements in a column (marked
124 // with |) and the number of skipped elements (marked with }).
125 //
126 //         v_0_0  v_0_1 {v_0_2 {v_0_3
127 //                Base   Col 1  Col 2
128 //                  |     |      |
129 //         v_1_0 |v_1_1 |v_1_2 |v_1_3
130 //         v_2_0 |v_2_1 |v_2_2 |v_2_3
131 //         v_3_0 {v_3_1 {v_3_2  v_3_3
132 //
computeVectorAddr(Value * BasePtr,Value * VecIdx,Value * Stride,unsigned NumElements,Type * EltType,IRBuilder<> & Builder)133 Value *computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride,
134                          unsigned NumElements, Type *EltType,
135                          IRBuilder<> &Builder) {
136 
137   assert((!isa<ConstantInt>(Stride) ||
138           cast<ConstantInt>(Stride)->getZExtValue() >= NumElements) &&
139          "Stride must be >= the number of elements in the result vector.");
140   unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();
141 
142   // Compute the start of the vector with index VecIdx as VecIdx * Stride.
143   Value *VecStart = Builder.CreateMul(VecIdx, Stride, "vec.start");
144 
145   // Get pointer to the start of the selected vector. Skip GEP creation,
146   // if we select vector 0.
147   if (isa<ConstantInt>(VecStart) && cast<ConstantInt>(VecStart)->isZero())
148     VecStart = BasePtr;
149   else
150     VecStart = Builder.CreateGEP(EltType, BasePtr, VecStart, "vec.gep");
151 
152   // Cast elementwise vector start pointer to a pointer to a vector
153   // (EltType x NumElements)*.
154   auto *VecType = FixedVectorType::get(EltType, NumElements);
155   Type *VecPtrType = PointerType::get(VecType, AS);
156   return Builder.CreatePointerCast(VecStart, VecPtrType, "vec.cast");
157 }
158 
159 /// LowerMatrixIntrinsics contains the methods used to lower matrix intrinsics.
160 ///
161 /// Currently, the lowering for each matrix intrinsic is done as follows:
162 /// 1. Propagate the shape information from intrinsics to connected
163 /// instructions.
164 /// 2. Lower instructions with shape information (assuming column-major layout).
165 ///  The lowering works similarly using row-major layout.
166 ///  2.1. Get column vectors for each argument. If we already lowered the
167 ///       definition of an argument, use the produced column vectors directly.
168 ///       If not, split the operand vector containing an embedded matrix into
169 ///       a set of column vectors,
170 ///  2.2. Lower the instruction in terms of column major operations, which
171 ///       yields a set of column vectors containing result matrix. Note that we
172 ///       lower all instructions that have shape information. Besides the
173 ///       intrinsics, this includes stores for example.
174 ///  2.3. Update uses of the lowered instruction. If we have shape information
175 ///       for a user, there is nothing to do, as we will look up the result
176 ///       column matrix when lowering the user. For other uses, we embed the
177 ///       result matrix in a flat vector and update the use.
178 ///  2.4. Cache the result column matrix for the instruction we lowered
179 /// 3. After we lowered all instructions in a function, remove the now
180 ///    obsolete instructions.
181 ///
182 class LowerMatrixIntrinsics {
183   Function &Func;
184   const DataLayout &DL;
185   const TargetTransformInfo &TTI;
186   AliasAnalysis *AA;
187   DominatorTree *DT;
188   LoopInfo *LI;
189   OptimizationRemarkEmitter *ORE;
190 
191   /// Contains estimates of the number of operations (loads, stores, compute) required to lower a matrix operation.
192   struct OpInfoTy {
193     /// Number of stores emitted to generate this matrix.
194     unsigned NumStores = 0;
195     /// Number of loads emitted to generate this matrix.
196     unsigned NumLoads = 0;
197     /// Number of compute operations emitted to generate this matrix.
198     unsigned NumComputeOps = 0;
199     /// Most of the time transposes can be fused with matrix multiplies or can
200     /// be folded away via algebraic simplifications.  This is the number of
201     /// transposes that we failed to make "free" via such optimizations.
202     unsigned NumExposedTransposes = 0;
203 
operator +=__anonb7c62bf10111::LowerMatrixIntrinsics::OpInfoTy204     OpInfoTy &operator+=(const OpInfoTy &RHS) {
205       NumStores += RHS.NumStores;
206       NumLoads += RHS.NumLoads;
207       NumComputeOps += RHS.NumComputeOps;
208       NumExposedTransposes += RHS.NumExposedTransposes;
209       return *this;
210     }
211   };
212 
213   /// Wrapper class representing a matrix as a set of vectors, either in row or
214   /// column major layout. All vectors must have the same vector type.
215   class MatrixTy {
216     SmallVector<Value *, 16> Vectors;
217 
218     OpInfoTy OpInfo;
219 
220     bool IsColumnMajor = true;
221 
222   public:
MatrixTy()223     MatrixTy()
224         : Vectors(),
225           IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
MatrixTy(ArrayRef<Value * > Vectors)226     MatrixTy(ArrayRef<Value *> Vectors)
227         : Vectors(Vectors.begin(), Vectors.end()),
228           IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
MatrixTy(unsigned NumRows,unsigned NumColumns,Type * EltTy)229     MatrixTy(unsigned NumRows, unsigned NumColumns, Type *EltTy)
230         : IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {
231 
232       unsigned D = isColumnMajor() ? NumColumns : NumRows;
233       for (unsigned J = 0; J < D; ++J)
234         addVector(UndefValue::get(FixedVectorType::get(
235             EltTy, isColumnMajor() ? NumRows : NumColumns)));
236     }
237 
getVector(unsigned i) const238     Value *getVector(unsigned i) const { return Vectors[i]; }
getColumn(unsigned i) const239     Value *getColumn(unsigned i) const {
240       assert(isColumnMajor() && "only supported for column-major matrixes");
241       return Vectors[i];
242     }
getRow(unsigned i) const243     Value *getRow(unsigned i) const {
244       assert(!isColumnMajor() && "only supported for row-major matrixes");
245       return Vectors[i];
246     }
247 
setVector(unsigned i,Value * V)248     void setVector(unsigned i, Value *V) { Vectors[i] = V; }
249 
getElementType() const250     Type *getElementType() const { return getVectorTy()->getElementType(); }
251 
getNumVectors() const252     unsigned getNumVectors() const {
253       if (isColumnMajor())
254         return getNumColumns();
255       return getNumRows();
256     }
257 
getNumColumns() const258     unsigned getNumColumns() const {
259       if (isColumnMajor())
260         return Vectors.size();
261       else {
262         assert(Vectors.size() > 0 && "Cannot call getNumRows without columns");
263         return cast<FixedVectorType>(Vectors[0]->getType())->getNumElements();
264       }
265     }
getNumRows() const266     unsigned getNumRows() const {
267       if (isColumnMajor()) {
268         assert(Vectors.size() > 0 && "Cannot call getNumRows without columns");
269         return cast<FixedVectorType>(Vectors[0]->getType())->getNumElements();
270       } else
271         return Vectors.size();
272     }
273 
addVector(Value * V)274     void addVector(Value *V) { Vectors.push_back(V); }
getColumnTy()275     VectorType *getColumnTy() {
276       assert(isColumnMajor() && "only supported for column-major matrixes");
277       return getVectorTy();
278     }
279 
getVectorTy() const280     VectorType *getVectorTy() const {
281       return cast<VectorType>(Vectors[0]->getType());
282     }
283 
columns()284     iterator_range<SmallVector<Value *, 8>::iterator> columns() {
285       assert(isColumnMajor() &&
286              "columns() only supported for column-major matrixes");
287       return make_range(Vectors.begin(), Vectors.end());
288     }
289 
vectors()290     iterator_range<SmallVector<Value *, 8>::iterator> vectors() {
291       return make_range(Vectors.begin(), Vectors.end());
292     }
293 
294     /// Embed the vectors of the matrix into a flat vector by concatenating
295     /// them.
embedInVector(IRBuilder<> & Builder) const296     Value *embedInVector(IRBuilder<> &Builder) const {
297       return Vectors.size() == 1 ? Vectors[0]
298                                  : concatenateVectors(Builder, Vectors);
299     }
300 
addNumLoads(unsigned N)301     MatrixTy &addNumLoads(unsigned N) {
302       OpInfo.NumLoads += N;
303       return *this;
304     }
305 
setNumLoads(unsigned N)306     void setNumLoads(unsigned N) { OpInfo.NumLoads = N; }
307 
addNumStores(unsigned N)308     MatrixTy &addNumStores(unsigned N) {
309       OpInfo.NumStores += N;
310       return *this;
311     }
312 
addNumExposedTransposes(unsigned N)313     MatrixTy &addNumExposedTransposes(unsigned N) {
314       OpInfo.NumExposedTransposes += N;
315       return *this;
316     }
317 
addNumComputeOps(unsigned N)318     MatrixTy &addNumComputeOps(unsigned N) {
319       OpInfo.NumComputeOps += N;
320       return *this;
321     }
322 
getNumStores() const323     unsigned getNumStores() const { return OpInfo.NumStores; }
getNumLoads() const324     unsigned getNumLoads() const { return OpInfo.NumLoads; }
getNumComputeOps() const325     unsigned getNumComputeOps() const { return OpInfo.NumComputeOps; }
326 
getOpInfo() const327     const OpInfoTy &getOpInfo() const { return OpInfo; }
328 
isColumnMajor() const329     bool isColumnMajor() const { return IsColumnMajor; }
330 
getStride() const331     unsigned getStride() const {
332       if (isColumnMajor())
333         return getNumRows();
334       return getNumColumns();
335     }
336 
337     /// Extract a vector of \p NumElts starting at index (\p I, \p J). If the
338     /// matrix is column-major, the result vector is extracted from a column
339     /// vector, otherwise from a row vector.
extractVector(unsigned I,unsigned J,unsigned NumElts,IRBuilder<> & Builder) const340     Value *extractVector(unsigned I, unsigned J, unsigned NumElts,
341                          IRBuilder<> &Builder) const {
342       Value *Vec = isColumnMajor() ? getColumn(J) : getRow(I);
343       return Builder.CreateShuffleVector(
344           Vec, createSequentialMask(isColumnMajor() ? I : J, NumElts, 0),
345           "block");
346     }
347   };
348 
349   struct ShapeInfo {
350     unsigned NumRows;
351     unsigned NumColumns;
352 
353     bool IsColumnMajor;
354 
ShapeInfo__anonb7c62bf10111::LowerMatrixIntrinsics::ShapeInfo355     ShapeInfo(unsigned NumRows = 0, unsigned NumColumns = 0)
356         : NumRows(NumRows), NumColumns(NumColumns),
357           IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
358 
ShapeInfo__anonb7c62bf10111::LowerMatrixIntrinsics::ShapeInfo359     ShapeInfo(Value *NumRows, Value *NumColumns)
360         : ShapeInfo(cast<ConstantInt>(NumRows)->getZExtValue(),
361                     cast<ConstantInt>(NumColumns)->getZExtValue()) {}
362 
operator ==__anonb7c62bf10111::LowerMatrixIntrinsics::ShapeInfo363     bool operator==(const ShapeInfo &other) {
364       return NumRows == other.NumRows && NumColumns == other.NumColumns;
365     }
operator !=__anonb7c62bf10111::LowerMatrixIntrinsics::ShapeInfo366     bool operator!=(const ShapeInfo &other) { return !(*this == other); }
367 
368     /// Returns true if shape-information is defined, meaning both dimensions
369     /// are != 0.
operator bool__anonb7c62bf10111::LowerMatrixIntrinsics::ShapeInfo370     operator bool() const {
371       assert(NumRows == 0 || NumColumns != 0);
372       return NumRows != 0;
373     }
374 
getStride__anonb7c62bf10111::LowerMatrixIntrinsics::ShapeInfo375     unsigned getStride() const {
376       if (IsColumnMajor)
377         return NumRows;
378       return NumColumns;
379     }
380 
getNumVectors__anonb7c62bf10111::LowerMatrixIntrinsics::ShapeInfo381     unsigned getNumVectors() const {
382       if (IsColumnMajor)
383         return NumColumns;
384       return NumRows;
385     }
386   };
387 
388   /// Maps instructions to their shape information. The shape information
389   /// describes the shape to be used while lowering. This matches the shape of
390   /// the result value of the instruction, with the only exceptions being store
391   /// instructions and the matrix_column_major_store intrinsics. For those, the
392   /// shape information indicates that those instructions should be lowered
393   /// using shape information as well.  A ValueMap is used so that when
394   /// sub-passes like optimizeTransposes performs RAUW the map stays
395   /// up-to-date.
396   ValueMap<Value *, ShapeInfo> ShapeMap;
397 
398   /// List of instructions to remove. While lowering, we are not replacing all
399   /// users of a lowered instruction, if shape information is available and
400   /// those need to be removed after we finished lowering.
401   SmallVector<Instruction *, 16> ToRemove;
402 
403   /// Map from instructions to their produced column matrix.
404   MapVector<Value *, MatrixTy> Inst2ColumnMatrix;
405 
406 private:
getFastMathFlags(Instruction * Inst)407   static FastMathFlags getFastMathFlags(Instruction *Inst) {
408     FastMathFlags FMF;
409 
410     if (isa<FPMathOperator>(*Inst))
411       FMF = Inst->getFastMathFlags();
412 
413     FMF.setAllowContract(AllowContractEnabled || FMF.allowContract());
414 
415     return FMF;
416   }
417 
418 public:
LowerMatrixIntrinsics(Function & F,TargetTransformInfo & TTI,AliasAnalysis * AA,DominatorTree * DT,LoopInfo * LI,OptimizationRemarkEmitter * ORE)419   LowerMatrixIntrinsics(Function &F, TargetTransformInfo &TTI,
420                         AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI,
421                         OptimizationRemarkEmitter *ORE)
422       : Func(F), DL(F.getParent()->getDataLayout()), TTI(TTI), AA(AA), DT(DT),
423         LI(LI), ORE(ORE) {}
424 
getNumOps(Type * VT)425   unsigned getNumOps(Type *VT) {
426     assert(isa<VectorType>(VT) && "Expected vector type");
427     return getNumOps(VT->getScalarType(),
428                      cast<FixedVectorType>(VT)->getNumElements());
429   }
430 
431   /// Is this the minimal version executed in the backend pipelines.
isMinimal() const432   bool isMinimal() const {
433     return !DT;
434   }
435 
436   /// Return the estimated number of vector ops required for an operation on
437   /// \p VT * N.
getNumOps(Type * ST,unsigned N)438   unsigned getNumOps(Type *ST, unsigned N) {
439     return std::ceil((ST->getPrimitiveSizeInBits() * N).getFixedSize() /
440                      double(TTI.getRegisterBitWidth(
441                                    TargetTransformInfo::RGK_FixedWidthVector)
442                                 .getFixedSize()));
443   }
444 
445   /// Return the set of vectors that a matrix value is lowered to.
446   ///
447   /// If we lowered \p MatrixVal, just return the cache result matrix. Otherwise
448   /// split the flat vector \p MatrixVal containing a matrix with shape \p SI
449   /// into vectors.
getMatrix(Value * MatrixVal,const ShapeInfo & SI,IRBuilder<> & Builder)450   MatrixTy getMatrix(Value *MatrixVal, const ShapeInfo &SI,
451                      IRBuilder<> &Builder) {
452     VectorType *VType = dyn_cast<VectorType>(MatrixVal->getType());
453     assert(VType && "MatrixVal must be a vector type");
454     assert(cast<FixedVectorType>(VType)->getNumElements() ==
455                SI.NumRows * SI.NumColumns &&
456            "The vector size must match the number of matrix elements");
457 
458     // Check if we lowered MatrixVal using shape information. In that case,
459     // return the existing matrix, if it matches the requested shape
460     // information. If there is a mis-match, embed the result in a flat
461     // vector and split it later.
462     auto Found = Inst2ColumnMatrix.find(MatrixVal);
463     if (Found != Inst2ColumnMatrix.end()) {
464       MatrixTy &M = Found->second;
465       // Return the found matrix, if its shape matches the requested shape
466       // information
467       if (SI.NumRows == M.getNumRows() && SI.NumColumns == M.getNumColumns())
468         return M;
469 
470       MatrixVal = M.embedInVector(Builder);
471     }
472 
473     // Otherwise split MatrixVal.
474     SmallVector<Value *, 16> SplitVecs;
475     for (unsigned MaskStart = 0;
476          MaskStart < cast<FixedVectorType>(VType)->getNumElements();
477          MaskStart += SI.getStride()) {
478       Value *V = Builder.CreateShuffleVector(
479           MatrixVal, createSequentialMask(MaskStart, SI.getStride(), 0),
480           "split");
481       SplitVecs.push_back(V);
482     }
483 
484     return {SplitVecs};
485   }
486 
487   /// If \p V already has a known shape return false.  Otherwise set the shape
488   /// for instructions that support it.
setShapeInfo(Value * V,ShapeInfo Shape)489   bool setShapeInfo(Value *V, ShapeInfo Shape) {
490     assert(Shape && "Shape not set");
491     if (isa<UndefValue>(V) || !supportsShapeInfo(V))
492       return false;
493 
494     auto SIter = ShapeMap.find(V);
495     if (SIter != ShapeMap.end()) {
496       LLVM_DEBUG(dbgs() << "  not overriding existing shape: "
497                         << SIter->second.NumRows << " "
498                         << SIter->second.NumColumns << " for " << *V << "\n");
499       return false;
500     }
501 
502     ShapeMap.insert({V, Shape});
503     LLVM_DEBUG(dbgs() << "  " << Shape.NumRows << " x " << Shape.NumColumns
504                       << " for " << *V << "\n");
505     return true;
506   }
507 
isUniformShape(Value * V)508   bool isUniformShape(Value *V) {
509     Instruction *I = dyn_cast<Instruction>(V);
510     if (!I)
511       return true;
512 
513     switch (I->getOpcode()) {
514     case Instruction::FAdd:
515     case Instruction::FSub:
516     case Instruction::FMul: // Scalar multiply.
517     case Instruction::FNeg:
518     case Instruction::Add:
519     case Instruction::Mul:
520     case Instruction::Sub:
521       return true;
522     default:
523       return false;
524     }
525   }
526 
527   /// Returns true if shape information can be used for \p V. The supported
528   /// instructions must match the instructions that can be lowered by this pass.
supportsShapeInfo(Value * V)529   bool supportsShapeInfo(Value *V) {
530     Instruction *Inst = dyn_cast<Instruction>(V);
531     if (!Inst)
532       return false;
533 
534     IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
535     if (II)
536       switch (II->getIntrinsicID()) {
537       case Intrinsic::matrix_multiply:
538       case Intrinsic::matrix_transpose:
539       case Intrinsic::matrix_column_major_load:
540       case Intrinsic::matrix_column_major_store:
541         return true;
542       default:
543         return false;
544       }
545     return isUniformShape(V) || isa<StoreInst>(V) || isa<LoadInst>(V);
546   }
547 
548   /// Propagate the shape information of instructions to their users.
549   /// The work list contains instructions for which we can compute the shape,
550   /// either based on the information provided by matrix intrinsics or known
551   /// shapes of operands.
552   SmallVector<Instruction *, 32>
propagateShapeForward(SmallVectorImpl<Instruction * > & WorkList)553   propagateShapeForward(SmallVectorImpl<Instruction *> &WorkList) {
554     SmallVector<Instruction *, 32> NewWorkList;
555     // Pop an element for which we guaranteed to have at least one of the
556     // operand shapes.  Add the shape for this and then add users to the work
557     // list.
558     LLVM_DEBUG(dbgs() << "Forward-propagate shapes:\n");
559     while (!WorkList.empty()) {
560       Instruction *Inst = WorkList.pop_back_val();
561 
562       // New entry, set the value and insert operands
563       bool Propagate = false;
564 
565       Value *MatrixA;
566       Value *MatrixB;
567       Value *M;
568       Value *N;
569       Value *K;
570       if (match(Inst, m_Intrinsic<Intrinsic::matrix_multiply>(
571                           m_Value(MatrixA), m_Value(MatrixB), m_Value(M),
572                           m_Value(N), m_Value(K)))) {
573         Propagate = setShapeInfo(Inst, {M, K});
574       } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_transpose>(
575                                  m_Value(MatrixA), m_Value(M), m_Value(N)))) {
576         // Flip dimensions.
577         Propagate = setShapeInfo(Inst, {N, M});
578       } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_column_major_store>(
579                                  m_Value(MatrixA), m_Value(), m_Value(),
580                                  m_Value(), m_Value(M), m_Value(N)))) {
581         Propagate = setShapeInfo(Inst, {N, M});
582       } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_column_major_load>(
583                                  m_Value(), m_Value(), m_Value(), m_Value(M),
584                                  m_Value(N)))) {
585         Propagate = setShapeInfo(Inst, {M, N});
586       } else if (match(Inst, m_Store(m_Value(MatrixA), m_Value()))) {
587         auto OpShape = ShapeMap.find(MatrixA);
588         if (OpShape != ShapeMap.end())
589           setShapeInfo(Inst, OpShape->second);
590         continue;
591       } else if (isUniformShape(Inst)) {
592         // Find the first operand that has a known shape and use that.
593         for (auto &Op : Inst->operands()) {
594           auto OpShape = ShapeMap.find(Op.get());
595           if (OpShape != ShapeMap.end()) {
596             Propagate |= setShapeInfo(Inst, OpShape->second);
597             break;
598           }
599         }
600       }
601 
602       if (Propagate) {
603         NewWorkList.push_back(Inst);
604         for (auto *User : Inst->users())
605           if (ShapeMap.count(User) == 0)
606             WorkList.push_back(cast<Instruction>(User));
607       }
608     }
609 
610     return NewWorkList;
611   }
612 
613   /// Propagate the shape to operands of instructions with shape information.
614   /// \p Worklist contains the instruction for which we already know the shape.
615   SmallVector<Instruction *, 32>
propagateShapeBackward(SmallVectorImpl<Instruction * > & WorkList)616   propagateShapeBackward(SmallVectorImpl<Instruction *> &WorkList) {
617     SmallVector<Instruction *, 32> NewWorkList;
618 
619     auto pushInstruction = [](Value *V,
620                               SmallVectorImpl<Instruction *> &WorkList) {
621       Instruction *I = dyn_cast<Instruction>(V);
622       if (I)
623         WorkList.push_back(I);
624     };
625     // Pop an element with known shape.  Traverse the operands, if their shape
626     // derives from the result shape and is unknown, add it and add them to the
627     // worklist.
628     LLVM_DEBUG(dbgs() << "Backward-propagate shapes:\n");
629     while (!WorkList.empty()) {
630       Value *V = WorkList.pop_back_val();
631 
632       size_t BeforeProcessingV = WorkList.size();
633       if (!isa<Instruction>(V))
634         continue;
635 
636       Value *MatrixA;
637       Value *MatrixB;
638       Value *M;
639       Value *N;
640       Value *K;
641       if (match(V, m_Intrinsic<Intrinsic::matrix_multiply>(
642                        m_Value(MatrixA), m_Value(MatrixB), m_Value(M),
643                        m_Value(N), m_Value(K)))) {
644         if (setShapeInfo(MatrixA, {M, N}))
645           pushInstruction(MatrixA, WorkList);
646 
647         if (setShapeInfo(MatrixB, {N, K}))
648           pushInstruction(MatrixB, WorkList);
649 
650       } else if (match(V, m_Intrinsic<Intrinsic::matrix_transpose>(
651                               m_Value(MatrixA), m_Value(M), m_Value(N)))) {
652         // Flip dimensions.
653         if (setShapeInfo(MatrixA, {M, N}))
654           pushInstruction(MatrixA, WorkList);
655       } else if (match(V, m_Intrinsic<Intrinsic::matrix_column_major_store>(
656                               m_Value(MatrixA), m_Value(), m_Value(), m_Value(),
657                               m_Value(M), m_Value(N)))) {
658         if (setShapeInfo(MatrixA, {M, N})) {
659           pushInstruction(MatrixA, WorkList);
660         }
661       } else if (isa<LoadInst>(V) ||
662                  match(V, m_Intrinsic<Intrinsic::matrix_column_major_load>())) {
663         // Nothing to do, no matrix input.
664       } else if (isa<StoreInst>(V)) {
665         // Nothing to do.  We forward-propagated to this so we would just
666         // backward propagate to an instruction with an already known shape.
667       } else if (isUniformShape(V)) {
668         // Propagate to all operands.
669         ShapeInfo Shape = ShapeMap[V];
670         for (Use &U : cast<Instruction>(V)->operands()) {
671           if (setShapeInfo(U.get(), Shape))
672             pushInstruction(U.get(), WorkList);
673         }
674       }
675       // After we discovered new shape info for new instructions in the
676       // worklist, we use their users as seeds for the next round of forward
677       // propagation.
678       for (size_t I = BeforeProcessingV; I != WorkList.size(); I++)
679         for (User *U : WorkList[I]->users())
680           if (isa<Instruction>(U) && V != U)
681             NewWorkList.push_back(cast<Instruction>(U));
682     }
683     return NewWorkList;
684   }
685 
686   /// Try moving transposes in order to fold them away or into multiplies.
optimizeTransposes()687   void optimizeTransposes() {
688     auto ReplaceAllUsesWith = [this](Instruction &Old, Value *New) {
689       // We need to remove Old from the ShapeMap otherwise RAUW will replace it
690       // with New. We should only add New it it supportsShapeInfo so we insert
691       // it conditionally instead.
692       auto S = ShapeMap.find(&Old);
693       if (S != ShapeMap.end()) {
694         ShapeMap.erase(S);
695         if (supportsShapeInfo(New))
696           ShapeMap.insert({New, S->second});
697       }
698       Old.replaceAllUsesWith(New);
699     };
700 
701     // First sink all transposes inside matmuls, hoping that we end up with NN,
702     // NT or TN variants.
703     for (BasicBlock &BB : reverse(Func)) {
704       for (auto II = BB.rbegin(); II != BB.rend();) {
705         Instruction &I = *II;
706         // We may remove II.  By default continue on the next/prev instruction.
707         ++II;
708         // If we were to erase II, move again.
709         auto EraseFromParent = [&II](Value *V) {
710           auto *Inst = cast<Instruction>(V);
711           if (Inst->use_empty()) {
712             if (Inst == &*II) {
713               ++II;
714             }
715             Inst->eraseFromParent();
716           }
717         };
718 
719         // If we're creating a new instruction, continue from there.
720         Instruction *NewInst = nullptr;
721 
722         IRBuilder<> IB(&I);
723         MatrixBuilder<IRBuilder<>> Builder(IB);
724 
725         Value *TA, *TAMA, *TAMB;
726         ConstantInt *R, *K, *C;
727         if (match(&I, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(TA)))) {
728 
729           // Transpose of a transpose is a nop
730           Value *TATA;
731           if (match(TA,
732                     m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(TATA)))) {
733             ReplaceAllUsesWith(I, TATA);
734             EraseFromParent(&I);
735             EraseFromParent(TA);
736           }
737 
738           // (A * B)^t -> B^t * A^t
739           // RxK KxC      CxK   KxR
740           else if (match(TA, m_Intrinsic<Intrinsic::matrix_multiply>(
741                                  m_Value(TAMA), m_Value(TAMB), m_ConstantInt(R),
742                                  m_ConstantInt(K), m_ConstantInt(C)))) {
743             Value *T0 = Builder.CreateMatrixTranspose(TAMB, K->getZExtValue(),
744                                                       C->getZExtValue(),
745                                                       TAMB->getName() + "_t");
746             // We are being run after shape prop, add shape for newly created
747             // instructions so that we lower them later.
748             setShapeInfo(T0, {C, K});
749             Value *T1 = Builder.CreateMatrixTranspose(TAMA, R->getZExtValue(),
750                                                       K->getZExtValue(),
751                                                       TAMA->getName() + "_t");
752             setShapeInfo(T1, {K, R});
753             NewInst = Builder.CreateMatrixMultiply(T0, T1, C->getZExtValue(),
754                                                    K->getZExtValue(),
755                                                    R->getZExtValue(), "mmul");
756             ReplaceAllUsesWith(I, NewInst);
757             EraseFromParent(&I);
758             EraseFromParent(TA);
759           }
760         }
761 
762         // If we replaced I with a new instruction, continue from there.
763         if (NewInst)
764           II = std::next(BasicBlock::reverse_iterator(NewInst));
765       }
766     }
767 
768     // If we have a TT matmul, lift the transpose.  We may be able to fold into
769     // consuming multiply.
770     for (BasicBlock &BB : Func) {
771       for (BasicBlock::iterator II = BB.begin(); II != BB.end();) {
772         Instruction *I = &*II;
773         // We may remove I.
774         ++II;
775         Value *A, *B, *AT, *BT;
776         ConstantInt *R, *K, *C;
777         // A^t * B ^t -> (B * A)^t
778         if (match(&*I, m_Intrinsic<Intrinsic::matrix_multiply>(
779                            m_Value(A), m_Value(B), m_ConstantInt(R),
780                            m_ConstantInt(K), m_ConstantInt(C))) &&
781             match(A, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(AT))) &&
782             match(B, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value((BT))))) {
783           IRBuilder<> IB(&*I);
784           MatrixBuilder<IRBuilder<>> Builder(IB);
785           Value *M = Builder.CreateMatrixMultiply(
786               BT, AT, C->getZExtValue(), K->getZExtValue(), R->getZExtValue());
787           setShapeInfo(M, {C, R});
788           Instruction *NewInst = Builder.CreateMatrixTranspose(
789               M, C->getZExtValue(), R->getZExtValue());
790           ReplaceAllUsesWith(*I, NewInst);
791           if (I->use_empty())
792             I->eraseFromParent();
793           if (A->use_empty())
794             cast<Instruction>(A)->eraseFromParent();
795           if (A != B && B->use_empty())
796             cast<Instruction>(B)->eraseFromParent();
797         }
798       }
799     }
800   }
801 
Visit()802   bool Visit() {
803     SmallVector<Instruction *, 32> WorkList;
804 
805     // Initially only the shape of matrix intrinsics is known.
806     // Initialize the work list with ops carrying shape information.
807     for (BasicBlock &BB : Func)
808       for (Instruction &Inst : BB) {
809         IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Inst);
810         if (!II)
811           continue;
812 
813         switch (II->getIntrinsicID()) {
814         case Intrinsic::matrix_multiply:
815         case Intrinsic::matrix_transpose:
816         case Intrinsic::matrix_column_major_load:
817         case Intrinsic::matrix_column_major_store:
818           WorkList.push_back(&Inst);
819           break;
820         default:
821           break;
822         }
823       }
824 
825     // Avoid unnecessary work if there are no matrix intrinsics in the function.
826     if (WorkList.empty())
827       return false;
828 
829     // Propagate shapes until nothing changes any longer.
830     while (!WorkList.empty()) {
831       WorkList = propagateShapeForward(WorkList);
832       WorkList = propagateShapeBackward(WorkList);
833     }
834 
835     if (!isMinimal()) {
836       optimizeTransposes();
837       LLVM_DEBUG({
838         dbgs() << "Dump after matrix transpose optimization:\n";
839         Func.dump();
840       });
841     }
842 
843     bool Changed = false;
844     SmallVector<CallInst *, 16> MaybeFusableInsts;
845     SmallVector<Instruction *, 16> MatrixInsts;
846 
847     // First, collect all instructions with shape information and candidates for
848     // fusion (currently only matrix multiplies).
849     ReversePostOrderTraversal<Function *> RPOT(&Func);
850     for (auto *BB : RPOT)
851       for (Instruction &I : *BB) {
852         if (ShapeMap.find(&I) == ShapeMap.end())
853           continue;
854         if (match(&I, m_Intrinsic<Intrinsic::matrix_multiply>()))
855           MaybeFusableInsts.push_back(cast<CallInst>(&I));
856         MatrixInsts.push_back(&I);
857       }
858 
859     // Second, try to fuse candidates.
860     SmallPtrSet<Instruction *, 16> FusedInsts;
861     for (CallInst *CI : MaybeFusableInsts)
862       LowerMatrixMultiplyFused(CI, FusedInsts);
863     Changed = !FusedInsts.empty();
864 
865     // Third, lower remaining instructions with shape information.
866     for (Instruction *Inst : MatrixInsts) {
867       if (FusedInsts.count(Inst))
868         continue;
869 
870       IRBuilder<> Builder(Inst);
871 
872       if (CallInst *CInst = dyn_cast<CallInst>(Inst))
873         Changed |= VisitCallInst(CInst);
874 
875       Value *Op1;
876       Value *Op2;
877       if (auto *BinOp = dyn_cast<BinaryOperator>(Inst))
878         Changed |= VisitBinaryOperator(BinOp);
879       if (auto *UnOp = dyn_cast<UnaryOperator>(Inst))
880         Changed |= VisitUnaryOperator(UnOp);
881       if (match(Inst, m_Load(m_Value(Op1))))
882         Changed |= VisitLoad(cast<LoadInst>(Inst), Op1, Builder);
883       else if (match(Inst, m_Store(m_Value(Op1), m_Value(Op2))))
884         Changed |= VisitStore(cast<StoreInst>(Inst), Op1, Op2, Builder);
885     }
886 
887     if (ORE) {
888       RemarkGenerator RemarkGen(Inst2ColumnMatrix, *ORE, Func);
889       RemarkGen.emitRemarks();
890     }
891 
892     // Delete the instructions backwards, as it has a reduced likelihood of
893     // having to update as many def-use and use-def chains.
894     //
895     // Because we add to ToRemove during fusion we can't guarantee that defs
896     // are before uses.  Change uses to undef temporarily as these should get
897     // removed as well.
898     //
899     // For verification, we keep track of where we changed uses to undefs in
900     // UndefedInsts and then check that we in fact remove them.
901     SmallSet<Instruction *, 16> UndefedInsts;
902     for (auto *Inst : reverse(ToRemove)) {
903       for (auto I = Inst->use_begin(), E = Inst->use_end(); I != E;) {
904         Use &U = *I++;
905         if (auto *Undefed = dyn_cast<Instruction>(U.getUser()))
906           UndefedInsts.insert(Undefed);
907         U.set(UndefValue::get(Inst->getType()));
908       }
909       Inst->eraseFromParent();
910       UndefedInsts.erase(Inst);
911     }
912     if (!UndefedInsts.empty()) {
913       // If we didn't remove all undefed instructions, it's a hard error.
914       dbgs() << "Undefed but present instructions:\n";
915       for (auto *I : UndefedInsts)
916         dbgs() << *I << "\n";
917       llvm_unreachable("Undefed but instruction not removed");
918     }
919 
920     return Changed;
921   }
922 
923   /// Turns \p BasePtr into an elementwise pointer to \p EltType.
createElementPtr(Value * BasePtr,Type * EltType,IRBuilder<> & Builder)924   Value *createElementPtr(Value *BasePtr, Type *EltType, IRBuilder<> &Builder) {
925     unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();
926     Type *EltPtrType = PointerType::get(EltType, AS);
927     return Builder.CreatePointerCast(BasePtr, EltPtrType);
928   }
929 
930   /// Replace intrinsic calls
VisitCallInst(CallInst * Inst)931   bool VisitCallInst(CallInst *Inst) {
932     if (!Inst->getCalledFunction() || !Inst->getCalledFunction()->isIntrinsic())
933       return false;
934 
935     switch (Inst->getCalledFunction()->getIntrinsicID()) {
936     case Intrinsic::matrix_multiply:
937       LowerMultiply(Inst);
938       break;
939     case Intrinsic::matrix_transpose:
940       LowerTranspose(Inst);
941       break;
942     case Intrinsic::matrix_column_major_load:
943       LowerColumnMajorLoad(Inst);
944       break;
945     case Intrinsic::matrix_column_major_store:
946       LowerColumnMajorStore(Inst);
947       break;
948     default:
949       return false;
950     }
951     return true;
952   }
953 
954   /// Compute the alignment for a column/row \p Idx with \p Stride between them.
955   /// The address at \p Idx == 0 has alignment \p A. If \p Stride is a
956   /// ConstantInt, reduce the initial alignment based on the byte offset. For
957   /// non-ConstantInt strides, return the common alignment of the initial
958   /// alignment and the element size in bytes.
getAlignForIndex(unsigned Idx,Value * Stride,Type * ElementTy,MaybeAlign A) const959   Align getAlignForIndex(unsigned Idx, Value *Stride, Type *ElementTy,
960                          MaybeAlign A) const {
961     Align InitialAlign = DL.getValueOrABITypeAlignment(A, ElementTy);
962     if (Idx == 0)
963       return InitialAlign;
964 
965     TypeSize ElementSizeInBits = DL.getTypeSizeInBits(ElementTy);
966     if (auto *ConstStride = dyn_cast<ConstantInt>(Stride)) {
967       uint64_t StrideInBytes =
968           ConstStride->getZExtValue() * ElementSizeInBits / 8;
969       return commonAlignment(InitialAlign, Idx * StrideInBytes);
970     }
971     return commonAlignment(InitialAlign, ElementSizeInBits / 8);
972   }
973 
974   /// Load a matrix with \p Shape starting at \p Ptr and using \p Stride between
975   /// vectors.
loadMatrix(Type * Ty,Value * Ptr,MaybeAlign MAlign,Value * Stride,bool IsVolatile,ShapeInfo Shape,IRBuilder<> & Builder)976   MatrixTy loadMatrix(Type *Ty, Value *Ptr, MaybeAlign MAlign, Value *Stride,
977                       bool IsVolatile, ShapeInfo Shape, IRBuilder<> &Builder) {
978     auto *VType = cast<VectorType>(Ty);
979     Type *EltTy = VType->getElementType();
980     Type *VecTy = FixedVectorType::get(EltTy, Shape.getStride());
981     Value *EltPtr = createElementPtr(Ptr, EltTy, Builder);
982     MatrixTy Result;
983     for (unsigned I = 0, E = Shape.getNumVectors(); I < E; ++I) {
984       Value *GEP = computeVectorAddr(EltPtr, Builder.getInt64(I), Stride,
985                                      Shape.getStride(), EltTy, Builder);
986       Value *Vector = Builder.CreateAlignedLoad(
987           VecTy, GEP, getAlignForIndex(I, Stride, EltTy, MAlign),
988           IsVolatile, "col.load");
989 
990       Result.addVector(Vector);
991     }
992     return Result.addNumLoads(getNumOps(Result.getVectorTy()) *
993                               Result.getNumVectors());
994   }
995 
996   /// Loads a sub-matrix with shape \p ResultShape from a \p R x \p C matrix,
997   /// starting at \p MatrixPtr[I][J].
loadMatrix(Value * MatrixPtr,MaybeAlign Align,bool IsVolatile,ShapeInfo MatrixShape,Value * I,Value * J,ShapeInfo ResultShape,Type * EltTy,IRBuilder<> & Builder)998   MatrixTy loadMatrix(Value *MatrixPtr, MaybeAlign Align, bool IsVolatile,
999                       ShapeInfo MatrixShape, Value *I, Value *J,
1000                       ShapeInfo ResultShape, Type *EltTy,
1001                       IRBuilder<> &Builder) {
1002 
1003     Value *Offset = Builder.CreateAdd(
1004         Builder.CreateMul(J, Builder.getInt64(MatrixShape.getStride())), I);
1005 
1006     unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
1007     Value *EltPtr =
1008         Builder.CreatePointerCast(MatrixPtr, PointerType::get(EltTy, AS));
1009     Value *TileStart = Builder.CreateGEP(EltTy, EltPtr, Offset);
1010     auto *TileTy = FixedVectorType::get(EltTy, ResultShape.NumRows *
1011                                                    ResultShape.NumColumns);
1012     Type *TilePtrTy = PointerType::get(TileTy, AS);
1013     Value *TilePtr =
1014         Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
1015 
1016     return loadMatrix(TileTy, TilePtr, Align,
1017                       Builder.getInt64(MatrixShape.getStride()), IsVolatile,
1018                       ResultShape, Builder);
1019   }
1020 
1021   /// Lower a load instruction with shape information.
LowerLoad(Instruction * Inst,Value * Ptr,MaybeAlign Align,Value * Stride,bool IsVolatile,ShapeInfo Shape)1022   void LowerLoad(Instruction *Inst, Value *Ptr, MaybeAlign Align, Value *Stride,
1023                  bool IsVolatile, ShapeInfo Shape) {
1024     IRBuilder<> Builder(Inst);
1025     finalizeLowering(Inst,
1026                      loadMatrix(Inst->getType(), Ptr, Align, Stride, IsVolatile,
1027                                 Shape, Builder),
1028                      Builder);
1029   }
1030 
1031   /// Lowers llvm.matrix.column.major.load.
1032   ///
1033   /// The intrinsic loads a matrix from memory using a stride between columns.
LowerColumnMajorLoad(CallInst * Inst)1034   void LowerColumnMajorLoad(CallInst *Inst) {
1035     assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
1036            "Intrinsic only supports column-major layout!");
1037     Value *Ptr = Inst->getArgOperand(0);
1038     Value *Stride = Inst->getArgOperand(1);
1039     LowerLoad(Inst, Ptr, Inst->getParamAlign(0), Stride,
1040               cast<ConstantInt>(Inst->getArgOperand(2))->isOne(),
1041               {Inst->getArgOperand(3), Inst->getArgOperand(4)});
1042   }
1043 
1044   /// Stores a sub-matrix \p StoreVal into the \p R x \p C matrix starting at \p
1045   /// MatrixPtr[I][J].
storeMatrix(const MatrixTy & StoreVal,Value * MatrixPtr,MaybeAlign MAlign,bool IsVolatile,ShapeInfo MatrixShape,Value * I,Value * J,Type * EltTy,IRBuilder<> & Builder)1046   void storeMatrix(const MatrixTy &StoreVal, Value *MatrixPtr,
1047                    MaybeAlign MAlign, bool IsVolatile, ShapeInfo MatrixShape,
1048                    Value *I, Value *J, Type *EltTy, IRBuilder<> &Builder) {
1049     Value *Offset = Builder.CreateAdd(
1050         Builder.CreateMul(J, Builder.getInt64(MatrixShape.getStride())), I);
1051 
1052     unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
1053     Value *EltPtr =
1054         Builder.CreatePointerCast(MatrixPtr, PointerType::get(EltTy, AS));
1055     Value *TileStart = Builder.CreateGEP(EltTy, EltPtr, Offset);
1056     auto *TileTy = FixedVectorType::get(EltTy, StoreVal.getNumRows() *
1057                                                    StoreVal.getNumColumns());
1058     Type *TilePtrTy = PointerType::get(TileTy, AS);
1059     Value *TilePtr =
1060         Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
1061 
1062     storeMatrix(TileTy, StoreVal, TilePtr, MAlign,
1063                 Builder.getInt64(MatrixShape.getStride()), IsVolatile, Builder);
1064   }
1065 
1066   /// Store matrix \p StoreVal starting at \p Ptr and using \p Stride between
1067   /// vectors.
storeMatrix(Type * Ty,MatrixTy StoreVal,Value * Ptr,MaybeAlign MAlign,Value * Stride,bool IsVolatile,IRBuilder<> & Builder)1068   MatrixTy storeMatrix(Type *Ty, MatrixTy StoreVal, Value *Ptr,
1069                        MaybeAlign MAlign, Value *Stride, bool IsVolatile,
1070                        IRBuilder<> &Builder) {
1071     auto VType = cast<VectorType>(Ty);
1072     Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
1073     for (auto Vec : enumerate(StoreVal.vectors())) {
1074       Value *GEP = computeVectorAddr(EltPtr, Builder.getInt64(Vec.index()),
1075                                      Stride, StoreVal.getStride(),
1076                                      VType->getElementType(), Builder);
1077       Builder.CreateAlignedStore(Vec.value(), GEP,
1078                                  getAlignForIndex(Vec.index(), Stride,
1079                                                   VType->getElementType(),
1080                                                   MAlign),
1081                                  IsVolatile);
1082     }
1083     return MatrixTy().addNumStores(getNumOps(StoreVal.getVectorTy()) *
1084                                    StoreVal.getNumVectors());
1085   }
1086 
1087   /// Lower a store instruction with shape information.
LowerStore(Instruction * Inst,Value * Matrix,Value * Ptr,MaybeAlign A,Value * Stride,bool IsVolatile,ShapeInfo Shape)1088   void LowerStore(Instruction *Inst, Value *Matrix, Value *Ptr, MaybeAlign A,
1089                   Value *Stride, bool IsVolatile, ShapeInfo Shape) {
1090     IRBuilder<> Builder(Inst);
1091     auto StoreVal = getMatrix(Matrix, Shape, Builder);
1092     finalizeLowering(Inst,
1093                      storeMatrix(Matrix->getType(), StoreVal, Ptr, A, Stride,
1094                                  IsVolatile, Builder),
1095                      Builder);
1096   }
1097 
1098   /// Lowers llvm.matrix.column.major.store.
1099   ///
1100   /// The intrinsic store a matrix back memory using a stride between columns.
LowerColumnMajorStore(CallInst * Inst)1101   void LowerColumnMajorStore(CallInst *Inst) {
1102     assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
1103            "Intrinsic only supports column-major layout!");
1104     Value *Matrix = Inst->getArgOperand(0);
1105     Value *Ptr = Inst->getArgOperand(1);
1106     Value *Stride = Inst->getArgOperand(2);
1107     LowerStore(Inst, Matrix, Ptr, Inst->getParamAlign(1), Stride,
1108                cast<ConstantInt>(Inst->getArgOperand(3))->isOne(),
1109                {Inst->getArgOperand(4), Inst->getArgOperand(5)});
1110   }
1111 
1112   // Set elements I..I+NumElts-1 to Block
insertVector(Value * Col,unsigned I,Value * Block,IRBuilder<> & Builder)1113   Value *insertVector(Value *Col, unsigned I, Value *Block,
1114                       IRBuilder<> &Builder) {
1115 
1116     // First, bring Block to the same size as Col
1117     unsigned BlockNumElts =
1118         cast<FixedVectorType>(Block->getType())->getNumElements();
1119     unsigned NumElts = cast<FixedVectorType>(Col->getType())->getNumElements();
1120     assert(NumElts >= BlockNumElts && "Too few elements for current block");
1121 
1122     Block = Builder.CreateShuffleVector(
1123         Block, createSequentialMask(0, BlockNumElts, NumElts - BlockNumElts));
1124 
1125     // If Col is 7 long and I is 2 and BlockNumElts is 2 the mask is: 0, 1, 7,
1126     // 8, 4, 5, 6
1127     SmallVector<int, 16> Mask;
1128     unsigned i;
1129     for (i = 0; i < I; i++)
1130       Mask.push_back(i);
1131 
1132     unsigned VecNumElts =
1133         cast<FixedVectorType>(Col->getType())->getNumElements();
1134     for (; i < I + BlockNumElts; i++)
1135       Mask.push_back(i - I + VecNumElts);
1136 
1137     for (; i < VecNumElts; i++)
1138       Mask.push_back(i);
1139 
1140     return Builder.CreateShuffleVector(Col, Block, Mask);
1141   }
1142 
createMulAdd(Value * Sum,Value * A,Value * B,bool UseFPOp,IRBuilder<> & Builder,bool AllowContraction,unsigned & NumComputeOps)1143   Value *createMulAdd(Value *Sum, Value *A, Value *B, bool UseFPOp,
1144                       IRBuilder<> &Builder, bool AllowContraction,
1145                       unsigned &NumComputeOps) {
1146     NumComputeOps += getNumOps(A->getType());
1147     if (!Sum)
1148       return UseFPOp ? Builder.CreateFMul(A, B) : Builder.CreateMul(A, B);
1149 
1150     if (UseFPOp) {
1151       if (AllowContraction) {
1152         // Use fmuladd for floating point operations and let the backend decide
1153         // if that's profitable.
1154         Function *FMulAdd = Intrinsic::getDeclaration(
1155             Func.getParent(), Intrinsic::fmuladd, A->getType());
1156         return Builder.CreateCall(FMulAdd, {A, B, Sum});
1157       }
1158       NumComputeOps += getNumOps(A->getType());
1159       Value *Mul = Builder.CreateFMul(A, B);
1160       return Builder.CreateFAdd(Sum, Mul);
1161     }
1162 
1163     NumComputeOps += getNumOps(A->getType());
1164     Value *Mul = Builder.CreateMul(A, B);
1165     return Builder.CreateAdd(Sum, Mul);
1166   }
1167 
1168   /// Cache \p Matrix as result of \p Inst and update the uses of \p Inst. For
1169   /// users with shape information, there's nothing to do: they will use the
1170   /// cached value when they are lowered. For other users, \p Matrix is
1171   /// flattened and the uses are updated to use it. Also marks \p Inst for
1172   /// deletion.
finalizeLowering(Instruction * Inst,MatrixTy Matrix,IRBuilder<> & Builder)1173   void finalizeLowering(Instruction *Inst, MatrixTy Matrix,
1174                         IRBuilder<> &Builder) {
1175     auto inserted = Inst2ColumnMatrix.insert(std::make_pair(Inst, Matrix));
1176     (void)inserted;
1177     assert(inserted.second && "multiple matrix lowering mapping");
1178 
1179     ToRemove.push_back(Inst);
1180     Value *Flattened = nullptr;
1181     for (Use &U : llvm::make_early_inc_range(Inst->uses())) {
1182       if (ShapeMap.find(U.getUser()) == ShapeMap.end()) {
1183         if (!Flattened)
1184           Flattened = Matrix.embedInVector(Builder);
1185         U.set(Flattened);
1186       }
1187     }
1188   }
1189 
1190   /// Compute \p Result += \p A * \p B for input matrices with left-associating
1191   /// addition.
1192   ///
1193   /// We can fold a transpose into the operand that is used to extract scalars.
1194   /// This is the first operands with row-major and the second with
1195   /// column-major.  If \p IsScalarMatrixTransposed we assume the appropriate
1196   /// operand is transposed.
emitMatrixMultiply(MatrixTy & Result,const MatrixTy & A,const MatrixTy & B,IRBuilder<> & Builder,bool IsTiled,bool IsScalarMatrixTransposed,FastMathFlags FMF)1197   void emitMatrixMultiply(MatrixTy &Result, const MatrixTy &A,
1198                           const MatrixTy &B, IRBuilder<> &Builder, bool IsTiled,
1199                           bool IsScalarMatrixTransposed, FastMathFlags FMF) {
1200     const unsigned VF = std::max<unsigned>(
1201         TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
1202                 .getFixedSize() /
1203             Result.getElementType()->getPrimitiveSizeInBits().getFixedSize(),
1204         1U);
1205     unsigned R = Result.getNumRows();
1206     unsigned C = Result.getNumColumns();
1207     unsigned M = A.getNumColumns();
1208 
1209     bool IsFP = Result.getElementType()->isFloatingPointTy();
1210     assert(A.isColumnMajor() == B.isColumnMajor() &&
1211            Result.isColumnMajor() == A.isColumnMajor() &&
1212            "operands must agree on matrix layout");
1213     unsigned NumComputeOps = 0;
1214 
1215     Builder.setFastMathFlags(FMF);
1216 
1217     if (A.isColumnMajor()) {
1218       // Multiply columns from the first operand with scalars from the second
1219       // operand. Then move along the K axes and accumulate the columns.  With
1220       // this the adds can be vectorized without reassociation.
1221       for (unsigned J = 0; J < C; ++J) {
1222         unsigned BlockSize = VF;
1223         // If Result is zero, we don't need to accumulate in the K==0 iteration.
1224         bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));
1225 
1226         for (unsigned I = 0; I < R; I += BlockSize) {
1227           // Gradually lower the vectorization factor to cover the remainder.
1228           while (I + BlockSize > R)
1229             BlockSize /= 2;
1230 
1231           Value *Sum = IsTiled ? Result.extractVector(I, J, BlockSize, Builder)
1232                                : nullptr;
1233           for (unsigned K = 0; K < M; ++K) {
1234             Value *L = A.extractVector(I, K, BlockSize, Builder);
1235             Value *RH = Builder.CreateExtractElement(
1236                 B.getColumn(IsScalarMatrixTransposed ? K : J),
1237                 IsScalarMatrixTransposed ? J : K);
1238             Value *Splat = Builder.CreateVectorSplat(BlockSize, RH, "splat");
1239             Sum =
1240                 createMulAdd(isSumZero && K == 0 ? nullptr : Sum, L, Splat,
1241                              IsFP, Builder, FMF.allowContract(), NumComputeOps);
1242           }
1243           Result.setVector(J,
1244                            insertVector(Result.getVector(J), I, Sum, Builder));
1245         }
1246       }
1247     } else {
1248       // Multiply rows from the second operand with scalars from the first
1249       // operand. Then move along the K axes and accumulate the rows.  With this
1250       // the adds can be vectorized without reassociation.
1251       for (unsigned I = 0; I < R; ++I) {
1252         unsigned BlockSize = VF;
1253         bool isSumZero = isa<ConstantAggregateZero>(Result.getRow(I));
1254         for (unsigned J = 0; J < C; J += BlockSize) {
1255           // Gradually lower the vectorization factor to cover the remainder.
1256           while (J + BlockSize > C)
1257             BlockSize /= 2;
1258 
1259           Value *Sum = nullptr;
1260           for (unsigned K = 0; K < M; ++K) {
1261             Value *R = B.extractVector(K, J, BlockSize, Builder);
1262             Value *LH = Builder.CreateExtractElement(
1263                 A.getVector(IsScalarMatrixTransposed ? K : I),
1264                 IsScalarMatrixTransposed ? I : K);
1265             Value *Splat = Builder.CreateVectorSplat(BlockSize, LH, "splat");
1266             Sum =
1267                 createMulAdd(isSumZero && K == 0 ? nullptr : Sum, Splat, R,
1268                              IsFP, Builder, FMF.allowContract(), NumComputeOps);
1269           }
1270           Result.setVector(I,
1271                            insertVector(Result.getVector(I), J, Sum, Builder));
1272         }
1273       }
1274     }
1275     Result.addNumComputeOps(NumComputeOps);
1276   }
1277 
1278   /// Ensure that the memory in \p Load does not alias \p Store by potentially
1279   /// copying it to a new location.  This new or otherwise the original location
1280   /// is returned.
getNonAliasingPointer(LoadInst * Load,StoreInst * Store,CallInst * MatMul)1281   Value *getNonAliasingPointer(LoadInst *Load, StoreInst *Store,
1282                                CallInst *MatMul) {
1283     MemoryLocation StoreLoc = MemoryLocation::get(Store);
1284     MemoryLocation LoadLoc = MemoryLocation::get(Load);
1285 
1286     // If we can statically determine noalias we're good.
1287     if (AA->isNoAlias(LoadLoc, StoreLoc))
1288       return Load->getPointerOperand();
1289 
1290     // Create code to check if the memory locations of the Load and Store
1291     // overlap and if they do, copy Load's operand to a new buffer.
1292 
1293     // First, create  new blocks for 2n part of the check and the copy.
1294     BasicBlock *Check0 = MatMul->getParent();
1295     // FIXME: Use lazy DTU and update SplitBlock to accept a DTU instead of a
1296     // DT. Manually collect dominator tree updates, to avoid unnecessary work,
1297     // as we adjust Check0 and Check1's branches.
1298     SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
1299     for (BasicBlock *Succ : successors(Check0))
1300       DTUpdates.push_back({DT->Delete, Check0, Succ});
1301 
1302     BasicBlock *Check1 =
1303         SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI,
1304                    nullptr, "alias_cont");
1305     BasicBlock *Copy =
1306         SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI,
1307                    nullptr, "copy");
1308     BasicBlock *Fusion =
1309         SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI,
1310                    nullptr, "no_alias");
1311 
1312     // Check if the loaded memory location begins before the end of the store
1313     // location. If the condition holds, they might overlap, otherwise they are
1314     // guaranteed to not overlap.
1315     IRBuilder<> Builder(MatMul);
1316     Check0->getTerminator()->eraseFromParent();
1317     Builder.SetInsertPoint(Check0);
1318     Type *IntPtrTy = Builder.getIntPtrTy(Load->getModule()->getDataLayout());
1319     Value *StoreBegin = Builder.CreatePtrToInt(
1320         const_cast<Value *>(StoreLoc.Ptr), IntPtrTy, "store.begin");
1321     Value *StoreEnd = Builder.CreateAdd(
1322         StoreBegin, ConstantInt::get(IntPtrTy, StoreLoc.Size.getValue()),
1323         "store.end", true, true);
1324     Value *LoadBegin = Builder.CreatePtrToInt(const_cast<Value *>(LoadLoc.Ptr),
1325                                               IntPtrTy, "load.begin");
1326     Builder.CreateCondBr(Builder.CreateICmpULT(LoadBegin, StoreEnd), Check1,
1327                          Fusion);
1328 
1329     // Check if the store begins before the end of the load location. If the
1330     // condition holds, they alias, otherwise they are guaranteed to not
1331     // overlap.
1332     Check1->getTerminator()->eraseFromParent();
1333     Builder.SetInsertPoint(Check1, Check1->begin());
1334     Value *LoadEnd = Builder.CreateAdd(
1335         LoadBegin, ConstantInt::get(IntPtrTy, LoadLoc.Size.getValue()),
1336         "load.end", true, true);
1337     Builder.CreateCondBr(Builder.CreateICmpULT(StoreBegin, LoadEnd), Copy,
1338                          Fusion);
1339 
1340     // Copy load operand to new alloca.
1341     Builder.SetInsertPoint(Copy, Copy->begin());
1342     AllocaInst *NewLd =
1343         Builder.CreateAlloca(Load->getType(), Load->getPointerAddressSpace());
1344     Builder.CreateMemCpy(NewLd, NewLd->getAlign(),
1345                          Load->getPointerOperand(), Load->getAlign(),
1346                          LoadLoc.Size.getValue());
1347     Builder.SetInsertPoint(Fusion, Fusion->begin());
1348     PHINode *PHI = Builder.CreatePHI(Load->getPointerOperandType(), 3);
1349     PHI->addIncoming(Load->getPointerOperand(), Check0);
1350     PHI->addIncoming(Load->getPointerOperand(), Check1);
1351     PHI->addIncoming(NewLd, Copy);
1352 
1353     // Adjust DT.
1354     DTUpdates.push_back({DT->Insert, Check0, Check1});
1355     DTUpdates.push_back({DT->Insert, Check0, Fusion});
1356     DTUpdates.push_back({DT->Insert, Check1, Copy});
1357     DTUpdates.push_back({DT->Insert, Check1, Fusion});
1358     DT->applyUpdates(DTUpdates);
1359     return PHI;
1360   }
1361 
isFusionProfitable(CallInst * MatMul)1362   bool isFusionProfitable(CallInst *MatMul) {
1363     if (ForceFusion)
1364       return true;
1365 
1366     ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1367     ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1368 
1369     const unsigned R = LShape.NumRows;
1370     const unsigned C = RShape.NumColumns;
1371     const unsigned M = LShape.NumColumns;
1372     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1373 
1374     const unsigned VF = std::max<unsigned>(
1375         TTI.getRegisterBitWidth(TargetTransformInfo::RGK_FixedWidthVector)
1376                 .getFixedSize() /
1377             EltType->getPrimitiveSizeInBits().getFixedSize(),
1378         1U);
1379 
1380     // Cost model for tiling
1381     //
1382     // For tiling to be beneficial, we need reuse either along the R or
1383     // the C axis.  We vectorize along the R axis so that means at least
1384     // 3 elements.
1385     // TODO: Also consider cost of copying if operands alias.
1386     if (R <= VF && C == 1)
1387       return false;
1388     // Then we need enough elements to exceed the number of vector
1389     // registers we have.  Note that this is an oversimplification since
1390     // fusing also takes some extra loads which may exceed the number of
1391     // reloads necessary.
1392     unsigned Op0Regs = (R + VF - 1) / VF * M;
1393     unsigned Op1Regs = (M + VF - 1) / VF * C;
1394     return Op0Regs + Op1Regs > TTI.getNumberOfRegisters(true);
1395   }
1396 
getZeroMatrix(Type * EltType,unsigned R,unsigned C)1397   MatrixTy getZeroMatrix(Type *EltType, unsigned R, unsigned C) {
1398     MatrixTy Res;
1399     auto *ColumType = FixedVectorType::get(EltType, R);
1400     for (unsigned I = 0; I < C; ++I)
1401       Res.addVector(ConstantAggregateZero::get(ColumType));
1402     return Res;
1403   }
1404 
createTiledLoops(CallInst * MatMul,Value * LPtr,ShapeInfo LShape,Value * RPtr,ShapeInfo RShape,StoreInst * Store)1405   void createTiledLoops(CallInst *MatMul, Value *LPtr, ShapeInfo LShape,
1406                         Value *RPtr, ShapeInfo RShape, StoreInst *Store) {
1407     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1408 
1409     // Create the main tiling loop nest.
1410     TileInfo TI(LShape.NumRows, RShape.NumColumns, LShape.NumColumns, TileSize);
1411     DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
1412     Instruction *InsertI = cast<Instruction>(MatMul);
1413     BasicBlock *Start = InsertI->getParent();
1414     BasicBlock *End =
1415         SplitBlock(InsertI->getParent(), InsertI, DT, LI, nullptr, "continue");
1416     IRBuilder<> Builder(MatMul);
1417     BasicBlock *InnerBody = TI.CreateTiledLoops(Start, End, Builder, DTU, *LI);
1418 
1419     Type *TileVecTy =
1420         FixedVectorType::get(MatMul->getType()->getScalarType(), TileSize);
1421     MatrixTy TileResult;
1422     // Insert in the inner loop header.
1423     Builder.SetInsertPoint(TI.InnerLoopHeader->getTerminator());
1424     // Create PHI nodes for the result columns to accumulate across iterations.
1425     SmallVector<PHINode *, 4> ColumnPhis;
1426     for (unsigned I = 0; I < TileSize; I++) {
1427       auto *Phi = Builder.CreatePHI(TileVecTy, 2, "result.vec." + Twine(I));
1428       Phi->addIncoming(ConstantAggregateZero::get(TileVecTy),
1429                        TI.RowLoopHeader->getSingleSuccessor());
1430       TileResult.addVector(Phi);
1431       ColumnPhis.push_back(Phi);
1432     }
1433 
1434     // Insert in the inner loop body, which computes
1435     //   Res += Load(CurrentRow, K) * Load(K, CurrentColumn)
1436     Builder.SetInsertPoint(InnerBody->getTerminator());
1437     // Load tiles of the operands.
1438     MatrixTy A = loadMatrix(LPtr, {}, false, LShape, TI.CurrentRow, TI.CurrentK,
1439                             {TileSize, TileSize}, EltType, Builder);
1440     MatrixTy B = loadMatrix(RPtr, {}, false, RShape, TI.CurrentK, TI.CurrentCol,
1441                             {TileSize, TileSize}, EltType, Builder);
1442     emitMatrixMultiply(TileResult, A, B, Builder, true, false,
1443                        getFastMathFlags(MatMul));
1444     // Store result after the inner loop is done.
1445     Builder.SetInsertPoint(TI.RowLoopLatch->getTerminator());
1446     storeMatrix(TileResult, Store->getPointerOperand(), Store->getAlign(),
1447                 Store->isVolatile(), {LShape.NumRows, RShape.NumColumns},
1448                 TI.CurrentRow, TI.CurrentCol, EltType, Builder);
1449 
1450     for (unsigned I = 0; I < TileResult.getNumVectors(); I++)
1451       ColumnPhis[I]->addIncoming(TileResult.getVector(I), TI.InnerLoopLatch);
1452 
1453     // Force unrolling of a few iterations of the inner loop, to make sure there
1454     // is enough work per iteration.
1455     // FIXME: The unroller should make this decision directly instead, but
1456     // currently the cost-model is not up to the task.
1457     unsigned InnerLoopUnrollCount = std::min(10u, LShape.NumColumns / TileSize);
1458     addStringMetadataToLoop(LI->getLoopFor(TI.InnerLoopHeader),
1459                             "llvm.loop.unroll.count", InnerLoopUnrollCount);
1460   }
1461 
emitSIMDTiling(CallInst * MatMul,LoadInst * LoadOp0,LoadInst * LoadOp1,StoreInst * Store,SmallPtrSetImpl<Instruction * > & FusedInsts)1462   void emitSIMDTiling(CallInst *MatMul, LoadInst *LoadOp0, LoadInst *LoadOp1,
1463                       StoreInst *Store,
1464                       SmallPtrSetImpl<Instruction *> &FusedInsts) {
1465     assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
1466            "Tiling only supported for column-major matrixes at the moment!");
1467     if (!isFusionProfitable(MatMul))
1468       return;
1469 
1470     ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1471     ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1472 
1473     const unsigned R = LShape.NumRows;
1474     const unsigned C = RShape.NumColumns;
1475     const unsigned M = LShape.NumColumns;
1476     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1477 
1478     Value *APtr = getNonAliasingPointer(LoadOp0, Store, MatMul);
1479     Value *BPtr = getNonAliasingPointer(LoadOp1, Store, MatMul);
1480     Value *CPtr = Store->getPointerOperand();
1481 
1482     if (TileUseLoops && (R % TileSize == 0 && C % TileSize == 0))
1483       createTiledLoops(MatMul, APtr, LShape, BPtr, RShape, Store);
1484     else {
1485       IRBuilder<> Builder(Store);
1486       for (unsigned J = 0; J < C; J += TileSize)
1487         for (unsigned I = 0; I < R; I += TileSize) {
1488           const unsigned TileR = std::min(R - I, unsigned(TileSize));
1489           const unsigned TileC = std::min(C - J, unsigned(TileSize));
1490           MatrixTy Res = getZeroMatrix(EltType, TileR, TileC);
1491 
1492           for (unsigned K = 0; K < M; K += TileSize) {
1493             const unsigned TileM = std::min(M - K, unsigned(TileSize));
1494             MatrixTy A =
1495                 loadMatrix(APtr, LoadOp0->getAlign(), LoadOp0->isVolatile(),
1496                            LShape, Builder.getInt64(I), Builder.getInt64(K),
1497                            {TileR, TileM}, EltType, Builder);
1498             MatrixTy B =
1499                 loadMatrix(BPtr, LoadOp1->getAlign(), LoadOp1->isVolatile(),
1500                            RShape, Builder.getInt64(K), Builder.getInt64(J),
1501                            {TileM, TileC}, EltType, Builder);
1502             emitMatrixMultiply(Res, A, B, Builder, true, false,
1503                                getFastMathFlags(MatMul));
1504           }
1505           storeMatrix(Res, CPtr, Store->getAlign(), Store->isVolatile(), {R, M},
1506                       Builder.getInt64(I), Builder.getInt64(J), EltType,
1507                       Builder);
1508         }
1509     }
1510 
1511     // Mark eliminated instructions as fused and remove them.
1512     FusedInsts.insert(Store);
1513     FusedInsts.insert(MatMul);
1514     Store->eraseFromParent();
1515     MatMul->eraseFromParent();
1516     if (LoadOp0->hasNUses(0)) {
1517       FusedInsts.insert(LoadOp0);
1518       LoadOp0->eraseFromParent();
1519     }
1520     if (LoadOp1 != LoadOp0 && LoadOp1->hasNUses(0)) {
1521       FusedInsts.insert(LoadOp1);
1522       LoadOp1->eraseFromParent();
1523     }
1524   }
1525 
1526   /// Try to lower matrix multiply chains by fusing operations.
1527   ///
1528   /// Call finalizeLowering on lowered instructions.  Instructions that are
1529   /// completely eliminated by fusion are added to \p FusedInsts.
LowerMatrixMultiplyFused(CallInst * MatMul,SmallPtrSetImpl<Instruction * > & FusedInsts)1530   void LowerMatrixMultiplyFused(CallInst *MatMul,
1531                                 SmallPtrSetImpl<Instruction *> &FusedInsts) {
1532     if (!FuseMatrix || !DT)
1533       return;
1534 
1535     assert(AA && LI && "Analyses should be available");
1536 
1537     Value *A = MatMul->getArgOperand(0);
1538     Value *B = MatMul->getArgOperand(1);
1539 
1540     // We can fold the transpose into the operand that is used to fetch scalars.
1541     Value *T;
1542     if (MatrixLayout == MatrixLayoutTy::ColumnMajor
1543             ? match(B, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(T)))
1544             : match(A, m_Intrinsic<Intrinsic::matrix_transpose>(m_Value(T)))) {
1545       IRBuilder<> Builder(MatMul);
1546       auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1547       ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1548       ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1549       const unsigned R = LShape.NumRows;
1550       const unsigned M = LShape.NumColumns;
1551       const unsigned C = RShape.NumColumns;
1552 
1553       MatrixTy MA;
1554       MatrixTy MB;
1555 
1556       Value *Transpose;
1557       if (MatrixLayout == MatrixLayoutTy::ColumnMajor) {
1558         MA = getMatrix(A, ShapeInfo(R, M), Builder);
1559         MB = getMatrix(T, ShapeInfo(C, M), Builder);
1560         Transpose = B;
1561       } else {
1562         MA = getMatrix(T, ShapeInfo(R, M), Builder);
1563         MB = getMatrix(B, ShapeInfo(C, M), Builder);
1564         Transpose = A;
1565       }
1566 
1567       // Initialize the output
1568       MatrixTy Result(R, C, EltType);
1569 
1570       emitMatrixMultiply(Result, MA, MB, Builder, false, true,
1571                          getFastMathFlags(MatMul));
1572 
1573       FusedInsts.insert(MatMul);
1574       if (Transpose->hasOneUse()) {
1575         FusedInsts.insert(cast<Instruction>(Transpose));
1576         ToRemove.push_back(cast<Instruction>(Transpose));
1577         // TODO: add a fake entry for the folded instruction so that this is
1578         // included in the expression in the remark.
1579         Inst2ColumnMatrix[Transpose] = MatrixTy(M, C, EltType);
1580       }
1581       finalizeLowering(MatMul, Result, Builder);
1582       return;
1583     }
1584 
1585     if (!MatMul->hasOneUse() || MatrixLayout != MatrixLayoutTy::ColumnMajor)
1586       return;
1587 
1588     // Lower {ld, ld} -> matmul -> st chains.  No need to call finalizeLowering
1589     // since the single store user will be lowered as part of this.
1590     auto *LoadOp0 = dyn_cast<LoadInst>(A);
1591     auto *LoadOp1 = dyn_cast<LoadInst>(B);
1592     auto *Store = dyn_cast<StoreInst>(*MatMul->user_begin());
1593     if (LoadOp0 && LoadOp1 && Store) {
1594       // The store address must dominate the MatMul instruction, otherwise
1595       // we create invalid IR.
1596       SetVector<Value *> WorkList;
1597       WorkList.insert(Store->getOperand(1));
1598       SmallVector<Instruction *> ToHoist;
1599       for (unsigned I = 0; I != WorkList.size(); ++I) {
1600         Value *Current = WorkList[I];
1601         auto *CurrI = dyn_cast<Instruction>(Current);
1602         if (!CurrI)
1603           continue;
1604         if (isa<PHINode>(CurrI))
1605           return;
1606         if (DT->dominates(CurrI, MatMul))
1607           continue;
1608         if (CurrI->mayHaveSideEffects() || CurrI->mayReadFromMemory())
1609           return;
1610         ToHoist.push_back(CurrI);
1611         WorkList.insert(CurrI->op_begin(), CurrI->op_end());
1612       }
1613 
1614       sort(ToHoist, [this](Instruction *A, Instruction *B) {
1615         return DT->dominates(A, B);
1616       });
1617       for (Instruction *I : ToHoist)
1618         I->moveBefore(MatMul);
1619 
1620       emitSIMDTiling(MatMul, LoadOp0, LoadOp1, Store, FusedInsts);
1621       return;
1622     }
1623   }
1624 
1625   /// Lowers llvm.matrix.multiply.
LowerMultiply(CallInst * MatMul)1626   void LowerMultiply(CallInst *MatMul) {
1627     IRBuilder<> Builder(MatMul);
1628     auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1629     ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1630     ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1631 
1632     const MatrixTy &Lhs = getMatrix(MatMul->getArgOperand(0), LShape, Builder);
1633     const MatrixTy &Rhs = getMatrix(MatMul->getArgOperand(1), RShape, Builder);
1634     assert(Lhs.getElementType() == Rhs.getElementType() &&
1635            "Matrix multiply argument element types do not match.");
1636 
1637     const unsigned R = LShape.NumRows;
1638     const unsigned C = RShape.NumColumns;
1639     assert(LShape.NumColumns == RShape.NumRows);
1640 
1641     // Initialize the output
1642     MatrixTy Result(R, C, EltType);
1643     assert(Lhs.getElementType() == Result.getElementType() &&
1644            "Matrix multiply result element type does not match arguments.");
1645 
1646     emitMatrixMultiply(Result, Lhs, Rhs, Builder, false, false,
1647                        getFastMathFlags(MatMul));
1648     finalizeLowering(MatMul, Result, Builder);
1649   }
1650 
1651   /// Lowers llvm.matrix.transpose.
LowerTranspose(CallInst * Inst)1652   void LowerTranspose(CallInst *Inst) {
1653     MatrixTy Result;
1654     IRBuilder<> Builder(Inst);
1655     Value *InputVal = Inst->getArgOperand(0);
1656     VectorType *VectorTy = cast<VectorType>(InputVal->getType());
1657     ShapeInfo ArgShape(Inst->getArgOperand(1), Inst->getArgOperand(2));
1658     MatrixTy InputMatrix = getMatrix(InputVal, ArgShape, Builder);
1659 
1660     const unsigned NewNumVecs =
1661         InputMatrix.isColumnMajor() ? ArgShape.NumRows : ArgShape.NumColumns;
1662     const unsigned NewNumElts =
1663         InputMatrix.isColumnMajor() ? ArgShape.NumColumns : ArgShape.NumRows;
1664 
1665     for (unsigned I = 0; I < NewNumVecs; ++I) {
1666       // Build a single result vector. First initialize it.
1667       Value *ResultVector = UndefValue::get(
1668           FixedVectorType::get(VectorTy->getElementType(), NewNumElts));
1669       // Go through the old elements and insert it into the resulting vector.
1670       for (auto J : enumerate(InputMatrix.vectors())) {
1671         Value *Elt = Builder.CreateExtractElement(J.value(), I);
1672         // Row and column indices are transposed.
1673         ResultVector =
1674             Builder.CreateInsertElement(ResultVector, Elt, J.index());
1675       }
1676       Result.addVector(ResultVector);
1677     }
1678 
1679     // TODO: Improve estimate of operations needed for transposes. Currently we
1680     // just count the insertelement/extractelement instructions, but do not
1681     // account for later simplifications/combines.
1682     finalizeLowering(
1683         Inst,
1684         Result.addNumComputeOps(2 * ArgShape.NumRows * ArgShape.NumColumns)
1685             .addNumExposedTransposes(1),
1686         Builder);
1687   }
1688 
1689   /// Lower load instructions, if shape information is available.
VisitLoad(LoadInst * Inst,Value * Ptr,IRBuilder<> & Builder)1690   bool VisitLoad(LoadInst *Inst, Value *Ptr, IRBuilder<> &Builder) {
1691     auto I = ShapeMap.find(Inst);
1692     if (I == ShapeMap.end())
1693       return false;
1694 
1695     LowerLoad(Inst, Ptr, Inst->getAlign(),
1696               Builder.getInt64(I->second.getStride()), Inst->isVolatile(),
1697               I->second);
1698     return true;
1699   }
1700 
VisitStore(StoreInst * Inst,Value * StoredVal,Value * Ptr,IRBuilder<> & Builder)1701   bool VisitStore(StoreInst *Inst, Value *StoredVal, Value *Ptr,
1702                   IRBuilder<> &Builder) {
1703     auto I = ShapeMap.find(StoredVal);
1704     if (I == ShapeMap.end())
1705       return false;
1706 
1707     LowerStore(Inst, StoredVal, Ptr, Inst->getAlign(),
1708                Builder.getInt64(I->second.getStride()), Inst->isVolatile(),
1709                I->second);
1710     return true;
1711   }
1712 
1713   /// Lower binary operators, if shape information is available.
VisitBinaryOperator(BinaryOperator * Inst)1714   bool VisitBinaryOperator(BinaryOperator *Inst) {
1715     auto I = ShapeMap.find(Inst);
1716     if (I == ShapeMap.end())
1717       return false;
1718 
1719     Value *Lhs = Inst->getOperand(0);
1720     Value *Rhs = Inst->getOperand(1);
1721 
1722     IRBuilder<> Builder(Inst);
1723     ShapeInfo &Shape = I->second;
1724 
1725     MatrixTy Result;
1726     MatrixTy A = getMatrix(Lhs, Shape, Builder);
1727     MatrixTy B = getMatrix(Rhs, Shape, Builder);
1728     assert(A.isColumnMajor() == B.isColumnMajor() &&
1729            Result.isColumnMajor() == A.isColumnMajor() &&
1730            "operands must agree on matrix layout");
1731 
1732     Builder.setFastMathFlags(getFastMathFlags(Inst));
1733 
1734     // Helper to perform binary op on vectors.
1735     auto BuildVectorOp = [&Builder, Inst](Value *LHS, Value *RHS) {
1736       switch (Inst->getOpcode()) {
1737       case Instruction::Add:
1738         return Builder.CreateAdd(LHS, RHS);
1739       case Instruction::Mul:
1740         return Builder.CreateMul(LHS, RHS);
1741       case Instruction::Sub:
1742         return Builder.CreateSub(LHS, RHS);
1743       case Instruction::FAdd:
1744         return Builder.CreateFAdd(LHS, RHS);
1745       case Instruction::FMul:
1746         return Builder.CreateFMul(LHS, RHS);
1747       case Instruction::FSub:
1748         return Builder.CreateFSub(LHS, RHS);
1749       default:
1750         llvm_unreachable("Unsupported binary operator for matrix");
1751       }
1752     };
1753 
1754     for (unsigned I = 0; I < Shape.getNumVectors(); ++I)
1755       Result.addVector(BuildVectorOp(A.getVector(I), B.getVector(I)));
1756 
1757     finalizeLowering(Inst,
1758                      Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
1759                                              Result.getNumVectors()),
1760                      Builder);
1761     return true;
1762   }
1763 
1764   /// Lower unary operators, if shape information is available.
VisitUnaryOperator(UnaryOperator * Inst)1765   bool VisitUnaryOperator(UnaryOperator *Inst) {
1766     auto I = ShapeMap.find(Inst);
1767     if (I == ShapeMap.end())
1768       return false;
1769 
1770     Value *Op = Inst->getOperand(0);
1771 
1772     IRBuilder<> Builder(Inst);
1773     ShapeInfo &Shape = I->second;
1774 
1775     MatrixTy Result;
1776     MatrixTy M = getMatrix(Op, Shape, Builder);
1777 
1778     Builder.setFastMathFlags(getFastMathFlags(Inst));
1779 
1780     // Helper to perform unary op on vectors.
1781     auto BuildVectorOp = [&Builder, Inst](Value *Op) {
1782       switch (Inst->getOpcode()) {
1783       case Instruction::FNeg:
1784         return Builder.CreateFNeg(Op);
1785       default:
1786         llvm_unreachable("Unsupported unary operator for matrix");
1787       }
1788     };
1789 
1790     for (unsigned I = 0; I < Shape.getNumVectors(); ++I)
1791       Result.addVector(BuildVectorOp(M.getVector(I)));
1792 
1793     finalizeLowering(Inst,
1794                      Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
1795                                              Result.getNumVectors()),
1796                      Builder);
1797     return true;
1798   }
1799 
1800   /// Helper to linearize a matrix expression tree into a string. Currently
1801   /// matrix expressions are linarized by starting at an expression leaf and
1802   /// linearizing bottom up.
1803   struct ExprLinearizer {
1804     unsigned LengthToBreak = 100;
1805     std::string Str;
1806     raw_string_ostream Stream;
1807     unsigned LineLength = 0;
1808     const DataLayout &DL;
1809 
1810     /// Mapping from instructions to matrixes. It is used to identify
1811     /// matrix instructions.
1812     const MapVector<Value *, MatrixTy> &Inst2Matrix;
1813 
1814     /// Mapping from values to the leaves of all expressions that the value is
1815     /// part of.
1816     const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared;
1817 
1818     /// Set of matrix expressions in the scope of a given DISubprogram.
1819     const SmallSetVector<Value *, 32> &ExprsInSubprogram;
1820 
1821     /// Leaf node of the expression to linearize.
1822     Value *Leaf;
1823 
1824     /// Used to keep track of sub-expressions that get reused while linearizing
1825     /// the expression. Re-used sub-expressions are marked as (reused).
1826     SmallPtrSet<Value *, 8> ReusedExprs;
1827 
ExprLinearizer__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1828     ExprLinearizer(const DataLayout &DL,
1829                    const MapVector<Value *, MatrixTy> &Inst2Matrix,
1830                    const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
1831                    const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1832                    Value *Leaf)
1833         : Str(), Stream(Str), DL(DL), Inst2Matrix(Inst2Matrix), Shared(Shared),
1834           ExprsInSubprogram(ExprsInSubprogram), Leaf(Leaf) {}
1835 
indent__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1836     void indent(unsigned N) {
1837       LineLength += N;
1838       for (unsigned i = 0; i < N; i++)
1839         Stream << " ";
1840     }
1841 
lineBreak__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1842     void lineBreak() {
1843       Stream << "\n";
1844       LineLength = 0;
1845     }
1846 
maybeIndent__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1847     void maybeIndent(unsigned Indent) {
1848       if (LineLength >= LengthToBreak)
1849         lineBreak();
1850 
1851       if (LineLength == 0)
1852         indent(Indent);
1853     }
1854 
write__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1855     void write(StringRef S) {
1856       LineLength += S.size();
1857       Stream << S;
1858     }
1859 
getUnderlyingObjectThroughLoads__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1860     Value *getUnderlyingObjectThroughLoads(Value *V) {
1861       if (Value *Ptr = getPointerOperand(V))
1862         return getUnderlyingObjectThroughLoads(Ptr);
1863       else if (V->getType()->isPointerTy())
1864         return getUnderlyingObject(V);
1865       return V;
1866     }
1867 
1868     /// Returns true if \p V is a matrix value in the given subprogram.
isMatrix__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1869     bool isMatrix(Value *V) const { return ExprsInSubprogram.count(V); }
1870 
1871     /// If \p V is a matrix value, print its shape as as NumRows x NumColumns to
1872     /// \p SS.
prettyPrintMatrixType__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1873     void prettyPrintMatrixType(Value *V, raw_string_ostream &SS) {
1874       auto M = Inst2Matrix.find(V);
1875       if (M == Inst2Matrix.end())
1876         SS << "unknown";
1877       else {
1878         SS << M->second.getNumRows();
1879         SS << "x";
1880         SS << M->second.getNumColumns();
1881       }
1882     }
1883 
1884     /// Write the called function name. Handles calls to llvm.matrix.*
1885     /// specially: we write the name, followed by the dimensions of the input
1886     /// matrixes, followed by the scalar type name.
writeFnName__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1887     void writeFnName(CallInst *CI) {
1888       if (!CI->getCalledFunction())
1889         write("<no called fn>");
1890       else {
1891         StringRef Name = CI->getCalledFunction()->getName();
1892         if (!Name.startswith("llvm.matrix")) {
1893           write(Name);
1894           return;
1895         }
1896         IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
1897         write(Intrinsic::getBaseName(II->getIntrinsicID())
1898                   .drop_front(StringRef("llvm.matrix.").size()));
1899         write(".");
1900         std::string Tmp;
1901         raw_string_ostream SS(Tmp);
1902 
1903         switch (II->getIntrinsicID()) {
1904         case Intrinsic::matrix_multiply:
1905           prettyPrintMatrixType(II->getOperand(0), SS);
1906           SS << ".";
1907           prettyPrintMatrixType(II->getOperand(1), SS);
1908           SS << "." << *II->getType()->getScalarType();
1909           break;
1910         case Intrinsic::matrix_transpose:
1911           prettyPrintMatrixType(II->getOperand(0), SS);
1912           SS << "." << *II->getType()->getScalarType();
1913           break;
1914         case Intrinsic::matrix_column_major_load:
1915           prettyPrintMatrixType(II, SS);
1916           SS << "." << *II->getType()->getScalarType();
1917           break;
1918         case Intrinsic::matrix_column_major_store:
1919           prettyPrintMatrixType(II->getOperand(0), SS);
1920           SS << "." << *II->getOperand(0)->getType()->getScalarType();
1921           break;
1922         default:
1923           llvm_unreachable("Unhandled case");
1924         }
1925         SS.flush();
1926         write(Tmp);
1927       }
1928     }
1929 
getNumShapeArgs__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1930     unsigned getNumShapeArgs(CallInst *CI) const {
1931       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
1932         switch (II->getIntrinsicID()) {
1933         case Intrinsic::matrix_multiply:
1934           return 3;
1935         case Intrinsic::matrix_transpose:
1936           return 2;
1937         case Intrinsic::matrix_column_major_load:
1938         case Intrinsic::matrix_column_major_store:
1939           return 3;
1940         default:
1941           return 0;
1942         }
1943       }
1944       return 0;
1945     }
1946 
1947     /// Special printing for values: for pointers, we print if they refer to an
1948     /// (function) external address or a stack address, for other values we
1949     /// either print the constant or "scalar"/"matrix" for other values.
write__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1950     void write(Value *V) {
1951       V = getUnderlyingObjectThroughLoads(V);
1952       if (V->getType()->isPointerTy()) {
1953         if (isa<AllocaInst>(V)) {
1954           Stream << "stack addr";
1955           LineLength += StringRef("stack addr").size();
1956         } else {
1957           Stream << "addr";
1958           LineLength += StringRef("addr").size();
1959         }
1960         if (!V->getName().empty()) {
1961           Stream << " %" << V->getName() << "";
1962           LineLength += V->getName().size() + 2;
1963         }
1964         return;
1965       }
1966 
1967       std::string Tmp;
1968       raw_string_ostream TmpStream(Tmp);
1969 
1970       if (auto *CI = dyn_cast<ConstantInt>(V))
1971         TmpStream << CI->getValue();
1972       else if (isa<Constant>(V))
1973         TmpStream << "constant";
1974       else {
1975         if (isMatrix(V))
1976           TmpStream << "matrix";
1977         else
1978           TmpStream << "scalar";
1979       }
1980       TmpStream.flush();
1981       Tmp = std::string(StringRef(Tmp).trim());
1982       LineLength += Tmp.size();
1983       Stream << Tmp;
1984     }
1985 
1986     /// Linearize expression \p Expr starting at an indentation of \p Indent.
1987     /// Expressions that are re-used multiple times are prefixed with (reused)
1988     /// at the re-used root instruction.
linearizeExpr__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer1989     void linearizeExpr(Value *Expr, unsigned Indent, bool ParentReused,
1990                        bool ParentShared) {
1991       auto *I = cast<Instruction>(Expr);
1992       maybeIndent(Indent);
1993       SmallVector<Value *, 8> Ops;
1994 
1995       // Is Expr shared with other expression leaves?
1996       bool ExprShared = false;
1997 
1998       // Deal with shared subtrees. Mark them as shared, if required.
1999       if (!ParentShared) {
2000         auto SI = Shared.find(Expr);
2001         assert(SI != Shared.end() && SI->second.count(Leaf));
2002 
2003         for (Value *S : SI->second) {
2004           if (S == Leaf)
2005             continue;
2006           DebugLoc DL = cast<Instruction>(S)->getDebugLoc();
2007           write("shared with remark at line " + std::to_string(DL.getLine()) +
2008                 " column " + std::to_string(DL.getCol()) + " (");
2009         }
2010         ExprShared = SI->second.size() > 1;
2011       }
2012 
2013       bool Reused = !ReusedExprs.insert(Expr).second;
2014       if (Reused && !ParentReused)
2015         write("(reused) ");
2016 
2017       if (auto *CI = dyn_cast<CallInst>(I)) {
2018         writeFnName(CI);
2019 
2020         Ops.append(CI->arg_begin(), CI->arg_end() - getNumShapeArgs(CI));
2021       } else if (isa<BitCastInst>(Expr)) {
2022         // Special case bitcasts, which are used to materialize matrixes from
2023         // non-matrix ops.
2024         write("matrix");
2025         return;
2026       } else {
2027         Ops.append(I->value_op_begin(), I->value_op_end());
2028         write(std::string(I->getOpcodeName()));
2029       }
2030 
2031       write(std::string("("));
2032 
2033       unsigned NumOpsToBreak = 1;
2034       if (match(Expr, m_Intrinsic<Intrinsic::matrix_column_major_load>()))
2035         NumOpsToBreak = 2;
2036 
2037       for (Value *Op : Ops) {
2038         if (Ops.size() > NumOpsToBreak)
2039           lineBreak();
2040 
2041         maybeIndent(Indent + 1);
2042         if (isMatrix(Op))
2043           linearizeExpr(Op, Indent + 1, Reused, ExprShared);
2044         else
2045           write(Op);
2046         if (Op != Ops.back())
2047           write(", ");
2048       }
2049 
2050       write(")");
2051     }
2052 
getResult__anonb7c62bf10111::LowerMatrixIntrinsics::ExprLinearizer2053     const std::string &getResult() {
2054       Stream.flush();
2055       return Str;
2056     }
2057   };
2058 
2059   /// Generate remarks for matrix operations in a function. To generate remarks
2060   /// for matrix expressions, the following approach is used:
2061   /// 1. Use the inlined-at debug information to group matrix operations to the
2062   ///    DISubprograms they are contained in.
2063   /// 2. Collect leaves of matrix expressions (done in
2064   ///    RemarkGenerator::getExpressionLeaves) for each subprogram - expression
2065   //     mapping.  Leaves are lowered matrix instructions without other matrix
2066   //     users (like stores) in the current subprogram.
2067   /// 3. For each leaf, create a remark containing a linearizied version of the
2068   ///    matrix expression. The expression is linearized by a recursive
2069   ///    bottom-up traversal of the matrix operands, starting at a leaf. Note
2070   ///    that multiple leaves can share sub-expressions. Shared subexpressions
2071   ///    are explicitly marked as shared().
2072   struct RemarkGenerator {
2073     const MapVector<Value *, MatrixTy> &Inst2Matrix;
2074     OptimizationRemarkEmitter &ORE;
2075     Function &Func;
2076     const DataLayout &DL;
2077 
RemarkGenerator__anonb7c62bf10111::LowerMatrixIntrinsics::RemarkGenerator2078     RemarkGenerator(const MapVector<Value *, MatrixTy> &Inst2Matrix,
2079                     OptimizationRemarkEmitter &ORE, Function &Func)
2080         : Inst2Matrix(Inst2Matrix), ORE(ORE), Func(Func),
2081           DL(Func.getParent()->getDataLayout()) {}
2082 
2083     /// Return all leaves of the expressions in \p ExprsInSubprogram. Those are
2084     /// instructions in Inst2Matrix returning void or without any users in
2085     /// \p ExprsInSubprogram. Currently that should only include stores.
2086     SmallVector<Value *, 4>
getExpressionLeaves__anonb7c62bf10111::LowerMatrixIntrinsics::RemarkGenerator2087     getExpressionLeaves(const SmallSetVector<Value *, 32> &ExprsInSubprogram) {
2088       SmallVector<Value *, 4> Leaves;
2089       for (auto *Expr : ExprsInSubprogram)
2090         if (Expr->getType()->isVoidTy() ||
2091             !any_of(Expr->users(), [&ExprsInSubprogram](User *U) {
2092               return ExprsInSubprogram.count(U);
2093             }))
2094           Leaves.push_back(Expr);
2095       return Leaves;
2096     }
2097 
2098     /// Recursively traverse expression \p V starting at \p Leaf and add \p Leaf
2099     /// to all visited expressions in \p Shared. Limit the matrix operations to
2100     /// the ones in \p ExprsInSubprogram.
collectSharedInfo__anonb7c62bf10111::LowerMatrixIntrinsics::RemarkGenerator2101     void collectSharedInfo(Value *Leaf, Value *V,
2102                            const SmallSetVector<Value *, 32> &ExprsInSubprogram,
2103                            DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) {
2104 
2105       if (!ExprsInSubprogram.count(V))
2106         return;
2107 
2108       auto I = Shared.insert({V, {}});
2109       I.first->second.insert(Leaf);
2110 
2111       for (Value *Op : cast<Instruction>(V)->operand_values())
2112         collectSharedInfo(Leaf, Op, ExprsInSubprogram, Shared);
2113     }
2114 
2115     /// Calculate the number of exclusive and shared op counts for expression
2116     /// starting at \p V. Expressions used multiple times are counted once.
2117     /// Limit the matrix operations to the ones in \p ExprsInSubprogram.
2118     std::pair<OpInfoTy, OpInfoTy>
sumOpInfos__anonb7c62bf10111::LowerMatrixIntrinsics::RemarkGenerator2119     sumOpInfos(Value *Root, SmallPtrSetImpl<Value *> &ReusedExprs,
2120                const SmallSetVector<Value *, 32> &ExprsInSubprogram,
2121                DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) const {
2122       if (!ExprsInSubprogram.count(Root))
2123         return {};
2124 
2125       // Already counted this expression. Stop.
2126       if (!ReusedExprs.insert(Root).second)
2127         return {};
2128 
2129       OpInfoTy SharedCount;
2130       OpInfoTy Count;
2131 
2132       auto I = Shared.find(Root);
2133       auto CM = Inst2Matrix.find(Root);
2134       if (I->second.size() == 1)
2135         Count = CM->second.getOpInfo();
2136       else
2137         SharedCount = CM->second.getOpInfo();
2138 
2139       for (Value *Op : cast<Instruction>(Root)->operand_values()) {
2140         auto C = sumOpInfos(Op, ReusedExprs, ExprsInSubprogram, Shared);
2141         Count += C.first;
2142         SharedCount += C.second;
2143       }
2144       return {Count, SharedCount};
2145     }
2146 
emitRemarks__anonb7c62bf10111::LowerMatrixIntrinsics::RemarkGenerator2147     void emitRemarks() {
2148       if (!ORE.allowExtraAnalysis(DEBUG_TYPE))
2149         return;
2150 
2151       // Map matrix operations to their containting subprograms, by traversing
2152       // the inlinedAt chain. If the function does not have a DISubprogram, we
2153       // only map them to the containing function.
2154       MapVector<DISubprogram *, SmallVector<Value *, 8>> Subprog2Exprs;
2155       for (auto &KV : Inst2Matrix) {
2156         if (Func.getSubprogram()) {
2157           auto *I = cast<Instruction>(KV.first);
2158           DILocation *Context = I->getDebugLoc();
2159           while (Context) {
2160             auto I =
2161                 Subprog2Exprs.insert({getSubprogram(Context->getScope()), {}});
2162             I.first->second.push_back(KV.first);
2163             Context = DebugLoc(Context).getInlinedAt();
2164           }
2165         } else {
2166           auto I = Subprog2Exprs.insert({nullptr, {}});
2167           I.first->second.push_back(KV.first);
2168         }
2169       }
2170       for (auto &KV : Subprog2Exprs) {
2171         SmallSetVector<Value *, 32> ExprsInSubprogram(KV.second.begin(),
2172                                                       KV.second.end());
2173         auto Leaves = getExpressionLeaves(ExprsInSubprogram);
2174 
2175         DenseMap<Value *, SmallPtrSet<Value *, 2>> Shared;
2176         for (Value *Leaf : Leaves)
2177           collectSharedInfo(Leaf, Leaf, ExprsInSubprogram, Shared);
2178 
2179         // Generate remarks for each leaf.
2180         for (auto *L : Leaves) {
2181 
2182           DebugLoc Loc = cast<Instruction>(L)->getDebugLoc();
2183           DILocation *Context = cast<Instruction>(L)->getDebugLoc();
2184           while (Context) {
2185             if (getSubprogram(Context->getScope()) == KV.first) {
2186               Loc = Context;
2187               break;
2188             }
2189             Context = DebugLoc(Context).getInlinedAt();
2190           }
2191 
2192           SmallPtrSet<Value *, 8> ReusedExprs;
2193           OpInfoTy Counts, SharedCounts;
2194           std::tie(Counts, SharedCounts) =
2195               sumOpInfos(L, ReusedExprs, ExprsInSubprogram, Shared);
2196 
2197           OptimizationRemark Rem(DEBUG_TYPE, "matrix-lowered", Loc,
2198                                  cast<Instruction>(L)->getParent());
2199 
2200           Rem << "Lowered with ";
2201           Rem << ore::NV("NumStores", Counts.NumStores) << " stores, "
2202               << ore::NV("NumLoads", Counts.NumLoads) << " loads, "
2203               << ore::NV("NumComputeOps", Counts.NumComputeOps)
2204               << " compute ops, "
2205               << ore::NV("NumExposedTransposes", Counts.NumExposedTransposes)
2206               << " exposed transposes";
2207 
2208           if (SharedCounts.NumStores > 0 || SharedCounts.NumLoads > 0 ||
2209               SharedCounts.NumComputeOps > 0) {
2210             Rem << ",\nadditionally "
2211                 << ore::NV("NumStores", SharedCounts.NumStores) << " stores, "
2212                 << ore::NV("NumLoads", SharedCounts.NumLoads) << " loads, "
2213                 << ore::NV("NumFPOps", SharedCounts.NumComputeOps)
2214                 << " compute ops"
2215                 << " are shared with other expressions";
2216           }
2217 
2218           Rem << ("\n" + linearize(L, Shared, ExprsInSubprogram, DL));
2219           ORE.emit(Rem);
2220         }
2221       }
2222     }
2223 
2224     std::string
linearize__anonb7c62bf10111::LowerMatrixIntrinsics::RemarkGenerator2225     linearize(Value *L,
2226               const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
2227               const SmallSetVector<Value *, 32> &ExprsInSubprogram,
2228               const DataLayout &DL) {
2229       ExprLinearizer Lin(DL, Inst2Matrix, Shared, ExprsInSubprogram, L);
2230       Lin.linearizeExpr(L, 0, false, false);
2231       return Lin.getResult();
2232     }
2233   };
2234 };
2235 } // namespace
2236 
run(Function & F,FunctionAnalysisManager & AM)2237 PreservedAnalyses LowerMatrixIntrinsicsPass::run(Function &F,
2238                                                  FunctionAnalysisManager &AM) {
2239   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
2240   OptimizationRemarkEmitter *ORE = nullptr;
2241   AAResults *AA = nullptr;
2242   DominatorTree *DT = nullptr;
2243   LoopInfo *LI = nullptr;
2244 
2245   if (!Minimal) {
2246     ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
2247     AA = &AM.getResult<AAManager>(F);
2248     DT = &AM.getResult<DominatorTreeAnalysis>(F);
2249     LI = &AM.getResult<LoopAnalysis>(F);
2250   }
2251 
2252   LowerMatrixIntrinsics LMT(F, TTI, AA, DT, LI, ORE);
2253   if (LMT.Visit()) {
2254     PreservedAnalyses PA;
2255     if (!Minimal) {
2256       PA.preserve<LoopAnalysis>();
2257       PA.preserve<DominatorTreeAnalysis>();
2258     }
2259     return PA;
2260   }
2261   return PreservedAnalyses::all();
2262 }
2263 
2264 namespace {
2265 
2266 class LowerMatrixIntrinsicsLegacyPass : public FunctionPass {
2267 public:
2268   static char ID;
2269 
LowerMatrixIntrinsicsLegacyPass()2270   LowerMatrixIntrinsicsLegacyPass() : FunctionPass(ID) {
2271     initializeLowerMatrixIntrinsicsLegacyPassPass(
2272         *PassRegistry::getPassRegistry());
2273   }
2274 
runOnFunction(Function & F)2275   bool runOnFunction(Function &F) override {
2276     auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2277     auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2278     auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2279     auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2280     auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2281     LowerMatrixIntrinsics LMT(F, TTI, &AA, &DT, &LI, &ORE);
2282     bool C = LMT.Visit();
2283     return C;
2284   }
2285 
getAnalysisUsage(AnalysisUsage & AU) const2286   void getAnalysisUsage(AnalysisUsage &AU) const override {
2287     AU.addRequired<TargetTransformInfoWrapperPass>();
2288     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2289     AU.addRequired<AAResultsWrapperPass>();
2290     AU.addRequired<DominatorTreeWrapperPass>();
2291     AU.addPreserved<DominatorTreeWrapperPass>();
2292     AU.addRequired<LoopInfoWrapperPass>();
2293     AU.addPreserved<LoopInfoWrapperPass>();
2294   }
2295 };
2296 } // namespace
2297 
2298 static const char pass_name[] = "Lower the matrix intrinsics";
2299 char LowerMatrixIntrinsicsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LowerMatrixIntrinsicsLegacyPass,DEBUG_TYPE,pass_name,false,false)2300 INITIALIZE_PASS_BEGIN(LowerMatrixIntrinsicsLegacyPass, DEBUG_TYPE, pass_name,
2301                       false, false)
2302 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
2303 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2304 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2305 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2306 INITIALIZE_PASS_END(LowerMatrixIntrinsicsLegacyPass, DEBUG_TYPE, pass_name,
2307                     false, false)
2308 
2309 Pass *llvm::createLowerMatrixIntrinsicsPass() {
2310   return new LowerMatrixIntrinsicsLegacyPass();
2311 }
2312 
2313 namespace {
2314 
2315 /// A lightweight version of the matrix lowering pass that only requires TTI.
2316 /// Advanced features that require DT, AA or ORE like tiling are disabled. This
2317 /// is used to lower matrix intrinsics if the main lowering pass is not run, for
2318 /// example with -O0.
2319 class LowerMatrixIntrinsicsMinimalLegacyPass : public FunctionPass {
2320 public:
2321   static char ID;
2322 
LowerMatrixIntrinsicsMinimalLegacyPass()2323   LowerMatrixIntrinsicsMinimalLegacyPass() : FunctionPass(ID) {
2324     initializeLowerMatrixIntrinsicsMinimalLegacyPassPass(
2325         *PassRegistry::getPassRegistry());
2326   }
2327 
runOnFunction(Function & F)2328   bool runOnFunction(Function &F) override {
2329     auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2330     LowerMatrixIntrinsics LMT(F, TTI, nullptr, nullptr, nullptr, nullptr);
2331     bool C = LMT.Visit();
2332     return C;
2333   }
2334 
getAnalysisUsage(AnalysisUsage & AU) const2335   void getAnalysisUsage(AnalysisUsage &AU) const override {
2336     AU.addRequired<TargetTransformInfoWrapperPass>();
2337     AU.setPreservesCFG();
2338   }
2339 };
2340 } // namespace
2341 
2342 static const char pass_name_minimal[] = "Lower the matrix intrinsics (minimal)";
2343 char LowerMatrixIntrinsicsMinimalLegacyPass::ID = 0;
2344 INITIALIZE_PASS_BEGIN(LowerMatrixIntrinsicsMinimalLegacyPass,
2345                       "lower-matrix-intrinsics-minimal", pass_name_minimal,
2346                       false, false)
2347 INITIALIZE_PASS_END(LowerMatrixIntrinsicsMinimalLegacyPass,
2348                     "lower-matrix-intrinsics-minimal", pass_name_minimal, false,
2349                     false)
2350 
createLowerMatrixIntrinsicsMinimalPass()2351 Pass *llvm::createLowerMatrixIntrinsicsMinimalPass() {
2352   return new LowerMatrixIntrinsicsMinimalLegacyPass();
2353 }
2354