1 //===- LowerMatrixIntrinsics.cpp - Lower matrix intrinsics -----*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Lower matrix intrinsics to vector operations.
10 //
11 // TODO:
12 // * Improve fusion:
13 // * Support more cases, e.g. multiply-add, multiply-sub, operands/results
14 // transposed.
15 // * Improve cost-modeling, e.g. choose different number of rows/columns
16 // columns for tiles, consider cost of copies on alias.
17 //
18 //===----------------------------------------------------------------------===//
19
20 #include "llvm/Transforms/Scalar/LowerMatrixIntrinsics.h"
21 #include "llvm/ADT/GraphTraits.h"
22 #include "llvm/ADT/PostOrderIterator.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/DomTreeUpdater.h"
26 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
27 #include "llvm/Analysis/TargetTransformInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/Analysis/VectorUtils.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/DataLayout.h"
32 #include "llvm/IR/DebugInfoMetadata.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/IRBuilder.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/PatternMatch.h"
38 #include "llvm/InitializePasses.h"
39 #include "llvm/Pass.h"
40 #include "llvm/Support/Alignment.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Transforms/Scalar.h"
44 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
45 #include "llvm/Transforms/Utils/LoopUtils.h"
46 #include "llvm/Transforms/Utils/MatrixUtils.h"
47
48 using namespace llvm;
49 using namespace PatternMatch;
50
51 #define DEBUG_TYPE "lower-matrix-intrinsics"
52
53 static cl::opt<bool> EnableShapePropagation(
54 "matrix-propagate-shape", cl::init(true), cl::Hidden,
55 cl::desc("Enable/disable shape propagation from matrix intrinsics to other "
56 "instructions."));
57
58 static cl::opt<bool>
59 FuseMatrix("fuse-matrix", cl::init(true), cl::Hidden,
60 cl::desc("Enable/disable fusing matrix instructions."));
61 // TODO: Allow and use non-square tiles.
62 static cl::opt<unsigned> TileSize(
63 "fuse-matrix-tile-size", cl::init(4), cl::Hidden,
64 cl::desc(
65 "Tile size for matrix instruction fusion using square-shaped tiles."));
66 static cl::opt<bool> TileUseLoops("fuse-matrix-use-loops", cl::init(false),
67 cl::Hidden,
68 cl::desc("Generate loop nest for tiling."));
69 static cl::opt<bool> ForceFusion(
70 "force-fuse-matrix", cl::init(false), cl::Hidden,
71 cl::desc("Force matrix instruction fusion even if not profitable."));
72 static cl::opt<bool> AllowContractEnabled(
73 "matrix-allow-contract", cl::init(false), cl::Hidden,
74 cl::desc("Allow the use of FMAs if available and profitable. This may "
75 "result in different results, due to less rounding error."));
76
77 enum class MatrixLayoutTy { ColumnMajor, RowMajor };
78
79 static cl::opt<MatrixLayoutTy> MatrixLayout(
80 "matrix-default-layout", cl::init(MatrixLayoutTy::ColumnMajor),
81 cl::desc("Sets the default matrix layout"),
82 cl::values(clEnumValN(MatrixLayoutTy::ColumnMajor, "column-major",
83 "Use column-major layout"),
84 clEnumValN(MatrixLayoutTy::RowMajor, "row-major",
85 "Use row-major layout")));
86
87 /// Helper function to either return \p Scope, if it is a subprogram, or the
88 /// attached subprogram for a local scope.
89 static DISubprogram *getSubprogram(DIScope *Scope) {
90 if (auto *Subprogram = dyn_cast<DISubprogram>(Scope))
91 return Subprogram;
92 return cast<DILocalScope>(Scope)->getSubprogram();
93 }
94
95 namespace {
96
97 // Given an element pointer \p BasePtr to the start of a (sub) matrix, compute
98 // the start address of vector \p VecIdx with type (\p EltType x \p NumElements)
99 // assuming \p Stride elements between the starts of two consecutive vectors.
100 // \p Stride must be >= \p NumElements.
101 // For column-major matrixes, the function computes the address of a column
102 // vectors and \p NumElements must be set to the number of elements in a column
103 // (= number of rows of the matrix). For row-major matrixes, the function
104 // computes the address of a row vector and \p NumElements must be set to the
105 // number of elements in a row (= number of columns of the matrix).
106 //
107 // Consider a 4x4 matrix in column-major layout like below
108 //
109 // 0 1 2 3
110 // 0 v_0_0 v_0_1 v_0_2 v_0_3
111 // 1 v_1_0 v_1_1 v_1_2 v_1_3
112 // 2 v_2_0 v_2_1 v_2_2 v_2_3
113 // 3 v_3_0 v_3_1 v_3_2 v_3_3
114
115 // To compute the column addresses for a 2x3 sub-matrix at row 1 and column 1,
116 // we need a pointer to the first element of the submatrix as base pointer.
117 // Then we can use computeVectorAddr to compute the addresses for the columns
118 // of the sub-matrix.
119 //
120 // Column 0: computeVectorAddr(Base, 0 (column), 4 (stride), 2 (num rows), ..)
121 // -> just returns Base
122 // Column 1: computeVectorAddr(Base, 1 (column), 4 (stride), 2 (num rows), ..)
123 // -> returns Base + (1 * 4)
124 // Column 2: computeVectorAddr(Base, 2 (column), 4 (stride), 2 (num rows), ..)
125 // -> returns Base + (2 * 4)
126 //
127 // The graphic below illustrates the number of elements in a column (marked
128 // with |) and the number of skipped elements (marked with }).
129 //
130 // v_0_0 v_0_1 {v_0_2 {v_0_3
131 // Base Col 1 Col 2
132 // | | |
133 // v_1_0 |v_1_1 |v_1_2 |v_1_3
134 // v_2_0 |v_2_1 |v_2_2 |v_2_3
135 // v_3_0 {v_3_1 {v_3_2 v_3_3
136 //
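// A minimal sketch of the IR this typically emits for vector index 2 of a
// matrix of doubles with 2 elements per vector and a runtime stride %stride
// (value names are illustrative):
//   %vec.start = mul i64 2, %stride
//   %vec.gep   = getelementptr double, double* %base, i64 %vec.start
//   %vec.cast  = bitcast double* %vec.gep to <2 x double>*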
137 Value *computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride,
138 unsigned NumElements, Type *EltType,
139 IRBuilder<> &Builder) {
140
141 assert((!isa<ConstantInt>(Stride) ||
142 cast<ConstantInt>(Stride)->getZExtValue() >= NumElements) &&
143 "Stride must be >= the number of elements in the result vector.");
144 unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();
145
146 // Compute the start of the vector with index VecIdx as VecIdx * Stride.
147 Value *VecStart = Builder.CreateMul(VecIdx, Stride, "vec.start");
148
149 // Get pointer to the start of the selected vector. Skip GEP creation,
150 // if we select vector 0.
151 if (isa<ConstantInt>(VecStart) && cast<ConstantInt>(VecStart)->isZero())
152 VecStart = BasePtr;
153 else
154 VecStart = Builder.CreateGEP(EltType, BasePtr, VecStart, "vec.gep");
155
156 // Cast elementwise vector start pointer to a pointer to a vector
157 // (EltType x NumElements)*.
158 auto *VecType = FixedVectorType::get(EltType, NumElements);
159 Type *VecPtrType = PointerType::get(VecType, AS);
160 return Builder.CreatePointerCast(VecStart, VecPtrType, "vec.cast");
161 }
162
163 /// LowerMatrixIntrinsics contains the methods used to lower matrix intrinsics.
164 ///
165 /// Currently, the lowering for each matrix intrinsic is done as follows:
166 /// 1. Propagate the shape information from intrinsics to connected
167 /// instructions.
168 /// 2. Lower instructions with shape information (assuming column-major layout).
169 /// The lowering works similarly using row-major layout.
170 /// 2.1. Get column vectors for each argument. If we already lowered the
171 /// definition of an argument, use the produced column vectors directly.
172 /// If not, split the operand vector containing an embedded matrix into
173 /// a set of column vectors.
174 /// 2.2. Lower the instruction in terms of column major operations, which
175 /// yields a set of column vectors containing the result matrix. Note that we
176 /// lower all instructions that have shape information. Besides the
177 /// intrinsics, this includes stores for example.
178 /// 2.3. Update uses of the lowered instruction. If we have shape information
179 /// for a user, there is nothing to do, as we will look up the result
180 /// column matrix when lowering the user. For other uses, we embed the
181 /// result matrix in a flat vector and update the use.
182 /// 2.4. Cache the result column matrix for the instruction we lowered.
183 /// 3. After we lowered all instructions in a function, remove the now
184 /// obsolete instructions.
185 ///
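/// As an illustration (not the exact emitted IR), a 2x2 multiply such as
///   %c = call <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(
///            <4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
/// is lowered to shufflevector/extractelement/fmul/fadd (or fmuladd)
/// sequences operating on the <2 x double> column vectors of %a and %b.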
186 class LowerMatrixIntrinsics {
187 Function &Func;
188 const DataLayout &DL;
189 const TargetTransformInfo &TTI;
190 AliasAnalysis *AA;
191 DominatorTree *DT;
192 LoopInfo *LI;
193 OptimizationRemarkEmitter *ORE;
194
195 /// Contains estimates of the number of operations (loads, stores, compute) required to lower a matrix operation.
196 struct OpInfoTy {
197 /// Number of stores emitted to generate this matrix.
198 unsigned NumStores = 0;
199 /// Number of loads emitted to generate this matrix.
200 unsigned NumLoads = 0;
201 /// Number of compute operations emitted to generate this matrix.
202 unsigned NumComputeOps = 0;
203
204 OpInfoTy &operator+=(const OpInfoTy &RHS) {
205 NumStores += RHS.NumStores;
206 NumLoads += RHS.NumLoads;
207 NumComputeOps += RHS.NumComputeOps;
208 return *this;
209 }
210 };
211
212 /// Wrapper class representing a matrix as a set of vectors, either in row or
213 /// column major layout. All vectors must have the same vector type.
214 class MatrixTy {
215 SmallVector<Value *, 16> Vectors;
216
217 OpInfoTy OpInfo;
218
219 bool IsColumnMajor = true;
220
221 public:
222 MatrixTy()
223 : Vectors(),
224 IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
225 MatrixTy(ArrayRef<Value *> Vectors)
226 : Vectors(Vectors.begin(), Vectors.end()),
227 IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
228 MatrixTy(unsigned NumRows, unsigned NumColumns, Type *EltTy)
229 : IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {
230
231 unsigned D = isColumnMajor() ? NumColumns : NumRows;
232 for (unsigned J = 0; J < D; ++J)
233 addVector(UndefValue::get(FixedVectorType::get(
234 EltTy, isColumnMajor() ? NumRows : NumColumns)));
235 }
236
237 Value *getVector(unsigned i) const { return Vectors[i]; }
238 Value *getColumn(unsigned i) const {
239 assert(isColumnMajor() && "only supported for column-major matrixes");
240 return Vectors[i];
241 }
242 Value *getRow(unsigned i) const {
243 assert(!isColumnMajor() && "only supported for row-major matrixes");
244 return Vectors[i];
245 }
246
247 void setVector(unsigned i, Value *V) { Vectors[i] = V; }
248
249 Type *getElementType() const { return getVectorTy()->getElementType(); }
250
251 unsigned getNumVectors() const {
252 if (isColumnMajor())
253 return getNumColumns();
254 return getNumRows();
255 }
256
257 unsigned getNumColumns() const {
258 if (isColumnMajor())
259 return Vectors.size();
260 else {
261 assert(Vectors.size() > 0 && "Cannot call getNumColumns without rows");
262 return cast<FixedVectorType>(Vectors[0]->getType())->getNumElements();
263 }
264 }
265 unsigned getNumRows() const {
266 if (isColumnMajor()) {
267 assert(Vectors.size() > 0 && "Cannot call getNumRows without columns");
268 return cast<FixedVectorType>(Vectors[0]->getType())->getNumElements();
269 } else
270 return Vectors.size();
271 }
272
273 void addVector(Value *V) { Vectors.push_back(V); }
274 VectorType *getColumnTy() {
275 assert(isColumnMajor() && "only supported for column-major matrixes");
276 return getVectorTy();
277 }
278
279 VectorType *getVectorTy() const {
280 return cast<VectorType>(Vectors[0]->getType());
281 }
282
283 iterator_range<SmallVector<Value *, 8>::iterator> columns() {
284 assert(isColumnMajor() &&
285 "columns() only supported for column-major matrixes");
286 return make_range(Vectors.begin(), Vectors.end());
287 }
288
289 iterator_range<SmallVector<Value *, 8>::iterator> vectors() {
290 return make_range(Vectors.begin(), Vectors.end());
291 }
292
293 /// Embed the vectors of the matrix into a flat vector by concatenating
294 /// them.
295 Value *embedInVector(IRBuilder<> &Builder) const {
296 return Vectors.size() == 1 ? Vectors[0]
297 : concatenateVectors(Builder, Vectors);
298 }
299
300 MatrixTy &addNumLoads(unsigned N) {
301 OpInfo.NumLoads += N;
302 return *this;
303 }
304
305 void setNumLoads(unsigned N) { OpInfo.NumLoads = N; }
306
307 MatrixTy &addNumStores(unsigned N) {
308 OpInfo.NumStores += N;
309 return *this;
310 }
311
312 MatrixTy &addNumComputeOps(unsigned N) {
313 OpInfo.NumComputeOps += N;
314 return *this;
315 }
316
317 unsigned getNumStores() const { return OpInfo.NumStores; }
318 unsigned getNumLoads() const { return OpInfo.NumLoads; }
319 unsigned getNumComputeOps() const { return OpInfo.NumComputeOps; }
320
321 const OpInfoTy &getOpInfo() const { return OpInfo; }
322
323 bool isColumnMajor() const { return IsColumnMajor; }
324
325 unsigned getStride() const {
326 if (isColumnMajor())
327 return getNumRows();
328 return getNumColumns();
329 }
330
331 /// Extract a vector of \p NumElts starting at index (\p I, \p J). If the
332 /// matrix is column-major, the result vector is extracted from a column
333 /// vector, otherwise from a row vector.
334 Value *extractVector(unsigned I, unsigned J, unsigned NumElts,
335 IRBuilder<> &Builder) const {
336 Value *Vec = isColumnMajor() ? getColumn(J) : getRow(I);
337 return Builder.CreateShuffleVector(
338 Vec, createSequentialMask(isColumnMajor() ? I : J, NumElts, 0),
339 "block");
340 }
341 };
342
343 struct ShapeInfo {
344 unsigned NumRows;
345 unsigned NumColumns;
346
347 bool IsColumnMajor;
348
349 ShapeInfo(unsigned NumRows = 0, unsigned NumColumns = 0)
350 : NumRows(NumRows), NumColumns(NumColumns),
351 IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
352
353 ShapeInfo(Value *NumRows, Value *NumColumns)
354 : ShapeInfo(cast<ConstantInt>(NumRows)->getZExtValue(),
355 cast<ConstantInt>(NumColumns)->getZExtValue()) {}
356
357 bool operator==(const ShapeInfo &other) {
358 return NumRows == other.NumRows && NumColumns == other.NumColumns;
359 }
360 bool operator!=(const ShapeInfo &other) { return !(*this == other); }
361
362 /// Returns true if shape-information is defined, meaning both dimensions
363 /// are != 0.
364 operator bool() const {
365 assert(NumRows == 0 || NumColumns != 0);
366 return NumRows != 0;
367 }
368
369 unsigned getStride() const {
370 if (IsColumnMajor)
371 return NumRows;
372 return NumColumns;
373 }
374
375 unsigned getNumVectors() const {
376 if (IsColumnMajor)
377 return NumColumns;
378 return NumRows;
379 }
380 };
381
382 /// Maps instructions to their shape information. The shape information
383 /// describes the shape to be used while lowering. This matches the shape of
384 /// the result value of the instruction, with the only exceptions being store
385 /// instructions and the matrix_column_major_store intrinsics. For those, the
386 /// shape information describes the shape of the stored matrix operand and
387 /// marks them for lowering using shape information as well.
388 DenseMap<Value *, ShapeInfo> ShapeMap;
389
390 /// List of instructions to remove. While lowering, we do not replace all
391 /// users of a lowered instruction if shape information is available; those
392 /// lowered instructions need to be removed after we have finished lowering.
393 SmallVector<Instruction *, 16> ToRemove;
394
395 /// Map from instructions to their produced column matrix.
396 MapVector<Value *, MatrixTy> Inst2ColumnMatrix;
397
398 public:
399 LowerMatrixIntrinsics(Function &F, TargetTransformInfo &TTI,
400 AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI,
401 OptimizationRemarkEmitter *ORE)
402 : Func(F), DL(F.getParent()->getDataLayout()), TTI(TTI), AA(AA), DT(DT),
403 LI(LI), ORE(ORE) {}
404
405 unsigned getNumOps(Type *VT) {
406 assert(isa<VectorType>(VT) && "Expected vector type");
407 return getNumOps(VT->getScalarType(),
408 cast<FixedVectorType>(VT)->getNumElements());
409 }
410
411 //
412 /// Return the estimated number of vector ops required for an operation on
413 /// \p VT * N.
414 unsigned getNumOps(Type *ST, unsigned N) {
415 return std::ceil((ST->getPrimitiveSizeInBits() * N).getFixedSize() /
416 double(TTI.getRegisterBitWidth(true)));
417 }
418
419 /// Return the set of vectors that a matrix value is lowered to.
420 ///
421 /// If we lowered \p MatrixVal, just return the cached result matrix. Otherwise
422 /// split the flat vector \p MatrixVal containing a matrix with shape \p SI
423 /// into vectors.
424 MatrixTy getMatrix(Value *MatrixVal, const ShapeInfo &SI,
425 IRBuilder<> &Builder) {
426 VectorType *VType = dyn_cast<VectorType>(MatrixVal->getType());
427 assert(VType && "MatrixVal must be a vector type");
428 assert(cast<FixedVectorType>(VType)->getNumElements() ==
429 SI.NumRows * SI.NumColumns &&
430 "The vector size must match the number of matrix elements");
431
432 // Check if we lowered MatrixVal using shape information. In that case,
433 // return the existing matrix, if it matches the requested shape
434 // information. If there is a mis-match, embed the result in a flat
435 // vector and split it later.
436 auto Found = Inst2ColumnMatrix.find(MatrixVal);
437 if (Found != Inst2ColumnMatrix.end()) {
438 MatrixTy &M = Found->second;
439 // Return the found matrix, if its shape matches the requested shape
440 // information
441 if (SI.NumRows == M.getNumRows() && SI.NumColumns == M.getNumColumns())
442 return M;
443
444 MatrixVal = M.embedInVector(Builder);
445 }
446
447 // Otherwise split MatrixVal.
448 SmallVector<Value *, 16> SplitVecs;
449 for (unsigned MaskStart = 0;
450 MaskStart < cast<FixedVectorType>(VType)->getNumElements();
451 MaskStart += SI.getStride()) {
452 Value *V = Builder.CreateShuffleVector(
453 MatrixVal, createSequentialMask(MaskStart, SI.getStride(), 0),
454 "split");
455 SplitVecs.push_back(V);
456 }
457
458 return {SplitVecs};
459 }
460
461 /// If \p V already has a known shape return false. Otherwise set the shape
462 /// for instructions that support it.
463 bool setShapeInfo(Value *V, ShapeInfo Shape) {
464 assert(Shape && "Shape not set");
465 if (isa<UndefValue>(V) || !supportsShapeInfo(V))
466 return false;
467
468 auto SIter = ShapeMap.find(V);
469 if (SIter != ShapeMap.end()) {
470 LLVM_DEBUG(dbgs() << " not overriding existing shape: "
471 << SIter->second.NumRows << " "
472 << SIter->second.NumColumns << " for " << *V << "\n");
473 return false;
474 }
475
476 ShapeMap.insert({V, Shape});
477 LLVM_DEBUG(dbgs() << " " << Shape.NumRows << " x " << Shape.NumColumns
478 << " for " << *V << "\n");
479 return true;
480 }
481
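/// Returns true for operations whose operands and result all share the same
/// shape (element-wise ops) and for non-instruction values. For example
/// (illustrative), if %x has a known 2x4 shape, then in
///   %a = fadd <8 x double> %x, %y
/// %a and %y can be given the same 2x4 shape.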
482 bool isUniformShape(Value *V) {
483 Instruction *I = dyn_cast<Instruction>(V);
484 if (!I)
485 return true;
486
487 switch (I->getOpcode()) {
488 case Instruction::FAdd:
489 case Instruction::FSub:
490 case Instruction::FMul: // Scalar multiply.
491 case Instruction::FNeg:
492 case Instruction::Add:
493 case Instruction::Mul:
494 case Instruction::Sub:
495 return true;
496 default:
497 return false;
498 }
499 }
500
501 /// Returns true if shape information can be used for \p V. The supported
502 /// instructions must match the instructions that can be lowered by this pass.
503 bool supportsShapeInfo(Value *V) {
504 Instruction *Inst = dyn_cast<Instruction>(V);
505 if (!Inst)
506 return false;
507
508 IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
509 if (II)
510 switch (II->getIntrinsicID()) {
511 case Intrinsic::matrix_multiply:
512 case Intrinsic::matrix_transpose:
513 case Intrinsic::matrix_column_major_load:
514 case Intrinsic::matrix_column_major_store:
515 return true;
516 default:
517 return false;
518 }
519 return isUniformShape(V) || isa<StoreInst>(V) || isa<LoadInst>(V);
520 }
521
522 /// Propagate the shape information of instructions to their users.
523 /// The work list contains instructions for which we can compute the shape,
524 /// either based on the information provided by matrix intrinsics or known
525 /// shapes of operands.
526 SmallVector<Instruction *, 32>
527 propagateShapeForward(SmallVectorImpl<Instruction *> &WorkList) {
528 SmallVector<Instruction *, 32> NewWorkList;
529 // Pop an element for which we are guaranteed to have at least one of the
530 // operand shapes. Add the shape for this and then add users to the work
531 // list.
532 LLVM_DEBUG(dbgs() << "Forward-propagate shapes:\n");
533 while (!WorkList.empty()) {
534 Instruction *Inst = WorkList.pop_back_val();
535
536 // New entry, set the value and insert operands
537 bool Propagate = false;
538
539 Value *MatrixA;
540 Value *MatrixB;
541 Value *M;
542 Value *N;
543 Value *K;
544 if (match(Inst, m_Intrinsic<Intrinsic::matrix_multiply>(
545 m_Value(MatrixA), m_Value(MatrixB), m_Value(M),
546 m_Value(N), m_Value(K)))) {
547 Propagate = setShapeInfo(Inst, {M, K});
548 } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_transpose>(
549 m_Value(MatrixA), m_Value(M), m_Value(N)))) {
550 // Flip dimensions.
551 Propagate = setShapeInfo(Inst, {N, M});
552 } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_column_major_store>(
553 m_Value(MatrixA), m_Value(), m_Value(),
554 m_Value(), m_Value(M), m_Value(N)))) {
555 Propagate = setShapeInfo(Inst, {N, M});
556 } else if (match(Inst, m_Intrinsic<Intrinsic::matrix_column_major_load>(
557 m_Value(), m_Value(), m_Value(), m_Value(M),
558 m_Value(N)))) {
559 Propagate = setShapeInfo(Inst, {M, N});
560 } else if (match(Inst, m_Store(m_Value(MatrixA), m_Value()))) {
561 auto OpShape = ShapeMap.find(MatrixA);
562 if (OpShape != ShapeMap.end())
563 setShapeInfo(Inst, OpShape->second);
564 continue;
565 } else if (isUniformShape(Inst)) {
566 // Find the first operand that has a known shape and use that.
567 for (auto &Op : Inst->operands()) {
568 auto OpShape = ShapeMap.find(Op.get());
569 if (OpShape != ShapeMap.end()) {
570 Propagate |= setShapeInfo(Inst, OpShape->second);
571 break;
572 }
573 }
574 }
575
576 if (Propagate) {
577 NewWorkList.push_back(Inst);
578 for (auto *User : Inst->users())
579 if (ShapeMap.count(User) == 0)
580 WorkList.push_back(cast<Instruction>(User));
581 }
582 }
583
584 return NewWorkList;
585 }
586
587 /// Propagate the shape to operands of instructions with shape information.
588 /// \p WorkList contains the instructions for which we already know the shape.
589 SmallVector<Instruction *, 32>
590 propagateShapeBackward(SmallVectorImpl<Instruction *> &WorkList) {
591 SmallVector<Instruction *, 32> NewWorkList;
592
593 auto pushInstruction = [](Value *V,
594 SmallVectorImpl<Instruction *> &WorkList) {
595 Instruction *I = dyn_cast<Instruction>(V);
596 if (I)
597 WorkList.push_back(I);
598 };
599 // Pop an element with known shape. Traverse the operands; if an operand's
600 // shape derives from the result shape and is not known yet, set it and add
601 // the operand to the worklist.
602 LLVM_DEBUG(dbgs() << "Backward-propagate shapes:\n");
603 while (!WorkList.empty()) {
604 Value *V = WorkList.pop_back_val();
605
606 size_t BeforeProcessingV = WorkList.size();
607 if (!isa<Instruction>(V))
608 continue;
609
610 Value *MatrixA;
611 Value *MatrixB;
612 Value *M;
613 Value *N;
614 Value *K;
615 if (match(V, m_Intrinsic<Intrinsic::matrix_multiply>(
616 m_Value(MatrixA), m_Value(MatrixB), m_Value(M),
617 m_Value(N), m_Value(K)))) {
618 if (setShapeInfo(MatrixA, {M, N}))
619 pushInstruction(MatrixA, WorkList);
620
621 if (setShapeInfo(MatrixB, {N, K}))
622 pushInstruction(MatrixB, WorkList);
623
624 } else if (match(V, m_Intrinsic<Intrinsic::matrix_transpose>(
625 m_Value(MatrixA), m_Value(M), m_Value(N)))) {
626 // Flip dimensions.
627 if (setShapeInfo(MatrixA, {M, N}))
628 pushInstruction(MatrixA, WorkList);
629 } else if (match(V, m_Intrinsic<Intrinsic::matrix_column_major_store>(
630 m_Value(MatrixA), m_Value(), m_Value(), m_Value(),
631 m_Value(M), m_Value(N)))) {
632 if (setShapeInfo(MatrixA, {M, N})) {
633 pushInstruction(MatrixA, WorkList);
634 }
635 } else if (isa<LoadInst>(V) ||
636 match(V, m_Intrinsic<Intrinsic::matrix_column_major_load>())) {
637 // Nothing to do, no matrix input.
638 } else if (isa<StoreInst>(V)) {
639 // Nothing to do. We forward-propagated to this so we would just
640 // backward propagate to an instruction with an already known shape.
641 } else if (isUniformShape(V)) {
642 // Propagate to all operands.
643 ShapeInfo Shape = ShapeMap[V];
644 for (Use &U : cast<Instruction>(V)->operands()) {
645 if (setShapeInfo(U.get(), Shape))
646 pushInstruction(U.get(), WorkList);
647 }
648 }
649 // After we discovered new shape info for new instructions in the
650 // worklist, we use their users as seeds for the next round of forward
651 // propagation.
652 for (size_t I = BeforeProcessingV; I != WorkList.size(); I++)
653 for (User *U : WorkList[I]->users())
654 if (isa<Instruction>(U) && V != U)
655 NewWorkList.push_back(cast<Instruction>(U));
656 }
657 return NewWorkList;
658 }
659
660 bool Visit() {
661 if (EnableShapePropagation) {
662 SmallVector<Instruction *, 32> WorkList;
663
664 // Initially only the shape of matrix intrinsics is known.
665 // Initialize the work list with ops carrying shape information.
666 for (BasicBlock &BB : Func)
667 for (Instruction &Inst : BB) {
668 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&Inst);
669 if (!II)
670 continue;
671
672 switch (II->getIntrinsicID()) {
673 case Intrinsic::matrix_multiply:
674 case Intrinsic::matrix_transpose:
675 case Intrinsic::matrix_column_major_load:
676 case Intrinsic::matrix_column_major_store:
677 WorkList.push_back(&Inst);
678 break;
679 default:
680 break;
681 }
682 }
683 // Propagate shapes until nothing changes any longer.
684 while (!WorkList.empty()) {
685 WorkList = propagateShapeForward(WorkList);
686 WorkList = propagateShapeBackward(WorkList);
687 }
688 }
689
690 bool Changed = false;
691 SmallVector<CallInst *, 16> MaybeFusableInsts;
692 SmallVector<Instruction *, 16> MatrixInsts;
693
694 // First, collect all instructions with shape information and candidates for
695 // fusion (currently only matrix multiplies).
696 ReversePostOrderTraversal<Function *> RPOT(&Func);
697 for (auto *BB : RPOT)
698 for (Instruction &I : *BB) {
699 if (ShapeMap.find(&I) == ShapeMap.end())
700 continue;
701 if (match(&I, m_Intrinsic<Intrinsic::matrix_multiply>()))
702 MaybeFusableInsts.push_back(cast<CallInst>(&I));
703 MatrixInsts.push_back(&I);
704 }
705
706 // Second, try to fuse candidates.
707 SmallPtrSet<Instruction *, 16> FusedInsts;
708 for (CallInst *CI : MaybeFusableInsts)
709 LowerMatrixMultiplyFused(CI, FusedInsts);
710 Changed = !FusedInsts.empty();
711
712 // Third, lower remaining instructions with shape information.
713 for (Instruction *Inst : MatrixInsts) {
714 if (FusedInsts.count(Inst))
715 continue;
716
717 IRBuilder<> Builder(Inst);
718
719 if (CallInst *CInst = dyn_cast<CallInst>(Inst))
720 Changed |= VisitCallInst(CInst);
721
722 Value *Op1;
723 Value *Op2;
724 if (auto *BinOp = dyn_cast<BinaryOperator>(Inst))
725 Changed |= VisitBinaryOperator(BinOp);
726 if (auto *UnOp = dyn_cast<UnaryOperator>(Inst))
727 Changed |= VisitUnaryOperator(UnOp);
728 if (match(Inst, m_Load(m_Value(Op1))))
729 Changed |= VisitLoad(cast<LoadInst>(Inst), Op1, Builder);
730 else if (match(Inst, m_Store(m_Value(Op1), m_Value(Op2))))
731 Changed |= VisitStore(cast<StoreInst>(Inst), Op1, Op2, Builder);
732 }
733
734 if (ORE) {
735 RemarkGenerator RemarkGen(Inst2ColumnMatrix, *ORE, Func);
736 RemarkGen.emitRemarks();
737 }
738
739 for (Instruction *Inst : reverse(ToRemove))
740 Inst->eraseFromParent();
741
742 return Changed;
743 }
744
745 /// Turns \p BasePtr into an elementwise pointer to \p EltType.
746 Value *createElementPtr(Value *BasePtr, Type *EltType, IRBuilder<> &Builder) {
747 unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();
748 Type *EltPtrType = PointerType::get(EltType, AS);
749 return Builder.CreatePointerCast(BasePtr, EltPtrType);
750 }
751
752 /// Replace intrinsic calls
753 bool VisitCallInst(CallInst *Inst) {
754 if (!Inst->getCalledFunction() || !Inst->getCalledFunction()->isIntrinsic())
755 return false;
756
757 switch (Inst->getCalledFunction()->getIntrinsicID()) {
758 case Intrinsic::matrix_multiply:
759 LowerMultiply(Inst);
760 break;
761 case Intrinsic::matrix_transpose:
762 LowerTranspose(Inst);
763 break;
764 case Intrinsic::matrix_column_major_load:
765 LowerColumnMajorLoad(Inst);
766 break;
767 case Intrinsic::matrix_column_major_store:
768 LowerColumnMajorStore(Inst);
769 break;
770 default:
771 return false;
772 }
773 return true;
774 }
775
776 /// Compute the alignment for a column/row \p Idx with \p Stride between them.
777 /// The address at \p Idx == 0 has alignment \p A. If \p Stride is a
778 /// ConstantInt, reduce the initial alignment based on the byte offset. For
779 /// non-ConstantInt strides, return the common alignment of the initial
780 /// alignment and the element size in bytes.
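/// For example (illustrative numbers): with double elements, an initial
/// alignment of 16 and a constant stride of 3 elements (24 bytes), the vector
/// at \p Idx == 1 starts at byte offset 24, so the returned alignment is
/// commonAlignment(16, 24) == 8.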
781 Align getAlignForIndex(unsigned Idx, Value *Stride, Type *ElementTy,
782 MaybeAlign A) const {
783 Align InitialAlign = DL.getValueOrABITypeAlignment(A, ElementTy);
784 if (Idx == 0)
785 return InitialAlign;
786
787 TypeSize ElementSizeInBits = DL.getTypeSizeInBits(ElementTy);
788 if (auto *ConstStride = dyn_cast<ConstantInt>(Stride)) {
789 uint64_t StrideInBytes =
790 ConstStride->getZExtValue() * ElementSizeInBits / 8;
791 return commonAlignment(InitialAlign, Idx * StrideInBytes);
792 }
793 return commonAlignment(InitialAlign, ElementSizeInBits / 8);
794 }
795
796 /// Load a matrix with \p Shape starting at \p Ptr and using \p Stride between
797 /// vectors.
798 MatrixTy loadMatrix(Type *Ty, Value *Ptr, MaybeAlign MAlign, Value *Stride,
799 bool IsVolatile, ShapeInfo Shape, IRBuilder<> &Builder) {
800 auto VType = cast<VectorType>(Ty);
801 Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
802 MatrixTy Result;
803 for (unsigned I = 0, E = Shape.getNumVectors(); I < E; ++I) {
804 Value *GEP = computeVectorAddr(EltPtr, Builder.getInt64(I), Stride,
805 Shape.getStride(), VType->getElementType(),
806 Builder);
807 Value *Vector = Builder.CreateAlignedLoad(
808 GEP, getAlignForIndex(I, Stride, VType->getElementType(), MAlign),
809 IsVolatile, "col.load");
810
811 Result.addVector(Vector);
812 }
813 return Result.addNumLoads(getNumOps(Result.getVectorTy()) *
814 Result.getNumVectors());
815 }
816
817 /// Loads a sub-matrix with shape \p ResultShape from a matrix with shape
818 /// \p MatrixShape, starting at \p MatrixPtr[I][J].
819 MatrixTy loadMatrix(Value *MatrixPtr, MaybeAlign Align, bool IsVolatile,
820 ShapeInfo MatrixShape, Value *I, Value *J,
821 ShapeInfo ResultShape, Type *EltTy,
822 IRBuilder<> &Builder) {
823
824 Value *Offset = Builder.CreateAdd(
825 Builder.CreateMul(J, Builder.getInt64(MatrixShape.getStride())), I);
826
827 unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
828 Value *EltPtr =
829 Builder.CreatePointerCast(MatrixPtr, PointerType::get(EltTy, AS));
830 Value *TileStart = Builder.CreateGEP(EltTy, EltPtr, Offset);
831 auto *TileTy = FixedVectorType::get(EltTy, ResultShape.NumRows *
832 ResultShape.NumColumns);
833 Type *TilePtrTy = PointerType::get(TileTy, AS);
834 Value *TilePtr =
835 Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
836
837 return loadMatrix(TileTy, TilePtr, Align,
838 Builder.getInt64(MatrixShape.getStride()), IsVolatile,
839 ResultShape, Builder);
840 }
841
842 /// Lower a load instruction with shape information.
843 void LowerLoad(Instruction *Inst, Value *Ptr, MaybeAlign Align, Value *Stride,
844 bool IsVolatile, ShapeInfo Shape) {
845 IRBuilder<> Builder(Inst);
846 finalizeLowering(Inst,
847 loadMatrix(Inst->getType(), Ptr, Align, Stride, IsVolatile,
848 Shape, Builder),
849 Builder);
850 }
851
852 /// Lowers llvm.matrix.column.major.load.
853 ///
854 /// The intrinsic loads a matrix from memory using a stride between columns.
855 void LowerColumnMajorLoad(CallInst *Inst) {
856 assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
857 "Intrinsic only supports column-major layout!");
858 Value *Ptr = Inst->getArgOperand(0);
859 Value *Stride = Inst->getArgOperand(1);
860 LowerLoad(Inst, Ptr, Inst->getParamAlign(0), Stride,
861 cast<ConstantInt>(Inst->getArgOperand(2))->isOne(),
862 {Inst->getArgOperand(3), Inst->getArgOperand(4)});
863 }
864
865 /// Stores the sub-matrix \p StoreVal into a matrix with shape \p MatrixShape,
866 /// starting at \p MatrixPtr[I][J].
867 void storeMatrix(const MatrixTy &StoreVal, Value *MatrixPtr,
868 MaybeAlign MAlign, bool IsVolatile, ShapeInfo MatrixShape,
869 Value *I, Value *J, Type *EltTy, IRBuilder<> &Builder) {
870 Value *Offset = Builder.CreateAdd(
871 Builder.CreateMul(J, Builder.getInt64(MatrixShape.getStride())), I);
872
873 unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
874 Value *EltPtr =
875 Builder.CreatePointerCast(MatrixPtr, PointerType::get(EltTy, AS));
876 Value *TileStart = Builder.CreateGEP(EltTy, EltPtr, Offset);
877 auto *TileTy = FixedVectorType::get(EltTy, StoreVal.getNumRows() *
878 StoreVal.getNumColumns());
879 Type *TilePtrTy = PointerType::get(TileTy, AS);
880 Value *TilePtr =
881 Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
882
883 storeMatrix(TileTy, StoreVal, TilePtr, MAlign,
884 Builder.getInt64(MatrixShape.getStride()), IsVolatile, Builder);
885 }
886
887 /// Store matrix \p StoreVal starting at \p Ptr and using \p Stride between
888 /// vectors.
889 MatrixTy storeMatrix(Type *Ty, MatrixTy StoreVal, Value *Ptr,
890 MaybeAlign MAlign, Value *Stride, bool IsVolatile,
891 IRBuilder<> &Builder) {
892 auto VType = cast<VectorType>(Ty);
893 Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
894 for (auto Vec : enumerate(StoreVal.vectors())) {
895 Value *GEP = computeVectorAddr(EltPtr, Builder.getInt64(Vec.index()),
896 Stride, StoreVal.getStride(),
897 VType->getElementType(), Builder);
898 Builder.CreateAlignedStore(Vec.value(), GEP,
899 getAlignForIndex(Vec.index(), Stride,
900 VType->getElementType(),
901 MAlign),
902 IsVolatile);
903 }
904 return MatrixTy().addNumStores(getNumOps(StoreVal.getVectorTy()) *
905 StoreVal.getNumVectors());
906 }
907
908 /// Lower a store instruction with shape information.
909 void LowerStore(Instruction *Inst, Value *Matrix, Value *Ptr, MaybeAlign A,
910 Value *Stride, bool IsVolatile, ShapeInfo Shape) {
911 IRBuilder<> Builder(Inst);
912 auto StoreVal = getMatrix(Matrix, Shape, Builder);
913 finalizeLowering(Inst,
914 storeMatrix(Matrix->getType(), StoreVal, Ptr, A, Stride,
915 IsVolatile, Builder),
916 Builder);
917 }
918
919 /// Lowers llvm.matrix.column.major.store.
920 ///
921 /// The intrinsic stores a matrix back to memory, using a stride between columns.
922 void LowerColumnMajorStore(CallInst *Inst) {
923 assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
924 "Intrinsic only supports column-major layout!");
925 Value *Matrix = Inst->getArgOperand(0);
926 Value *Ptr = Inst->getArgOperand(1);
927 Value *Stride = Inst->getArgOperand(2);
928 LowerStore(Inst, Matrix, Ptr, Inst->getParamAlign(1), Stride,
929 cast<ConstantInt>(Inst->getArgOperand(3))->isOne(),
930 {Inst->getArgOperand(4), Inst->getArgOperand(5)});
931 }
932
933 // Set elements I..I+NumElts-1 to Block
934 Value *insertVector(Value *Col, unsigned I, Value *Block,
935 IRBuilder<> &Builder) {
936
937 // First, bring Block to the same size as Col
938 unsigned BlockNumElts =
939 cast<FixedVectorType>(Block->getType())->getNumElements();
940 unsigned NumElts = cast<FixedVectorType>(Col->getType())->getNumElements();
941 assert(NumElts >= BlockNumElts && "Too few elements for current block");
942
943 Block = Builder.CreateShuffleVector(
944 Block, createSequentialMask(0, BlockNumElts, NumElts - BlockNumElts));
945
946 // If Col is 7 long and I is 2 and BlockNumElts is 2 the mask is: 0, 1, 7,
947 // 8, 4, 5, 6
948 SmallVector<int, 16> Mask;
949 unsigned i;
950 for (i = 0; i < I; i++)
951 Mask.push_back(i);
952
953 unsigned VecNumElts =
954 cast<FixedVectorType>(Col->getType())->getNumElements();
955 for (; i < I + BlockNumElts; i++)
956 Mask.push_back(i - I + VecNumElts);
957
958 for (; i < VecNumElts; i++)
959 Mask.push_back(i);
960
961 return Builder.CreateShuffleVector(Col, Block, Mask);
962 }
963
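/// Compute \p Sum + \p A * \p B, or just \p A * \p B if \p Sum is nullptr.
/// For floating-point operands with \p AllowContraction set, this emits a call
/// to the llvm.fmuladd intrinsic and lets the backend decide whether to form
/// an FMA; otherwise separate (f)mul and (f)add instructions are emitted.
/// \p NumComputeOps is incremented by the estimated number of vector ops.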
964 Value *createMulAdd(Value *Sum, Value *A, Value *B, bool UseFPOp,
965 IRBuilder<> &Builder, bool AllowContraction,
966 unsigned &NumComputeOps) {
967 NumComputeOps += getNumOps(A->getType());
968 if (!Sum)
969 return UseFPOp ? Builder.CreateFMul(A, B) : Builder.CreateMul(A, B);
970
971 if (UseFPOp) {
972 if (AllowContraction) {
973 // Use fmuladd for floating point operations and let the backend decide
974 // if that's profitable.
975 Function *FMulAdd = Intrinsic::getDeclaration(
976 Func.getParent(), Intrinsic::fmuladd, A->getType());
977 return Builder.CreateCall(FMulAdd, {A, B, Sum});
978 }
979 NumComputeOps += getNumOps(A->getType());
980 Value *Mul = Builder.CreateFMul(A, B);
981 return Builder.CreateFAdd(Sum, Mul);
982 }
983
984 NumComputeOps += getNumOps(A->getType());
985 Value *Mul = Builder.CreateMul(A, B);
986 return Builder.CreateAdd(Sum, Mul);
987 }
988
989 /// Cache \p Matrix as result of \p Inst and update the uses of \p Inst. For
990 /// users with shape information, there's nothing to do: they will use the
991 /// cached value when they are lowered. For other users, \p Matrix is
992 /// flattened and the uses are updated to use it. Also marks \p Inst for
993 /// deletion.
994 void finalizeLowering(Instruction *Inst, MatrixTy Matrix,
995 IRBuilder<> &Builder) {
996 Inst2ColumnMatrix.insert(std::make_pair(Inst, Matrix));
997
998 ToRemove.push_back(Inst);
999 Value *Flattened = nullptr;
1000 for (auto I = Inst->use_begin(), E = Inst->use_end(); I != E;) {
1001 Use &U = *I++;
1002 if (ShapeMap.find(U.getUser()) == ShapeMap.end()) {
1003 if (!Flattened)
1004 Flattened = Matrix.embedInVector(Builder);
1005 U.set(Flattened);
1006 }
1007 }
1008 }
1009
1010 /// Compute \p Result += \p A * \p B for input matrices with left-associating
1011 /// addition.
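/// As a sketch, for a column-major multiply each result column J is built as
///   Result(:, J) = A(:, 0) * B(0, J) + A(:, 1) * B(1, J) + ...
/// i.e. columns of A are scaled by scalar elements broadcast from B and then
/// accumulated, so the adds can be vectorized without reassociation.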
1012 void emitMatrixMultiply(MatrixTy &Result, const MatrixTy &A,
1013 const MatrixTy &B, bool AllowContraction,
1014 IRBuilder<> &Builder, bool isTiled) {
1015 const unsigned VF = std::max<unsigned>(
1016 TTI.getRegisterBitWidth(true) /
1017 Result.getElementType()->getPrimitiveSizeInBits().getFixedSize(),
1018 1U);
1019 unsigned R = Result.getNumRows();
1020 unsigned C = Result.getNumColumns();
1021 unsigned M = A.getNumColumns();
1022
1023 bool IsFP = Result.getElementType()->isFloatingPointTy();
1024 assert(A.isColumnMajor() == B.isColumnMajor() &&
1025 Result.isColumnMajor() == A.isColumnMajor() &&
1026 "operands must agree on matrix layout");
1027 unsigned NumComputeOps = 0;
1028 if (A.isColumnMajor()) {
1029 // Multiply columns from the first operand with scalars from the second
1030 // operand. Then move along the K axes and accumulate the columns. With
1031 // this the adds can be vectorized without reassociation.
1032 for (unsigned J = 0; J < C; ++J) {
1033 unsigned BlockSize = VF;
1034 // If Result is zero, we don't need to accumulate in the K==0 iteration.
1035 bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));
1036
1037 for (unsigned I = 0; I < R; I += BlockSize) {
1038 // Gradually lower the vectorization factor to cover the remainder.
1039 while (I + BlockSize > R)
1040 BlockSize /= 2;
1041
1042 Value *Sum = isTiled ? Result.extractVector(I, J, BlockSize, Builder)
1043 : nullptr;
1044 for (unsigned K = 0; K < M; ++K) {
1045 Value *L = A.extractVector(I, K, BlockSize, Builder);
1046 Value *RH = Builder.CreateExtractElement(B.getColumn(J), K);
1047 Value *Splat = Builder.CreateVectorSplat(BlockSize, RH, "splat");
1048 Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, L, Splat,
1049 Result.getElementType()->isFloatingPointTy(),
1050 Builder, AllowContraction, NumComputeOps);
1051 }
1052 Result.setVector(J,
1053 insertVector(Result.getVector(J), I, Sum, Builder));
1054 }
1055 }
1056 } else {
1057 // Multiply rows from the second operand with scalars from the first
1058 // operand. Then move along the K axes and accumulate the rows. With this
1059 // the adds can be vectorized without reassociation.
1060 for (unsigned I = 0; I < R; ++I) {
1061 unsigned BlockSize = VF;
1062 bool isSumZero = isa<ConstantAggregateZero>(Result.getRow(I));
1063 for (unsigned J = 0; J < C; J += BlockSize) {
1064 // Gradually lower the vectorization factor to cover the remainder.
1065 while (J + BlockSize > C)
1066 BlockSize /= 2;
1067
1068 Value *Sum = nullptr;
1069 for (unsigned K = 0; K < M; ++K) {
1070 Value *R = B.extractVector(K, J, BlockSize, Builder);
1071 Value *LH = Builder.CreateExtractElement(A.getVector(I), K);
1072 Value *Splat = Builder.CreateVectorSplat(BlockSize, LH, "splat");
1073 Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, Splat, R,
1074 IsFP, Builder, AllowContraction, NumComputeOps);
1075 }
1076 Result.setVector(I,
1077 insertVector(Result.getVector(I), J, Sum, Builder));
1078 }
1079 }
1080 }
1081 Result.addNumComputeOps(NumComputeOps);
1082 }
1083
1084 /// Ensure that the memory in \p Load does not alias \p Store by potentially
1085 /// copying it to a new location. The new location, or otherwise the original
1086 /// location, is returned.
1087 Value *getNonAliasingPointer(LoadInst *Load, StoreInst *Store,
1088 CallInst *MatMul) {
1089 MemoryLocation StoreLoc = MemoryLocation::get(Store);
1090 MemoryLocation LoadLoc = MemoryLocation::get(Load);
1091
1092 AliasResult LdAliased = AA->alias(LoadLoc, StoreLoc);
1093
1094 // If we can statically determine noalias we're good.
1095 if (!LdAliased)
1096 return Load->getPointerOperand();
1097
1098 // Create code to check if the memory locations of the Load and Store
1099 // overlap and if they do, copy Load's operand to a new buffer.
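// The emitted control flow is roughly (sketch): Check0 branches to
// alias_cont (Check1) or no_alias (Fusion); Check1 branches to copy (Copy)
// or Fusion; Copy falls through to Fusion, where a PHI selects either the
// original load pointer or the freshly copied alloca.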
1100
1101 // First, create new blocks for the 2nd part of the check and the copy.
1102 BasicBlock *Check0 = MatMul->getParent();
1103 // FIXME: Use lazy DTU and update SplitBlock to accept a DTU instead of a
1104 // DT. Manually collect dominator tree updates, to avoid unnecessary work,
1105 // as we adjust Check0 and Check1's branches.
1106 SmallVector<DominatorTree::UpdateType, 4> DTUpdates;
1107 for (BasicBlock *Succ : successors(Check0))
1108 DTUpdates.push_back({DT->Delete, Check0, Succ});
1109
1110 BasicBlock *Check1 =
1111 SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI,
1112 nullptr, "alias_cont");
1113 BasicBlock *Copy =
1114 SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI,
1115 nullptr, "copy");
1116 BasicBlock *Fusion =
1117 SplitBlock(MatMul->getParent(), MatMul, (DomTreeUpdater *)nullptr, LI,
1118 nullptr, "no_alias");
1119
1120 // Check if the loaded memory location begins before the end of the store
1121 // location. If the condition holds, they might overlap, otherwise they are
1122 // guaranteed to not overlap.
1123 IRBuilder<> Builder(MatMul);
1124 Check0->getTerminator()->eraseFromParent();
1125 Builder.SetInsertPoint(Check0);
1126 Type *IntPtrTy = Builder.getIntPtrTy(Load->getModule()->getDataLayout());
1127 Value *StoreBegin = Builder.CreatePtrToInt(
1128 const_cast<Value *>(StoreLoc.Ptr), IntPtrTy, "store.begin");
1129 Value *StoreEnd = Builder.CreateAdd(
1130 StoreBegin, ConstantInt::get(IntPtrTy, StoreLoc.Size.getValue()),
1131 "store.end", true, true);
1132 Value *LoadBegin = Builder.CreatePtrToInt(const_cast<Value *>(LoadLoc.Ptr),
1133 IntPtrTy, "load.begin");
1134 Builder.CreateCondBr(Builder.CreateICmpULT(LoadBegin, StoreEnd), Check1,
1135 Fusion);
1136
1137 // Check if the store begins before the end of the load location. If the
1138 // condition holds, they alias, otherwise they are guaranteed to not
1139 // overlap.
1140 Check1->getTerminator()->eraseFromParent();
1141 Builder.SetInsertPoint(Check1, Check1->begin());
1142 Value *LoadEnd = Builder.CreateAdd(
1143 LoadBegin, ConstantInt::get(IntPtrTy, LoadLoc.Size.getValue()),
1144 "load.end", true, true);
1145 Builder.CreateCondBr(Builder.CreateICmpULT(StoreBegin, LoadEnd), Copy,
1146 Fusion);
1147
1148 // Copy load operand to new alloca.
1149 Builder.SetInsertPoint(Copy, Copy->begin());
1150 AllocaInst *NewLd =
1151 Builder.CreateAlloca(Load->getType(), Load->getPointerAddressSpace());
1152 Builder.CreateMemCpy(NewLd, NewLd->getAlign(),
1153 Load->getPointerOperand(), Load->getAlign(),
1154 LoadLoc.Size.getValue());
1155 Builder.SetInsertPoint(Fusion, Fusion->begin());
1156 PHINode *PHI = Builder.CreatePHI(Load->getPointerOperandType(), 3);
1157 PHI->addIncoming(Load->getPointerOperand(), Check0);
1158 PHI->addIncoming(Load->getPointerOperand(), Check1);
1159 PHI->addIncoming(NewLd, Copy);
1160
1161 // Adjust DT.
1162 DTUpdates.push_back({DT->Insert, Check0, Check1});
1163 DTUpdates.push_back({DT->Insert, Check0, Fusion});
1164 DTUpdates.push_back({DT->Insert, Check1, Copy});
1165 DTUpdates.push_back({DT->Insert, Check1, Fusion});
1166 DT->applyUpdates(DTUpdates);
1167 return PHI;
1168 }
1169
1170 bool isFusionProfitable(CallInst *MatMul) {
1171 if (ForceFusion)
1172 return true;
1173
1174 ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1175 ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1176
1177 const unsigned R = LShape.NumRows;
1178 const unsigned C = RShape.NumColumns;
1179 const unsigned M = LShape.NumColumns;
1180 auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1181
1182 const unsigned VF =
1183 std::max<unsigned>(TTI.getRegisterBitWidth(true) /
1184 EltType->getPrimitiveSizeInBits().getFixedSize(),
1185 1U);
1186
1187 // Cost model for tiling
1188 //
1189 // For tiling to be beneficial, we need reuse either along the R or
1190 // the C axis. We vectorize along the R axis so that means at least
1191 // 3 elements.
1192 // TODO: Also consider cost of copying if operands alias.
1193 if (R <= VF && C == 1)
1194 return false;
1195 // Then we need enough elements to exceed the number of vector
1196 // registers we have. Note that this is an oversimplification since
1197 // fusing also takes some extra loads which may exceed the number of
1198 // reloads necessary.
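// Illustrative numbers: assuming 256-bit vector registers and double
// elements (VF = 4), a fused 8x8 * 8x8 multiply needs Op0Regs = Op1Regs =
// (8 + 3) / 4 * 8 = 16, so fusion is considered profitable once 32 exceeds
// the number of available vector registers.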
1199 unsigned Op0Regs = (R + VF - 1) / VF * M;
1200 unsigned Op1Regs = (M + VF - 1) / VF * C;
1201 return Op0Regs + Op1Regs > TTI.getNumberOfRegisters(true);
1202 }
1203
1204 MatrixTy getZeroMatrix(Type *EltType, unsigned R, unsigned C) {
1205 MatrixTy Res;
1206 auto *ColumType = FixedVectorType::get(EltType, R);
1207 for (unsigned I = 0; I < C; ++I)
1208 Res.addVector(ConstantAggregateZero::get(ColumType));
1209 return Res;
1210 }
1211
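/// Emit a loop nest that computes the tiled matrix multiply feeding \p Store:
/// the outer loops step over result columns and rows in TileSize increments,
/// while the inner loop walks the shared dimension, loading TileSize x
/// TileSize tiles of both operands, accumulating into PHI-carried result
/// vectors and storing each finished tile after the inner loop (a sketch of
/// the structure; the actual blocks are created by TileInfo below).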
1212 void createTiledLoops(CallInst *MatMul, Value *LPtr, ShapeInfo LShape,
1213 Value *RPtr, ShapeInfo RShape, StoreInst *Store,
1214 bool AllowContract) {
1215 auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1216
1217 // Create the main tiling loop nest.
1218 TileInfo TI(LShape.NumRows, RShape.NumColumns, LShape.NumColumns, TileSize);
1219 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);
1220 Instruction *InsertI = cast<Instruction>(MatMul);
1221 BasicBlock *Start = InsertI->getParent();
1222 BasicBlock *End =
1223 SplitBlock(InsertI->getParent(), InsertI, DT, LI, nullptr, "continue");
1224 IRBuilder<> Builder(MatMul);
1225 BasicBlock *InnerBody = TI.CreateTiledLoops(Start, End, Builder, DTU, *LI);
1226
1227 Type *TileVecTy =
1228 FixedVectorType::get(MatMul->getType()->getScalarType(), TileSize);
1229 MatrixTy TileResult;
1230 // Insert in the inner loop header.
1231 Builder.SetInsertPoint(TI.InnerLoopHeader->getTerminator());
1232 // Create PHI nodes for the result columns to accumulate across iterations.
1233 SmallVector<PHINode *, 4> ColumnPhis;
1234 for (unsigned I = 0; I < TileSize; I++) {
1235 auto *Phi = Builder.CreatePHI(TileVecTy, 2, "result.vec." + Twine(I));
1236 Phi->addIncoming(ConstantAggregateZero::get(TileVecTy),
1237 TI.RowLoopHeader->getSingleSuccessor());
1238 TileResult.addVector(Phi);
1239 ColumnPhis.push_back(Phi);
1240 }
1241
1242 // Insert in the inner loop body, which computes
1243 // Res += Load(CurrentRow, K) * Load(K, CurrentColumn)
1244 Builder.SetInsertPoint(InnerBody->getTerminator());
1245 // Load tiles of the operands.
1246 MatrixTy A = loadMatrix(LPtr, {}, false, LShape, TI.CurrentRow, TI.CurrentK,
1247 {TileSize, TileSize}, EltType, Builder);
1248 MatrixTy B = loadMatrix(RPtr, {}, false, RShape, TI.CurrentK, TI.CurrentCol,
1249 {TileSize, TileSize}, EltType, Builder);
1250 emitMatrixMultiply(TileResult, A, B, AllowContract, Builder, true);
1251 // Store result after the inner loop is done.
1252 Builder.SetInsertPoint(TI.RowLoopLatch->getTerminator());
1253 storeMatrix(TileResult, Store->getPointerOperand(), Store->getAlign(),
1254 Store->isVolatile(), {LShape.NumRows, RShape.NumColumns},
1255 TI.CurrentRow, TI.CurrentCol, EltType, Builder);
1256
1257 for (unsigned I = 0; I < TileResult.getNumVectors(); I++)
1258 ColumnPhis[I]->addIncoming(TileResult.getVector(I), TI.InnerLoopLatch);
1259
1260 // Force unrolling of a few iterations of the inner loop, to make sure there
1261 // is enough work per iteration.
1262 // FIXME: The unroller should make this decision directly instead, but
1263 // currently the cost-model is not up to the task.
1264 unsigned InnerLoopUnrollCount = std::min(10u, LShape.NumColumns / TileSize);
1265 addStringMetadataToLoop(LI->getLoopFor(TI.InnerLoopHeader),
1266 "llvm.loop.unroll.count", InnerLoopUnrollCount);
1267 }
1268
1269 void emitSIMDTiling(CallInst *MatMul, LoadInst *LoadOp0, LoadInst *LoadOp1,
1270 StoreInst *Store,
1271 SmallPtrSetImpl<Instruction *> &FusedInsts) {
1272 assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
1273 "Tiling only supported for column-major matrixes at the moment!");
1274 if (!isFusionProfitable(MatMul))
1275 return;
1276
1277 ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1278 ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1279
1280 const unsigned R = LShape.NumRows;
1281 const unsigned C = RShape.NumColumns;
1282 const unsigned M = LShape.NumColumns;
1283 auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1284
1285 Value *APtr = getNonAliasingPointer(LoadOp0, Store, MatMul);
1286 Value *BPtr = getNonAliasingPointer(LoadOp1, Store, MatMul);
1287 Value *CPtr = Store->getPointerOperand();
1288
1289 bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) &&
1290 MatMul->hasAllowContract());
1291 if (TileUseLoops && (R % TileSize == 0 && C % TileSize == 0))
1292 createTiledLoops(MatMul, APtr, LShape, BPtr, RShape, Store,
1293 AllowContract);
1294 else {
1295 IRBuilder<> Builder(Store);
1296 for (unsigned J = 0; J < C; J += TileSize)
1297 for (unsigned I = 0; I < R; I += TileSize) {
1298 const unsigned TileR = std::min(R - I, unsigned(TileSize));
1299 const unsigned TileC = std::min(C - J, unsigned(TileSize));
1300 MatrixTy Res = getZeroMatrix(EltType, TileR, TileC);
1301
1302 for (unsigned K = 0; K < M; K += TileSize) {
1303 const unsigned TileM = std::min(M - K, unsigned(TileSize));
1304 MatrixTy A =
1305 loadMatrix(APtr, LoadOp0->getAlign(), LoadOp0->isVolatile(),
1306 LShape, Builder.getInt64(I), Builder.getInt64(K),
1307 {TileR, TileM}, EltType, Builder);
1308 MatrixTy B =
1309 loadMatrix(BPtr, LoadOp1->getAlign(), LoadOp1->isVolatile(),
1310 RShape, Builder.getInt64(K), Builder.getInt64(J),
1311 {TileM, TileC}, EltType, Builder);
1312 emitMatrixMultiply(Res, A, B, AllowContract, Builder, true);
1313 }
1314 storeMatrix(Res, CPtr, Store->getAlign(), Store->isVolatile(), {R, M},
1315 Builder.getInt64(I), Builder.getInt64(J), EltType,
1316 Builder);
1317 }
1318 }
1319
1320 // Mark eliminated instructions as fused and remove them.
1321 FusedInsts.insert(Store);
1322 FusedInsts.insert(MatMul);
1323 Store->eraseFromParent();
1324 MatMul->eraseFromParent();
1325 if (LoadOp0->hasNUses(0)) {
1326 FusedInsts.insert(LoadOp0);
1327 LoadOp0->eraseFromParent();
1328 }
1329 if (LoadOp1->hasNUses(0)) {
1330 FusedInsts.insert(LoadOp1);
1331 LoadOp1->eraseFromParent();
1332 }
1333 }
1334
1335 /// Try to lower matrix multiply chains by fusing operations.
1336 ///
1337 /// Currently we only lower {ld, ld} -> matmul -> st chains.
1338 ///
1339 /// No need to return a MatrixTy object for the result of the operation, since
1340 /// the single store user will be lowered as part of this. Instructions that
1341 /// are completely eliminated by fusion are added to \p FusedInsts.
1342 void LowerMatrixMultiplyFused(CallInst *MatMul,
1343 SmallPtrSetImpl<Instruction *> &FusedInsts) {
1344 if (!FuseMatrix || !MatMul->hasOneUse() ||
1345 MatrixLayout != MatrixLayoutTy::ColumnMajor || !DT)
1346 return;
1347
1348 assert(AA && LI && "Analyses should be available");
1349
1350 auto *LoadOp0 = dyn_cast<LoadInst>(MatMul->getOperand(0));
1351 auto *LoadOp1 = dyn_cast<LoadInst>(MatMul->getOperand(1));
1352 auto *Store = dyn_cast<StoreInst>(*MatMul->user_begin());
1353 if (LoadOp0 && LoadOp1 && Store) {
1354 // The store address must dominate the MatMul instruction, otherwise
1355 // we create invalid IR.
1356 // FIXME: See if we can hoist the store address computation.
1357 auto *AddrI = dyn_cast<Instruction>(Store->getOperand(1));
1358 if (AddrI && (!DT->dominates(AddrI, MatMul)))
1359 return;
1360
1361 emitSIMDTiling(MatMul, LoadOp0, LoadOp1, Store, FusedInsts);
1362 return;
1363 }
1364 }
1365
1366 /// Lowers llvm.matrix.multiply.
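///
/// As a sketch (hypothetical operands), a 3x2 * 2x2 multiply such as
///   call <6 x float> @llvm.matrix.multiply.v6f32.v6f32.v4f32(
///            <6 x float> %A, <4 x float> %B, i32 3, i32 2, i32 2)
/// is expanded into elementwise vector multiplies and adds (or fmuladd calls
/// when contraction is allowed) over the flattened operand vectors.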
1367 void LowerMultiply(CallInst *MatMul) {
1368 IRBuilder<> Builder(MatMul);
1369 auto *EltType = cast<VectorType>(MatMul->getType())->getElementType();
1370 ShapeInfo LShape(MatMul->getArgOperand(2), MatMul->getArgOperand(3));
1371 ShapeInfo RShape(MatMul->getArgOperand(3), MatMul->getArgOperand(4));
1372
1373 const MatrixTy &Lhs = getMatrix(MatMul->getArgOperand(0), LShape, Builder);
1374 const MatrixTy &Rhs = getMatrix(MatMul->getArgOperand(1), RShape, Builder);
1375 assert(Lhs.getElementType() == Rhs.getElementType() &&
1376 "Matrix multiply argument element types do not match.");
1377
1378 const unsigned R = LShape.NumRows;
1379 const unsigned C = RShape.NumColumns;
1380 assert(LShape.NumColumns == RShape.NumRows);
1381
1382 // Initialize the output
1383 MatrixTy Result(R, C, EltType);
1384 assert(Lhs.getElementType() == Result.getElementType() &&
1385 "Matrix multiply result element type does not match arguments.");
1386
1387 bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) &&
1388 MatMul->hasAllowContract());
1389 emitMatrixMultiply(Result, Lhs, Rhs, AllowContract, Builder, false);
1390 finalizeLowering(MatMul, Result, Builder);
1391 }
1392
1393 /// Lowers llvm.matrix.transpose.
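///
/// For example (hypothetical operands), transposing a 2x3 matrix via
///   call <6 x float> @llvm.matrix.transpose.v6f32(<6 x float> %A, i32 2, i32 3)
/// is rewritten into extractelement/insertelement sequences that rebuild the
/// result vectors with row and column indices swapped.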
1394 void LowerTranspose(CallInst *Inst) {
1395 MatrixTy Result;
1396 IRBuilder<> Builder(Inst);
1397 Value *InputVal = Inst->getArgOperand(0);
1398 VectorType *VectorTy = cast<VectorType>(InputVal->getType());
1399 ShapeInfo ArgShape(Inst->getArgOperand(1), Inst->getArgOperand(2));
1400 MatrixTy InputMatrix = getMatrix(InputVal, ArgShape, Builder);
1401
1402 const unsigned NewNumVecs =
1403 InputMatrix.isColumnMajor() ? ArgShape.NumRows : ArgShape.NumColumns;
1404 const unsigned NewNumElts =
1405 InputMatrix.isColumnMajor() ? ArgShape.NumColumns : ArgShape.NumRows;
1406
1407 for (unsigned I = 0; I < NewNumVecs; ++I) {
1408 // Build a single result vector. First initialize it.
1409 Value *ResultVector = UndefValue::get(
1410 FixedVectorType::get(VectorTy->getElementType(), NewNumElts));
1411 // Go through the old elements and insert them into the resulting vector.
1412 for (auto J : enumerate(InputMatrix.vectors())) {
1413 Value *Elt = Builder.CreateExtractElement(J.value(), I);
1414 // Row and column indices are transposed.
1415 ResultVector =
1416 Builder.CreateInsertElement(ResultVector, Elt, J.index());
1417 }
1418 Result.addVector(ResultVector);
1419 }
1420
1421 // TODO: Improve estimate of operations needed for transposes. Currently we
1422 // just count the insertelement/extractelement instructions, but do not
1423 // account for later simplifications/combines.
1424 finalizeLowering(
1425 Inst,
1426 Result.addNumComputeOps(2 * ArgShape.NumRows * ArgShape.NumColumns),
1427 Builder);
1428 }
1429
1430 /// Lower load instructions, if shape information is available.
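///
/// For instance (hypothetical shape), a load with an associated 3x2 shape is
/// split into per-column vector loads of <3 x elt>, separated by the shape's
/// stride.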
1431 bool VisitLoad(LoadInst *Inst, Value *Ptr, IRBuilder<> &Builder) {
1432 auto I = ShapeMap.find(Inst);
1433 if (I == ShapeMap.end())
1434 return false;
1435
1436 LowerLoad(Inst, Ptr, Inst->getAlign(),
1437 Builder.getInt64(I->second.getStride()), Inst->isVolatile(),
1438 I->second);
1439 return true;
1440 }
1441
/// Lower store instructions, if shape information is available.
1442 bool VisitStore(StoreInst *Inst, Value *StoredVal, Value *Ptr,
1443 IRBuilder<> &Builder) {
1444 auto I = ShapeMap.find(StoredVal);
1445 if (I == ShapeMap.end())
1446 return false;
1447
1448 LowerStore(Inst, StoredVal, Ptr, Inst->getAlign(),
1449 Builder.getInt64(I->second.getStride()), Inst->isVolatile(),
1450 I->second);
1451 return true;
1452 }
1453
1454 /// Lower binary operators, if shape information is available.
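///
/// For example (hypothetical shape), an fadd whose operands carry a 4x2 shape
/// is emitted as two <4 x elt> vector fadds, one per column vector.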
1455 bool VisitBinaryOperator(BinaryOperator *Inst) {
1456 auto I = ShapeMap.find(Inst);
1457 if (I == ShapeMap.end())
1458 return false;
1459
1460 Value *Lhs = Inst->getOperand(0);
1461 Value *Rhs = Inst->getOperand(1);
1462
1463 IRBuilder<> Builder(Inst);
1464 ShapeInfo &Shape = I->second;
1465
1466 MatrixTy Result;
1467 MatrixTy A = getMatrix(Lhs, Shape, Builder);
1468 MatrixTy B = getMatrix(Rhs, Shape, Builder);
1469 assert(A.isColumnMajor() == B.isColumnMajor() &&
1470 Result.isColumnMajor() == A.isColumnMajor() &&
1471 "operands must agree on matrix layout");
1472
1473 // Helper to perform binary op on vectors.
1474 auto BuildVectorOp = [&Builder, Inst](Value *LHS, Value *RHS) {
1475 switch (Inst->getOpcode()) {
1476 case Instruction::Add:
1477 return Builder.CreateAdd(LHS, RHS);
1478 case Instruction::Mul:
1479 return Builder.CreateMul(LHS, RHS);
1480 case Instruction::Sub:
1481 return Builder.CreateSub(LHS, RHS);
1482 case Instruction::FAdd:
1483 return Builder.CreateFAdd(LHS, RHS);
1484 case Instruction::FMul:
1485 return Builder.CreateFMul(LHS, RHS);
1486 case Instruction::FSub:
1487 return Builder.CreateFSub(LHS, RHS);
1488 default:
1489 llvm_unreachable("Unsupported binary operator for matrix");
1490 }
1491 };
1492
1493 for (unsigned I = 0; I < Shape.getNumVectors(); ++I)
1494 Result.addVector(BuildVectorOp(A.getVector(I), B.getVector(I)));
1495
1496 finalizeLowering(Inst,
1497 Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
1498 Result.getNumVectors()),
1499 Builder);
1500 return true;
1501 }
1502
1503 /// Lower unary operators, if shape information is available.
1504 bool VisitUnaryOperator(UnaryOperator *Inst) {
1505 auto I = ShapeMap.find(Inst);
1506 if (I == ShapeMap.end())
1507 return false;
1508
1509 Value *Op = Inst->getOperand(0);
1510
1511 IRBuilder<> Builder(Inst);
1512 ShapeInfo &Shape = I->second;
1513
1514 MatrixTy Result;
1515 MatrixTy M = getMatrix(Op, Shape, Builder);
1516
1517 // Helper to perform unary op on vectors.
1518 auto BuildVectorOp = [&Builder, Inst](Value *Op) {
1519 switch (Inst->getOpcode()) {
1520 case Instruction::FNeg:
1521 return Builder.CreateFNeg(Op);
1522 default:
1523 llvm_unreachable("Unsupported unary operator for matrix");
1524 }
1525 };
1526
1527 for (unsigned I = 0; I < Shape.getNumVectors(); ++I)
1528 Result.addVector(BuildVectorOp(M.getVector(I)));
1529
1530 finalizeLowering(Inst,
1531 Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
1532 Result.getNumVectors()),
1533 Builder);
1534 return true;
1535 }
1536
1537 /// Helper to linearize a matrix expression tree into a string. Currently
1538 /// matrix expressions are linearized by starting at an expression leaf and
1539 /// linearizing bottom up.
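///
/// A linearized expression may look roughly like the following (illustrative
/// only; exact spelling and line breaks depend on the expression):
///   store(
///    multiply.4x4.4x4.double(
///     load(addr %A),
///     load(addr %B)),
///    addr %C)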
1540 struct ExprLinearizer {
1541 unsigned LengthToBreak = 100;
1542 std::string Str;
1543 raw_string_ostream Stream;
1544 unsigned LineLength = 0;
1545 const DataLayout &DL;
1546
1547 /// Mapping from instructions to matrixes. It is used to identify
1548 /// matrix instructions.
1549 const MapVector<Value *, MatrixTy> &Inst2Matrix;
1550
1551 /// Mapping from values to the leaves of all expressions that the value is
1552 /// part of.
1553 const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared;
1554
1555 /// Set of matrix expressions in the scope of a given DISubprogram.
1556 const SmallSetVector<Value *, 32> &ExprsInSubprogram;
1557
1558 /// Leaf node of the expression to linearize.
1559 Value *Leaf;
1560
1561 /// Used to keep track of sub-expressions that get reused while linearizing
1562 /// the expression. Re-used sub-expressions are marked as (reused).
1563 SmallPtrSet<Value *, 8> ReusedExprs;
1564
1565 ExprLinearizer(const DataLayout &DL,
1566 const MapVector<Value *, MatrixTy> &Inst2Matrix,
1567 const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
1568 const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1569 Value *Leaf)
1570 : Str(), Stream(Str), DL(DL), Inst2Matrix(Inst2Matrix), Shared(Shared),
1571 ExprsInSubprogram(ExprsInSubprogram), Leaf(Leaf) {}
1572
1573 void indent(unsigned N) {
1574 LineLength += N;
1575 for (unsigned i = 0; i < N; i++)
1576 Stream << " ";
1577 }
1578
1579 void lineBreak() {
1580 Stream << "\n";
1581 LineLength = 0;
1582 }
1583
1584 void maybeIndent(unsigned Indent) {
1585 if (LineLength >= LengthToBreak)
1586 lineBreak();
1587
1588 if (LineLength == 0)
1589 indent(Indent);
1590 }
1591
1592 void write(StringRef S) {
1593 LineLength += S.size();
1594 Stream << S;
1595 }
1596
1597 Value *getUnderlyingObjectThroughLoads(Value *V) {
1598 if (Value *Ptr = getPointerOperand(V))
1599 return getUnderlyingObjectThroughLoads(Ptr);
1600 else if (V->getType()->isPointerTy())
1601 return getUnderlyingObject(V);
1602 return V;
1603 }
1604
1605 /// Returns true if \p V is a matrix value in the given subprogram.
1606 bool isMatrix(Value *V) const { return ExprsInSubprogram.count(V); }
1607
1608 /// If \p V is a matrix value, print its shape as NumRows x NumColumns to
1609 /// \p SS.
1610 void prettyPrintMatrixType(Value *V, raw_string_ostream &SS) {
1611 auto M = Inst2Matrix.find(V);
1612 if (M == Inst2Matrix.end())
1613 SS << "unknown";
1614 else {
1615 SS << M->second.getNumRows();
1616 SS << "x";
1617 SS << M->second.getNumColumns();
1618 }
1619 }
1620
1621 /// Write the called function name. Handles calls to llvm.matrix.*
1622 /// specially: we write the name, followed by the dimensions of the input
1623 /// matrixes, followed by the scalar type name.
1624 void writeFnName(CallInst *CI) {
1625 if (!CI->getCalledFunction())
1626 write("<no called fn>");
1627 else {
1628 StringRef Name = CI->getCalledFunction()->getName();
1629 if (!Name.startswith("llvm.matrix")) {
1630 write(Name);
1631 return;
1632 }
1633 IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
1634 write(StringRef(Intrinsic::getName(II->getIntrinsicID(), {}))
1635 .drop_front(StringRef("llvm.matrix.").size()));
1636 write(".");
1637 std::string Tmp;
1638 raw_string_ostream SS(Tmp);
1639
1640 switch (II->getIntrinsicID()) {
1641 case Intrinsic::matrix_multiply:
1642 prettyPrintMatrixType(II->getOperand(0), SS);
1643 SS << ".";
1644 prettyPrintMatrixType(II->getOperand(1), SS);
1645 SS << "." << *II->getType()->getScalarType();
1646 break;
1647 case Intrinsic::matrix_transpose:
1648 prettyPrintMatrixType(II->getOperand(0), SS);
1649 SS << "." << *II->getType()->getScalarType();
1650 break;
1651 case Intrinsic::matrix_column_major_load:
1652 prettyPrintMatrixType(II, SS);
1653 SS << "." << *II->getType()->getScalarType();
1654 break;
1655 case Intrinsic::matrix_column_major_store:
1656 prettyPrintMatrixType(II->getOperand(0), SS);
1657 SS << "." << *II->getOperand(0)->getType()->getScalarType();
1658 break;
1659 default:
1660 llvm_unreachable("Unhandled case");
1661 }
1662 SS.flush();
1663 write(Tmp);
1664 }
1665 }
1666
1667 unsigned getNumShapeArgs(CallInst *CI) const {
1668 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI)) {
1669 switch (II->getIntrinsicID()) {
1670 case Intrinsic::matrix_multiply:
1671 return 3;
1672 case Intrinsic::matrix_transpose:
1673 return 2;
1674 case Intrinsic::matrix_column_major_load:
1675 case Intrinsic::matrix_column_major_store:
1676 return 3;
1677 default:
1678 return 0;
1679 }
1680 }
1681 return 0;
1682 }
1683
1684 /// Special printing for values: for pointers, we print whether they refer to
1685 /// a (function) external address or a stack address; for other values we
1686 /// either print the constant or "scalar"/"matrix".
1687 void write(Value *V) {
1688 V = getUnderlyingObjectThroughLoads(V);
1689 if (V->getType()->isPointerTy()) {
1690 if (isa<AllocaInst>(V)) {
1691 Stream << "stack addr";
1692 LineLength += StringRef("stack addr").size();
1693 } else {
1694 Stream << "addr";
1695 LineLength += StringRef("addr").size();
1696 }
1697 if (!V->getName().empty()) {
1698 Stream << " %" << V->getName() << "";
1699 LineLength += V->getName().size() + 2;
1700 }
1701 return;
1702 }
1703
1704 std::string Tmp;
1705 raw_string_ostream TmpStream(Tmp);
1706
1707 if (auto *CI = dyn_cast<ConstantInt>(V))
1708 TmpStream << CI->getValue();
1709 else if (isa<Constant>(V))
1710 TmpStream << "constant";
1711 else {
1712 if (isMatrix(V))
1713 TmpStream << "matrix";
1714 else
1715 TmpStream << "scalar";
1716 }
1717 TmpStream.flush();
1718 Tmp = std::string(StringRef(Tmp).trim());
1719 LineLength += Tmp.size();
1720 Stream << Tmp;
1721 }
1722
1723 /// Linearize expression \p Expr starting at an indentation of \p Indent.
1724 /// Expressions that are re-used multiple times are prefixed with (reused)
1725 /// at the re-used root instruction.
1726 void linearizeExpr(Value *Expr, unsigned Indent, bool ParentReused,
1727 bool ParentShared) {
1728 auto *I = cast<Instruction>(Expr);
1729 maybeIndent(Indent);
1730 SmallVector<Value *, 8> Ops;
1731
1732 // Is Expr shared with other expression leaves?
1733 bool ExprShared = false;
1734
1735 // Deal with shared subtrees. Mark them as shared, if required.
1736 if (!ParentShared) {
1737 auto SI = Shared.find(Expr);
1738 assert(SI != Shared.end() && SI->second.count(Leaf));
1739
1740 for (Value *S : SI->second) {
1741 if (S == Leaf)
1742 continue;
1743 DebugLoc DL = cast<Instruction>(S)->getDebugLoc();
1744 write("shared with remark at line " + std::to_string(DL.getLine()) +
1745 " column " + std::to_string(DL.getCol()) + " (");
1746 }
1747 ExprShared = SI->second.size() > 1;
1748 }
1749
1750 bool Reused = !ReusedExprs.insert(Expr).second;
1751 if (Reused && !ParentReused)
1752 write("(reused) ");
1753
1754 if (auto *CI = dyn_cast<CallInst>(I)) {
1755 writeFnName(CI);
1756
1757 Ops.append(CI->arg_begin(), CI->arg_end() - getNumShapeArgs(CI));
1758 } else if (isa<BitCastInst>(Expr)) {
1759 // Special case bitcasts, which are used to materialize matrixes from
1760 // non-matrix ops.
1761 write("matrix");
1762 return;
1763 } else {
1764 Ops.append(I->value_op_begin(), I->value_op_end());
1765 write(std::string(I->getOpcodeName()));
1766 }
1767
1768 write(std::string("("));
1769
1770 unsigned NumOpsToBreak = 1;
1771 if (match(Expr, m_Intrinsic<Intrinsic::matrix_column_major_load>()))
1772 NumOpsToBreak = 2;
1773
1774 for (Value *Op : Ops) {
1775 if (Ops.size() > NumOpsToBreak)
1776 lineBreak();
1777
1778 maybeIndent(Indent + 1);
1779 if (isMatrix(Op))
1780 linearizeExpr(Op, Indent + 1, Reused, ExprShared);
1781 else
1782 write(Op);
1783 if (Op != Ops.back())
1784 write(", ");
1785 }
1786
1787 write(")");
1788 }
1789
1790 const std::string &getResult() {
1791 Stream.flush();
1792 return Str;
1793 }
1794 };
1795
1796 /// Generate remarks for matrix operations in a function. To generate remarks
1797 /// for matrix expressions, the following approach is used:
1798 /// 1. Use the inlined-at debug information to group matrix operations to the
1799 /// DISubprograms they are contained in.
1800 /// 2. Collect leaves of matrix expressions (done in
1801 /// RemarkGenerator::getExpressionLeaves) for each subprogram - expression
1802 /// mapping. Leaves are lowered matrix instructions without other matrix
1803 /// users (like stores) in the current subprogram.
1804 /// 3. For each leaf, create a remark containing a linearizied version of the
1805 /// matrix expression. The expression is linearized by a recursive
1806 /// bottom-up traversal of the matrix operands, starting at a leaf. Note
1807 /// that multiple leaves can share sub-expressions. Shared subexpressions
1808 /// are explicitly marked as shared().
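///
/// An emitted remark might read, for illustration only:
///   remark: test.cpp:35:3: Lowered with 4 stores, 8 loads, 112 compute ops
/// followed by the linearized form of the expression rooted at the leaf.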
1809 struct RemarkGenerator {
1810 const MapVector<Value *, MatrixTy> &Inst2Matrix;
1811 OptimizationRemarkEmitter &ORE;
1812 Function &Func;
1813 const DataLayout &DL;
1814
1815 RemarkGenerator(const MapVector<Value *, MatrixTy> &Inst2Matrix,
1816 OptimizationRemarkEmitter &ORE, Function &Func)
1817 : Inst2Matrix(Inst2Matrix), ORE(ORE), Func(Func),
1818 DL(Func.getParent()->getDataLayout()) {}
1819
1820 /// Return all leaves of the expressions in \p ExprsInSubprogram. Those are
1821 /// instructions in Inst2Matrix returning void or without any users in
1822 /// \p ExprsInSubprogram. Currently that should only include stores.
1823 SmallVector<Value *, 4>
1824 getExpressionLeaves(const SmallSetVector<Value *, 32> &ExprsInSubprogram) {
1825 SmallVector<Value *, 4> Leaves;
1826 for (auto *Expr : ExprsInSubprogram)
1827 if (Expr->getType()->isVoidTy() ||
1828 !any_of(Expr->users(), [&ExprsInSubprogram](User *U) {
1829 return ExprsInSubprogram.count(U);
1830 }))
1831 Leaves.push_back(Expr);
1832 return Leaves;
1833 }
1834
1835 /// Recursively traverse expression \p V starting at \p Leaf and add \p Leaf
1836 /// to all visited expressions in \p Shared. Limit the matrix operations to
1837 /// the ones in \p ExprsInSubprogram.
1838 void collectSharedInfo(Value *Leaf, Value *V,
1839 const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1840 DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) {
1841
1842 if (!ExprsInSubprogram.count(V))
1843 return;
1844
1845 auto I = Shared.insert({V, {}});
1846 I.first->second.insert(Leaf);
1847
1848 for (Value *Op : cast<Instruction>(V)->operand_values())
1849 collectSharedInfo(Leaf, Op, ExprsInSubprogram, Shared);
1850 }
1851
1852 /// Calculate the exclusive and shared op counts for the expression starting
1853 /// at \p Root. Expressions used multiple times are counted once.
1854 /// Limit the matrix operations to the ones in \p ExprsInSubprogram.
1855 std::pair<OpInfoTy, OpInfoTy>
1856 sumOpInfos(Value *Root, SmallPtrSetImpl<Value *> &ReusedExprs,
1857 const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1858 DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared) const {
1859 if (!ExprsInSubprogram.count(Root))
1860 return {};
1861
1862 // Already counted this expression. Stop.
1863 if (!ReusedExprs.insert(Root).second)
1864 return {};
1865
1866 OpInfoTy SharedCount;
1867 OpInfoTy Count;
1868
1869 auto I = Shared.find(Root);
1870 auto CM = Inst2Matrix.find(Root);
1871 if (I->second.size() == 1)
1872 Count = CM->second.getOpInfo();
1873 else
1874 SharedCount = CM->second.getOpInfo();
1875
1876 for (Value *Op : cast<Instruction>(Root)->operand_values()) {
1877 auto C = sumOpInfos(Op, ReusedExprs, ExprsInSubprogram, Shared);
1878 Count += C.first;
1879 SharedCount += C.second;
1880 }
1881 return {Count, SharedCount};
1882 }
1883
1884 void emitRemarks() {
1885 if (!ORE.allowExtraAnalysis(DEBUG_TYPE))
1886 return;
1887
1888 // Map matrix operations to their containing subprograms, by traversing
1889 // the inlinedAt chain. If the function does not have a DISubprogram, we
1890 // only map them to the containing function.
1891 MapVector<DISubprogram *, SmallVector<Value *, 8>> Subprog2Exprs;
1892 for (auto &KV : Inst2Matrix) {
1893 if (Func.getSubprogram()) {
1894 auto *I = cast<Instruction>(KV.first);
1895 DILocation *Context = I->getDebugLoc();
1896 while (Context) {
1897 auto I =
1898 Subprog2Exprs.insert({getSubprogram(Context->getScope()), {}});
1899 I.first->second.push_back(KV.first);
1900 Context = DebugLoc(Context).getInlinedAt();
1901 }
1902 } else {
1903 auto I = Subprog2Exprs.insert({nullptr, {}});
1904 I.first->second.push_back(KV.first);
1905 }
1906 }
1907 for (auto &KV : Subprog2Exprs) {
1908 SmallSetVector<Value *, 32> ExprsInSubprogram(KV.second.begin(),
1909 KV.second.end());
1910 auto Leaves = getExpressionLeaves(ExprsInSubprogram);
1911
1912 DenseMap<Value *, SmallPtrSet<Value *, 2>> Shared;
1913 for (Value *Leaf : Leaves)
1914 collectSharedInfo(Leaf, Leaf, ExprsInSubprogram, Shared);
1915
1916 // Generate remarks for each leaf.
1917 for (auto *L : Leaves) {
1918
1919 DebugLoc Loc = cast<Instruction>(L)->getDebugLoc();
1920 DILocation *Context = cast<Instruction>(L)->getDebugLoc();
1921 while (Context) {
1922 if (getSubprogram(Context->getScope()) == KV.first) {
1923 Loc = Context;
1924 break;
1925 }
1926 Context = DebugLoc(Context).getInlinedAt();
1927 }
1928
1929 SmallPtrSet<Value *, 8> ReusedExprs;
1930 OpInfoTy Counts, SharedCounts;
1931 std::tie(Counts, SharedCounts) =
1932 sumOpInfos(L, ReusedExprs, ExprsInSubprogram, Shared);
1933
1934 OptimizationRemark Rem(DEBUG_TYPE, "matrix-lowered", Loc,
1935 cast<Instruction>(L)->getParent());
1936
1937 Rem << "Lowered with ";
1938 Rem << ore::NV("NumStores", Counts.NumStores) << " stores, "
1939 << ore::NV("NumLoads", Counts.NumLoads) << " loads, "
1940 << ore::NV("NumComputeOps", Counts.NumComputeOps)
1941 << " compute ops";
1942
1943 if (SharedCounts.NumStores > 0 || SharedCounts.NumLoads > 0 ||
1944 SharedCounts.NumComputeOps > 0) {
1945 Rem << ",\nadditionally "
1946 << ore::NV("NumStores", SharedCounts.NumStores) << " stores, "
1947 << ore::NV("NumLoads", SharedCounts.NumLoads) << " loads, "
1948 << ore::NV("NumFPOps", SharedCounts.NumComputeOps)
1949 << " compute ops"
1950 << " are shared with other expressions";
1951 }
1952
1953 Rem << ("\n" + linearize(L, Shared, ExprsInSubprogram, DL));
1954 ORE.emit(Rem);
1955 }
1956 }
1957 }
1958
1959 std::string
1960 linearize(Value *L,
1961 const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
1962 const SmallSetVector<Value *, 32> &ExprsInSubprogram,
1963 const DataLayout &DL) {
1964 ExprLinearizer Lin(DL, Inst2Matrix, Shared, ExprsInSubprogram, L);
1965 Lin.linearizeExpr(L, 0, false, false);
1966 return Lin.getResult();
1967 }
1968 };
1969 };
1970 } // namespace
1971
1972 PreservedAnalyses LowerMatrixIntrinsicsPass::run(Function &F,
1973 FunctionAnalysisManager &AM) {
1974 auto &TTI = AM.getResult<TargetIRAnalysis>(F);
1975 OptimizationRemarkEmitter *ORE = nullptr;
1976 AAResults *AA = nullptr;
1977 DominatorTree *DT = nullptr;
1978 LoopInfo *LI = nullptr;
1979
1980 if (!Minimal) {
1981 ORE = &AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
1982 AA = &AM.getResult<AAManager>(F);
1983 DT = &AM.getResult<DominatorTreeAnalysis>(F);
1984 LI = &AM.getResult<LoopAnalysis>(F);
1985 }
1986
1987 LowerMatrixIntrinsics LMT(F, TTI, AA, DT, LI, ORE);
1988 if (LMT.Visit()) {
1989 PreservedAnalyses PA;
1990 if (!Minimal) {
1991 PA.preserve<LoopAnalysis>();
1992 PA.preserve<DominatorTreeAnalysis>();
1993 }
1994 return PA;
1995 }
1996 return PreservedAnalyses::all();
1997 }
1998
1999 namespace {
2000
2001 class LowerMatrixIntrinsicsLegacyPass : public FunctionPass {
2002 public:
2003 static char ID;
2004
2005 LowerMatrixIntrinsicsLegacyPass() : FunctionPass(ID) {
2006 initializeLowerMatrixIntrinsicsLegacyPassPass(
2007 *PassRegistry::getPassRegistry());
2008 }
2009
2010 bool runOnFunction(Function &F) override {
2011 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2012 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2013 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2014 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2015 auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2016 LowerMatrixIntrinsics LMT(F, TTI, &AA, &DT, &LI, &ORE);
2017 bool C = LMT.Visit();
2018 return C;
2019 }
2020
2021 void getAnalysisUsage(AnalysisUsage &AU) const override {
2022 AU.addRequired<TargetTransformInfoWrapperPass>();
2023 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2024 AU.addRequired<AAResultsWrapperPass>();
2025 AU.addRequired<DominatorTreeWrapperPass>();
2026 AU.addPreserved<DominatorTreeWrapperPass>();
2027 AU.addRequired<LoopInfoWrapperPass>();
2028 AU.addPreserved<LoopInfoWrapperPass>();
2029 }
2030 };
2031 } // namespace
2032
2033 static const char pass_name[] = "Lower the matrix intrinsics";
2034 char LowerMatrixIntrinsicsLegacyPass::ID = 0;
2035 INITIALIZE_PASS_BEGIN(LowerMatrixIntrinsicsLegacyPass, DEBUG_TYPE, pass_name,
2036 false, false)
2037 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
2038 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2039 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2040 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
2041 INITIALIZE_PASS_END(LowerMatrixIntrinsicsLegacyPass, DEBUG_TYPE, pass_name,
2042 false, false)
2043
2044 Pass *llvm::createLowerMatrixIntrinsicsPass() {
2045 return new LowerMatrixIntrinsicsLegacyPass();
2046 }
2047
2048 namespace {
2049
2050 /// A lightweight version of the matrix lowering pass that only requires TTI.
2051 /// Advanced features that require DT, AA or ORE like tiling are disabled. This
2052 /// is used to lower matrix intrinsics if the main lowering pass is not run, for
2053 /// example with -O0.
2054 class LowerMatrixIntrinsicsMinimalLegacyPass : public FunctionPass {
2055 public:
2056 static char ID;
2057
2058 LowerMatrixIntrinsicsMinimalLegacyPass() : FunctionPass(ID) {
2059 initializeLowerMatrixIntrinsicsMinimalLegacyPassPass(
2060 *PassRegistry::getPassRegistry());
2061 }
2062
2063 bool runOnFunction(Function &F) override {
2064 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2065 LowerMatrixIntrinsics LMT(F, TTI, nullptr, nullptr, nullptr, nullptr);
2066 bool C = LMT.Visit();
2067 return C;
2068 }
2069
2070 void getAnalysisUsage(AnalysisUsage &AU) const override {
2071 AU.addRequired<TargetTransformInfoWrapperPass>();
2072 AU.setPreservesCFG();
2073 }
2074 };
2075 } // namespace
2076
2077 static const char pass_name_minimal[] = "Lower the matrix intrinsics (minimal)";
2078 char LowerMatrixIntrinsicsMinimalLegacyPass::ID = 0;
2079 INITIALIZE_PASS_BEGIN(LowerMatrixIntrinsicsMinimalLegacyPass,
2080 "lower-matrix-intrinsics-minimal", pass_name_minimal,
2081 false, false)
2082 INITIALIZE_PASS_END(LowerMatrixIntrinsicsMinimalLegacyPass,
2083 "lower-matrix-intrinsics-minimal", pass_name_minimal, false,
2084 false)
2085
2086 Pass *llvm::createLowerMatrixIntrinsicsMinimalPass() {
2087 return new LowerMatrixIntrinsicsMinimalLegacyPass();
2088 }
2089