//===- VectorToGPU.cpp - Convert vector to GPU dialect ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering of vector operations to GPU dialect ops.
//
//===----------------------------------------------------------------------===//

#include <type_traits>

#include "mlir/Conversion/VectorToGPU/VectorToGPU.h"

#include "../PassDetail.h"
#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/GPU/GPUDialect.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/SCF/SCF.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/Dialect/Vector/VectorUtils.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "mlir/Transforms/Passes.h"

using namespace mlir;

// Return true if the contract op can be converted to an MMA matmul.
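// The contraction must use (parallel, parallel, reduction) iterators, the
// canonical row-major matmul indexing maps, and one of the natively supported
// (m, n, k) tile sizes checked below. A qualifying op looks roughly like the
// following (an illustrative sketch; names and shapes are made up):
//
//   %d = vector.contract {
//          indexing_maps = [affine_map<(m, n, k) -> (m, k)>,
//                           affine_map<(m, n, k) -> (k, n)>,
//                           affine_map<(m, n, k) -> (m, n)>],
//          iterator_types = ["parallel", "parallel", "reduction"]}
//        %a, %b, %c
//        : vector<16x16xf16>, vector<16x16xf16> into vector<16x16xf16>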
static bool contractSupportsMMAMatrixType(vector::ContractionOp contract) {
  if (llvm::size(contract.masks()) != 0)
    return false;

  using MapList = ArrayRef<ArrayRef<AffineExpr>>;
  auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
  AffineExpr m, n, k;
  bindDims(contract.getContext(), m, n, k);
  auto iteratorTypes = contract.iterator_types().getValue();
  if (!(isParallelIterator(iteratorTypes[0]) &&
        isParallelIterator(iteratorTypes[1]) &&
        isReductionIterator(iteratorTypes[2])))
    return false;

  // The contract needs to represent a matmul to be able to convert to
  // MMAMatrix matmul.
  if (contract.getIndexingMaps() != infer({{m, k}, {k, n}, {m, n}}))
    return false;

  // Check that the size matches what is natively supported.
  VectorType lhsType = contract.lhs().getType().cast<VectorType>();
  VectorType rhsType = contract.rhs().getType().cast<VectorType>();
  VectorType accType = contract.acc().getType().cast<VectorType>();

  std::tuple<int, int, int> dim(lhsType.getDimSize(0), rhsType.getDimSize(1),
                                lhsType.getDimSize(1));
  if (lhsType.getElementType().isInteger(8) &&
      rhsType.getElementType().isInteger(8) &&
      accType.getElementType().isInteger(32) &&
      (dim == std::make_tuple(8, 8, 32) || dim == std::make_tuple(16, 16, 32) ||
       dim == std::make_tuple(16, 8, 32)))
    return true;

  if (lhsType.getElementType().isF16() && rhsType.getElementType().isF16() &&
      (accType.getElementType().isF16() || accType.getElementType().isF32()) &&
      (dim == std::make_tuple(8, 8, 16) || dim == std::make_tuple(16, 16, 16) ||
       dim == std::make_tuple(16, 8, 16)))
    return true;
  return false;
}

// Return the stride for dimension 0 of |type| if it is a memref and has a
// constant stride.
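// The stride is needed as the leading-dimension attribute of the MMA load and
// store ops created below, so it has to be a compile-time constant.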
static llvm::Optional<int64_t>
getMemrefConstantHorizontalStride(ShapedType type) {
  auto memrefType = type.dyn_cast<MemRefType>();
  if (!memrefType)
    return llvm::None;
  int64_t offset = 0;
  SmallVector<int64_t, 2> strides;
  if (failed(getStridesAndOffset(memrefType, strides, offset)))
    return llvm::None;
  if (strides[0] == ShapedType::kDynamicStrideOrOffset)
    return llvm::None;
  return strides[0];
}

// Return true if the transfer op can be converted to an MMA matrix load.
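// The read must be unmasked, in-bounds, rank 2, from a memref with a constant
// row stride, and use a minor identity permutation map. An illustrative
// example of a qualifying read (names and shapes are made up):
//
//   %v = vector.transfer_read %mem[%i, %j], %pad {in_bounds = [true, true]}
//        : memref<32x32xf16>, vector<16x16xf16>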
static bool transferReadSupportsMMAMatrixType(vector::TransferReadOp readOp) {
  if (readOp.mask() || readOp.hasOutOfBoundsDim() ||
      readOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(readOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!readOp.permutation_map().isMinorIdentity())
    return false;
  return true;
}

// Return true if the transfer op can be converted to an MMA matrix store.
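// Same restrictions as the transfer_read case above, applied to the write.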
static bool
transferWriteSupportsMMAMatrixType(vector::TransferWriteOp writeOp) {
  if (writeOp.mask() || writeOp.hasOutOfBoundsDim() ||
      writeOp.getVectorType().getRank() != 2)
    return false;
  if (!getMemrefConstantHorizontalStride(writeOp.getShapedType()))
    return false;
  // TODO: Support transpose once it is added to GPU dialect ops.
  if (!writeOp.permutation_map().isMinorIdentity())
    return false;
  return true;
}

/// Return true if the constant is a splat to a 2D vector so that it can be
/// converted to an MMA constant matrix op.
static bool constantSupportsMMAMatrixType(ConstantOp constantOp) {
  auto vecType = constantOp.getType().dyn_cast<VectorType>();
  if (!vecType || vecType.getRank() != 2)
    return false;
  return constantOp.value().isa<SplatElementsAttr>();
}

/// Return true if this is a broadcast from scalar to a 2D vector.
static bool broadcastSupportsMMAMatrixType(vector::BroadcastOp broadcastOp) {
  return broadcastOp.getVectorType().getRank() == 2 &&
         broadcastOp.source().getType().isa<FloatType>();
}

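// Return true if the op can be part of a chain of operations carried out on
// MMA matrix values. scf.for and scf.yield are allowed so that accumulators
// can be carried across loop iterations.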
static bool supportsMMaMatrixType(Operation *op) {
  if (isa<scf::ForOp, scf::YieldOp>(op))
    return true;
  if (auto transferRead = dyn_cast<vector::TransferReadOp>(op))
    return transferReadSupportsMMAMatrixType(transferRead);
  if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op))
    return transferWriteSupportsMMAMatrixType(transferWrite);
  if (auto contract = dyn_cast<vector::ContractionOp>(op))
    return contractSupportsMMAMatrixType(contract);
  if (auto constant = dyn_cast<ConstantOp>(op))
    return constantSupportsMMAMatrixType(constant);
  if (auto broadcast = dyn_cast<vector::BroadcastOp>(op))
    return broadcastSupportsMMAMatrixType(broadcast);
  return false;
}

// Analyze the slice of operations anchored at each vector.contract op to
// figure out if the whole slice can be converted to MMA operations.
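// The slice is computed from each contract op, walking backward through
// producers of vector results and forward through consumers of vector
// operands, so that the whole chain from loads to stores is converted at once.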
static SetVector<Operation *> getOpToConvert(mlir::Operation *op) {
  auto hasVectorDest = [](Operation *op) {
    return llvm::any_of(op->getResultTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  auto hasVectorSrc = [](Operation *op) {
    return llvm::any_of(op->getOperandTypes(),
                        [](Type t) { return t.isa<VectorType>(); });
  };
  SetVector<Operation *> opToConvert;
  op->walk([&](vector::ContractionOp contract) {
    if (opToConvert.contains(contract.getOperation()))
      return;
    SetVector<Operation *> dependentOps =
        getSlice(contract, hasVectorDest, hasVectorSrc);
    // If any instruction cannot use the MMA matrix type, drop the whole
    // chain. MMA matrices are stored in an opaque type so they cannot be used
    // by all operations.
    if (llvm::any_of(dependentOps,
                     [](Operation *op) { return !supportsMMaMatrixType(op); }))
      return;
    opToConvert.insert(dependentOps.begin(), dependentOps.end());
  });
  return opToConvert;
}

namespace {
// Transform contract into (m, k)x(k, n)x(m, n) form so that it can be
// converted to an MMA matmul.
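// For instance, a contraction with indexing maps {(m, k), (n, k), (m, n)}
// gets its RHS transposed so that it matches the canonical
// {(m, k), (k, n), (m, n)} form expected by the MMA lowering.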
struct PrepareContractToGPUMMA
    : public OpRewritePattern<vector::ContractionOp> {
  using OpRewritePattern<vector::ContractionOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::ContractionOp op,
                                PatternRewriter &rewriter) const override {
    Location loc = op.getLoc();
    Value lhs = op.lhs(), rhs = op.rhs(), res = op.acc();

    // Set up the parallel/reduction structure in the right form.
    using MapList = ArrayRef<ArrayRef<AffineExpr>>;
    auto infer = [](MapList m) { return AffineMap::inferFromExprList(m); };
    AffineExpr m, n, k;
    bindDims(rewriter.getContext(), m, n, k);
    static constexpr std::array<int64_t, 2> perm = {1, 0};
    auto iteratorTypes = op.iterator_types().getValue();
    SmallVector<AffineMap, 4> maps = op.getIndexingMaps();
    if (!(isParallelIterator(iteratorTypes[0]) &&
          isParallelIterator(iteratorTypes[1]) &&
          isReductionIterator(iteratorTypes[2])))
      return failure();
    //
    // Two outer parallel, one inner reduction (matmat flavor).
    //
    if (maps == infer({{m, k}, {k, n}, {m, n}})) {
      // This is the classical row-major matmul, nothing to do.
      return failure();
    }
    if (maps == infer({{m, k}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {m, n}})) {
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {m, n}})) {
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {k, n}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{m, k}, {n, k}, {n, m}})) {
      std::swap(rhs, lhs);
      rhs = rewriter.create<vector::TransposeOp>(loc, rhs, perm);
    } else if (maps == infer({{k, m}, {k, n}, {n, m}})) {
      std::swap(lhs, rhs);
      lhs = rewriter.create<vector::TransposeOp>(loc, lhs, perm);
    } else if (maps == infer({{k, m}, {n, k}, {n, m}})) {
      std::swap(lhs, rhs);
    } else {
      return failure();
    }
    rewriter.replaceOpWithNewOp<vector::ContractionOp>(
        op, lhs, rhs, res,
        rewriter.getAffineMapArrayAttr(infer({{m, k}, {k, n}, {m, n}})),
        op.iterator_types());
    return success();
  }
};

// Merge transpose op into the transfer read op. Transposes are not supported
// on MMA types but the MMA load can transpose the matrix when loading.
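// The transpose is folded by composing its permutation into the read's
// permutation map, removing the standalone vector.transpose from the chain.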
struct CombineTransferReadOpTranspose final
    : public OpRewritePattern<vector::TransposeOp> {
  using OpRewritePattern<vector::TransposeOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(vector::TransposeOp op,
                                PatternRewriter &rewriter) const override {
    auto transferReadOp = op.vector().getDefiningOp<vector::TransferReadOp>();
    if (!transferReadOp)
      return failure();
    if (transferReadOp.mask() || transferReadOp.hasOutOfBoundsDim())
      return failure();
    SmallVector<int64_t, 2> perm;
    op.getTransp(perm);
    SmallVector<unsigned, 2> permU;
    for (int64_t o : perm)
      permU.push_back(unsigned(o));
    AffineMap permutationMap =
        AffineMap::getPermutationMap(permU, op.getContext());
    AffineMap newMap = permutationMap.compose(transferReadOp.permutation_map());
    rewriter.replaceOpWithNewOp<vector::TransferReadOp>(
        op, op.getType(), transferReadOp.source(), transferReadOp.indices(),
        newMap, transferReadOp.padding(), transferReadOp.mask(),
        transferReadOp.in_boundsAttr());
    return success();
  }
};

} // namespace

// MMA types have different layouts based on how they are used in matmul ops.
// Figure out the right layout to use by looking at op uses.
// TODO: Change the GPU dialect to abstract the layout at this level and only
// care about it during lowering to NVVM.
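// "AOp", "BOp" and "COp" name the LHS, RHS and accumulator/result operands of
// an MMA matmul respectively: a value feeding the LHS of a contraction gets
// the "AOp" layout, a value feeding the RHS gets "BOp", everything else "COp".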
template <typename OpTy>
static const char *inferFragType(OpTy op) {
  for (Operation *user : op->getUsers()) {
    auto contract = dyn_cast<vector::ContractionOp>(user);
    if (!contract)
      continue;
    if (contract.lhs() == op.getResult())
      return "AOp";
    if (contract.rhs() == op.getResult())
      return "BOp";
  }
  return "COp";
}

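/// Convert a supported vector.transfer_read into a
/// gpu.subgroup_mma_load_matrix op and record the produced MMA matrix value in
/// the mapping.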
static void convertTransferReadOp(vector::TransferReadOp op,
                                  llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferReadSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  const char *fragType = inferFragType(op);
  gpu::MMAMatrixType type =
      gpu::MMAMatrixType::get(op.getVectorType().getShape(),
                              op.getVectorType().getElementType(), fragType);
  OpBuilder b(op);
  Value load = b.create<gpu::SubgroupMmaLoadMatrixOp>(
      op.getLoc(), type, op.source(), op.indices(), b.getIndexAttr(*stride));
  valueMapping[op.getResult()] = load;
}

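/// Convert a supported vector.transfer_write of an already converted MMA value
/// into a gpu.subgroup_mma_store_matrix op and erase the original write.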
static void convertTransferWriteOp(vector::TransferWriteOp op,
                                   llvm::DenseMap<Value, Value> &valueMapping) {
  assert(transferWriteSupportsMMAMatrixType(op));
  Optional<int64_t> stride =
      getMemrefConstantHorizontalStride(op.getShapedType());
  assert(stride);
  OpBuilder b(op);
  Value matrix = valueMapping.find(op.vector())->second;
  b.create<gpu::SubgroupMmaStoreMatrixOp>(
      op.getLoc(), matrix, op.source(), op.indices(), b.getIndexAttr(*stride));
  op.erase();
}

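/// Convert a vector.contract whose operands have already been mapped to MMA
/// values into a gpu.subgroup_mma_compute op.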
static void convertContractOp(vector::ContractionOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  Value opA = valueMapping.find(op.lhs())->second;
  Value opB = valueMapping.find(op.rhs())->second;
  Value opC = valueMapping.find(op.acc())->second;
  Value matmul = b.create<gpu::SubgroupMmaComputeOp>(op.getLoc(), opC.getType(),
                                                     opA, opB, opC);
  valueMapping[op.getResult()] = matmul;
}

/// Convert a 2D splat ConstantOp to a SubgroupMmaConstantMatrix op.
static void convertConstantOp(ConstantOp op,
                              llvm::DenseMap<Value, Value> &valueMapping) {
  assert(constantSupportsMMAMatrixType(op));
  OpBuilder b(op);
  Attribute splat = op.getValue().cast<SplatElementsAttr>().getSplatValue();
  auto scalarConstant =
      b.create<ConstantOp>(op.getLoc(), splat.getType(), splat);
  const char *fragType = inferFragType(op);
  auto vecType = op.getType().cast<VectorType>();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           scalarConstant);
  valueMapping[op.getResult()] = matrix;
}

/// Convert a vector.broadcast from scalar to a SubgroupMmaConstantMatrix op.
static void convertBroadcastOp(vector::BroadcastOp op,
                               llvm::DenseMap<Value, Value> &valueMapping) {
  assert(broadcastSupportsMMAMatrixType(op));
  OpBuilder b(op);
  const char *fragType = inferFragType(op);
  auto vecType = op.getVectorType();
  gpu::MMAMatrixType type = gpu::MMAMatrixType::get(
      vecType.getShape(), vecType.getElementType(), llvm::StringRef(fragType));
  auto matrix = b.create<gpu::SubgroupMmaConstantMatrixOp>(op.getLoc(), type,
                                                           op.source());
  valueMapping[op.getResult()] = matrix;
}

// Replace ForOp with a new ForOp with extra operands. The YieldOp is not
// updated and needs to be updated separately for the loop to be correct.
static scf::ForOp replaceForOpWithNewSignature(OpBuilder &b, scf::ForOp loop,
                                               ValueRange newIterOperands) {
  // Create a new loop before the existing one, with the extra operands.
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(loop);
  auto operands = llvm::to_vector<4>(loop.getIterOperands());
  operands.append(newIterOperands.begin(), newIterOperands.end());
  scf::ForOp newLoop =
      b.create<scf::ForOp>(loop.getLoc(), loop.lowerBound(), loop.upperBound(),
                           loop.step(), operands);
  newLoop.getBody()->erase();
  newLoop.getLoopBody().getBlocks().splice(
      newLoop.getLoopBody().getBlocks().begin(),
      loop.getLoopBody().getBlocks());
  for (auto operand : newIterOperands)
    newLoop.getBody()->addArgument(operand.getType());

  for (auto it : llvm::zip(loop.getResults(), newLoop.getResults().take_front(
                                                  loop.getNumResults())))
    std::get<0>(it).replaceAllUsesWith(std::get<1>(it));
  loop.erase();
  return newLoop;
}

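/// Rewrite an scf.for so that the MMA counterparts of its mapped iter operands
/// are appended as extra loop-carried values, and record the MMA counterpart
/// of each affected result and block argument.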
static void convertForOp(scf::ForOp op,
                         llvm::DenseMap<Value, Value> &valueMapping) {
  SmallVector<Value> newOperands;
  SmallVector<std::pair<size_t, size_t>> argMapping;
  for (auto operand : llvm::enumerate(op.getIterOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    argMapping.push_back(std::make_pair(
        operand.index(), op.getNumIterOperands() + newOperands.size()));
    newOperands.push_back(it->second);
  }
  OpBuilder b(op);
  scf::ForOp newForOp = replaceForOpWithNewSignature(b, op, newOperands);
  Block &loopBody = *newForOp.getBody();
  for (auto mapping : argMapping) {
    valueMapping[newForOp.getResult(mapping.first)] =
        newForOp.getResult(mapping.second);
    valueMapping[loopBody.getArgument(mapping.first +
                                      newForOp.getNumInductionVars())] =
        loopBody.getArgument(mapping.second + newForOp.getNumInductionVars());
  }
}

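/// Rewrite the scf.yield so that it forwards the MMA counterparts of the
/// mapped values to the extra loop-carried operands added by convertForOp.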
static void convertYieldOp(scf::YieldOp op,
                           llvm::DenseMap<Value, Value> &valueMapping) {
  OpBuilder b(op);
  auto loop = cast<scf::ForOp>(op->getParentOp());
  auto yieldOperands = llvm::to_vector<4>(op.getOperands());
  for (auto operand : llvm::enumerate(op.getOperands())) {
    auto it = valueMapping.find(operand.value());
    if (it == valueMapping.end())
      continue;
    // Replace the yield of the old value with the for op argument to make it
    // easier to remove the dead code.
    yieldOperands[operand.index()] = loop.getIterOperands()[operand.index()];
    yieldOperands.push_back(it->second);
  }
  b.create<scf::YieldOp>(op.getLoc(), yieldOperands);
  op.erase();
}

namespace mlir {

void populatePrepareVectorToMMAPatterns(RewritePatternSet &patterns) {
  patterns.add<PrepareContractToGPUMMA, CombineTransferReadOpTranspose>(
      patterns.getContext());
}

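/// Walk the slice computed by getOpToConvert and convert each operation in it
/// to its MMA-based equivalent, keeping a map from original vector values to
/// the MMA matrix values that replace them.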
void convertVectorToMMAOps(FuncOp funcOp) {
  SetVector<Operation *> ops = getOpToConvert(funcOp);
  llvm::DenseMap<Value, Value> valueMapping;
  for (Operation *op : ops) {
    if (auto transferRead = dyn_cast<vector::TransferReadOp>(op)) {
      convertTransferReadOp(transferRead, valueMapping);
    } else if (auto transferWrite = dyn_cast<vector::TransferWriteOp>(op)) {
      convertTransferWriteOp(transferWrite, valueMapping);
    } else if (auto contractOp = dyn_cast<vector::ContractionOp>(op)) {
      convertContractOp(contractOp, valueMapping);
    } else if (auto constantOp = dyn_cast<ConstantOp>(op)) {
      convertConstantOp(constantOp, valueMapping);
    } else if (auto broadcastOp = dyn_cast<vector::BroadcastOp>(op)) {
      convertBroadcastOp(broadcastOp, valueMapping);
    } else if (auto forOp = dyn_cast<scf::ForOp>(op)) {
      convertForOp(forOp, valueMapping);
    } else if (auto yieldOp = dyn_cast<scf::YieldOp>(op)) {
      convertYieldOp(yieldOp, valueMapping);
    }
  }
}

} // namespace mlir

namespace {

struct ConvertVectorToGPUPass
    : public ConvertVectorToGPUBase<ConvertVectorToGPUPass> {
  void runOnFunction() override {
    RewritePatternSet patterns(getFunction().getContext());
    populatePrepareVectorToMMAPatterns(patterns);
    (void)applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));

    convertVectorToMMAOps(getFunction());
  }
};

} // namespace

std::unique_ptr<Pass> mlir::createConvertVectorToGPUPass() {
  return std::make_unique<ConvertVectorToGPUPass>();
}