//===- Tiling.cpp - Implementation of linalg Tiling -----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Tiling pass.
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/Transforms/FoldUtils.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

#include "llvm/Support/CommandLine.h"

using namespace mlir;
using namespace mlir::linalg;
using namespace mlir::scf;

#define DEBUG_TYPE "linalg-tiling"

static bool isZero(Value v) {
  if (auto cst = v.getDefiningOp<ConstantIndexOp>())
    return cst.getValue() == 0;
  return false;
}

using LoopIndexToRangeIndexMap = DenseMap<int, int>;

// Creates a number of ranges equal to the number of non-zero entries in
// `tileSizes`, one for each loop of the LinalgOp that is tiled. The
// `tileSizes` argument has one entry per surrounding loop. By convention, a
// zero entry means that the corresponding loop is not tiled. This convention
// simplifies implementations by avoiding affine map manipulations. The
// returned ranges correspond to the loop ranges, in the proper order, that
// are tiled and for which new loops will be created. The function also
// returns a map from the loop indices of the LinalgOp to the corresponding
// non-empty range indices of the newly created loops.
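//
// For example (illustrative values, not taken from a test): with three loops
// and tileSizes = {32, 0, 16}, loop 1 is left untiled, ranges are created for
// loops 0 and 2 only, and the returned map is {0 -> 0, 2 -> 1}.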
static std::tuple<SmallVector<Range, 4>, LoopIndexToRangeIndexMap>
makeTiledLoopRanges(OpBuilder &b, Location loc, AffineMap map,
                    ValueRange allShapeSizes, ValueRange allTileSizes) {
  assert(allTileSizes.size() == map.getNumResults());
  // Apply `map` to get shape sizes in loop order.
  auto shapeSizes = applyMapToValues(b, loc, map, allShapeSizes);
  SmallVector<Value, 4> tileSizes(allTileSizes.begin(), allTileSizes.end());

  // Traverse the tile sizes, which are in loop order, and erase zeros
  // everywhere.
  LoopIndexToRangeIndexMap loopIndexToRangeIndex;
  for (int idx = 0, e = tileSizes.size(), zerosCount = 0; idx < e; ++idx) {
    if (isZero(tileSizes[idx - zerosCount])) {
      shapeSizes.erase(shapeSizes.begin() + idx - zerosCount);
      tileSizes.erase(tileSizes.begin() + idx - zerosCount);
      ++zerosCount;
      continue;
    }
    loopIndexToRangeIndex[idx] = idx - zerosCount;
  }

  // Create a new range with the applied tile sizes.
  SmallVector<Range, 4> res;
  for (unsigned idx = 0, e = tileSizes.size(); idx < e; ++idx)
    res.push_back(Range{b.create<ConstantIndexOp>(loc, 0), shapeSizes[idx],
                        tileSizes[idx]});
  return std::make_tuple(res, loopIndexToRangeIndex);
}

// All indices returned by IndexOp should be invariant with respect to tiling.
// Therefore, if an operation is tiled, we have to transform the indices
// accordingly, i.e. offset them by the values of the corresponding induction
// variables that are captured implicitly in the body of the op.
//
// Example. `linalg.generic` before tiling:
//
//   #id_2d = (i, j) -> (i, j)
//   #pointwise_2d_trait = {
//     indexing_maps = [#id_2d, #id_2d],
//     iterator_types = ["parallel", "parallel"]
//   }
//   linalg.generic #pointwise_2d_trait %operand, %result {
//     ^bb0(%operand_in: f32, %result_in: f32):
//       %i = linalg.index 0 : index
//       %j = linalg.index 1 : index
//       <some operations that use %i, %j>
//   }: memref<50x100xf32>, memref<50x100xf32>
//
// After the tiling pass with tile sizes 10 and 25:
//
//   #strided = (i, j)[s0, s1, s2] -> (i * s1 + s0 + j * s2)
//
//   %c1 = constant 1 : index
//   %c0 = constant 0 : index
//   %c25 = constant 25 : index
//   %c10 = constant 10 : index
//   %operand_dim_0 = dim %operand, 0 : memref<50x100xf32>
//   %operand_dim_1 = dim %operand, 1 : memref<50x100xf32>
//   scf.for %k = %c0 to %operand_dim_0 step %c10 {
//     scf.for %l = %c0 to %operand_dim_1 step %c25 {
//       %4 = std.subview %operand[%k, %l][%c10, %c25][%c1, %c1]
//           : memref<50x100xf32> to memref<?x?xf32, #strided>
//       %5 = std.subview %result[%k, %l][%c10, %c25][%c1, %c1]
//           : memref<50x100xf32> to memref<?x?xf32, #strided>
//       linalg.generic #pointwise_2d_trait %4, %5 {
//         ^bb0(%operand_in: f32, %result_in: f32):
//           %i = linalg.index 0 : index
//           %j = linalg.index 1 : index
//           // Indices `k` and `l` are implicitly captured in the body.
//           %transformed_i = addi %i, %k : index // index `i` is offset by %k
//           %transformed_j = addi %j, %l : index // index `j` is offset by %l
//           // Every use of %i, %j is replaced with %transformed_i,
//           // %transformed_j.
//           <some operations that use %transformed_i, %transformed_j>
//       }: memref<?x?xf32, #strided>, memref<?x?xf32, #strided>
//     }
//   }
//
// TODO: Investigate whether mixing implicit and explicit indices can lose
// information.
static void
transformIndexOps(OpBuilder &b, LinalgOp op, SmallVectorImpl<Value> &ivs,
                  const LoopIndexToRangeIndexMap &loopIndexToRangeIndex) {
  // Skip operations that have no region attached.
  if (op->getNumRegions() == 0)
    return;
  assert(op->getNumRegions() == 1 &&
         op->getRegion(0).getBlocks().size() == 1 &&
         "expected linalg operation to have one block.");
  Block &block = op->getRegion(0).front();

  for (IndexOp indexOp : block.getOps<linalg::IndexOp>()) {
    auto rangeIndex = loopIndexToRangeIndex.find(indexOp.dim());
    if (rangeIndex == loopIndexToRangeIndex.end())
      continue;
    // Offset the index by the value of the corresponding induction variable
    // and replace all uses of the previous value.
    OpBuilder::InsertionGuard g(b);
    b.setInsertionPointAfter(indexOp);
    AffineExpr index, iv;
    bindDims(b.getContext(), index, iv);
    AffineApplyOp applyOp = b.create<AffineApplyOp>(
        indexOp.getLoc(), index + iv,
        ValueRange{indexOp.getResult(), ivs[rangeIndex->second]});
    indexOp.getResult().replaceAllUsesExcept(applyOp, applyOp);
  }
}

// Insert the tile `source` into the destination tensor `dest`. The position
// at which the tile is inserted (as well as the size of the tile) is taken
// from a given ExtractSliceOp `sliceOp`.
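//
// For example (an illustrative sketch): if `sliceOp` is
//   %tile = tensor.extract_slice %src[%i, %j] [%sz0, %sz1] [1, 1]
// then the result is
//   tensor.insert_slice %source into %dest[%i, %j] [%sz0, %sz1] [1, 1]
// i.e. the offsets, sizes and strides of the extraction are reused.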
static Value insertSliceIntoTensor(OpBuilder &b, Location loc,
                                   tensor::ExtractSliceOp sliceOp, Value source,
                                   Value dest) {
  return b.create<tensor::InsertSliceOp>(
      loc, sliceOp.source().getType(), source, dest, sliceOp.offsets(),
      sliceOp.sizes(), sliceOp.strides(), sliceOp.static_offsets(),
      sliceOp.static_sizes(), sliceOp.static_strides());
}

template <typename LoopTy>
static Optional<TiledLinalgOp>
tileLinalgOpImpl(OpBuilder &b, LinalgOp op, ValueRange tileSizes,
                 const LinalgTilingOptions &options) {
  auto nLoops = op.getNumLoops();
  // There may be more initial tile sizes than loops; only take the first
  // nLoops.
  tileSizes = tileSizes.take_front(nLoops);

  if (llvm::all_of(tileSizes, isZero))
    return llvm::None;

  if (auto convOp = dyn_cast<linalg::ConvOp>(op.getOperation())) {
    // For conv ops, only support tiling along the batch dimension (which is
    // the first loop).
    if (convOp.padding() && !llvm::all_of(tileSizes.drop_front(), isZero))
      return llvm::None;
  }

  // 1. Build the tiled loop ranges.
  auto allShapeSizes = op.createFlatListOfOperandDims(b, op.getLoc());
  AffineMap shapeSizesToLoopsMap = op.getShapesToLoopsMap();
  if (!shapeSizesToLoopsMap)
    return llvm::None;

  SmallVector<Range, 4> loopRanges;
  LoopIndexToRangeIndexMap loopIndexToRangeIndex;
  std::tie(loopRanges, loopIndexToRangeIndex) = makeTiledLoopRanges(
      b, op.getLoc(), shapeSizesToLoopsMap, allShapeSizes, tileSizes);

  SmallVector<Attribute, 4> iteratorTypes;
  for (auto attr :
       enumerate(op.iterator_types().cast<ArrayAttr>().getValue())) {
    if (loopIndexToRangeIndex.count(attr.index()))
      iteratorTypes.push_back(attr.value());
  }
  // If interchangeVector is empty, use the identity map; otherwise, build the
  // permutation map.
  auto invPermutationMap =
      AffineMap::getMultiDimIdentityMap(tileSizes.size(), b.getContext());
  if (!options.interchangeVector.empty()) {
    // Based on the pruned iterations (due to zero tile sizes), recompute the
    // interchange vector.
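    // For example (illustrative values): with tile sizes {16, 0, 32}, loop 1
    // is untiled and loopIndexToRangeIndex is {0 -> 0, 2 -> 1}, so an
    // interchangeVector of {2, 0, 1} is pruned to {1, 0}.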
    SmallVector<unsigned, 4> interchangeVector;
    interchangeVector.reserve(options.interchangeVector.size());
    for (auto pos : options.interchangeVector) {
      auto it = loopIndexToRangeIndex.find(pos);
      if (it == loopIndexToRangeIndex.end())
        continue;
      interchangeVector.push_back(it->second);
    }
    // The interchange vector is guaranteed to be a permutation, so
    // `inversePermutation` must succeed.
    invPermutationMap = inversePermutation(
        AffineMap::getPermutationMap(interchangeVector, b.getContext()));
    assert(invPermutationMap);
    applyPermutationToVector(loopRanges, interchangeVector);
    applyPermutationToVector(iteratorTypes, interchangeVector);
  }

  // 2. Create the tiled loops.
  LinalgOp res = op;
  SmallVector<Value, 4> ivs, tensorResults;
  auto tiledLoopBodyBuilder = [&](OpBuilder &b, Location loc,
                                  ValueRange localIvs,
                                  ValueRange iterArgs) -> scf::ValueVector {
    ivs.assign(localIvs.begin(), localIvs.end());

    // When an `interchangeVector` is present, it has been applied to the
    // loop ranges and the iterator types. Apply its inverse to the
    // resulting loop `ivs` to match the op definition.
    SmallVector<Value, 4> interchangedIvs;
    if (!options.interchangeVector.empty())
      interchangedIvs = applyMapToValues(b, loc, invPermutationMap, ivs);
    else
      interchangedIvs.assign(ivs.begin(), ivs.end());

    assert(op.getOutputTensorOperands().size() == iterArgs.size() &&
           "num output tensors must match number of loop iter arguments");

    SmallVector<Value> operands = op.getInputOperands();
    SmallVector<Value> outputBuffers = op.getOutputBufferOperands();
    // TODO: thanks to a simplifying assumption, we do not need to worry about
    // the order of output buffers and tensors: there is only ever one kind.
    assert(outputBuffers.empty() || iterArgs.empty());
    operands.append(outputBuffers.begin(), outputBuffers.end());
    operands.append(iterArgs.begin(), iterArgs.end());
    auto sizeBounds =
        applyMapToValues(b, loc, shapeSizesToLoopsMap, allShapeSizes);
    SmallVector<Value, 4> tiledOperands = makeTiledShapes(
        b, loc, op, operands, interchangedIvs, tileSizes, sizeBounds);

    // TODO: use an interface/adaptor to avoid leaking the operand position
    // in `tiledOperands`.
    SmallVector<Type, 4> resultTensorTypes;
    for (OpOperand *opOperand : op.getOutputTensorOperands())
      resultTensorTypes.push_back(
          tiledOperands[opOperand->getOperandNumber()].getType());

    res = op.clone(b, loc, resultTensorTypes, tiledOperands);

    // Insert an insert_slice for each output tensor.
    unsigned resultIdx = 0;
    for (OpOperand *opOperand : op.getOutputTensorOperands()) {
      // TODO: use an interface/adaptor to avoid leaking the operand position
      // in `tiledOperands`.
      Value outputTensor = tiledOperands[opOperand->getOperandNumber()];
      if (auto sliceOp = outputTensor.getDefiningOp<tensor::ExtractSliceOp>()) {
        tensorResults.push_back(insertSliceIntoTensor(
            b, loc, sliceOp, res->getResult(resultIdx), sliceOp.source()));
      } else {
        tensorResults.push_back(res->getResult(resultIdx));
      }
      ++resultIdx;
    }
    return scf::ValueVector(tensorResults.begin(), tensorResults.end());
  };
  GenerateLoopNest<LoopTy>::doit(b, op.getLoc(), loopRanges, op, iteratorTypes,
                                 tiledLoopBodyBuilder, options.distribution,
                                 options.distributionTypes);

  // 3. Transform IndexOp results w.r.t. the tiling.
  transformIndexOps(b, res, ivs, loopIndexToRangeIndex);

  // 4. Gather the newly created loops and return them with the new op.
  SmallVector<Operation *, 8> loops;
  loops.reserve(ivs.size());
  for (auto iv : ivs) {
    if (iv.isa<BlockArgument>()) {
      loops.push_back(iv.cast<BlockArgument>().getOwner()->getParentOp());
      assert(loops.back() && "no owner found for induction variable!");
    } else {
      // TODO: Instead of pushing a nullptr, try to recover the ops that are
      // used in place of the loop.
      loops.push_back(nullptr);
    }
  }

  // 5. Get the tensor results from the outermost loop if available. Otherwise
  // use the previously captured `tensorResults`.
  Operation *outermostLoop = nullptr;
  for (Operation *loop : loops)
    if ((outermostLoop = loop))
      break;

  return TiledLinalgOp{
      res, loops, outermostLoop ? outermostLoop->getResults() : tensorResults};
}

template <typename LoopTy>
static Optional<TiledLinalgOp>
tileLinalgOpImpl(OpBuilder &b, LinalgOp op,
                 const LinalgTilingOptions &options) {
  OpBuilder::InsertionGuard g(b);
  b.setInsertionPoint(op);

  if (!options.tileSizeComputationFunction)
    return llvm::None;

  // Enforce the convention that "tiling by zero" skips tiling a particular
  // dimension. This convention is significantly simpler to handle than
  // adjusting affine maps to account for missing dimensions.
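  //
  // For example (illustrative sizes): for an op with three loops and computed
  // tile sizes {32, 16}, the vector below is padded to {32, 16, 0}, leaving
  // the innermost loop untiled.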
  auto nLoops = op.getNumLoops();
  SmallVector<Value, 4> tileSizeVector =
      options.tileSizeComputationFunction(b, op);
  if (tileSizeVector.size() < nLoops) {
    auto zero = b.create<ConstantIndexOp>(op.getLoc(), 0);
    tileSizeVector.append(nLoops - tileSizeVector.size(), zero);
  }

  return tileLinalgOpImpl<LoopTy>(b, op, tileSizeVector, options);
}

Optional<TiledLinalgOp>
mlir::linalg::tileLinalgOp(OpBuilder &b, LinalgOp op,
                           const LinalgTilingOptions &options) {
  switch (options.loopType) {
  case LinalgTilingLoopType::Loops:
    return tileLinalgOpImpl<scf::ForOp>(b, op, options);
  case LinalgTilingLoopType::ParallelLoops:
    return tileLinalgOpImpl<scf::ParallelOp>(b, op, options);
  case LinalgTilingLoopType::TiledLoops:
    return tileLinalgOpImpl<linalg::TiledLoopOp>(b, op, options);
  default:;
  }
  return llvm::None;
}
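
// Example usage (a sketch under assumed caller setup: `b` is an OpBuilder and
// `linalgOp` is the LinalgOp to tile):
//
//   LinalgTilingOptions opts = LinalgTilingOptions()
//                                  .setTileSizes({16, 32})
//                                  .setLoopType(LinalgTilingLoopType::Loops);
//   if (Optional<TiledLinalgOp> tiled = tileLinalgOp(b, linalgOp, opts)) {
//     // `tiled->op` is the tiled LinalgOp; `tiled->loops` are the new loops.
//   }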

/// Generate a loop nest around a given PadTensorOp (for tiling). `newPadOp`
/// and `loopNest` are output parameters that return the new (tiled)
/// PadTensorOp and the loop nest.
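///
/// For a 2-D pad with tile sizes {2, 4}, the generated IR has roughly the
/// following shape (an illustrative sketch; exact operands may differ):
///
///   %pad = linalg.pad_tensor ...            // clone of the original op
///   scf.for %i = %c0 to %d0 step %c2 iter_args(%a0 = %out) {
///     scf.for %j = %c0 to %d1 step %c4 iter_args(%a1 = %a0) {
///       %tile = tensor.extract_slice %pad[%i, %j] [...] [...]
///       %r = tensor.insert_slice %tile into %a1[%i, %j] [...] [...]
///       scf.yield %r
///     }
///   }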
static LogicalResult tilePadTensorOp(OpBuilder &builder, PadTensorOp op,
                                     PadTensorOp &newPadOp, LoopNest &loopNest,
                                     const LinalgTilingOptions &options) {
  // Only PadTensorOps that have an output operand can be tiled.
  if (!op.output())
    return failure();

  Location loc = op.getLoc();
  OpBuilder::InsertionGuard g(builder);
  builder.setInsertionPoint(op);

  // Clone the PadTensorOp so that the existing op can be replaced more easily.
  newPadOp = cast<PadTensorOp>(builder.clone(*op.getOperation()));
  // Get the rank and tile sizes.
  int64_t rank = op.getResultType().getRank();
  SmallVector<Value> tileSizes =
      options.tileSizeComputationFunction(builder, op);
  assert(static_cast<int64_t>(tileSizes.size()) == rank);
  // Compute lower and upper bounds of the loop nest.
  SmallVector<Value> lbs, dims, steps;
  for (int64_t i = 0; i < rank; ++i) {
    if (!isZero(tileSizes[i])) {
      lbs.push_back(builder.create<ConstantIndexOp>(loc, 0));
      dims.push_back(builder.create<tensor::DimOp>(loc, op.output(), i));
      steps.push_back(tileSizes[i]);
    }
  }
  // Generate the loop nest: one loop per dimension.
  loopNest = mlir::scf::buildLoopNest(
      builder, loc, lbs, /*ubs=*/dims, steps, ValueRange(op.output()),
      [&](OpBuilder &b, Location loc, ValueRange localIvs,
          ValueRange iterArgs) -> scf::ValueVector {
        // Compute offsets and sizes of the ExtractSliceOp.
        SmallVector<Value> offsets =
            computeTileOffsets(b, loc, localIvs, tileSizes);
        SmallVector<Value> sizes =
            computeTileSizes(b, loc, localIvs, tileSizes, dims);
        // Create an ExtractSliceOp: extract a tile from the PadTensorOp.
        // Note: The PadTensorOp is located outside of the loop nest. It is
        // later moved inside by ExtractSliceOfPadTensorSwapPattern.
        auto map = AffineMap::getMultiDimIdentityMap(rank, b.getContext());
        Value tiledOutput = makeTiledShape(b, loc, newPadOp->getResult(0),
                                           tileSizes, map, offsets, sizes);
        auto sliceOp = tiledOutput.getDefiningOp<tensor::ExtractSliceOp>();
        assert(sliceOp && "expected ExtractSliceOp");
        // Insert the tile into the output tensor.
        Value yieldValue =
            insertSliceIntoTensor(b, loc, sliceOp, sliceOp, iterArgs[0]);
        return scf::ValueVector({yieldValue});
      });
  return success();
}

namespace {
struct PadTensorOpTilingPattern : public OpRewritePattern<PadTensorOp> {
  PadTensorOpTilingPattern(MLIRContext *ctx, LinalgTilingOptions opt)
      : OpRewritePattern<PadTensorOp>(ctx), options(opt) {}

  LogicalResult matchAndRewrite(PadTensorOp op,
                                PatternRewriter &rewriter) const override {
    if (op->hasAttr(LinalgTransforms::kLinalgTransformMarker))
      return failure();
    PadTensorOp newPadOp;
    LoopNest loopNest;
    if (failed(tilePadTensorOp(rewriter, op, newPadOp, loopNest, options)))
      return failure();
    newPadOp->setAttr(LinalgTransforms::kLinalgTransformMarker,
                      rewriter.getUnitAttr());
    // Replace all uses of the original PadTensorOp.
    rewriter.replaceOp(op, loopNest.getResults()[0]);
    return success();
  }

  LinalgTilingOptions options;
};
} // namespace

namespace {
/// Helper classes for type list expansion.
template <typename... OpTypes>
class CanonicalizationPatternList;

template <>
class CanonicalizationPatternList<> {
public:
  static void insert(RewritePatternSet &patterns) {}
};

template <typename OpTy, typename... OpTypes>
class CanonicalizationPatternList<OpTy, OpTypes...> {
public:
  static void insert(RewritePatternSet &patterns) {
    OpTy::getCanonicalizationPatterns(patterns, patterns.getContext());
    CanonicalizationPatternList<OpTypes...>::insert(patterns);
  }
};
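
// For example (hypothetical op list), CanonicalizationPatternList<AddOp,
// MulOp>::insert(patterns) expands recursively to:
//   AddOp::getCanonicalizationPatterns(patterns, patterns.getContext());
//   MulOp::getCanonicalizationPatterns(patterns, patterns.getContext());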

/// Helper classes for type list expansion.
template <typename... OpTypes>
class RewritePatternList;

template <>
class RewritePatternList<> {
public:
  static void insert(RewritePatternSet &patterns,
                     const LinalgTilingOptions &options) {}
};

template <typename OpTy, typename... OpTypes>
class RewritePatternList<OpTy, OpTypes...> {
public:
  static void insert(RewritePatternSet &patterns,
                     const LinalgTilingOptions &options) {
    auto *ctx = patterns.getContext();
    patterns.add<LinalgTilingPattern<OpTy>>(
        ctx, options,
        LinalgTransformationFilter(ArrayRef<Identifier>{},
                                   Identifier::get("tiled", ctx)));
    RewritePatternList<OpTypes...>::insert(patterns, options);
  }
};
} // namespace

RewritePatternSet
mlir::linalg::getLinalgTilingCanonicalizationPatterns(MLIRContext *ctx) {
  RewritePatternSet patterns(ctx);
  populateLinalgTilingCanonicalizationPatterns(patterns);
  return patterns;
}

void mlir::linalg::populateLinalgTilingCanonicalizationPatterns(
    RewritePatternSet &patterns) {
  auto *ctx = patterns.getContext();
  AffineApplyOp::getCanonicalizationPatterns(patterns, ctx);
  AffineForOp::getCanonicalizationPatterns(patterns, ctx);
  AffineMinOp::getCanonicalizationPatterns(patterns, ctx);
  AffineMaxOp::getCanonicalizationPatterns(patterns, ctx);
  scf::ForOp::getCanonicalizationPatterns(patterns, ctx);
  scf::ParallelOp::getCanonicalizationPatterns(patterns, ctx);
  ConstantIndexOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::ExtractSliceOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::InsertSliceOp::getCanonicalizationPatterns(patterns, ctx);
  memref::SubViewOp::getCanonicalizationPatterns(patterns, ctx);
  tensor::CastOp::getCanonicalizationPatterns(patterns, ctx);
  memref::ViewOp::getCanonicalizationPatterns(patterns, ctx);
  PadTensorOp::getCanonicalizationPatterns(patterns, ctx);
  ctx->getLoadedDialect<LinalgDialect>()->getCanonicalizationPatterns(patterns);
  CanonicalizationPatternList<
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
      >::insert(patterns);
}

/// Populate the given list with patterns that apply Linalg tiling.
static void insertTilingPatterns(RewritePatternSet &patterns,
                                 const LinalgTilingOptions &options) {
  RewritePatternList<GenericOp,
#define GET_OP_LIST
#include "mlir/Dialect/Linalg/IR/LinalgStructuredOps.cpp.inc"
                     >::insert(patterns, options);
  patterns.add<PadTensorOpTilingPattern>(patterns.getContext(), options);
}

static void applyExtractSliceOfPadTensorSwapPattern(FuncOp funcOp) {
  MLIRContext *ctx = funcOp.getContext();
  RewritePatternSet patterns(ctx);
  patterns.add<ExtractSliceOfPadTensorSwapPattern>(patterns.getContext());
  (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
  (void)applyPatternsAndFoldGreedily(
      funcOp, getLinalgTilingCanonicalizationPatterns(ctx));
}

static void
applyTilingToLoopPatterns(LinalgTilingLoopType loopType, FuncOp funcOp,
                          ArrayRef<int64_t> tileSizes,
                          ArrayRef<StringRef> distributionTypes = {}) {
  auto options = LinalgTilingOptions()
                     .setTileSizes(tileSizes)
                     .setLoopType(loopType)
                     .setDistributionTypes(distributionTypes);
  MLIRContext *ctx = funcOp.getContext();
  RewritePatternSet patterns(ctx);
  insertTilingPatterns(patterns, options);
  patterns.add<AffineMinSCFCanonicalizationPattern>(patterns.getContext());
  (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
  (void)applyPatternsAndFoldGreedily(
      funcOp, getLinalgTilingCanonicalizationPatterns(ctx));
  // Drop the marker.
  funcOp.walk([](LinalgOp op) {
    op->removeAttr(LinalgTransforms::kLinalgTransformMarker);
  });

  // Apply the swap pattern after generating the loop nest and running
  // canonicalizations.
  applyExtractSliceOfPadTensorSwapPattern(funcOp);
}

namespace {
struct LinalgTilingPass : public LinalgTilingBase<LinalgTilingPass> {
  LinalgTilingPass() = default;
  LinalgTilingPass(ArrayRef<int64_t> sizes) { tileSizes = sizes; }

  void runOnFunction() override {
    applyTilingToLoopPatterns(LinalgTilingLoopType::Loops, getFunction(),
                              tileSizes);
  }
};

struct LinalgTilingToParallelLoopsPass
    : public LinalgTilingToParallelLoopsBase<LinalgTilingToParallelLoopsPass> {
  LinalgTilingToParallelLoopsPass() = default;
  LinalgTilingToParallelLoopsPass(ArrayRef<int64_t> sizes) {
    tileSizes = sizes;
  }

  void runOnFunction() override {
    applyTilingToLoopPatterns(LinalgTilingLoopType::ParallelLoops,
                              getFunction(), tileSizes);
  }
};

struct LinalgTilingToTiledLoopsPass
    : public LinalgTilingToTiledLoopsBase<LinalgTilingToTiledLoopsPass> {
  LinalgTilingToTiledLoopsPass() = default;
  LinalgTilingToTiledLoopsPass(ArrayRef<int64_t> sizes,
                               ArrayRef<StringRef> types) {
    tileSizes = sizes;
    distributionTypes = llvm::to_vector<2>(
        llvm::map_range(types, [](StringRef ref) { return ref.str(); }));
  }

  void runOnFunction() override {
    applyTilingToLoopPatterns(
        LinalgTilingLoopType::TiledLoops, getFunction(), tileSizes,
        llvm::to_vector<2>(
            llvm::map_range(distributionTypes,
                            [](std::string &str) { return StringRef(str); })));
  }
};

} // namespace

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgTilingPass(ArrayRef<int64_t> tileSizes) {
  return std::make_unique<LinalgTilingPass>(tileSizes);
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgTilingToParallelLoopsPass(ArrayRef<int64_t> tileSizes) {
  return std::make_unique<LinalgTilingToParallelLoopsPass>(tileSizes);
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createLinalgTilingToTiledLoopPass(ArrayRef<int64_t> tileSizes,
                                        ArrayRef<StringRef> distributionTypes) {
  return std::make_unique<LinalgTilingToTiledLoopsPass>(tileSizes,
                                                        distributionTypes);
}