//===- Loops.cpp - conversion from Linalg named and generic ops to loops --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PassDetail.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/IR/LinalgTypes.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/StandardOps/Utils/Utils.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/BlockAndValueMapping.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/FoldUtils.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/TypeSwitch.h"

using namespace mlir;
using namespace mlir::linalg;

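/// Emits one `affine.apply` per result expression of `map`, applied to `vals`.
/// Each single-result map is canonicalized together with its operands, so
/// unused values are dropped. As an illustrative sketch (not from the original
/// source), applying (d0, d1) -> (d0 + d1, d0 * 2) to ivs %i, %j yields
/// roughly:
///
/// ```mlir
///   %0 = affine.apply affine_map<(d0, d1) -> (d0 + d1)>(%i, %j)
///   %1 = affine.apply affine_map<(d0) -> (d0 * 2)>(%i)
/// ```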
static SmallVector<Value> makeCanonicalAffineApplies(OpBuilder &b, Location loc,
                                                     AffineMap map,
                                                     ArrayRef<Value> vals) {
  if (map.isEmpty())
    return {};

  assert(map.getNumInputs() == vals.size());
  SmallVector<Value> res;
  res.reserve(map.getNumResults());
  auto dims = map.getNumDims();
  for (auto e : map.getResults()) {
    auto exprMap = AffineMap::get(dims, map.getNumSymbols(), e);
    SmallVector<Value> operands(vals.begin(), vals.end());
    canonicalizeMapAndOperands(&exprMap, &operands);
    res.push_back(b.create<AffineApplyOp>(loc, exprMap, operands));
  }
  return res;
}

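/// Clones the single-block region of `op`, substituting the block arguments
/// with the scalar `indexedValues`, then stores each operand of the terminator
/// into the matching output buffer. As a hedged sketch: for a linalg.generic
/// whose body is `%s = addf %a, %b; linalg.yield %s`, this emits the cloned
/// `addf` followed by one StoreOpTy of `%s` into `outputBuffers[0]` at
/// `indexing[0]`.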
template <typename LoadOpTy, typename StoreOpTy, typename OpType>
static void inlineRegionAndEmitStore(OpBuilder &b, Location loc, OpType op,
                                     ArrayRef<Value> indexedValues,
                                     ArrayRef<SmallVector<Value>> indexing,
                                     ArrayRef<Value> outputBuffers) {
  auto &block = op->getRegion(0).front();
  BlockAndValueMapping map;
  map.map(block.getArguments(), indexedValues);
  for (auto &op : block.without_terminator()) {
    auto *newOp = b.clone(op, map);
    map.map(op.getResults(), newOp->getResults());
  }

  Operation *terminator = block.getTerminator();
  for (OpOperand &operand : terminator->getOpOperands()) {
    Value toStore = map.lookupOrDefault(operand.get());
    b.create<StoreOpTy>(loc, toStore, outputBuffers[operand.getOperandNumber()],
                        indexing[operand.getOperandNumber()]);
  }
}

// Returns a pair that contains input indices and output indices of a
// SingleInputPoolingOp `op`.
struct InputAndOutputIndices {
  SmallVector<Value> inputs;
  SmallVector<Value> outputs;
};
template <typename SingleInputPoolingOp>
static InputAndOutputIndices
getInputAndOutputIndices(OpBuilder &b, Location loc, ArrayRef<Value> allIvs,
                         SingleInputPoolingOp op) {
  auto mapsRange = op.indexing_maps().template getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  return InputAndOutputIndices{
      makeCanonicalAffineApplies(b, loc, maps[0], allIvs),
      makeCanonicalAffineApplies(b, loc, maps[2], allIvs)};
}

/// Emits the MLIR for the scalar part of the generic op by:
///   1. Emitting load ops for each input and output view in order. This is
///      achieved by applying the appropriate input or output map to the
///      enclosing induction variables.
///   2. Emitting a call to `op.fun()` that takes as arguments the scalars
///      from point 1. above.
///   3. Emitting store ops to store the results of 2. to the output
///      views.
///
/// An example output may resemble:
///
/// ```
///    scf.for %i = %c0 to %0 step %c1 {
///      scf.for %j = %c0 to %1 step %c1 {
///        scf.for %k = %c0 to %4 step %c1 {
///          %11 = load %arg0[%i, %j] :
///            memref<?x?xf32, stride_specification>
///          %12 = load %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          %13 = load %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///          %14:2 = call @foo(%11, %12, %13) : (f32, f32, f32) -> (f32, f32)
///          store %14#0, %arg1[%i, %j, %k] :
///            memref<?x?x?xf32, stride_specification>
///          store %14#1, %arg2[%i, %k, %j] :
///            memref<?x?x?xf32, stride_specification>
///        }
///      }
///    }
/// ```
template <typename LoadOpTy, typename StoreOpTy>
static void emitScalarImplementation(OpBuilder &b, Location loc,
                                     ArrayRef<Value> allIvs,
                                     LinalgOp linalgOp) {
  assert(linalgOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  SmallVector<Value> indexedValues;
  indexedValues.reserve(linalgOp.getNumInputsAndOutputs());

  auto allIvsPlusDims = SmallVector<Value>(allIvs.begin(), allIvs.end());

  // TODO: Avoid the loads if the corresponding argument of the
  // region has no uses.
  // 1.a. Emit load from input operand or, for scalars, use the operand itself.
  for (OpOperand *inputOperand : linalgOp.getInputOperands()) {
    if (linalgOp.isScalar(inputOperand)) {
      indexedValues.push_back(inputOperand->get());
      continue;
    }
    auto indexing = makeCanonicalAffineApplies(
        b, loc, linalgOp.getTiedIndexingMap(inputOperand), allIvsPlusDims);
    indexedValues.push_back(
        b.create<LoadOpTy>(loc, inputOperand->get(), indexing));
  }
  // 1.b. Emit load from output views.
  for (OpOperand *outputOperand : linalgOp.getOutputOperands()) {
    SmallVector<Value> indexing = makeCanonicalAffineApplies(
        b, loc, linalgOp.getTiedIndexingMap(outputOperand), allIvsPlusDims);
    indexedValues.push_back(
        b.create<LoadOpTy>(loc, outputOperand->get(), indexing));
  }

  // TODO: When a region inliner exists, use it.
  // 2. Inline region, currently only works for a single basic block.
  // 3. Emit store.
  SmallVector<SmallVector<Value>, 8> indexing;
  SmallVector<Value> outputBuffers;
  for (OpOperand *outputOperand : linalgOp.getOutputBufferOperands()) {
    indexing.push_back(makeCanonicalAffineApplies(
        b, loc, linalgOp.getTiedIndexingMap(outputOperand), allIvsPlusDims));
    outputBuffers.push_back(outputOperand->get());
  }
  inlineRegionAndEmitStore<LoadOpTy, StoreOpTy>(b, loc, linalgOp, indexedValues,
                                                indexing, outputBuffers);
}

// Emits a padded read of the given `input`, using `indices` to access it.
// `skipPadding` lists the dimensions for which no padding is needed, e.g. the
// non-spatial dimensions for convolutions.
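// For one padded dimension %d of extent %dim, the emitted IR looks roughly
// like the following sketch (illustrative, not from the original source):
//
//   %lo  = cmpi slt, %d, %c0 : index        // below the lower bound?
//   %hi  = cmpi sge, %d, %dim : index       // at or above the upper bound?
//   %oob = or %lo, %hi : i1
//   %idx = affine.max affine_map<(d0) -> (d0, 0)>(%d)
//   %in  = memref.load %input[..., %idx, ...]
//   %res = select %oob, %padValue, %in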
Value getPaddedInput(OpBuilder &b, Location loc, Value input,
                     ArrayRef<Value> indices, ArrayRef<int> skipPadding,
                     Value padValue) {
  Value zeroIndex = b.create<ConstantIndexOp>(loc, 0);
  SmallVector<Value> conds;
  SmallVector<Value> clampedImIdx;
  for (auto iter : llvm::enumerate(indices)) {
    int idx = iter.index();
    auto dim = iter.value();
    if (is_contained(skipPadding, idx)) {
      clampedImIdx.push_back(dim);
      continue;
    }

    Value leftOutOfBound =
        b.create<CmpIOp>(loc, CmpIPredicate::slt, dim, zeroIndex);
    if (conds.empty())
      conds.push_back(leftOutOfBound);
    else
      conds.push_back(b.create<OrOp>(loc, conds.back(), leftOutOfBound));
    Value rightBound = createOrFoldDimOp(b, loc, input, idx);
    Value rightOutOfBound =
        b.create<CmpIOp>(loc, CmpIPredicate::sge, dim, rightBound);
    conds.push_back(b.create<OrOp>(loc, conds.back(), rightOutOfBound));

    // When padding is involved, the indices are only ever shifted downward
    // (possibly below zero), so clamping with a max against zero is enough.
    MLIRContext *ctx = input.getContext();
    AffineExpr m = getAffineDimExpr(/*position=*/0, ctx),
               zero = getAffineConstantExpr(0, ctx);
    AffineMap maxMap =
        AffineMap::inferFromExprList(ArrayRef<ArrayRef<AffineExpr>>{{m, zero}})
            .front();
    clampedImIdx.push_back(b.create<AffineMaxOp>(loc, maxMap, ValueRange{dim}));
  }

  Value readInput = b.create<memref::LoadOp>(loc, input, clampedImIdx);
  if (conds.empty())
    return readInput;

  return b.create<SelectOp>(loc, conds.back(), padValue, readInput);
}

namespace {

/// The padding value for a given Op depends on the semantics of the Op.
/// The identity value for ConvOp and PoolingSumOp is 0, for PoolingMaxOp it is
/// -inf or minInt, and for PoolingMinOp it is inf or maxInt.
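/// For instance (illustrative): for f32, PoolingMaxOp pads with -inf and
/// PoolingMinOp with +inf; for i8, they pad with -128 and 127 (the signed
/// extremes), matching the signed compares used in the pooling lowerings
/// below.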
template <typename OpType> Attribute getPadValueAttr(Type type) {
  llvm_unreachable("Unexpected op type for getPadValueAttr");
  return {};
}

template <> Attribute getPadValueAttr<PoolingMaxOp>(Type type) {
  if (auto floatType = type.dyn_cast<FloatType>()) {
    return OpBuilder(type.getContext())
        .getFloatAttr(floatType, APFloat::getInf(floatType.getFloatSemantics(),
                                                 /*Negative*/ true));
  }
  if (auto intType = type.dyn_cast<IntegerType>()) {
    unsigned width = intType.getWidth();
    // The select instruction used to lower the PoolingMax uses a signed
    // comparison, use a signed constant irrespective of the signedness of the
    // integer type.
    return OpBuilder(type.getContext())
        .getIntegerAttr(intType, APInt::getSignedMinValue(width));
  }
  llvm_unreachable("Unsupported data type for PoolingMaxOp");
  return {};
}

template <> Attribute getPadValueAttr<PoolingMinOp>(Type type) {
  if (auto floatType = type.dyn_cast<FloatType>()) {
    return OpBuilder(type.getContext())
        .getFloatAttr(floatType,
                      APFloat::getInf(floatType.getFloatSemantics()));
  }
  if (auto intType = type.dyn_cast<IntegerType>()) {
    unsigned width = intType.getWidth();
    // The select instruction used to lower the PoolingMin uses a signed
    // comparison, use a signed constant irrespective of the signedness of the
    // integer type.
    return OpBuilder(type.getContext())
        .getIntegerAttr(intType, APInt::getSignedMaxValue(width));
  }
  llvm_unreachable("Unsupported data type for PoolingMinOp");
  return {};
}

template <> Attribute getPadValueAttr<PoolingSumOp>(Type type) {
  return OpBuilder(type.getContext()).getZeroAttr(type);
}

template <> Attribute getPadValueAttr<ConvOp>(Type type) {
  return OpBuilder(type.getContext()).getZeroAttr(type);
}

} // namespace

/// Returns true if `convOp` has non-zero padding.
static bool hasPadding(ConvOp convOp) {
  for (unsigned i = 0, e = convOp.getNumSpatialDimensions(); i < e; ++i) {
    if (convOp.getLowPad(i) > 0 || convOp.getHighPad(i) > 0)
      return true;
  }
  return false;
}

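// Emits output[o] += filter[f] * input[im] at a single iteration point. As an
// illustrative f32 sketch of the padding-free scalar form (not from the
// original source):
//
//   %flt = load %filter[%fIdx]
//   %in  = load %input[%imIdx]
//   %mul = mulf %flt, %in : f32
//   %out = load %output[%oIdx]
//   %add = addf %mul, %out : f32
//   store %add, %output[%oIdx]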
template <typename LoadOpTy, typename StoreOpTy>
static void emitScalarImplementation(OpBuilder &b, Location loc,
                                     ArrayRef<Value> allIvs, ConvOp convOp) {
  assert(convOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");
  auto mapsRange = convOp.indexing_maps().getAsRange<AffineMapAttr>();
  auto maps = llvm::to_vector<8>(
      llvm::map_range(mapsRange, [](AffineMapAttr a) { return a.getValue(); }));
  SmallVector<Value> fIdx(makeCanonicalAffineApplies(b, loc, maps[0], allIvs));
  SmallVector<Value> imIdx(makeCanonicalAffineApplies(b, loc, maps[1], allIvs));
  SmallVector<Value> oIdx(makeCanonicalAffineApplies(b, loc, maps[2], allIvs));

  Value filter = convOp.filter(), output = convOp.output();

  // Emit scalar form. A padded conv involves an affine.max in the memory
  // access, which affine.load does not allow, so read the input through
  // getPaddedInput (a plain memref.load guarded by a select) when there is
  // non-zero padding.
  if (hasPadding(convOp)) {
    Type type = convOp.input().getType().cast<MemRefType>().getElementType();
    Value padValue =
        b.create<ConstantOp>(loc, type, getPadValueAttr<ConvOp>(type));
    Value paddedInput =
        getPaddedInput(b, loc, convOp.input(), imIdx,
                       /* Only need to pad the window dimensions */
                       {0, static_cast<int>(imIdx.size()) - 1}, padValue);
    Value filterVal = b.create<LoadOpTy>(loc, filter, fIdx);
    Value mulVal = ArithBuilder(b, loc).mul(filterVal, paddedInput);
    Value outputVal = b.create<LoadOpTy>(loc, output, oIdx);
    Value addVal = ArithBuilder(b, loc).add(mulVal, outputVal);
    b.create<StoreOpTy>(loc, addVal, output, oIdx);
  } else {
    Value inputVal = b.create<LoadOpTy>(loc, convOp.input(), imIdx);
    Value filterVal = b.create<LoadOpTy>(loc, filter, fIdx);
    Value mulVal = ArithBuilder(b, loc).mul(filterVal, inputVal);
    Value outputVal = b.create<LoadOpTy>(loc, output, oIdx);
    Value addVal = ArithBuilder(b, loc).add(mulVal, outputVal);
    b.create<StoreOpTy>(loc, addVal, output, oIdx);
  }
}

/// Returns true if `poolingOp` has non-zero padding.
template <typename PoolingOp> static bool hasPadding(PoolingOp poolingOp) {
  for (unsigned i = 0, e = poolingOp.getNumWindowLoops(); i < e; ++i) {
    if (poolingOp.getLowPad(i) > 0 || poolingOp.getHighPad(i) > 0)
      return true;
  }
  return false;
}

template <typename LoadOpTy, typename StoreOpTy, typename PoolingOp>
static Value getPoolingInput(OpBuilder &b, Location loc, PoolingOp op,
                             ArrayRef<Value> inputIndices) {
  if (hasPadding(op)) {
    Type type =
        op.input().getType().template cast<MemRefType>().getElementType();
    Value padValue =
        b.create<ConstantOp>(loc, type, getPadValueAttr<PoolingOp>(type));
    return getPaddedInput(b, loc, op.input(), inputIndices,
                          /*Pad every dimension*/ {}, padValue);
  }
  return b.create<LoadOpTy>(loc, op.input(), inputIndices);
}

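/// Emits output[o] = min(output[o], input[i]) (resp. max) at a single
/// iteration point. There is no dedicated min/max op here, so the reduction is
/// a compare-and-select; e.g. for an integer PoolingMaxOp the emitted form is
/// roughly (illustrative sketch):
///
///   %cmp = cmpi sgt, %lhs, %rhs : i32
///   %res = select %cmp, %lhs, %rhs : i32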
template <typename LoadOpTy, typename StoreOpTy, typename OpType>
void emitPoolingMinMaxScalarImplementation(OpBuilder &b, Location loc,
                                           ArrayRef<Value> allIvs, OpType op) {
  InputAndOutputIndices indices = getInputAndOutputIndices(b, loc, allIvs, op);
  Value lhs = b.create<LoadOpTy>(loc, op.output(), indices.outputs);
  Value rhs = getPoolingInput<LoadOpTy, StoreOpTy>(b, loc, op, indices.inputs);
  Value value = llvm::TypeSwitch<Operation *, Value>(op)
                    .Case([&](PoolingMinOp poolingOp) {
                      return ArithBuilder(b, loc).select(
                          ArithBuilder(b, loc).slt(lhs, rhs), lhs, rhs);
                    })
                    .Case([&](PoolingMaxOp poolingOp) {
                      return ArithBuilder(b, loc).select(
                          ArithBuilder(b, loc).sgt(lhs, rhs), lhs, rhs);
                    })
                    .Default([&](auto) { return Value(); });
  b.create<StoreOpTy>(loc, value, op.output(), indices.outputs);
}

template <typename LoadOpTy, typename StoreOpTy>
static void emitScalarImplementation(OpBuilder &b, Location loc,
                                     ArrayRef<Value> allIvs, PoolingMaxOp op) {
  emitPoolingMinMaxScalarImplementation<LoadOpTy, StoreOpTy, PoolingMaxOp>(
      b, loc, allIvs, op);
}

template <typename LoadOpTy, typename StoreOpTy>
static void emitScalarImplementation(OpBuilder &b, Location loc,
                                     ArrayRef<Value> allIvs, PoolingMinOp op) {
  emitPoolingMinMaxScalarImplementation<LoadOpTy, StoreOpTy, PoolingMinOp>(
      b, loc, allIvs, op);
}

template <typename LoadOpTy, typename StoreOpTy>
static void emitScalarImplementation(OpBuilder &b, Location loc,
                                     ArrayRef<Value> allIvs, PoolingSumOp op) {
  auto indices = getInputAndOutputIndices(b, loc, allIvs, op);
  Value inputVal =
      getPoolingInput<LoadOpTy, StoreOpTy>(b, loc, op, indices.inputs);
  Value outputVal = b.create<LoadOpTy>(loc, op.output(), indices.outputs);
  Value added = ArithBuilder(b, loc).add(outputVal, inputVal);
  b.create<StoreOpTy>(loc, added, op.output(), indices.outputs);
}

/// Replaces the index operations in the body of the loop nest with the
/// matching induction variables.
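/// For example (illustrative): given the nest
/// `scf.for %i ... { scf.for %j ... { ... linalg.index 0 ... } }`, every
/// `linalg.index 0` in the innermost body becomes %i and every
/// `linalg.index 1` becomes %j.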
static void replaceIndexOpsByInductionVariables(LinalgOp linalgOp,
                                                PatternRewriter &rewriter,
                                                ArrayRef<Operation *> loopOps) {
  // Extract the induction variables of the loop nest from outer to inner.
  SmallVector<Value> allIvs;
  for (Operation *loopOp : loopOps) {
    llvm::TypeSwitch<Operation *>(loopOp)
        .Case([&](scf::ParallelOp parallelOp) {
          allIvs.append(parallelOp.getInductionVars().begin(),
                        parallelOp.getInductionVars().end());
        })
        .Case([&](scf::ForOp forOp) {
          allIvs.push_back(forOp.getInductionVar());
        })
        .Case([&](AffineForOp affineForOp) {
          allIvs.push_back(affineForOp.getInductionVar());
        })
        .Default([&](Operation *op) { assert(false && "unexpected op"); });
  }
  assert(linalgOp.getNumLoops() == allIvs.size() &&
         "expected the number of loops and induction variables to match");
  // Replace the index operations in the body of the innermost loop op.
  if (!loopOps.empty()) {
    LoopLikeOpInterface loopOp = loopOps.back();
    for (IndexOp indexOp :
         llvm::make_early_inc_range(loopOp.getLoopBody().getOps<IndexOp>()))
      rewriter.replaceOp(indexOp, allIvs[indexOp.dim()]);
  }
}

template <typename LoopTy>
static Optional<LinalgLoops> linalgOpToLoopsImpl(PatternRewriter &rewriter,
                                                 LinalgOp linalgOp) {
  using LoadOpTy =
      typename std::conditional<std::is_same<LoopTy, AffineForOp>::value,
                                AffineLoadOp, memref::LoadOp>::type;
  using StoreOpTy =
      typename std::conditional<std::is_same<LoopTy, AffineForOp>::value,
                                AffineStoreOp, memref::StoreOp>::type;

  // The flattened loopToOperandRangesMaps is expected to be an invertible
  // permutation map (which is asserted in the inverse calculation).
  assert(linalgOp.hasBufferSemantics() &&
         "expected linalg op with buffer semantics");

  auto loopRanges = linalgOp.createLoopRanges(rewriter, linalgOp.getLoc());
  auto iteratorTypes = llvm::to_vector<4>(linalgOp.iterator_types().getValue());

  SmallVector<Value> allIvs;
  GenerateLoopNest<LoopTy>::doit(
      rewriter, linalgOp.getLoc(), loopRanges, linalgOp, iteratorTypes,
      [&](OpBuilder &b, Location loc, ValueRange ivs,
          ValueRange iterArgs) -> scf::ValueVector {
        assert(iterArgs.empty() && "unexpected iterArgs");
        allIvs.append(ivs.begin(), ivs.end());
        llvm::TypeSwitch<Operation *>(linalgOp)
            .Case<ConvOp, PoolingMaxOp, PoolingMinOp, PoolingSumOp, LinalgOp>(
                [&](auto op) {
                  emitScalarImplementation<LoadOpTy, StoreOpTy>(b, loc, allIvs,
                                                                op);
                })
            .Default([&](Operation *op) { assert(false && "unexpected op"); });
        return scf::ValueVector{};
      });
  // Number of loop ops might be different from the number of ivs since some
  // loops like affine.parallel and scf.parallel have multiple ivs.
  SetVector<Operation *> loopSet;
  for (Value iv : allIvs) {
    if (!iv)
      return {};
    // The induction variable is a block argument of the entry block of the
    // loop operation.
    BlockArgument ivVal = iv.dyn_cast<BlockArgument>();
    if (!ivVal)
      return {};
    loopSet.insert(ivVal.getOwner()->getParentOp());
  }
  LinalgLoops loops(loopSet.begin(), loopSet.end());
  // Replace all index operations in the loop body.
  replaceIndexOpsByInductionVariables(linalgOp, rewriter, loops);
  return loops;
}

namespace {
template <typename LoopType>
class LinalgRewritePattern : public RewritePattern {
public:
  LinalgRewritePattern(MLIRContext *context)
      : RewritePattern(MatchAnyOpTypeTag(), /*benefit=*/1, context) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    auto linalgOp = dyn_cast<LinalgOp>(op);
    if (!linalgOp)
      return failure();
    if (!linalgOpToLoopsImpl<LoopType>(rewriter, linalgOp))
      return failure();
    rewriter.eraseOp(op);
    return success();
  }
};

struct TiledLoopToSCFPattern : public OpRewritePattern<TiledLoopOp> {
  using OpRewritePattern<TiledLoopOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(TiledLoopOp tiledLoop,
                                PatternRewriter &rewriter) const override {
    Location loc = tiledLoop.getLoc();

    // Fail conversion if the `tiled_loop` has not been bufferized.
    if (!llvm::all_of(tiledLoop.outputs(), [&](Value arg) {
          return arg.getType().isa<MemRefType>();
        }))
      return failure();

    // TODO: Build loop nest with `scf.for` and `scf.parallel` depending on the
    // iterator type.
    scf::buildLoopNest(rewriter, loc, tiledLoop.lowerBound(),
                       tiledLoop.upperBound(), tiledLoop.step(),
                       [&](OpBuilder &builder, Location loc, ValueRange ivs) {
                         // Move body without its terminator.
                         SmallVector<Value> newBlockArgs;
                         newBlockArgs.append(ivs.begin(), ivs.end());
                         newBlockArgs.append(tiledLoop.inputs().begin(),
                                             tiledLoop.inputs().end());
                         newBlockArgs.append(tiledLoop.outputs().begin(),
                                             tiledLoop.outputs().end());
                         Block *newBody = rewriter.getInsertionBlock();
                         rewriter.mergeBlocks(tiledLoop.getBody(), newBody,
                                              newBlockArgs);
                         rewriter.eraseOp(newBody->getTerminator());
                       });
    rewriter.eraseOp(tiledLoop);
    return success();
  }
};

/// Local folding pattern for AffineApplyOp that we can apply greedily.
/// This replaces AffineApplyOp by the proper value in cases where the
/// associated map is trivial.
/// A trivial map here is defined as a map with a single result and either:
///   1. Zero operand + returns a single AffineConstantExpr
///   2. One operand + returns a single AffineDimExpr
///   3. One operand + returns a single AffineSymbolExpr
///
/// In the first case, the AffineApplyOp is replaced by a new constant. In the
/// other cases, it is replaced by its unique operand.
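/// Examples of the folds performed here (illustrative):
///   affine.apply affine_map<() -> (42)>()        --> constant 42 : index
///   affine.apply affine_map<(d0) -> (d0)>(%v)    --> %v
///   affine.apply affine_map<()[s0] -> (s0)>(%v)  --> %v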
struct FoldAffineOp : public RewritePattern {
  FoldAffineOp(MLIRContext *context)
      : RewritePattern(AffineApplyOp::getOperationName(), 0, context) {}

  LogicalResult matchAndRewrite(Operation *op,
                                PatternRewriter &rewriter) const override {
    AffineApplyOp affineApplyOp = cast<AffineApplyOp>(op);
    auto map = affineApplyOp.getAffineMap();
    if (map.getNumResults() != 1 || map.getNumInputs() > 1)
      return failure();

    AffineExpr expr = map.getResult(0);
    if (map.getNumInputs() == 0) {
      if (auto val = expr.dyn_cast<AffineConstantExpr>()) {
        rewriter.replaceOpWithNewOp<ConstantIndexOp>(op, val.getValue());
        return success();
      }
      return failure();
    }
    if (expr.dyn_cast<AffineDimExpr>() || expr.dyn_cast<AffineSymbolExpr>()) {
      rewriter.replaceOp(op, op->getOperand(0));
      return success();
    }
    return failure();
  }
};

template <typename LoopType>
static void lowerLinalgToLoopsImpl(FuncOp funcOp) {
  MLIRContext *context = funcOp.getContext();
  RewritePatternSet patterns(context);
  patterns.add<LinalgRewritePattern<LoopType>>(context);
  memref::DimOp::getCanonicalizationPatterns(patterns, context);
  tensor::DimOp::getCanonicalizationPatterns(patterns, context);
  AffineApplyOp::getCanonicalizationPatterns(patterns, context);
  patterns.add<FoldAffineOp>(context);
  // Just apply the patterns greedily.
  (void)applyPatternsAndFoldGreedily(funcOp, std::move(patterns));
}

struct LowerToAffineLoops
    : public LinalgLowerToAffineLoopsBase<LowerToAffineLoops> {
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<memref::MemRefDialect>();
  }
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<AffineForOp>(getFunction());
  }
};

struct LowerToLoops : public LinalgLowerToLoopsBase<LowerToLoops> {
  void getDependentDialects(DialectRegistry &registry) const override {
    registry.insert<memref::MemRefDialect, scf::SCFDialect>();
  }
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<scf::ForOp>(getFunction());
  }
};

struct LowerToParallelLoops
    : public LinalgLowerToParallelLoopsBase<LowerToParallelLoops> {
  void runOnFunction() override {
    lowerLinalgToLoopsImpl<scf::ParallelOp>(getFunction());
  }
};

struct LowerTiledLoopsToSCF
    : public LinalgLowerTiledLoopsToSCFBase<LowerTiledLoopsToSCF> {
  void runOnFunction() override {
    MLIRContext *context = &getContext();
    RewritePatternSet patterns(context);
    populateTiledLoopToSCFPattern(patterns);
    (void)applyPatternsAndFoldGreedily(getFunction(), std::move(patterns));
  }
};
} // namespace

void mlir::linalg::populateTiledLoopToSCFPattern(RewritePatternSet &patterns) {
  patterns.add<TiledLoopToSCFPattern>(patterns.getContext());
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgTiledLoopsToSCFPass() {
  return std::make_unique<LowerTiledLoopsToSCF>();
}

std::unique_ptr<OperationPass<FuncOp>> mlir::createConvertLinalgToLoopsPass() {
  return std::make_unique<LowerToLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToParallelLoopsPass() {
  return std::make_unique<LowerToParallelLoops>();
}

std::unique_ptr<OperationPass<FuncOp>>
mlir::createConvertLinalgToAffineLoopsPass() {
  return std::make_unique<LowerToAffineLoops>();
}

/// Emits a loop nest of `affine.for` with the proper body for `linalgOp`.
Optional<LinalgLoops>
mlir::linalg::linalgOpToAffineLoops(PatternRewriter &rewriter,
                                    LinalgOp linalgOp) {
  return linalgOpToLoopsImpl<AffineForOp>(rewriter, linalgOp);
}

/// Emits a loop nest of `scf.for` with the proper body for `linalgOp`.
Optional<LinalgLoops> mlir::linalg::linalgOpToLoops(PatternRewriter &rewriter,
                                                    LinalgOp linalgOp) {
  return linalgOpToLoopsImpl<scf::ForOp>(rewriter, linalgOp);
}

/// Emits a loop nest of `scf.parallel` with the proper body for `linalgOp`.
Optional<LinalgLoops>
mlir::linalg::linalgOpToParallelLoops(PatternRewriter &rewriter,
                                      LinalgOp linalgOp) {
  return linalgOpToLoopsImpl<scf::ParallelOp>(rewriter, linalgOp);
}