//===- Vectorization.cpp - Implementation of linalg Vectorization ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the linalg dialect Vectorization transformations.
//
//===----------------------------------------------------------------------===//

#include "mlir/Analysis/SliceAnalysis.h"
#include "mlir/Dialect/Linalg/Analysis/DependenceAnalysis.h"
#include "mlir/Dialect/Linalg/IR/LinalgOps.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
#include "mlir/Dialect/Linalg/Utils/Utils.h"
#include "mlir/Dialect/Tensor/IR/Tensor.h"
#include "mlir/Dialect/Utils/StructuredOpsUtils.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineExpr.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/RegionUtils.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <type_traits>

using namespace mlir;
using namespace mlir::linalg;

using llvm::dbgs;

#define DEBUG_TYPE "linalg-vectorization"
/// Return the unique instance of OpType in `block` if it is indeed unique.
/// Return null if none or more than one instance exists.
template <typename OpType>
static OpType getSingleOpOfType(Block &block) {
  OpType res;
  block.walk([&](OpType op) {
    if (res) {
      res = nullptr;
      return WalkResult::interrupt();
    }
    res = op;
    return WalkResult::advance();
  });
  return res;
}

/// Given an indexing `map` coming from a LinalgOp indexing, restricted to a
/// projected permutation, compress the unused dimensions to serve as a
/// permutation_map for a vector transfer operation.
/// For example, given a linalg op such as:
///
/// ```
///   %0 = linalg.generic {
///          indexing_maps = affine_map<(d0, d1, d2, d3, d4) -> (d4, d0, d2)>,
///          indexing_maps = affine_map<(d0, d1, d2, d3, d4) -> (d1, d3)>
///        }
///        ins(%0 : tensor<2x3x4xf32>)
///        outs(%1 : tensor<5x6xf32>)
/// ```
///
/// the iteration domain size of the linalg op is 3x5x4x6x2. The first affine
/// map is reindexed to `affine_map<(d0, d1, d2) -> (d2, d0, d1)>`, the second
/// affine map is reindexed to `affine_map<(d0, d1) -> (d0, d1)>`.
static AffineMap reindexIndexingMap(AffineMap map) {
  assert(map.isProjectedPermutation() && "expected projected permutation");
  auto res = compressUnusedDims(map);
  assert(res.getNumDims() == res.getNumResults() &&
         "expected reindexed map with same number of dims and results");
  return res;
}

/// Helper data structure to represent the result of vectorization.
/// In certain specific cases, like terminators, we do not want to propagate
/// the vectorized values; the status below encodes how replacement of the
/// original op should proceed.
enum VectorizationStatus {
  /// Op failed to vectorize.
  Failure = 0,
  /// Op vectorized and custom function took care of replacement logic.
  NoReplace,
  /// Op vectorized into a new Op whose results will replace original Op's
  /// results.
  NewOp
  // TODO: support values if Op vectorized to Many-Ops whose results we need to
  // aggregate for replacement.
};
struct VectorizationResult {
  /// Return status from vectorizing the current op.
  enum VectorizationStatus status = VectorizationStatus::Failure;
  /// New vectorized operation to replace the current op.
  /// Replacement behavior is specified by `status`.
  Operation *newOp;
};

/// Return a vector type of the same shape and element type as the (assumed)
/// ShapedType of `v`.
static VectorType extractVectorTypeFromShapedValue(Value v) {
  auto st = v.getType().cast<ShapedType>();
  if (st.isa<MemRefType>() && st.getShape().empty())
    return VectorType();
  return VectorType::get(st.getShape(), st.getElementType());
}

/// Given an `outputOperand` of a LinalgOp, compute the intersection of the
/// forward slice starting from `outputOperand` and the backward slice
/// starting from the corresponding linalg.yield operand.
/// This intersection is assumed to have a single binary operation that is
/// the reduction operation. Multiple reduction operations would impose an
/// ordering between reduction dimensions and are currently unsupported in
/// Linalg. This limitation is motivated by the fact that e.g.
/// min(max(X)) != max(min(X))
// TODO: use in LinalgOp verification, there is a circular dependency atm.
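// For example, in the following illustrative op (a sketch, not taken verbatim
// from a test), the intersection of the two slices contains only the `addf`,
// which is detected as the reduction operation:
//
// ```
//   %0 = linalg.generic {
//          indexing_maps = [affine_map<(d0, d1) -> (d0, d1)>,
//                           affine_map<(d0, d1) -> (d0)>],
//          iterator_types = ["parallel", "reduction"]}
//     ins(%in : tensor<4x8xf32>) outs(%acc : tensor<4xf32>) {
//   ^bb0(%a: f32, %b: f32):
//     %sum = addf %a, %b : f32
//     linalg.yield %sum : f32
//   } -> tensor<4xf32>
// ```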
static Operation *getSingleBinaryOpAssumedReduction(OpOperand *outputOperand) {
  auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
  auto yieldOp = cast<YieldOp>(linalgOp->getRegion(0).front().getTerminator());
  unsigned yieldNum =
      outputOperand->getOperandNumber() - linalgOp.getNumInputs();
  llvm::SetVector<Operation *> backwardSlice, forwardSlice;
  BlockArgument bbArg = linalgOp->getRegion(0).front().getArgument(
      outputOperand->getOperandNumber());
  Value yieldVal = yieldOp->getOperand(yieldNum);
  getBackwardSlice(yieldVal, &backwardSlice, [&](Operation *op) {
    return op->getParentOp() == linalgOp;
  });
  backwardSlice.insert(yieldVal.getDefiningOp());
  getForwardSlice(bbArg, &forwardSlice,
                  [&](Operation *op) { return op->getParentOp() == linalgOp; });
  // Search for the (assumed unique) elementwiseMappable op at the intersection
  // of forward and backward slices.
  Operation *reductionOp = nullptr;
  for (Operation *op : llvm::reverse(backwardSlice)) {
    if (!forwardSlice.contains(op))
      continue;
    if (OpTrait::hasElementwiseMappableTraits(op)) {
      if (reductionOp) {
        // Reduction detection fails: found more than 1 elementwise-mappable op.
        return nullptr;
      }
      reductionOp = op;
    }
  }
  // TODO: also assert no other subsequent ops break the reduction.
  return reductionOp;
}

/// If `value` of assumed VectorType has a shape different than `shape`, try to
/// build and return a new vector.broadcast to `shape`.
/// Otherwise, just return `value`.
// TODO: this is best effort atm and there is currently no guarantee of
// correctness for the broadcast semantics.
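// For instance (a sketch): broadcasting a `vector<8xf32>` value to shape
// [4, 8] produces:
//   %b = vector.broadcast %v : vector<8xf32> to vector<4x8xf32>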
static Value broadcastIfNeeded(OpBuilder &b, Value value,
                               ArrayRef<int64_t> shape) {
  unsigned numDimsGtOne = std::count_if(shape.begin(), shape.end(),
                                        [](int64_t val) { return val > 1; });
  auto vecType = value.getType().dyn_cast<VectorType>();
  if (shape.empty() ||
      (vecType != nullptr &&
       (vecType.getShape() == shape || vecType.getRank() > numDimsGtOne)))
    return value;
  auto newVecType = VectorType::get(shape, vecType ? vecType.getElementType()
                                                   : value.getType());
  return b.create<vector::BroadcastOp>(b.getInsertionPoint()->getLoc(),
                                       newVecType, value);
}

static llvm::Optional<vector::CombiningKind>
getKindForOp(Operation *reductionOp) {
  if (!reductionOp)
    return llvm::None;
  return llvm::TypeSwitch<Operation *, llvm::Optional<vector::CombiningKind>>(
             reductionOp)
      .Case<AddIOp, AddFOp>([&](auto op) {
        return llvm::Optional<vector::CombiningKind>{
            vector::CombiningKind::ADD};
      })
      .Default([&](auto op) { return llvm::None; });
}

/// If `value` is of assumed VectorType and its shape differs from that of
/// `targetVectorType`, build and return a vector.multi_reduction that reduces
/// `value` over the reduction dimensions of the parent LinalgOp.
/// Otherwise, just return `value`.
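/// For instance (a sketch): reducing a `vector<4x8xf32>` value over a trailing
/// reduction dimension, to match a `vector<4xf32>` target, yields roughly:
///   %r = vector.multi_reduction #vector.kind<add>, %v [1]
///          : vector<4x8xf32> to vector<4xf32>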
static Value reduceIfNeeded(OpBuilder &b, VectorType targetVectorType,
                            Value value, OpOperand *outputOperand) {
  auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
  auto vecType = value.getType().dyn_cast<VectorType>();
  if (!vecType || vecType.getShape() == targetVectorType.getShape())
    return value;
  // At this point, we know we need to reduce. Detect the reduction operator.
  // TODO: Use the generic reduction detection util.
  Operation *reductionOp = getSingleBinaryOpAssumedReduction(outputOperand);
  unsigned pos = 0;
  MLIRContext *ctx = b.getContext();
  SmallVector<AffineExpr> exprs;
  for (auto s : linalgOp.iterator_types())
    if (isParallelIterator(s))
      exprs.push_back(getAffineDimExpr(pos++, ctx));
  auto loc = value.getLoc();
  // TODO: reuse common CombiningKind logic and support more than add.
  auto maybeKind = getKindForOp(reductionOp);
  assert(maybeKind && "Failed precondition: could not get reduction kind");
  unsigned idx = 0;
  SmallVector<bool> reductionMask(linalgOp.iterator_types().size(), false);
  for (auto attr : linalgOp.iterator_types()) {
    if (isReductionIteratorType(attr))
      reductionMask[idx] = true;
    ++idx;
  }
  return b.create<vector::MultiDimReductionOp>(loc, value, reductionMask,
                                               *maybeKind);
}

/// Build a vector.transfer_read from `source` at indices set to all `0`.
/// If source has rank zero, build a memref.load.
/// Return the produced value.
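/// For instance (a sketch), reading a `memref<4x8xf32>` source with an
/// identity `map` produces:
///   %c0 = constant 0 : index
///   %v = vector.transfer_read %src[%c0, %c0], %pad
///          : memref<4x8xf32>, vector<4x8xf32>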
static Value buildVectorRead(OpBuilder &b, Value source, VectorType vectorType,
                             AffineMap map) {
  Location loc = source.getLoc();
  auto shapedType = source.getType().cast<ShapedType>();
  SmallVector<Value> indices(shapedType.getRank(),
                             b.create<ConstantIndexOp>(loc, 0));
  return b.create<vector::TransferReadOp>(loc, vectorType, source, indices,
                                          map);
}

/// Build a vector.transfer_write of `value` into `outputOperand` at indices
/// set to all `0`; where `outputOperand` is an output operand of the LinalgOp
/// currently being vectorized. If the output has rank zero, build a
/// memref.store.
/// Return the produced value or null if no value is produced.
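/// For instance (a sketch), writing a `vector<4x8xf32>` value into a
/// `memref<4x8xf32>` output produces:
///   %c0 = constant 0 : index
///   vector.transfer_write %v, %out[%c0, %c0]
///     : vector<4x8xf32>, memref<4x8xf32>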
static Value buildVectorWrite(OpBuilder &b, Value value,
                              OpOperand *outputOperand) {
  Operation *write;
  Location loc = value.getLoc();
  if (VectorType vectorType =
          extractVectorTypeFromShapedValue(outputOperand->get())) {
    auto linalgOp = cast<LinalgOp>(outputOperand->getOwner());
    AffineMap map =
        reindexIndexingMap(linalgOp.getTiedIndexingMap(outputOperand));
    SmallVector<int64_t> transposeShape =
        applyPermutationMap(inversePermutation(map), vectorType.getShape());
    vectorType = VectorType::get(transposeShape, vectorType.getElementType());
    SmallVector<Value> indices(linalgOp.getRank(outputOperand),
                               b.create<ConstantIndexOp>(loc, 0));
    value = broadcastIfNeeded(b, value, vectorType.getShape());
    value = reduceIfNeeded(b, vectorType, value, outputOperand);
    write = b.create<vector::TransferWriteOp>(loc, value, outputOperand->get(),
                                              indices, map);
  } else {
    write = b.create<memref::StoreOp>(loc, value, outputOperand->get());
  }
  LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: vectorized op: " << *write);
  if (!write->getResults().empty())
    return write->getResult(0);
  return Value();
}

// Custom vectorization function type. Produce a vector form of Operation*
// assuming all its vectorized operands are already in the BlockAndValueMapping.
// Return nullptr if the Operation cannot be vectorized.
using CustomVectorizationHook = std::function<VectorizationResult(
    Operation *, const BlockAndValueMapping &)>;

/// Helper function to vectorize the terminator of a `linalgOp`. New result
/// vector values are appended to `newResults`. Return
/// VectorizationStatus::NoReplace to signal the vectorization algorithm that it
/// should not try to map produced operations and should instead return the
/// results via the `newResults` vector, making them available to the
/// vectorization algorithm for RAUW. This function is meant to be used as a
/// CustomVectorizationHook.
static VectorizationResult
vectorizeLinalgYield(OpBuilder &b, Operation *op,
                     const BlockAndValueMapping &bvm, LinalgOp linalgOp,
                     SmallVectorImpl<Value> &newResults) {
  auto yieldOp = dyn_cast<linalg::YieldOp>(op);
  if (!yieldOp)
    return VectorizationResult{VectorizationStatus::Failure, nullptr};
  for (auto outputs : llvm::enumerate(yieldOp.values())) {
    // TODO: Scan for an opportunity for reuse.
    // TODO: use a map.
    Value vectorValue = bvm.lookup(outputs.value());
    Value newResult = buildVectorWrite(
        b, vectorValue, linalgOp.getOutputOperand(outputs.index()));
    if (newResult)
      newResults.push_back(newResult);
  }
  return VectorizationResult{VectorizationStatus::NoReplace, nullptr};
}

/// Helper function to vectorize the index operations of a `linalgOp`. Return
/// VectorizationStatus::NewOp to signal the vectorization algorithm that it
/// should map the produced operations. This function is meant to be used as a
/// CustomVectorizationHook.
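/// For instance (a sketch), `linalg.index` for dimension 0 of a 4x8 iteration
/// space is vectorized as:
///   %cst = constant dense<[0, 1, 2, 3]> : vector<4xindex>
///   %bcast = vector.broadcast %cst : vector<4xindex> to vector<8x4xindex>
///   %idx = vector.transpose %bcast, [1, 0]
///            : vector<8x4xindex> to vector<4x8xindex>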
static VectorizationResult vectorizeLinalgIndex(OpBuilder &b, Operation *op,
                                                LinalgOp linalgOp) {
  IndexOp indexOp = dyn_cast<linalg::IndexOp>(op);
  if (!indexOp)
    return VectorizationResult{VectorizationStatus::Failure, nullptr};
  auto loc = indexOp.getLoc();
  // Compute the static loop sizes of the index op.
  auto targetShape = linalgOp.computeStaticLoopSizes();
  // Compute a one-dimensional index vector for the index op dimension.
  SmallVector<int64_t> constantSeq =
      llvm::to_vector<16>(llvm::seq<int64_t>(0, targetShape[indexOp.dim()]));
  ConstantOp constantOp =
      b.create<ConstantOp>(loc, b.getIndexVectorAttr(constantSeq));
  // Return the one-dimensional index vector if it lives in the trailing
  // dimension of the iteration space since the vectorization algorithm in this
  // case can handle the broadcast.
  if (indexOp.dim() == targetShape.size() - 1)
    return VectorizationResult{VectorizationStatus::NewOp, constantOp};
  // Otherwise permute the targetShape to move the index dimension last,
  // broadcast the one-dimensional index vector to the permuted shape, and
  // finally transpose the broadcasted index vector to undo the permutation.
  std::swap(targetShape[indexOp.dim()], targetShape.back());
  auto broadCastOp = b.create<vector::BroadcastOp>(
      loc, VectorType::get(targetShape, b.getIndexType()), constantOp);
  SmallVector<int64_t> transposition =
      llvm::to_vector<16>(llvm::seq<int64_t>(0, linalgOp.getNumLoops()));
  std::swap(transposition.back(), transposition[indexOp.dim()]);
  auto transposeOp =
      b.create<vector::TransposeOp>(loc, broadCastOp, transposition);
  return VectorizationResult{VectorizationStatus::NewOp, transposeOp};
}

/// Generic vectorization for a single operation `op`, given already vectorized
/// operands carried by `bvm`. Vectorization occurs as follows:
///   1. Try to apply any of the `customVectorizationHooks` and return its
///   result on success.
///   2. Clone any constant in the current scope without vectorization: each
///   consumer of the constant will later determine the shape to which the
///   constant needs to be broadcast.
///   3. Fail on any remaining non `ElementwiseMappable` op. It is the purpose
///   of the `customVectorizationHooks` to cover such cases.
///   4. Clone `op` in vector form to a vector of shape prescribed by the first
///   operand of maximal rank. Other operands have smaller rank and are
///   broadcast accordingly. It is assumed this broadcast is always legal,
///   otherwise, it means one of the `customVectorizationHooks` is incorrect.
///
/// This function assumes all operands of `op` have been vectorized and are in
/// the `bvm` mapping. As a consequence, this function is meant to be called on
/// a topologically-sorted list of ops.
/// This function does not update `bvm` but returns a VectorizationStatus that
/// instructs the caller what `bvm` update needs to occur.
static VectorizationResult
vectorizeOneOp(OpBuilder &b, Operation *op, const BlockAndValueMapping &bvm,
               ArrayRef<CustomVectorizationHook> customVectorizationHooks) {
  LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: vectorize op " << *op);

  // 1. Try to apply any CustomVectorizationHook.
  if (!customVectorizationHooks.empty()) {
    for (auto &customFunc : customVectorizationHooks) {
      VectorizationResult result = customFunc(op, bvm);
      if (result.status == VectorizationStatus::Failure)
        continue;
      return result;
    }
  }

  // 2. Constant ops don't get vectorized but rather broadcasted at their
  // users. Clone so that the constant is not confined to the linalgOp block.
  if (isa<ConstantOp>(op))
    return VectorizationResult{VectorizationStatus::NewOp, b.clone(*op)};

  // 3. Only ElementwiseMappable ops are allowed in the generic vectorization.
  if (!OpTrait::hasElementwiseMappableTraits(op))
    return VectorizationResult{VectorizationStatus::Failure, nullptr};

  // 4. Generic vectorization path for ElementwiseMappable ops.
  //   a. First get the first max ranked shape.
  SmallVector<int64_t, 4> firstMaxRankedShape;
  for (Value operand : op->getOperands()) {
    auto vt = bvm.lookup(operand).getType().dyn_cast<VectorType>();
    if (vt && firstMaxRankedShape.size() < vt.getShape().size())
      firstMaxRankedShape.assign(vt.getShape().begin(), vt.getShape().end());
  }
  //   b. Broadcast each operand if needed.
  auto vectorizedOperands = llvm::map_range(op->getOperands(), [&](Value v) {
    return firstMaxRankedShape.empty()
               ? bvm.lookup(v)
               : broadcastIfNeeded(b, bvm.lookup(v), firstMaxRankedShape);
  });
  //   c. For elementwise ops, the result is a vector with firstMaxRankedShape.
  auto returnTypes = llvm::map_range(op->getResultTypes(), [&](Type t) {
    return firstMaxRankedShape.empty()
               ? t
               : VectorType::get(firstMaxRankedShape, t);
  });

  // Build and return the new op.
  OperationState state(op->getLoc(), op->getName());
  state.addAttributes(op->getAttrs());
  state.addOperands(llvm::to_vector<4>(vectorizedOperands));
  state.addTypes(llvm::to_vector<4>(returnTypes));
  return VectorizationResult{VectorizationStatus::NewOp,
                             b.createOperation(state)};
}

/// Detect whether `r` contains only ConstantOp, ElementwiseMappable ops,
/// linalg.index and linalg.yield.
static bool hasOnlyScalarElementwiseOp(Region &r) {
  if (!llvm::hasSingleElement(r))
    return false;
  for (Operation &op : r.front()) {
    if (!(isa<ConstantOp, linalg::YieldOp, linalg::IndexOp>(op) ||
          OpTrait::hasElementwiseMappableTraits(&op)) ||
        llvm::any_of(op.getResultTypes(),
                     [](Type type) { return !type.isIntOrIndexOrFloat(); }))
      return false;
  }
  return true;
}

// Return true if the op is an element-wise linalg op.
static bool isElementwise(Operation *op) {
  auto linalgOp = dyn_cast<linalg::LinalgOp>(op);
  if (!linalgOp)
    return false;
  if (linalgOp.getNumLoops() != linalgOp.getNumParallelLoops())
    return false;
  // TODO: relax the restrictions on indexing map.
  for (OpOperand *opOperand : linalgOp.getOutputOperands()) {
    if (!linalgOp.getTiedIndexingMap(opOperand).isIdentity())
      return false;
  }
  if (linalgOp->getNumRegions() != 1)
    return false;
  return hasOnlyScalarElementwiseOp(linalgOp->getRegion(0));
}

/// Generic vectorization function that rewrites the body of a `linalgOp` into
/// vector form. Generic vectorization proceeds as follows:
///   1. Verify the `linalgOp` has one non-empty region.
///   2. Values defined above the region are mapped to themselves and will be
///   broadcast on a per-need basis by their consumers.
///   3. Each region argument is vectorized into a vector.transfer_read (or 0-d
///   load).
///   TODO: Reuse opportunities for RAR dependencies.
///   4a. Register CustomVectorizationHook for YieldOp to capture the results.
///   4b. Register CustomVectorizationHook for IndexOp to access the iteration
///   indices.
///   5. Iteratively call vectorizeOneOp on the region operations.
///
/// When `broadcastToMaximalCommonShape` is set to true, eager broadcasting is
/// performed to the maximal common vector size implied by the `linalgOp`
/// iteration space. This eager broadcasting is introduced in the
/// permutation_map of the vector.transfer_read operations. The eager
/// broadcasting makes it trivial to determine where broadcasts, transposes and
/// reductions should occur, without any bookkeeping. The tradeoff is that, in
/// the absence of good canonicalizations, the amount of work increases.
/// This is not deemed a problem as we expect canonicalizations and foldings to
/// aggressively clean up the useless work. A before/after sketch is given
/// below.
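/// For instance (an illustrative sketch, not an exact test case), with #id
/// the identity affine map, an elementwise op such as:
///
/// ```
///   linalg.generic {indexing_maps = [#id, #id, #id],
///                   iterator_types = ["parallel"]}
///       ins(%a, %b : memref<8xf32>, memref<8xf32>)
///       outs(%c : memref<8xf32>) {
///     ^bb0(%x: f32, %y: f32, %z: f32):
///       %0 = addf %x, %y : f32
///       linalg.yield %0 : f32
///   }
/// ```
///
/// becomes, roughly:
///
/// ```
///   %va = vector.transfer_read %a[%c0], %pad : memref<8xf32>, vector<8xf32>
///   %vb = vector.transfer_read %b[%c0], %pad : memref<8xf32>, vector<8xf32>
///   %0 = addf %va, %vb : vector<8xf32>
///   vector.transfer_write %0, %c[%c0] : vector<8xf32>, memref<8xf32>
/// ```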
LogicalResult vectorizeAsLinalgGeneric(
    OpBuilder &b, LinalgOp linalgOp, SmallVectorImpl<Value> &newResults,
    bool broadcastToMaximalCommonShape = false,
    ArrayRef<CustomVectorizationHook> customVectorizationHooks = {}) {
  // 1. Fail to vectorize if the operation does not have one non-empty region.
  if (linalgOp->getNumRegions() != 1 || linalgOp->getRegion(0).empty())
    return failure();
  auto &block = linalgOp->getRegion(0).front();

  // 2. Values defined above the region can only be broadcast for now. Make
  // them map to themselves.
  BlockAndValueMapping bvm;
  SetVector<Value> valuesSet;
  mlir::getUsedValuesDefinedAbove(linalgOp->getRegion(0), valuesSet);
  bvm.map(valuesSet.getArrayRef(), valuesSet.getArrayRef());

  if (linalgOp.getNumOutputs() == 0)
    return failure();

  // TODO: the common vector shape is equal to the static loop sizes only when
  // all indexing maps are projected permutations. For convs and stencils the
  // logic will need to evolve.
  SmallVector<int64_t> commonVectorShape = linalgOp.computeStaticLoopSizes();

  // 3. Turn all BBArgs into vector.transfer_read / load.
  SmallVector<AffineMap> indexings;
  for (OpOperand *opOperand : linalgOp.getInputAndOutputOperands()) {
    BlockArgument bbarg = block.getArgument(opOperand->getOperandNumber());
    if (linalgOp.isScalar(opOperand)) {
      bvm.map(bbarg, opOperand->get());
      continue;
    }
    // TODO: 0-d vectors.
    if (linalgOp.getShape(opOperand).empty()) {
      Value loaded =
          b.create<memref::LoadOp>(linalgOp.getLoc(), opOperand->get());
      LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: new vectorized bbarg("
                        << bbarg.getArgNumber() << "): " << loaded);
      bvm.map(bbarg, loaded);
      bvm.map(opOperand->get(), loaded);
      continue;
    }
    AffineMap map;
    VectorType vectorType;
    if (broadcastToMaximalCommonShape) {
      map = inverseAndBroadcastProjectedPermuation(
          linalgOp.getTiedIndexingMap(opOperand));
      vectorType = VectorType::get(commonVectorShape,
                                   getElementTypeOrSelf(opOperand->get()));
    } else {
      map = inversePermutation(
          reindexIndexingMap(linalgOp.getTiedIndexingMap(opOperand)));
      vectorType = VectorType::get(map.compose(linalgOp.getShape(opOperand)),
                                   getElementTypeOrSelf(opOperand->get()));
    }
    Value vectorRead = buildVectorRead(b, opOperand->get(), vectorType, map);
    LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: new vectorized bbarg("
                      << bbarg.getArgNumber() << "): " << vectorRead);
    bvm.map(bbarg, vectorRead);
    bvm.map(opOperand->get(), vectorRead);
  }

  auto hooks = llvm::to_vector<4>(customVectorizationHooks);
  // 4a. Register CustomVectorizationHook for yieldOp.
  CustomVectorizationHook vectorizeYield =
      [&](Operation *op,
          const BlockAndValueMapping &bvm) -> VectorizationResult {
    return vectorizeLinalgYield(b, op, bvm, linalgOp, newResults);
  };
  hooks.push_back(vectorizeYield);

  // 4b. Register CustomVectorizationHook for indexOp.
  CustomVectorizationHook vectorizeIndex =
      [&](Operation *op,
          const BlockAndValueMapping &bvm) -> VectorizationResult {
    return vectorizeLinalgIndex(b, op, linalgOp);
  };
  hooks.push_back(vectorizeIndex);

  // 5. Iteratively call `vectorizeOneOp` on each op in the block.
  for (Operation &op : block.getOperations()) {
    VectorizationResult result = vectorizeOneOp(b, &op, bvm, hooks);
    if (result.status == VectorizationStatus::Failure) {
      LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: failed to vectorize: " << op);
      return failure();
    }
    if (result.status == VectorizationStatus::NewOp) {
      LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: new vector op: "
                        << *result.newOp;);
      bvm.map(op.getResults(), result.newOp->getResults());
    }
  }

  return success();
}

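// A sketch (assuming a matmul-like op) of the rewrite performed by
// `vectorizeContraction` below: the elementwise multiplication in the body is
// replaced by a vector.contract over the vectors read from the operands, e.g.:
//   %va = vector.transfer_read %A[...] : memref<4x2xf32>, vector<4x2xf32>
//   %vb = vector.transfer_read %B[...] : memref<2x8xf32>, vector<2x8xf32>
//   %vc = vector.contract {...} %va, %vb, %zero
//           : vector<4x2xf32>, vector<2x8xf32> into vector<4x8xf32>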
static LogicalResult vectorizeContraction(OpBuilder &b, LinalgOp linalgOp,
                                          SmallVectorImpl<Value> &newResults) {
  assert(isaContractionOpInterface(linalgOp) &&
         "expected vectorizeContraction preconditions to be met");
  Location loc = linalgOp.getLoc();
  // Vectorize other ops as vector contraction.
  // TODO: interface.
  LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: "
                    << "Rewrite linalg op as vector.contract: ";
             linalgOp.dump());
  // Special function that describes how to vectorize the multiplication op in
  // a linalg contraction.
  CustomVectorizationHook vectorizeContraction =
      [&](Operation *op,
          const BlockAndValueMapping &bvm) -> VectorizationResult {
    if (!isa<MulIOp, MulFOp>(op))
      return VectorizationResult{VectorizationStatus::Failure, nullptr};
    ArrayRef<int64_t> outShape =
        linalgOp.getShape(linalgOp.getOutputOperand(0));
    Type vType;
    if (outShape.empty()) {
      vType = op->getResult(0).getType();
    } else {
      SmallVector<int64_t> resultShape = applyPermutationMap(
          inversePermutation(reindexIndexingMap(
              linalgOp.getTiedIndexingMap(linalgOp.getOutputOperand(0)))),
          outShape);
      vType = VectorType::get(resultShape, op->getResult(0).getType());
    }
    auto zero = b.create<ConstantOp>(loc, vType, b.getZeroAttr(vType));
    // Indexing maps at the time of vector.transfer_read are adjusted to order
    // vector dimensions in the same order as the canonical linalg op iteration
    // space order.
    // The indexings for the contraction therefore need to be adjusted.
    // TODO: consider dropping contraction special casing altogether, this will
    // require more advanced canonicalizations involving vector.multi_reduction
    // that are not yet available.
    SmallVector<AffineMap> indexingMaps;
    indexingMaps.reserve(linalgOp.getNumInputsAndOutputs());
    llvm::transform(linalgOp.getIndexingMaps(),
                    std::back_inserter(indexingMaps),
                    [](AffineMap indexingMap) {
                      return inversePermutation(reindexIndexingMap(indexingMap))
                          .compose(indexingMap);
                    });
    Operation *contract = b.create<vector::ContractionOp>(
        loc, bvm.lookup(op->getOperand(0)), bvm.lookup(op->getOperand(1)), zero,
        b.getAffineMapArrayAttr(indexingMaps), linalgOp.iterator_types());
    return VectorizationResult{VectorizationStatus::NewOp, contract};
  };
  return vectorizeAsLinalgGeneric(b, linalgOp, newResults,
                                  /*broadcastToMaximalCommonShape=*/false,
                                  {vectorizeContraction});
}

static bool allIndexingsAreProjectedPermutation(LinalgOp op) {
  return llvm::all_of(op.getIndexingMaps(),
                      [](AffineMap m) { return m.isProjectedPermutation(); });
}

// TODO: probably need some extra checks for reduction followed by consumer
// ops that may not commute (e.g. linear reduction + non-linear instructions).
static LogicalResult reductionPreconditions(LinalgOp op) {
  if (llvm::none_of(op.iterator_types(), isReductionIteratorType))
    return failure();
  for (OpOperand *opOperand : op.getOutputOperands()) {
    Operation *reductionOp = getSingleBinaryOpAssumedReduction(opOperand);
    if (!getKindForOp(reductionOp))
      return failure();
  }
  return success();
}

LogicalResult mlir::linalg::vectorizeLinalgOpPrecondition(Operation *op) {
  auto linalgOp = cast<linalg::LinalgOp>(op);
  // All types must be static shape to go to vector.
  if (linalgOp.hasDynamicShape())
    return failure();
  if (isElementwise(op))
    return success();
  if (isaContractionOpInterface(linalgOp))
    return success();
  // TODO: the common vector shape is equal to the static loop sizes only when
  // all indexing maps are projected permutations. For convs and stencils the
  // logic will need to evolve.
  if (allIndexingsAreProjectedPermutation(linalgOp) &&
      succeeded(reductionPreconditions(linalgOp)))
    return success();
  return failure();
}

LogicalResult
mlir::linalg::vectorizeLinalgOp(OpBuilder &b, Operation *op,
                                SmallVectorImpl<Value> &newResults) {
  if (failed(vectorizeLinalgOpPrecondition(op)))
    return failure();

  auto linalgOp = cast<LinalgOp>(op);
  if (isaContractionOpInterface(linalgOp))
    return vectorizeContraction(b, linalgOp, newResults);

  LLVM_DEBUG(dbgs() << "\n[" DEBUG_TYPE "]: "
                    << "Vectorize linalg op as a generic by broadcasting to "
                       "maximal common shape: "
                    << *op);
  return vectorizeAsLinalgGeneric(b, linalgOp, newResults,
                                  /*broadcastToMaximalCommonShape=*/true);
}

//----------------------------------------------------------------------------//
// Misc. vectorization patterns.
//----------------------------------------------------------------------------//

/// Helper function that retrieves the value of an IntegerAttr.
static int64_t getIntFromAttr(Attribute attr) {
  return attr.cast<IntegerAttr>().getInt();
}

/// Given an ArrayRef of OpFoldResults, return a vector of Values. IntegerAttrs
/// are converted to ConstantIndexOps. Other attribute types are not supported.
static SmallVector<Value> ofrToIndexValues(OpBuilder &builder, Location loc,
                                           ArrayRef<OpFoldResult> ofrs) {
  SmallVector<Value> result;
  llvm::for_each(ofrs, [&](auto o) {
    if (auto val = o.template dyn_cast<Value>()) {
      result.push_back(val);
    } else {
      result.push_back(builder.create<ConstantIndexOp>(
          loc, getIntFromAttr(o.template get<Attribute>())));
    }
  });
  return result;
}

/// Rewrite a PadTensorOp into a sequence of InitTensorOp, FillOp and
/// InsertSliceOp. For now, only constant padding values are supported.
/// If there is enough static type information, TransferReadOps and
/// TransferWriteOps may be generated instead of InsertSliceOps.
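/// For instance (an illustrative sketch), with a constant pad value:
///
/// ```
///   %0 = linalg.pad_tensor %src low[0, 0] high[2, 3] { ... }
///       : tensor<4x5xf32> to tensor<6x8xf32>
/// ```
///
/// is generalized into an init_tensor + fill of the destination; the copy of
/// %src into that destination may then be vectorized as a transfer_read /
/// transfer_write pair when enough static shape information is available.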
struct GenericPadTensorOpVectorizationPattern
    : public GeneralizePadTensorOpPattern {
  GenericPadTensorOpVectorizationPattern(MLIRContext *context,
                                         PatternBenefit benefit = 1)
      : GeneralizePadTensorOpPattern(context, tryVectorizeCopy, benefit) {}
  /// Vectorize the copying of a PadTensorOp's source. This is possible if
  /// each dimension size is statically known in the source type or in the
  /// result type (or both).
  static LogicalResult tryVectorizeCopy(PatternRewriter &rewriter,
                                        PadTensorOp padOp, Value dest) {
    auto sourceType = padOp.getSourceType();
    auto resultType = padOp.getResultType();

    // Copy cannot be vectorized if pad value is non-constant and source shape
    // is dynamic. In case of a dynamic source shape, padding must be appended
    // by TransferReadOp, but TransferReadOp supports only constant padding.
    auto padValue = padOp.getConstantPaddingValue();
    if (!padValue) {
      if (!sourceType.hasStaticShape()) return failure();
      // Create dummy padding value.
      auto elemType = sourceType.getElementType();
      padValue = rewriter.create<ConstantOp>(padOp.getLoc(), elemType,
                                             rewriter.getZeroAttr(elemType));
    }

    SmallVector<int64_t> vecShape;
    SmallVector<bool> readInBounds;
    SmallVector<bool> writeInBounds;
    for (unsigned i = 0; i < sourceType.getRank(); ++i) {
      if (!sourceType.isDynamicDim(i)) {
        vecShape.push_back(sourceType.getDimSize(i));
        // Source shape is statically known: Neither read nor write is
        // out-of-bounds.
        readInBounds.push_back(true);
        writeInBounds.push_back(true);
      } else if (!resultType.isDynamicDim(i)) {
        // Source shape is not statically known, but result shape is.
        // Vectorize with size of result shape. This may be larger than the
        // source size.
        vecShape.push_back(resultType.getDimSize(i));
        // Read may be out-of-bounds because the result size could be larger
        // than the source size.
        readInBounds.push_back(false);
        // Write is out-of-bounds if low padding > 0.
        writeInBounds.push_back(
            getConstantIntValue(padOp.getMixedLowPad()[i]) ==
            static_cast<int64_t>(0));
      } else {
        // Neither source nor result dim of padOp is static. Cannot vectorize
        // the copy.
        return failure();
      }
    }
    auto vecType = VectorType::get(vecShape, sourceType.getElementType());

    // Generate TransferReadOp.
    SmallVector<Value> readIndices(
        vecType.getRank(), rewriter.create<ConstantIndexOp>(padOp.getLoc(), 0));
    auto read = rewriter.create<vector::TransferReadOp>(
        padOp.getLoc(), vecType, padOp.source(), readIndices, padValue,
        readInBounds);

    // Generate TransferWriteOp.
    auto writeIndices = ofrToIndexValues(
        rewriter, padOp.getLoc(), padOp.getMixedLowPad());
    rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
        padOp, read, dest, writeIndices, writeInBounds);

    return success();
  }
};

/// Base pattern for rewriting PadTensorOps whose result is consumed by a given
/// operation type OpTy.
template <typename OpTy>
struct VectorizePadTensorOpUserPattern : public OpRewritePattern<PadTensorOp> {
  using OpRewritePattern<PadTensorOp>::OpRewritePattern;

  LogicalResult matchAndRewrite(PadTensorOp padOp,
                                PatternRewriter &rewriter) const final {
    bool changed = false;
    // Insert users in vector, because some users may be replaced/removed.
    for (auto *user : llvm::to_vector<4>(padOp->getUsers()))
      if (auto op = dyn_cast<OpTy>(user))
        changed |= rewriteUser(rewriter, padOp, op).succeeded();
    return success(changed);
  }

protected:
  virtual LogicalResult rewriteUser(
      PatternRewriter &rewriter, PadTensorOp padOp, OpTy op) const = 0;
};

/// Rewrite use of PadTensorOp result in TransferReadOp. E.g.:
/// ```
/// %0 = linalg.pad_tensor %src ... : tensor<?x?xf32> to tensor<17x5xf32>
/// %r = vector.transfer_read %0[%c0, %c0], %cst
///     {in_bounds = [true, true]} : tensor<17x5xf32>, vector<17x5xf32>
/// ```
/// is rewritten to:
/// ```
/// %r = vector.transfer_read %src[%c0, %c0], %padding
///     {in_bounds = [true, true]}
///     : tensor<?x?xf32>, vector<17x5xf32>
/// ```
/// Note: By restricting this pattern to in-bounds TransferReadOps, we can be
/// sure that the original padding value %cst was never used.
///
/// This rewrite is possible if:
/// - `xferOp` has no out-of-bounds dims or mask.
/// - Low padding is static 0.
/// - Single, scalar padding value.
struct PadTensorOpVectorizationWithTransferReadPattern
    : public VectorizePadTensorOpUserPattern<vector::TransferReadOp> {
  using VectorizePadTensorOpUserPattern<vector::TransferReadOp>
      ::VectorizePadTensorOpUserPattern;

  LogicalResult rewriteUser(PatternRewriter &rewriter, PadTensorOp padOp,
                            vector::TransferReadOp xferOp) const override {
    // Low padding must be static 0.
    if (!padOp.hasZeroLowPad()) return failure();
    // Pad value must be a constant.
    auto padValue = padOp.getConstantPaddingValue();
    if (!padValue) return failure();
    // Padding value of existing `xferOp` is unused.
    if (xferOp.hasOutOfBoundsDim() || xferOp.mask()) return failure();

    rewriter.updateRootInPlace(xferOp, [&]() {
      SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false);
      xferOp->setAttr(xferOp.getInBoundsAttrName(),
                      rewriter.getBoolArrayAttr(inBounds));
      xferOp.sourceMutable().assign(padOp.source());
      xferOp.paddingMutable().assign(padValue);
    });

    return success();
  }
};

/// Rewrite use of PadTensorOp result in TransferWriteOp.
/// This pattern rewrites TransferWriteOps that write to a padded tensor value,
/// where the same amount of padding is immediately removed again after the
/// write. In such cases, the TransferWriteOp can write to the non-padded
/// tensor value and apply out-of-bounds masking. E.g.:
/// ```
/// %0 = tensor.extract_slice ...[...] [%s0, %s1] [1, 1]
///     : tensor<...> to tensor<?x?xf32>
/// %1 = linalg.pad_tensor %0 ... : tensor<?x?xf32> to tensor<17x5xf32>
/// %2 = vector.transfer_write %vec, %1[...]
///     : vector<17x5xf32>, tensor<17x5xf32>
/// %r = tensor.extract_slice %2[0, 0] [%s0, %s1] [1, 1]
///     : tensor<17x5xf32> to tensor<?x?xf32>
/// ```
/// is rewritten to:
/// ```
/// %0 = tensor.extract_slice ...[...] [%s0, %s1] [1, 1]
///     : tensor<...> to tensor<?x?xf32>
/// %r = vector.transfer_write %vec, %0[...] : vector<17x5xf32>, tensor<?x?xf32>
/// ```
/// Note: It is important that the ExtractSliceOp %r resizes the result of the
/// TransferWriteOp to the same size as the input of the PadTensorOp (or an
/// even smaller size). Otherwise, %r's new (dynamic) dimensions would differ
/// from %r's old dimensions.
///
/// This rewrite is possible if:
/// - Low padding is static 0.
/// - `xferOp` has exactly one use, which is an ExtractSliceOp. This
///   ExtractSliceOp trims the same amount of padding that was added
///   beforehand.
/// - Single, scalar padding value.
struct PadTensorOpVectorizationWithTransferWritePattern
    : public VectorizePadTensorOpUserPattern<vector::TransferWriteOp> {
  using VectorizePadTensorOpUserPattern<vector::TransferWriteOp>
      ::VectorizePadTensorOpUserPattern;

  LogicalResult rewriteUser(PatternRewriter &rewriter, PadTensorOp padOp,
                            vector::TransferWriteOp xferOp) const override {
    // Low padding must be static 0.
    if (!padOp.hasZeroLowPad()) return failure();
    // Pad value must be a constant.
    auto padValue = padOp.getConstantPaddingValue();
    if (!padValue) return failure();
    // TransferWriteOp result must be directly consumed by an ExtractSliceOp.
    if (!xferOp->hasOneUse()) return failure();
    auto trimPadding = dyn_cast<tensor::ExtractSliceOp>(*xferOp->user_begin());
    if (!trimPadding) return failure();
    // Only static zero offsets supported when trimming padding.
    if (!trimPadding.hasZeroOffset()) return failure();
    // trimPadding must remove the amount of padding that was added earlier.
    if (!hasSameTensorSize(padOp.source(), trimPadding)) return failure();

    // Insert the new TransferWriteOp at position of the old TransferWriteOp.
    rewriter.setInsertionPoint(xferOp);

    SmallVector<bool> inBounds(xferOp.getVectorType().getRank(), false);
    auto newXferOp = rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
        xferOp, padOp.source().getType(), xferOp.vector(), padOp.source(),
        xferOp.indices(), xferOp.permutation_mapAttr(), xferOp.mask(),
        rewriter.getBoolArrayAttr(inBounds));
    rewriter.replaceOp(trimPadding, newXferOp->getResult(0));

    return success();
  }

  /// Check if `beforePadding` and `afterTrimming` have the same tensor size,
  /// i.e., same dimensions.
  ///
  /// Dimensions may be static, dynamic or mix of both. In case of dynamic
  /// dimensions, this function tries to infer the (static) tensor size by
  /// looking at the defining op and utilizing op-specific knowledge.
  ///
  /// This is a conservative analysis. In case equal tensor sizes cannot be
  /// proven statically, this analysis returns `false` even though the tensor
  /// sizes may turn out to be equal at runtime.
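  /// For instance (a sketch), the two slices below have provably the same
  /// size because they use the same `%s0`/`%s1` values, even though both
  /// result types are dynamic:
  ///   %0 = tensor.extract_slice %t[0, 0] [%s0, %s1] [1, 1]
  ///       : tensor<17x5xf32> to tensor<?x?xf32>
  ///   ...
  ///   %1 = tensor.extract_slice %u[0, 0] [%s0, %s1] [1, 1]
  ///       : tensor<17x5xf32> to tensor<?x?xf32>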
  bool hasSameTensorSize(Value beforePadding,
                         tensor::ExtractSliceOp afterTrimming) const {
    // If the input to PadTensorOp is a CastOp, try with both CastOp result
    // and CastOp operand.
    if (auto castOp = beforePadding.getDefiningOp<tensor::CastOp>())
      if (hasSameTensorSize(castOp.source(), afterTrimming)) return true;

    auto t1 = beforePadding.getType().dyn_cast<RankedTensorType>();
    auto t2 = afterTrimming.getType().dyn_cast<RankedTensorType>();
    // Only RankedTensorType supported.
    if (!t1 || !t2) return false;
    // Rank of both values must be the same.
    if (t1.getRank() != t2.getRank()) return false;

    // All static dimensions must be the same. Mixed cases (e.g., dimension
    // static in `t1` but dynamic in `t2`) are not supported.
    for (unsigned i = 0; i < t1.getRank(); ++i) {
      if (t1.isDynamicDim(i) != t2.isDynamicDim(i))
        return false;
      if (!t1.isDynamicDim(i) && t1.getDimSize(i) != t2.getDimSize(i))
        return false;
    }

    // Nothing more to check if all dimensions are static.
    if (t1.getNumDynamicDims() == 0) return true;

    // All dynamic sizes must be the same. The only supported case at the
    // moment is when `beforePadding` is an ExtractSliceOp (or a cast thereof).

    // Apart from CastOp, only ExtractSliceOp is supported.
    auto beforeSlice = beforePadding.getDefiningOp<tensor::ExtractSliceOp>();
    if (!beforeSlice)
      return false;

    assert(static_cast<size_t>(t1.getRank()) ==
           beforeSlice.getMixedSizes().size());
    assert(static_cast<size_t>(t2.getRank()) ==
           afterTrimming.getMixedSizes().size());

    for (unsigned i = 0; i < t1.getRank(); ++i) {
      // Skip static dimensions.
      if (!t1.isDynamicDim(i)) continue;
      auto size1 = beforeSlice.getMixedSizes()[i];
      auto size2 = afterTrimming.getMixedSizes()[i];

      // Case 1: Same value or same constant int.
      if (isEqualConstantIntOrValue(size1, size2)) continue;

      // Other cases: Take a deeper look at defining ops of values.
      auto v1 = size1.dyn_cast<Value>();
      auto v2 = size2.dyn_cast<Value>();
      if (!v1 || !v2) return false;

      // Case 2: Both values are identical AffineMinOps. (Should not happen if
      // CSE is run.)
      auto minOp1 = v1.getDefiningOp<AffineMinOp>();
      auto minOp2 = v2.getDefiningOp<AffineMinOp>();
      if (minOp1 && minOp2 && minOp1.getAffineMap() == minOp2.getAffineMap() &&
          minOp1.operands() == minOp2.operands()) continue;

      // Add additional cases as needed.
    }

    // All tests passed.
    return true;
  }
};

/// Rewrite use of PadTensorOp result in InsertSliceOp. E.g.:
/// ```
/// %0 = linalg.pad_tensor %src ... : tensor<?x?xf32> to tensor<17x5xf32>
/// %r = tensor.insert_slice %0
///     into %dest[%a, %b, 0, 0] [1, 1, 17, 5] [1, 1, 1, 1]
///     : tensor<17x5xf32> into tensor<?x?x17x5xf32>
/// ```
/// is rewritten to:
/// ```
/// %0 = vector.transfer_read %src[%c0, %c0], %padding
///     : tensor<?x?xf32>, vector<17x5xf32>
/// %r = vector.transfer_write %0, %dest[%a, %b, %c0, %c0]
///     {in_bounds = [true, true]} : vector<17x5xf32>, tensor<?x?x17x5xf32>
/// ```
///
/// This rewrite is possible if:
/// - Low padding is static 0.
/// - `padOp` result shape is static.
/// - The entire padded tensor is inserted.
///   (Implies that sizes of `insertOp` are all static.)
/// - Only unit strides in `insertOp`.
/// - Single, scalar padding value.
struct PadTensorOpVectorizationWithInsertSlicePattern
    : public VectorizePadTensorOpUserPattern<tensor::InsertSliceOp> {
  using VectorizePadTensorOpUserPattern<
      tensor::InsertSliceOp>::VectorizePadTensorOpUserPattern;

  LogicalResult rewriteUser(PatternRewriter &rewriter, PadTensorOp padOp,
                            tensor::InsertSliceOp insertOp) const override {
    // Low padding must be static 0.
    if (!padOp.hasZeroLowPad()) return failure();
    // Only unit stride supported.
    if (!insertOp.hasUnitStride()) return failure();
    // Pad value must be a constant.
    auto padValue = padOp.getConstantPaddingValue();
    if (!padValue)
      return failure();
    // Dynamic shapes not supported.
    if (!padOp.result().getType().cast<ShapedType>().hasStaticShape())
      return failure();

    auto vecType = VectorType::get(padOp.getType().getShape(),
                                   padOp.getType().getElementType());
    unsigned vecRank = vecType.getRank();
    unsigned tensorRank = insertOp.getType().getRank();

    // Check if sizes match: Insert the entire tensor into the most minor dims.
    // (No permutations allowed.)
    SmallVector<int64_t> expectedSizes(tensorRank - vecRank, 1);
    expectedSizes.append(vecType.getShape().begin(), vecType.getShape().end());
    if (!llvm::all_of(
            llvm::zip(insertOp.getMixedSizes(), expectedSizes), [](auto it) {
              return getConstantIntValue(std::get<0>(it)) == std::get<1>(it);
            }))
      return failure();

    // Insert the TransferReadOp and TransferWriteOp at the position of the
    // InsertSliceOp.
    rewriter.setInsertionPoint(insertOp);

    // Generate TransferReadOp: Read entire source tensor and add high padding.
    SmallVector<Value> readIndices(
        vecRank, rewriter.create<ConstantIndexOp>(padOp.getLoc(), 0));
    auto read = rewriter.create<vector::TransferReadOp>(
        padOp.getLoc(), vecType, padOp.source(), readIndices, padValue);

    // Generate TransferWriteOp: Write to InsertSliceOp's dest tensor at
    // specified offsets. Write is fully in-bounds because an InsertSliceOp's
    // source must fit into the destination at the specified offsets.
    auto writeIndices =
        ofrToIndexValues(rewriter, padOp.getLoc(), insertOp.getMixedOffsets());
    SmallVector<bool> inBounds(vecRank, true);
    rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
        insertOp, read, insertOp.dest(), writeIndices, inBounds);

    return success();
  }
};

void mlir::linalg::populatePadTensorOpVectorizationPatterns(
    RewritePatternSet &patterns, PatternBenefit baseBenefit) {
  patterns.add<GenericPadTensorOpVectorizationPattern>(
      patterns.getContext(), baseBenefit);
  // Try these specialized patterns first before resorting to the generic one.
  patterns.add<PadTensorOpVectorizationWithTransferReadPattern,
               PadTensorOpVectorizationWithTransferWritePattern,
               PadTensorOpVectorizationWithInsertSlicePattern>(
      patterns.getContext(), baseBenefit.getBenefit() + 1);
}

// TODO: cleanup all the convolution vectorization patterns.
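// A sketch of the rewrite performed below (assuming a 1-D ConvWOp whose
// window dimension is fully vectorized): input and kernel are read as
// vectors, combined with a fully-reducing vector.contract, and the scalar
// result is stored back into the output:
//   %in = vector.transfer_read %input[%c0], ... : memref<3xf32>, vector<3xf32>
//   %k = vector.transfer_read %kernel[%c0], ... : memref<3xf32>, vector<3xf32>
//   %r = vector.contract {...} %in, %k, %zero
//          : vector<3xf32>, vector<3xf32> into f32
//   memref.store %r, %output[%c0] : memref<1xf32>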
template <class ConvOp, int N>
LogicalResult ConvOpVectorization<ConvOp, N>::matchAndRewrite(
    ConvOp op, PatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  MLIRContext *context = op.getContext();

  OpOperand *input = op.getInputOperand(0);
  OpOperand *kernel = op.getInputOperand(1);
  OpOperand *output = op.getOutputOperand(0);
  ArrayRef<int64_t> inShape = op.getShape(input);
  ArrayRef<int64_t> kShape = op.getShape(kernel);

  if (llvm::any_of(inShape, ShapedType::isDynamic) ||
      llvm::any_of(kShape, ShapedType::isDynamic))
    return failure();

  SmallVector<AffineExpr, 4> mapping;
  SmallVector<int64_t, 4> vectorDims;
  // Fail to apply when the size of a non-vectorized dimension is not 1.
  for (unsigned i = 0; i < N; i++) {
    if (!mask[i] && (inShape[i] != 1 || kShape[i] != 1))
      return failure();

    if (mask[i] && inShape[i] != kShape[i])
      return failure();

    if (mask[i]) {
      mapping.push_back(getAffineDimExpr(i, context));
      vectorDims.push_back(inShape[i]);
    }
  }

  int64_t rank = op.getRank(input);
  int64_t numDims = mapping.size();
  Type elemType = getElementTypeOrSelf(input->get());

  auto map = AffineMap::get(rank, 0, mapping, context);
  SmallVector<Value, 4> zeros(rank, rewriter.create<ConstantIndexOp>(loc, 0));
  auto vecType = VectorType::get(vectorDims, elemType);

  auto inputVec = rewriter.create<vector::TransferReadOp>(
      loc, vecType, input->get(), zeros, map);
  auto kernelVec = rewriter.create<vector::TransferReadOp>(
      loc, vecType, kernel->get(), zeros, map);

  auto acc = rewriter.create<ConstantOp>(loc, elemType,
                                         rewriter.getZeroAttr(elemType));

  std::array<AffineMap, 3> indexingMaps{
      AffineMap::getMultiDimIdentityMap(numDims, context),
      AffineMap::getMultiDimIdentityMap(numDims, context),
      AffineMap::get(numDims, 0, {}, context)};

  std::vector<StringRef> iteratorTypes(numDims, "reduction");

  auto result = rewriter.create<vector::ContractionOp>(
      loc, inputVec, kernelVec, acc,
      rewriter.getAffineMapArrayAttr(indexingMaps),
      rewriter.getStrArrayAttr(iteratorTypes));

  rewriter.create<memref::StoreOp>(loc, result, output->get(),
                                   ValueRange(zeros));
  rewriter.eraseOp(op);
  return success();
}

using ConvOpConst = ConvOpVectorization<ConvWOp, 1>;

/// Inserts tiling, promotion and vectorization patterns for ConvOp
/// conversion into corresponding pattern lists.
template <typename ConvOp, unsigned N>
static void populateVectorizationPatterns(
    RewritePatternSet &tilingPatterns, RewritePatternSet &promotionPatterns,
    RewritePatternSet &vectorizationPatterns, ArrayRef<int64_t> tileSizes) {
  auto *context = tilingPatterns.getContext();
  if (tileSizes.size() < N)
    return;

  constexpr static StringRef kTiledMarker = "TILED";
  constexpr static StringRef kPromotedMarker = "PROMOTED";
  tilingPatterns.add<LinalgTilingPattern<ConvOp>>(
      context, LinalgTilingOptions().setTileSizes(tileSizes),
      LinalgTransformationFilter(ArrayRef<Identifier>{},
                                 Identifier::get(kTiledMarker, context)));

  promotionPatterns.add<LinalgPromotionPattern<ConvOp>>(
      context, LinalgPromotionOptions().setUseFullTileBuffersByDefault(true),
      LinalgTransformationFilter(Identifier::get(kTiledMarker, context),
                                 Identifier::get(kPromotedMarker, context)));

  SmallVector<bool, 4> mask(N);
  int offset = tileSizes.size() - N;
  std::transform(tileSizes.begin() + offset, tileSizes.end(), mask.begin(),
                 [](int64_t i) -> bool { return i > 1; });

  vectorizationPatterns.add<ConvOpVectorization<ConvOp, N>>(context, mask);
}

void mlir::linalg::populateConvVectorizationPatterns(
    MLIRContext *context, SmallVectorImpl<RewritePatternSet> &patterns,
    ArrayRef<int64_t> tileSizes) {
  RewritePatternSet tiling(context);
  RewritePatternSet promotion(context);
  RewritePatternSet vectorization(context);
  populateVectorizationPatterns<ConvWOp, 1>(tiling, promotion, vectorization,
                                            tileSizes);

  populateVectorizationPatterns<ConvNWCOp, 3>(tiling, promotion, vectorization,
                                              tileSizes);
  populateVectorizationPatterns<ConvInputNWCFilterWCFOp, 3>(
      tiling, promotion, vectorization, tileSizes);

  populateVectorizationPatterns<ConvNCWOp, 3>(tiling, promotion, vectorization,
                                              tileSizes);
  populateVectorizationPatterns<ConvInputNCWFilterWCFOp, 3>(
      tiling, promotion, vectorization, tileSizes);

  populateVectorizationPatterns<ConvHWOp, 2>(tiling, promotion, vectorization,
                                             tileSizes);

  populateVectorizationPatterns<ConvNHWCOp, 4>(tiling, promotion,
                                               vectorization, tileSizes);
  populateVectorizationPatterns<ConvInputNHWCFilterHWCFOp, 4>(
      tiling, promotion, vectorization, tileSizes);

  populateVectorizationPatterns<Conv2DNchwOp, 4>(tiling, promotion,
                                                 vectorization, tileSizes);
  populateVectorizationPatterns<ConvInputNCHWFilterHWCFOp, 4>(
      tiling, promotion, vectorization, tileSizes);

  populateVectorizationPatterns<ConvDHWOp, 3>(tiling, promotion, vectorization,
                                              tileSizes);

  populateVectorizationPatterns<ConvNDHWCOp, 5>(tiling, promotion,
                                                vectorization, tileSizes);
  populateVectorizationPatterns<ConvInputNDHWCFilterDHWCFOp, 5>(
      tiling, promotion, vectorization, tileSizes);

  populateVectorizationPatterns<ConvNCDHWOp, 5>(tiling, promotion,
                                                vectorization, tileSizes);
  populateVectorizationPatterns<ConvInputNCDHWFilterDHWCFOp, 5>(
      tiling, promotion, vectorization, tileSizes);

  patterns.push_back(std::move(tiling));
  patterns.push_back(std::move(promotion));
  patterns.push_back(std::move(vectorization));
}

//----------------------------------------------------------------------------//
// Forwarding patterns
//----------------------------------------------------------------------------//

/// Check whether there is any interleaved use of any `values` between
/// `firstOp` and `secondOp`. Conservatively return `true` if any op or value
/// is in a different block.
static bool mayExistInterleavedUses(Operation *firstOp, Operation *secondOp,
                                    ValueRange values) {
  if (firstOp->getBlock() != secondOp->getBlock() ||
      !firstOp->isBeforeInBlock(secondOp)) {
    LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                            << "interleavedUses precondition failed, firstOp: "
                            << *firstOp << ", second op: " << *secondOp);
    return true;
  }
  for (auto v : values) {
    for (auto &u : v.getUses()) {
      Operation *owner = u.getOwner();
      if (owner == firstOp || owner == secondOp)
        continue;
      // TODO: this is too conservative, use dominance info in the future.
      if (owner->getBlock() == firstOp->getBlock() &&
          (owner->isBeforeInBlock(firstOp) || secondOp->isBeforeInBlock(owner)))
        continue;
      LLVM_DEBUG(llvm::dbgs()
                 << "\n[" DEBUG_TYPE "]: "
                 << " found interleaved op " << *owner
                 << ", firstOp: " << *firstOp << ", second op: " << *secondOp);
      return true;
    }
  }
  return false;
}

/// Return the unique subview use of `v` if it is indeed unique, null otherwise.
static memref::SubViewOp getSubViewUseIfUnique(Value v) {
  memref::SubViewOp subViewOp;
  for (auto &u : v.getUses()) {
    if (auto newSubViewOp = dyn_cast<memref::SubViewOp>(u.getOwner())) {
      if (subViewOp)
        return memref::SubViewOp();
      subViewOp = newSubViewOp;
    }
  }
  return subViewOp;
}

/// TODO: use interfaces, side-effects and aliasing analysis as appropriate,
/// when available.
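/// A sketch of the forwarding this pattern performs (assuming a temporary
/// buffer that is padded with linalg.fill and filled with linalg.copy):
///
/// ```
///   %alloc = memref.alloc(...) : memref<...>
///   linalg.fill(%cst, %alloc)
///   %subview = memref.subview %alloc [...]
///   linalg.copy(%in, %subview)
///   %v = vector.transfer_read %alloc[...], %cst : memref<...>, vector<...>
/// ```
///
/// is rewritten so the transfer_read reads directly from %in:
///
/// ```
///   %v = vector.transfer_read %in[...], %cst : memref<...>, vector<...>
/// ```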
LogicalResult LinalgCopyVTRForwardingPattern::matchAndRewrite(
    vector::TransferReadOp xferOp, PatternRewriter &rewriter) const {

  // Transfer into `viewOrAlloc`.
  Value viewOrAlloc = xferOp.source();
  if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
      !viewOrAlloc.getDefiningOp<memref::AllocOp>())
    return failure();

  LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: " << viewOrAlloc);

  // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`.
  memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc);
  if (!subViewOp)
    return failure();
  Value subView = subViewOp.getResult();
  LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                          << "with subView " << subView);

  // Find the copy into `subView` without interleaved uses.
  CopyOp copyOp;
  for (auto &u : subView.getUses()) {
    if (auto newCopyOp = dyn_cast<CopyOp>(u.getOwner())) {
      assert(newCopyOp.output().getType().isa<MemRefType>());
      if (newCopyOp.output() != subView)
        continue;
      LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                              << "copy candidate " << *newCopyOp);
      if (mayExistInterleavedUses(newCopyOp, xferOp, {viewOrAlloc, subView}))
        continue;
      copyOp = newCopyOp;
      break;
    }
  }
  if (!copyOp)
    return failure();
  LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                          << "with copy " << *copyOp);

  // Find the fill into `viewOrAlloc` without interleaved uses before the copy.
  FillOp maybeFillOp;
  for (auto &u : viewOrAlloc.getUses()) {
    if (auto newFillOp = dyn_cast<FillOp>(u.getOwner())) {
      assert(newFillOp.output().getType().isa<MemRefType>());
      if (newFillOp.output() != viewOrAlloc)
        continue;
      LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                              << "fill candidate " << *newFillOp);
      if (mayExistInterleavedUses(newFillOp, copyOp, {viewOrAlloc, subView}))
        continue;
      maybeFillOp = newFillOp;
      break;
    }
  }
  // Ensure padding matches.
  if (maybeFillOp && xferOp.padding() != maybeFillOp.value())
    return failure();
  if (maybeFillOp)
    LLVM_DEBUG(llvm::dbgs() << "\n[" DEBUG_TYPE "]: "
                            << "with maybeFillOp " << *maybeFillOp);

  // `in` is the subview that linalg.copy reads. Replace it.
  Value in = copyOp.input();

  // linalg.copy + linalg.fill can be used to create a padded local buffer.
  // The `masked` attribute is only valid on this padded buffer.
  // When forwarding to vector.transfer_read, the attribute must be reset
  // conservatively.
  Value res = rewriter.create<vector::TransferReadOp>(
      xferOp.getLoc(), xferOp.getVectorType(), in, xferOp.indices(),
      xferOp.permutation_map(), xferOp.padding(), ArrayAttr());

  if (maybeFillOp)
    rewriter.eraseOp(maybeFillOp);
  rewriter.eraseOp(copyOp);
  rewriter.replaceOp(xferOp, res);

  return success();
}

/// TODO: use interfaces, side-effects and aliasing analysis as appropriate,
/// when available.
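/// A sketch of the forwarding this pattern performs (the write-side dual of
/// the pattern above): a transfer_write into a padded buffer followed by a
/// linalg.copy out of its subview
///
/// ```
///   vector.transfer_write %v, %alloc[...] : vector<...>, memref<...>
///   %subview = memref.subview %alloc [...]
///   linalg.copy(%subview, %out)
/// ```
///
/// is rewritten so the transfer_write writes directly into %out:
///
/// ```
///   vector.transfer_write %v, %out[...] : vector<...>, memref<...>
/// ```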
LogicalResult LinalgCopyVTWForwardingPattern::matchAndRewrite(
    vector::TransferWriteOp xferOp, PatternRewriter &rewriter) const {
  // Transfer into `viewOrAlloc`.
  Value viewOrAlloc = xferOp.source();
  if (!viewOrAlloc.getDefiningOp<memref::ViewOp>() &&
      !viewOrAlloc.getDefiningOp<memref::AllocOp>())
    return failure();

  // Ensure there is exactly one subview of `viewOrAlloc` defining `subView`.
  memref::SubViewOp subViewOp = getSubViewUseIfUnique(viewOrAlloc);
  if (!subViewOp)
    return failure();
  Value subView = subViewOp.getResult();

  // Find the copy from `subView` without interleaved uses.
  CopyOp copyOp;
  for (auto &u : subViewOp.getResult().getUses()) {
    if (auto newCopyOp = dyn_cast<CopyOp>(u.getOwner())) {
      if (newCopyOp.getInputOperand(0)->get() != subView)
        continue;
      if (mayExistInterleavedUses(xferOp, newCopyOp, {viewOrAlloc, subView}))
        continue;
      copyOp = newCopyOp;
      break;
    }
  }
  if (!copyOp)
    return failure();

  // `out` is the copy's destination; the forwarded transfer writes into it.
  assert(copyOp.output().getType().isa<MemRefType>());
  Value out = copyOp.output();

  // Forward vector.transfer into copy.
  // linalg.copy + linalg.fill can be used to create a padded local buffer.
  // The `masked` attribute is only valid on this padded buffer.
  // When forwarding to vector.transfer_write, the attribute must be reset
  // conservatively.
  rewriter.create<vector::TransferWriteOp>(
      xferOp.getLoc(), xferOp.vector(), out, xferOp.indices(),
      xferOp.permutation_map(), ArrayAttr());

  rewriter.eraseOp(copyOp);
  rewriter.eraseOp(xferOp);

  return success();
}