//===- VectorToLLVM.cpp - Conversion from Vector to the LLVM dialect ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/VectorToLLVM/ConvertVectorToLLVM.h"

#include "../PassDetail.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h"
#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVMPass.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Dialect/Vector/VectorOps.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Attributes.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Module.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/StandardTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/Passes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"

using namespace mlir;
using namespace mlir::vector;

37 template <typename T>
getPtrToElementType(T containerType,LLVMTypeConverter & typeConverter)38 static LLVM::LLVMType getPtrToElementType(T containerType,
39 LLVMTypeConverter &typeConverter) {
40 return typeConverter.convertType(containerType.getElementType())
41 .template cast<LLVM::LLVMType>()
42 .getPointerTo();
43 }
44
45 // Helper to reduce vector type by one rank at front.
reducedVectorTypeFront(VectorType tp)46 static VectorType reducedVectorTypeFront(VectorType tp) {
47 assert((tp.getRank() > 1) && "unlowerable vector type");
48 return VectorType::get(tp.getShape().drop_front(), tp.getElementType());
49 }
50
51 // Helper to reduce vector type by *all* but one rank at back.
reducedVectorTypeBack(VectorType tp)52 static VectorType reducedVectorTypeBack(VectorType tp) {
53 assert((tp.getRank() > 1) && "unlowerable vector type");
54 return VectorType::get(tp.getShape().take_back(), tp.getElementType());
55 }
56
57 // Helper that picks the proper sequence for inserting.
insertOne(ConversionPatternRewriter & rewriter,LLVMTypeConverter & typeConverter,Location loc,Value val1,Value val2,Type llvmType,int64_t rank,int64_t pos)58 static Value insertOne(ConversionPatternRewriter &rewriter,
59 LLVMTypeConverter &typeConverter, Location loc,
60 Value val1, Value val2, Type llvmType, int64_t rank,
61 int64_t pos) {
62 if (rank == 1) {
63 auto idxType = rewriter.getIndexType();
64 auto constant = rewriter.create<LLVM::ConstantOp>(
65 loc, typeConverter.convertType(idxType),
66 rewriter.getIntegerAttr(idxType, pos));
67 return rewriter.create<LLVM::InsertElementOp>(loc, llvmType, val1, val2,
68 constant);
69 }
70 return rewriter.create<LLVM::InsertValueOp>(loc, llvmType, val1, val2,
71 rewriter.getI64ArrayAttr(pos));
72 }
73
74 // Helper that picks the proper sequence for inserting.
insertOne(PatternRewriter & rewriter,Location loc,Value from,Value into,int64_t offset)75 static Value insertOne(PatternRewriter &rewriter, Location loc, Value from,
76 Value into, int64_t offset) {
77 auto vectorType = into.getType().cast<VectorType>();
78 if (vectorType.getRank() > 1)
79 return rewriter.create<InsertOp>(loc, from, into, offset);
80 return rewriter.create<vector::InsertElementOp>(
81 loc, vectorType, from, into,
82 rewriter.create<ConstantIndexOp>(loc, offset));
83 }
84
85 // Helper that picks the proper sequence for extracting.
extractOne(ConversionPatternRewriter & rewriter,LLVMTypeConverter & typeConverter,Location loc,Value val,Type llvmType,int64_t rank,int64_t pos)86 static Value extractOne(ConversionPatternRewriter &rewriter,
87 LLVMTypeConverter &typeConverter, Location loc,
88 Value val, Type llvmType, int64_t rank, int64_t pos) {
89 if (rank == 1) {
90 auto idxType = rewriter.getIndexType();
91 auto constant = rewriter.create<LLVM::ConstantOp>(
92 loc, typeConverter.convertType(idxType),
93 rewriter.getIntegerAttr(idxType, pos));
94 return rewriter.create<LLVM::ExtractElementOp>(loc, llvmType, val,
95 constant);
96 }
97 return rewriter.create<LLVM::ExtractValueOp>(loc, llvmType, val,
98 rewriter.getI64ArrayAttr(pos));
99 }
100
101 // Helper that picks the proper sequence for extracting.
extractOne(PatternRewriter & rewriter,Location loc,Value vector,int64_t offset)102 static Value extractOne(PatternRewriter &rewriter, Location loc, Value vector,
103 int64_t offset) {
104 auto vectorType = vector.getType().cast<VectorType>();
105 if (vectorType.getRank() > 1)
106 return rewriter.create<ExtractOp>(loc, vector, offset);
107 return rewriter.create<vector::ExtractElementOp>(
108 loc, vectorType.getElementType(), vector,
109 rewriter.create<ConstantIndexOp>(loc, offset));
110 }
111
112 // Helper that returns a subset of `arrayAttr` as a vector of int64_t.
113 // TODO: Better support for attribute subtype forwarding + slicing.
getI64SubArray(ArrayAttr arrayAttr,unsigned dropFront=0,unsigned dropBack=0)114 static SmallVector<int64_t, 4> getI64SubArray(ArrayAttr arrayAttr,
115 unsigned dropFront = 0,
116 unsigned dropBack = 0) {
117 assert(arrayAttr.size() > dropFront + dropBack && "Out of bounds");
118 auto range = arrayAttr.getAsRange<IntegerAttr>();
119 SmallVector<int64_t, 4> res;
120 res.reserve(arrayAttr.size() - dropFront - dropBack);
121 for (auto it = range.begin() + dropFront, eit = range.end() - dropBack;
122 it != eit; ++it)
123 res.push_back((*it).getValue().getSExtValue());
124 return res;
125 }
126
127 template <typename TransferOp>
getVectorTransferAlignment(LLVMTypeConverter & typeConverter,TransferOp xferOp,unsigned & align)128 LogicalResult getVectorTransferAlignment(LLVMTypeConverter &typeConverter,
129 TransferOp xferOp, unsigned &align) {
130 Type elementTy =
131 typeConverter.convertType(xferOp.getMemRefType().getElementType());
132 if (!elementTy)
133 return failure();
134
135 auto dataLayout = typeConverter.getDialect()->getLLVMModule().getDataLayout();
136 align = dataLayout.getPrefTypeAlignment(
137 elementTy.cast<LLVM::LLVMType>().getUnderlyingType());
138 return success();
139 }
140
141 static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter & rewriter,LLVMTypeConverter & typeConverter,Location loc,TransferReadOp xferOp,ArrayRef<Value> operands,Value dataPtr)142 replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
143 LLVMTypeConverter &typeConverter, Location loc,
144 TransferReadOp xferOp,
145 ArrayRef<Value> operands, Value dataPtr) {
146 unsigned align;
147 if (failed(getVectorTransferAlignment(typeConverter, xferOp, align)))
148 return failure();
149 rewriter.replaceOpWithNewOp<LLVM::LoadOp>(xferOp, dataPtr, align);
150 return success();
151 }
152
153 static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter & rewriter,LLVMTypeConverter & typeConverter,Location loc,TransferReadOp xferOp,ArrayRef<Value> operands,Value dataPtr,Value mask)154 replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
155 LLVMTypeConverter &typeConverter, Location loc,
156 TransferReadOp xferOp, ArrayRef<Value> operands,
157 Value dataPtr, Value mask) {
158 auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };
159 VectorType fillType = xferOp.getVectorType();
160 Value fill = rewriter.create<SplatOp>(loc, fillType, xferOp.padding());
161 fill = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(fillType), fill);
162
163 Type vecTy = typeConverter.convertType(xferOp.getVectorType());
164 if (!vecTy)
165 return failure();
166
167 unsigned align;
168 if (failed(getVectorTransferAlignment(typeConverter, xferOp, align)))
169 return failure();
170
171 rewriter.replaceOpWithNewOp<LLVM::MaskedLoadOp>(
172 xferOp, vecTy, dataPtr, mask, ValueRange{fill},
173 rewriter.getI32IntegerAttr(align));
174 return success();
175 }
176
177 static LogicalResult
replaceTransferOpWithLoadOrStore(ConversionPatternRewriter & rewriter,LLVMTypeConverter & typeConverter,Location loc,TransferWriteOp xferOp,ArrayRef<Value> operands,Value dataPtr)178 replaceTransferOpWithLoadOrStore(ConversionPatternRewriter &rewriter,
179 LLVMTypeConverter &typeConverter, Location loc,
180 TransferWriteOp xferOp,
181 ArrayRef<Value> operands, Value dataPtr) {
182 unsigned align;
183 if (failed(getVectorTransferAlignment(typeConverter, xferOp, align)))
184 return failure();
185 auto adaptor = TransferWriteOpAdaptor(operands);
186 rewriter.replaceOpWithNewOp<LLVM::StoreOp>(xferOp, adaptor.vector(), dataPtr,
187 align);
188 return success();
189 }
190
191 static LogicalResult
replaceTransferOpWithMasked(ConversionPatternRewriter & rewriter,LLVMTypeConverter & typeConverter,Location loc,TransferWriteOp xferOp,ArrayRef<Value> operands,Value dataPtr,Value mask)192 replaceTransferOpWithMasked(ConversionPatternRewriter &rewriter,
193 LLVMTypeConverter &typeConverter, Location loc,
194 TransferWriteOp xferOp, ArrayRef<Value> operands,
195 Value dataPtr, Value mask) {
196 unsigned align;
197 if (failed(getVectorTransferAlignment(typeConverter, xferOp, align)))
198 return failure();
199
200 auto adaptor = TransferWriteOpAdaptor(operands);
201 rewriter.replaceOpWithNewOp<LLVM::MaskedStoreOp>(
202 xferOp, adaptor.vector(), dataPtr, mask,
203 rewriter.getI32IntegerAttr(align));
204 return success();
205 }
206
getTransferOpAdapter(TransferReadOp xferOp,ArrayRef<Value> operands)207 static TransferReadOpAdaptor getTransferOpAdapter(TransferReadOp xferOp,
208 ArrayRef<Value> operands) {
209 return TransferReadOpAdaptor(operands);
210 }
211
getTransferOpAdapter(TransferWriteOp xferOp,ArrayRef<Value> operands)212 static TransferWriteOpAdaptor getTransferOpAdapter(TransferWriteOp xferOp,
213 ArrayRef<Value> operands) {
214 return TransferWriteOpAdaptor(operands);
215 }
216
217 namespace {
218
219 /// Conversion pattern for a vector.matrix_multiply.
220 /// This is lowered directly to the proper llvm.intr.matrix.multiply.
221 class VectorMatmulOpConversion : public ConvertToLLVMPattern {
222 public:
VectorMatmulOpConversion(MLIRContext * context,LLVMTypeConverter & typeConverter)223 explicit VectorMatmulOpConversion(MLIRContext *context,
224 LLVMTypeConverter &typeConverter)
225 : ConvertToLLVMPattern(vector::MatmulOp::getOperationName(), context,
226 typeConverter) {}
227
228 LogicalResult
matchAndRewrite(Operation * op,ArrayRef<Value> operands,ConversionPatternRewriter & rewriter) const229 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
230 ConversionPatternRewriter &rewriter) const override {
231 auto matmulOp = cast<vector::MatmulOp>(op);
232 auto adaptor = vector::MatmulOpAdaptor(operands);
233 rewriter.replaceOpWithNewOp<LLVM::MatrixMultiplyOp>(
234 op, typeConverter.convertType(matmulOp.res().getType()), adaptor.lhs(),
235 adaptor.rhs(), matmulOp.lhs_rows(), matmulOp.lhs_columns(),
236 matmulOp.rhs_columns());
237 return success();
238 }
239 };
240
241 /// Conversion pattern for a vector.flat_transpose.
242 /// This is lowered directly to the proper llvm.intr.matrix.transpose.
243 class VectorFlatTransposeOpConversion : public ConvertToLLVMPattern {
244 public:
VectorFlatTransposeOpConversion(MLIRContext * context,LLVMTypeConverter & typeConverter)245 explicit VectorFlatTransposeOpConversion(MLIRContext *context,
246 LLVMTypeConverter &typeConverter)
247 : ConvertToLLVMPattern(vector::FlatTransposeOp::getOperationName(),
248 context, typeConverter) {}
249
250 LogicalResult
matchAndRewrite(Operation * op,ArrayRef<Value> operands,ConversionPatternRewriter & rewriter) const251 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
252 ConversionPatternRewriter &rewriter) const override {
253 auto transOp = cast<vector::FlatTransposeOp>(op);
254 auto adaptor = vector::FlatTransposeOpAdaptor(operands);
255 rewriter.replaceOpWithNewOp<LLVM::MatrixTransposeOp>(
256 transOp, typeConverter.convertType(transOp.res().getType()),
257 adaptor.matrix(), transOp.rows(), transOp.columns());
258 return success();
259 }
260 };
261
262 class VectorReductionOpConversion : public ConvertToLLVMPattern {
263 public:
VectorReductionOpConversion(MLIRContext * context,LLVMTypeConverter & typeConverter,bool reassociateFP)264 explicit VectorReductionOpConversion(MLIRContext *context,
265 LLVMTypeConverter &typeConverter,
266 bool reassociateFP)
267 : ConvertToLLVMPattern(vector::ReductionOp::getOperationName(), context,
268 typeConverter),
269 reassociateFPReductions(reassociateFP) {}
270
271 LogicalResult
matchAndRewrite(Operation * op,ArrayRef<Value> operands,ConversionPatternRewriter & rewriter) const272 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
273 ConversionPatternRewriter &rewriter) const override {
274 auto reductionOp = cast<vector::ReductionOp>(op);
275 auto kind = reductionOp.kind();
276 Type eltType = reductionOp.dest().getType();
277 Type llvmType = typeConverter.convertType(eltType);
278 if (eltType.isSignlessInteger(32) || eltType.isSignlessInteger(64)) {
279 // Integer reductions: add/mul/min/max/and/or/xor.
280 if (kind == "add")
281 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_add>(
282 op, llvmType, operands[0]);
283 else if (kind == "mul")
284 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_mul>(
285 op, llvmType, operands[0]);
286 else if (kind == "min")
287 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_smin>(
288 op, llvmType, operands[0]);
289 else if (kind == "max")
290 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_smax>(
291 op, llvmType, operands[0]);
292 else if (kind == "and")
293 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_and>(
294 op, llvmType, operands[0]);
295 else if (kind == "or")
296 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_or>(
297 op, llvmType, operands[0]);
298 else if (kind == "xor")
299 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_xor>(
300 op, llvmType, operands[0]);
301 else
302 return failure();
303 return success();
304
305 } else if (eltType.isF32() || eltType.isF64()) {
306 // Floating-point reductions: add/mul/min/max
307 if (kind == "add") {
308 // Optional accumulator (or zero).
309 Value acc = operands.size() > 1 ? operands[1]
310 : rewriter.create<LLVM::ConstantOp>(
311 op->getLoc(), llvmType,
312 rewriter.getZeroAttr(eltType));
313 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_v2_fadd>(
314 op, llvmType, acc, operands[0],
315 rewriter.getBoolAttr(reassociateFPReductions));
316 } else if (kind == "mul") {
317 // Optional accumulator (or one).
318 Value acc = operands.size() > 1
319 ? operands[1]
320 : rewriter.create<LLVM::ConstantOp>(
321 op->getLoc(), llvmType,
322 rewriter.getFloatAttr(eltType, 1.0));
323 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_v2_fmul>(
324 op, llvmType, acc, operands[0],
325 rewriter.getBoolAttr(reassociateFPReductions));
326 } else if (kind == "min")
327 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_fmin>(
328 op, llvmType, operands[0]);
329 else if (kind == "max")
330 rewriter.replaceOpWithNewOp<LLVM::experimental_vector_reduce_fmax>(
331 op, llvmType, operands[0]);
332 else
333 return failure();
334 return success();
335 }
336 return failure();
337 }
338
339 private:
340 const bool reassociateFPReductions;
341 };
342
343 class VectorShuffleOpConversion : public ConvertToLLVMPattern {
344 public:
VectorShuffleOpConversion(MLIRContext * context,LLVMTypeConverter & typeConverter)345 explicit VectorShuffleOpConversion(MLIRContext *context,
346 LLVMTypeConverter &typeConverter)
347 : ConvertToLLVMPattern(vector::ShuffleOp::getOperationName(), context,
348 typeConverter) {}
349
350 LogicalResult
matchAndRewrite(Operation * op,ArrayRef<Value> operands,ConversionPatternRewriter & rewriter) const351 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
352 ConversionPatternRewriter &rewriter) const override {
353 auto loc = op->getLoc();
354 auto adaptor = vector::ShuffleOpAdaptor(operands);
355 auto shuffleOp = cast<vector::ShuffleOp>(op);
356 auto v1Type = shuffleOp.getV1VectorType();
357 auto v2Type = shuffleOp.getV2VectorType();
358 auto vectorType = shuffleOp.getVectorType();
359 Type llvmType = typeConverter.convertType(vectorType);
360 auto maskArrayAttr = shuffleOp.mask();
361
362 // Bail if result type cannot be lowered.
363 if (!llvmType)
364 return failure();
365
366 // Get rank and dimension sizes.
367 int64_t rank = vectorType.getRank();
368 assert(v1Type.getRank() == rank);
369 assert(v2Type.getRank() == rank);
370 int64_t v1Dim = v1Type.getDimSize(0);
371
372 // For rank 1, where both operands have *exactly* the same vector type,
373 // there is direct shuffle support in LLVM. Use it!
374 if (rank == 1 && v1Type == v2Type) {
375 Value shuffle = rewriter.create<LLVM::ShuffleVectorOp>(
376 loc, adaptor.v1(), adaptor.v2(), maskArrayAttr);
377 rewriter.replaceOp(op, shuffle);
378 return success();
379 }
380
381 // For all other cases, insert the individual values individually.
382 Value insert = rewriter.create<LLVM::UndefOp>(loc, llvmType);
383 int64_t insPos = 0;
384 for (auto en : llvm::enumerate(maskArrayAttr)) {
385 int64_t extPos = en.value().cast<IntegerAttr>().getInt();
386 Value value = adaptor.v1();
387 if (extPos >= v1Dim) {
388 extPos -= v1Dim;
389 value = adaptor.v2();
390 }
391 Value extract = extractOne(rewriter, typeConverter, loc, value, llvmType,
392 rank, extPos);
393 insert = insertOne(rewriter, typeConverter, loc, insert, extract,
394 llvmType, rank, insPos++);
395 }
396 rewriter.replaceOp(op, insert);
397 return success();
398 }
399 };
400
401 class VectorExtractElementOpConversion : public ConvertToLLVMPattern {
402 public:
VectorExtractElementOpConversion(MLIRContext * context,LLVMTypeConverter & typeConverter)403 explicit VectorExtractElementOpConversion(MLIRContext *context,
404 LLVMTypeConverter &typeConverter)
405 : ConvertToLLVMPattern(vector::ExtractElementOp::getOperationName(),
406 context, typeConverter) {}
407
408 LogicalResult
matchAndRewrite(Operation * op,ArrayRef<Value> operands,ConversionPatternRewriter & rewriter) const409 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
410 ConversionPatternRewriter &rewriter) const override {
411 auto adaptor = vector::ExtractElementOpAdaptor(operands);
412 auto extractEltOp = cast<vector::ExtractElementOp>(op);
413 auto vectorType = extractEltOp.getVectorType();
414 auto llvmType = typeConverter.convertType(vectorType.getElementType());
415
416 // Bail if result type cannot be lowered.
417 if (!llvmType)
418 return failure();
419
420 rewriter.replaceOpWithNewOp<LLVM::ExtractElementOp>(
421 op, llvmType, adaptor.vector(), adaptor.position());
422 return success();
423 }
424 };
425
426 class VectorExtractOpConversion : public ConvertToLLVMPattern {
427 public:
VectorExtractOpConversion(MLIRContext * context,LLVMTypeConverter & typeConverter)428 explicit VectorExtractOpConversion(MLIRContext *context,
429 LLVMTypeConverter &typeConverter)
430 : ConvertToLLVMPattern(vector::ExtractOp::getOperationName(), context,
431 typeConverter) {}
432
433 LogicalResult
matchAndRewrite(Operation * op,ArrayRef<Value> operands,ConversionPatternRewriter & rewriter) const434 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
435 ConversionPatternRewriter &rewriter) const override {
436 auto loc = op->getLoc();
437 auto adaptor = vector::ExtractOpAdaptor(operands);
438 auto extractOp = cast<vector::ExtractOp>(op);
439 auto vectorType = extractOp.getVectorType();
440 auto resultType = extractOp.getResult().getType();
441 auto llvmResultType = typeConverter.convertType(resultType);
442 auto positionArrayAttr = extractOp.position();
443
444 // Bail if result type cannot be lowered.
445 if (!llvmResultType)
446 return failure();
447
448 // One-shot extraction of vector from array (only requires extractvalue).
449 if (resultType.isa<VectorType>()) {
450 Value extracted = rewriter.create<LLVM::ExtractValueOp>(
451 loc, llvmResultType, adaptor.vector(), positionArrayAttr);
452 rewriter.replaceOp(op, extracted);
453 return success();
454 }
455
456 // Potential extraction of 1-D vector from array.
457 auto *context = op->getContext();
458 Value extracted = adaptor.vector();
459 auto positionAttrs = positionArrayAttr.getValue();
460 if (positionAttrs.size() > 1) {
461 auto oneDVectorType = reducedVectorTypeBack(vectorType);
462 auto nMinusOnePositionAttrs =
463 ArrayAttr::get(positionAttrs.drop_back(), context);
464 extracted = rewriter.create<LLVM::ExtractValueOp>(
465 loc, typeConverter.convertType(oneDVectorType), extracted,
466 nMinusOnePositionAttrs);
467 }
468
469 // Remaining extraction of element from 1-D LLVM vector
470 auto position = positionAttrs.back().cast<IntegerAttr>();
471 auto i64Type = LLVM::LLVMType::getInt64Ty(typeConverter.getDialect());
472 auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
473 extracted =
474 rewriter.create<LLVM::ExtractElementOp>(loc, extracted, constant);
475 rewriter.replaceOp(op, extracted);
476
477 return success();
478 }
479 };
480
481 /// Conversion pattern that turns a vector.fma on a 1-D vector
482 /// into an llvm.intr.fmuladd. This is a trivial 1-1 conversion.
483 /// This does not match vectors of n >= 2 rank.
484 ///
485 /// Example:
486 /// ```
487 /// vector.fma %a, %a, %a : vector<8xf32>
488 /// ```
489 /// is converted to:
490 /// ```
491 /// llvm.intr.fmuladd %va, %va, %va:
492 /// (!llvm<"<8 x float>">, !llvm<"<8 x float>">, !llvm<"<8 x float>">)
493 /// -> !llvm<"<8 x float>">
494 /// ```
495 class VectorFMAOp1DConversion : public ConvertToLLVMPattern {
496 public:
VectorFMAOp1DConversion(MLIRContext * context,LLVMTypeConverter & typeConverter)497 explicit VectorFMAOp1DConversion(MLIRContext *context,
498 LLVMTypeConverter &typeConverter)
499 : ConvertToLLVMPattern(vector::FMAOp::getOperationName(), context,
500 typeConverter) {}
501
502 LogicalResult
matchAndRewrite(Operation * op,ArrayRef<Value> operands,ConversionPatternRewriter & rewriter) const503 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
504 ConversionPatternRewriter &rewriter) const override {
505 auto adaptor = vector::FMAOpAdaptor(operands);
506 vector::FMAOp fmaOp = cast<vector::FMAOp>(op);
507 VectorType vType = fmaOp.getVectorType();
508 if (vType.getRank() != 1)
509 return failure();
510 rewriter.replaceOpWithNewOp<LLVM::FMulAddOp>(op, adaptor.lhs(),
511 adaptor.rhs(), adaptor.acc());
512 return success();
513 }
514 };
515
516 class VectorInsertElementOpConversion : public ConvertToLLVMPattern {
517 public:
VectorInsertElementOpConversion(MLIRContext * context,LLVMTypeConverter & typeConverter)518 explicit VectorInsertElementOpConversion(MLIRContext *context,
519 LLVMTypeConverter &typeConverter)
520 : ConvertToLLVMPattern(vector::InsertElementOp::getOperationName(),
521 context, typeConverter) {}
522
523 LogicalResult
matchAndRewrite(Operation * op,ArrayRef<Value> operands,ConversionPatternRewriter & rewriter) const524 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
525 ConversionPatternRewriter &rewriter) const override {
526 auto adaptor = vector::InsertElementOpAdaptor(operands);
527 auto insertEltOp = cast<vector::InsertElementOp>(op);
528 auto vectorType = insertEltOp.getDestVectorType();
529 auto llvmType = typeConverter.convertType(vectorType);
530
531 // Bail if result type cannot be lowered.
532 if (!llvmType)
533 return failure();
534
535 rewriter.replaceOpWithNewOp<LLVM::InsertElementOp>(
536 op, llvmType, adaptor.dest(), adaptor.source(), adaptor.position());
537 return success();
538 }
539 };
540
541 class VectorInsertOpConversion : public ConvertToLLVMPattern {
542 public:
VectorInsertOpConversion(MLIRContext * context,LLVMTypeConverter & typeConverter)543 explicit VectorInsertOpConversion(MLIRContext *context,
544 LLVMTypeConverter &typeConverter)
545 : ConvertToLLVMPattern(vector::InsertOp::getOperationName(), context,
546 typeConverter) {}
547
548 LogicalResult
matchAndRewrite(Operation * op,ArrayRef<Value> operands,ConversionPatternRewriter & rewriter) const549 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
550 ConversionPatternRewriter &rewriter) const override {
551 auto loc = op->getLoc();
552 auto adaptor = vector::InsertOpAdaptor(operands);
553 auto insertOp = cast<vector::InsertOp>(op);
554 auto sourceType = insertOp.getSourceType();
555 auto destVectorType = insertOp.getDestVectorType();
556 auto llvmResultType = typeConverter.convertType(destVectorType);
557 auto positionArrayAttr = insertOp.position();
558
559 // Bail if result type cannot be lowered.
560 if (!llvmResultType)
561 return failure();
562
563 // One-shot insertion of a vector into an array (only requires insertvalue).
564 if (sourceType.isa<VectorType>()) {
565 Value inserted = rewriter.create<LLVM::InsertValueOp>(
566 loc, llvmResultType, adaptor.dest(), adaptor.source(),
567 positionArrayAttr);
568 rewriter.replaceOp(op, inserted);
569 return success();
570 }
571
572 // Potential extraction of 1-D vector from array.
573 auto *context = op->getContext();
574 Value extracted = adaptor.dest();
575 auto positionAttrs = positionArrayAttr.getValue();
576 auto position = positionAttrs.back().cast<IntegerAttr>();
577 auto oneDVectorType = destVectorType;
578 if (positionAttrs.size() > 1) {
579 oneDVectorType = reducedVectorTypeBack(destVectorType);
580 auto nMinusOnePositionAttrs =
581 ArrayAttr::get(positionAttrs.drop_back(), context);
582 extracted = rewriter.create<LLVM::ExtractValueOp>(
583 loc, typeConverter.convertType(oneDVectorType), extracted,
584 nMinusOnePositionAttrs);
585 }
586
587 // Insertion of an element into a 1-D LLVM vector.
588 auto i64Type = LLVM::LLVMType::getInt64Ty(typeConverter.getDialect());
589 auto constant = rewriter.create<LLVM::ConstantOp>(loc, i64Type, position);
590 Value inserted = rewriter.create<LLVM::InsertElementOp>(
591 loc, typeConverter.convertType(oneDVectorType), extracted,
592 adaptor.source(), constant);
593
594 // Potential insertion of resulting 1-D vector into array.
595 if (positionAttrs.size() > 1) {
596 auto nMinusOnePositionAttrs =
597 ArrayAttr::get(positionAttrs.drop_back(), context);
598 inserted = rewriter.create<LLVM::InsertValueOp>(loc, llvmResultType,
599 adaptor.dest(), inserted,
600 nMinusOnePositionAttrs);
601 }
602
603 rewriter.replaceOp(op, inserted);
604 return success();
605 }
606 };
607
608 /// Rank reducing rewrite for n-D FMA into (n-1)-D FMA where n > 1.
609 ///
610 /// Example:
611 /// ```
612 /// %d = vector.fma %a, %b, %c : vector<2x4xf32>
613 /// ```
614 /// is rewritten into:
615 /// ```
616 /// %r = splat %f0: vector<2x4xf32>
617 /// %va = vector.extractvalue %a[0] : vector<2x4xf32>
618 /// %vb = vector.extractvalue %b[0] : vector<2x4xf32>
619 /// %vc = vector.extractvalue %c[0] : vector<2x4xf32>
620 /// %vd = vector.fma %va, %vb, %vc : vector<4xf32>
621 /// %r2 = vector.insertvalue %vd, %r[0] : vector<4xf32> into vector<2x4xf32>
622 /// %va2 = vector.extractvalue %a2[1] : vector<2x4xf32>
623 /// %vb2 = vector.extractvalue %b2[1] : vector<2x4xf32>
624 /// %vc2 = vector.extractvalue %c2[1] : vector<2x4xf32>
625 /// %vd2 = vector.fma %va2, %vb2, %vc2 : vector<4xf32>
626 /// %r3 = vector.insertvalue %vd2, %r2[1] : vector<4xf32> into vector<2x4xf32>
627 /// // %r3 holds the final value.
628 /// ```
629 class VectorFMAOpNDRewritePattern : public OpRewritePattern<FMAOp> {
630 public:
631 using OpRewritePattern<FMAOp>::OpRewritePattern;
632
matchAndRewrite(FMAOp op,PatternRewriter & rewriter) const633 LogicalResult matchAndRewrite(FMAOp op,
634 PatternRewriter &rewriter) const override {
635 auto vType = op.getVectorType();
636 if (vType.getRank() < 2)
637 return failure();
638
639 auto loc = op.getLoc();
640 auto elemType = vType.getElementType();
641 Value zero = rewriter.create<ConstantOp>(loc, elemType,
642 rewriter.getZeroAttr(elemType));
643 Value desc = rewriter.create<SplatOp>(loc, vType, zero);
644 for (int64_t i = 0, e = vType.getShape().front(); i != e; ++i) {
645 Value extrLHS = rewriter.create<ExtractOp>(loc, op.lhs(), i);
646 Value extrRHS = rewriter.create<ExtractOp>(loc, op.rhs(), i);
647 Value extrACC = rewriter.create<ExtractOp>(loc, op.acc(), i);
648 Value fma = rewriter.create<FMAOp>(loc, extrLHS, extrRHS, extrACC);
649 desc = rewriter.create<InsertOp>(loc, fma, desc, i);
650 }
651 rewriter.replaceOp(op, desc);
652 return success();
653 }
654 };
655
656 // When ranks are different, InsertStridedSlice needs to extract a properly
657 // ranked vector from the destination vector into which to insert. This pattern
658 // only takes care of this part and forwards the rest of the conversion to
659 // another pattern that converts InsertStridedSlice for operands of the same
660 // rank.
661 //
662 // RewritePattern for InsertStridedSliceOp where source and destination vectors
663 // have different ranks. In this case:
664 // 1. the proper subvector is extracted from the destination vector
665 // 2. a new InsertStridedSlice op is created to insert the source in the
666 // destination subvector
667 // 3. the destination subvector is inserted back in the proper place
668 // 4. the op is replaced by the result of step 3.
669 // The new InsertStridedSlice from step 2. will be picked up by a
670 // `VectorInsertStridedSliceOpSameRankRewritePattern`.
671 class VectorInsertStridedSliceOpDifferentRankRewritePattern
672 : public OpRewritePattern<InsertStridedSliceOp> {
673 public:
674 using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;
675
matchAndRewrite(InsertStridedSliceOp op,PatternRewriter & rewriter) const676 LogicalResult matchAndRewrite(InsertStridedSliceOp op,
677 PatternRewriter &rewriter) const override {
678 auto srcType = op.getSourceVectorType();
679 auto dstType = op.getDestVectorType();
680
681 if (op.offsets().getValue().empty())
682 return failure();
683
684 auto loc = op.getLoc();
685 int64_t rankDiff = dstType.getRank() - srcType.getRank();
686 assert(rankDiff >= 0);
687 if (rankDiff == 0)
688 return failure();
689
690 int64_t rankRest = dstType.getRank() - rankDiff;
691 // Extract / insert the subvector of matching rank and InsertStridedSlice
692 // on it.
693 Value extracted =
694 rewriter.create<ExtractOp>(loc, op.dest(),
695 getI64SubArray(op.offsets(), /*dropFront=*/0,
696 /*dropFront=*/rankRest));
697 // A different pattern will kick in for InsertStridedSlice with matching
698 // ranks.
699 auto stridedSliceInnerOp = rewriter.create<InsertStridedSliceOp>(
700 loc, op.source(), extracted,
701 getI64SubArray(op.offsets(), /*dropFront=*/rankDiff),
702 getI64SubArray(op.strides(), /*dropFront=*/0));
703 rewriter.replaceOpWithNewOp<InsertOp>(
704 op, stridedSliceInnerOp.getResult(), op.dest(),
705 getI64SubArray(op.offsets(), /*dropFront=*/0,
706 /*dropFront=*/rankRest));
707 return success();
708 }
709 };
710
711 // RewritePattern for InsertStridedSliceOp where source and destination vectors
712 // have the same rank. In this case, we reduce
713 // 1. the proper subvector is extracted from the destination vector
714 // 2. a new InsertStridedSlice op is created to insert the source in the
715 // destination subvector
716 // 3. the destination subvector is inserted back in the proper place
717 // 4. the op is replaced by the result of step 3.
718 // The new InsertStridedSlice from step 2. will be picked up by a
719 // `VectorInsertStridedSliceOpSameRankRewritePattern`.
720 class VectorInsertStridedSliceOpSameRankRewritePattern
721 : public OpRewritePattern<InsertStridedSliceOp> {
722 public:
723 using OpRewritePattern<InsertStridedSliceOp>::OpRewritePattern;
724
matchAndRewrite(InsertStridedSliceOp op,PatternRewriter & rewriter) const725 LogicalResult matchAndRewrite(InsertStridedSliceOp op,
726 PatternRewriter &rewriter) const override {
727 auto srcType = op.getSourceVectorType();
728 auto dstType = op.getDestVectorType();
729
730 if (op.offsets().getValue().empty())
731 return failure();
732
733 int64_t rankDiff = dstType.getRank() - srcType.getRank();
734 assert(rankDiff >= 0);
735 if (rankDiff != 0)
736 return failure();
737
738 if (srcType == dstType) {
739 rewriter.replaceOp(op, op.source());
740 return success();
741 }
742
743 int64_t offset =
744 op.offsets().getValue().front().cast<IntegerAttr>().getInt();
745 int64_t size = srcType.getShape().front();
746 int64_t stride =
747 op.strides().getValue().front().cast<IntegerAttr>().getInt();
748
749 auto loc = op.getLoc();
750 Value res = op.dest();
751 // For each slice of the source vector along the most major dimension.
752 for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
753 off += stride, ++idx) {
754 // 1. extract the proper subvector (or element) from source
755 Value extractedSource = extractOne(rewriter, loc, op.source(), idx);
756 if (extractedSource.getType().isa<VectorType>()) {
757 // 2. If we have a vector, extract the proper subvector from destination
758 // Otherwise we are at the element level and no need to recurse.
759 Value extractedDest = extractOne(rewriter, loc, op.dest(), off);
760 // 3. Reduce the problem to lowering a new InsertStridedSlice op with
761 // smaller rank.
762 extractedSource = rewriter.create<InsertStridedSliceOp>(
763 loc, extractedSource, extractedDest,
764 getI64SubArray(op.offsets(), /* dropFront=*/1),
765 getI64SubArray(op.strides(), /* dropFront=*/1));
766 }
767 // 4. Insert the extractedSource into the res vector.
768 res = insertOne(rewriter, loc, extractedSource, res, off);
769 }
770
771 rewriter.replaceOp(op, res);
772 return success();
773 }
774 /// This pattern creates recursive InsertStridedSliceOp, but the recursion is
775 /// bounded as the rank is strictly decreasing.
hasBoundedRewriteRecursion() const776 bool hasBoundedRewriteRecursion() const final { return true; }
777 };
778
779 class VectorTypeCastOpConversion : public ConvertToLLVMPattern {
780 public:
VectorTypeCastOpConversion(MLIRContext * context,LLVMTypeConverter & typeConverter)781 explicit VectorTypeCastOpConversion(MLIRContext *context,
782 LLVMTypeConverter &typeConverter)
783 : ConvertToLLVMPattern(vector::TypeCastOp::getOperationName(), context,
784 typeConverter) {}
785
786 LogicalResult
matchAndRewrite(Operation * op,ArrayRef<Value> operands,ConversionPatternRewriter & rewriter) const787 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
788 ConversionPatternRewriter &rewriter) const override {
789 auto loc = op->getLoc();
790 vector::TypeCastOp castOp = cast<vector::TypeCastOp>(op);
791 MemRefType sourceMemRefType =
792 castOp.getOperand().getType().cast<MemRefType>();
793 MemRefType targetMemRefType =
794 castOp.getResult().getType().cast<MemRefType>();
795
796 // Only static shape casts supported atm.
797 if (!sourceMemRefType.hasStaticShape() ||
798 !targetMemRefType.hasStaticShape())
799 return failure();
800
801 auto llvmSourceDescriptorTy =
802 operands[0].getType().dyn_cast<LLVM::LLVMType>();
803 if (!llvmSourceDescriptorTy || !llvmSourceDescriptorTy.isStructTy())
804 return failure();
805 MemRefDescriptor sourceMemRef(operands[0]);
806
807 auto llvmTargetDescriptorTy = typeConverter.convertType(targetMemRefType)
808 .dyn_cast_or_null<LLVM::LLVMType>();
809 if (!llvmTargetDescriptorTy || !llvmTargetDescriptorTy.isStructTy())
810 return failure();
811
812 int64_t offset;
813 SmallVector<int64_t, 4> strides;
814 auto successStrides =
815 getStridesAndOffset(sourceMemRefType, strides, offset);
816 bool isContiguous = (strides.back() == 1);
817 if (isContiguous) {
818 auto sizes = sourceMemRefType.getShape();
819 for (int index = 0, e = strides.size() - 2; index < e; ++index) {
820 if (strides[index] != strides[index + 1] * sizes[index + 1]) {
821 isContiguous = false;
822 break;
823 }
824 }
825 }
826 // Only contiguous source tensors supported atm.
827 if (failed(successStrides) || !isContiguous)
828 return failure();
829
830 auto int64Ty = LLVM::LLVMType::getInt64Ty(typeConverter.getDialect());
831
832 // Create descriptor.
833 auto desc = MemRefDescriptor::undef(rewriter, loc, llvmTargetDescriptorTy);
834 Type llvmTargetElementTy = desc.getElementType();
835 // Set allocated ptr.
836 Value allocated = sourceMemRef.allocatedPtr(rewriter, loc);
837 allocated =
838 rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, allocated);
839 desc.setAllocatedPtr(rewriter, loc, allocated);
840 // Set aligned ptr.
841 Value ptr = sourceMemRef.alignedPtr(rewriter, loc);
842 ptr = rewriter.create<LLVM::BitcastOp>(loc, llvmTargetElementTy, ptr);
843 desc.setAlignedPtr(rewriter, loc, ptr);
844 // Fill offset 0.
845 auto attr = rewriter.getIntegerAttr(rewriter.getIndexType(), 0);
846 auto zero = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, attr);
847 desc.setOffset(rewriter, loc, zero);
848
849 // Fill size and stride descriptors in memref.
850 for (auto indexedSize : llvm::enumerate(targetMemRefType.getShape())) {
851 int64_t index = indexedSize.index();
852 auto sizeAttr =
853 rewriter.getIntegerAttr(rewriter.getIndexType(), indexedSize.value());
854 auto size = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, sizeAttr);
855 desc.setSize(rewriter, loc, index, size);
856 auto strideAttr =
857 rewriter.getIntegerAttr(rewriter.getIndexType(), strides[index]);
858 auto stride = rewriter.create<LLVM::ConstantOp>(loc, int64Ty, strideAttr);
859 desc.setStride(rewriter, loc, index, stride);
860 }
861
862 rewriter.replaceOp(op, {desc});
863 return success();
864 }
865 };
866
867 /// Conversion pattern that converts a 1-D vector transfer read/write op in a
868 /// sequence of:
869 /// 1. Bitcast or addrspacecast to vector form.
870 /// 2. Create an offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
871 /// 3. Create a mask where offsetVector is compared against memref upper bound.
872 /// 4. Rewrite op as a masked read or write.
template <typename ConcreteOp>
class VectorTransferConversion : public ConvertToLLVMPattern {
public:
  explicit VectorTransferConversion(MLIRContext *context,
                                    LLVMTypeConverter &typeConv)
      : ConvertToLLVMPattern(ConcreteOp::getOperationName(), context,
                             typeConv) {}

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto xferOp = cast<ConcreteOp>(op);
    auto adaptor = getTransferOpAdapter(xferOp, operands);

    // Only 1-D transfers with at least one index are handled by this pattern.
    if (xferOp.getVectorType().getRank() > 1 ||
        llvm::size(xferOp.indices()) == 0)
      return failure();
    // Only the minor identity permutation map is supported (no transposition
    // or broadcast semantics).
    if (xferOp.permutation_map() !=
        AffineMap::getMinorIdentityMap(xferOp.permutation_map().getNumInputs(),
                                       xferOp.getVectorType().getRank(),
                                       op->getContext()))
      return failure();

    auto toLLVMTy = [&](Type t) { return typeConverter.convertType(t); };

    Location loc = op->getLoc();
    Type i64Type = rewriter.getIntegerType(64);
    MemRefType memRefType = xferOp.getMemRefType();

    // 1. Get the source/dst address as an LLVM vector pointer.
    //    The vector pointer would always be on address space 0, therefore
    //    addrspacecast shall be used when source/dst memrefs are not on
    //    address space 0.
    // TODO: support alignment when possible.
    Value dataPtr = getDataPtr(loc, memRefType, adaptor.memref(),
                               adaptor.indices(), rewriter, getModule());
    auto vecTy =
        toLLVMTy(xferOp.getVectorType()).template cast<LLVM::LLVMType>();
    Value vectorDataPtr;
    if (memRefType.getMemorySpace() == 0)
      vectorDataPtr =
          rewriter.create<LLVM::BitcastOp>(loc, vecTy.getPointerTo(), dataPtr);
    else
      vectorDataPtr = rewriter.create<LLVM::AddrSpaceCastOp>(
          loc, vecTy.getPointerTo(), dataPtr);

    // An unmasked transfer lowers to a plain vector load / store; no bounds
    // mask is needed.
    if (!xferOp.isMaskedDim(0))
      return replaceTransferOpWithLoadOrStore(rewriter, typeConverter, loc,
                                              xferOp, operands, vectorDataPtr);

    // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
    unsigned vecWidth = vecTy.getVectorNumElements();
    VectorType vectorCmpType = VectorType::get(vecWidth, i64Type);
    SmallVector<int64_t, 8> indices;
    indices.reserve(vecWidth);
    for (unsigned i = 0; i < vecWidth; ++i)
      indices.push_back(i);
    Value linearIndices = rewriter.create<ConstantOp>(
        loc, vectorCmpType,
        DenseElementsAttr::get(vectorCmpType, ArrayRef<int64_t>(indices)));
    // Cast the standard-dialect constant into the LLVM dialect type system.
    linearIndices = rewriter.create<LLVM::DialectCastOp>(
        loc, toLLVMTy(vectorCmpType), linearIndices);

    // 3. Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
    // TODO: when the leaf transfer rank is k > 1 we need the last
    // `k` dimensions here.
    unsigned lastIndex = llvm::size(xferOp.indices()) - 1;
    Value offsetIndex = *(xferOp.indices().begin() + lastIndex);
    offsetIndex = rewriter.create<IndexCastOp>(loc, i64Type, offsetIndex);
    Value base = rewriter.create<SplatOp>(loc, vectorCmpType, offsetIndex);
    Value offsetVector = rewriter.create<AddIOp>(loc, base, linearIndices);

    // 4. Let dim the memref dimension, compute the vector comparison mask:
    //    [ offset + 0 .. offset + vector_length - 1 ] < [ dim .. dim ]
    Value dim = rewriter.create<DimOp>(loc, xferOp.memref(), lastIndex);
    dim = rewriter.create<IndexCastOp>(loc, i64Type, dim);
    dim = rewriter.create<SplatOp>(loc, vectorCmpType, dim);
    Value mask =
        rewriter.create<CmpIOp>(loc, CmpIPredicate::slt, offsetVector, dim);
    mask = rewriter.create<LLVM::DialectCastOp>(loc, toLLVMTy(mask.getType()),
                                                mask);

    // 5. Rewrite as a masked read / write.
    return replaceTransferOpWithMasked(rewriter, typeConverter, loc, xferOp,
                                       operands, vectorDataPtr, mask);
  }
};
960
class VectorPrintOpConversion : public ConvertToLLVMPattern {
public:
  explicit VectorPrintOpConversion(MLIRContext *context,
                                   LLVMTypeConverter &typeConverter)
      : ConvertToLLVMPattern(vector::PrintOp::getOperationName(), context,
                             typeConverter) {}

  // Proof-of-concept lowering implementation that relies on a small
  // runtime support library, which only needs to provide a few
  // printing methods (single value for all data types, opening/closing
  // bracket, comma, newline). The lowering fully unrolls a vector
  // in terms of these elementary printing operations. The advantage
  // of this approach is that the library can remain unaware of all
  // low-level implementation details of vectors while still supporting
  // output of any shaped and dimensioned vector. Due to full unrolling,
  // this approach is less suited for very large vectors though.
  //
  // TODO: rely solely on libc in future? something else?
  //
  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    auto printOp = cast<vector::PrintOp>(op);
    auto adaptor = vector::PrintOpAdaptor(operands);
    Type printType = printOp.getPrintType();

    // The printed type must be convertible to the LLVM dialect.
    if (typeConverter.convertType(printType) == nullptr)
      return failure();

    // Make sure element type has runtime support: pick the runtime print
    // routine matching the element type (i1 shares the i32 printer; the i1 ->
    // i32 widening happens in emitRanks).
    VectorType vectorType = printType.dyn_cast<VectorType>();
    Type eltType = vectorType ? vectorType.getElementType() : printType;
    int64_t rank = vectorType ? vectorType.getRank() : 0;
    Operation *printer;
    if (eltType.isSignlessInteger(1) || eltType.isSignlessInteger(32))
      printer = getPrintI32(op);
    else if (eltType.isSignlessInteger(64))
      printer = getPrintI64(op);
    else if (eltType.isF32())
      printer = getPrintFloat(op);
    else if (eltType.isF64())
      printer = getPrintDouble(op);
    else
      return failure();

    // Unroll vector into elementary print calls.
    emitRanks(rewriter, op, adaptor.source(), vectorType, printer, rank);
    emitCall(rewriter, op->getLoc(), getPrintNewline(op));
    rewriter.eraseOp(op);
    return success();
  }

private:
  // Recursively emit the print calls for `value`: a scalar (rank 0) is
  // printed directly, a vector is printed as "( elem, elem, ... )" by peeling
  // one leading dimension per recursion step.
  void emitRanks(ConversionPatternRewriter &rewriter, Operation *op,
                 Value value, VectorType vectorType, Operation *printer,
                 int64_t rank) const {
    Location loc = op->getLoc();
    if (rank == 0) {
      if (value.getType() ==
          LLVM::LLVMType::getInt1Ty(typeConverter.getDialect())) {
        // Convert i1 (bool) to i32 so we can use the print_i32 method.
        // This avoids the need for a print_i1 method with an unclear ABI.
        auto i32Type = LLVM::LLVMType::getInt32Ty(typeConverter.getDialect());
        auto trueVal = rewriter.create<ConstantOp>(
            loc, i32Type, rewriter.getI32IntegerAttr(1));
        auto falseVal = rewriter.create<ConstantOp>(
            loc, i32Type, rewriter.getI32IntegerAttr(0));
        value = rewriter.create<SelectOp>(loc, value, trueVal, falseVal);
      }
      emitCall(rewriter, loc, printer, value);
      return;
    }

    emitCall(rewriter, loc, getPrintOpen(op));
    Operation *printComma = getPrintComma(op);
    int64_t dim = vectorType.getDimSize(0);
    for (int64_t d = 0; d < dim; ++d) {
      // Peel off the leading dimension and recurse on each element; at rank 1
      // the nested value is a scalar of the element type.
      auto reducedType =
          rank > 1 ? reducedVectorTypeFront(vectorType) : nullptr;
      auto llvmType = typeConverter.convertType(
          rank > 1 ? reducedType : vectorType.getElementType());
      Value nestedVal =
          extractOne(rewriter, typeConverter, loc, value, llvmType, rank, d);
      emitRanks(rewriter, op, nestedVal, reducedType, printer, rank - 1);
      if (d != dim - 1)
        emitCall(rewriter, loc, printComma);
    }
    emitCall(rewriter, loc, getPrintClose(op));
  }

  // Helper to emit a call to the runtime function `ref` with `params`.
  static void emitCall(ConversionPatternRewriter &rewriter, Location loc,
                       Operation *ref, ValueRange params = ValueRange()) {
    rewriter.create<LLVM::CallOp>(loc, ArrayRef<Type>{},
                                  rewriter.getSymbolRefAttr(ref), params);
  }

  // Helper for printer method declaration (first hit) and lookup: returns the
  // module-level LLVMFuncOp named `name`, declaring a fresh void(params)
  // function on first use.
  static Operation *getPrint(Operation *op, LLVM::LLVMDialect *dialect,
                             StringRef name, ArrayRef<LLVM::LLVMType> params) {
    auto module = op->getParentOfType<ModuleOp>();
    auto func = module.lookupSymbol<LLVM::LLVMFuncOp>(name);
    if (func)
      return func;
    OpBuilder moduleBuilder(module.getBodyRegion());
    return moduleBuilder.create<LLVM::LLVMFuncOp>(
        op->getLoc(), name,
        LLVM::LLVMType::getFunctionTy(LLVM::LLVMType::getVoidTy(dialect),
                                      params, /*isVarArg=*/false));
  }

  // Helpers for method names.
  Operation *getPrintI32(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_i32",
                    LLVM::LLVMType::getInt32Ty(dialect));
  }
  Operation *getPrintI64(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_i64",
                    LLVM::LLVMType::getInt64Ty(dialect));
  }
  Operation *getPrintFloat(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_f32",
                    LLVM::LLVMType::getFloatTy(dialect));
  }
  Operation *getPrintDouble(Operation *op) const {
    LLVM::LLVMDialect *dialect = typeConverter.getDialect();
    return getPrint(op, dialect, "print_f64",
                    LLVM::LLVMType::getDoubleTy(dialect));
  }
  Operation *getPrintOpen(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_open", {});
  }
  Operation *getPrintClose(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_close", {});
  }
  Operation *getPrintComma(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_comma", {});
  }
  Operation *getPrintNewline(Operation *op) const {
    return getPrint(op, typeConverter.getDialect(), "print_newline", {});
  }
};
1106
1107 /// Progressive lowering of ExtractStridedSliceOp to either:
1108 /// 1. extractelement + insertelement for the 1-D case
1109 /// 2. extract + optional strided_slice + insert for the n-D case.
1110 class VectorStridedSliceOpConversion
1111 : public OpRewritePattern<ExtractStridedSliceOp> {
1112 public:
1113 using OpRewritePattern<ExtractStridedSliceOp>::OpRewritePattern;
1114
matchAndRewrite(ExtractStridedSliceOp op,PatternRewriter & rewriter) const1115 LogicalResult matchAndRewrite(ExtractStridedSliceOp op,
1116 PatternRewriter &rewriter) const override {
1117 auto dstType = op.getResult().getType().cast<VectorType>();
1118
1119 assert(!op.offsets().getValue().empty() && "Unexpected empty offsets");
1120
1121 int64_t offset =
1122 op.offsets().getValue().front().cast<IntegerAttr>().getInt();
1123 int64_t size = op.sizes().getValue().front().cast<IntegerAttr>().getInt();
1124 int64_t stride =
1125 op.strides().getValue().front().cast<IntegerAttr>().getInt();
1126
1127 auto loc = op.getLoc();
1128 auto elemType = dstType.getElementType();
1129 assert(elemType.isSignlessIntOrIndexOrFloat());
1130 Value zero = rewriter.create<ConstantOp>(loc, elemType,
1131 rewriter.getZeroAttr(elemType));
1132 Value res = rewriter.create<SplatOp>(loc, dstType, zero);
1133 for (int64_t off = offset, e = offset + size * stride, idx = 0; off < e;
1134 off += stride, ++idx) {
1135 Value extracted = extractOne(rewriter, loc, op.vector(), off);
1136 if (op.offsets().getValue().size() > 1) {
1137 extracted = rewriter.create<ExtractStridedSliceOp>(
1138 loc, extracted, getI64SubArray(op.offsets(), /* dropFront=*/1),
1139 getI64SubArray(op.sizes(), /* dropFront=*/1),
1140 getI64SubArray(op.strides(), /* dropFront=*/1));
1141 }
1142 res = insertOne(rewriter, loc, extracted, res, idx);
1143 }
1144 rewriter.replaceOp(op, {res});
1145 return success();
1146 }
1147 /// This pattern creates recursive ExtractStridedSliceOp, but the recursion is
1148 /// bounded as the rank is strictly decreasing.
hasBoundedRewriteRecursion() const1149 bool hasBoundedRewriteRecursion() const final { return true; }
1150 };
1151
1152 } // namespace
1153
1154 /// Populate the given list with patterns that convert from Vector to LLVM.
populateVectorToLLVMConversionPatterns(LLVMTypeConverter & converter,OwningRewritePatternList & patterns,bool reassociateFPReductions)1155 void mlir::populateVectorToLLVMConversionPatterns(
1156 LLVMTypeConverter &converter, OwningRewritePatternList &patterns,
1157 bool reassociateFPReductions) {
1158 MLIRContext *ctx = converter.getDialect()->getContext();
1159 // clang-format off
1160 patterns.insert<VectorFMAOpNDRewritePattern,
1161 VectorInsertStridedSliceOpDifferentRankRewritePattern,
1162 VectorInsertStridedSliceOpSameRankRewritePattern,
1163 VectorStridedSliceOpConversion>(ctx);
1164 patterns.insert<VectorReductionOpConversion>(
1165 ctx, converter, reassociateFPReductions);
1166 patterns
1167 .insert<VectorShuffleOpConversion,
1168 VectorExtractElementOpConversion,
1169 VectorExtractOpConversion,
1170 VectorFMAOp1DConversion,
1171 VectorInsertElementOpConversion,
1172 VectorInsertOpConversion,
1173 VectorPrintOpConversion,
1174 VectorTransferConversion<TransferReadOp>,
1175 VectorTransferConversion<TransferWriteOp>,
1176 VectorTypeCastOpConversion>(ctx, converter);
1177 // clang-format on
1178 }
1179
populateVectorToLLVMMatrixConversionPatterns(LLVMTypeConverter & converter,OwningRewritePatternList & patterns)1180 void mlir::populateVectorToLLVMMatrixConversionPatterns(
1181 LLVMTypeConverter &converter, OwningRewritePatternList &patterns) {
1182 MLIRContext *ctx = converter.getDialect()->getContext();
1183 patterns.insert<VectorMatmulOpConversion>(ctx, converter);
1184 patterns.insert<VectorFlatTransposeOpConversion>(ctx, converter);
1185 }
1186
1187 namespace {
// Pass that lowers the Vector dialect (and remaining Standard ops) to the
// LLVM dialect. Declared via the tablegen-generated ConvertVectorToLLVMBase.
struct LowerVectorToLLVMPass
    : public ConvertVectorToLLVMBase<LowerVectorToLLVMPass> {
  // Copy the user-provided options into the generated pass members.
  LowerVectorToLLVMPass(const LowerVectorToLLVMOptions &options) {
    this->reassociateFPReductions = options.reassociateFPReductions;
  }
  void runOnOperation() override;
};
1195 } // namespace
1196
runOnOperation()1197 void LowerVectorToLLVMPass::runOnOperation() {
1198 // Perform progressive lowering of operations on slices and
1199 // all contraction operations. Also applies folding and DCE.
1200 {
1201 OwningRewritePatternList patterns;
1202 populateVectorToVectorCanonicalizationPatterns(patterns, &getContext());
1203 populateVectorSlicesLoweringPatterns(patterns, &getContext());
1204 populateVectorContractLoweringPatterns(patterns, &getContext());
1205 applyPatternsAndFoldGreedily(getOperation(), patterns);
1206 }
1207
1208 // Convert to the LLVM IR dialect.
1209 LLVMTypeConverter converter(&getContext());
1210 OwningRewritePatternList patterns;
1211 populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
1212 populateVectorToLLVMConversionPatterns(converter, patterns,
1213 reassociateFPReductions);
1214 populateVectorToLLVMMatrixConversionPatterns(converter, patterns);
1215 populateStdToLLVMConversionPatterns(converter, patterns);
1216
1217 LLVMConversionTarget target(getContext());
1218 if (failed(applyPartialConversion(getOperation(), target, patterns))) {
1219 signalPassFailure();
1220 }
1221 }
1222
1223 std::unique_ptr<OperationPass<ModuleOp>>
createConvertVectorToLLVMPass(const LowerVectorToLLVMOptions & options)1224 mlir::createConvertVectorToLLVMPass(const LowerVectorToLLVMOptions &options) {
1225 return std::make_unique<LowerVectorToLLVMPass>(options);
1226 }
1227