1//===- LinalgOps.td - Linalg dialect ops -------------------*- tablegen -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This is the operation definition file for linear algebra operations.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LINALG_OPS
14#define LINALG_OPS
15
16include "mlir/Dialect/Linalg/IR/LinalgBase.td"
17include "mlir/Interfaces/ControlFlowInterfaces.td"
18include "mlir/Interfaces/SideEffectInterfaces.td"
19include "mlir/Interfaces/ViewLikeInterface.td"
20
21// Base class for Linalg dialect ops that do not correspond to library calls.
// Base class for ops in the Linalg dialect that do not map to library calls.
// Every op deriving from this class must provide the following free
// functions, which the hooks below dispatch to:
//   * ParseResult parse${C++ class of Op}(OpAsmParser &parser,
//                                         OperationState &result)
//   * void print(OpAsmPrinter &p, ${C++ class of Op} op)
//   * LogicalResult verify(${C++ class of Op} op)
class Linalg_Op<string mnemonic, list<OpTrait> traits = []> :
    Op<Linalg_Dialect, mnemonic, traits> {
  let parser = [{ return ::parse$cppClass(parser, result); }];
  let printer = [{ return ::print(p, *this); }];
  let verifier = [{ return ::verify(*this); }];
}
34
def Linalg_InitTensorOp : Linalg_Op<"init_tensor", [NoSideEffect]> {
  let summary = "operation to define a tensor of particular value";

  let description = [{
    `linalg.init_tensor` is an operation that materializes a tensor of
    a given shape. The shape could be dynamic or static.
  }];

  // `sizes` carries one SSA value per *dynamic* dimension; `static_sizes`
  // records every dimension, with ShapedType::kDynamicSize marking the
  // dynamic ones.
  let arguments =
    (ins Variadic<Index>:$sizes, I64ArrayAttr:$static_sizes);

  let results = (outs AnyTensor:$result);

  // Note: the `::verify` hook is already installed by the Linalg_Op base
  // class; re-declaring the identical verifier here was redundant.

  let extraClassDeclaration = [{
    static StringRef getStaticSizesAttrName() {
      return "static_sizes";
    }

    RankedTensorType getType() {
      return getResult().getType().cast<RankedTensorType>(); }

    // Infer the shape of the result tensor given the static shapes
    // and element type of the result tensor.
    static Type inferResultType(ArrayRef<int64_t> staticSizes, Type elementType);

    // Return true if the size of the tensor is dynamic at `idx`.
    bool isDynamicSize(unsigned idx) {
      APInt v = *(static_sizes().getAsValueRange<IntegerAttr>().begin() + idx);
      return ShapedType::isDynamic(v.getSExtValue());
    }

    // Assert that the size of the result tensor is static at `idx`
    // and return the shape.
    int64_t getStaticSize(unsigned idx) {
      assert(!isDynamicSize(idx) && "expected static size");
      APInt v = *(static_sizes().
          template getAsValueRange<IntegerAttr>().begin() + idx);
      return v.getSExtValue();
    }

    // Return the argument position that contains the dynamic size of
    // the tensor at dimension `idx`. Asserts that the shape is
    // dynamic at that `idx`.
    unsigned getIndexOfDynamicSize(unsigned idx) {
      assert(isDynamicSize(idx) && "expected dynamic size");
      // Dynamic-size operands appear in dimension order, so the operand
      // index equals the number of dynamic dimensions preceding `idx`.
      return std::count_if(
          static_sizes().getValue().begin(),
          static_sizes().getValue().begin() + idx,
          [&](Attribute attr) {
            return ShapedType::isDynamic(attr.cast<IntegerAttr>().getInt());
          });
    }

    // Return the Value of the dynamic size of the tensor at dimension
    // `idx`. Asserts that the shape is dynamic at that `idx`.
    Value getDynamicSize(unsigned idx) {
      return getOperand(getIndexOfDynamicSize(idx));
    }
  }];

  let builders = [
    // Build with mixed static/dynamic sizes; the result type is inferred
    // from the static shape and element type.
    OpBuilderDAG<(ins "ValueRange":$shape,
                  "ArrayRef<int64_t>":$staticShape, "Type":$elementType),
    [{
      build($_builder, $_state,
            InitTensorOp::inferResultType(staticShape, elementType),
            shape, $_builder.getI64ArrayAttr(staticShape));
    }]>,
    // Build with all sizes dynamic.
    OpBuilderDAG<(ins "ValueRange":$shape, "Type":$elementType),
    [{
      SmallVector<int64_t, 4> staticShape(
        shape.size(), ShapedType::kDynamicSize);
      build($_builder, $_state, shape, staticShape, elementType);
    }]>,
    // Build with all sizes static.
    OpBuilderDAG<(ins "ArrayRef<int64_t>":$staticShape, "Type":$elementType),
    [{
      build($_builder, $_state, ValueRange{}, staticShape, elementType);
    }]>
  ];

  let hasCanonicalizer = 1;
}
119
def Linalg_PadTensorOp : Linalg_Op<"pad_tensor",
    [AttrSizedOperandSegments, SingleBlockImplicitTerminator<"YieldOp">]> {
  let summary = "tensor pad operation";
  let description = [{
    `linalg.pad_tensor` is an operation that pads the `source` tensor
    with given `low` and `high` padding config.

    The PadTensor operation supports the following arguments:

    * source: the "base" tensor on which to pad.
    * low: A list containing the padding along the start of each
           dimension, i.e. `low`.
    * high: A list containing the padding along the end of each
           dimension, i.e. `high`.

    The result tensor dimensions are `low` + `dim` + `high` along that
    dimension. The number of elements of `low` and `high` must match
    the rank of the input tensor (which is also the rank of the output
    tensor). They can be either a constant or a dynamic value.

    The region of the `pad_tensor` operation returns the value to use
    for the padding. The arguments of the region represent the index
    of the source being accessed. There should be as many arguments as
    the rank of the `source` tensor. The value `yield`-ed by the
    region is used as the value of the view at the given position.

    Example 1:

    ```mlir
      %pad_value = ... : f32
      %0 = linalg.pad_tensor %arg0 low[1, 2] high[2, 3] {
      ^bb0(%arg1 : index, %arg2 : index):
        linalg.yield %pad_value : f32
      } : tensor<?x?xf32> to tensor<?x?xf32>
    ```

    Example 2:

    ```mlir
      %pad_value = ... : f32
      %0 = linalg.pad_tensor %arg0 low[2, %arg1, 3, 3] high[3, 3, %arg1, 2] {
      ^bb0(%arg2: index, %arg3: index, %arg4: index, %arg5: index):
          linalg.yield %pad_value : f32
      } : tensor<1x2x2x?xf32> to tensor<6x?x?x?xf32>
    ```

    Example 3:

    ```mlir
      %pad_value = ... : f32
      %0 = linalg.pad_tensor %arg0 low[0, 0] high[%ub0, %ub1] {
      ^bb0(%arg1: index, %arg2: index):
        linalg.yield %pad_value : f32
      } : tensor<2x3xf32> to tensor<?x?xf32>
    ```
  }];

  // `low`/`high` carry the dynamic padding amounts; `static_low`/`static_high`
  // record all padding amounts, with dynamic entries encoded as sentinels.
  let arguments = (ins
    AnyTensor:$source,
    Variadic<Index>:$low,
    Variadic<Index>:$high,
    I64ArrayAttr:$static_low,
    I64ArrayAttr:$static_high);

  // Single-block region yielding the padding value (see description).
  let regions = (region AnyRegion:$region);

  let results = (outs AnyTensor:$result);

  let extraClassDeclaration = [{
    static StringRef getStaticLowAttrName() {
      return "static_low";
    }

    static StringRef getStaticHighAttrName() {
      return "static_high";
    }

    // Infer the shape of the result tensor given the static shapes
    // and element type of the result tensor.
    static RankedTensorType inferResultType(RankedTensorType sourceType,
                                ArrayRef<int64_t> staticLow,
                                ArrayRef<int64_t> staticHigh);
  }];

  let builders = [
    // Build a PadTensorOp with mixed static and dynamic entries.
    OpBuilderDAG<(ins "Value":$source, "ArrayRef<int64_t>":$staticLow,
      "ArrayRef<int64_t>":$staticHigh, "ValueRange":$low, "ValueRange":$high,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
    // Build a PadTensorOp with all dynamic entries.
    OpBuilderDAG<(ins "Value":$source, "ValueRange":$low, "ValueRange":$high,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>
  ];
}
214
def Linalg_RangeOp :
    Linalg_Op<"range", [NoSideEffect]>,
    Arguments<(ins Index:$min, Index:$max, Index:$step)>,
    Results<(outs Range)> {
  let summary = "Create a `range` type value, used to create `view`s";
  let description = [{
    The `linalg.range` op creates a `!linalg.range` from 3 values of type
    `index` that represent the min, max and step values of the `range`. This
    type does not pass function boundaries at the moment.

    Example:

    ```mlir
    %3 = linalg.range %0:%1:%2 : !linalg.range
    ```
  }];
  let builders = [
    // Build a RangeOp, deriving the result type from the builder's context.
    OpBuilderDAG<(ins "Value":$min, "Value":$max, "Value":$step),
    [{
      auto rangeType = RangeType::get($_builder.getContext());
      build($_builder, $_state, rangeType, min, max, step);
    }]>];

  // Fully specified by traits.
  let verifier = ?;
  let assemblyFormat = "$min `:` $max `:` $step attr-dict `:` type(results)";
}
242
// Common base for the memref and tensor reshape ops: shared builders,
// accessors for the `reassociation` attribute, and assembly format.
class Linalg_ReshapeLikeOp<string mnemonic, list<OpTrait> traits = []> :
    Linalg_Op<mnemonic, !listconcat(traits, [NoSideEffect])> {
  let builders = [
    // Builders for a contracting reshape whose result type is computed from
    // `src` and `reassociation`.
    OpBuilderDAG<(ins "Value":$src,
      "ArrayRef<ReassociationExprs>":$reassociation,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
    OpBuilderDAG<(ins "Value":$src,
      "ArrayRef<ReassociationIndices>":$reassociation,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs),
    [{
      auto reassociationMaps =
          convertReassociationIndicesToMaps($_builder, reassociation);
      build($_builder, $_state, src, reassociationMaps, attrs);
    }]>,

    // Builders for a reshape whose result type is passed explicitly. This may
    // be either a contracting or expanding reshape.
    OpBuilderDAG<(ins "Type":$resultType, "Value":$src,
      "ArrayRef<ReassociationExprs>":$reassociation,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs)>,
    OpBuilderDAG<(ins "Type":$resultType, "Value":$src,
      "ArrayRef<ReassociationIndices>":$reassociation,
      CArg<"ArrayRef<NamedAttribute>", "{}">:$attrs),
    [{
      auto reassociationMaps =
          convertReassociationIndicesToMaps($_builder, reassociation);
      build($_builder, $_state, resultType, src, reassociationMaps, attrs);
    }]>
  ];

  // Shared C++ accessors, spliced into both subclasses' declarations.
  code commonExtraClassDeclaration = [{
    static StringRef getReassociationAttrName() { return "reassociation"; }
    SmallVector<AffineMap, 4> getReassociationMaps() {
      return llvm::to_vector<4>(llvm::map_range(reassociation(), [
      ](Attribute a) { return a.cast<AffineMapAttr>().getValue(); }));
    }
    SmallVector<ReassociationExprs, 4> getReassociationExprs() {
      return
        llvm::to_vector<4>(llvm::map_range(reassociation(),
          [](Attribute a) {
            return llvm::to_vector<2>(
              a.cast<AffineMapAttr>().getValue().getResults());
          }));
    }
  }];
  let assemblyFormat = [{
    $src $reassociation attr-dict `:` type($src) `into` type(results)
  }];
}
294
// Memref variant of the reshape op; the tensor variant is
// Linalg_TensorReshapeOp below.
def Linalg_ReshapeOp : Linalg_ReshapeLikeOp<"reshape",
    [DeclareOpInterfaceMethods<ViewLikeOpInterface>]>,
    Arguments<(ins AnyStridedMemRef:$src, AffineMapArrayAttr:$reassociation)>,
    Results<(outs AnyStridedMemRef:$result)> {
  let summary = "linalg.reshape produces a new view into the operand view";
  let description = [{
    The `linalg.reshape` op produces a new view whose sizes are a reassociation
    of the original `view`. Depending on whether or not the reassociated
    MemRefType is contiguous, the resulting memref may require explicit alloc
    and copies.

    A reassociation is defined as a continuous grouping of dimensions and is
    represented with an affine map array attribute. In the future,
    non-continuous groupings may be allowed (i.e. permutations, reindexings
    etc).

    For now, it is assumed that either:
      1. a reassociation produces and consumes contiguous MemRefType or,
      2. the reshape op will be folded into its consumers (by changing the shape
         of the computations).
    All other cases are undefined behavior and a reshape op may not lower to
    LLVM if it cannot be proven statically that it does not require alloc+copy.

    A reshape may either collapse or expand dimensions, depending on the
    relationship between source and target memref ranks. The verification rule
    is that the reassociation maps are applied to the memref with the larger
    rank to obtain the memref with the smaller rank. In the case of a dimension
    expansion, the reassociation maps can be interpreted as inverse maps.

    The result memref type of a reshape when dimensions are collapsed
    (operand memref type when dimensions are expanded) can be
    zero-ranked if the operand memref type (or the result memref type
    when dimensions are expanded) is statically shaped with all
    dimensions being unit extent. In such cases the reassociation map
    is empty.

    Examples:

    ```mlir
    // Dimension collapse (i, j) -> i' and k -> k'
    %1 = linalg.reshape %0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
      memref<?x?x?xf32, stride_spec> into memref<?x?xf32, stride_spec_2>
    ```

    ```mlir
    // Dimension expansion i -> (i', j') and (k) -> (k')
    %1 = linalg.reshape %0 [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
      memref<?x?xf32, stride_spec> into memref<?x?x?xf32, stride_spec_2>
    ```
  }];
  // Memref-typed convenience accessors layered over the shared
  // reassociation accessors from Linalg_ReshapeLikeOp.
  let extraClassDeclaration = commonExtraClassDeclaration # [{
    MemRefType getSrcType() { return src().getType().cast<MemRefType>(); }
    MemRefType getResultType() { return result().getType().cast<MemRefType>(); }
  }];
  let hasFolder = 1;
  let hasCanonicalizer = 1;
}
352
// Tensor variant of the reshape op; the memref variant is
// Linalg_ReshapeOp above.
def Linalg_TensorReshapeOp : Linalg_ReshapeLikeOp<"tensor_reshape">,
    Arguments<(ins AnyTensor:$src,
                   AffineMapArrayAttr:$reassociation)>,
    Results<(outs AnyTensor:$result)> {
  let summary = "linalg.tensor_reshape produces a new reshaped tensor.";
  let description = [{
    The `linalg.tensor_reshape` op produces a new tensor whose sizes are a
    reassociation of the original `src`.

    A reassociation is defined as a continuous grouping of dimensions and is
    represented with an affine map array attribute. In the future,
    non-continuous groupings may be allowed (i.e. permutations, reindexings
    etc).

    A reshape may either collapse or expand dimensions, depending on the
    relationship between source and target tensor ranks. The verification rule
    is that the reassociation maps are applied to the tensor with the larger
    rank to obtain the tensor with the smaller rank. In the case of a dimension
    expansion, the reassociation maps can be interpreted as inverse maps.

    The result tensor type of a reshape when dimensions are collapsed
    (operand tensor type when dimensions are expanded) can be
    zero-ranked if the operand tensor type (or the result tensor type
    when dimensions are expanded) is statically shaped with all
    dimensions being unit extent. In such cases the reassociation map
    is empty.

    Examples:

    ```mlir
    // Dimension collapse (i, j) -> i' and k -> k'
    %b = linalg.tensor_reshape %a [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
      tensor<?x?x?xf32> into tensor<?x?xf32>
    ```

    ```mlir
    // Dimension expansion i -> (i', j') and (k) -> (k')
    %b = linalg.tensor_reshape %a [(i, j, k) -> (i, j), (i, j, k) -> (k)] :
      tensor<?x?xf32> into tensor<?x?x?xf32>
    ```
  }];
  // Tensor-typed convenience accessors layered over the shared
  // reassociation accessors from Linalg_ReshapeLikeOp.
  let extraClassDeclaration = commonExtraClassDeclaration # [{
    RankedTensorType getSrcType() {
      return src().getType().cast<RankedTensorType>();
    }
    RankedTensorType getResultType() {
      return result().getType().cast<RankedTensorType>();
    }
  }];
  let hasFolder = 1;
  let hasCanonicalizer = 1;
}
405
def Linalg_SliceOp : Linalg_Op<"slice", [
      DeclareOpInterfaceMethods<ViewLikeOpInterface>, NoSideEffect]>,
    Arguments<(ins AnyStridedMemRef:$view,
                   Variadic<AnyTypeOf<[Range, Index]>>:$indexings)>,
    Results<(outs AnyStridedMemRef)> {
  let summary = "Produce a rank-reduced `subview` of a base `view`.";
  let description = [{
    The `linalg.slice` op allows defining a subregion of a smaller rank than the
    operand `view` within the underlying buffer.

    A `linalg.slice` op takes a view and a variadic number of indexings and
    produces a `view` of the same elemental type. An indexing is either:
      1. a `linalg.range`, in which case it does not reduce the rank of the
         parent `view` along the corresponding dimension.
      2. an `index`, in which case it reduces the rank of the parent view by
         one.

    If an indexing extends past the size of the `view`, this is undefined
    behavior. Ideally the `linalg.slice` operation would automatically truncate
    it to be within bounds but there are tradeoffs involved now that `std.view`
    is a standard op.

    Examples:

      1. rank-preserving `slice`:

      ```mlir
      %4 = linalg.slice %0[%1, %2] : memref<?x?xf32, stride_spec>,
        !linalg.range, !linalg.range, memref<?x?xf32, stride_spec>
      ```

      2. rank-reducing `slice` (from 2-D to 1-D):

      ```mlir
      %4 = linalg.slice %0[%1, %2] : memref<?x?xf32, stride_spec>,
        index, !linalg.range, memref<?x?xf32, stride_spec>
      ```

      3. rank-reducing `slice` (from 2-D to 0-D):

      ```mlir
      %4 = linalg.slice %0[%1, %2] : memref<?x?xf32, stride_spec>,
        index, index, memref<?x?xf32, stride_spec>
      ```
  }];

  let builders = [OpBuilderDAG<(ins "Value":$base, "ValueRange":$indexings)>];

  let extraClassDeclaration = [{
    // Operand 0 is the base view; indexings start at operand 1.
    enum { FirstIndexingOperand = 1 };
    unsigned getRank() { return getShapedType().getRank(); }
    Type getElementType() { return getShapedType().getElementType(); }
    ShapedType getShapedType() { return getType().cast<ShapedType>(); }
    unsigned getBaseViewRank() { return getBaseViewType().getRank(); }
    ShapedType getBaseViewType() { return view().getType().cast<ShapedType>();}

    // Get the underlying indexing at a given rank.
    Value indexing(unsigned rank) { return *(indexings().begin() + rank); }

    // Get the subset of indexings that are of RangeType.
    SmallVector<Value, 8> getRanges() {
      SmallVector<Value, 8> res;
      for (auto operand : indexings())
        if (!operand.getType().isa<IndexType>())
          res.push_back(operand);
      return res;
    }
  }];

  let hasFolder = 1;
}
477
def Linalg_SimplePadOp : Linalg_Op<"simple_pad", [NoSideEffect]> {
  let summary = "TODO: replace with pad_tensors when ready.";

  let description = [{
    `linalg.simple_pad` is a tmp placeholder for padding and packing on tensors.
    Its semantics are to pad a partially dynamic tensor to a fully static tensor
    where the static sizes are assumed to be greater than the dynamic sizes. The
    op performs "high" padding (i.e. it adds trailing padding values until the
    desired size is met).
  }];

  let arguments = (ins AnyRankedTensor:$tensor, AnyType:$padding);
  let results = (outs AnyRankedTensor:$result);

  // TODO: verify all static result, some dynamic input, static shapes match,
  // element types match, ranks match etc. Use pad_tensors when ready but for
  // now just let it be fully specified by traits.
  let verifier = ?;

  let extraClassDeclaration = [{
    RankedTensorType getSourceType() {
      return tensor().getType().cast<RankedTensorType>(); }
    RankedTensorType getResultType() {
      return getResult().getType().cast<RankedTensorType>(); }
   }];

  let assemblyFormat = [{
    $tensor `pad` $padding attr-dict `:`
      type($tensor) `to` type($result) `pad` type($padding)
  }];
}
509
// NOTE(review): also declared as the implicit terminator of
// `linalg.pad_tensor`'s region (see SingleBlockImplicitTerminator above).
def Linalg_YieldOp : Linalg_Op<"yield", [NoSideEffect, ReturnLike, Terminator]>,
    Arguments<(ins Variadic<AnyType>:$values)> {
  let summary = "Linalg yield operation";
  let description = [{
    `linalg.yield` is a special terminator operation for blocks inside regions
    in `linalg` generic ops. It returns values to the immediately enclosing
    `linalg` generic op.

    Example:

    ```mlir
    linalg.yield %f0, %f1 : f32, f32
    ```
  }];
}
525
526#endif // LINALG_OPS
527