// RUN: mlir-opt %s -sparsification | FileCheck %s --check-prefix=CHECK-HIR
//
// RUN: mlir-opt %s -sparsification --sparse-tensor-conversion | \
// RUN: FileCheck %s --check-prefix=CHECK-MIR
//
// RUN: mlir-opt %s -sparsification --sparse-tensor-conversion \
// RUN: --func-bufferize --tensor-constant-bufferize           \
// RUN: --tensor-bufferize --finalizing-bufferize |            \
// RUN: FileCheck %s --check-prefix=CHECK-LIR
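//
// The three check prefixes verify the kernel at successive lowering stages:
// CHECK-HIR after sparsification only, CHECK-MIR after the sparse tensor has
// additionally been lowered to runtime library calls, and CHECK-LIR after the
// remaining bufferization passes have rewritten tensors into memrefs.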

#CSC = #sparse_tensor.encoding<{
  dimLevelType = [ "dense", "compressed" ],
  dimOrdering = affine_map<(i,j) -> (j,i)>
}>

#trait_matvec = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,  // A
    affine_map<(i,j) -> (j)>,    // b
    affine_map<(i,j) -> (i)>     // x (out)
  ],
  iterator_types = ["parallel","reduction"],
  doc = "x(i) += A(i,j) * b(j)"
}
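// Since #CSC stores A column-wise (the dimOrdering permutes (i,j) to (j,i),
// with the outer level dense and the inner level compressed), the sparsifier
// generates an outer loop over all 64 columns and an inner loop over the
// stored entries of each column, scattering the updates into x.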

// CHECK-HIR-LABEL:   func @matvec(
// CHECK-HIR-SAME:                 %[[VAL_0:.*]]: tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 0, indexBitWidth = 0 }>>,
// CHECK-HIR-SAME:                 %[[VAL_1:.*]]: tensor<64xf64>,
// CHECK-HIR-SAME:                 %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-HIR:           %[[VAL_3:.*]] = constant 64 : index
// CHECK-HIR:           %[[VAL_4:.*]] = constant 0 : index
// CHECK-HIR:           %[[VAL_5:.*]] = constant 1 : index
// CHECK-HIR:           %[[VAL_6:.*]] = sparse_tensor.pointers %[[VAL_0]], %[[VAL_5]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xindex>
// CHECK-HIR:           %[[VAL_7:.*]] = sparse_tensor.indices %[[VAL_0]], %[[VAL_5]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xindex>
// CHECK-HIR:           %[[VAL_8:.*]] = sparse_tensor.values %[[VAL_0]] : tensor<32x64xf64, #sparse_tensor.encoding<{ dimLevelType = [ "dense", "compressed" ], dimOrdering = affine_map<(d0, d1) -> (d1, d0)>, pointerBitWidth = 0, indexBitWidth = 0 }>> to memref<?xf64>
// CHECK-HIR:           %[[VAL_9:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
// CHECK-HIR:           %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK-HIR:           %[[VAL_11:.*]] = memref.alloc() : memref<32xf64>
// CHECK-HIR:           memref.copy %[[VAL_10]], %[[VAL_11]] : memref<32xf64> to memref<32xf64>
// CHECK-HIR:           scf.for %[[VAL_12:.*]] = %[[VAL_4]] to %[[VAL_3]] step %[[VAL_5]] {
// CHECK-HIR:             %[[VAL_13:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_12]]] : memref<64xf64>
// CHECK-HIR:             %[[VAL_14:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_12]]] : memref<?xindex>
// CHECK-HIR:             %[[VAL_15:.*]] = addi %[[VAL_12]], %[[VAL_5]] : index
// CHECK-HIR:             %[[VAL_16:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_15]]] : memref<?xindex>
// CHECK-HIR:             scf.for %[[VAL_17:.*]] = %[[VAL_14]] to %[[VAL_16]] step %[[VAL_5]] {
// CHECK-HIR:               %[[VAL_18:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_17]]] : memref<?xindex>
// CHECK-HIR:               %[[VAL_19:.*]] = memref.load %[[VAL_11]]{{\[}}%[[VAL_18]]] : memref<32xf64>
// CHECK-HIR:               %[[VAL_20:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_17]]] : memref<?xf64>
// CHECK-HIR:               %[[VAL_21:.*]] = mulf %[[VAL_20]], %[[VAL_13]] : f64
// CHECK-HIR:               %[[VAL_22:.*]] = addf %[[VAL_19]], %[[VAL_21]] : f64
// CHECK-HIR:               memref.store %[[VAL_22]], %[[VAL_11]]{{\[}}%[[VAL_18]]] : memref<32xf64>
// CHECK-HIR:             }
// CHECK-HIR:           }
// CHECK-HIR:           %[[VAL_23:.*]] = memref.tensor_load %[[VAL_11]] : memref<32xf64>
// CHECK-HIR:           return %[[VAL_23]] : tensor<32xf64>
// CHECK-HIR:         }

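// In the CHECK-MIR form, the sparse tensor argument has become an opaque
// !llvm.ptr<i8> handle, and its pointers, indices, and values arrays are
// obtained through calls into the sparse runtime support library
// (sparsePointers, sparseIndices, sparseValuesF64).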
// CHECK-MIR-LABEL:   func @matvec(
// CHECK-MIR-SAME:                 %[[VAL_0:.*]]: !llvm.ptr<i8>,
// CHECK-MIR-SAME:                 %[[VAL_1:.*]]: tensor<64xf64>,
// CHECK-MIR-SAME:                 %[[VAL_2:.*]]: tensor<32xf64>) -> tensor<32xf64> {
// CHECK-MIR:           %[[VAL_3:.*]] = constant 64 : index
// CHECK-MIR:           %[[VAL_5:.*]] = constant 0 : index
// CHECK-MIR:           %[[VAL_6:.*]] = constant 1 : index
// CHECK-MIR:           %[[VAL_7:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR:           %[[VAL_8:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-MIR:           %[[VAL_9:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-MIR:           %[[VAL_10:.*]] = memref.buffer_cast %[[VAL_1]] : memref<64xf64>
// CHECK-MIR:           %[[VAL_11:.*]] = memref.buffer_cast %[[VAL_2]] : memref<32xf64>
// CHECK-MIR:           %[[VAL_12:.*]] = memref.alloc() : memref<32xf64>
// CHECK-MIR:           memref.copy %[[VAL_11]], %[[VAL_12]] : memref<32xf64> to memref<32xf64>
// CHECK-MIR:           scf.for %[[VAL_15:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK-MIR:             %[[VAL_16:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_15]]] : memref<64xf64>
// CHECK-MIR:             %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_15]]] : memref<?xindex>
// CHECK-MIR:             %[[VAL_18:.*]] = addi %[[VAL_15]], %[[VAL_6]] : index
// CHECK-MIR:             %[[VAL_19:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK-MIR:             scf.for %[[VAL_20:.*]] = %[[VAL_17]] to %[[VAL_19]] step %[[VAL_6]] {
// CHECK-MIR:               %[[VAL_21:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_20]]] : memref<?xindex>
// CHECK-MIR:               %[[VAL_22:.*]] = memref.load %[[VAL_12]]{{\[}}%[[VAL_21]]] : memref<32xf64>
// CHECK-MIR:               %[[VAL_23:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_20]]] : memref<?xf64>
// CHECK-MIR:               %[[VAL_24:.*]] = mulf %[[VAL_23]], %[[VAL_16]] : f64
// CHECK-MIR:               %[[VAL_25:.*]] = addf %[[VAL_22]], %[[VAL_24]] : f64
// CHECK-MIR:               memref.store %[[VAL_25]], %[[VAL_12]]{{\[}}%[[VAL_21]]] : memref<32xf64>
// CHECK-MIR:             }
// CHECK-MIR:           }
// CHECK-MIR:           %[[VAL_26:.*]] = memref.tensor_load %[[VAL_12]] : memref<32xf64>
// CHECK-MIR:           return %[[VAL_26]] : tensor<32xf64>
// CHECK-MIR:         }

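// After full bufferization, in the CHECK-LIR form, the dense tensor arguments
// and the result are memrefs, so the buffer_cast and tensor_load operations
// from the MIR form disappear and the kernel operates directly on the
// incoming buffers.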
// CHECK-LIR-LABEL:   func @matvec(
// CHECK-LIR-SAME:                 %[[VAL_0:.*]]: !llvm.ptr<i8>,
// CHECK-LIR-SAME:                 %[[VAL_1:.*]]: memref<64xf64>,
// CHECK-LIR-SAME:                 %[[VAL_2:.*]]: memref<32xf64>) -> memref<32xf64> {
// CHECK-LIR:           %[[VAL_3:.*]] = constant 64 : index
// CHECK-LIR:           %[[VAL_5:.*]] = constant 0 : index
// CHECK-LIR:           %[[VAL_6:.*]] = constant 1 : index
// CHECK-LIR:           %[[VAL_7:.*]] = call @sparsePointers(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-LIR:           %[[VAL_8:.*]] = call @sparseIndices(%[[VAL_0]], %[[VAL_6]]) : (!llvm.ptr<i8>, index) -> memref<?xindex>
// CHECK-LIR:           %[[VAL_9:.*]] = call @sparseValuesF64(%[[VAL_0]]) : (!llvm.ptr<i8>) -> memref<?xf64>
// CHECK-LIR:           %[[VAL_10:.*]] = memref.alloc() : memref<32xf64>
// CHECK-LIR:           memref.copy %[[VAL_2]], %[[VAL_10]] : memref<32xf64> to memref<32xf64>
// CHECK-LIR:           scf.for %[[VAL_13:.*]] = %[[VAL_5]] to %[[VAL_3]] step %[[VAL_6]] {
// CHECK-LIR:             %[[VAL_14:.*]] = memref.load %[[VAL_1]]{{\[}}%[[VAL_13]]] : memref<64xf64>
// CHECK-LIR:             %[[VAL_15:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_13]]] : memref<?xindex>
// CHECK-LIR:             %[[VAL_16:.*]] = addi %[[VAL_13]], %[[VAL_6]] : index
// CHECK-LIR:             %[[VAL_17:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_16]]] : memref<?xindex>
// CHECK-LIR:             scf.for %[[VAL_18:.*]] = %[[VAL_15]] to %[[VAL_17]] step %[[VAL_6]] {
// CHECK-LIR:               %[[VAL_19:.*]] = memref.load %[[VAL_8]]{{\[}}%[[VAL_18]]] : memref<?xindex>
// CHECK-LIR:               %[[VAL_20:.*]] = memref.load %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK-LIR:               %[[VAL_21:.*]] = memref.load %[[VAL_9]]{{\[}}%[[VAL_18]]] : memref<?xf64>
// CHECK-LIR:               %[[VAL_22:.*]] = mulf %[[VAL_21]], %[[VAL_14]] : f64
// CHECK-LIR:               %[[VAL_23:.*]] = addf %[[VAL_20]], %[[VAL_22]] : f64
// CHECK-LIR:               memref.store %[[VAL_23]], %[[VAL_10]]{{\[}}%[[VAL_19]]] : memref<32xf64>
// CHECK-LIR:             }
// CHECK-LIR:           }
// CHECK-LIR:           return %[[VAL_10]] : memref<32xf64>
// CHECK-LIR:         }

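// The kernel computes x(i) += A(i,j) * b(j) for a 32x64 matrix A stored in
// the column-wise #CSC format defined above.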
func @matvec(%arga: tensor<32x64xf64, #CSC>,
             %argb: tensor<64xf64>,
             %argx: tensor<32xf64>) -> tensor<32xf64> {
  %0 = linalg.generic #trait_matvec
      ins(%arga, %argb : tensor<32x64xf64, #CSC>, tensor<64xf64>)
      outs(%argx: tensor<32xf64>) {
    ^bb(%A: f64, %b: f64, %x: f64):
      %0 = mulf %A, %b : f64
      %1 = addf %x, %0 : f64
      linalg.yield %1 : f64
  } -> tensor<32xf64>
  return %0 : tensor<32xf64>
}