// RUN: mlir-opt %s | mlir-opt | FileCheck %s
// RUN: mlir-opt %s --mlir-print-op-generic | mlir-opt | FileCheck %s

// Round-trip (parse | print | re-parse) test for memref dialect operations.
// Each op below is checked for stable custom-form printing; the generic-form
// RUN line additionally verifies the generic printer/parser agree.

// CHECK-DAG: #[[$strided2D:.*]] = affine_map<(d0, d1)[s0, s1] -> (d0 * s1 + s0 + d1)>
// CHECK-DAG: #[[$strided3D:.*]] = affine_map<(d0, d1, d2)[s0, s1, s2] -> (d0 * s1 + s0 + d1 * s2 + d2)>
// CHECK-DAG: #[[$strided2DOFF0:.*]] = affine_map<(d0, d1)[s0] -> (d0 * s0 + d1)>
// CHECK-DAG: #[[$strided3DOFF0:.*]] = affine_map<(d0, d1, d2)[s0, s1] -> (d0 * s0 + d1 * s1 + d2)>

// CHECK-LABEL: test_buffer_cast
func @test_buffer_cast(%arg0: tensor<?xi64>, %arg1: tensor<*xi64>) -> (memref<?xi64, affine_map<(d0) -> (d0 + 7)>>, memref<*xi64, 1>) {
  %0 = memref.buffer_cast %arg0 : memref<?xi64, affine_map<(d0) -> (d0 + 7)>>
  %1 = memref.buffer_cast %arg1 : memref<*xi64, 1>
  return %0, %1 : memref<?xi64, affine_map<(d0) -> (d0 + 7)>>, memref<*xi64, 1>
}

// CHECK-LABEL: func @memref_reinterpret_cast
func @memref_reinterpret_cast(%in: memref<?xf32>)
    -> memref<10x?xf32, offset: ?, strides: [?, 1]> {
  %c0 = constant 0 : index
  %c10 = constant 10 : index
  %out = memref.reinterpret_cast %in to
           offset: [%c0], sizes: [10, %c10], strides: [%c10, 1]
           : memref<?xf32> to memref<10x?xf32, offset: ?, strides: [?, 1]>
  return %out : memref<10x?xf32, offset: ?, strides: [?, 1]>
}

// CHECK-LABEL: func @memref_reshape(
func @memref_reshape(%unranked: memref<*xf32>, %shape1: memref<1xi32>,
                     %shape2: memref<2xi32>, %shape3: memref<?xi32>) -> memref<*xf32> {
  %dyn_vec = memref.reshape %unranked(%shape1)
               : (memref<*xf32>, memref<1xi32>) -> memref<?xf32>
  %dyn_mat = memref.reshape %dyn_vec(%shape2)
               : (memref<?xf32>, memref<2xi32>) -> memref<?x?xf32>
  %new_unranked = memref.reshape %dyn_mat(%shape3)
               : (memref<?x?xf32>, memref<?xi32>) -> memref<*xf32>
  return %new_unranked : memref<*xf32>
}

// CHECK-LABEL: memref.global @memref0 : memref<2xf32>
memref.global @memref0 : memref<2xf32>

// CHECK-LABEL: memref.global constant @memref1 : memref<2xf32> = dense<[0.000000e+00, 1.000000e+00]>
memref.global constant @memref1 : memref<2xf32> = dense<[0.0, 1.0]>

// CHECK-LABEL: memref.global @memref2 : memref<2xf32> = uninitialized
memref.global @memref2 : memref<2xf32> = uninitialized

// CHECK-LABEL: memref.global "private" @memref3 : memref<2xf32> = uninitialized
memref.global "private" @memref3 : memref<2xf32> = uninitialized

// CHECK-LABEL: memref.global "private" constant @memref4 : memref<2xf32> = uninitialized
memref.global "private" constant @memref4 : memref<2xf32> = uninitialized

// CHECK-LABEL: func @write_global_memref
func @write_global_memref() {
  %0 = memref.get_global @memref0 : memref<2xf32>
  %1 = constant dense<[1.0, 2.0]> : tensor<2xf32>
  memref.tensor_store %1, %0 : memref<2xf32>
  return
}

// CHECK-LABEL: func @read_global_memref
func @read_global_memref() {
  %0 = memref.get_global @memref0 : memref<2xf32>
  %1 = memref.tensor_load %0 : memref<2xf32>
  return
}

// CHECK-LABEL: func @memref_clone
func @memref_clone() {
  %0 = memref.alloc() : memref<2xf32>
  %1 = memref.cast %0 : memref<2xf32> to memref<*xf32>
  %2 = memref.clone %1 : memref<*xf32> to memref<*xf32>
  return
}

// CHECK-LABEL: func @memref_copy
func @memref_copy() {
  %0 = memref.alloc() : memref<2xf32>
  %1 = memref.cast %0 : memref<2xf32> to memref<*xf32>
  %2 = memref.alloc() : memref<2xf32>
  // NOTE(review): %3 casts %0, leaving %2 unused — possibly intended
  // `memref.cast %2`. Harmless for a parse/print round-trip; confirm intent.
  %3 = memref.cast %0 : memref<2xf32> to memref<*xf32>
  memref.copy %1, %3 : memref<*xf32> to memref<*xf32>
  return
}

// CHECK-LABEL: func @memref_dealloc
func @memref_dealloc() {
  %0 = memref.alloc() : memref<2xf32>
  %1 = memref.cast %0 : memref<2xf32> to memref<*xf32>
  memref.dealloc %1 : memref<*xf32>
  return
}


// CHECK-LABEL: func @memref_alloca_scope
func @memref_alloca_scope() {
  memref.alloca_scope {
    memref.alloca_scope.return
  }
  return
}

func @expand_collapse_shape_static(%arg0: memref<3x4x5xf32>,
                                   %arg1: tensor<3x4x5xf32>,
                                   %arg2: tensor<3x?x5xf32>) {
  // Reshapes that collapse and expand back a contiguous buffer.
  %0 = memref.collapse_shape %arg0 [[0, 1], [2]] :
    memref<3x4x5xf32> into memref<12x5xf32>
  %r0 = memref.expand_shape %0 [[0, 1], [2]] :
    memref<12x5xf32> into memref<3x4x5xf32>
  %1 = memref.collapse_shape %arg0 [[0], [1, 2]] :
    memref<3x4x5xf32> into memref<3x20xf32>
  %r1 = memref.expand_shape %1 [[0], [1, 2]] :
    memref<3x20xf32> into memref<3x4x5xf32>
  %2 = memref.collapse_shape %arg0 [[0, 1, 2]] :
    memref<3x4x5xf32> into memref<60xf32>
  %r2 = memref.expand_shape %2 [[0, 1, 2]] :
    memref<60xf32> into memref<3x4x5xf32>
  // Reshapes that expand and collapse back a contiguous buffer with some 1's.
  %3 = memref.expand_shape %arg0 [[0, 1], [2], [3, 4]] :
    memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
  %r3 = memref.collapse_shape %3 [[0, 1], [2], [3, 4]] :
    memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
  // Reshapes on tensors.
  %t0 = linalg.tensor_expand_shape %arg1 [[0, 1], [2], [3, 4]] :
    tensor<3x4x5xf32> into tensor<1x3x4x1x5xf32>
  %rt0 = linalg.tensor_collapse_shape %t0 [[0, 1], [2], [3, 4]] :
    tensor<1x3x4x1x5xf32> into tensor<3x4x5xf32>
  %t1 = linalg.tensor_expand_shape %arg2 [[0, 1], [2], [3, 4]] :
    tensor<3x?x5xf32> into tensor<1x3x?x1x5xf32>
  %rt1 = linalg.tensor_collapse_shape %t1 [[0], [1, 2], [3, 4]] :
    tensor<1x3x?x1x5xf32> into tensor<1x?x5xf32>
  return
}
// CHECK-LABEL: func @expand_collapse_shape_static
//       CHECK:   memref.collapse_shape {{.*}} {{\[}}[0, 1], [2]]
//  CHECK-SAME:     memref<3x4x5xf32> into memref<12x5xf32>
//       CHECK:   memref.expand_shape {{.*}} {{\[}}[0, 1], [2]]
//  CHECK-SAME:     memref<12x5xf32> into memref<3x4x5xf32>
//       CHECK:   memref.collapse_shape {{.*}} {{\[}}[0], [1, 2]]
//  CHECK-SAME:     memref<3x4x5xf32> into memref<3x20xf32>
//       CHECK:   memref.expand_shape {{.*}} {{\[}}[0], [1, 2]]
//  CHECK-SAME:     memref<3x20xf32> into memref<3x4x5xf32>
//       CHECK:   memref.collapse_shape {{.*}} {{\[}}[0, 1, 2]]
//  CHECK-SAME:     memref<3x4x5xf32> into memref<60xf32>
//       CHECK:   memref.expand_shape {{.*}} {{\[}}[0, 1, 2]]
//  CHECK-SAME:     memref<60xf32> into memref<3x4x5xf32>
//       CHECK:   memref.expand_shape {{.*}} {{\[}}[0, 1], [2], [3, 4]]
//  CHECK-SAME:     memref<3x4x5xf32> into memref<1x3x4x1x5xf32>
//       CHECK:   memref.collapse_shape {{.*}} {{\[}}[0, 1], [2], [3, 4]]
//  CHECK-SAME:     memref<1x3x4x1x5xf32> into memref<3x4x5xf32>
//
//       CHECK:   linalg.tensor_expand_shape {{.*}}: tensor<3x4x5xf32> into tensor<1x3x4x1x5xf32>
//       CHECK:   linalg.tensor_collapse_shape {{.*}}: tensor<1x3x4x1x5xf32> into tensor<3x4x5xf32>
//       CHECK:   linalg.tensor_expand_shape {{.*}}: tensor<3x?x5xf32> into tensor<1x3x?x1x5xf32>
//       CHECK:   linalg.tensor_collapse_shape {{.*}}: tensor<1x3x?x1x5xf32> into tensor<1x?x5xf32>


func @expand_collapse_shape_dynamic(%arg0: memref<?x?x?xf32>,
         %arg1: memref<?x?x?xf32, offset : 0, strides : [?, ?, 1]>,
         %arg2: memref<?x?x?xf32, offset : ?, strides : [?, ?, 1]>) {
  %0 = memref.collapse_shape %arg0 [[0, 1], [2]] :
    memref<?x?x?xf32> into memref<?x?xf32>
  %r0 = memref.expand_shape %0 [[0, 1], [2]] :
    memref<?x?xf32> into memref<?x4x?xf32>
  %1 = memref.collapse_shape %arg1 [[0, 1], [2]] :
    memref<?x?x?xf32, offset : 0, strides : [?, ?, 1]> into
    memref<?x?xf32, offset : 0, strides : [?, 1]>
  %r1 = memref.expand_shape %1 [[0, 1], [2]] :
    memref<?x?xf32, offset : 0, strides : [?, 1]> into
    memref<?x4x?xf32, offset : 0, strides : [?, ?, 1]>
  %2 = memref.collapse_shape %arg2 [[0, 1], [2]] :
    memref<?x?x?xf32, offset : ?, strides : [?, ?, 1]> into
    memref<?x?xf32, offset : ?, strides : [?, 1]>
  %r2 = memref.expand_shape %2 [[0, 1], [2]] :
    memref<?x?xf32, offset : ?, strides : [?, 1]> into
    memref<?x4x?xf32, offset : ?, strides : [?, ?, 1]>
  return
}
// CHECK-LABEL: func @expand_collapse_shape_dynamic
//       CHECK:   memref.collapse_shape {{.*}} {{\[}}[0, 1], [2]]
//  CHECK-SAME:     memref<?x?x?xf32> into memref<?x?xf32>
//       CHECK:   memref.expand_shape {{.*}} {{\[}}[0, 1], [2]]
//  CHECK-SAME:     memref<?x?xf32> into memref<?x4x?xf32>
//       CHECK:   memref.collapse_shape {{.*}} {{\[}}[0, 1], [2]]
//  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3DOFF0]]> into memref<?x?xf32, #[[$strided2DOFF0]]>
//       CHECK:   memref.expand_shape {{.*}} {{\[}}[0, 1], [2]]
//  CHECK-SAME:     memref<?x?xf32, #[[$strided2DOFF0]]> into memref<?x4x?xf32, #[[$strided3DOFF0]]>
//       CHECK:   memref.collapse_shape {{.*}} {{\[}}[0, 1], [2]]
//  CHECK-SAME:     memref<?x?x?xf32, #[[$strided3D]]> into memref<?x?xf32, #[[$strided2D]]>
//       CHECK:   memref.expand_shape {{.*}} {{\[}}[0, 1], [2]]
//  CHECK-SAME:     memref<?x?xf32, #[[$strided2D]]> into memref<?x4x?xf32, #[[$strided3D]]>

func @expand_collapse_shape_zero_dim(%arg0 : memref<1x1xf32>, %arg1 : memref<f32>)
    -> (memref<f32>, memref<1x1xf32>) {
  %0 = memref.collapse_shape %arg0 [] : memref<1x1xf32> into memref<f32>
  %1 = memref.expand_shape %0 [] : memref<f32> into memref<1x1xf32>
  return %0, %1 : memref<f32>, memref<1x1xf32>
}
// CHECK-LABEL: func @expand_collapse_shape_zero_dim
//       CHECK:   memref.collapse_shape %{{.*}} [] : memref<1x1xf32> into memref<f32>
//       CHECK:   memref.expand_shape %{{.*}} [] : memref<f32> into memref<1x1xf32>

func @collapse_shape_to_dynamic
  (%arg0: memref<?x?x?x4x?xf32>) -> memref<?x?x?xf32> {
  %0 = memref.collapse_shape %arg0 [[0], [1], [2, 3, 4]] :
    memref<?x?x?x4x?xf32> into memref<?x?x?xf32>
  return %0 : memref<?x?x?xf32>
}
//      CHECK: func @collapse_shape_to_dynamic
//      CHECK:   memref.collapse_shape
// CHECK-SAME:     [0], [1], [2, 3, 4]