//
//  CoreMLRaster.cpp
//  MNN
//
//  Created by MNN on 2021/03/25.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "CoreMLRaster.hpp"
#include <cmath>

namespace MNN {

CoreMLRaster::CoreMLRaster(MNN::Backend *b, const MNN::Op *op, const std::vector<Tensor *> &inputs, const std::vector<MNN::Tensor *> &outputs) : CoreMLCommonExecution(b, op) {
    initLayer();
}

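// Heuristic: treat the region as a transpose when exactly one axis has unit
// source stride, exactly one (different) axis has unit destination stride, and
// both of those extents are at least 4.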
static bool isTranspose(const Tensor::InsideDescribe::Region& region) {
    int srcOne = -1, dstOne = -1;
    for (int i = 0; i < 3; i++) {
        if (region.src.stride[i] == 1 && region.size[i] != 1) {
            if (srcOne >= 0 || region.size[i] < 4) {
                return false;
            }
            srcOne = i;
        }
        if (region.dst.stride[i] == 1 && region.size[i] != 1) {
            if (dstOne >= 0 || region.size[i] < 4) {
                return false;
            }
            dstOne = i;
        }
    }
    return srcOne >= 0 && dstOne >= 0 && srcOne != dstOne;
}

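// Lower a plain copy region to a CoreML reshape_static layer whose target shape
// is the MNN output shape.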
bool CoreMLRaster::buildReshape(CoreML__Specification__NeuralNetworkLayer* layer, const Tensor* input, const Tensor* output) {
    mCoreMLBackend->setLayerName(layer, "Reshape");
    layer->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RESHAPE_STATIC;
    layer->reshapestatic = mCoreMLBackend->create<CoreML__Specification__ReshapeStaticLayerParams>();
    core_ml__specification__reshape_static_layer_params__init(layer->reshapestatic);
    auto outputShape = output->shape();
    layer->reshapestatic->n_targetshape = outputShape.size();
    layer->reshapestatic->targetshape = mCoreMLBackend->create<int64_t>(layer->reshapestatic->n_targetshape);
    for (int i = 0; i < outputShape.size(); i++) {
        layer->reshapestatic->targetshape[i] = outputShape[i];
    }
    mCoreMLBackend->setLayerInputs(layer, {mCoreMLBackend->getTensorName(input)});
    return true;
}
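
// Lower a transpose region to a CoreML permute layer. When the input and output
// ranks differ, a reshape_static layer is appended after the permute to recover
// the output shape.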
bool CoreMLRaster::buildPermute(CoreML__Specification__NeuralNetworkLayer* layer, const Tensor* input, const Tensor* output) {
    bool needReshape = input->dimensions() != output->dimensions();
    CoreML__Specification__NeuralNetworkLayer *permuteLayer = layer, *reshapeLayer = nullptr;
    if (needReshape) {
        permuteLayer = mCoreMLBackend->create<CoreML__Specification__NeuralNetworkLayer>();
        core_ml__specification__neural_network_layer__init(permuteLayer);
        reshapeLayer = layer;
    }
    mCoreMLBackend->setLayerName(permuteLayer, "Permute");
    permuteLayer->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_PERMUTE;
    permuteLayer->permute = mCoreMLBackend->create<CoreML__Specification__PermuteLayerParams>();
    core_ml__specification__permute_layer_params__init(permuteLayer->permute);
    permuteLayer->permute->n_axis = 4;
    permuteLayer->permute->axis = mCoreMLBackend->create<uint64_t>(permuteLayer->permute->n_axis);
    auto srcFormat = TensorUtils::getDescribe(input)->dimensionFormat;
    auto dstFormat = TensorUtils::getDescribe(output)->dimensionFormat;
    // NCHW -> NHWC
    if ((srcFormat == MNN_DATA_FORMAT_NC4HW4 || srcFormat == MNN_DATA_FORMAT_NCHW)
        && dstFormat == MNN_DATA_FORMAT_NHWC) {
        permuteLayer->permute->axis[0] = 0;
        permuteLayer->permute->axis[1] = 2;
        permuteLayer->permute->axis[2] = 3;
        permuteLayer->permute->axis[3] = 1;
    }
    // NHWC -> NCHW
    if ((dstFormat == MNN_DATA_FORMAT_NC4HW4 || dstFormat == MNN_DATA_FORMAT_NCHW)
        && srcFormat == MNN_DATA_FORMAT_NHWC) {
        permuteLayer->permute->axis[0] = 0;
        permuteLayer->permute->axis[1] = 3;
        permuteLayer->permute->axis[2] = 1;
        permuteLayer->permute->axis[3] = 2;
    }
    mCoreMLBackend->setLayerInputs(permuteLayer, {mCoreMLBackend->getTensorName(input)});
    if (reshapeLayer) {
        std::string middleName = mCoreMLBackend->getTensorName(input) + "_permute_" + mCoreMLBackend->getTensorName(output);
        mCoreMLBackend->setLayerOutputs(permuteLayer, {middleName});
        mCoreMLBackend->addLayer(permuteLayer);
        mCoreMLBackend->setLayerName(reshapeLayer, "Permute_Reshape");
        reshapeLayer->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_RESHAPE_STATIC;
        reshapeLayer->reshapestatic = mCoreMLBackend->create<CoreML__Specification__ReshapeStaticLayerParams>();
        core_ml__specification__reshape_static_layer_params__init(reshapeLayer->reshapestatic);
        auto outputShape = output->shape();
        reshapeLayer->reshapestatic->n_targetshape = outputShape.size();
        reshapeLayer->reshapestatic->targetshape = mCoreMLBackend->create<int64_t>(reshapeLayer->reshapestatic->n_targetshape);
        for (int i = 0; i < outputShape.size(); i++) {
            reshapeLayer->reshapestatic->targetshape[i] = outputShape[i];
        }
        mCoreMLBackend->setLayerInputs(reshapeLayer, {middleName});
    }
    return true;
}

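// Lower a growing region to a CoreML constant (zero) padding layer on H/W.
// NHWC inputs are wrapped with pre/post permute layers because the padding
// layer works on the [C, H, W] layout.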
bool CoreMLRaster::buildPad(CoreML__Specification__NeuralNetworkLayer* layer, const Tensor* input, const Tensor* output) {
    bool needPermute = TensorUtils::getDescribe(input)->dimensionFormat == MNN_DATA_FORMAT_NHWC;
    CoreML__Specification__NeuralNetworkLayer *padLayer = layer, *postPermute = nullptr;
    std::string inputName = mCoreMLBackend->getTensorName(input);
    if (needPermute) {
        padLayer = mCoreMLBackend->create<CoreML__Specification__NeuralNetworkLayer>();
        core_ml__specification__neural_network_layer__init(padLayer);
        postPermute = layer;
        // NHWC -> NCHW
        auto prePermute = mCoreMLBackend->create<CoreML__Specification__NeuralNetworkLayer>();
        core_ml__specification__neural_network_layer__init(prePermute);
        mCoreMLBackend->setLayerName(prePermute, "prePermute");
        prePermute->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_PERMUTE;
        prePermute->permute = mCoreMLBackend->create<CoreML__Specification__PermuteLayerParams>();
        core_ml__specification__permute_layer_params__init(prePermute->permute);
        prePermute->permute->n_axis = 4;
        prePermute->permute->axis = mCoreMLBackend->create<uint64_t>(prePermute->permute->n_axis);
        prePermute->permute->axis[0] = 0;
        prePermute->permute->axis[1] = 3;
        prePermute->permute->axis[2] = 1;
        prePermute->permute->axis[3] = 2;
        setLayerInputsAndOutputs(prePermute, {inputName}, {inputName + "-permute"});
        inputName = inputName + "-permute";
        mCoreMLBackend->addLayer(prePermute);
    }
    int padh = output->height() - input->height(), padw = output->width() - input->width();
    int top = padh / 2, bottom = std::ceil(padh / 2.0), left = padw / 2, right = std::ceil(padw / 2.0);
    mCoreMLBackend->setLayerName(padLayer, "Pad");
    padLayer->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_PADDING;
    padLayer->padding = mCoreMLBackend->create<CoreML__Specification__PaddingLayerParams>();
    core_ml__specification__padding_layer_params__init(padLayer->padding);
    padLayer->padding->padding_type_case = CORE_ML__SPECIFICATION__PADDING_LAYER_PARAMS__PADDING_TYPE_CONSTANT;
    padLayer->padding->constant = mCoreMLBackend->create<CoreML__Specification__PaddingLayerParams__PaddingConstant>();
    core_ml__specification__padding_layer_params__padding_constant__init(padLayer->padding->constant);
    padLayer->padding->constant->value = 0;
    padLayer->padding->paddingamounts = mCoreMLBackend->create<CoreML__Specification__BorderAmounts>();
    core_ml__specification__border_amounts__init(padLayer->padding->paddingamounts);
    padLayer->padding->paddingamounts->n_borderamounts = 2;
    padLayer->padding->paddingamounts->borderamounts = mCoreMLBackend->create<CoreML__Specification__BorderAmounts__EdgeSizes*>(2);
    padLayer->padding->paddingamounts->borderamounts[0] = mCoreMLBackend->create<CoreML__Specification__BorderAmounts__EdgeSizes>();
    core_ml__specification__border_amounts__edge_sizes__init(padLayer->padding->paddingamounts->borderamounts[0]);
    padLayer->padding->paddingamounts->borderamounts[0]->startedgesize = top;
    padLayer->padding->paddingamounts->borderamounts[0]->endedgesize = bottom;
    padLayer->padding->paddingamounts->borderamounts[1] = mCoreMLBackend->create<CoreML__Specification__BorderAmounts__EdgeSizes>();
    core_ml__specification__border_amounts__edge_sizes__init(padLayer->padding->paddingamounts->borderamounts[1]);
    padLayer->padding->paddingamounts->borderamounts[1]->startedgesize = left;
    padLayer->padding->paddingamounts->borderamounts[1]->endedgesize = right;
    mCoreMLBackend->setLayerInputs(padLayer, {inputName});
    if (needPermute) {
        inputName = inputName + "-pad";
        mCoreMLBackend->setLayerOutputs(padLayer, {inputName});
        mCoreMLBackend->addLayer(padLayer);
        // NCHW -> NHWC
        mCoreMLBackend->setLayerName(postPermute, "postPermute");
        postPermute->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_PERMUTE;
        postPermute->permute = mCoreMLBackend->create<CoreML__Specification__PermuteLayerParams>();
        core_ml__specification__permute_layer_params__init(postPermute->permute);
        postPermute->permute->n_axis = 4;
        postPermute->permute->axis = mCoreMLBackend->create<uint64_t>(postPermute->permute->n_axis);
        postPermute->permute->axis[0] = 0;
        postPermute->permute->axis[1] = 2;
        postPermute->permute->axis[2] = 3;
        postPermute->permute->axis[3] = 1;
        mCoreMLBackend->setLayerInputs(postPermute, {inputName});
    }
    return true;
}

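// Lower a shrinking region to a CoreML crop layer on H/W (center crop).
// Only NCHW/NC4HW4 inputs are supported.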
bool CoreMLRaster::buildCrop(CoreML__Specification__NeuralNetworkLayer* layer, const Tensor* input, const Tensor* output) {
    if (TensorUtils::getDescribe(input)->dimensionFormat == MNN_DATA_FORMAT_NHWC) {
        return false;
    }
    int croph = input->height() - output->height(), cropw = input->width() - output->width();
    int top = croph / 2, bottom = std::ceil(croph / 2.0), left = cropw / 2, right = std::ceil(cropw / 2.0);
    mCoreMLBackend->setLayerName(layer, "Crop");
    layer->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CROP;
    layer->crop = mCoreMLBackend->create<CoreML__Specification__CropLayerParams>();
    core_ml__specification__crop_layer_params__init(layer->crop);
    layer->crop->cropamounts = mCoreMLBackend->create<CoreML__Specification__BorderAmounts>();
    core_ml__specification__border_amounts__init(layer->crop->cropamounts);
    layer->crop->cropamounts->n_borderamounts = 2;
    layer->crop->cropamounts->borderamounts = mCoreMLBackend->create<CoreML__Specification__BorderAmounts__EdgeSizes*>(2);
    layer->crop->cropamounts->borderamounts[0] = mCoreMLBackend->create<CoreML__Specification__BorderAmounts__EdgeSizes>();
    core_ml__specification__border_amounts__edge_sizes__init(layer->crop->cropamounts->borderamounts[0]);
    layer->crop->cropamounts->borderamounts[0]->startedgesize = top;
    layer->crop->cropamounts->borderamounts[0]->endedgesize = bottom;
    layer->crop->cropamounts->borderamounts[1] = mCoreMLBackend->create<CoreML__Specification__BorderAmounts__EdgeSizes>();
    core_ml__specification__border_amounts__edge_sizes__init(layer->crop->cropamounts->borderamounts[1]);
    layer->crop->cropamounts->borderamounts[1]->startedgesize = left;
    layer->crop->cropamounts->borderamounts[1]->endedgesize = right;
    mCoreMLBackend->setLayerInputs(layer, {mCoreMLBackend->getTensorName(input)});
    return true;
}

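// Lower a shrinking region to a CoreML slice_static layer over [Seq, N, C, H, W],
// slicing C/H/W from 0 up to the output extent and masking the axes that keep
// their full length.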
bool CoreMLRaster::buildSlice(CoreML__Specification__NeuralNetworkLayer* layer, const Tensor* input, const Tensor* output) {
    int endc = output->channel(), endh = output->height(), endw = output->width();
    bool maskc = endc == input->channel(), maskh = endh == input->height(), maskw = endw == input->width();
    mCoreMLBackend->setLayerName(layer, "Slice");
    layer->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_SLICE_STATIC;
    layer->slicestatic = mCoreMLBackend->create<CoreML__Specification__SliceStaticLayerParams>();
    core_ml__specification__slice_static_layer_params__init(layer->slicestatic);
    // [Seq, N, C, H, W] : [0:1:-1, 0:1:-1, 0:1:endc, 0:1:endh, 0:1:endw]
    int dims = 5;
    layer->slicestatic->n_beginids = dims;
    layer->slicestatic->beginids = mCoreMLBackend->create<int64_t>(dims);
    layer->slicestatic->n_beginmasks = dims;
    layer->slicestatic->beginmasks = mCoreMLBackend->create<int>(dims);
    layer->slicestatic->n_strides = dims;
    layer->slicestatic->strides = mCoreMLBackend->create<int64_t>(dims);
    for (int i = 0; i < dims; i++) {
        layer->slicestatic->beginids[i] = 0;
        layer->slicestatic->beginmasks[i] = true;
        layer->slicestatic->strides[i] = 1;
    }
    layer->slicestatic->n_endids = dims;
    layer->slicestatic->endids = mCoreMLBackend->create<int64_t>(dims);
    layer->slicestatic->n_endmasks = dims;
    layer->slicestatic->endmasks = mCoreMLBackend->create<int>(dims);
    layer->slicestatic->endids[0] = -1;
    layer->slicestatic->endids[1] = -1;
    layer->slicestatic->endids[2] = endc;
    layer->slicestatic->endids[3] = endh;
    layer->slicestatic->endids[4] = endw;
    layer->slicestatic->endmasks[0] = true;
    layer->slicestatic->endmasks[1] = true;
    layer->slicestatic->endmasks[2] = maskc;
    layer->slicestatic->endmasks[3] = maskh;
    layer->slicestatic->endmasks[4] = maskw;
    mCoreMLBackend->setLayerInputs(layer, {mCoreMLBackend->getTensorName(input)});
    return true;
}

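// Try to lower the raster op to a native CoreML layer.
// - A single region becomes a reshape (pure copy), permute (transpose), or pad,
//   depending on how the input and output element counts compare.
// - Multiple regions that match the output shape on all but one axis become a
//   concat: Concat when every region has the same element count and the axis is
//   the channel axis, ConcatND (with a negative axis) otherwise.
// Returns false when no native mapping applies, so onResize falls back to a
// custom layer.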
bool CoreMLRaster::rasterOptimization(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
    const auto& regions = TensorUtils::getDescribe(inputs[0])->regions;
    const auto region = regions[0];
    // region_size = 1: reshape, transpose
    if (regions.size() == 1) {
        int inputSize = 1, outputSize = 1;
        for (int i = 0; i < region.origin->dimensions(); i++) {
            inputSize *= region.origin->length(i);
        }
        for (int i = 0; i < outputs[0]->dimensions(); i++) {
            outputSize *= outputs[0]->length(i);
        }
        // reshape, permute
        if (inputSize == outputSize) {
            // reshape
            if (TensorUtils::isCopyRegion(region)) {
                return buildReshape(mLayer_, region.origin, outputs[0]);
            }
            // transpose
            if (isTranspose(region)) {
                return buildPermute(mLayer_, region.origin, outputs[0]);
            }
        }
        // pad
        if (inputSize < outputSize) {
            return buildPad(mLayer_, region.origin, outputs[0]);
        }
        // slice/crop
        if (inputSize > outputSize) {
            return false;
            // TODO: buildCrop/buildSlice currently trigger an ANE error on the Apple NPU, so fall back to the custom layer.
            // return buildCrop(mLayer_, region.origin, outputs[0]);
            // return buildSlice(mLayer_, region.origin, outputs[0]);
        }
        return false;
    }
    // region_size > 1: concat
    {
        int dim = outputs[0]->dimensions();
        if (region.origin->dimensions() != dim) {
            return false;
        }
        int axis = -1;
        for (int i = 0; i < outputs[0]->dimensions(); i++) {
            if (region.origin->length(i) != outputs[0]->length(i)) {
                if (axis >= 0) {
                    return false;
                }
                axis = i;
            }
        }
        int elemSize = region.size[0] * region.size[1] * region.size[2];
        bool isSameShape = true;
        for (int i = 1; i < regions.size(); i++) {
            isSameShape &= (elemSize == regions[i].size[0] * regions[i].size[1] * regions[i].size[2]);
            if (regions[i].origin->dimensions() != dim) {
                return false;
            }
            for (int j = 0; j < dim; j++) {
                if (j != axis && regions[i].origin->length(j) != outputs[0]->length(j)) {
                    return false;
                }
            }
        }
        if (isSameShape && (axis - dim == -3)) {
            mCoreMLBackend->setLayerName(mLayer_, "Concat");
            mLayer_->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CONCAT;
            mLayer_->concat = mCoreMLBackend->create<CoreML__Specification__ConcatLayerParams>();
            core_ml__specification__concat_layer_params__init(mLayer_->concat);
            mLayer_->concat->sequenceconcat = false;
        } else {
            mCoreMLBackend->setLayerName(mLayer_, "NDConcat");
            mLayer_->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CONCAT_ND;
            mLayer_->concatnd = mCoreMLBackend->create<CoreML__Specification__ConcatNDLayerParams>();
            core_ml__specification__concat_ndlayer_params__init(mLayer_->concatnd);
            mLayer_->concatnd->axis = axis - dim;
        }
        std::vector<std::string> inputNames;
        for (const auto& reg : regions) {
            inputNames.push_back(mCoreMLBackend->getTensorName(reg.origin));
        }
        mCoreMLBackend->setLayerInputs(mLayer_, std::move(inputNames));
        return true;
    }
    return false;
}
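
// Debug helper: print a region's size, origin pointer, and the stride/offset of
// its source and destination views.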
static void dumpRegion(const Tensor::InsideDescribe::Region& reg) {
    printf("\n{\nsize: [%d, %d, %d], origin: %p\n", reg.size[0], reg.size[1], reg.size[2], reg.origin);
    printf("src: { stride: [%d, %d, %d], offset: %d }\n", reg.src.stride[0], reg.src.stride[1], reg.src.stride[2], reg.src.offset);
    printf("dst: { stride: [%d, %d, %d], offset: %d }\n}\n", reg.dst.stride[0], reg.dst.stride[1], reg.dst.stride[2], reg.dst.offset);
}
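
// Build the CoreML layer for this raster op. When rasterOptimization cannot map
// it to a native layer, a custom "RasterLayer" is emitted: weights[0] carries
// the output shape and weights[1..n] carry the first eleven ints of each region
// (its src/dst views and size), stored bit-for-bit in the float weight buffers.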
ErrorCode CoreMLRaster::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
    MNN_ASSERT(inputs.size() == 1 && outputs.size() == 1);
    if (!rasterOptimization(inputs, outputs)) {
        /*
        printf(">>> start\n");
        for (const auto& reg : TensorUtils::getDescribe(inputs[0])->regions) {
            printf("inputShape: ["); for (auto x : reg.origin->shape()) printf("%d, ", x); printf("]\n");
            dumpRegion(reg);
        }
        printf("outputShape: ["); for (auto x : outputs[0]->shape()) printf("%d, ", x); printf("]\n");
        printf(">>> end\n");
        */
        auto outputShape = outputs[0]->shape();
        mLayer_->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_CUSTOM;
        mLayer_->custom = mCoreMLBackend->create<CoreML__Specification__CustomLayerParams>();
        core_ml__specification__custom_layer_params__init(mLayer_->custom);
        mCoreMLBackend->copyName(&(mLayer_->custom->classname), "RasterLayer");
        const auto& regions = TensorUtils::getDescribe(inputs[0])->regions;
        mLayer_->custom->n_weights = regions.size() + 1;
        mLayer_->custom->weights = mCoreMLBackend->create<CoreML__Specification__WeightParams*>(mLayer_->custom->n_weights);
        std::vector<std::string> inputNames;
        for (int i = 0; i <= regions.size(); i++) {
            mLayer_->custom->weights[i] = mCoreMLBackend->create<CoreML__Specification__WeightParams>();
            core_ml__specification__weight_params__init(mLayer_->custom->weights[i]);
            if (i == 0) {
                // weights[0]: the output shape, int bits stored in the float buffer
                mLayer_->custom->weights[i]->n_floatvalue = outputShape.size();
                mLayer_->custom->weights[i]->floatvalue = mCoreMLBackend->create<float>(mLayer_->custom->weights[i]->n_floatvalue);
                memcpy(mLayer_->custom->weights[i]->floatvalue, outputShape.data(), outputShape.size() * sizeof(int));
            } else {
                // weights[i]: the (i-1)-th region's eleven ints, also stored as raw bits
                mLayer_->custom->weights[i]->n_floatvalue = 11;
                mLayer_->custom->weights[i]->floatvalue = mCoreMLBackend->create<float>(mLayer_->custom->weights[i]->n_floatvalue);
                memcpy(mLayer_->custom->weights[i]->floatvalue, &(regions[i-1]), 11 * sizeof(int));
                inputNames.push_back(mCoreMLBackend->getTensorName(regions[i-1].origin));
            }
        }
        mCoreMLBackend->setLayerInputs(mLayer_, std::move(inputNames));
    }
    mCoreMLBackend->setLayerOutputs(mLayer_, {mCoreMLBackend->getTensorName(outputs[0])});
    mCoreMLBackend->addLayer(mLayer_);
    return NO_ERROR;
}

CoreMLCreatorRegister<TypedCreator<CoreMLRaster>> __raster_op(OpType_Raster);
} // namespace MNN