//
//  CoreMLPool.cpp
//  MNN
//
//  Created by MNN on 2021/03/25.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "CoreMLPool.hpp"

namespace MNN {


CoreMLPool(MNN::Backend * b,const MNN::Op * op,const std::vector<Tensor * > & inputs,const std::vector<MNN::Tensor * > & outputs)14 CoreMLPool::CoreMLPool(MNN::Backend *b, const MNN::Op *op, const std::vector<Tensor *> &inputs, const std::vector<MNN::Tensor *> &outputs) : CoreMLCommonExecution(b, op) {
15     initLayer();
16 }
onResize(const std::vector<Tensor * > & inputs,const std::vector<Tensor * > & outputs)18 ErrorCode CoreMLPool::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
19     MNN_ASSERT(inputs.size() == 1 && outputs.size() == 1);
20     auto pool    = mOp->main_as_Pool();
21     auto strideX = pool->strideX();
22     auto strideY = pool->strideY();
23     auto kernelX = pool->kernelX();
24     auto kernelY = pool->kernelY();
25     auto padMod  = pool->padType();
26     auto global  = pool->isGlobal();
27     mLayer_->pooling = mCoreMLBackend->create<CoreML__Specification__PoolingLayerParams>();
28     mLayer_->layer_case = CORE_ML__SPECIFICATION__NEURAL_NETWORK_LAYER__LAYER_POOLING;
29     core_ml__specification__pooling_layer_params__init(mLayer_->pooling);
30     mLayer_->pooling->globalpooling = global;
31     mLayer_->pooling->n_stride = 2;
32     mLayer_->pooling->stride = mCoreMLBackend->create<uint64_t>(mLayer_->pooling->n_stride);
33     mLayer_->pooling->stride[0] = strideY;
34     mLayer_->pooling->stride[1] = strideX;
35     mLayer_->pooling->n_kernelsize = 2;
36     mLayer_->pooling->kernelsize = mCoreMLBackend->create<uint64_t>(mLayer_->pooling->n_kernelsize);
37     mLayer_->pooling->kernelsize[0] = kernelY;
38     mLayer_->pooling->kernelsize[1] = kernelX;
39     switch (padMod) {
40         case PoolPadType_SAME:
41             mLayer_->pooling->pooling_padding_type_case = CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_PADDING_TYPE_SAME;
42             mLayer_->pooling->same = mCoreMLBackend->create<CoreML__Specification__SamePadding>();
43             core_ml__specification__same_padding__init(mLayer_->pooling->same);
44             break;
45         case PoolPadType_VALID:
46             mLayer_->pooling->pooling_padding_type_case = CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_PADDING_TYPE_VALID;
47             mLayer_->pooling->valid = mCoreMLBackend->create<CoreML__Specification__ValidPadding>();
48             core_ml__specification__valid_padding__init(mLayer_->pooling->valid);
49             break;
50         case PoolPadType_CAFFE:
51             // TODO: deal caffe pad mode
52             mLayer_->pooling->pooling_padding_type_case = CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_PADDING_TYPE_INCLUDE_LAST_PIXEL;
53             mLayer_->pooling->includelastpixel = mCoreMLBackend->create<CoreML__Specification__PoolingLayerParams__ValidCompletePadding>();
54             core_ml__specification__pooling_layer_params__valid_complete_padding__init(mLayer_->pooling->includelastpixel);
55             break;
56         default:
57             break;
58     }
59     if (pool->type() == PoolType_AVEPOOL) {
60         mLayer_->pooling->type = CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_TYPE__AVERAGE;
61         mLayer_->pooling->avgpoolexcludepadding = true;
62     } else {
63         mLayer_->pooling->type = CORE_ML__SPECIFICATION__POOLING_LAYER_PARAMS__POOLING_TYPE__MAX;
64     }
65     setLayerInputsAndOutputs(mLayer_, {mCoreMLBackend->getTensorName(inputs[0])}, {mCoreMLBackend->getTensorName(outputs[0])});
66     mCoreMLBackend->addLayer(mLayer_);
67     return NO_ERROR;
68 }


71 CoreMLCreatorRegister<TypedCreator<CoreMLPool>> __pool_op(OpType_Pooling);


} // namespace MNN