//
//  ConvolutionWinograd3D.cpp
//  MNN
//
//  Created by MNN on 2018/09/23.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include "backend/cpu/compute/ConvolutionWinograd3D.hpp"
#include "backend/cpu/CPUBackend.hpp"
#include <math.h>
#include <set>
#include "backend/cpu/compute/CommonOptFunction.h"
#include "core/Concurrency.h"
#include "backend/cpu/compute/ConvOpt.h"
#include "core/Macro.h"
#include "core/TensorUtils.hpp"
#include "math/WingoradGenerater.hpp"
#ifdef MNN_USE_NEON
#include <arm_neon.h>
#endif
#define CONVOLUTION_WINOGRAD_MAX_UNIT 8
#define CONVOLUTION_WINOGRAD_MIN_UNIT 2
using namespace MNN::Math;

//#define MNN_WINOGRAD_PRINT_REDUCE_RATE

namespace MNN {
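// Constructor: validates the 3D kernel (the spatial kernel must be square), records the padding
// mode and post function, then pre-transforms the weights into the Winograd domain. The depth
// dimension is not Winograd-transformed: each kernel-depth slice only gets the 2D (alpha x alpha)
// transform here and is accumulated over depth at execution time.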
ConvolutionWinograd3D::ConvolutionWinograd3D(const Convolution3DCommon *convOp, const Tensor *input, const Tensor *output,
                                             Backend *b, const float *originWeight, size_t originWeightSize,
                                             const float *bias, size_t biasSize, int unit) : Execution(b), mUnit(unit) {
    for (int32_t kernel: *(convOp->kernels())) {
        mKernels.push_back(kernel);
    }
    MNN_ASSERT(mKernels[1] == mKernels[2]);
    mPadMode = convOp->padMode();
    if (mPadMode != PadMode_SAME) {
        for (int32_t pad: *(convOp->pads())) {
            mPads.push_back(pad);
        }
    }
    mPostFunction = CPUConvolution3D::getPostFunction(convOp);

    const int inputChannel = convOp->inputCount(), outputChannel = convOp->outputCount();
    const int kernelDepth = mKernels[0], kernelSize = mKernels[1], alpha = unit + kernelSize - 1, alpha2 = alpha * alpha;
    mAlpha = alpha;

    mSourceTransform = WinogradFunction::chooseSourceTransform(alpha, alpha);
    mDestTransform   = WinogradFunction::chooseDestTransform(alpha, unit);

    mWeight.reset(Tensor::createDevice<float>({ALIGN_UP4(inputChannel) * ALIGN_UP4(outputChannel) * kernelDepth * alpha2}));
    mBias.reset(Tensor::createDevice<float>({ALIGN_UP4((int)biasSize)}));
    bool valid = b->onAcquireBuffer(mWeight.get(), Backend::STATIC);
    valid = valid && b->onAcquireBuffer(mBias.get(), Backend::STATIC);
    if (!valid) {
        return;
    }

    memset(mBias->host<float>(), 0, mBias->size());
    memcpy(mBias->host<float>(), bias, biasSize * sizeof(float));

    WinogradGenerater generator(unit, kernelSize);

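    // Transform the weights one kernel-depth slice at a time: each (oc, ic, k, k) slice is
    // converted to the (alpha^2, oc/4, ic/4, 4, 4) Winograd layout and written into the
    // corresponding depth section of mWeight.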
    const int srcDepthStep = inputChannel * outputChannel * kernelSize * kernelSize;
    const int dstDepthStep = ALIGN_UP4(inputChannel) * ALIGN_UP4(outputChannel) * alpha2;
    std::shared_ptr<Tensor> srcWeight, transWeight;
    for (int d = 0; d < kernelDepth; ++d) {
        srcWeight.reset(Tensor::create<float>({outputChannel, inputChannel, kernelSize, kernelSize}, (void*)(originWeight + d * srcDepthStep)));
        transWeight.reset(Tensor::create<float>({alpha2, UP_DIV(outputChannel, 4), UP_DIV(inputChannel, 4), 4, 4},
                                                (void*)(mWeight->host<float>() + d * dstDepthStep)));
        generator.transformWeight(transWeight.get(), srcWeight.get());
    }
}
ConvolutionWinograd3D::~ConvolutionWinograd3D() {
    if (nullptr != mBias) {
        backend()->onReleaseBuffer(mBias.get(), Backend::STATIC);
    }
    if (nullptr != mWeight) {
        backend()->onReleaseBuffer(mWeight.get(), Backend::STATIC);
    }
}

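// onResize: resolves SAME padding from the actual input/output extents and reserves the
// per-thread scratch buffers used during execution: Winograd-domain source tiles, destination
// tiles plus one extra plane used as a GEMM accumulator, and two small per-tile transform buffers.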
ErrorCode ConvolutionWinograd3D::onResize(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
    auto input = inputs[0];
    auto output = outputs[0];
    const int oc = output->length(1), od = output->length(2);
    const int ic = input->length(1), id = input->length(2);
    const int threadNumber = ((CPUBackend*)backend())->threadNumber();
    const int alpha2 = mAlpha * mAlpha;
    auto CONVOLUTION_TILED_NUMBER = MNNGetConvolutionTileNumber();

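    // SAME padding with stride 1: the input extent needed along each axis is (out - 1) + kernel,
    // so the front/top/left pad is half of the shortfall. For example, in = 8, kernel = 3,
    // out = 8 gives pad = (8 - 1 + 3 - 8) / 2 = 1.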
    if (mPadMode == PadMode_SAME) {
        mPads.clear();
        for (int i = 0; i < 3; ++i) {
            int inputNeeded = output->length(i + 2) - 1 + mKernels[i];
            mPads.push_back((inputNeeded - input->length(i + 2)) / 2);
        }
    }

    mSourceBuffer.reset(Tensor::createDevice<float>({threadNumber, id, alpha2, UP_DIV(ic, 4), CONVOLUTION_TILED_NUMBER, 4}));
    mDestBuffer.reset(Tensor::createDevice<float>({threadNumber, od + 1, alpha2, UP_DIV(oc, 4), CONVOLUTION_TILED_NUMBER, 4}));
    mTempBuffer.reset(Tensor::createDevice<float>({threadNumber, 2, alpha2, 4}));

    bool succ = backend()->onAcquireBuffer(mSourceBuffer.get(), Backend::DYNAMIC);
    succ = succ && backend()->onAcquireBuffer(mDestBuffer.get(), Backend::DYNAMIC);
    succ = succ && backend()->onAcquireBuffer(mTempBuffer.get(), Backend::DYNAMIC);
    if (!succ) {
        return OUT_OF_MEMORY;
    }
    backend()->onReleaseBuffer(mSourceBuffer.get(), Backend::DYNAMIC);
    backend()->onReleaseBuffer(mDestBuffer.get(), Backend::DYNAMIC);
    backend()->onReleaseBuffer(mTempBuffer.get(), Backend::DYNAMIC);
    return NO_ERROR;
}

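// onExecute: for every batch, output tiles of size dstUnit x dstUnit are processed in groups of
// CONVOLUTION_TILED_NUMBER. Each group goes through three stages: the source transform packs the
// corresponding input patches into the Winograd domain, the GEMM stage multiplies them by the
// pre-transformed weights while accumulating over the kernel-depth taps, and the destination
// transform maps the results back to the spatial domain. A post function then applies bias and
// the activation.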
ErrorCode ConvolutionWinograd3D::onExecute(const std::vector<Tensor *> &inputs, const std::vector<Tensor *> &outputs) {
    auto input   = inputs[0];
    auto output  = outputs[0];
    auto CONVOLUTION_TILED_NUMBER = MNNGetConvolutionTileNumber();

    const int dstUnit = mUnit, srcUnit = mAlpha, srcUnit2 = srcUnit * srcUnit;
    const int outputWidth = output->length(4), outputHeight = output->length(3), outputDepth = output->length(2);
    const int inputWidth = input->length(4), inputHeight = input->length(3), inputDepth = input->length(2);
    const int wUnit = UP_DIV(outputWidth, dstUnit), hUnit = UP_DIV(outputHeight, dstUnit);
    const int ic_4 = UP_DIV(input->length(1), 4), dc_4 = UP_DIV(output->length(1), 4);
    const int padY = mPads[1], padX = mPads[2], padDepth = mPads[0], kernelDepth = mKernels[0];
    const int totalCount = wUnit * hUnit, tileCount = UP_DIV(totalCount, CONVOLUTION_TILED_NUMBER);

    auto postFunction = mPostFunction;
    const int threadNumber = std::max(((CPUBackend *)backend())->threadNumber(), 1);

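    // Source transform: for each tile in the group, extract the srcUnit x srcUnit input patch
    // (zero-padding via midBuffer0 when the patch crosses the border), apply the 1D source
    // transform along rows and then columns, and scatter the result into the per-thread
    // Winograd-domain buffer for every input depth plane.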
    auto sourceTransformFunc = [=](int xIndex, int xC, const float* srcOrigin, float* dstOrigin, float* midBuffer0, float* midBuffer1) {
        int sourceZStep = inputDepth * inputWidth * inputHeight * 4;
        int dstZStep    = xC * 4;
        int unitStep    = ic_4 * xC * 4;
        for (int xi = 0; xi < xC; ++xi) {
            const int index = xIndex + xi, wIndex = index % wUnit, hIndex = index / wUnit;
            const int srcX = wIndex * dstUnit - padX, srcY = hIndex * dstUnit - padY;
            const int sx = ALIMAX(0, srcX) - srcX, ex = ALIMIN(srcX + srcUnit, inputWidth) - srcX;
            const int sy = ALIMAX(0, srcY) - srcY, ey = ALIMIN(srcY + srcUnit, inputHeight) - srcY;
            const int count = 4 * (ex - sx);

            auto dst_x = dstOrigin + 4 * xi;

            auto srcStart = srcOrigin + (srcX + srcY * inputWidth) * 4;
            if (ey - sy < srcUnit) {
                memset(midBuffer1, 0, srcUnit2 * 4 * sizeof(float));
            }
            if (ex - sx == srcUnit) {
                for (int z = 0; z < ic_4; ++z) {
                    auto srcZ = srcStart + z * sourceZStep;
                    auto dstZ = dst_x + z * dstZStep;
                    for (int d = 0; d < inputDepth; ++d) {
                        auto src_depth = srcZ + d * inputWidth * inputHeight * 4;
                        auto dst_depth = dstZ + d * srcUnit2 * ic_4 * xC * 4;
                        // Transform
                        for (int i = sy; i < ey; ++i) {
                            mSourceTransform(src_depth + 4 * i * inputWidth, midBuffer1 + 4 * i, 4, 4 * srcUnit);
                        }
                        for (int i = 0; i < srcUnit; ++i) {
                            mSourceTransform(midBuffer1 + 4 * i * srcUnit, dst_depth + i * unitStep, 4,
                                             unitStep * srcUnit);
                        }
                    }
                }
            } else {
                memset(midBuffer0, 0, srcUnit2 * 4 * sizeof(float));
                for (int z = 0; z < ic_4; ++z) {
                    // Extract
                    auto srcZ = srcStart + z * sourceZStep;
                    auto dstZ = dst_x + z * dstZStep;
                    for (int d = 0; d < inputDepth; ++d) {
                        auto src_depth = srcZ + d * inputWidth * inputHeight * 4;
                        auto dst_depth = dstZ + d * srcUnit2 * ic_4 * xC * 4;
                        if (count > 0) {
                            for (int yy = sy; yy < ey; ++yy) {
                                auto dst_yy = midBuffer0 + yy * srcUnit * 4 + sx * 4;
                                auto src_yy = src_depth + 4 * inputWidth * yy + sx * 4;
                                memcpy(dst_yy, src_yy, count * sizeof(float));
                            }
                        }
                        // Transform
                        for (int i = sy; i < ey; ++i) {
                            mSourceTransform(midBuffer0 + 4 * i * srcUnit, midBuffer1 + 4 * i, 4, 4 * srcUnit);
                        }
                        for (int i = 0; i < srcUnit; ++i) {
                            mSourceTransform(midBuffer1 + 4 * i * srcUnit, dst_depth + i * unitStep, 4,
                                             unitStep * srcUnit);
                        }
                    }
                }
            }
        }
    };

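    // Destination transform: the inverse of the source transform. Each Winograd-domain output
    // tile is collapsed back to a dstUnit x dstUnit spatial block; tiles that extend past the
    // right or bottom edge are first written to midBuffer1 and only the valid part is copied out.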
    auto destTransformFunc = [=](int xIndex, int xC, const float* srcOrigin, float* dstOrigin, float* midBuffer0, float* midBuffer1) {
        int dstZStep = outputDepth * outputHeight * outputWidth * 4;
        int srcZStep = xC * 4;
        int unitStep = dc_4 * xC * 4;
        for (int xi = 0; xi < xC; ++xi) {
            const int index = xIndex + xi, wIndex = index % wUnit, hIndex = index / wUnit;
            auto srcXi = srcOrigin + 4 * xi;

            const int dstX = wIndex * dstUnit, dstY = hIndex * dstUnit;
            auto dstStart = dstOrigin + 4 * (dstX + dstY * outputWidth);

            const int ey = ALIMIN(dstY + dstUnit, outputHeight) - dstY;
            const int ex = ALIMIN(dstX + dstUnit, outputWidth) - dstX;

            const int count = ex * 4;
            if (ex == dstUnit) {
                for (int z = 0; z < dc_4; ++z) {
                    auto dstZAddr = dstStart + z * dstZStep;
                    auto srcZ     = srcXi + z * srcZStep;
                    for (int d = 0; d < outputDepth; ++d) {
                        auto dst_depth = dstZAddr + d * outputHeight * outputWidth * 4;
                        auto src_depth = srcZ + d * srcUnit2 * dc_4 * xC * 4;
                        for (int i = 0; i < srcUnit; ++i) {
                            mDestTransform(src_depth + i * unitStep, midBuffer0 + i * dstUnit * 4,
                                           srcUnit * unitStep, 4);
                        }
                        for (int i = 0; i < ey; ++i) {
                            auto dstAddr = dst_depth + i * 4 * outputWidth;
                            mDestTransform(midBuffer0 + i * 4, dstAddr, 4 * dstUnit, 4);
                        }
                    }
                }
            } else {
                for (int z = 0; z < dc_4; ++z) {
                    auto dstZAddr = dstStart + z * dstZStep;
                    auto srcZ     = srcXi + z * srcZStep;
                    for (int d = 0; d < outputDepth; ++d) {
                        auto dst_depth = dstZAddr + d * outputHeight * outputWidth * 4;
                        auto src_depth = srcZ + d * srcUnit2 * dc_4 * xC * 4;
                        for (int i = 0; i < srcUnit; ++i) {
                            mDestTransform(src_depth + i * unitStep, midBuffer0 + i * dstUnit * 4,
                                           srcUnit * unitStep, 4);
                        }
                        for (int i = 0; i < ey; ++i) {
                            mDestTransform(midBuffer0 + i * 4, midBuffer1 + i * dstUnit * 4, 4 * dstUnit, 4);
                        }

                        for (int yy = 0; yy < ey; ++yy) {
                            auto dstYAddr = dst_depth + yy * 4 * outputWidth;
                            auto srcYAddr = midBuffer1 + yy * 4 * dstUnit;
                            memcpy(dstYAddr, srcYAddr, count * sizeof(float));
                        }
                    }
                }
            }
        }
    };

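    // GEMM stage: for each output depth plane, multiply the Winograd-domain input tiles by the
    // pre-transformed weights of every contributing kernel-depth tap (kd) and accumulate the
    // partial results. tempDst is the extra plane reserved in mDestBuffer; the first tap is
    // copied into the destination and later taps are added with MNNMatrixAdd.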
    auto gemmFunc = [=](int xC, int start, int end, const float* srcOrigin, const float* weight, float* dstOrigin) {
        float* tempDst = dstOrigin + outputDepth * srcUnit2 * dc_4 * xC * 4;
        const int element = (end - start) * dc_4 * xC * 4, offset = start * dc_4 * xC * 4;
        for (int od = 0; od < outputDepth; ++od) {
            bool add = false;
            float* _dstOrigin = dstOrigin + (od * srcUnit2 + start) * dc_4 * xC * 4;
            const int srcD = od - padDepth, kdStart = -ALIMIN(srcD, 0), kdEnd = kernelDepth - ALIMAX(srcD + kernelDepth - inputDepth, 0);
            for (int kd = kdStart; kd < kdEnd; ++kd) {
                const float* _srcOrigin = srcOrigin + (kd + srcD) * srcUnit2 * ic_4 * xC * 4;
                const float* _weight = weight + kd * srcUnit2 * dc_4 * ic_4 * 16;
                for (int i = start; i < end; ++i) {
                    if (xC == CONVOLUTION_TILED_NUMBER) {
                        MNNGemmFloatUnit_4(tempDst + i * dc_4 * xC * 4, _srcOrigin + i * ic_4 * 4 * xC,
                                           _weight + i * 16 * ic_4 * dc_4, ic_4, xC * 4, dc_4, 0);
                    } else {
                        MNNGemmFloatCommon_4(tempDst + i * dc_4 * xC * 4, _srcOrigin + i * ic_4 * 4 * xC,
                                             _weight + (i * dc_4) * ic_4 * 16, ic_4, xC * 4, dc_4, xC, 0);
                    }
                }
                if (add) {
                    MNNMatrixAdd(_dstOrigin, _dstOrigin, tempDst + offset, element / 4, 0, 0, 0, 1);
                } else {
                    memcpy(_dstOrigin, tempDst + offset, element * sizeof(float));
                }
                add = true;
            }
        }
    };

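    // Splits the alpha^2 Winograd positions of one tile group across threads; used when the
    // outer tile loop is not already running on all threads.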
    auto gemmConcurrencyFunc = [=, &gemmFunc](int xC, const float* _srcOrigin, const float* weight, float* _dstOrigin) {
        MNN_CONCURRENCY_BEGIN(tId, threadNumber) {
            const int step = UP_DIV(srcUnit2, threadNumber);
            gemmFunc(xC, tId * step, ALIMIN((tId + 1) * step, srcUnit2), _srcOrigin, weight, _dstOrigin);
        }
        MNN_CONCURRENCY_END()
    };

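    // Worker for one thread: walks its share of tile groups, running the source transform, the
    // GEMM (inline or via gemmConcurrencyFunc, depending on whether the caller already
    // parallelised over tiles) and the destination transform with this thread's scratch buffers.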
    auto tFunction = [&](const int tId, const int tileStart, const int tileStep, const int tileEnd, const float* srcOrigin, float* dstOrigin) {
        auto _srcOrigin = mSourceBuffer->host<float>() + tId * mSourceBuffer->stride(0);
        auto _dstOrigin = mDestBuffer->host<float>() + tId * mDestBuffer->stride(0);
        auto midBuffer0 = mTempBuffer->host<float>() + tId * mTempBuffer->stride(0);
        auto midBuffer1 = midBuffer0 + mTempBuffer->stride(1);
        for (int tIndex = (int)tileStart; tIndex < tileEnd; tIndex += tileStep) {
            int xIndex  = (int)tIndex * CONVOLUTION_TILED_NUMBER;
            int xRemain = totalCount - xIndex;
            int xC      = xRemain > CONVOLUTION_TILED_NUMBER ? CONVOLUTION_TILED_NUMBER : xRemain;

            sourceTransformFunc(xIndex, xC, srcOrigin, _srcOrigin, midBuffer0, midBuffer1);

            if (threadNumber != tileStep) {
                gemmConcurrencyFunc(xC, _srcOrigin, mWeight->host<float>(), _dstOrigin);
            } else {
                gemmFunc(xC, 0, srcUnit2, _srcOrigin, mWeight->host<float>(), _dstOrigin);
            }

            destTransformFunc(xIndex, xC, _dstOrigin, dstOrigin, midBuffer0, midBuffer1);
        }
    };

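    // Per batch: run all complete rounds of tile groups across the thread pool, handle any
    // remaining groups on a single thread, then apply the post function (bias + activation)
    // with the output channels split across threads.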
    for (int batchIndex = 0; batchIndex < input->batch(); ++batchIndex) {
        auto srcOrigin = input->host<float>() + batchIndex * input->stride(0);
        auto dstOrigin = output->host<float>() + batchIndex * output->stride(0);

        if (tileCount >= threadNumber) {
            MNN_CONCURRENCY_BEGIN(tId, threadNumber) {
                tFunction((int)tId, (int)tId, threadNumber, tileCount / threadNumber * threadNumber, srcOrigin, dstOrigin);
            }
            MNN_CONCURRENCY_END();
        }

        if (tileCount % threadNumber != 0) {
            tFunction(0, tileCount / threadNumber * threadNumber, 1, tileCount, srcOrigin, dstOrigin);
        }

        MNN_CONCURRENCY_BEGIN(tId, threadNumber) {
            int channelStep = UP_DIV(dc_4, threadNumber);
            int channelStart = channelStep * tId, channelNum = ALIMIN(channelStep * (tId + 1), dc_4) - channelStart;
            if (channelNum > 0) {
                postFunction(dstOrigin + channelStart * outputHeight * outputWidth * outputDepth * 4, mBias->host<float>() + 4 * channelStart, outputWidth * outputHeight * outputDepth, channelNum);
            }
        }
        MNN_CONCURRENCY_END();
    }

    return NO_ERROR;
}

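// bestWinogradUnit: picks the output tile size u for F(u, kernelSize) by comparing the arithmetic
// cost of direct convolution against the Winograd cost (transforms plus the alpha^2 GEMMs), with
// a penalty that grows with alpha^2 to discourage large tiles. Returns 0 when no unit is expected
// to beat direct convolution.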
int ConvolutionWinograd3D::bestWinogradUnit(const Convolution3DCommon *common, const Tensor *inputTensor,
                                          const Tensor *outputTensor, int threadNumber) {
    const int ow = outputTensor->length(4), oh = outputTensor->length(3), oc = outputTensor->length(1);
    auto CONVOLUTION_TILED_NUMBER = MNNGetConvolutionTileNumber();

    int unit2   = UP_DIV(ow * oh, CONVOLUTION_TILED_NUMBER * threadNumber);
    int maxUnit = (int)::sqrtf((float)unit2);
    maxUnit     = std::min(maxUnit, CONVOLUTION_WINOGRAD_MAX_UNIT);
    maxUnit     = std::max(maxUnit, CONVOLUTION_WINOGRAD_MIN_UNIT);

    int ic           = inputTensor->channel();
    auto kernelSize  = (*common->kernels())[1];
    int unit         = CONVOLUTION_WINOGRAD_MIN_UNIT;
    float maxRate    = 0.0f;
    float originCost = (float)ow * oh * (float)ic * oc * kernelSize * kernelSize;
    static std::set<int> supportSu{4, 8};
    for (int u = CONVOLUTION_WINOGRAD_MIN_UNIT; u <= maxUnit; ++u) {
        float su = (float)(u + kernelSize - 1);
        if (supportSu.find(su) == supportSu.end()) {
            continue;
        }
        if (nullptr == WinogradFunction::chooseDestTransform((int)su, u)) {
            continue;
        }
        /* Prefer F(6,3) over F(2,3) only when it is faster by more than about 0.6 (the penalty gap). */
        float penalty = (su * su) / (float)(kernelSize * kernelSize) * 0.12f;
        float winogradCost =
            (2 * su * su * su * ic + su * su * ic * oc + 2 * su * u * u * oc) * (UP_DIV(ow, u) * UP_DIV(oh, u));
        float reduceRate = originCost / winogradCost - penalty;
        // MNN_PRINT("ow=%d, oh=%d, %f, %f, winograd unit:%d\n", ow, oh, winogradCost, reduceRate, u);
        if (reduceRate > maxRate) {
            maxRate = reduceRate;
            unit    = u;
        }
    }
    if (maxRate < 1.0f) {
        return 0;
    }
    return unit;
}

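// canUseWinograd: Winograd requires a square spatial kernel larger than 1, unit dilation and
// unit stride along every axis.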
bool ConvolutionWinograd3D::canUseWinograd(const Convolution3DCommon *common) {
    std::vector<int> kernels;
    for (int kernel: *(common->kernels())) {
        if (kernel <= 1) {
            return false;
        }
        kernels.push_back(kernel);
    }
    if (kernels[1] != kernels[2]) {
        return false;
    }
    for (int dilate: *(common->dilates())) {
        if (dilate != 1) {
            return false;
        }
    }
    for (int stride: *(common->strides())) {
        if (stride != 1) {
            return false;
        }
    }
    return true;
}
} // namespace MNN